hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4747390ffa6d1824d43557a023d590eb857e75
| 22,080
|
py
|
Python
|
qiskit/ignis/verification/randomized_benchmarking/circuits.py
|
hodgestar/qiskit-ignis
|
0e511df442e864cd0e06efcdd1db7b03c011168b
|
[
"Apache-2.0"
] | null | null | null |
qiskit/ignis/verification/randomized_benchmarking/circuits.py
|
hodgestar/qiskit-ignis
|
0e511df442e864cd0e06efcdd1db7b03c011168b
|
[
"Apache-2.0"
] | null | null | null |
qiskit/ignis/verification/randomized_benchmarking/circuits.py
|
hodgestar/qiskit-ignis
|
0e511df442e864cd0e06efcdd1db7b03c011168b
|
[
"Apache-2.0"
] | 1
|
2021-04-01T17:28:33.000Z
|
2021-04-01T17:28:33.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# TODO(mtreinish): Remove these disables when implementation is finished
# pylint: disable=unused-argument,unnecessary-pass
"""
Generates randomized benchmarking sequences
"""
import copy
import numpy as np
import qiskit
from .Clifford import Clifford
from .clifford_utils import CliffordUtils as clutils
from .dihedral import CNOTDihedral
from .dihedral_utils import DihedralUtils as dutils
def handle_length_multiplier(length_multiplier, len_pattern,
                             is_purity=False):
    """
    Validate ``length_multiplier`` and broadcast a scalar value into a
    per-pattern integer vector.

    For purity RB the multiplier must not be a sequence.

    Args:
        length_multiplier: scalar or per-pattern sequence of multipliers
        len_pattern: length of the RB pattern
        is_purity: True only for purity rb (default is False)

    Returns:
        length_multiplier as an integer numpy vector with one entry per
        pattern

    Raises:
        ValueError: on a non-integer, non-positive, or wrong-length
            multiplier, or a sequence multiplier in purity RB
    """
    # Scalar case: broadcast one multiplier per pattern entry.
    if not hasattr(length_multiplier, "__len__"):
        return np.ones(len_pattern, dtype='int') * length_multiplier
    if is_purity:
        raise ValueError(
            "In case of Purity RB the length multiplier should be None")
    if len(length_multiplier) != len_pattern:
        raise ValueError(
            "Length mulitiplier must be the same length as the pattern")
    multipliers = np.array(length_multiplier)
    # Only positive integers are meaningful sequence-length multipliers.
    if multipliers.dtype != 'int' or (multipliers < 1).any():
        raise ValueError("Invalid length multiplier")
    return multipliers
def check_pattern(pattern, is_purity=False):
    """
    Verify that the input RB pattern is valid, i.e. that each qubit
    appears at most once.  For purity RB additionally require that all
    simultaneous sequences have the same dimension (e.g. only 1-qubit
    sequences, or only 2-qubit sequences etc.)

    Args:
        pattern: RB pattern (list of lists of qubit indices)
        is_purity: True only for purity rb (default is False)

    Raises:
        ValueError: if the pattern is not valid

    Return:
        qlist: flat list of all the qubits in the pattern
        maxqubit: the maximum qubit number
        maxdim: the maximal dimension (maximal number of qubits
        in all sequences)
    """
    flat_qubits = [qubit for seq in pattern for qubit in seq]
    seq_dims = [len(seq) for seq in pattern]
    # A duplicated index would mean two simultaneous sequences act on
    # the same qubit.
    _, occurrences = np.unique(np.array(flat_qubits), return_counts=True)
    if (occurrences > 1).any():
        raise ValueError("Invalid pattern. Duplicate qubit index.")
    if is_purity:
        if len(np.unique(seq_dims)) > 1:
            raise ValueError("Invalid pattern for purity RB. \
                All simultaneous sequences should have the \
                same dimension.")
    return flat_qubits, np.max(flat_qubits).item(), np.max(seq_dims)
def calc_xdata(length_vector, length_multiplier):
    """
    Calculate the set of sequence lengths.

    Args:
        length_vector: vector of base sequence lengths
        length_multiplier: per-pattern multipliers

    Returns:
        A 2-D array with one row of scaled lengths per multiplier.
    """
    base_lengths = np.array(length_vector)
    return np.array([base_lengths * mult for mult in length_multiplier])
def randomized_benchmarking_seq(nseeds=1, length_vector=None,
                                rb_pattern=None,
                                length_multiplier=1, seed_offset=0,
                                align_cliffs=False,
                                interleaved_gates=None,
                                is_purity=False,
                                group_gates=None):
    """Get a generic randomized benchmarking sequence
    Args:
        nseeds: number of seeds
        length_vector: 'm' length vector of sequence lengths. Must be in
            ascending order. RB sequences of increasing length grow on top of
            the previous sequences.
        rb_pattern: A list of the form [[i,j],[k],...] which will make
            simultaneous RB sequences where
            Qi,Qj are a 2Q RB sequence and Qk is a 1Q sequence, etc.
            E.g. [[0,3],[2],[1]] would create RB sequences that are
            2Q for Q0/Q3, 1Q for Q1+Q2
            The number of qubits is the sum of the entries.
            For 'regular' RB the qubit_pattern is just [[0]],[[0,1]].
        length_multiplier: if this is an array it scales each rb_sequence by
            the multiplier
        seed_offset: What to start the seeds at (e.g. if we
            want to add more seeds later)
        align_cliffs: If true adds a barrier across all qubits in rb_pattern
            after each set of elements, not necessarily Cliffords
            (note: aligns after each increment of elements including the
            length multiplier so if the multiplier is [1,3] it will barrier
            after 1 element for the first pattern and 3 for the second).
        interleaved_gates: A list of gates of elements that
            will be interleaved (for interleaved randomized benchmarking)
            The length of the list would equal the length of the rb_pattern.
        is_purity: True only for purity rb (default is False)
        group_gates: On which group (or gate set) we perform RB
            (default is the Clifford group)
            '0' or None or 'Clifford': Clifford group
            '1' or 'CNOT-Dihedral' or 'Non-Clifford': CNOT-Dihedral group
    Returns:
        A tuple of different fields depending on inputs. The different fields
        are:
         * ``circuits``: list of lists of circuits for the rb sequences
           (separate list for each seed)
         * ``xdata``: the sequences lengths (with multiplier if applicable)
         * ``circuits_interleaved`` `(only if interleaved_gates is not None)`:
           list of lists of circuits for the interleaved rb sequences
           (separate list for each seed)
         * ``circuits_purity`` `(only if is_purity=True)`:
           list of lists of lists of circuits for purity rb
           (separate list for each seed and each of the 3^n circuits)
         * ``npurity`` `(only if is_purity=True)`:
           the number of purity rb circuits (per seed)
           which equals to 3^n, where n is the dimension
    """
    # Set modules (default is Clifford)
    if group_gates is None or group_gates in ('0',
                                              'Clifford',
                                              'clifford'):
        Gutils = clutils()
        Ggroup = Clifford
        rb_circ_type = 'rb'
        group_gates_type = 0
    # BUG FIX: a missing comma previously concatenated 'NonClifford' and
    # 'CNOTDihedral' into the single literal 'NonCliffordCNOTDihedral',
    # so neither of those two spellings was ever accepted.
    elif group_gates in ('1', 'Non-Clifford',
                         'NonClifford',
                         'CNOTDihedral',
                         'CNOT-Dihedral'):
        Gutils = dutils()
        Ggroup = CNOTDihedral
        rb_circ_type = 'rb_cnotdihedral'
        group_gates_type = 1
    else:
        raise ValueError("Unknown group or set of gates.")
    if rb_pattern is None:
        rb_pattern = [[0]]
    if length_vector is None:
        length_vector = [1, 10, 20]
    qlist_flat, n_q_max, max_dim = check_pattern(rb_pattern, is_purity)
    length_multiplier = handle_length_multiplier(length_multiplier,
                                                 len(rb_pattern),
                                                 is_purity)
    # number of purity rb circuits per seed
    npurity = 3**max_dim
    xdata = calc_xdata(length_vector, length_multiplier)
    pattern_sizes = [len(pat) for pat in rb_pattern]
    max_nrb = np.max(pattern_sizes)
    # load group tables
    group_tables = [[] for _ in range(max_nrb)]
    for rb_num in range(max_nrb):
        group_tables[rb_num] = Gutils.load_tables(rb_num+1)
    # initialization: rb sequences
    circuits = [[] for e in range(nseeds)]
    # initialization: interleaved rb sequences
    circuits_interleaved = [[] for e in range(nseeds)]
    # initialization: non-clifford cnot-dihedral
    # rb sequences
    circuits_cnotdihedral = [[] for e in range(nseeds)]
    # initialization: non-clifford cnot-dihedral
    # interleaved rb sequences
    circuits_cnotdihedral_interleaved = [[] for e in range(nseeds)]
    # initialization: purity rb sequences
    circuits_purity = [[[] for d in range(npurity)]
                       for e in range(nseeds)]
    # go through for each seed
    for seed in range(nseeds):
        qr = qiskit.QuantumRegister(n_q_max+1, 'qr')
        cr = qiskit.ClassicalRegister(len(qlist_flat), 'cr')
        general_circ = qiskit.QuantumCircuit(qr, cr)
        interleaved_circ = qiskit.QuantumCircuit(qr, cr)
        # make sequences for each of the separate sequences in
        # rb_pattern
        Elmnts = []
        for rb_q_num in pattern_sizes:
            Elmnts.append(Ggroup(rb_q_num))
        # Sequences for interleaved rb sequences
        Elmnts_interleaved = []
        for rb_q_num in pattern_sizes:
            Elmnts_interleaved.append(Ggroup(rb_q_num))
        # go through and add elements to RB sequences
        length_index = 0
        for elmnts_index in range(length_vector[-1]):
            for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
                for _ in range(length_multiplier[rb_pattern_index]):
                    new_elmnt_gatelist = Gutils.random_gates(
                        rb_q_num)
                    Elmnts[rb_pattern_index] = Gutils.compose_gates(
                        Elmnts[rb_pattern_index], new_elmnt_gatelist)
                    general_circ += replace_q_indices(
                        get_quantum_circuit(Gutils.gatelist(),
                                            rb_q_num),
                        rb_pattern[rb_pattern_index], qr)
                    # add a barrier
                    general_circ.barrier(
                        *[qr[x] for x in rb_pattern[rb_pattern_index]])
                    # interleaved rb sequences
                    if interleaved_gates is not None:
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                new_elmnt_gatelist)
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(),
                                                rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                interleaved_gates[rb_pattern_index])
                        # add a barrier - interleaved rb
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(),
                                                rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        # add a barrier - interleaved rb
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])
            if align_cliffs:
                # if align at a barrier across all patterns
                general_circ.barrier(
                    *[qr[x] for x in qlist_flat])
                # align for interleaved rb
                if interleaved_gates is not None:
                    interleaved_circ.barrier(
                        *[qr[x] for x in qlist_flat])
            # if the number of elements matches one of the sequence lengths
            # then calculate the inverse and produce the circuit
            if (elmnts_index+1) == length_vector[length_index]:
                # circ for rb:
                circ = qiskit.QuantumCircuit(qr, cr)
                circ += general_circ
                # circ_interleaved for interleaved rb:
                circ_interleaved = qiskit.QuantumCircuit(qr, cr)
                circ_interleaved += interleaved_circ
                for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
                    inv_key = Gutils.find_key(Elmnts[rb_pattern_index],
                                              rb_q_num)
                    inv_circuit = Gutils.find_inverse_gates(
                        rb_q_num,
                        group_tables[rb_q_num-1][inv_key])
                    circ += replace_q_indices(
                        get_quantum_circuit(inv_circuit, rb_q_num),
                        rb_pattern[rb_pattern_index], qr)
                    # calculate the inverse and produce the circuit
                    # for interleaved rb
                    if interleaved_gates is not None:
                        inv_key = Gutils.find_key(Elmnts_interleaved
                                                  [rb_pattern_index],
                                                  rb_q_num)
                        inv_circuit = Gutils.find_inverse_gates(
                            rb_q_num,
                            group_tables[rb_q_num - 1][inv_key])
                        circ_interleaved += replace_q_indices(
                            get_quantum_circuit(inv_circuit, rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                # Circuits for purity rb
                if is_purity:
                    circ_purity = [[] for d in range(npurity)]
                    for d in range(npurity):
                        circ_purity[d] = qiskit.QuantumCircuit(qr, cr)
                        circ_purity[d] += circ
                        circ_purity[d].name = rb_circ_type + '_purity_'
                        ind_d = d
                        purity_qubit_num = 0
                        while True:
                            # Per each qubit:
                            # do nothing or rx(pi/2) or ry(pi/2)
                            purity_qubit_rot = np.mod(ind_d, 3)
                            ind_d = np.floor_divide(ind_d, 3)
                            if purity_qubit_rot == 0:  # do nothing
                                circ_purity[d].name += 'Z'
                            if purity_qubit_rot == 1:  # add rx(pi/2)
                                for pat in rb_pattern:
                                    circ_purity[d].rx(np.pi / 2,
                                                      qr[pat[
                                                          purity_qubit_num]])
                                circ_purity[d].name += 'X'
                            if purity_qubit_rot == 2:  # add ry(pi/2)
                                for pat in rb_pattern:
                                    circ_purity[d].ry(np.pi / 2,
                                                      qr[pat[
                                                          purity_qubit_num]])
                                circ_purity[d].name += 'Y'
                            purity_qubit_num = purity_qubit_num + 1
                            if ind_d == 0:
                                break
                        # padding the circuit name with Z's so that
                        # all circuits will have names of the same length
                        for _ in range(max_dim - purity_qubit_num):
                            circ_purity[d].name += 'Z'
                        # add measurement for purity rb
                        for qind, qb in enumerate(qlist_flat):
                            circ_purity[d].measure(qr[qb], cr[qind])
                        circ_purity[d].name += '_length_%d_seed_%d' \
                                               % (length_index,
                                                  seed + seed_offset)
                # add measurement for Non-Clifford cnot-dihedral rb
                # measure both the ground state |0...0> (circ)
                # and the |+...+> state (cnot-dihedral_circ)
                cnotdihedral_circ = qiskit.QuantumCircuit(qr, cr)
                cnotdihedral_interleaved_circ = qiskit.QuantumCircuit(qr, cr)
                if group_gates_type == 1:
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                    cnotdihedral_circ += circ
                    cnotdihedral_interleaved_circ += circ_interleaved
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                    for qind, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.measure(qr[qb], cr[qind])
                        cnotdihedral_interleaved_circ.measure(qr[qb],
                                                              cr[qind])
                # add measurement for standard rb
                # qubits measure to the c registers as
                # they appear in the pattern
                for qind, qb in enumerate(qlist_flat):
                    circ.measure(qr[qb], cr[qind])
                    # add measurement for interleaved rb
                    circ_interleaved.measure(qr[qb], cr[qind])
                circ.name = \
                    rb_circ_type + '_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)
                circ_interleaved.name = \
                    rb_circ_type + '_interleaved_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)
                if group_gates_type == 1:
                    circ.name = rb_circ_type + '_Z_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    circ_interleaved.name = \
                        rb_circ_type + '_interleaved_Z_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    cnotdihedral_circ.name = \
                        rb_circ_type + '_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    # BUG FIX: added the missing leading underscore so the
                    # name matches the '..._interleaved_Z_...' convention.
                    cnotdihedral_interleaved_circ.name = \
                        rb_circ_type + '_interleaved_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                circuits[seed].append(circ)
                circuits_interleaved[seed].append(circ_interleaved)
                circuits_cnotdihedral[seed].append(cnotdihedral_circ)
                circuits_cnotdihedral_interleaved[seed].append(
                    cnotdihedral_interleaved_circ)
                if is_purity:
                    for d in range(npurity):
                        circuits_purity[seed][d].append(circ_purity[d])
                length_index += 1
    # output of purity rb
    if is_purity:
        return circuits_purity, xdata, npurity
    # output of non-clifford cnot-dihedral interleaved rb
    if interleaved_gates is not None and group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral, circuits_interleaved, \
            circuits_cnotdihedral_interleaved
    # output of interleaved rb
    if interleaved_gates is not None:
        return circuits, xdata, circuits_interleaved
    # output of Non-Clifford cnot-dihedral rb
    if group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral
    # output of standard (simultaneous) rb
    return circuits, xdata
def replace_q_indices(circuit, q_nums, qr):
    """
    Remap a circuit ordered on qubits 0,1,2,... so that qubit 0 becomes
    qr[q_nums[0]], qubit 1 becomes qr[q_nums[1]], and so on.

    Args:
        circuit: circuit to operate on
        q_nums: list of qubit indices
        qr: quantum register on which the remapped circuit is built

    Returns:
        updated circuit
    """
    remapped = qiskit.QuantumCircuit(qr)
    for instr, qargs, cargs in circuit.data:
        mapped_qargs = [qr[q_nums[arg.index]] for arg in qargs]
        # Deep-copy so the new circuit does not share instruction
        # objects with the source circuit.
        remapped.data.append(copy.deepcopy((instr, mapped_qargs, cargs)))
    return remapped
def get_quantum_circuit(gatelist, num_qubits):
    """
    Returns the circuit in the form of a QuantumCircuit object.
    Args:
        gatelist: a list of gates, each encoded as a string
            "<name> [<angle>] <qubit indices...>".
        num_qubits: the number of qubits (dimension).
    Returns:
        A QuantumCircuit object.
    """
    qr = qiskit.QuantumRegister(num_qubits)
    qc = qiskit.QuantumCircuit(qr)
    for op in gatelist:
        split = op.split()
        op_names = [split[0]]
        # temporary correcting the ops name since QuantumCircuit has no
        # attributes 'v' or 'w' yet:
        if op_names == ['v']:
            op_names = ['sdg', 'h']
        elif op_names == ['w']:
            op_names = ['h', 's']
        if op_names == ['u1']:
            # u1 carries an angle as the first token after the name.
            qubits = [qr[int(x)] for x in split[2:]]
            theta = float(split[1])
        else:
            qubits = [qr[int(x)] for x in split[1:]]
        for sub_op in op_names:
            # SECURITY/BUG FIX: use getattr instead of eval so a gate
            # name in the input string cannot execute arbitrary code;
            # the attribute lookup itself is unchanged.
            operation = getattr(qiskit.QuantumCircuit, sub_op)
            if sub_op == 'u1':
                operation(qc, theta, *qubits)
            else:
                operation(qc, *qubits)
    return qc
| 42.217973
| 79
| 0.550634
|
import copy
import numpy as np
import qiskit
from .Clifford import Clifford
from .clifford_utils import CliffordUtils as clutils
from .dihedral import CNOTDihedral
from .dihedral_utils import DihedralUtils as dutils
def handle_length_multiplier(length_multiplier, len_pattern,
                             is_purity=False):
    """
    Check validity of length_multiplier and broadcast a scalar into a
    per-pattern integer vector. In case of purity rb the length
    multiplier should be None (i.e. not a sequence).
    Args:
        length_multiplier: scalar or per-pattern sequence of multipliers
        len_pattern: length of the RB pattern
        is_purity: True only for purity rb (default is False)
    Returns:
        length_multiplier as an integer numpy vector
    Raises:
        ValueError: if the multiplier is invalid
    """
    if hasattr(length_multiplier, "__len__"):
        if is_purity:
            raise ValueError(
                "In case of Purity RB the length multiplier should be None")
        if len(length_multiplier) != len_pattern:
            raise ValueError(
                "Length mulitiplier must be the same length as the pattern")
        length_multiplier = np.array(length_multiplier)
        # only positive integers are valid multipliers
        if length_multiplier.dtype != 'int' or (length_multiplier < 1).any():
            raise ValueError("Invalid length multiplier")
    else:
        length_multiplier = np.ones(len_pattern, dtype='int')*length_multiplier
    return length_multiplier
def check_pattern(pattern, is_purity=False):
    """
    Verify that each qubit appears at most once in the RB pattern.
    In case of purity rb, also check that all simultaneous sequences
    have the same dimension.
    Args:
        pattern: RB pattern (list of lists of qubit indices)
        is_purity: True only for purity rb (default is False)
    Raises:
        ValueError: if the pattern is not valid
    Returns:
        tuple: (flat list of all qubits, maximum qubit index,
        maximal number of qubits over all sequences)
    """
    pattern_flat = []
    pattern_dim = []
    for pat in pattern:
        pattern_flat.extend(pat)
        pattern_dim.append(len(pat))
    # any count > 1 means a qubit index is duplicated
    _, uni_counts = np.unique(np.array(pattern_flat), return_counts=True)
    if (uni_counts > 1).any():
        raise ValueError("Invalid pattern. Duplicate qubit index.")
    dim_distinct = np.unique(pattern_dim)
    if is_purity:
        if len(dim_distinct) > 1:
            raise ValueError("Invalid pattern for purity RB. \
                All simultaneous sequences should have the \
                same dimension.")
    return pattern_flat, np.max(pattern_flat).item(), np.max(pattern_dim)
def calc_xdata(length_vector, length_multiplier):
    """
    Calculate the set of sequence lengths: one row per multiplier,
    where each row is length_vector scaled by that multiplier.
    Args:
        length_vector: vector of base sequence lengths
        length_multiplier: per-pattern multipliers
    Returns:
        A 2-D numpy array of sequence lengths
    """
    xdata = []
    for mult in length_multiplier:
        xdata.append(np.array(length_vector)*mult)
    return np.array(xdata)
def randomized_benchmarking_seq(nseeds=1, length_vector=None,
                                rb_pattern=None,
                                length_multiplier=1, seed_offset=0,
                                align_cliffs=False,
                                interleaved_gates=None,
                                is_purity=False,
                                group_gates=None):
    """Get a generic randomized benchmarking sequence.

    Args:
        nseeds: number of seeds
        length_vector: vector of RB sequence lengths, ascending order
        rb_pattern: list of qubit-index lists for simultaneous RB,
            e.g. [[0,3],[2],[1]]; default [[0]]
        length_multiplier: scalar or per-pattern scaling of sequence lengths
        seed_offset: offset added to seed numbers in circuit names
        align_cliffs: if True, add a barrier across all pattern qubits
            after each set of elements
        interleaved_gates: gates to interleave per pattern (interleaved RB)
            or None
        is_purity: True only for purity rb (default is False)
        group_gates: '0'/None/'Clifford' for the Clifford group;
            '1'/'Non-Clifford'/'NonClifford'/'CNOTDihedral'/'CNOT-Dihedral'
            for the CNOT-Dihedral group

    Returns:
        (circuits, xdata), extended with interleaved / cnot-dihedral /
        purity circuit lists depending on the options.
    """
    # BUG FIX: a missing comma previously concatenated 'NonClifford' and
    # 'CNOTDihedral' into one string, so neither spelling was accepted.
    if group_gates is None or group_gates in ('0',
                                              'Clifford',
                                              'clifford'):
        Gutils = clutils()
        Ggroup = Clifford
        rb_circ_type = 'rb'
        group_gates_type = 0
    elif group_gates in ('1', 'Non-Clifford',
                         'NonClifford',
                         'CNOTDihedral',
                         'CNOT-Dihedral'):
        Gutils = dutils()
        Ggroup = CNOTDihedral
        rb_circ_type = 'rb_cnotdihedral'
        group_gates_type = 1
    else:
        raise ValueError("Unknown group or set of gates.")
    if rb_pattern is None:
        rb_pattern = [[0]]
    if length_vector is None:
        length_vector = [1, 10, 20]
    qlist_flat, n_q_max, max_dim = check_pattern(rb_pattern, is_purity)
    length_multiplier = handle_length_multiplier(length_multiplier,
                                                 len(rb_pattern),
                                                 is_purity)
    # number of purity rb circuits per seed
    npurity = 3**max_dim
    xdata = calc_xdata(length_vector, length_multiplier)
    pattern_sizes = [len(pat) for pat in rb_pattern]
    max_nrb = np.max(pattern_sizes)
    # load group tables
    group_tables = [[] for _ in range(max_nrb)]
    for rb_num in range(max_nrb):
        group_tables[rb_num] = Gutils.load_tables(rb_num+1)
    circuits = [[] for e in range(nseeds)]
    circuits_interleaved = [[] for e in range(nseeds)]
    circuits_cnotdihedral = [[] for e in range(nseeds)]
    circuits_cnotdihedral_interleaved = [[] for e in range(nseeds)]
    circuits_purity = [[[] for d in range(npurity)]
                       for e in range(nseeds)]
    for seed in range(nseeds):
        qr = qiskit.QuantumRegister(n_q_max+1, 'qr')
        cr = qiskit.ClassicalRegister(len(qlist_flat), 'cr')
        general_circ = qiskit.QuantumCircuit(qr, cr)
        interleaved_circ = qiskit.QuantumCircuit(qr, cr)
        # one group element accumulator per simultaneous sequence
        Elmnts = []
        for rb_q_num in pattern_sizes:
            Elmnts.append(Ggroup(rb_q_num))
        Elmnts_interleaved = []
        for rb_q_num in pattern_sizes:
            Elmnts_interleaved.append(Ggroup(rb_q_num))
        length_index = 0
        for elmnts_index in range(length_vector[-1]):
            for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
                for _ in range(length_multiplier[rb_pattern_index]):
                    new_elmnt_gatelist = Gutils.random_gates(
                        rb_q_num)
                    Elmnts[rb_pattern_index] = Gutils.compose_gates(
                        Elmnts[rb_pattern_index], new_elmnt_gatelist)
                    general_circ += replace_q_indices(
                        get_quantum_circuit(Gutils.gatelist(),
                                            rb_q_num),
                        rb_pattern[rb_pattern_index], qr)
                    general_circ.barrier(
                        *[qr[x] for x in rb_pattern[rb_pattern_index]])
                    if interleaved_gates is not None:
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                new_elmnt_gatelist)
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(),
                                                rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                interleaved_gates[rb_pattern_index])
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(),
                                                rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])
            if align_cliffs:
                general_circ.barrier(
                    *[qr[x] for x in qlist_flat])
                if interleaved_gates is not None:
                    interleaved_circ.barrier(
                        *[qr[x] for x in qlist_flat])
            # when a sequence length is reached, append the inverse and
            # emit the measured circuit
            if (elmnts_index+1) == length_vector[length_index]:
                circ = qiskit.QuantumCircuit(qr, cr)
                circ += general_circ
                circ_interleaved = qiskit.QuantumCircuit(qr, cr)
                circ_interleaved += interleaved_circ
                for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
                    inv_key = Gutils.find_key(Elmnts[rb_pattern_index],
                                              rb_q_num)
                    inv_circuit = Gutils.find_inverse_gates(
                        rb_q_num,
                        group_tables[rb_q_num-1][inv_key])
                    circ += replace_q_indices(
                        get_quantum_circuit(inv_circuit, rb_q_num),
                        rb_pattern[rb_pattern_index], qr)
                    if interleaved_gates is not None:
                        inv_key = Gutils.find_key(Elmnts_interleaved
                                                  [rb_pattern_index],
                                                  rb_q_num)
                        inv_circuit = Gutils.find_inverse_gates(
                            rb_q_num,
                            group_tables[rb_q_num - 1][inv_key])
                        circ_interleaved += replace_q_indices(
                            get_quantum_circuit(inv_circuit, rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                if is_purity:
                    circ_purity = [[] for d in range(npurity)]
                    for d in range(npurity):
                        circ_purity[d] = qiskit.QuantumCircuit(qr, cr)
                        circ_purity[d] += circ
                        circ_purity[d].name = rb_circ_type + '_purity_'
                        ind_d = d
                        purity_qubit_num = 0
                        while True:
                            # per qubit: nothing / rx(pi/2) / ry(pi/2)
                            purity_qubit_rot = np.mod(ind_d, 3)
                            ind_d = np.floor_divide(ind_d, 3)
                            if purity_qubit_rot == 0:
                                circ_purity[d].name += 'Z'
                            if purity_qubit_rot == 1:
                                for pat in rb_pattern:
                                    circ_purity[d].rx(np.pi / 2,
                                                      qr[pat[
                                                          purity_qubit_num]])
                                circ_purity[d].name += 'X'
                            if purity_qubit_rot == 2:
                                for pat in rb_pattern:
                                    circ_purity[d].ry(np.pi / 2,
                                                      qr[pat[
                                                          purity_qubit_num]])
                                circ_purity[d].name += 'Y'
                            purity_qubit_num = purity_qubit_num + 1
                            if ind_d == 0:
                                break
                        # all circuits will have names of the same length
                        for _ in range(max_dim - purity_qubit_num):
                            circ_purity[d].name += 'Z'
                        # add measurement for purity rb
                        for qind, qb in enumerate(qlist_flat):
                            circ_purity[d].measure(qr[qb], cr[qind])
                        circ_purity[d].name += '_length_%d_seed_%d' \
                                               % (length_index,
                                                  seed + seed_offset)
                # add measurement for Non-Clifford cnot-dihedral rb
                # measure both the ground state |0...0> (circ)
                # and the |+...+> state (cnot-dihedral_circ)
                cnotdihedral_circ = qiskit.QuantumCircuit(qr, cr)
                cnotdihedral_interleaved_circ = qiskit.QuantumCircuit(qr, cr)
                if group_gates_type == 1:
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                    cnotdihedral_circ += circ
                    cnotdihedral_interleaved_circ += circ_interleaved
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                    for qind, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.measure(qr[qb], cr[qind])
                        cnotdihedral_interleaved_circ.measure(qr[qb],
                                                              cr[qind])
                # add measurement for standard rb
                # qubits measure to the c registers as
                # they appear in the pattern
                for qind, qb in enumerate(qlist_flat):
                    circ.measure(qr[qb], cr[qind])
                    # add measurement for interleaved rb
                    circ_interleaved.measure(qr[qb], cr[qind])
                circ.name = \
                    rb_circ_type + '_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)
                circ_interleaved.name = \
                    rb_circ_type + '_interleaved_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)
                if group_gates_type == 1:
                    circ.name = rb_circ_type + '_Z_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    circ_interleaved.name = \
                        rb_circ_type + '_interleaved_Z_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    cnotdihedral_circ.name = \
                        rb_circ_type + '_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    # BUG FIX: added missing underscore to match the
                    # '..._interleaved_Z_...' naming convention.
                    cnotdihedral_interleaved_circ.name = \
                        rb_circ_type + '_interleaved_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                circuits[seed].append(circ)
                circuits_interleaved[seed].append(circ_interleaved)
                circuits_cnotdihedral[seed].append(cnotdihedral_circ)
                circuits_cnotdihedral_interleaved[seed].append(
                    cnotdihedral_interleaved_circ)
                if is_purity:
                    for d in range(npurity):
                        circuits_purity[seed][d].append(circ_purity[d])
                length_index += 1
    # output of purity rb
    if is_purity:
        return circuits_purity, xdata, npurity
    # output of non-clifford cnot-dihedral interleaved rb
    if interleaved_gates is not None and group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral, circuits_interleaved, \
            circuits_cnotdihedral_interleaved
    # output of interleaved rb
    if interleaved_gates is not None:
        return circuits, xdata, circuits_interleaved
    # output of Non-Clifford cnot-dihedral rb
    if group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral
    # output of standard (simultaneous) rb
    return circuits, xdata
def replace_q_indices(circuit, q_nums, qr):
    """
    Remap qubits 0,1,2,... of `circuit` onto qr[q_nums[0]],
    qr[q_nums[1]], ...
    Args:
        circuit: circuit to operate on
        q_nums: list of qubit indices
        qr: quantum register for the remapped circuit
    Returns:
        updated circuit
    """
    new_circuit = qiskit.QuantumCircuit(qr)
    for instr, qargs, cargs in circuit.data:
        new_qargs = [
            qr[q_nums[x]] for x in [arg.index for arg in qargs]]
        # deep-copy so instructions are not shared with the source circuit
        new_op = copy.deepcopy((instr, new_qargs, cargs))
        new_circuit.data.append(new_op)
    return new_circuit
def get_quantum_circuit(gatelist, num_qubits):
    """
    Return the circuit in the form of a QuantumCircuit object.
    Args:
        gatelist: list of gates encoded as strings
            "<name> [<angle>] <qubit indices...>".
        num_qubits: the number of qubits (dimension).
    Returns:
        A QuantumCircuit object.
    """
    qr = qiskit.QuantumRegister(num_qubits)
    qc = qiskit.QuantumCircuit(qr)
    for op in gatelist:
        split = op.split()
        op_names = [split[0]]
        # temporary correcting the ops name since QuantumCircuit has no
        # attributes 'v' or 'w' yet:
        if op_names == ['v']:
            op_names = ['sdg', 'h']
        elif op_names == ['w']:
            op_names = ['h', 's']
        if op_names == ['u1']:
            # u1 carries an angle as the first token after the name
            qubits = [qr[int(x)] for x in split[2:]]
            theta = float(split[1])
        else:
            qubits = [qr[int(x)] for x in split[1:]]
        for sub_op in op_names:
            # SECURITY/BUG FIX: getattr instead of eval — same attribute
            # lookup, without executing arbitrary code from the gate name.
            operation = getattr(qiskit.QuantumCircuit, sub_op)
            if sub_op == 'u1':
                operation(qc, theta, *qubits)
            else:
                operation(qc, *qubits)
    return qc
| true
| true
|
1c4748aa711a339da4d0853a24e1a562118a999c
| 1,347
|
py
|
Python
|
bokchoy/utils/log.py
|
ulule/bokchoy
|
58afaf325ce275edf5c4a955379afb1cc5eb5de3
|
[
"MIT"
] | null | null | null |
bokchoy/utils/log.py
|
ulule/bokchoy
|
58afaf325ce275edf5c4a955379afb1cc5eb5de3
|
[
"MIT"
] | null | null | null |
bokchoy/utils/log.py
|
ulule/bokchoy
|
58afaf325ce275edf5c4a955379afb1cc5eb5de3
|
[
"MIT"
] | null | null | null |
import six
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""
    def emit(self, record):
        # Intentionally a no-op: swallow the record.
        pass
def logger_isa(l, p, max=1000):
    """Return True when logger ``p`` equals ``l`` or appears in the
    parent chain of ``l``.

    Walks at most ``max`` parents; raises RuntimeError on a recursive
    hierarchy or when the chain exceeds ``max`` steps.
    """
    current, visited = l, set()
    for _ in range(max):
        if current == p:
            return True
        if current in visited:
            raise RuntimeError(
                'Logger {0!r} parents recursive'.format(l),
            )
        visited.add(current)
        current = current.parent
        if not current:
            # Reached the top of the hierarchy without finding ``p``.
            return False
    raise RuntimeError('Logger hierarchy exceeds {0}'.format(max))  # pragma: no cover
def _get_logger(logger):
    """Resolve ``logger`` (a name or a Logger instance) to a Logger and
    make sure it has at least one handler attached."""
    resolved = (logging.getLogger(logger)
                if isinstance(logger, six.string_types) else logger)
    if not resolved.handlers:
        # Attach a no-op handler so records are never reported as unhandled.
        resolved.addHandler(NullHandler())
    return resolved
def get_logger(name):
    """Return the logger for ``name``, reparented under the package base
    logger unless it is the base logger, already lives directly under the
    root, or is already in the base logger's hierarchy."""
    lgr = _get_logger(name)
    if logging.root not in (lgr, lgr.parent) and lgr is not base_logger:
        if not logger_isa(lgr, base_logger):  # pragma: no cover
            lgr.parent = base_logger
    return lgr
# Package-level base logger; ``logger`` is kept as an alias.
base_logger = logger = _get_logger('bokchoy')
# Dedicated child loggers for task and worker output.
task_logger = get_logger('bokchoy.task')
worker_logger = get_logger('bokchoy.worker')
def get_task_logger(name):
    """Return the logger for ``name``, guaranteed to be a descendant of
    the package task logger."""
    task_child = get_logger(name)
    if not logger_isa(task_child, task_logger):
        task_child.parent = task_logger
    return task_child
| 22.081967
| 70
| 0.603563
|
import six
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""
    def emit(self, record):
        # Intentionally a no-op: swallow the record.
        pass
def logger_isa(l, p, max=1000):
    """Return True if logger ``p`` equals ``l`` or is an ancestor of it,
    walking at most ``max`` parents and guarding against cycles.
    """
    this, seen = l, set()
    for _ in range(max):
        if this == p:
            return True
        else:
            # Revisiting a logger means the parent chain is cyclic.
            if this in seen:
                raise RuntimeError(
                    'Logger {0!r} parents recursive'.format(l),
                )
            seen.add(this)
            this = this.parent
            if not this:
                break
    else:
        # Loop exhausted without reaching the top: chain too long.
        raise RuntimeError('Logger hierarchy exceeds {0}'.format(max))
    return False
def _get_logger(logger):
    """Resolve ``logger`` (name or Logger) to a Logger instance and
    make sure it has at least one handler attached."""
    if isinstance(logger, six.string_types):
        logger = logging.getLogger(logger)
    if not logger.handlers:
        # No-op handler avoids "no handlers could be found" warnings.
        logger.addHandler(NullHandler())
    return logger
def get_logger(name):
    """Return the logger for ``name``, reparented under the package base
    logger unless it already belongs to that hierarchy (or is the root /
    base logger itself)."""
    l = _get_logger(name)
    if logging.root not in (l, l.parent) and l is not base_logger:
        if not logger_isa(l, base_logger):
            l.parent = base_logger
    return l
# Package-level base logger; ``logger`` is kept as an alias.
base_logger = logger = _get_logger('bokchoy')
# Dedicated child loggers for task and worker output.
task_logger = get_logger('bokchoy.task')
worker_logger = get_logger('bokchoy.worker')
def get_task_logger(name):
    """Return the logger for ``name``, guaranteed to be a descendant of
    the package task logger."""
    logger = get_logger(name)
    if not logger_isa(logger, task_logger):
        logger.parent = task_logger
    return logger
| true
| true
|
1c474bb0722209c98d256697379ddc9a21064447
| 14,683
|
py
|
Python
|
salt/cloud/clouds/vultrpy.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
salt/cloud/clouds/vultrpy.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/cloud/clouds/vultrpy.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
'''
Vultr Cloud Module using python-vultr bindings
==============================================
.. versionadded:: 2016.3.0
The Vultr cloud module is used to control access to the Vultr VPS system.
Use of this module only requires the ``api_key`` parameter.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/vultr.conf``:
.. code-block:: yaml
my-vultr-config:
# Vultr account api key
api_key: <supersecretapi_key>
driver: vultr
Set up the cloud profile at ``/etc/salt/cloud.profiles`` or
``/etc/salt/cloud.profiles.d/vultr.conf``:
.. code-block:: yaml
nyc-4gb-4cpu-ubuntu-14-04:
location: 1
provider: my-vultr-config
image: 160
size: 95
enable_private_network: True
This driver also supports Vultr's `startup script` feature. You can list startup
scripts in your account with
.. code-block:: bash
salt-cloud -f list_scripts <name of vultr provider>
That list will include the IDs of the scripts in your account. Thus, if you
have a script called 'setup-networking' with an ID of 493234 you can specify
that startup script in a profile like so:
.. code-block:: yaml
nyc-2gb-1cpu-ubuntu-17-04:
location: 1
provider: my-vultr-config
image: 223
size: 13
startup_script_id: 493234
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode # pylint: disable=E0611
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'vultr'
DETAILS = {}
def __virtual__():
'''
Set up the Vultr functions and check for configurations
'''
if get_configured_provider() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'vultr',
('api_key',)
)
def _cache_provider_details(conn=None):
'''
Provide a place to hang onto results of --list-[locations|sizes|images]
so we don't have to go out to the API and get them every time.
'''
DETAILS['avail_locations'] = {}
DETAILS['avail_sizes'] = {}
DETAILS['avail_images'] = {}
locations = avail_locations(conn)
images = avail_images(conn)
sizes = avail_sizes(conn)
for key, location in six.iteritems(locations):
DETAILS['avail_locations'][location['name']] = location
DETAILS['avail_locations'][key] = location
for key, image in six.iteritems(images):
DETAILS['avail_images'][image['name']] = image
DETAILS['avail_images'][key] = image
for key, vm_size in six.iteritems(sizes):
DETAILS['avail_sizes'][vm_size['name']] = vm_size
DETAILS['avail_sizes'][key] = vm_size
def avail_locations(conn=None):
'''
return available datacenter locations
'''
return _query('regions/list')
def avail_scripts(conn=None):
'''
return available startup scripts
'''
return _query('startupscript/list')
def list_scripts(conn=None, call=None):
'''
return list of Startup Scripts
'''
return avail_scripts()
def avail_sizes(conn=None):
'''
Return available sizes ("plans" in VultrSpeak)
'''
return _query('plans/list')
def avail_images(conn=None):
'''
Return available images
'''
return _query('os/list')
def list_nodes(**kwargs):
'''
Return basic data on nodes
'''
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {}
for prop in 'id', 'image', 'size', 'state', 'private_ips', 'public_ips':
ret[node][prop] = nodes[node][prop]
return ret
def list_nodes_full(**kwargs):
'''
Return all data on nodes
'''
nodes = _query('server/list')
ret = {}
for node in nodes:
name = nodes[node]['label']
ret[name] = nodes[node].copy()
ret[name]['id'] = node
ret[name]['image'] = nodes[node]['os']
ret[name]['size'] = nodes[node]['VPSPLANID']
ret[name]['state'] = nodes[node]['status']
ret[name]['private_ips'] = nodes[node]['internal_ip']
ret[name]['public_ips'] = nodes[node]['main_ip']
return ret
def list_nodes_select(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
return __utils__['cloud.list_nodes_select'](
list_nodes_full(), __opts__['query.selection'], call,
)
def destroy(name):
'''
Remove a node from Vultr
'''
node = show_instance(name, call='action')
params = {'SUBID': node['SUBID']}
result = _query('server/destroy', method='POST', decode=False, data=_urlencode(params))
# The return of a destroy call is empty in the case of a success.
# Errors are only indicated via HTTP status code. Status code 200
# effetively therefore means "success".
if result.get('body') == '' and result.get('text') == '':
return True
return result
def stop(*args, **kwargs):
'''
Execute a "stop" action on a VM
'''
return _query('server/halt')
def start(*args, **kwargs):
'''
Execute a "start" action on a VM
'''
return _query('server/start')
def show_instance(name, call=None):
'''
Show the details from the provider concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
# Find under which cloud service the name is listed, if any
if name not in nodes:
return {}
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
def _lookup_vultrid(which_key, availkey, keyname):
'''
Helper function to retrieve a Vultr ID
'''
if DETAILS == {}:
_cache_provider_details()
which_key = six.text_type(which_key)
try:
return DETAILS[availkey][which_key][keyname]
except KeyError:
return False
def create(vm_):
'''
Create a single VM from a data dict
'''
if 'driver' not in vm_:
vm_['driver'] = vm_['provider']
private_networking = config.get_cloud_config_value(
'enable_private_network', vm_, __opts__, search_global=False, default=False,
)
startup_script = config.get_cloud_config_value(
'startup_script_id', vm_, __opts__, search_global=False, default=None,
)
if startup_script and str(startup_script) not in avail_scripts():
log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script))
return False
if private_networking is not None:
if not isinstance(private_networking, bool):
raise SaltCloudConfigError("'private_networking' should be a boolean value.")
if private_networking is True:
enable_private_network = 'yes'
else:
enable_private_network = 'no'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
if not osid:
log.error('Vultr does not have an image with id or name %s', vm_['image'])
return False
vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
if not vpsplanid:
log.error('Vultr does not have a size with id or name %s', vm_['size'])
return False
dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
if not dcid:
log.error('Vultr does not have a location with id or name %s', vm_['location'])
return False
kwargs = {
'label': vm_['name'],
'OSID': osid,
'VPSPLANID': vpsplanid,
'DCID': dcid,
'hostname': vm_['name'],
'enable_private_network': enable_private_network,
}
if startup_script:
kwargs['SCRIPTID'] = startup_script
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
try:
data = _query('server/create', method='POST', data=_urlencode(kwargs))
if int(data.get('status', '200')) >= 300:
log.error(
'Error creating %s on Vultr\n\n'
'Vultr API returned %s\n', vm_['name'], data
)
log.error('Status 412 may mean that you are requesting an\n'
'invalid location, image, or size.')
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
except Exception as exc: # pylint: disable=broad-except
log.error(
'Error creating %s on Vultr\n\n'
'The following exception was thrown when trying to '
'run the initial deployment:\n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
def wait_for_hostname():
'''
Wait for the IP address to become available
'''
data = show_instance(vm_['name'], call='action')
main_ip = six.text_type(data.get('main_ip', '0'))
if main_ip.startswith('0'):
time.sleep(3)
return False
return data['main_ip']
def wait_for_default_password():
'''
Wait for the IP address to become available
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for default password")
# pprint.pprint(data)
if six.text_type(data.get('default_password', '')) == '':
time.sleep(1)
return False
return data['default_password']
def wait_for_status():
'''
Wait for the IP address to become available
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for status normal")
# pprint.pprint(data)
if six.text_type(data.get('status', '')) != 'active':
time.sleep(1)
return False
return data['default_password']
def wait_for_server_state():
'''
Wait for the IP address to become available
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for server state ok")
# pprint.pprint(data)
if six.text_type(data.get('server_state', '')) != 'ok':
time.sleep(1)
return False
return data['default_password']
vm_['ssh_host'] = __utils__['cloud.wait_for_fun'](
wait_for_hostname,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
vm_['password'] = __utils__['cloud.wait_for_fun'](
wait_for_default_password,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_status,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_server_state,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__opts__['hard_timeout'] = config.get_cloud_config_value(
'hard_timeout',
get_configured_provider(),
__opts__,
search_global=False,
default=None,
)
# Bootstrap
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(show_instance(vm_['name'], call='action'))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _query(path, method='GET', data=None, params=None, header_dict=None, decode=True):
'''
Perform a query directly against the Vultr REST API
'''
api_key = config.get_cloud_config_value(
'api_key',
get_configured_provider(),
__opts__,
search_global=False,
)
management_host = config.get_cloud_config_value(
'management_host',
get_configured_provider(),
__opts__,
search_global=False,
default='api.vultr.com'
)
url = 'https://{management_host}/v1/{path}?api_key={api_key}'.format(
management_host=management_host,
path=path,
api_key=api_key,
)
if header_dict is None:
header_dict = {}
result = __utils__['http.query'](
url,
method=method,
params=params,
data=data,
header_dict=header_dict,
port=443,
text=True,
decode=decode,
decode_type='json',
hide_fields=['api_key'],
opts=__opts__,
)
if 'dict' in result:
return result['dict']
return result
| 28.236538
| 105
| 0.60914
|
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
import time
import salt.config as config
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudSystemExit
)
log = logging.getLogger(__name__)
__virtualname__ = 'vultr'
DETAILS = {}
def __virtual__():
if get_configured_provider() is False:
return False
return __virtualname__
def get_configured_provider():
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'vultr',
('api_key',)
)
def _cache_provider_details(conn=None):
DETAILS['avail_locations'] = {}
DETAILS['avail_sizes'] = {}
DETAILS['avail_images'] = {}
locations = avail_locations(conn)
images = avail_images(conn)
sizes = avail_sizes(conn)
for key, location in six.iteritems(locations):
DETAILS['avail_locations'][location['name']] = location
DETAILS['avail_locations'][key] = location
for key, image in six.iteritems(images):
DETAILS['avail_images'][image['name']] = image
DETAILS['avail_images'][key] = image
for key, vm_size in six.iteritems(sizes):
DETAILS['avail_sizes'][vm_size['name']] = vm_size
DETAILS['avail_sizes'][key] = vm_size
def avail_locations(conn=None):
return _query('regions/list')
def avail_scripts(conn=None):
return _query('startupscript/list')
def list_scripts(conn=None, call=None):
return avail_scripts()
def avail_sizes(conn=None):
return _query('plans/list')
def avail_images(conn=None):
return _query('os/list')
def list_nodes(**kwargs):
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {}
for prop in 'id', 'image', 'size', 'state', 'private_ips', 'public_ips':
ret[node][prop] = nodes[node][prop]
return ret
def list_nodes_full(**kwargs):
nodes = _query('server/list')
ret = {}
for node in nodes:
name = nodes[node]['label']
ret[name] = nodes[node].copy()
ret[name]['id'] = node
ret[name]['image'] = nodes[node]['os']
ret[name]['size'] = nodes[node]['VPSPLANID']
ret[name]['state'] = nodes[node]['status']
ret[name]['private_ips'] = nodes[node]['internal_ip']
ret[name]['public_ips'] = nodes[node]['main_ip']
return ret
def list_nodes_select(conn=None, call=None):
return __utils__['cloud.list_nodes_select'](
list_nodes_full(), __opts__['query.selection'], call,
)
def destroy(name):
node = show_instance(name, call='action')
params = {'SUBID': node['SUBID']}
result = _query('server/destroy', method='POST', decode=False, data=_urlencode(params))
if result.get('body') == '' and result.get('text') == '':
return True
return result
def stop(*args, **kwargs):
return _query('server/halt')
def start(*args, **kwargs):
return _query('server/start')
def show_instance(name, call=None):
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
if name not in nodes:
return {}
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
def _lookup_vultrid(which_key, availkey, keyname):
if DETAILS == {}:
_cache_provider_details()
which_key = six.text_type(which_key)
try:
return DETAILS[availkey][which_key][keyname]
except KeyError:
return False
def create(vm_):
if 'driver' not in vm_:
vm_['driver'] = vm_['provider']
private_networking = config.get_cloud_config_value(
'enable_private_network', vm_, __opts__, search_global=False, default=False,
)
startup_script = config.get_cloud_config_value(
'startup_script_id', vm_, __opts__, search_global=False, default=None,
)
if startup_script and str(startup_script) not in avail_scripts():
log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script))
return False
if private_networking is not None:
if not isinstance(private_networking, bool):
raise SaltCloudConfigError("'private_networking' should be a boolean value.")
if private_networking is True:
enable_private_network = 'yes'
else:
enable_private_network = 'no'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
if not osid:
log.error('Vultr does not have an image with id or name %s', vm_['image'])
return False
vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
if not vpsplanid:
log.error('Vultr does not have a size with id or name %s', vm_['size'])
return False
dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
if not dcid:
log.error('Vultr does not have a location with id or name %s', vm_['location'])
return False
kwargs = {
'label': vm_['name'],
'OSID': osid,
'VPSPLANID': vpsplanid,
'DCID': dcid,
'hostname': vm_['name'],
'enable_private_network': enable_private_network,
}
if startup_script:
kwargs['SCRIPTID'] = startup_script
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
try:
data = _query('server/create', method='POST', data=_urlencode(kwargs))
if int(data.get('status', '200')) >= 300:
log.error(
'Error creating %s on Vultr\n\n'
'Vultr API returned %s\n', vm_['name'], data
)
log.error('Status 412 may mean that you are requesting an\n'
'invalid location, image, or size.')
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
except Exception as exc:
log.error(
'Error creating %s on Vultr\n\n'
'The following exception was thrown when trying to '
'run the initial deployment:\n%s',
vm_['name'], exc,
exc_info_on_loglevel=logging.DEBUG
)
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
def wait_for_hostname():
data = show_instance(vm_['name'], call='action')
main_ip = six.text_type(data.get('main_ip', '0'))
if main_ip.startswith('0'):
time.sleep(3)
return False
return data['main_ip']
def wait_for_default_password():
data = show_instance(vm_['name'], call='action')
if six.text_type(data.get('default_password', '')) == '':
time.sleep(1)
return False
return data['default_password']
def wait_for_status():
data = show_instance(vm_['name'], call='action')
if six.text_type(data.get('status', '')) != 'active':
time.sleep(1)
return False
return data['default_password']
def wait_for_server_state():
data = show_instance(vm_['name'], call='action')
if six.text_type(data.get('server_state', '')) != 'ok':
time.sleep(1)
return False
return data['default_password']
vm_['ssh_host'] = __utils__['cloud.wait_for_fun'](
wait_for_hostname,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
vm_['password'] = __utils__['cloud.wait_for_fun'](
wait_for_default_password,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_status,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_server_state,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__opts__['hard_timeout'] = config.get_cloud_config_value(
'hard_timeout',
get_configured_provider(),
__opts__,
search_global=False,
default=None,
)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(show_instance(vm_['name'], call='action'))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _query(path, method='GET', data=None, params=None, header_dict=None, decode=True):
api_key = config.get_cloud_config_value(
'api_key',
get_configured_provider(),
__opts__,
search_global=False,
)
management_host = config.get_cloud_config_value(
'management_host',
get_configured_provider(),
__opts__,
search_global=False,
default='api.vultr.com'
)
url = 'https://{management_host}/v1/{path}?api_key={api_key}'.format(
management_host=management_host,
path=path,
api_key=api_key,
)
if header_dict is None:
header_dict = {}
result = __utils__['http.query'](
url,
method=method,
params=params,
data=data,
header_dict=header_dict,
port=443,
text=True,
decode=decode,
decode_type='json',
hide_fields=['api_key'],
opts=__opts__,
)
if 'dict' in result:
return result['dict']
return result
| true
| true
|
1c474bcbae3af33fdc44d18a2aa1c4f0fe87dcdd
| 7,974
|
py
|
Python
|
scripts/process_perspective.py
|
dbckz/crossing-the-line
|
c5debb20e263e03eab9188ce7229753034939964
|
[
"MIT"
] | 1
|
2022-02-14T17:11:30.000Z
|
2022-02-14T17:11:30.000Z
|
scripts/process_perspective.py
|
dbckz/crossing-the-line
|
c5debb20e263e03eab9188ce7229753034939964
|
[
"MIT"
] | null | null | null |
scripts/process_perspective.py
|
dbckz/crossing-the-line
|
c5debb20e263e03eab9188ce7229753034939964
|
[
"MIT"
] | null | null | null |
"""
Script to evaluate tweets against the Perspective API
How it's used:
* Loads "tweets.csv" files according to 'root_path' and 'day_paths' vars
* Sends one tweet at a time to the API
* Sleeps for 1 second between requests due to API rate-limit
* Appends results to perspective_processed_tweets.csv after every 50 tweets, so that not all progress is lost if the
script were to die midway through processing a file
"""
import os
import time
import numpy as np
import pandas as pd
from googleapiclient import discovery
def get_perspective_client(api_key):
return discovery.build(
"commentanalyzer",
"v1alpha1",
developerKey=api_key,
discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
static_discovery=False,
)
def query_perspective(client, text, tweet_id, logfile):
analyze_request = {
'comment': {
'text': text
},
'requestedAttributes': {
'TOXICITY': {},
'SEVERE_TOXICITY': {},
'IDENTITY_ATTACK': {},
'INSULT': {},
'THREAT': {},
'SEXUALLY_EXPLICIT': {}
}
}
try:
response = client.comments().analyze(body=analyze_request).execute()
toxicity_score = response['attributeScores']['TOXICITY']['summaryScore']['value']
severe_toxicity_score = response['attributeScores']['SEVERE_TOXICITY']['summaryScore']['value']
identity_attack_score = response['attributeScores']['IDENTITY_ATTACK']['summaryScore']['value']
insult_score = response['attributeScores']['INSULT']['summaryScore']['value']
threat_score = response['attributeScores']['THREAT']['summaryScore']['value']
sexually_explicit_score = response['attributeScores']['SEXUALLY_EXPLICIT']['summaryScore']['value']
return {
"toxicity_score": toxicity_score,
"severe_toxicity_score": severe_toxicity_score,
"identity_attack_score": identity_attack_score,
"insult_score": insult_score,
"threat_score": threat_score,
"sexually_explicit_score": sexually_explicit_score,
"error": ""
}
except Exception as e:
with open(logfile, 'a') as f:
f.write(f"{time.ctime()}: EXCEPTION. Tweet Id: {tweet_id}: {e}")
f.write('\n')
print(f"EXCEPTION. Tweet Id: {tweet_id}: {e}")
if ('reason' in e.error_details[0] and e.error_details[0]['reason'] == 'RATE_LIMIT_EXCEEDED'):
with open(logfile, 'a') as f:
sleeptime = 70
f.write(f"{time.ctime()}: Sleeping for {sleeptime} seconds")
f.write('\n')
print(f"Sleeping for {sleeptime} seconds")
time.sleep(70)
return query_perspective(client, text, tweet_id, logfile)
return {
"toxicity_score": -1,
"severe_toxicity_score": -1,
"identity_attack_score": -1,
"insult_score": -1,
"threat_score": -1,
"sexually_explicit_score": -1,
"error": "ERROR"
}
def process_tweet(tweet, perspective_client, output_dataframe, logfile):
data = query_perspective(perspective_client, tweet['tweet_text'], tweet['tweet_id'], logfile)
output_dataframe.loc[tweet['tweet_id']] = [
tweet['tweet_id'],
data['toxicity_score'],
data['severe_toxicity_score'],
data['identity_attack_score'],
data['insult_score'],
data['threat_score'],
data['sexually_explicit_score'],
data['error']
]
def process_day(directory):
logfile = directory + "/perspective_error_log.txt"
progress_logfile = directory + "/perspective_progress_log.txt"
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: Starting processing for {directory}")
f.write('\n')
print(f"Starting processing for {directory}")
# Load tweet CSV file
in_csv = directory + "/tweets.csv"
out_csv = directory + "/perspective_processed_tweets.csv"
# Delete existing output file if it exists
if os.path.exists(out_csv):
os.remove(out_csv)
number_lines = sum(1 for row in (open(in_csv)))
chunk_size = 50
tweets_remaining = number_lines - 1
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: Number of tweets: {tweets_remaining}")
f.write('\n')
print(f"Number of tweets: {tweets_remaining}")
for i in range(0, number_lines, chunk_size):
start = time.time()
in_tweets = pd.read_csv(in_csv,
header=0,
nrows=chunk_size, # number of rows to read at each loop
skiprows=range(1, i)) # skip rows that have been read
if (i == 0):
print(f"Loaded first {len(in_tweets.index)} tweets.")
out_tweets = pd.DataFrame(
columns=["tweet_id", "toxicity_score", "severe_toxicity_score", "identity_attack_score", "insult_score",
"threat_score", "sexually_explicit_score", "error"])
# Do processing for tweet
for _, row in in_tweets.iterrows():
process_tweet(row, perspective_client, out_tweets, logfile)
time.sleep(1) # Sleep due to 1 req/second limit on Perspective API
# Ensure tweet_id written as int
new_dtypes = {
"tweet_id": int,
"toxicity_score": np.float64,
"severe_toxicity_score": np.float64,
"identity_attack_score": np.float64,
"insult_score": np.float64,
"threat_score": np.float64,
"sexually_explicit_score": np.float64,
"error": str
}
out_tweets = out_tweets.astype(new_dtypes)
if (i == 0):
out_tweets.to_csv(out_csv,
index=False,
header=True,
mode='a', # append data to csv file
chunksize=chunk_size) # size of data to append for each loop
else:
out_tweets.to_csv(out_csv,
index=False,
header=False,
mode='a', # append data to csv file
chunksize=chunk_size) # size of data to append for each loop
tweets_remaining = tweets_remaining - len(out_tweets.index)
msg = f"Processed {len(out_tweets.index)} tweets in {time.time() - start} seconds. {tweets_remaining} tweets remaining."
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: {msg}")
f.write('\n')
print(msg)
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: Completed processing for {directory}")
f.write('\n')
print(f"Completed processing for {directory}")
if __name__ == "__main__":
root_path = "/Users/davebuckley/Documents/Kings/Dissertation/dissertation/data_collection"
day_paths = [
"/01",
"/02",
"/03",
"/04",
"/05",
"/06",
"/07",
"/08",
"/09",
"/10",
"/11",
"/12",
"/13",
"/14",
"/15",
"/16",
"/17",
"/18",
"/19",
"/20",
"/21",
"/22",
"/23",
"/24",
"/25",
"/26",
"/27",
"/28",
"/29",
"/30",
"/31",
"/32",
"/33",
"/34",
"/35",
"/36"
]
# Auth to Perspective API
print("Connecting to Perspective API")
API_KEY = os.getenv("PERSPECTIVE_API_KEY")
perspective_client = get_perspective_client(API_KEY)
print("Connected to Perspective API")
for day in day_paths:
process_day(root_path + day)
print("All completed")
| 34.37069
| 128
| 0.568096
|
import os
import time
import numpy as np
import pandas as pd
from googleapiclient import discovery
def get_perspective_client(api_key):
return discovery.build(
"commentanalyzer",
"v1alpha1",
developerKey=api_key,
discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
static_discovery=False,
)
def query_perspective(client, text, tweet_id, logfile):
analyze_request = {
'comment': {
'text': text
},
'requestedAttributes': {
'TOXICITY': {},
'SEVERE_TOXICITY': {},
'IDENTITY_ATTACK': {},
'INSULT': {},
'THREAT': {},
'SEXUALLY_EXPLICIT': {}
}
}
try:
response = client.comments().analyze(body=analyze_request).execute()
toxicity_score = response['attributeScores']['TOXICITY']['summaryScore']['value']
severe_toxicity_score = response['attributeScores']['SEVERE_TOXICITY']['summaryScore']['value']
identity_attack_score = response['attributeScores']['IDENTITY_ATTACK']['summaryScore']['value']
insult_score = response['attributeScores']['INSULT']['summaryScore']['value']
threat_score = response['attributeScores']['THREAT']['summaryScore']['value']
sexually_explicit_score = response['attributeScores']['SEXUALLY_EXPLICIT']['summaryScore']['value']
return {
"toxicity_score": toxicity_score,
"severe_toxicity_score": severe_toxicity_score,
"identity_attack_score": identity_attack_score,
"insult_score": insult_score,
"threat_score": threat_score,
"sexually_explicit_score": sexually_explicit_score,
"error": ""
}
except Exception as e:
with open(logfile, 'a') as f:
f.write(f"{time.ctime()}: EXCEPTION. Tweet Id: {tweet_id}: {e}")
f.write('\n')
print(f"EXCEPTION. Tweet Id: {tweet_id}: {e}")
if ('reason' in e.error_details[0] and e.error_details[0]['reason'] == 'RATE_LIMIT_EXCEEDED'):
with open(logfile, 'a') as f:
sleeptime = 70
f.write(f"{time.ctime()}: Sleeping for {sleeptime} seconds")
f.write('\n')
print(f"Sleeping for {sleeptime} seconds")
time.sleep(70)
return query_perspective(client, text, tweet_id, logfile)
return {
"toxicity_score": -1,
"severe_toxicity_score": -1,
"identity_attack_score": -1,
"insult_score": -1,
"threat_score": -1,
"sexually_explicit_score": -1,
"error": "ERROR"
}
def process_tweet(tweet, perspective_client, output_dataframe, logfile):
data = query_perspective(perspective_client, tweet['tweet_text'], tweet['tweet_id'], logfile)
output_dataframe.loc[tweet['tweet_id']] = [
tweet['tweet_id'],
data['toxicity_score'],
data['severe_toxicity_score'],
data['identity_attack_score'],
data['insult_score'],
data['threat_score'],
data['sexually_explicit_score'],
data['error']
]
def process_day(directory):
logfile = directory + "/perspective_error_log.txt"
progress_logfile = directory + "/perspective_progress_log.txt"
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: Starting processing for {directory}")
f.write('\n')
print(f"Starting processing for {directory}")
in_csv = directory + "/tweets.csv"
out_csv = directory + "/perspective_processed_tweets.csv"
if os.path.exists(out_csv):
os.remove(out_csv)
number_lines = sum(1 for row in (open(in_csv)))
chunk_size = 50
tweets_remaining = number_lines - 1
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: Number of tweets: {tweets_remaining}")
f.write('\n')
print(f"Number of tweets: {tweets_remaining}")
for i in range(0, number_lines, chunk_size):
start = time.time()
in_tweets = pd.read_csv(in_csv,
header=0,
nrows=chunk_size,
skiprows=range(1, i))
if (i == 0):
print(f"Loaded first {len(in_tweets.index)} tweets.")
out_tweets = pd.DataFrame(
columns=["tweet_id", "toxicity_score", "severe_toxicity_score", "identity_attack_score", "insult_score",
"threat_score", "sexually_explicit_score", "error"])
for _, row in in_tweets.iterrows():
process_tweet(row, perspective_client, out_tweets, logfile)
time.sleep(1)
new_dtypes = {
"tweet_id": int,
"toxicity_score": np.float64,
"severe_toxicity_score": np.float64,
"identity_attack_score": np.float64,
"insult_score": np.float64,
"threat_score": np.float64,
"sexually_explicit_score": np.float64,
"error": str
}
out_tweets = out_tweets.astype(new_dtypes)
if (i == 0):
out_tweets.to_csv(out_csv,
index=False,
header=True,
mode='a',
chunksize=chunk_size)
else:
out_tweets.to_csv(out_csv,
index=False,
header=False,
mode='a',
chunksize=chunk_size)
tweets_remaining = tweets_remaining - len(out_tweets.index)
msg = f"Processed {len(out_tweets.index)} tweets in {time.time() - start} seconds. {tweets_remaining} tweets remaining."
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: {msg}")
f.write('\n')
print(msg)
with open(progress_logfile, 'a') as f:
f.write(f"{time.ctime()}: Completed processing for {directory}")
f.write('\n')
print(f"Completed processing for {directory}")
if __name__ == "__main__":
root_path = "/Users/davebuckley/Documents/Kings/Dissertation/dissertation/data_collection"
day_paths = [
"/01",
"/02",
"/03",
"/04",
"/05",
"/06",
"/07",
"/08",
"/09",
"/10",
"/11",
"/12",
"/13",
"/14",
"/15",
"/16",
"/17",
"/18",
"/19",
"/20",
"/21",
"/22",
"/23",
"/24",
"/25",
"/26",
"/27",
"/28",
"/29",
"/30",
"/31",
"/32",
"/33",
"/34",
"/35",
"/36"
]
print("Connecting to Perspective API")
API_KEY = os.getenv("PERSPECTIVE_API_KEY")
perspective_client = get_perspective_client(API_KEY)
print("Connected to Perspective API")
for day in day_paths:
process_day(root_path + day)
print("All completed")
| true
| true
|
1c474c7f2acba2c62fabc8f02e4bf556a023e101
| 1,066
|
py
|
Python
|
jesse/indicators/pfe.py
|
leaiannotti/jesse
|
564c54845774891ff3b5a8d3c02cc7cea890ac54
|
[
"MIT"
] | 5
|
2021-05-21T07:39:16.000Z
|
2021-11-17T11:08:41.000Z
|
jesse/indicators/pfe.py
|
leaiannotti/jesse
|
564c54845774891ff3b5a8d3c02cc7cea890ac54
|
[
"MIT"
] | null | null | null |
jesse/indicators/pfe.py
|
leaiannotti/jesse
|
564c54845774891ff3b5a8d3c02cc7cea890ac54
|
[
"MIT"
] | 2
|
2021-05-21T10:14:53.000Z
|
2021-05-27T04:39:51.000Z
|
from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source, slice_candles, same_length
def pfe(candles: np.ndarray, period: int = 10, smoothing: int = 5, source_type: str = "close", sequential: bool = False) -> Union[
    float, np.ndarray]:
    """
    Polarized Fractal Efficiency (PFE)

    :param candles: np.ndarray
    :param period: int - default: 10
    :param smoothing: int - default: 5 (EMA smoothing of the raw PFE)
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    candles = slice_candles(candles, sequential)
    source = get_candle_source(candles, source_type=source_type)
    ln = period - 1
    # BUG FIX: np.diff(source, ln) applies the first difference ln times
    # (an ln-th ORDER difference with binomial weights), not the price
    # change over a lag of ln bars. PFE's numerator needs the simple
    # lag difference source[t] - source[t - ln]. Output length is the
    # same as np.diff(source, ln), so same_length padding is unchanged.
    diff = source[ln:] - source[:-ln] if ln > 0 else source.copy()
    # NOTE(review): classic PFE squares the same lag in both terms of the
    # numerator; here the lag is period-1 while period**2 is added —
    # confirm this asymmetry is intended.
    a = np.sqrt(np.power(diff, 2) + np.power(period, 2))
    # Denominator: summed single-bar path length over the same window.
    b = talib.SUM(np.sqrt(1 + np.power(np.diff(source, 1), 2)), ln)
    pfetmp = 100 * same_length(source, a) / same_length(source, b)
    # Sign by the direction of the lag move, then EMA-smooth.
    res = talib.EMA(np.where(same_length(source, diff) > 0, pfetmp, -pfetmp), smoothing)

    return res if sequential else res[-1]
| 31.352941
| 130
| 0.67167
|
from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source, slice_candles, same_length
def pfe(candles: np.ndarray, period: int = 10, smoothing: int = 5, source_type: str = "close", sequential: bool = False) -> Union[
    float, np.ndarray]:
    """Polarized Fractal Efficiency (PFE), EMA-smoothed.

    :param candles: np.ndarray of candles
    :param period: lookback period - default: 10
    :param smoothing: EMA smoothing period - default: 5
    :param source_type: candle field to use - default: "close"
    :param sequential: return the whole series when True
    :return: float | np.ndarray
    """
    candles = slice_candles(candles, sequential)
    source = get_candle_source(candles, source_type=source_type)
    ln = period - 1
    # NOTE(review): np.diff(source, ln) is the ln-th ORDER difference
    # (first difference applied ln times), not the change over a lag of
    # ln bars - confirm this matches the intended PFE numerator.
    diff = np.diff(source, ln)
    a = np.sqrt(np.power(diff, 2) + np.power(period, 2))
    # denominator: summed single-bar path lengths over ln bars
    b = talib.SUM(np.sqrt(1 + np.power(np.diff(source, 1), 2)), ln)
    pfetmp = 100 * same_length(source, a) / same_length(source, b)
    # sign by the direction of the move, then EMA-smooth
    res = talib.EMA(np.where(same_length(source, diff) > 0, pfetmp, -pfetmp), smoothing)
    return res if sequential else res[-1]
| true
| true
|
1c474d6b5e003a2cec79900ccf7c78c070a40e62
| 24,545
|
py
|
Python
|
lib/model_eval/model_eval_ncnet_adap.py
|
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching
|
b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
|
[
"MIT"
] | 1
|
2021-07-22T05:18:10.000Z
|
2021-07-22T05:18:10.000Z
|
lib/model_eval/model_eval_ncnet_adap.py
|
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching
|
b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
|
[
"MIT"
] | null | null | null |
lib/model_eval/model_eval_ncnet_adap.py
|
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching
|
b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
import numpy as np
import numpy.matlib
import pickle
from lib.torch_util import Softmax1D
from lib.conv4d import Conv4d
from lib.matching_model import CMDTop
from lib.matching_model import unNormMap1D_to_NormMap2D, NormMap2D_to_unNormMap2D
from lib.showPlot import plot_test_map, plot_test_flow, warpImg_fromMap, warpImg_fromMap2, matplotlib_imshow, return_plot_test_map, get_img_from_fig
import torch.nn.functional as F
def featureL2Norm(feature):
    """L2-normalize `feature` along dim 1 (the channel axis).

    A small epsilon is added under the square root so that all-zero
    channels do not cause a division by zero.
    """
    eps = 1e-6
    squared_sum = torch.sum(feature ** 2, 1)
    channel_norm = (squared_sum + eps) ** 0.5
    return feature / channel_norm.unsqueeze(1).expand_as(feature)
class FeatureExtraction(torch.nn.Module):
    """Truncated, optionally frozen torchvision backbone used as the
    feature extractor (supports vgg / resnet101 / resnet101fpn /
    densenet201, cut at `last_layer`)."""

    def __init__(self, train_fe=False, feature_extraction_cnn='resnet101', feature_extraction_model_file='',
                 normalization=False, last_layer='', use_cuda=True):
        super(FeatureExtraction, self).__init__()
        self.normalization = normalization
        self.feature_extraction_cnn = feature_extraction_cnn
        if feature_extraction_cnn == 'vgg':
            self.model = models.vgg16(pretrained=True)
            # keep feature extraction network up to indicated layer
            vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
                                  'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
                                  'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
                                  'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
                                  'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']
            if last_layer == '':
                last_layer = 'pool4'
            last_layer_idx = vgg_feature_layers.index(last_layer)
            self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx + 1])
        # for resnet below
        resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']
        if feature_extraction_cnn == 'resnet101':
            self.model = models.resnet101(pretrained=True)
            if last_layer == '':
                last_layer = 'layer3'
            resnet_module_list = [getattr(self.model, l) for l in resnet_feature_layers]
            last_layer_idx = resnet_feature_layers.index(last_layer)
            self.model = nn.Sequential(*resnet_module_list[:last_layer_idx + 1])
        if feature_extraction_cnn == 'resnet101fpn':
            # NOTE(review): fpn_body is neither imported nor defined in this
            # file, so this branch would raise NameError - confirm its origin.
            if feature_extraction_model_file != '':
                resnet = models.resnet101(pretrained=True)
                # swap stride (2,2) and (1,1) in first layers (PyTorch ResNet is slightly different to caffe2 ResNet)
                # this is required for compatibility with caffe2 models
                resnet.layer2[0].conv1.stride = (2, 2)
                resnet.layer2[0].conv2.stride = (1, 1)
                resnet.layer3[0].conv1.stride = (2, 2)
                resnet.layer3[0].conv2.stride = (1, 1)
                resnet.layer4[0].conv1.stride = (2, 2)
                resnet.layer4[0].conv2.stride = (1, 1)
            else:
                resnet = models.resnet101(pretrained=True)
            resnet_module_list = [getattr(resnet, l) for l in resnet_feature_layers]
            conv_body = nn.Sequential(*resnet_module_list)
            self.model = fpn_body(conv_body,
                                  resnet_feature_layers,
                                  fpn_layers=['layer1', 'layer2', 'layer3'],
                                  normalize=normalization,
                                  hypercols=True)
            if feature_extraction_model_file != '':
                self.model.load_pretrained_weights(feature_extraction_model_file)
        if feature_extraction_cnn == 'densenet201':
            self.model = models.densenet201(pretrained=True)
            # keep feature extraction network up to denseblock3
            # self.model = nn.Sequential(*list(self.model.features.children())[:-3])
            # keep feature extraction network up to transitionlayer2
            self.model = nn.Sequential(*list(self.model.features.children())[:-4])
        if train_fe == False:
            # freeze parameters
            for param in self.model.parameters():
                param.requires_grad = False
        # move to GPU
        if use_cuda:
            self.model = self.model.cuda()

    def forward(self, image_batch):
        """Run the truncated backbone on an image batch."""
        features = self.model(image_batch)
        return features
class adap_layer_feat3(nn.Module):
    """Residual adaptation head: two 5x5 conv + BatchNorm + ReLU blocks
    applied residually on top of 1024-channel backbone features."""

    def __init__(self):
        super(adap_layer_feat3, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(1024),
            nn.ReLU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(1024),
            nn.ReLU()
        )
        GPU_NUM = torch.cuda.current_device()
        device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
        # NOTE(review): label looks copy-pasted from find_correspondence
        print("find_correspondence_gpu:",device)
        use_cuda = torch.cuda.is_available()
        if use_cuda:
            self.conv1.cuda()
            self.conv2.cuda()

    def forward(self, feature):
        # two residual blocks: x + conv1(x), then + conv2(...)
        feature = feature + self.conv1(feature)
        feature = feature + self.conv2(feature)
        return feature
class FeatureCorrelation(torch.nn.Module):
    """Dense correlation between two feature maps.

    shape='3D' yields a (b, h*w, h, w) correlation of B against A;
    shape='4D' yields (b, 1, hA, wA, hB, wB). With `normalization`,
    scores are ReLU'd and channel-wise L2-normalized.
    """

    def __init__(self, shape='3D', normalization=True):
        super(FeatureCorrelation, self).__init__()
        self.normalization = normalization
        self.shape = shape
        self.ReLU = nn.ReLU()

    def forward(self, feature_A, feature_B):
        if self.shape == '3D':
            b, c, h, w = feature_A.size()
            # flatten A with h/w swapped, B in row-major order
            flat_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)
            flat_B = feature_B.view(b, c, h * w).transpose(1, 2)
            # (b, h*w, c) x (b, c, h*w) -> all pairwise dot products
            scores = torch.bmm(flat_B, flat_A)
            # indexed [batch, idx_A = row_A + h*col_A, row_B, col_B]
            correlation_tensor = scores.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
        elif self.shape == '4D':
            b, c, hA, wA = feature_A.size()
            b, c, hB, wB = feature_B.size()
            flat_A = feature_A.view(b, c, hA * wA).transpose(1, 2)  # (b, hA*wA, c)
            flat_B = feature_B.view(b, c, hB * wB)  # (b, c, hB*wB)
            scores = torch.bmm(flat_A, flat_B)
            # indexed [batch, row_A, col_A, row_B, col_B]
            correlation_tensor = scores.view(b, hA, wA, hB, wB).unsqueeze(1)
        if self.normalization:
            correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor))
        return correlation_tensor
class NeighConsensus(torch.nn.Module):
    """Stack of 4D convolutions (Conv4d + ReLU) that filters a 4D
    correlation volume by neighbourhood consensus (NC-Net)."""

    def __init__(self, use_cuda=True, kernel_sizes=[3, 3, 3], channels=[10, 10, 1], symmetric_mode=False):
        super(NeighConsensus, self).__init__()
        self.symmetric_mode = symmetric_mode
        self.kernel_sizes = kernel_sizes
        self.channels = channels
        num_layers = len(kernel_sizes)
        nn_modules = list()
        # build Conv4d -> ReLU pairs; the first layer takes the
        # single-channel correlation volume
        for i in range(num_layers):
            if i == 0:
                ch_in = 1
            else:
                ch_in = channels[i - 1]
            ch_out = channels[i]
            k_size = kernel_sizes[i]
            nn_modules.append(Conv4d(in_channels=ch_in, out_channels=ch_out, kernel_size=k_size, bias=True))
            nn_modules.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*nn_modules)
        if use_cuda:
            self.conv.cuda()

    def forward(self, x):
        if self.symmetric_mode:
            # apply network on the input and its "transpose" (swapping A-B to B-A ordering of the correlation tensor),
            # this second result is "transposed back" to the A-B ordering to match the first result and be able to add together
            x = self.conv(x) + self.conv(x.permute(0, 1, 4, 5, 2, 3)).permute(0, 1, 4, 5, 2, 3)
            # because of the ReLU layers in between linear layers,
            # this operation is different than convolving a single time with the filters+filters^T
            # and therefore it makes sense to do this.
        else:
            x = self.conv(x)
        return x
def MutualMatching(corr4d):
    """Suppress non-mutual matches in a (b, 1, fs1, fs2, fs3, fs4)
    correlation volume.

    Every score is multiplied by its ratio to the best score over all
    source positions and over all target positions, so only mutually-best
    matches stay close to their original magnitude.
    """
    b, ch, fs1, fs2, fs3, fs4 = corr4d.size()
    match_B = corr4d.view(b, fs1 * fs2, fs3, fs4)  # [batch, k_A, i_B, j_B]
    match_A = corr4d.view(b, fs1, fs2, fs3 * fs4)
    # best score per target position / per source position
    best_B = torch.max(match_B, dim=1, keepdim=True)[0]
    best_A = torch.max(match_A, dim=3, keepdim=True)[0]
    eps = 1e-5  # guards division by zero
    ratio_B = (match_B / (best_B + eps)).view(b, 1, fs1, fs2, fs3, fs4)
    ratio_A = (match_A / (best_A + eps)).view(b, 1, fs1, fs2, fs3, fs4)
    # parenthesised product keeps the operation symmetric in A and B
    return corr4d * (ratio_A * ratio_B)
def maxpool4d(corr4d_hres, k_size=4):
    """4D max-pool a high-resolution correlation volume by k_size per axis.

    Returns the pooled volume plus, for each output cell, the four relative
    sub-cell coordinates of the argmax inside its k_size^4 pooling box
    (used later for relocalization).
    """
    slices = []
    # enumerate every (i, j, k, l) phase of the 4D pooling grid
    for i in range(k_size):
        for j in range(k_size):
            for k in range(k_size):
                for l in range(k_size):
                    slices.append(corr4d_hres[:, 0, i::k_size, j::k_size, k::k_size, l::k_size].unsqueeze(0))
    # NOTE(review): each slice is unsqueezed at dim 0 and then concatenated
    # along dim 1, which merges the batch dimension into the candidate axis -
    # looks correct only for batch size 1; confirm.
    slices = torch.cat(tuple(slices), dim=1)
    corr4d, max_idx = torch.max(slices, dim=1, keepdim=True)
    # decode the flat phase index back into (i, j, k, l) digits, base k_size
    # NOTE(review): Tensor.div is true division in modern PyTorch, so these
    # come out as floats (values still exact here) - confirm callers accept
    # float coordinates or switch to rounding_mode='floor'.
    max_l = torch.fmod(max_idx, k_size)
    max_k = torch.fmod(max_idx.sub(max_l).div(k_size), k_size)
    max_j = torch.fmod(max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size), k_size)
    max_i = max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size).sub(max_j).div(k_size)
    # i,j,k,l represent the *relative* coords of the max point in the box of size k_size*k_size*k_size*k_size
    return (corr4d, max_i, max_j, max_k, max_l)
class find_correspondence(nn.Module):
    """Convert a correlation map (b, h*w, h, w) into a dense correspondence
    grid and flow using a soft-argmax with temperature `beta`; falls back
    to discrete argmax when `beta` is None."""

    def __init__(self, feature_H, feature_W, beta, kernel_sigma):
        super(find_correspondence, self).__init__()
        GPU_NUM = torch.cuda.current_device()
        device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
        print("find_correspondence_gpu:",device)
        self.beta = beta  # softmax temperature (None -> discrete argmax)
        self.kernel_sigma = kernel_sigma  # sigma for apply_gaussian_kernel
        # regular grid / [-1,1] normalized
        self.grid_X, self.grid_Y = np.meshgrid(np.linspace(-1, 1, feature_W),
                                               np.linspace(-1, 1, feature_H))  # grid_X & grid_Y : feature_H x feature_W
        self.grid_X = torch.tensor(self.grid_X, dtype=torch.float, requires_grad=False).to(device)
        self.grid_Y = torch.tensor(self.grid_Y, dtype=torch.float, requires_grad=False).to(device)
        # kernels for computing gradients
        self.dx_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 1, 3).expand(1, 2,
                                                                                                                  1,
                                                                                                                  3).to(
            device)
        self.dy_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 3, 1).expand(1, 2,
                                                                                                                  3,
                                                                                                                  1).to(
            device)
        # 1-d indices for generating Gaussian kernels
        self.x = np.linspace(0, feature_W - 1, feature_W)
        self.x = torch.tensor(self.x, dtype=torch.float, requires_grad=False).to(device)
        self.y = np.linspace(0, feature_H - 1, feature_H)
        self.y = torch.tensor(self.y, dtype=torch.float, requires_grad=False).to(device)
        # 1-d indices for kernel-soft-argmax / [-1,1] normalized
        self.x_normal = np.linspace(-1, 1, feature_W)
        self.x_normal = torch.tensor(self.x_normal, dtype=torch.float, requires_grad=False).to(device)
        self.y_normal = np.linspace(-1, 1, feature_H)
        self.y_normal = torch.tensor(self.y_normal, dtype=torch.float, requires_grad=False).to(device)

    def apply_gaussian_kernel(self, corr, sigma=5):
        """Weight `corr` by a Gaussian centred on each spatial argmax."""
        b, hw, h, w = corr.size()
        idx = corr.max(dim=1)[1]  # b x h x w    get maximum value along channel
        idx_y = (idx // w).view(b, 1, 1, h, w).float()
        idx_x = (idx % w).view(b, 1, 1, h, w).float()
        x = self.x.view(1, 1, w, 1, 1).expand(b, 1, w, h, w)
        y = self.y.view(1, h, 1, 1, 1).expand(b, h, 1, h, w)
        gauss_kernel = torch.exp(-((x - idx_x) ** 2 + (y - idx_y) ** 2) / (2 * sigma ** 2))
        gauss_kernel = gauss_kernel.view(b, hw, h, w)
        return gauss_kernel * corr

    def softmax_with_temperature(self, x, beta, d=1):
        """Numerically-stable softmax(beta * x) along dim `d`."""
        M, _ = x.max(dim=d, keepdim=True)
        x = x - M  # subtract maximum value for stability
        exp_x = torch.exp(beta * x)
        exp_x_sum = exp_x.sum(dim=d, keepdim=True)
        return exp_x / exp_x_sum

    def kernel_soft_argmax(self, corr):
        """Expected (x, y) in [-1,1] per target location via soft-argmax."""
        b, _, h, w = corr.size()
        # corr = self.apply_gaussian_kernel(corr, sigma=self.kernel_sigma)
        corr = self.softmax_with_temperature(corr, beta=self.beta, d=1)
        corr = corr.view(-1, h, w, h, w)  # (target hxw) x (source hxw)
        grid_x = corr.sum(dim=1, keepdim=False)  # marginalize to x-coord.
        x_normal = self.x_normal.expand(b, w)
        x_normal = x_normal.view(b, w, 1, 1)
        grid_x = (grid_x * x_normal).sum(dim=1, keepdim=True)  # b x 1 x h x w
        grid_y = corr.sum(dim=2, keepdim=False)  # marginalize to y-coord.
        y_normal = self.y_normal.expand(b, h)
        y_normal = y_normal.view(b, h, 1, 1)
        grid_y = (grid_y * y_normal).sum(dim=1, keepdim=True)  # b x 1 x h x w
        return grid_x, grid_y

    def get_flow_smoothness(self, flow, GT_mask):
        """First-order smoothness |d flow| inside the foreground mask."""
        flow_dx = F.conv2d(F.pad(flow, (1, 1, 0, 0)), self.dx_kernel) / 2  # (padLeft, padRight, padTop, padBottom)
        flow_dy = F.conv2d(F.pad(flow, (0, 0, 1, 1)), self.dy_kernel) / 2  # (padLeft, padRight, padTop, padBottom)
        flow_dx = torch.abs(flow_dx) * GT_mask  # consider foreground regions only
        flow_dy = torch.abs(flow_dy) * GT_mask
        smoothness = torch.cat((flow_dx, flow_dy), 1)
        return smoothness

    def forward(self, corr, GT_mask=None):
        """Return (grid, flow) at test time, or (grid, flow, smoothness)
        when a GT foreground mask is given (training)."""
        b, _, h, w = corr.size()
        grid_X = self.grid_X.expand(b, h, w)  # x coordinates of a regular grid
        grid_X = grid_X.unsqueeze(1)  # b x 1 x h x w
        grid_Y = self.grid_Y.expand(b, h, w)  # y coordinates of a regular grid
        grid_Y = grid_Y.unsqueeze(1)
        if self.beta is not None:
            grid_x, grid_y = self.kernel_soft_argmax(corr)
        else:  # discrete argmax
            _, idx = torch.max(corr, dim=1)
            grid_x = idx % w
            grid_x = (grid_x.float() / (w - 1) - 0.5) * 2
            grid_y = idx // w
            grid_y = (grid_y.float() / (h - 1) - 0.5) * 2
            grid_x = grid_x.unsqueeze(1)  # b x 1 x h x w
            grid_y = grid_y.unsqueeze(1)
        grid = torch.cat((grid_x.permute(0, 2, 3, 1), grid_y.permute(0, 2, 3, 1)),
                         3)
        # 2-channels@3rd-dim, first channel for x / second channel for y
        flow = torch.cat((grid_x - grid_X, grid_y - grid_Y),
                         1)  # 2-channels@1st-dim, first channel for x / second channel for y
        if GT_mask is None:  # test
            return grid.permute(0, 3, 1, 2), flow.permute(0, 3, 1, 2)
        else:  # train
            smoothness = self.get_flow_smoothness(flow, GT_mask)
            return grid, flow, smoothness
class ImMatchNet(nn.Module):
    """End-to-end matching network: backbone feature extraction with a
    trainable adaptation layer, 4D correlation, and neighbourhood-consensus
    filtering (NC-Net style).

    Weights can be restored from a checkpoint whose state-dict keys follow
    the 'FeatureExtraction.* / NeighConsensus.* / adap_layer_feat3.*'
    naming convention.
    """

    def __init__(self,
                 feature_extraction_cnn='resnet101',
                 feature_extraction_last_layer='',
                 feature_extraction_model_file=None,
                 return_correlation=False,
                 ncons_kernel_sizes=[3, 3, 3],
                 ncons_channels=[10, 10, 1],
                 normalize_features=True,
                 train_fe=False,
                 use_cuda=True,
                 relocalization_k_size=0,
                 half_precision=False,
                 checkpoint=None,
                 ):
        super(ImMatchNet, self).__init__()
        # Load checkpoint
        # BUG FIX: `checkpoint is not ''` compared identity against a str
        # literal (unreliable, SyntaxWarning on Python 3.8+); use equality.
        if checkpoint is not None and checkpoint != '':
            print('Loading checkpoint...')
            checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)
            checkpoint['state_dict'] = OrderedDict(
                [(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
            # override relevant parameters with those stored in the checkpoint
            print('Using checkpoint parameters: ')
            ncons_channels = checkpoint['args'].ncons_channels
            print(' ncons_channels: ' + str(ncons_channels))
            ncons_kernel_sizes = checkpoint['args'].ncons_kernel_sizes
            print(' ncons_kernel_sizes: ' + str(ncons_kernel_sizes))
        self.ReLU = nn.ReLU()
        self.use_cuda = use_cuda
        self.normalize_features = normalize_features
        print("self.normalize_features", self.normalize_features)
        self.return_correlation = return_correlation
        self.relocalization_k_size = relocalization_k_size
        self.half_precision = half_precision
        self.FeatureExtraction = FeatureExtraction(train_fe=train_fe,
                                                   feature_extraction_cnn=feature_extraction_cnn,
                                                   feature_extraction_model_file=feature_extraction_model_file,
                                                   last_layer=feature_extraction_last_layer,
                                                   normalization=False,
                                                   use_cuda=self.use_cuda)
        self.adap_layer_feat3 = adap_layer_feat3()
        self.FeatureCorrelation = FeatureCorrelation(shape='4D', normalization=False)
        self.NeighConsensus = NeighConsensus(use_cuda=self.use_cuda,
                                             kernel_sizes=ncons_kernel_sizes,
                                             channels=ncons_channels)
        # fixed correlation-map geometry used by the soft-argmax module
        feature_H = 25
        feature_W = 25
        beta = 50
        kernel_sigma = 5
        self.find_correspondence = find_correspondence(feature_H, feature_W, beta, kernel_sigma)
        # Copy checkpoint weights into each sub-module (same fix as above).
        if checkpoint is not None and checkpoint != '':
            print('Copying weights...')
            for name, param in self.FeatureExtraction.state_dict().items():
                if 'num_batches_tracked' not in name:
                    self.FeatureExtraction.state_dict()[name].copy_(
                        checkpoint['state_dict']['FeatureExtraction.' + name])
            for name, param in self.NeighConsensus.state_dict().items():
                self.NeighConsensus.state_dict()[name].copy_(checkpoint['state_dict']['NeighConsensus.' + name])
            for name, param in self.adap_layer_feat3.state_dict().items():
                self.adap_layer_feat3.state_dict()[name].copy_(checkpoint['state_dict']['adap_layer_feat3.' + name])
            print('Done!')

        self.FeatureExtraction.eval()

        if self.half_precision:
            for p in self.NeighConsensus.parameters():
                p.data = p.data.half()
            for l in self.NeighConsensus.conv:
                if isinstance(l, Conv4d):
                    l.use_half = True

    # used only for foward pass at eval and for training with strong supervision
    def forward(self, tnf_batch, writer, writer_position):
        """Compute the consensus-filtered 4D correlation for a batch.

        tnf_batch: dict with 'source_image' and 'target_image' tensors.
        writer / writer_position are accepted for logging hooks but are
        currently unused.
        """
        # feature extraction (frozen backbone + trainable adaptation layer)
        feature_A = self.FeatureExtraction(tnf_batch['source_image'])
        feature_B = self.FeatureExtraction(tnf_batch['target_image'])
        adap_feature_A = self.adap_layer_feat3(feature_A)
        adap_feature_B = self.adap_layer_feat3(feature_B)
        adap_feature_A = featureL2Norm(adap_feature_A)
        adap_feature_B = featureL2Norm(adap_feature_B)
        if self.half_precision:
            feature_A = feature_A.half()
            feature_B = feature_B.half()
        # feature correlation (on the adapted, L2-normalized features)
        corr4d = self.FeatureCorrelation(adap_feature_A, adap_feature_B)
        # do 4d maxpooling for relocalization
        if self.relocalization_k_size > 1:
            corr4d, max_i, max_j, max_k, max_l = maxpool4d(corr4d, k_size=self.relocalization_k_size)
        # winner-take-all match from the raw correlation volume
        batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
        nc_B_Avec_WTA = corr4d.view(batch_size, fs1 * fs2, fs3, fs4)  # [batch_idx,k_A,i_B,j_B]
        scores_WTA_B, index_WTA_B = torch.max(nc_B_Avec_WTA, dim=1)
        index1D_WTA_B = index_WTA_B.view(batch_size, -1)
        Map2D_WTA = unNormMap1D_to_NormMap2D(index1D_WTA_B)  # (B,2,S,S)
        # run match processing model (neighbourhood consensus)
        corr4d = MutualMatching(corr4d)
        corr4d_Net = self.NeighConsensus(corr4d.detach())
        corr4d_Net = MutualMatching(corr4d_Net)
        nc_B_Avec_NET = corr4d_Net.view(batch_size, fs1 * fs2, fs3, fs4)  # [batch_idx,k_A,i_B,j_B]
        Map2D_NET, Flow2D_NET = self.find_correspondence(nc_B_Avec_NET)
        unNormMap2D_NET = NormMap2D_to_unNormMap2D(Map2D_NET)  # (B,2,S,S)
        # NOTE(review): Map2D_WTA / Flow2D_NET / unNormMap2D_NET are computed
        # but not returned; kept for parity with the original (the plotting
        # code that consumed them was commented out and has been removed).
        if self.relocalization_k_size > 1:
            delta4d = (max_i, max_j, max_k, max_l)
            return (corr4d, delta4d)
        else:
            return corr4d_Net
| 48.992016
| 154
| 0.5989
|
from __future__ import print_function, division
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
import numpy as np
import numpy.matlib
import pickle
from lib.torch_util import Softmax1D
from lib.conv4d import Conv4d
from lib.matching_model import CMDTop
from lib.matching_model import unNormMap1D_to_NormMap2D, NormMap2D_to_unNormMap2D
from lib.showPlot import plot_test_map, plot_test_flow, warpImg_fromMap, warpImg_fromMap2, matplotlib_imshow, return_plot_test_map, get_img_from_fig
import torch.nn.functional as F
def featureL2Norm(feature):
    """Return `feature` L2-normalized along dim 1 (channels)."""
    epsilon = 1e-6  # avoids division by zero for all-zero channels
    norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)
    return torch.div(feature, norm)
class FeatureExtraction(torch.nn.Module):
    """Truncated, optionally frozen torchvision backbone (vgg / resnet101 /
    resnet101fpn / densenet201), cut at `last_layer`."""

    def __init__(self, train_fe=False, feature_extraction_cnn='resnet101', feature_extraction_model_file='',
                 normalization=False, last_layer='', use_cuda=True):
        super(FeatureExtraction, self).__init__()
        self.normalization = normalization
        self.feature_extraction_cnn = feature_extraction_cnn
        if feature_extraction_cnn == 'vgg':
            self.model = models.vgg16(pretrained=True)
            # named layer list used to locate the truncation point
            vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
                                  'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
                                  'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
                                  'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
                                  'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']
            if last_layer == '':
                last_layer = 'pool4'
            last_layer_idx = vgg_feature_layers.index(last_layer)
            self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx + 1])
        resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']
        if feature_extraction_cnn == 'resnet101':
            self.model = models.resnet101(pretrained=True)
            if last_layer == '':
                last_layer = 'layer3'
            resnet_module_list = [getattr(self.model, l) for l in resnet_feature_layers]
            last_layer_idx = resnet_feature_layers.index(last_layer)
            self.model = nn.Sequential(*resnet_module_list[:last_layer_idx + 1])
        if feature_extraction_cnn == 'resnet101fpn':
            # NOTE(review): fpn_body is neither imported nor defined in this
            # file - this branch would raise NameError; confirm its origin.
            if feature_extraction_model_file != '':
                resnet = models.resnet101(pretrained=True)
                # stride swaps for compatibility with externally-trained weights
                resnet.layer2[0].conv1.stride = (2, 2)
                resnet.layer2[0].conv2.stride = (1, 1)
                resnet.layer3[0].conv1.stride = (2, 2)
                resnet.layer3[0].conv2.stride = (1, 1)
                resnet.layer4[0].conv1.stride = (2, 2)
                resnet.layer4[0].conv2.stride = (1, 1)
            else:
                resnet = models.resnet101(pretrained=True)
            resnet_module_list = [getattr(resnet, l) for l in resnet_feature_layers]
            conv_body = nn.Sequential(*resnet_module_list)
            self.model = fpn_body(conv_body,
                                  resnet_feature_layers,
                                  fpn_layers=['layer1', 'layer2', 'layer3'],
                                  normalize=normalization,
                                  hypercols=True)
            if feature_extraction_model_file != '':
                self.model.load_pretrained_weights(feature_extraction_model_file)
        if feature_extraction_cnn == 'densenet201':
            self.model = models.densenet201(pretrained=True)
            # truncate the densenet feature stack (drop the last 4 children)
            self.model = nn.Sequential(*list(self.model.features.children())[:-4])
        if train_fe == False:
            # freeze backbone parameters
            for param in self.model.parameters():
                param.requires_grad = False
        # move to GPU if requested
        if use_cuda:
            self.model = self.model.cuda()

    def forward(self, image_batch):
        """Run the truncated backbone on an image batch."""
        features = self.model(image_batch)
        return features
class adap_layer_feat3(nn.Module):
    """Residual adaptation head: two 5x5 conv + BN + ReLU blocks added
    residually on 1024-channel backbone features."""

    def __init__(self):
        super(adap_layer_feat3, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(1024),
            nn.ReLU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(1024),
            nn.ReLU()
        )
        GPU_NUM = torch.cuda.current_device()
        device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
        # NOTE(review): print label appears copy-pasted from find_correspondence
        print("find_correspondence_gpu:",device)
        use_cuda = torch.cuda.is_available()
        if use_cuda:
            self.conv1.cuda()
            self.conv2.cuda()

    def forward(self, feature):
        # two residual blocks: x + conv1(x), then + conv2(...)
        feature = feature + self.conv1(feature)
        feature = feature + self.conv2(feature)
        return feature
class FeatureCorrelation(torch.nn.Module):
    """Dense correlation between two feature maps; shape='3D' gives
    (b, h*w, h, w), shape='4D' gives (b, 1, hA, wA, hB, wB)."""

    def __init__(self, shape='3D', normalization=True):
        super(FeatureCorrelation, self).__init__()
        self.normalization = normalization
        self.shape = shape
        self.ReLU = nn.ReLU()

    def forward(self, feature_A, feature_B):
        if self.shape == '3D':
            b, c, h, w = feature_A.size()
            # reshape features for the batched matrix multiplication
            feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)
            feature_B = feature_B.view(b, c, h * w).transpose(1, 2)
            # all pairwise dot products between positions of B and A
            feature_mul = torch.bmm(feature_B, feature_A)
            correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
        elif self.shape == '4D':
            b, c, hA, wA = feature_A.size()
            b, c, hB, wB = feature_B.size()
            feature_A = feature_A.view(b, c, hA * wA).transpose(1, 2)  # (b, hA*wA, c)
            feature_B = feature_B.view(b, c, hB * wB)  # (b, c, hB*wB)
            feature_mul = torch.bmm(feature_A, feature_B)
            # indexed [batch, row_A, col_A, row_B, col_B]
            correlation_tensor = feature_mul.view(b, hA, wA, hB, wB).unsqueeze(1)
        if self.normalization:
            # ReLU then channel-wise L2 normalization of the scores
            correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor))
        return correlation_tensor
class NeighConsensus(torch.nn.Module):
    """Stack of Conv4d + ReLU layers filtering a 4D correlation volume by
    neighbourhood consensus."""

    def __init__(self, use_cuda=True, kernel_sizes=[3, 3, 3], channels=[10, 10, 1], symmetric_mode=False):
        super(NeighConsensus, self).__init__()
        self.symmetric_mode = symmetric_mode
        self.kernel_sizes = kernel_sizes
        self.channels = channels
        num_layers = len(kernel_sizes)
        nn_modules = list()
        # first layer consumes the single-channel correlation volume
        for i in range(num_layers):
            if i == 0:
                ch_in = 1
            else:
                ch_in = channels[i - 1]
            ch_out = channels[i]
            k_size = kernel_sizes[i]
            nn_modules.append(Conv4d(in_channels=ch_in, out_channels=ch_out, kernel_size=k_size, bias=True))
            nn_modules.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*nn_modules)
        if use_cuda:
            self.conv.cuda()

    def forward(self, x):
        if self.symmetric_mode:
            # run on the input and on its A<->B "transpose", transposed back,
            # so the filtering is symmetric in the two images
            x = self.conv(x) + self.conv(x.permute(0, 1, 4, 5, 2, 3)).permute(0, 1, 4, 5, 2, 3)
        else:
            x = self.conv(x)
        return x
def MutualMatching(corr4d):
    """Rescale a (b, 1, fs1, fs2, fs3, fs4) correlation volume so that only
    mutually-best matches keep high scores: each score is multiplied by its
    ratio to the best score over all source and all target positions.
    """
    batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
    corr4d_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4)  # [batch, k_A, i_B, j_B]
    corr4d_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4)
    # best score per target position / per source position
    corr4d_B_max, _ = torch.max(corr4d_B, dim=1, keepdim=True)
    corr4d_A_max, _ = torch.max(corr4d_A, dim=3, keepdim=True)
    eps = 1e-5  # guards division by zero
    corr4d_B = corr4d_B / (corr4d_B_max + eps)
    corr4d_A = corr4d_A / (corr4d_A_max + eps)
    corr4d_B = corr4d_B.view(batch_size, 1, fs1, fs2, fs3, fs4)
    corr4d_A = corr4d_A.view(batch_size, 1, fs1, fs2, fs3, fs4)
    corr4d = corr4d * (corr4d_A * corr4d_B)  # parentheses keep the output symmetric
    return corr4d
def maxpool4d(corr4d_hres, k_size=4):
    """4D max-pool a high-res correlation volume by factor k_size per axis,
    also returning the relative (i, j, k, l) argmax coordinates inside each
    k_size^4 pooling box (for relocalization).
    """
    slices = []
    # enumerate every (i, j, k, l) phase of the 4D pooling grid
    for i in range(k_size):
        for j in range(k_size):
            for k in range(k_size):
                for l in range(k_size):
                    slices.append(corr4d_hres[:, 0, i::k_size, j::k_size, k::k_size, l::k_size].unsqueeze(0))
    # NOTE(review): slices are unsqueezed at dim 0 but concatenated along
    # dim 1, merging the batch axis into the candidate axis - looks correct
    # only for batch size 1; confirm.
    slices = torch.cat(tuple(slices), dim=1)
    corr4d, max_idx = torch.max(slices, dim=1, keepdim=True)
    # decode the flat phase index into base-k_size digits (l, k, j, i);
    # NOTE(review): Tensor.div is true division in modern PyTorch, so these
    # are float-typed (values still exact) - confirm callers accept floats.
    max_l = torch.fmod(max_idx, k_size)
    max_k = torch.fmod(max_idx.sub(max_l).div(k_size), k_size)
    max_j = torch.fmod(max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size), k_size)
    max_i = max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size).sub(max_j).div(k_size)
    return (corr4d, max_i, max_j, max_k, max_l)
class find_correspondence(nn.Module):
def __init__(self, feature_H, feature_W, beta, kernel_sigma):
super(find_correspondence, self).__init__()
GPU_NUM = torch.cuda.current_device()
device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
print("find_correspondence_gpu:",device)
self.beta = beta
self.kernel_sigma = kernel_sigma
self.grid_X, self.grid_Y = np.meshgrid(np.linspace(-1, 1, feature_W),
np.linspace(-1, 1, feature_H))
self.grid_X = torch.tensor(self.grid_X, dtype=torch.float, requires_grad=False).to(device)
self.grid_Y = torch.tensor(self.grid_Y, dtype=torch.float, requires_grad=False).to(device)
self.dx_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 1, 3).expand(1, 2,
1,
3).to(
device)
self.dy_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 3, 1).expand(1, 2,
3,
1).to(
device)
self.x = np.linspace(0, feature_W - 1, feature_W)
self.x = torch.tensor(self.x, dtype=torch.float, requires_grad=False).to(device)
self.y = np.linspace(0, feature_H - 1, feature_H)
self.y = torch.tensor(self.y, dtype=torch.float, requires_grad=False).to(device)
self.x_normal = np.linspace(-1, 1, feature_W)
self.x_normal = torch.tensor(self.x_normal, dtype=torch.float, requires_grad=False).to(device)
self.y_normal = np.linspace(-1, 1, feature_H)
self.y_normal = torch.tensor(self.y_normal, dtype=torch.float, requires_grad=False).to(device)
def apply_gaussian_kernel(self, corr, sigma=5):
b, hw, h, w = corr.size()
idx = corr.max(dim=1)[1]
idx_y = (idx // w).view(b, 1, 1, h, w).float()
idx_x = (idx % w).view(b, 1, 1, h, w).float()
x = self.x.view(1, 1, w, 1, 1).expand(b, 1, w, h, w)
y = self.y.view(1, h, 1, 1, 1).expand(b, h, 1, h, w)
gauss_kernel = torch.exp(-((x - idx_x) ** 2 + (y - idx_y) ** 2) / (2 * sigma ** 2))
gauss_kernel = gauss_kernel.view(b, hw, h, w)
return gauss_kernel * corr
def softmax_with_temperature(self, x, beta, d=1):
M, _ = x.max(dim=d, keepdim=True)
x = x - M
exp_x = torch.exp(beta * x)
exp_x_sum = exp_x.sum(dim=d, keepdim=True)
return exp_x / exp_x_sum
def kernel_soft_argmax(self, corr):
b, _, h, w = corr.size()
corr = self.softmax_with_temperature(corr, beta=self.beta, d=1)
corr = corr.view(-1, h, w, h, w)
grid_x = corr.sum(dim=1, keepdim=False)
x_normal = self.x_normal.expand(b, w)
x_normal = x_normal.view(b, w, 1, 1)
grid_x = (grid_x * x_normal).sum(dim=1, keepdim=True)
grid_y = corr.sum(dim=2, keepdim=False)
y_normal = self.y_normal.expand(b, h)
y_normal = y_normal.view(b, h, 1, 1)
grid_y = (grid_y * y_normal).sum(dim=1, keepdim=True)
return grid_x, grid_y
def get_flow_smoothness(self, flow, GT_mask):
flow_dx = F.conv2d(F.pad(flow, (1, 1, 0, 0)), self.dx_kernel) / 2
flow_dy = F.conv2d(F.pad(flow, (0, 0, 1, 1)), self.dy_kernel) / 2
flow_dx = torch.abs(flow_dx) * GT_mask
flow_dy = torch.abs(flow_dy) * GT_mask
smoothness = torch.cat((flow_dx, flow_dy), 1)
return smoothness
    def forward(self, corr, GT_mask=None):
        """Turn a correlation volume into a sampling grid and a flow field.

        Args:
            corr: (b, h*w, h, w) correlation scores from each target cell to
                the flattened source grid.
            GT_mask: optional mask; when given, a flow-smoothness term is
                also returned.

        Returns:
            ``(grid, flow)`` when GT_mask is None, else
            ``(grid, flow, smoothness)``.

            NOTE(review): the two branches return differently laid-out
            tensors — the GT_mask-is-None branch applies
            ``.permute(0, 3, 1, 2)`` to both outputs while the masked branch
            does not. Callers appear to depend on this asymmetry; confirm
            before changing.
        """
        b, _, h, w = corr.size()
        # Identity grid in normalized [-1, 1] coordinates (from __init__).
        grid_X = self.grid_X.expand(b, h, w)
        grid_X = grid_X.unsqueeze(1)
        grid_Y = self.grid_Y.expand(b, h, w)
        grid_Y = grid_Y.unsqueeze(1)
        if self.beta is not None:
            # Differentiable soft-argmax correspondence.
            grid_x, grid_y = self.kernel_soft_argmax(corr)
        else:
            # Hard argmax fallback: best-match index, rescaled to [-1, 1].
            _, idx = torch.max(corr, dim=1)
            grid_x = idx % w
            grid_x = (grid_x.float() / (w - 1) - 0.5) * 2
            grid_y = idx // w
            grid_y = (grid_y.float() / (h - 1) - 0.5) * 2
            grid_x = grid_x.unsqueeze(1)
            grid_y = grid_y.unsqueeze(1)
        # (b, h, w, 2) grid in the layout expected by F.grid_sample.
        grid = torch.cat((grid_x.permute(0, 2, 3, 1), grid_y.permute(0, 2, 3, 1)),
                         3)
        # (b, 2, h, w) displacement from the identity grid.
        flow = torch.cat((grid_x - grid_X, grid_y - grid_Y),
                         1)
        if GT_mask is None:
            return grid.permute(0, 3, 1, 2), flow.permute(0, 3, 1, 2)
        else:
            smoothness = self.get_flow_smoothness(flow, GT_mask)
            return grid, flow, smoothness
class ImMatchNet(nn.Module):
    """NC-Net-style image matching network.

    Extracts CNN features from a source/target image pair, builds a 4-D
    correlation volume, and filters it with a learned neighbourhood-consensus
    module. Optionally warm-starts all sub-modules from a checkpoint.
    """

    def __init__(self,
                 feature_extraction_cnn='resnet101',
                 feature_extraction_last_layer='',
                 feature_extraction_model_file=None,
                 return_correlation=False,
                 ncons_kernel_sizes=[3, 3, 3],
                 ncons_channels=[10, 10, 1],
                 normalize_features=True,
                 train_fe=False,
                 use_cuda=True,
                 relocalization_k_size=0,
                 half_precision=False,
                 checkpoint=None,
                 ):
        # NOTE: the mutable list defaults are kept for backward
        # compatibility; they are only read below, never mutated.
        super(ImMatchNet, self).__init__()
        # BUGFIX: was `checkpoint is not ''` — identity comparison with a
        # string literal is implementation-dependent and a SyntaxWarning on
        # CPython >= 3.8. Use equality instead.
        if checkpoint is not None and checkpoint != '':
            print('Loading checkpoint...')
            checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)
            # Older checkpoints name the backbone 'vgg'; rename to 'model'.
            checkpoint['state_dict'] = OrderedDict(
                [(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
            # Architecture hyperparameters come from the checkpoint itself,
            # overriding whatever was passed in.
            print('Using checkpoint parameters: ')
            ncons_channels = checkpoint['args'].ncons_channels
            print(' ncons_channels: ' + str(ncons_channels))
            ncons_kernel_sizes = checkpoint['args'].ncons_kernel_sizes
            print(' ncons_kernel_sizes: ' + str(ncons_kernel_sizes))
        self.ReLU = nn.ReLU()
        self.use_cuda = use_cuda
        self.normalize_features = normalize_features
        print("self.normalize_features", self.normalize_features)
        self.return_correlation = return_correlation
        self.relocalization_k_size = relocalization_k_size
        self.half_precision = half_precision
        self.FeatureExtraction = FeatureExtraction(train_fe=train_fe,
                                                   feature_extraction_cnn=feature_extraction_cnn,
                                                   feature_extraction_model_file=feature_extraction_model_file,
                                                   last_layer=feature_extraction_last_layer,
                                                   normalization=False,
                                                   use_cuda=self.use_cuda)
        self.adap_layer_feat3 = adap_layer_feat3()
        self.FeatureCorrelation = FeatureCorrelation(shape='4D', normalization=False)
        self.NeighConsensus = NeighConsensus(use_cuda=self.use_cuda,
                                             kernel_sizes=ncons_kernel_sizes,
                                             channels=ncons_channels)
        # Fixed feature-map geometry and soft-argmax temperature used by the
        # correspondence head.
        feature_H = 25
        feature_W = 25
        beta = 50
        kernel_sigma = 5
        self.find_correspondence = find_correspondence(feature_H, feature_W, beta, kernel_sigma)
        # Same BUGFIX as above: equality, not identity, with ''.
        if checkpoint is not None and checkpoint != '':
            print('Copying weights...')
            # Copy module-by-module so extra/missing buffers elsewhere in the
            # checkpoint do not abort loading.
            for name in self.FeatureExtraction.state_dict():
                if 'num_batches_tracked' not in name:
                    self.FeatureExtraction.state_dict()[name].copy_(
                        checkpoint['state_dict']['FeatureExtraction.' + name])
            for name in self.NeighConsensus.state_dict():
                self.NeighConsensus.state_dict()[name].copy_(checkpoint['state_dict']['NeighConsensus.' + name])
            for name in self.adap_layer_feat3.state_dict():
                self.adap_layer_feat3.state_dict()[name].copy_(checkpoint['state_dict']['adap_layer_feat3.' + name])
            print('Done!')
        # Freeze backbone statistics (BatchNorm/Dropout in inference mode).
        self.FeatureExtraction.eval()
        if self.half_precision:
            for p in self.NeighConsensus.parameters():
                p.data = p.data.half()
            for l in self.NeighConsensus.conv:
                if isinstance(l, Conv4d):
                    l.use_half = True

    def forward(self, tnf_batch, writer, writer_position):
        """Compute the filtered 4-D correlation for a source/target batch.

        Args:
            tnf_batch: dict with 'source_image' and 'target_image' tensors.
            writer, writer_position: accepted for interface compatibility
                (presumably tensorboard logging); unused here.

        Returns:
            The mutually-matched, neighbourhood-consensus-filtered 4-D
            correlation tensor, or ``(corr4d, delta4d)`` when
            ``relocalization_k_size > 1``.
        """
        feature_A = self.FeatureExtraction(tnf_batch['source_image'])
        feature_B = self.FeatureExtraction(tnf_batch['target_image'])
        adap_feature_A = self.adap_layer_feat3(feature_A)
        adap_feature_B = self.adap_layer_feat3(feature_B)
        adap_feature_A = featureL2Norm(adap_feature_A)
        adap_feature_B = featureL2Norm(adap_feature_B)
        if self.half_precision:
            feature_A = feature_A.half()
            feature_B = feature_B.half()
        corr4d = self.FeatureCorrelation(adap_feature_A, adap_feature_B)
        if self.relocalization_k_size > 1:
            corr4d, max_i, max_j, max_k, max_l = maxpool4d(corr4d, k_size=self.relocalization_k_size)
        batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
        # Raw winner-take-all correspondence map. Kept for parity with the
        # original implementation even though it is not part of the return
        # value (the helpers are side-effect free as far as visible here).
        nc_B_Avec_WTA = corr4d.view(batch_size, fs1 * fs2, fs3, fs4)
        scores_WTA_B, index_WTA_B = torch.max(nc_B_Avec_WTA, dim=1)
        index1D_WTA_B = index_WTA_B.view(batch_size, -1)
        Map2D_WTA = unNormMap1D_to_NormMap2D(index1D_WTA_B)
        # Mutual matching before and after neighbourhood consensus; the
        # consensus module sees a detached copy of the raw correlation.
        corr4d = MutualMatching(corr4d)
        corr4d_Net = self.NeighConsensus(corr4d.detach())
        corr4d_Net = MutualMatching(corr4d_Net)
        nc_B_Avec_NET = corr4d_Net.view(batch_size, fs1 * fs2, fs3, fs4)
        Map2D_NET, Flow2D_NET = self.find_correspondence(nc_B_Avec_NET)
        unNormMap2D_NET = NormMap2D_to_unNormMap2D(Map2D_NET)
        if self.relocalization_k_size > 1:
            delta4d = (max_i, max_j, max_k, max_l)
            return (corr4d, delta4d)
        else:
            return corr4d_Net
| true
| true
|
1c474eb2a7180c4b80cf9601418dd0b801e92818
| 1,880
|
py
|
Python
|
pyleecan/Methods/Slot/HoleM53/check.py
|
Kelos-Zhu/pyleecan
|
368f8379688e31a6c26d2c1cd426f21dfbceff2a
|
[
"Apache-2.0"
] | 2
|
2019-06-08T15:04:39.000Z
|
2020-09-07T13:32:22.000Z
|
pyleecan/Methods/Slot/HoleM53/check.py
|
lyhehehe/pyleecan
|
421e9a843bf30d796415c77dc934546adffd1cd7
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Methods/Slot/HoleM53/check.py
|
lyhehehe/pyleecan
|
421e9a843bf30d796415c77dc934546adffd1cd7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from numpy import pi
from ....Methods.Slot.Slot.check import SlotCheckError
def check(self):
    """Check that the HoleM53 object is correctly defined.

    Parameters
    ----------
    self : HoleM53
        A HoleM53 object

    Returns
    -------
    None

    Raises
    -------
    S53_NoneError
        A mandatory property (W1..W4, H0..H3) is not set
    S53_Rbo0CheckError
        You must have H0 < Rbo
    S53_Rbo1CheckError
        You must have H1 < Rbo
    S53_W4CheckError
        You must have W4 < pi/2
    S53_W5CheckError
        You must have W5 >= 0
    """
    # All geometric properties must be set before running the geometry checks.
    for prop in ("W1", "W2", "W3", "W4", "H0", "H1", "H2", "H3"):
        if getattr(self, prop) is None:
            raise S53_NoneError("You must set " + prop + " !")

    Rbo = self.get_Rbo()
    # Hole depths must stay inside the bore radius.
    if Rbo <= self.H0:
        raise S53_Rbo0CheckError("You must have H0 < Rbo")
    if Rbo <= self.H1:
        raise S53_Rbo1CheckError("You must have H1 < Rbo")
    # Opening angle must stay below a right angle.
    if pi / 2 <= self.W4:
        raise S53_W4CheckError("You must have W4 < pi/2")
    # The derived width W5 must be physically realizable.
    if self.comp_W5() < 0:
        raise S53_W5CheckError("You must have W5 >=0")
class S53_NoneError(SlotCheckError):
    """Raised when a mandatory property of HoleM53 is None."""

    pass


class S53_Rbo0CheckError(SlotCheckError):
    """Raised when H0 >= Rbo (hole depth exceeds the bore radius)."""

    pass


class S53_Rbo1CheckError(SlotCheckError):
    """Raised when H1 >= Rbo (hole depth exceeds the bore radius)."""

    pass


class S53_W4CheckError(SlotCheckError):
    """Raised when W4 >= pi/2 (opening angle too large)."""

    pass


class S53_W5CheckError(SlotCheckError):
    """Raised when the computed width W5 is negative."""

    pass
| 20
| 58
| 0.600532
|
from numpy import pi
from ....Methods.Slot.Slot.check import SlotCheckError
def check(self):
if self.W1 is None:
raise S53_NoneError("You must set W1 !")
elif self.W2 is None:
raise S53_NoneError("You must set W2 !")
elif self.W3 is None:
raise S53_NoneError("You must set W3 !")
elif self.W4 is None:
raise S53_NoneError("You must set W4 !")
elif self.H0 is None:
raise S53_NoneError("You must set H0 !")
elif self.H1 is None:
raise S53_NoneError("You must set H1 !")
elif self.H2 is None:
raise S53_NoneError("You must set H2 !")
elif self.H3 is None:
raise S53_NoneError("You must set H3 !")
Rbo = self.get_Rbo()
if Rbo <= self.H0:
raise S53_Rbo0CheckError("You must have H0 < Rbo")
if Rbo <= self.H1:
raise S53_Rbo1CheckError("You must have H1 < Rbo")
if pi / 2 <= self.W4:
raise S53_W4CheckError("You must have W4 < pi/2")
if self.comp_W5() < 0:
raise S53_W5CheckError("You must have W5 >=0")
class S53_NoneError(SlotCheckError):
pass
class S53_Rbo0CheckError(SlotCheckError):
pass
class S53_Rbo1CheckError(SlotCheckError):
pass
class S53_W4CheckError(SlotCheckError):
pass
class S53_W5CheckError(SlotCheckError):
pass
| true
| true
|
1c47503a63b297ae151dad61e17a23efab7bef67
| 664
|
py
|
Python
|
bot/bot/base.py
|
TSPS-Team/Project
|
b1d83cb7957420b8348939f0a1d36f506095519c
|
[
"MIT"
] | null | null | null |
bot/bot/base.py
|
TSPS-Team/Project
|
b1d83cb7957420b8348939f0a1d36f506095519c
|
[
"MIT"
] | null | null | null |
bot/bot/base.py
|
TSPS-Team/Project
|
b1d83cb7957420b8348939f0a1d36f506095519c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import annotations
from server import Interface
from telegram.bot import Bot
class State:
    """Base class for a player's bot state.

    Subclasses override :meth:`callback` / :meth:`text_callback` to react to
    Telegram updates; the base implementations are deliberate no-ops.
    """

    player: Player
    app_info: 'AppInfo'
    bot: Bot

    def __init__(self, player, app_info) -> None:
        self.player = player
        self.app_info = app_info
        # Convenience alias so handlers can reach the bot directly.
        self.bot = app_info.bot

    def callback(self, update, context):
        """Handle a non-text update; no-op by default."""

    def text_callback(self, update, context):
        """Handle a text message; no-op by default."""
class Player:
    """A participant identified by a display name and a Telegram id."""

    lobby: 'Lobby'
    state: State
    game: 'Game'

    def __init__(self, name: str, id: int) -> None:
        self.name, self.id = name, id

    def __str__(self) -> str:
        # A player prints as its display name.
        return self.name
| 18.971429
| 51
| 0.621988
|
from __future__ import annotations
from server import Interface
from telegram.bot import Bot
class State:
player: Player
app_info: 'AppInfo'
bot: Bot
def __init__(self, player, app_info) -> None:
self.player = player
self.bot = app_info.bot
self.app_info = app_info
def callback(self, update, context):
pass
def text_callback(self, update, context):
pass
class Player:
lobby: 'Lobby'
state: State
game: 'Game'
def __init__(self, name: str, id: int) -> None:
self.name = name
self.id = id
def __str__(self):
return self.name
| true
| true
|
1c47504f9eb14b016fc1dc1c1fcbb3dea481e1a2
| 856
|
py
|
Python
|
aiofcm/client.py
|
cyberbudy/aiofcm
|
30e66b872aa2e1fc43ef4884fb84ba23b91879c5
|
[
"Apache-2.0"
] | 30
|
2017-05-11T08:21:45.000Z
|
2021-11-20T13:52:13.000Z
|
aiofcm/client.py
|
cyberbudy/aiofcm
|
30e66b872aa2e1fc43ef4884fb84ba23b91879c5
|
[
"Apache-2.0"
] | 12
|
2017-05-22T16:42:03.000Z
|
2021-08-09T11:11:47.000Z
|
aiofcm/client.py
|
cyberbudy/aiofcm
|
30e66b872aa2e1fc43ef4884fb84ba23b91879c5
|
[
"Apache-2.0"
] | 16
|
2017-05-22T11:30:55.000Z
|
2021-11-11T09:48:04.000Z
|
import asyncio
from typing import Optional, NoReturn
from aiofcm.connection import FCMConnectionPool
from aiofcm.common import Message, MessageResponse
from aiofcm.logging import logger
class FCM:
    """High-level FCM client delegating delivery to a connection pool."""

    def __init__(self, sender_id, api_key, max_connections=10, loop=None):
        # type: (int, str, int, Optional[asyncio.AbstractEventLoop]) -> NoReturn
        """Create the client and its underlying connection pool."""
        self.pool = FCMConnectionPool(sender_id, api_key, max_connections, loop)

    async def send_message(self, message: Message) -> MessageResponse:
        """Send one message, logging a diagnostic when delivery failed."""
        response = await self.pool.send_message(message)
        if response.is_successful:
            return response
        # Delivery failed: log status (and description, when present).
        msg = 'Status of message %s is %s' % \
              (message.message_id, response.status)
        if response.description:
            msg += ' (%s)' % response.description
        logger.error(msg)
        return response
| 37.217391
| 80
| 0.679907
|
import asyncio
from typing import Optional, NoReturn
from aiofcm.connection import FCMConnectionPool
from aiofcm.common import Message, MessageResponse
from aiofcm.logging import logger
class FCM:
def __init__(self, sender_id, api_key, max_connections=10, loop=None):
self.pool = FCMConnectionPool(sender_id, api_key, max_connections, loop)
async def send_message(self, message: Message) -> MessageResponse:
response = await self.pool.send_message(message)
if not response.is_successful:
msg = 'Status of message %s is %s' %\
(message.message_id, response.status)
if response.description:
msg += ' (%s)' % response.description
logger.error(msg)
return response
| true
| true
|
1c4751b7582b662927b44f9a171203401afd2ce3
| 36,054
|
py
|
Python
|
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
|
LeeCenY/turicreate
|
fb2f3bf313e831ceb42a2e10aacda6e472ea8d93
|
[
"BSD-3-Clause"
] | null | null | null |
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
|
LeeCenY/turicreate
|
fb2f3bf313e831ceb42a2e10aacda6e472ea8d93
|
[
"BSD-3-Clause"
] | null | null | null |
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
|
LeeCenY/turicreate
|
fb2f3bf313e831ceb42a2e10aacda6e472ea8d93
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import turicreate as _tc
import numpy as _np
import time as _time
from turicreate.toolkits._model import CustomModel as _CustomModel
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits import evaluation as _evaluation
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from .. import _mxnet_utils
from turicreate import extensions as _extensions
from .. import _pre_trained_models
# Side lengths (pixels) of the rasterized drawing bitmap fed to the network.
BITMAP_WIDTH = 28
BITMAP_HEIGHT = 28
# Fraction of the data kept for training when auto-creating a validation set.
TRAIN_VALIDATION_SPLIT = .95
def _raise_error_if_not_drawing_classifier_input_sframe(
        dataset, feature, target):
    """Validate the SFrame given to `turicreate.drawing_classifier.create`.

    Raises a ToolkitError when the feature/target columns are missing, have
    unsupported types, or the dataset is empty.
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)
    columns = dataset.column_names()
    if feature not in columns:
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in columns:
        raise _ToolkitError("Target column '%s' does not exist" % target)
    # Drawings are either bitmaps (Image) or stroke lists.
    feature_type = dataset[feature].dtype
    if feature_type != _tc.Image and feature_type != list:
        raise _ToolkitError("Feature column must contain images"
            + " or stroke-based drawings encoded as lists of strokes"
            + " where each stroke is a list of points and"
            + " each point is stored as a dictionary")
    # Labels must be strings or integers.
    target_type = dataset[target].dtype
    if target_type != int and target_type != str:
        raise _ToolkitError("Target column contains " + str(target_type)
            + " but it must contain strings or integers to represent"
            + " labels for drawings.")
    if len(dataset) == 0:
        raise _ToolkitError("Input Dataset is empty!")
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Create a :class:`DrawingClassifier` model.

    Parameters
    ----------
    dataset : SFrame
        Input data. The columns named by the ``feature`` and ``target``
        parameters will be extracted for training the drawing classifier.

    target : string
        Name of the column containing the target variable. The values in this
        column must be of string or integer type.

    feature : string optional
        Name of the column containing the input drawings. 'None' (the default)
        indicates the column in `dataset` named "drawing" should be used as the
        feature.
        The feature column can contain both bitmap-based drawings as well as
        stroke-based drawings. Bitmap-based drawing input can be a grayscale
        tc.Image of any size.
        Stroke-based drawing input must be in the following format:
        Every drawing must be represented by a list of strokes, where each
        stroke must be a list of points in the order in which they were drawn
        on the canvas.
        Each point must be a dictionary with two keys, "x" and "y", and their
        respective values must be numerical, i.e. either integer or float.

    validation_set : SFrame optional
        A dataset for monitoring the model's generalization performance.
        The format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.

    warm_start : string optional
        A string to denote which pretrained model to use. Set to "auto"
        by default which uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset. Here is a list of all the pretrained models that
        can be passed in as this argument:
        "auto": Uses quickdraw_245_v0
        "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in the
                            Quick, Draw! dataset.

    batch_size: int optional
        The number of drawings per training step. If not set, a default
        value of 256 will be used. If you are getting memory errors,
        try decreasing this value. If you have a powerful computer, increasing
        this value may improve performance.

    max_iterations : int optional
        The maximum number of allowed passes through the data. More passes over
        the data can result in a more accurately trained model.

    verbose : bool optional
        If True, print progress updates and model details.

    Returns
    -------
    out : DrawingClassifier
        A trained :class:`DrawingClassifier` model.

    See Also
    --------
    DrawingClassifier

    Examples
    --------
    .. sourcecode:: python

        # Train a drawing classifier model
        >>> model = turicreate.drawing_classifier.create(data)

        # Make predictions on the training set and as column to the SFrame
        >>> data['predictions'] = model.predict(data)
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter

    start_time = _time.time()

    # @TODO: Should be able to automatically choose number of iterations
    # based on data size: Tracked in Github Issue #1576

    # automatically infer feature column
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)

    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)

    # ---- hyperparameter sanity checks ----
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")

    # Stroke-based drawings are rasterized into bitmaps by the C++ extension.
    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset

    iteration = 0

    classes = dataset[target].unique()
    classes = sorted(classes)
    # Map class labels to contiguous integer indices for the network.
    class_to_index = {name: index for index, name in enumerate(classes)}

    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")

    # ---- resolve the validation set (SFrame | 'auto' | None) ----
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            # Only auto-split when there is enough data to make it meaningful.
            if dataset.num_rows() >= 100:
                if verbose:
                    print ( "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                            "          You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)

    # ---- data loaders: training, train-accuracy pass, validation pass ----
    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                                             feature_column=feature,
                                                             target_column=target,
                                                             class_to_index=class_to_index,
                                                             load_labels=True,
                                                             shuffle=True,
                                                             iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                              feature_column=feature,
                                              target_column=target,
                                              class_to_index=class_to_index,
                                              load_labels=True,
                                              shuffle=True,
                                              iterations=1)
    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)

    # ---- model construction and (optional) warm start ----
    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes = len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)

    if warm_start is not None:
        # allow_missing=True: the classifier head differs from the
        # pretrained model and keeps its Xavier initialization.
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path,
            ctx=ctx,
            allow_missing=True)
    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')

    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        # Trim loader padding on the final batch, then shard data/labels
        # across the available devices.
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data  = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        # One full pass over the loader, accumulating into the metric.
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None: continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    # ---- training loop ----
    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            # Inside training scope
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()
        # Make one step of parameter update. Trainer needs to know the
        # batch size of data to normalize the gradient by 1/batch_size.
        trainer.step(train_batch.data[0].shape[0])
        # calculate training metrics
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time

        # Metrics/progress are refreshed once per epoch (iteration change).
        if train_batch.iteration > iteration:
            # Compute training accuracy
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            # Compute validation accuracy
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = {  "iteration": iteration,
                            "train_loss": float(train_loss),
                            "train_accuracy": train_accuracy.get()[1],
                            "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)

    # ---- package the trained model ----
    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        # nan if validation_set=None
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
class DrawingClassifier(_CustomModel):
"""
A trained model that is ready to use for classification, and to be
exported to Core ML.
This model should not be constructed directly.
"""
_PYTHON_DRAWING_CLASSIFIER_VERSION = 1
    def __init__(self, state):
        # Wrap the state dict so its entries are reachable as attributes
        # (e.g. self.classes, self.batch_size, self.feature).
        self.__proxy__ = _PythonProxy(state)
    @classmethod
    def _native_name(cls):
        """Identifier under which this model type is registered/saved."""
        return "drawing_classifier"
    def _get_native_state(self):
        """Return a serializable copy of the model state.

        The live gluon network cannot be saved directly, so its parameters
        are converted to a plain state representation first.
        """
        state = self.__proxy__.get_state()
        mxnet_params = state['_model'].collect_params()
        state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
        return state
    def _get_version(self):
        """Serialization format version of this class."""
        return self._PYTHON_DRAWING_CLASSIFIER_VERSION
    @classmethod
    def _load_version(cls, state, version):
        """Rebuild a DrawingClassifier from serialized ``state``.

        Parameters
        ----------
        state : dict
            State as produced by ``_get_native_state``.
        version : int
            Serialization version the state was written with.
        """
        _tkutl._model_version_check(version,
                                    cls._PYTHON_DRAWING_CLASSIFIER_VERSION)
        from ._model_architecture import Model as _Model
        # Recreate the network and load the saved parameters into it.
        net = _Model(num_classes = len(state['classes']), prefix = 'drawing_')
        ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
        net_params = net.collect_params()
        _mxnet_utils.load_net_params_from_state(
            net_params, state['_model'], ctx=ctx
        )
        state['_model'] = net
        # For a model trained on integer classes, when saved and loaded back,
        # the classes are loaded as floats. The following if statement casts
        # the loaded "float" classes back to int.
        if len(state['classes']) > 0 and isinstance(state['classes'][0], float):
            state['classes'] = list(map(int, state['classes']))
        return DrawingClassifier(state)
def __str__(self):
"""
Return a string description of the model to the ``print`` method.
Returns
-------
out : string
A description of the DrawingClassifier.
"""
return self.__repr__()
    def __repr__(self):
        """
        Returns a string description of the model when the model name is
        entered in the terminal.
        """
        width = 40  # column width of the printed summary table
        sections, section_titles = self._get_summary_struct()
        out = _tkutl._toolkit_repr_print(self, sections, section_titles,
                                         width=width)
        return out
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
('Number of classes', 'num_classes'),
('Feature column', 'feature'),
('Target column', 'target')
]
training_fields = [
('Training Iterations', 'max_iterations'),
('Training Accuracy', 'training_accuracy'),
('Validation Accuracy', 'validation_accuracy'),
('Training Time', 'training_time'),
('Number of Examples', 'num_examples'),
('Batch Size', 'batch_size'),
('Final Loss (specific to model)', 'training_loss')
]
section_titles = ['Schema', 'Training summary']
return([model_fields, training_fields], section_titles)
    def export_coreml(self, filename, verbose=False):
        """
        Save the model in Core ML format. The Core ML model takes a grayscale
        drawing of fixed size as input and produces two outputs:
        `classLabel` and `labelProbabilities`.

        The first one, `classLabel` is an integer or string (depending on the
        classes the model was trained on) to store the label of the top
        prediction by the model.

        The second one, `labelProbabilities`, is a dictionary with all the
        class labels in the dataset as the keys, and their respective
        probabilities as the values.

        See Also
        --------
        save

        Parameters
        ----------
        filename : string
            The path of the file where we want to save the Core ML model.

        verbose : bool optional
            If True, prints export progress.

        Examples
        --------
        >>> model.export_coreml('drawing_classifier.mlmodel')
        """
        import mxnet as _mx
        from .._mxnet_to_coreml import _mxnet_converter
        import coremltools as _coremltools

        # Trace the network symbolically with a fixed 1x1x28x28 input.
        batch_size = 1
        image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)
        s_image = _mx.sym.Variable(self.feature,
            shape=image_shape, dtype=_np.float32)

        from copy import copy as _copy
        net = _copy(self._model)
        s_ymap = net(s_image)

        mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
        mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
        mod.init_params()

        # Overwrite the module's freshly-initialized parameters with the
        # trained gluon weights before conversion.
        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()

        new_arg_params = {}
        for k, param in arg_params.items():
            new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        new_aux_params = {}
        for k, param in aux_params.items():
            new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        mod.set_params(new_arg_params, new_aux_params)

        # image_scale 1/255 maps 0-255 pixel values into [0, 1] at inference.
        coreml_model = _mxnet_converter.convert(mod, mode='classifier',
                                class_labels=self.classes,
                                input_shape=[(self.feature, image_shape)],
                                builder=None, verbose=verbose,
                                preprocessor_args={
                                    'image_input_names': [self.feature],
                                    'image_scale': 1.0/255
                                })

        # Rename the probability output to "<target>Probabilities" and fill
        # in human-readable descriptions on the generated spec.
        DESIRED_OUTPUT_NAME = self.target + "Probabilities"
        spec = coreml_model._spec
        class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1
        probabilities_output_index = 1-class_label_output_index
        spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME
        spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME
        spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME
        from turicreate.toolkits import _coreml_utils
        model_type = "drawing classifier"
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)
        spec.description.input[0].shortDescription = self.feature
        spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'
        spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'
        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, filename)
    def _predict_with_probabilities(self, input_dataset, batch_size=None,
                                    verbose=True):
        """
        Predict with probabilities. The core prediction part that both
        `evaluate` and `predict` share.

        Returns an SFrame with two columns, self.target, and "probabilities".

        The column with column name, self.target, contains the predictions made
        by the model for the provided dataset.

        The "probabilities" column contains the probabilities for each class
        that the model predicted for the data provided to the function.
        """
        import mxnet as _mx
        from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter

        # Rasterize stroke-based drawings to bitmaps before inference.
        is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
        dataset = _extensions._drawing_classifier_prepare_data(
            input_dataset, self.feature) if is_stroke_input else input_dataset

        batch_size = self.batch_size if batch_size is None else batch_size
        # Single unshuffled, unlabeled pass over the data.
        loader = _SFrameClassifierIter(dataset, batch_size,
                                       class_to_index=self._class_to_index,
                                       feature_column=self.feature,
                                       target_column=self.target,
                                       load_labels=False,
                                       shuffle=False,
                                       iterations=1)

        dataset_size = len(dataset)
        ctx = _mxnet_utils.get_mxnet_context()

        index = 0
        last_time = 0
        done = False

        from turicreate import SArrayBuilder
        from array import array

        classes = self.classes
        all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
        all_probabilities_builder = SArrayBuilder(dtype=array)

        for batch in loader:
            if batch.pad is not None:
                # Final batch: drop the loader's padded rows.
                size = batch_size - batch.pad
                batch_data = _mx.nd.slice_axis(batch.data[0],
                                               axis=0, begin=0, end=size)
            else:
                batch_data = batch.data[0]
                size = batch_size

            # Shard the batch across the available devices.
            num_devices = min(batch_data.shape[0], len(ctx))
            split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)

            for data in split_data:
                # z rows are per-class scores; argmax picks the label.
                z = self._model(data).asnumpy()
                predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
                split_length = z.shape[0]
                all_predicted_builder.append_multiple(predicted)
                all_probabilities_builder.append_multiple(z.tolist())
                index += split_length
                if index == dataset_size - 1:
                    done = True

                cur_time = _time.time()
                # Do not print progress if only a few samples are predicted
                if verbose and (dataset_size >= 5
                                and cur_time > last_time + 10 or done):
                    print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
                        cur_n = index + 1,
                        max_n = dataset_size,
                        width = len(str(dataset_size))))
                    last_time = cur_time

        return (_tc.SFrame({self.target: all_predicted_builder.close(),
                            'probability': all_probabilities_builder.close()}))
def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):
    """
    Evaluate the model: predict on ``dataset`` and score the predictions
    against the ground-truth target column.

    Parameters
    ----------
    dataset : SFrame
        Must contain the same feature column used in training plus the
        target column; extra columns are ignored.
    metric : str, optional
        'auto' (all metrics) or one of 'accuracy', 'auc', 'precision',
        'recall', 'f1_score', 'confusion_matrix', 'roc_curve'.
    batch_size : int, optional
        Prediction batch size; lower it if you hit memory errors.
    verbose : bool, optional
        If True, prints prediction progress.

    Returns
    -------
    out : dict
        Metric name -> score (or SFrame for confusion_matrix / roc_curve).
    """
    if self.target not in dataset.column_names():
        raise _ToolkitError("Must provide ground truth column, '"
                            + self.target + "' in the evaluation dataset.")

    # Prediction happens before metric validation, matching the original
    # call order (an invalid metric name raises only after predicting).
    predicted = self._predict_with_probabilities(dataset, batch_size, verbose)

    avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
                     'f1_score', 'confusion_matrix', 'roc_curve']
    _tkutl._check_categorical_option_type(
        'metric', metric, avail_metrics + ['auto'])
    requested = avail_metrics if metric == 'auto' else [metric]

    ground_truth = dataset[self.target]
    # Metrics computed from hard class labels.
    label_metrics = {
        'accuracy': _evaluation.accuracy,
        'precision': _evaluation.precision,
        'recall': _evaluation.recall,
        'f1_score': _evaluation.f1_score,
        'confusion_matrix': _evaluation.confusion_matrix,
    }
    # Metrics computed from the per-class probability vectors.
    probability_metrics = {
        'auc': _evaluation.auc,
        'roc_curve': _evaluation.roc_curve,
    }

    ret = {}
    # requested preserves avail_metrics order, so result insertion order
    # matches the original implementation.
    for name in requested:
        if name in label_metrics:
            ret[name] = label_metrics[name](
                ground_truth, predicted[self.target])
        else:
            ret[name] = probability_metrics[name](
                ground_truth, predicted['probability'],
                index_map=self._class_to_index)
    return ret
def predict_topk(self, dataset, output_type="probability", k=3,
                 batch_size=None):
    """
    Return the top-k class predictions for ``dataset`` as an SFrame with
    columns `id`, `class`, and `probability` or `rank`.

    Parameters
    ----------
    dataset : SFrame | SArray | turicreate.Image | list
        Drawings to classify; an SFrame must contain the training
        feature column (a target column is not required).
    output_type : {'probability', 'rank'}, optional
        Annotate each predicted class with its probability or its rank.
    k : int, optional
        Number of classes returned per input example.
    batch_size : int, optional
        Prediction batch size; lower it if you hit memory errors.

    Returns
    -------
    out : SFrame
        One row per (example, candidate-class) pair.
    """
    _tkutl._check_categorical_option_type("output_type", output_type,
                                          ["probability", "rank"])
    if not isinstance(k, int):
        raise TypeError("'k' must be an integer >= 1")
    if k <= 0:
        raise ValueError("'k' must be >= 1")
    if batch_size is not None:
        if not isinstance(batch_size, int):
            raise TypeError("'batch_size' must be an integer >= 1")
        if batch_size < 1:
            raise ValueError("'batch_size' must be >= 1")

    prob_vector = self.predict(
        dataset, output_type='probability_vector', batch_size=batch_size)
    classes = self.classes

    def top_k_indices(p):
        # argsort ascending, take the last k, reverse: best class first.
        return reversed(_np.argsort(p)[-k:])

    if output_type == 'probability':
        annotate = lambda p: [{'class': classes[i], 'probability': p[i]}
                              for i in top_k_indices(p)]
    else:
        assert(output_type == 'rank')
        annotate = lambda p: [{'class': classes[i], 'rank': rank}
                              for rank, i in enumerate(top_k_indices(p))]

    # Explode the per-row list of dicts into one row per candidate class.
    stacked = _tc.SFrame({'X': prob_vector.apply(annotate)})
    stacked = stacked.add_row_number()
    stacked = stacked.stack('X', new_column_name='X')
    return stacked.unpack('X', column_name_prefix='')
def predict(self, data, output_type='class', batch_size=None, verbose=True):
    """
    Predict on an SFrame or SArray of drawings, or on a single drawing.

    Parameters
    ----------
    data : SFrame | SArray | tc.Image | list
        Drawing(s) to classify. An SFrame must contain the training
        feature column; a single drawing is a tc.Image (bitmap) or a
        list (strokes).
    output_type : {'probability', 'class', 'probability_vector'}, optional
        'class' (default): most probable label; 'probability': probability
        of the predicted label; 'probability_vector': full per-class vector.
    batch_size : int, optional
        Prediction batch size; lower it if you hit memory errors.
    verbose : bool, optional
        If True, prints prediction progress.

    Returns
    -------
    out : SArray
        One prediction per input drawing.
    """
    _tkutl._check_categorical_option_type("output_type", output_type,
                                          ["probability", "class", "probability_vector"])

    # Normalize every accepted input form into an SFrame.
    if isinstance(data, _tc.SFrame):
        frame = data
    elif isinstance(data, _tc.SArray):
        frame = _tc.SFrame({self.feature: data})
    else:
        # A single drawing (tc.Image or stroke list) becomes a 1-row frame.
        frame = _tc.SFrame({self.feature: [data]})

    predicted = self._predict_with_probabilities(frame, batch_size, verbose)

    if output_type == "class":
        return predicted[self.target]
    if output_type == "probability":
        _class_to_index = self._class_to_index
        target = self.target
        # Probability assigned to the predicted label for each row.
        return predicted.apply(
            lambda row: row["probability"][_class_to_index[row[target]]])
    assert (output_type == "probability_vector")
    return predicted["probability"]
| 41.632794
| 123
| 0.608837
|
import turicreate as _tc
import numpy as _np
import time as _time
from turicreate.toolkits._model import CustomModel as _CustomModel
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits import evaluation as _evaluation
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from .. import _mxnet_utils
from turicreate import extensions as _extensions
from .. import _pre_trained_models
# Input drawings are rasterized to 28x28 bitmaps before classification.
BITMAP_WIDTH = 28
BITMAP_HEIGHT = 28
# Fraction of the data kept for training when validation_set='auto'.
TRAIN_VALIDATION_SPLIT = .95
def _raise_error_if_not_drawing_classifier_input_sframe(
    dataset, feature, target):
    """Validate a drawing-classifier input SFrame.

    Raises _ToolkitError unless `dataset` is a non-empty SFrame containing
    a `feature` column of images or stroke lists and a `target` column of
    str/int labels.
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)
    if feature not in dataset.column_names():
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in dataset.column_names():
        raise _ToolkitError("Target column '%s' does not exist" % target)
    # Feature must be bitmap drawings (Image) or stroke-based drawings (list).
    if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list):
        raise _ToolkitError("Feature column must contain images"
            + " or stroke-based drawings encoded as lists of strokes"
            + " where each stroke is a list of points and"
            + " each point is stored as a dictionary")
    # Labels must be hashable scalar identifiers.
    if dataset[target].dtype != int and dataset[target].dtype != str:
        raise _ToolkitError("Target column contains " + str(dataset[target].dtype)
            + " but it must contain strings or integers to represent"
            + " labels for drawings.")
    if len(dataset) == 0:
        raise _ToolkitError("Input Dataset is empty!")
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """Train a :class:`DrawingClassifier` on bitmap or stroke-based drawings.

    Parameters
    ----------
    input_dataset : SFrame
        Training data with a drawing feature column and a label column.
    target : str
        Name of the label column (str or int labels).
    feature : str, optional
        Name of the drawing column; inferred if there is exactly one.
    validation_set : SFrame | 'auto' | None, optional
        'auto' splits off 5% of the data when there are >= 100 rows.
    warm_start : str, optional
        Pretrained-model identifier; None trains from scratch.
    batch_size, max_iterations : int, optional
    verbose : bool, optional

    Returns
    -------
    DrawingClassifier
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    start_time = _time.time()

    # ---- Input validation ------------------------------------------------
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)
    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")

    # Stroke-based input is rasterized into bitmaps before training.
    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset

    iteration = 0
    classes = dataset[target].unique()
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}

    # ---- Validation-set handling ----------------------------------------
    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print ( "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                            " You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                # Too little data to split; disable validation tracking.
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)

    # ---- Data loaders ----------------------------------------------------
    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    # Separate single-pass loader so accuracy is measured on a fresh sweep.
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                              feature_column=feature,
                                              target_column=target,
                                              class_to_index=class_to_index,
                                              load_labels=True,
                                              shuffle=True,
                                              iterations=1)
    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)

    # ---- Model setup -----------------------------------------------------
    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes = len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)

    if warm_start is not None:
        # allow_missing=True because the final layer size depends on the
        # number of classes and is not part of the pretrained checkpoint.
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path,
                          ctx=ctx,
                          allow_missing=True)
    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')

    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        # Trim padding from the final (short) batch, then shard across devices.
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        # One full pass over batch_loader, accumulating into accuracy_metric.
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None: continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    # ---- Training loop ---------------------------------------------------
    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                loss = softmax_cross_entropy(z, y)
                loss.backward()
        trainer.step(train_batch.data[0].shape[0])
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time

        # Iteration (epoch) boundary: refresh accuracies and report progress.
        if train_batch.iteration > iteration:
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = { "iteration": iteration,
                           "train_loss": float(train_loss),
                           "train_accuracy": train_accuracy.get()[1],
                           "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)

    # ---- Package trained state ------------------------------------------
    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
class DrawingClassifier(_CustomModel):
    """
    A trained drawing classifier. Create with :func:`create`; use
    :meth:`predict`, :meth:`predict_topk`, :meth:`evaluate`, and
    :meth:`export_coreml`.
    """
    # Bumped whenever the serialized state layout changes; see _load_version.
    _PYTHON_DRAWING_CLASSIFIER_VERSION = 1

    def __init__(self, state):
        # All model state is kept behind a PythonProxy for save/load support.
        self.__proxy__ = _PythonProxy(state)

    @classmethod
    def _native_name(cls):
        """Registered name of this model type."""
        return "drawing_classifier"

    def _get_native_state(self):
        """Return serializable state: mxnet params replaced by plain arrays."""
        state = self.__proxy__.get_state()
        mxnet_params = state['_model'].collect_params()
        state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
        return state

    def _get_version(self):
        return self._PYTHON_DRAWING_CLASSIFIER_VERSION

    @classmethod
    def _load_version(cls, state, version):
        """Reconstruct a model from state written by _get_native_state."""
        _tkutl._model_version_check(version,
                                    cls._PYTHON_DRAWING_CLASSIFIER_VERSION)
        from ._model_architecture import Model as _Model
        net = _Model(num_classes = len(state['classes']), prefix = 'drawing_')
        ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
        net_params = net.collect_params()
        _mxnet_utils.load_net_params_from_state(
            net_params, state['_model'], ctx=ctx
        )
        state['_model'] = net
        # Older saves may hold numeric labels as floats; normalize to int.
        if len(state['classes']) > 0 and isinstance(state['classes'][0], float):
            state['classes'] = list(map(int, state['classes']))
        return DrawingClassifier(state)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        """Summary table of schema and training statistics."""
        width = 40
        sections, section_titles = self._get_summary_struct()
        out = _tkutl._toolkit_repr_print(self, sections, section_titles,
                                         width=width)
        return out

    def _get_summary_struct(self):
        """(display name, state key) pairs backing the __repr__ table."""
        model_fields = [
            ('Number of classes', 'num_classes'),
            ('Feature column', 'feature'),
            ('Target column', 'target')
        ]
        training_fields = [
            ('Training Iterations', 'max_iterations'),
            ('Training Accuracy', 'training_accuracy'),
            ('Validation Accuracy', 'validation_accuracy'),
            ('Training Time', 'training_time'),
            ('Number of Examples', 'num_examples'),
            ('Batch Size', 'batch_size'),
            ('Final Loss (specific to model)', 'training_loss')
        ]
        section_titles = ['Schema', 'Training summary']
        return([model_fields, training_fields], section_titles)

    def export_coreml(self, filename, verbose=False):
        """Export the model as a Core ML neural-network classifier at
        *filename* (.mlmodel)."""
        import mxnet as _mx
        from .._mxnet_to_coreml import _mxnet_converter
        import coremltools as _coremltools

        # Core ML export uses a fixed single-image input shape.
        batch_size = 1
        image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)
        s_image = _mx.sym.Variable(self.feature,
                                   shape=image_shape, dtype=_np.float32)

        from copy import copy as _copy
        net = _copy(self._model)
        s_ymap = net(s_image)

        mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
        mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
        mod.init_params()

        # Copy the trained gluon weights into the symbolic module.
        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()

        new_arg_params = {}
        for k, param in arg_params.items():
            new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        new_aux_params = {}
        for k, param in aux_params.items():
            new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        mod.set_params(new_arg_params, new_aux_params)

        coreml_model = _mxnet_converter.convert(mod, mode='classifier',
                                class_labels=self.classes,
                                input_shape=[(self.feature, image_shape)],
                                builder=None, verbose=verbose,
                                preprocessor_args={
                                    'image_input_names': [self.feature],
                                    'image_scale': 1.0/255
                                })

        # Rename the probability output to "<target>Probabilities".
        DESIRED_OUTPUT_NAME = self.target + "Probabilities"
        spec = coreml_model._spec
        class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1
        probabilities_output_index = 1-class_label_output_index
        spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME
        spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME
        spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME

        from turicreate.toolkits import _coreml_utils
        model_type = "drawing classifier"
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)
        spec.description.input[0].shortDescription = self.feature
        spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'
        spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'

        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, filename)

    def _predict_with_probabilities(self, input_dataset, batch_size=None,
                                    verbose=True):
        """Run the network over *input_dataset* and return an SFrame with the
        predicted class per row plus the full per-class probability vector."""
        import mxnet as _mx
        from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter

        # Rasterize stroke-based drawings first.
        is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
        dataset = _extensions._drawing_classifier_prepare_data(
            input_dataset, self.feature) if is_stroke_input else input_dataset

        batch_size = self.batch_size if batch_size is None else batch_size
        loader = _SFrameClassifierIter(dataset, batch_size,
                                       class_to_index=self._class_to_index,
                                       feature_column=self.feature,
                                       target_column=self.target,
                                       load_labels=False,
                                       shuffle=False,
                                       iterations=1)

        dataset_size = len(dataset)
        ctx = _mxnet_utils.get_mxnet_context()

        index = 0
        last_time = 0
        done = False

        from turicreate import SArrayBuilder
        from array import array

        classes = self.classes
        all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
        all_probabilities_builder = SArrayBuilder(dtype=array)

        for batch in loader:
            # Drop padding rows from the final (short) batch.
            if batch.pad is not None:
                size = batch_size - batch.pad
                batch_data = _mx.nd.slice_axis(batch.data[0],
                                               axis=0, begin=0, end=size)
            else:
                batch_data = batch.data[0]
                size = batch_size

            num_devices = min(batch_data.shape[0], len(ctx))
            split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)

            for data in split_data:
                z = self._model(data).asnumpy()
                predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
                split_length = z.shape[0]
                all_predicted_builder.append_multiple(predicted)
                all_probabilities_builder.append_multiple(z.tolist())
                index += split_length
                if index == dataset_size - 1:
                    done = True
                cur_time = _time.time()
                # Throttled progress output: only for datasets of >= 5 rows and
                # at most every 10 s — or whenever `done` (note `and` binds
                # tighter than `or` here; kept as-is).
                if verbose and (dataset_size >= 5
                    and cur_time > last_time + 10 or done):
                    print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
                        cur_n = index + 1,
                        max_n = dataset_size,
                        width = len(str(dataset_size))))
                    last_time = cur_time

        return (_tc.SFrame({self.target: all_predicted_builder.close(),
                            'probability': all_probabilities_builder.close()}))

    def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):
        """Evaluate the model on *dataset*; returns a dict of metric name ->
        score. *metric* is 'auto' (all) or one of accuracy, auc, precision,
        recall, f1_score, confusion_matrix, roc_curve."""
        if self.target not in dataset.column_names():
            raise _ToolkitError("Must provide ground truth column, '"
                + self.target + "' in the evaluation dataset.")

        predicted = self._predict_with_probabilities(dataset, batch_size, verbose)

        avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
                         'f1_score', 'confusion_matrix', 'roc_curve']
        _tkutl._check_categorical_option_type(
            'metric', metric, avail_metrics + ['auto'])
        metrics = avail_metrics if metric == 'auto' else [metric]

        ret = {}
        if 'accuracy' in metrics:
            ret['accuracy'] = _evaluation.accuracy(
                dataset[self.target], predicted[self.target])
        if 'auc' in metrics:
            ret['auc'] = _evaluation.auc(
                dataset[self.target], predicted['probability'],
                index_map=self._class_to_index)
        if 'precision' in metrics:
            ret['precision'] = _evaluation.precision(
                dataset[self.target], predicted[self.target])
        if 'recall' in metrics:
            ret['recall'] = _evaluation.recall(
                dataset[self.target], predicted[self.target])
        if 'f1_score' in metrics:
            ret['f1_score'] = _evaluation.f1_score(
                dataset[self.target], predicted[self.target])
        if 'confusion_matrix' in metrics:
            ret['confusion_matrix'] = _evaluation.confusion_matrix(
                dataset[self.target], predicted[self.target])
        if 'roc_curve' in metrics:
            ret['roc_curve'] = _evaluation.roc_curve(
                dataset[self.target], predicted['probability'],
                index_map=self._class_to_index)
        return ret

    def predict_topk(self, dataset, output_type="probability", k=3,
                     batch_size=None):
        """Return the top-*k* predictions per example as an SFrame with
        columns `id`, `class`, and `probability` or `rank`."""
        _tkutl._check_categorical_option_type("output_type", output_type,
                                              ["probability", "rank"])
        if not isinstance(k, int):
            raise TypeError("'k' must be an integer >= 1")
        if k <= 0:
            raise ValueError("'k' must be >= 1")
        if batch_size is not None and not isinstance(batch_size, int):
            raise TypeError("'batch_size' must be an integer >= 1")
        if batch_size is not None and batch_size < 1:
            raise ValueError("'batch_size' must be >= 1")

        prob_vector = self.predict(
            dataset, output_type='probability_vector', batch_size=batch_size)
        classes = self.classes

        if output_type == 'probability':
            results = prob_vector.apply(lambda p: [
                {'class': classes[i], 'probability': p[i]}
                for i in reversed(_np.argsort(p)[-k:])]
            )
        else:
            assert(output_type == 'rank')
            results = prob_vector.apply(lambda p: [
                {'class': classes[index], 'rank': rank}
                for rank, index in enumerate(reversed(_np.argsort(p)[-k:]))]
            )

        # One row per (example, candidate-class) pair.
        results = _tc.SFrame({'X': results})
        results = results.add_row_number()
        results = results.stack('X', new_column_name='X')
        results = results.unpack('X', column_name_prefix='')
        return results

    def predict(self, data, output_type='class', batch_size=None, verbose=True):
        """Predict on an SFrame/SArray of drawings or a single drawing.
        *output_type* is 'class', 'probability', or 'probability_vector'."""
        _tkutl._check_categorical_option_type("output_type", output_type,
            ["probability", "class", "probability_vector"])
        if isinstance(data, _tc.SArray):
            predicted = self._predict_with_probabilities(
                _tc.SFrame({
                    self.feature: data
                }),
                batch_size,
                verbose
            )
        elif isinstance(data, _tc.SFrame):
            predicted = self._predict_with_probabilities(data, batch_size, verbose)
        else:
            # single input (tc.Image bitmap or stroke list)
            predicted = self._predict_with_probabilities(
                _tc.SFrame({
                    self.feature: [data]
                }),
                batch_size,
                verbose
            )
        if output_type == "class":
            return predicted[self.target]
        elif output_type == "probability":
            _class_to_index = self._class_to_index
            target = self.target
            return predicted.apply(
                lambda row: row["probability"][_class_to_index[row[target]]])
        else:
            assert (output_type == "probability_vector")
            return predicted["probability"]
| true
| true
|
1c47529775227539b203847b8de750e8bd66423a
| 407
|
py
|
Python
|
cont/contapp/models.py
|
Chuox/Contador_Palabras
|
2be98392351536416baa38c90fc62950138d84f1
|
[
"MIT"
] | null | null | null |
cont/contapp/models.py
|
Chuox/Contador_Palabras
|
2be98392351536416baa38c90fc62950138d84f1
|
[
"MIT"
] | null | null | null |
cont/contapp/models.py
|
Chuox/Contador_Palabras
|
2be98392351536416baa38c90fc62950138d84f1
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.urls import reverse
# Create your models here.
class Palabras(models.Model):
    # One analyzed page: the source URL and the scraped text to word-count.
    # NOTE(review): CharField with very large max_length — a URLField/TextField
    # would be more idiomatic, but changing field types alters the DB schema.
    url = models.CharField(max_length=99999,default="https://es.wikipedia.org/")
    texto = models.CharField(max_length=9999999,default="")

    def __str__(self):
        # Display records by their URL (admin, shell, templates).
        return self.url

    def get_absolute_url(self):
        # Canonical detail view for this record.
        return reverse('count-detail', kwargs={'pk': self.pk})
| 31.307692
| 80
| 0.702703
|
from django.db import models
from django.urls import reverse
class Palabras(models.Model):
    # One analyzed page: the source URL and the scraped text to word-count.
    # NOTE(review): CharField with very large max_length — a URLField/TextField
    # would be more idiomatic, but changing field types alters the DB schema.
    url = models.CharField(max_length=99999,default="https://es.wikipedia.org/")
    texto = models.CharField(max_length=9999999,default="")

    def __str__(self):
        # Display records by their URL (admin, shell, templates).
        return self.url

    def get_absolute_url(self):
        # Canonical detail view for this record.
        return reverse('count-detail', kwargs={'pk': self.pk})
| true
| true
|
1c4752b75cce49cce05e2ea439f39e239799fab9
| 2,740
|
py
|
Python
|
mvmv/mvmv.py
|
movermeyer/mvmv
|
23c1c4202b6fb0ef08d6c07975107dcec87d7208
|
[
"MIT"
] | 1
|
2019-01-26T16:35:31.000Z
|
2019-01-26T16:35:31.000Z
|
mvmv/mvmv.py
|
movermeyer/mvmv
|
23c1c4202b6fb0ef08d6c07975107dcec87d7208
|
[
"MIT"
] | 5
|
2015-01-22T23:24:05.000Z
|
2015-01-25T04:49:03.000Z
|
mvmv/mvmv.py
|
movermeyer/mvmv
|
23c1c4202b6fb0ef08d6c07975107dcec87d7208
|
[
"MIT"
] | 3
|
2015-02-25T17:51:41.000Z
|
2018-03-04T20:29:59.000Z
|
import codecs
import mimetypes
import os
import re
import sqlite3
from fuzzywuzzy import fuzz
# Common words we skip when choosing a seed word for the database search.
common_words = [
    "The",
    "Them",
    "A",
    "An",
    "In",
]
# Release-tag garbage that pollutes movie file names (stripped before search).
blacklist = [
    "BluRay",
    "\d{3,4}p",
    "(HD|DVD|BR)Rip",
    "x\d{3}",
    "XViD(-.*)?",
    "AC3-EVO",
]
# Single regex matching any blacklist entry followed by whitespace/end.
bl_re = re.compile("(" + "|".join(blacklist) + ")(\s|$)", re.IGNORECASE)
# (The sqlite connection itself is created in the __main__ block below.)
def search(query, cursor):
    """Return the best-matching movie title in the database for *query*.

    The query (typically a file name without extension) is cleaned of
    release tags, a seed word is full-text-searched via sqlite MATCH, and
    candidates are ranked with fuzzy matching. Falls back to the cleaned
    query itself if nothing scores better.
    """
    # Normalize separators and strip release-tag garbage (BluRay, 720p, ...).
    query = query.replace(".", " ")
    query = bl_re.sub("", query)

    year_match = re.search("(19|20)\d{2}", query)
    year = year_match.group(0) if year_match else None

    # Find the first relevant word to seed the full-text search.
    word = ""
    for token in query.split(" "):
        if token not in common_words and len(token) > 3:
            word = token.replace("-", " ")
            break

    # Bug fix: previously a missing year interpolated the literal string
    # "None" into the MATCH query; use an empty string instead.
    cursor.execute("SELECT * FROM movies WHERE movies MATCH ?",
                   ["%s %s" % (word, year if year else "")])

    ratio = 0
    best = query
    if year:
        best = best.replace(year, "")
    best = best.strip()
    for item in cursor:
        current = fuzz.ratio(item[0], query)
        # Penalize candidate titles containing words absent from the query.
        # (Renamed loop variable: the original shadowed `word` here.)
        for title_word in item[0].split():
            if title_word not in query:
                current -= 10
        # An exact multi-word substring match wins outright.
        if item[0] in query and len(item[0].split()) > 1:
            ratio = 100
            best = item[0]
        elif current > ratio:
            ratio = current
            best = item[0]
    return best
def is_valid_file(filename, excludes):
    """True iff *filename* has a video MIME type and matches no pattern
    in *excludes* (an iterable of compiled regexes)."""
    mime = str(mimetypes.guess_type(filename)[0])
    if not mime.startswith('video/'):
        return False
    return not any(pattern.match(filename) for pattern in excludes)
def get_movies_list(dirname, excludes=None):
    """Walk *dirname* and collect (directory, filename) pairs for every
    video file, skipping anything matched by the *excludes* regexes."""
    excludes = [] if excludes is None else excludes
    found = []
    for root, _, files in os.walk(dirname):
        # An excluded directory is skipped wholesale.
        if any(pattern.match(root) for pattern in excludes):
            continue
        for name in files:
            if is_valid_file(name, excludes):
                found.append((root, name))
    return found
def movemovie(src, dst, cursor):
    """Rename one movie file into *dst*.

    *src* is a ``(directory, filename)`` pair; the new basename is the best
    database match for the old name (via :func:`search`), keeping the
    original extension.
    """
    filename, extension = os.path.splitext(src[1])
    new_name = search(filename, cursor) + extension
    # Use os.path.join instead of manual "%s/%s" concatenation so the
    # destination path is built portably.
    os.rename(os.path.join(src[0], src[1]),
              os.path.join(dst, new_name))
def movemovies(dirname, dst, cursor, excludes=None):
    """Rename every video found under *dirname* into *dst*."""
    movies = get_movies_list(dirname, excludes)
    for movie in movies:
        movemovie(movie, dst, cursor)
if __name__ == "__main__":
    # CLI usage: python mvmv.py "<movie file name>" — prints the best match
    # found in the local movies.db full-text index.
    conn = sqlite3.connect("movies.db")
    cursor = conn.cursor()
    import sys
    print(search(sys.argv[1], cursor))
    conn.close()
| 26.346154
| 80
| 0.55365
|
import codecs
import mimetypes
import os
import re
import sqlite3
from fuzzywuzzy import fuzz
# Common words we skip when choosing a seed word for the database search.
common_words = [
    "The",
    "Them",
    "A",
    "An",
    "In",
]
# Release-tag garbage that pollutes movie file names (stripped before search).
blacklist = [
    "BluRay",
    "\d{3,4}p",
    "(HD|DVD|BR)Rip",
    "x\d{3}",
    "XViD(-.*)?",
    "AC3-EVO",
]
# Single regex matching any blacklist entry followed by whitespace/end.
bl_re = re.compile("(" + "|".join(blacklist) + ")(\s|$)", re.IGNORECASE)
# (The sqlite connection itself is created in the __main__ block below.)
def search(query, cursor):
    """Return the best-matching movie title in the database for *query*.

    The query (typically a file name without extension) is cleaned of
    release tags, a seed word is full-text-searched via sqlite MATCH, and
    candidates are ranked with fuzzy matching. Falls back to the cleaned
    query itself if nothing scores better.
    """
    # Normalize separators and strip release-tag garbage (BluRay, 720p, ...).
    query = query.replace(".", " ")
    query = bl_re.sub("", query)

    year_match = re.search("(19|20)\d{2}", query)
    year = year_match.group(0) if year_match else None

    # Find the first relevant word to seed the full-text search.
    word = ""
    for token in query.split(" "):
        if token not in common_words and len(token) > 3:
            word = token.replace("-", " ")
            break

    # Bug fix: previously a missing year interpolated the literal string
    # "None" into the MATCH query; use an empty string instead.
    cursor.execute("SELECT * FROM movies WHERE movies MATCH ?",
                   ["%s %s" % (word, year if year else "")])

    ratio = 0
    best = query
    if year:
        best = best.replace(year, "")
    best = best.strip()
    for item in cursor:
        current = fuzz.ratio(item[0], query)
        # Penalize candidate titles containing words absent from the query.
        # (Renamed loop variable: the original shadowed `word` here.)
        for title_word in item[0].split():
            if title_word not in query:
                current -= 10
        # An exact multi-word substring match wins outright.
        if item[0] in query and len(item[0].split()) > 1:
            ratio = 100
            best = item[0]
        elif current > ratio:
            ratio = current
            best = item[0]
    return best
def is_valid_file(filename, excludes):
    """True iff *filename* has a video MIME type and matches no pattern
    in *excludes* (an iterable of compiled regexes)."""
    mime = str(mimetypes.guess_type(filename)[0])
    if not mime.startswith('video/'):
        return False
    return not any(pattern.match(filename) for pattern in excludes)
def get_movies_list(dirname, excludes=None):
    """Walk *dirname* and collect (directory, filename) pairs for every
    video file, skipping anything matched by the *excludes* regexes."""
    excludes = [] if excludes is None else excludes
    found = []
    for root, _, files in os.walk(dirname):
        # An excluded directory is skipped wholesale.
        if any(pattern.match(root) for pattern in excludes):
            continue
        for name in files:
            if is_valid_file(name, excludes):
                found.append((root, name))
    return found
def movemovie(src, dst, cursor):
    """Rename one movie file into *dst*.

    *src* is a ``(directory, filename)`` pair; the new basename is the best
    database match for the old name (via :func:`search`), keeping the
    original extension.
    """
    filename, extension = os.path.splitext(src[1])
    new_name = search(filename, cursor) + extension
    # Use os.path.join instead of manual "%s/%s" concatenation so the
    # destination path is built portably.
    os.rename(os.path.join(src[0], src[1]),
              os.path.join(dst, new_name))
def movemovies(dirname, dst, cursor, excludes=None):
    """Rename every video found under *dirname* into *dst*."""
    movies = get_movies_list(dirname, excludes)
    for movie in movies:
        movemovie(movie, dst, cursor)
if __name__ == "__main__":
    # CLI usage: python mvmv.py "<movie file name>" — prints the best match
    # found in the local movies.db full-text index.
    conn = sqlite3.connect("movies.db")
    cursor = conn.cursor()
    import sys
    print(search(sys.argv[1], cursor))
    conn.close()
| true
| true
|
1c4752ee09bf70092f224bcea3d2adc5f3dcac59
| 708
|
py
|
Python
|
Switches.py
|
ProgrammingNerdGit/GBLS
|
6fcc3acc4b2797ef7c97f6d88c42cef66f8e7b50
|
[
"MIT"
] | 1
|
2020-11-04T18:50:54.000Z
|
2020-11-04T18:50:54.000Z
|
Switches.py
|
ProgrammingNerdGit/GBLS
|
6fcc3acc4b2797ef7c97f6d88c42cef66f8e7b50
|
[
"MIT"
] | null | null | null |
Switches.py
|
ProgrammingNerdGit/GBLS
|
6fcc3acc4b2797ef7c97f6d88c42cef66f8e7b50
|
[
"MIT"
] | null | null | null |
class switch:
def __init__(self):
self.cases = []
self.triggered = False
def anyCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
for i in args:
if(args[i] and not self.triggered):
self.triggered = True
func()
def exclusiveCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
numOfExepts = 0
for i in args:
if(args[i] and not self.triggered):
numOfExepts += 1
if(numOfExepts == len(args)):
self.triggered = True
func()
def default(self,func):
if(not self.triggered):
func()
| 29.5
| 50
| 0.492938
|
class switch:
def __init__(self):
self.cases = []
self.triggered = False
def anyCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
for i in args:
if(args[i] and not self.triggered):
self.triggered = True
func()
def exclusiveCase(self,func,*args):
if(len(args) <= 1): args += tuple([False])
numOfExepts = 0
for i in args:
if(args[i] and not self.triggered):
numOfExepts += 1
if(numOfExepts == len(args)):
self.triggered = True
func()
def default(self,func):
if(not self.triggered):
func()
| true
| true
|
1c4753ab0132900bf58f1a4ebd6b8e9c3f876049
| 924
|
bzl
|
Python
|
tools/repositories.bzl
|
guibou/rules_haskell
|
ea0e70ace2432a490d4ab4c4e54617612466e584
|
[
"Apache-2.0"
] | 222
|
2017-11-06T09:01:12.000Z
|
2022-03-28T08:24:22.000Z
|
tools/repositories.bzl
|
guibou/rules_haskell
|
ea0e70ace2432a490d4ab4c4e54617612466e584
|
[
"Apache-2.0"
] | 1,168
|
2017-11-19T07:43:13.000Z
|
2022-03-31T12:40:39.000Z
|
tools/repositories.bzl
|
guibou/rules_haskell
|
ea0e70ace2432a490d4ab4c4e54617612466e584
|
[
"Apache-2.0"
] | 94
|
2017-11-17T22:46:37.000Z
|
2022-03-15T00:16:56.000Z
|
"""Workspace rules (tools/repositories)"""
load("@rules_haskell//haskell:cabal.bzl", "stack_snapshot")
def rules_haskell_worker_dependencies(**stack_kwargs):
"""Provide all repositories that are necessary for `rules_haskell`'s tools to
function.
"""
excludes = native.existing_rules().keys()
if "rules_haskell_worker_dependencies" not in excludes:
stack_snapshot(
name = "rules_haskell_worker_dependencies",
packages = [
"base",
"bytestring",
"filepath",
"ghc",
"ghc-paths",
"microlens",
"process",
"profunctors-5.5.2",
"proto-lens-0.7.0.0",
"proto-lens-runtime-0.7.0.0",
"text",
"vector",
],
snapshot = "lts-18.0",
**stack_kwargs
)
| 29.806452
| 81
| 0.504329
|
load("@rules_haskell//haskell:cabal.bzl", "stack_snapshot")
def rules_haskell_worker_dependencies(**stack_kwargs):
excludes = native.existing_rules().keys()
if "rules_haskell_worker_dependencies" not in excludes:
stack_snapshot(
name = "rules_haskell_worker_dependencies",
packages = [
"base",
"bytestring",
"filepath",
"ghc",
"ghc-paths",
"microlens",
"process",
"profunctors-5.5.2",
"proto-lens-0.7.0.0",
"proto-lens-runtime-0.7.0.0",
"text",
"vector",
],
snapshot = "lts-18.0",
**stack_kwargs
)
| true
| true
|
1c4753ba6758fb3028d113543431f667163dd0f4
| 3,120
|
py
|
Python
|
newproject_1/newproject_1/settings.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | null | null | null |
newproject_1/newproject_1/settings.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | null | null | null |
newproject_1/newproject_1/settings.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | null | null | null |
# Scrapy settings for newproject_1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'newproject_1'
SPIDER_MODULES = ['newproject_1.spiders']
NEWSPIDER_MODULE = 'newproject_1.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'newproject_1 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'newproject_1.middlewares.Newproject1SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'newproject_1.middlewares.Newproject1DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'newproject_1.pipelines.Newproject1Pipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.05618
| 103
| 0.780769
|
BOT_NAME = 'newproject_1'
SPIDER_MODULES = ['newproject_1.spiders']
NEWSPIDER_MODULE = 'newproject_1.spiders'
ROBOTSTXT_OBEY = True
| true
| true
|
1c4753eff116b910c9c93958d56825d7720f1568
| 1,444
|
py
|
Python
|
samples/generated_samples/dialogflow_v2_generated_versions_get_version_async.py
|
rkdfc93/python-dialogflow
|
a59cff0298ef18674c0b4133ef0a6ab82e288920
|
[
"Apache-2.0"
] | 171
|
2018-09-19T21:16:18.000Z
|
2020-12-07T17:41:10.000Z
|
samples/generated_samples/dialogflow_v2_generated_versions_get_version_async.py
|
rkdfc93/python-dialogflow
|
a59cff0298ef18674c0b4133ef0a6ab82e288920
|
[
"Apache-2.0"
] | 150
|
2018-09-25T14:04:28.000Z
|
2020-12-09T21:45:43.000Z
|
samples/generated_samples/dialogflow_v2_generated_versions_get_version_async.py
|
rkdfc93/python-dialogflow
|
a59cff0298ef18674c0b4133ef0a6ab82e288920
|
[
"Apache-2.0"
] | 75
|
2018-09-22T14:12:18.000Z
|
2020-12-08T07:12:12.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetVersion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Versions_GetVersion_async]
from google.cloud import dialogflow_v2
async def sample_get_version():
# Create a client
client = dialogflow_v2.VersionsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.GetVersionRequest(
name="name_value",
)
# Make the request
response = await client.get_version(request=request)
# Handle the response
print(response)
# [END dialogflow_v2_generated_Versions_GetVersion_async]
| 31.391304
| 85
| 0.756925
|
from google.cloud import dialogflow_v2
async def sample_get_version():
client = dialogflow_v2.VersionsAsyncClient()
request = dialogflow_v2.GetVersionRequest(
name="name_value",
)
response = await client.get_version(request=request)
print(response)
| true
| true
|
1c4755892a095d9eed7918634a6edef5688ce027
| 1,624
|
py
|
Python
|
sdks/python/http_client/v1/test/test_v1_list_searches_response.py
|
TariqAHassan/polyaxon
|
6fc7f6a6ec49ef02d525887b6d18a893203e5b29
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/test/test_v1_list_searches_response.py
|
TariqAHassan/polyaxon
|
6fc7f6a6ec49ef02d525887b6d18a893203e5b29
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/test/test_v1_list_searches_response.py
|
TariqAHassan/polyaxon
|
6fc7f6a6ec49ef02d525887b6d18a893203e5b29
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon sdk
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.14.4
Contact: contact@polyaxon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import polyaxon_sdk
from polyaxon_sdk.models.v1_list_searches_response import V1ListSearchesResponse # noqa: E501
from polyaxon_sdk.rest import ApiException
class TestV1ListSearchesResponse(unittest.TestCase):
"""V1ListSearchesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1ListSearchesResponse(self):
"""Test V1ListSearchesResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = polyaxon_sdk.models.v1_list_searches_response.V1ListSearchesResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 28.491228
| 119
| 0.738916
|
from __future__ import absolute_import
import unittest
import polyaxon_sdk
from polyaxon_sdk.models.v1_list_searches_response import V1ListSearchesResponse
from polyaxon_sdk.rest import ApiException
class TestV1ListSearchesResponse(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1ListSearchesResponse(self):
s
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c475637e60225ae646c1b529f1fa216fb2c6c1a
| 10,082
|
py
|
Python
|
doc/source/conf.py
|
genomicsengland/gel-coverage
|
61a671a53ac52a0b62c8aea983ced65fd0bed6cc
|
[
"Apache-2.0"
] | 2
|
2019-07-15T08:13:22.000Z
|
2020-09-30T18:47:59.000Z
|
doc/source/conf.py
|
genomicsengland/gel-coverage
|
61a671a53ac52a0b62c8aea983ced65fd0bed6cc
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
genomicsengland/gel-coverage
|
61a671a53ac52a0b62c8aea983ced65fd0bed6cc
|
[
"Apache-2.0"
] | null | null | null |
import sphinx_rtd_theme
# -*- coding: utf-8 -*-
#
# GelCoverage documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 13 14:37:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GelCoverage'
copyright = u'2016, Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
author = u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'GelCoverage v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GelCoveragedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GelCoverage.tex', u'GelCoverage Documentation',
u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gelcoverage', u'GelCoverage Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GelCoverage', u'GelCoverage Documentation',
author, 'GelCoverage', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 29.223188
| 93
| 0.706903
|
import sphinx_rtd_theme
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'GelCoverage'
copyright = u'2016, Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
author = u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'GelCoverage v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GelCoveragedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GelCoverage.tex', u'GelCoverage Documentation',
u'Pablo Riesgo, Pedro Furio, Matthew Parker, Antonio Rueda, Alona Sosinsky', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gelcoverage', u'GelCoverage Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GelCoverage', u'GelCoverage Documentation',
author, 'GelCoverage', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true
| true
|
1c475707181d966447b38a87fe651934c279aaa0
| 1,151
|
py
|
Python
|
aiida/tools/importexport/__init__.py
|
aiace9/aiida-core
|
09ac91654648adb684a58d5d2d7b1c11a503dae8
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2020-10-01T17:11:58.000Z
|
2020-10-01T17:11:58.000Z
|
aiida/tools/importexport/__init__.py
|
blokhin/aiida-core
|
29331b558b45ba74acf1ca633a2d8bfabc1bdd05
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2019-03-06T11:23:42.000Z
|
2020-03-09T09:34:07.000Z
|
aiida/tools/importexport/__init__.py
|
blokhin/aiida-core
|
29331b558b45ba74acf1ca633a2d8bfabc1bdd05
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=wildcard-import,undefined-variable
"""Provides import/export functionalities.
To see history/git blame prior to the move to aiida.tools.importexport,
explore tree: https://github.com/aiidateam/aiida-core/tree/eebef392c81e8b130834a92e1d7abf5e2e30b3ce
Functionality: <tree>/aiida/orm/importexport.py
Tests: <tree>/aiida/backends/tests/test_export_and_import.py
"""
from .dbexport import *
from .dbimport import *
from .common import *
__all__ = (dbexport.__all__ + dbimport.__all__ + common.__all__)
| 47.958333
| 99
| 0.564726
| true
| true
|
|
1c47573535fc8458d412b298db9ec2766ec449c9
| 645
|
py
|
Python
|
modules/sample/src/sample/CSV/pf.py
|
AsmaBRZ/rcrs-server
|
d67a84a17b73dd95c5553bed68b8c4c08cd5651a
|
[
"BSD-3-Clause"
] | null | null | null |
modules/sample/src/sample/CSV/pf.py
|
AsmaBRZ/rcrs-server
|
d67a84a17b73dd95c5553bed68b8c4c08cd5651a
|
[
"BSD-3-Clause"
] | null | null | null |
modules/sample/src/sample/CSV/pf.py
|
AsmaBRZ/rcrs-server
|
d67a84a17b73dd95c5553bed68b8c4c08cd5651a
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import os
time=np.arange(1,301)
array=np.zeros(250)
a=[]
fichiers=os.listdir("d")
for f in fichiers:
print(f)
i=0
with open("d/"+f, "r") as ins:
for line in ins:
if i<300:
print(line)
l=line.split(" ")
print(int(l[1]))
print(i)
print('jjjjjjjj')
print(array[i])
array[i]=array[i]+int(l[1])
i=i+1
print (array)
plt.plot(array)
plt.ylabel("Nombre d'obstacles nettoyés")
plt.xlabel('Temps')
plt.suptitle('Agent random')
plt.show()
| 18.970588
| 43
| 0.516279
|
import matplotlib.pyplot as plt
import numpy as np
import os
time=np.arange(1,301)
array=np.zeros(250)
a=[]
fichiers=os.listdir("d")
for f in fichiers:
print(f)
i=0
with open("d/"+f, "r") as ins:
for line in ins:
if i<300:
print(line)
l=line.split(" ")
print(int(l[1]))
print(i)
print('jjjjjjjj')
print(array[i])
array[i]=array[i]+int(l[1])
i=i+1
print (array)
plt.plot(array)
plt.ylabel("Nombre d'obstacles nettoyés")
plt.xlabel('Temps')
plt.suptitle('Agent random')
plt.show()
| true
| true
|
1c47577594847e925fd3f69b3081b42da3d8500b
| 49,232
|
py
|
Python
|
tests/test_data_tokenizers.py
|
sxjscience/gluon-nlp
|
e6c39a80f4155cdb9c5fe8145287ddd322b4952b
|
[
"Apache-2.0"
] | 1
|
2020-03-20T08:01:34.000Z
|
2020-03-20T08:01:34.000Z
|
tests/test_data_tokenizers.py
|
sxjscience/gluon-nlp
|
e6c39a80f4155cdb9c5fe8145287ddd322b4952b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_data_tokenizers.py
|
sxjscience/gluon-nlp
|
e6c39a80f4155cdb9c5fe8145287ddd322b4952b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import random
import collections
import pickle
from uuid import uuid4
import os
import unicodedata
import tempfile
from pkg_resources import parse_version
import gluonnlp
from gluonnlp.data.tokenizers import WhitespaceTokenizer, MosesTokenizer, JiebaTokenizer,\
SpacyTokenizer, SubwordNMTTokenizer, YTTMTokenizer, SentencepieceTokenizer, \
HuggingFaceBPETokenizer, HuggingFaceByteBPETokenizer, HuggingFaceWordPieceTokenizer, \
HuggingFaceTokenizer
from gluonnlp.base import get_repo_url
from gluonnlp.data import Vocab
from gluonnlp.utils.misc import download
EN_SAMPLES = ['Four score and seven years ago our fathers brought forth on this continent, '
'a new nation, conceived in Liberty, and dedicated to the proposition '
'that all men are created equal.',
'In spite of the debate going on for months about the photos of Özil with the '
'Turkish President Recep Tayyip Erdogan, he regrets the return of '
'the 92-match national player Özil.']
DE_SAMPLES = ['Goethe stammte aus einer angesehenen bürgerlichen Familie; sein Großvater'
' mütterlicherseits war als Stadtschultheiß höchster Justizbeamter der'
' Stadt Frankfurt, sein Vater Doktor der Rechte und kaiserlicher Rat.',
'"Das ist eine Frage, die natürlich davon abhängt, dass man einmal ins '
'Gespräch kommt, dass man mit ihm auch darüber spricht, warum er das eine '
'oder andere offenbar so empfunden hat, wie das in seinem Statement niedergelegt'
' ist", sagte Grindel im Fußball-Podcast "Phrasenmäher" der "Bild-Zeitung.']
ZH_SAMPLES = ['苟活者在淡红的血色中,会依稀看见微茫的希望;真的猛士,将更奋然而前行。',
'参加工作,哈尔滨工业大学无线电工程系电子仪器及测量技术专业毕业。']
SUBWORD_TEST_SAMPLES = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
def random_inject_space(sentence):
words = sentence.split()
ret = ''
for i, word in enumerate(words):
ret += word
if i < len(words) - 1:
n_space_tokens = random.randint(1, 10)
for j in range(n_space_tokens):
ret += random.choice([' ', '\t', '\r', '\n'])
return ret
def verify_encode_token_with_offsets(tokenizer, all_sentences, gt_offsets=None):
if gt_offsets is None:
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence in\
zip(tokens, enc_tokens, offsets, sentences):
for tok, offset, enc_tok in zip(ele_tokens, ele_offsets, ele_enc_tokens):
assert ele_sentence[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for tok, offset, enc_tok in zip(tokens, offsets, enc_tokens):
assert sentences[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for sentences, ele_gt_offsets in [(all_sentences[0], gt_offsets[0]),
(all_sentences, gt_offsets)]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
assert ele_gt_offsets == offsets
assert enc_tokens == tokens
def verify_sentencepiece_tokenizer_with_offsets(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence\
in zip(tokens, enc_tokens, offsets, sentences):
for i, (tok, offset, enc_tok) in enumerate(zip(ele_tokens, ele_offsets,
ele_enc_tokens)):
assert tok == enc_tok
ele_sel_tok = unicodedata.normalize('NFKC',
ele_sentence[offset[0]:offset[1]]).strip()
if tokenizer.is_first_subword(tok):
real_tok = tok[1:]
else:
real_tok = tok
assert ele_sel_tok == real_tok,\
'ele_sel_tok={}, real_tok={}'.format(ele_sel_tok, real_tok)
def verify_encode_with_offsets_consistency(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, int)
tokens, offsets = tokenizer.encode_with_offsets(sentences, int)
str_tokens, str_offsets = tokenizer.encode_with_offsets(sentences, str)
assert offsets == str_offsets
assert tokens == enc_tokens
def verify_encode_token(tokenizer, all_sentences, all_gt_tokens):
for sentences, gt_tokens in [(all_sentences[0], all_gt_tokens[0]),
(all_sentences, all_gt_tokens)]:
tokenizer_encode_ret = tokenizer.encode(sentences)
assert tokenizer_encode_ret == gt_tokens,\
'Whole Encoded: {}, \nWhole GT: {}'.format(tokenizer_encode_ret, gt_tokens)
def verify_decode(tokenizer, all_sentences, out_type=str):
for sentences in [all_sentences[0], all_sentences]:
assert tokenizer.decode(tokenizer.encode(sentences, out_type)) == sentences
def verify_decode_spm(tokenizer, all_sentences, gt_int_decode_sentences):
for sentences, case_gt_int_decode in [(all_sentences[0], gt_int_decode_sentences[0]),
(all_sentences, gt_int_decode_sentences)]:
if isinstance(sentences, str):
gt_str_decode_sentences = sentences
if tokenizer.lowercase:
gt_str_decode_sentences = gt_str_decode_sentences.lower()
gt_str_decode_sentences = unicodedata.normalize('NFKC', gt_str_decode_sentences)
elif isinstance(sentences, list):
gt_str_decode_sentences = []
for ele in sentences:
ele_gt_decode = ele
if tokenizer.lowercase:
ele_gt_decode = ele_gt_decode.lower()
ele_gt_decode = unicodedata.normalize('NFKC', ele_gt_decode)
gt_str_decode_sentences.append(ele_gt_decode)
else:
raise NotImplementedError
assert tokenizer.decode(tokenizer.encode(sentences, str)) == gt_str_decode_sentences
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_subword_nmt(tokenizer, all_sentences, gt_int_decode, gt_str_decode):
for sentences, case_gt_int_decode, case_gt_str_decode in [(all_sentences[0], gt_int_decode[0], gt_str_decode[0]),
(all_sentences, gt_int_decode, gt_str_decode)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_str_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_hf(tokenizer, all_sentences, gt_decode_sentences):
for sentences, case_gt_decode in [(all_sentences[0], gt_decode_sentences[0]),
(all_sentences, gt_decode_sentences)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_decode
if isinstance(sentences, list):
for sentence in sentences:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentence, int))\
== tokenizer.encode(sentence, str)
assert tokenizer.vocab[tokenizer.encode(sentence, str)]\
== tokenizer.encode(sentence, int)
else:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentences, int)) \
== tokenizer.encode(sentences, str)
assert tokenizer.vocab[tokenizer.encode(sentences, str)] \
== tokenizer.encode(sentences, int)
def verify_decode_no_vocab_raise(tokenizer):
# When the vocab is not attached, should raise ValueError
for sentences in [EN_SAMPLES[0], EN_SAMPLES]:
with pytest.raises(ValueError):
tokenizer.encode(sentences, int)
with pytest.raises(ValueError):
tokenizer.decode([0])
with pytest.raises(ValueError):
tokenizer.decode([[0], [1]])
def verify_pickleble(tokenizer, cls):
print(tokenizer)
# Verify if the tokenizer is pickleable and has the same behavior after dumping/loading
tokenizer_p = pickle.loads(pickle.dumps(tokenizer))
assert isinstance(tokenizer_p, cls)
assert tokenizer.encode(SUBWORD_TEST_SAMPLES, str) == tokenizer_p.encode(SUBWORD_TEST_SAMPLES, str)
def test_whitespace_tokenizer():
tokenizer = WhitespaceTokenizer()
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers', 'brought',
'forth', 'on', 'this', 'continent,', 'a', 'new', 'nation,', 'conceived',
'in', 'Liberty,', 'and', 'dedicated', 'to', 'the', 'proposition', 'that',
'all', 'men', 'are', 'created', 'equal.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan,', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie;', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt,', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat.'],
['"Das', 'ist', 'eine', 'Frage,', 'die', 'natürlich', 'davon', 'abhängt,',
'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt,', 'dass', 'man', 'mit',
'ihm', 'auch', 'darüber', 'spricht,', 'warum', 'er', 'das', 'eine', 'oder',
'andere', 'offenbar', 'so', 'empfunden', 'hat,', 'wie', 'das', 'in',
'seinem', 'Statement', 'niedergelegt', 'ist",', 'sagte', 'Grindel', 'im',
'Fußball-Podcast', '"Phrasenmäher"', 'der', '"Bild-Zeitung.']]
for _ in range(2):
# Inject noise and test for encode
noisy_en_samples = [random_inject_space(ele) for ele in EN_SAMPLES]
noisy_de_samples = [random_inject_space(ele) for ele in DE_SAMPLES]
verify_encode_token(tokenizer, noisy_en_samples + noisy_de_samples,
gt_en_tokenized + gt_de_tokenized)
# Test for decode
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, str)
# Test for encode_with_offsets
verify_encode_token_with_offsets(tokenizer, noisy_en_samples + noisy_de_samples)
verify_decode_no_vocab_raise(tokenizer)
# Test for output_type = int
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized,
[])))
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, int)
verify_pickleble(tokenizer, WhitespaceTokenizer)
verify_encode_token_with_offsets(tokenizer, EN_SAMPLES + DE_SAMPLES)
def test_moses_tokenizer():
en_tokenizer = MosesTokenizer('en')
de_tokenizer = MosesTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt', ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden',
'hat', ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt',
'ist', '"', ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast',
'"', 'Phrasenmäher', '"', 'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
verify_decode(en_tokenizer, EN_SAMPLES, str)
verify_decode(de_tokenizer, DE_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
verify_decode_no_vocab_raise(en_tokenizer)
verify_decode_no_vocab_raise(de_tokenizer)
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_decode(en_tokenizer, EN_SAMPLES, int)
verify_decode(de_tokenizer, DE_SAMPLES, int)
verify_pickleble(en_tokenizer, MosesTokenizer)
verify_pickleble(de_tokenizer, MosesTokenizer)
def test_jieba_tokenizer():
tokenizer = JiebaTokenizer()
gt_zh_tokenized = [['苟活', '者', '在', '淡红', '的', '血色', '中', ',',
'会', '依稀', '看见', '微茫', '的', '希望', ';', '真的',
'猛士', ',', '将', '更奋', '然而', '前行', '。'],
['参加', '工作', ',', '哈尔滨工业大学', '无线电', '工程系', '电子仪器',
'及', '测量', '技术', '专业', '毕业', '。']]
verify_encode_token(tokenizer, ZH_SAMPLES, gt_zh_tokenized)
verify_decode(tokenizer, ZH_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_zh_tokenized, [])))
verify_decode_no_vocab_raise(tokenizer)
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, ZH_SAMPLES, int)
verify_pickleble(tokenizer, JiebaTokenizer)
def test_spacy_tokenizer():
en_tokenizer = SpacyTokenizer('en')
de_tokenizer = SpacyTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt', 'Frankfurt',
',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und', 'kaiserlicher',
'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden', 'hat',
',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt', 'ist', '"',
',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast', '"', 'Phrasenmäher', '"',
'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_pickleble(en_tokenizer, SpacyTokenizer)
verify_pickleble(de_tokenizer, SpacyTokenizer)
verify_encode_token_with_offsets(en_tokenizer, EN_SAMPLES)
verify_encode_token_with_offsets(de_tokenizer, DE_SAMPLES)
# Test for loading spacy tokenizer from specifying the "model" flag
en_tokenizer = SpacyTokenizer(model='en_core_web_lg')
out = en_tokenizer.encode(EN_SAMPLES)
def test_yttm_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'yttm.model')
download(url=get_repo_url() + 'tokenizer_test_models/yttm/test_ende_yttm-6f2c39.model',
path=model_path)
tokenizer = YTTMTokenizer(model_path=model_path)
gt_tokenized = [['▁He', 'll', 'o', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁',
'Ⅷ', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '-A', 'm', 'az', 'on', '-H', 'a', 'ib',
'in', '-L', 'e', 'on', 'ard', '-S', 'hen', 'g', '-S', 'h', 'u', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!',
'@', '#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 2), (2, 4), (4, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
(17, 21), (21, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 10), (10, 11), (11, 13),
(13, 15), (15, 17), (17, 18), (18, 20), (20, 22), (22, 24), (24, 25), (25, 27),
(27, 30), (30, 32), (32, 35), (35, 36), (36, 38), (38, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53), (53, 54),
(54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62),
(62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y<UNK>all! How are you <UNK> <UNK> <UNK> <UNK> ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# <UNK>abc<UNK>']
gt_str_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, YTTMTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
# Begin to verify decode
for sample_sentences, ele_gt_int_decode, ele_gt_str_decode in [(SUBWORD_TEST_SAMPLES[0], gt_int_decode[0], gt_str_decode[0]),
(SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)]:
int_decode = tokenizer.decode(tokenizer.encode(sample_sentences, int))
str_decode = tokenizer.decode(tokenizer.encode(sample_sentences, str))
assert int_decode == ele_gt_int_decode
assert str_decode == ele_gt_str_decode
os.remove(model_path)
assert tokenizer.decode([]) == ''
assert tokenizer.decode([[]]) == ['']
@pytest.mark.seed(123)
def test_sentencepiece_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'spm.model')
download(url=get_repo_url()
+ 'tokenizer_test_models/sentencepiece/case1/test_ende-a9bee4.model',
path=model_path)
# Case1
tokenizer = SentencepieceTokenizer(model_path)
gt_tokenized = [['▁Hel', 'lo', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you',
'▁', 'VI', 'II', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '-', 'A', 'ma', 'zo', 'n', '-', 'H', 'ai',
'bin', '-', 'L', 'e', 'on', 'ard', '-', 'S', 'hen', 'g', '-', 'S', 'hu', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!', '@',
'#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 26), (26, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 12),
(12, 14), (14, 15), (15, 16), (16, 17), (17, 19), (19, 22), (22, 23), (23, 24),
(24, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36), (36, 37),
(37, 38), (38, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48), (48, 51),
(51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (61, 62), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y ⁇ all! How are you VIII ⁇ ⁇ ⁇ ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:! ⁇ # ⁇ abc ⁇ ']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SentencepieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
# Case2, lower_case
gt_lower_case_int_decode = ['hello, y ⁇ all! how are you viii ⁇ ⁇ ⁇ ?',
'gluonnlp is great!!!!!!',
'gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:! ⁇ # ⁇ abc ⁇ ']
tokenizer = SentencepieceTokenizer(model_path, lowercase=True)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_lower_case_int_decode)
# Case3, Use the sentencepiece regularization commands, we test whether we can obtain different encoding results
tokenizer = SentencepieceTokenizer(model_path, lowercase=True, nbest=-1, alpha=1.0)
has_different_encode_out = False
encode_out = None
for _ in range(10):
if encode_out is None:
encode_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
else:
ele_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
if ele_out != encode_out:
has_different_encode_out = True
break
assert has_different_encode_out
os.remove(model_path)
def test_subword_nmt_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'subword_nmt.model')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende-d189ff.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'subword_nmt.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende_vocab-900f81.json',
path=vocab_path)
# Case 1
tokenizer = SubwordNMTTokenizer(model_path, vocab_path)
gt_tokenized = [["Hel", "lo", ",</w>", "y", "\'", "all", "!</w>", "How</w>", "are</w>", "you</w>",
"Ⅷ</w>", "😁</w>", "😁</w>", "😁</w>", "?</w>"],
["Gl", "u", "on", "N", "L", "P</w>", "is</w>", "great", "!", "!", "!", "!!",
"!</w>"],
["Gl", "u", "on", "N", "L", "P", "-", "Amaz", "on-", "H", "ai", "b", "in-", "Le",
"on", "ard", "-", "Sh", "eng", "-", "Sh", "u", "ai", "-", "X", "ing", "ji",
"an", "..", "...", "/", ":", "!", "@", "#</w>", "\'", "ab", "c", "\'</w>"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 16),
(16, 17), (17, 19), (19, 20), (20, 23), (23, 25), (25, 27), (27, 30), (30, 31),
(31, 33), (33, 36), (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 49), (49, 51), (51, 53), (53, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ["Hello, y\'all! How are you Ⅷ 😁 😁 😁 ?",
"GluonNLP is great!!!!!!",
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# \'abc\'"]
gt_str_decode = SUBWORD_TEST_SAMPLES
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SubwordNMTTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_subword_nmt(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)
# Case 2, bpe_dropout
# We use str decode here because we may not perfectly recover the original sentence with int decode.
tokenizer = SubwordNMTTokenizer(model_path, vocab_path, bpe_dropout=0.5)
verify_decode(tokenizer, SUBWORD_TEST_SAMPLES, out_type=str)
os.remove(model_path)
os.remove(vocab_path)
def test_huggingface_bpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'test_hf_bpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'test_hf_bpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'test_hf_bpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello</w>', ',</w>', 'y</w>', "'</w>", 'all</w>', '!</w>', 'How</w>',
'are</w>', 'you</w>', '<unk>', '<unk>', '<unk>', '<unk>', '?</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', 'is</w>', 'great</w>', '!</w>', '!</w>',
'!</w>', '!</w>', '!</w>', '!</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', '-</w>', 'Amazon</w>', '-</w>', 'H', 'ai',
'bin</w>', '-</w>', 'Leonard</w>', '-</w>', 'Sh', 'en', 'g</w>', '-</w>',
'Sh', 'u', 'ai</w>', '-</w>', 'X', 'ing', 'j', 'ian</w>', '.</w>', '.</w>',
'.</w>', '.</w>', '.</w>', '/</w>', ':</w>', '!</w>', '@</w>', '#</w>',
"'</w>", 'ab', 'c</w>', "'</w>"]]
gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21), (22, 25),
(26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (9, 11), (12, 17), (17, 18), (18, 19),
(19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16), (16, 17),
(17, 19), (19, 22), (22, 23), (23, 30), (30, 31), (31, 33), (33, 35), (35, 36),
(36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48),
(48, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
(58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
# gt_int_decode = gt_str_decode for hf
# hf removed the unk tokens in decode result
gt_decode = ["Hello , y ' all ! How are you ?",
'GluonNLP is great ! ! ! ! ! !',
"GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian . . . . . / : ! @ # ' abc '"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_decode = ["hello , y ' all ! how are you ?",
'gluonnlp is great ! ! ! ! ! !',
"gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian . . . . . / : ! @ # ' abc '"]
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_bytebpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bytebpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'hf_bytebpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'hf_bytebpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
'Ġâ', 'ħ', '§', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'Ġ?'],
['Gl', 'u', 'on', 'N', 'LP', 'Ġis', 'Ġgreat', 'ï¼', 'ģ', 'ï¼',
'ģ', 'ï¼', 'ģ', '!!!'],
['Gl', 'u', 'on', 'N', 'LP', '-', 'Amazon', '-', 'Ha', 'ib', 'in',
'-', 'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u',
'ai', '-', 'X', 'ing', 'j', 'ian', '.....', '/', ':', '!', '@',
'#', "Ġ'", 'ab', 'c', "'"]]
# the defination of the offsets of bytelevel seems not clear
gt_offsets = [[(0, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 27), (26, 27), (26, 27), (27, 29), (28, 29), (29, 31),
(30, 31), (31, 33), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18),
(17, 18), (18, 19), (18, 19), (19, 20), (19, 20), (20, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16),
(16, 18), (18, 20), (20, 22), (22, 23), (23, 25), (25, 27), (27, 30),
(30, 31), (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 56),
(56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 63),
(63, 65), (65, 66), (66, 67)]]
gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_int_decode = ["hello, y'all! how are you ⅷ 😁 😁 😁 ?",
'gluonnlp is great!!!!!!',
"gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:!@# 'abc'"]
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_int_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceByteBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_wordpiece_tokenizer():
    """HuggingFace WordPiece tokenizer: compare encode/offsets/decode against
    ground truth, with and without lowercasing, for both the gluonnlp vocab
    file and the original HF vocab file.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        vocab_path = os.path.join(dir_path, 'hf_wordpiece.vocab')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'hf_wordpiece.hf_vocab')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, lowercase=True
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=True)
        # Expected pieces for SUBWORD_TEST_SAMPLES; '##' marks continuation subwords.
        gt_tokenized = [["hello", ",", "y", "'", "all", "!", "how", "are", "you",
                         "<unk>", "<unk>", "<unk>", "<unk>", "?"],
                        ["gl", "##uo", "##nn", "##l", "##p", "is", "great", "\uff01",
                         "\uff01", "\uff01", "!", "!", "!"],
                        ["gl", "##uo", "##nn", "##l", "##p", "-", "amazon", "-", "hai",
                         "##bin", "-", "leonard", "-", "shen", "##g", "-", "shu", "##ai", "-",
                         "xin", "##g", "##ji", "##an", ".", ".", ".", ".", ".", "/", ":", "!",
                         "@", "#", "'", "abc", "'"]]
        gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
                       (22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (8, 9), (9, 15), (15, 16), (16, 19),
                       (19, 22), (22, 23), (23, 30), (30, 31), (31, 35), (35, 36), (36, 37), (37, 40),
                       (40, 42), (42, 43), (43, 46), (46, 47), (47, 49), (49, 51), (51, 52), (52, 53),
                       (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61),
                       (62, 63), (63, 66), (66, 67)]]
        gt_decode = ["hello, y'all! how are you?",
                     "gluonnlp is great ! ! !!!!",
                     "gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian..... / :! @ #'abc '"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=False
        gt_lowercase_decode = [", y'all! are you?",
                               "is great ! ! !!!!",
                               "- - - - - -..... / :! @ #'abc '"]
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=False)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceWordPieceTokenizer(hf_vocab_path, lowercase=True)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_wordpiece_tokenizer_v08():
    """WordPiece model serialized by huggingface `tokenizers` 0.8.x, loaded via
    the generic HuggingFaceTokenizer wrapper (skipped on tokenizers >= 0.9).
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.model',
                 path=model_path,
                 sha1_hash='66ccadf6e5e354ff9604e4a82f107a2ac873abd5')
        vocab_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.vocab',
                 path=vocab_path,
                 sha1_hash='dd6fdf4bbc74eaa8806d12cb3d38a4d9a306aea8')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        gt_tokenized = [['Hel', '##lo', ',', 'y', '[UNK]', 'all', '!',
                         'How', 'are', 'you', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '?'],
                        ['Gl', '##u', '##on', '##N', '##L', '##P', 'is', 'great', '[UNK]',
                         '[UNK]', '[UNK]', '!', '!', '!'],
                        ['Gl', '##u', '##on', '##N', '##L', '##P', '-',
                         'Am', '##az', '##on', '-', 'Ha', '##ibi', '##n', '-', 'Leon', '##ard',
                         '-', 'She', '##n', '##g', '-', 'Sh', '##ua', '##i', '-', 'X',
                         '##ing', '##j', '##ian', '.', '.', '.', '.', '.', '/', ':', '!',
                         '@', '#', '[UNK]', 'ab', '##c', '[UNK]']]
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13),
                       (14, 17), (18, 21), (22, 25), (26, 27), (28, 29), (30, 31),
                       (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
                       (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9),
                       (9, 11), (11, 13), (13, 15), (15, 16), (16, 18), (18, 21),
                       (21, 22), (22, 23), (23, 27), (27, 30), (30, 31), (31, 34),
                       (34, 35), (35, 36), (36, 37), (37, 39), (39, 41), (41, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52),
                       (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
                       (58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66),
                       (66, 67)]]
        # NOTE: HF drops [UNK] tokens when decoding, hence the missing characters.
        gt_decode = ['Hello, y all! How are you?',
                     'GluonNLP is great!!!',
                     'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian..... / '
                     ':! @ # abc']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bpe_tokenizer_v08():
    """BPE model serialized by huggingface `tokenizers` 0.8.x, loaded via the
    generic HuggingFaceTokenizer wrapper (skipped on tokenizers >= 0.9).
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bpe_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.model',
                 path=model_path,
                 sha1_hash='ecda90979561ca4c5a8d769b5e3c9fa2270d5317')
        vocab_path = os.path.join(dir_path, 'hf_bpe_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.vocab',
                 path=vocab_path,
                 sha1_hash='b92dde0b094f405208f3ec94b5eae88430bf4262')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # '</w>' marks word-final pieces in this BPE vocabulary.
        gt_tokenized = [['H', 'ello</w>', ',</w>', 'y</w>', 'all</w>', '!</w>',
                         'How</w>', 'are</w>', 'you</w>', '?</w>'],
                        ['G', 'lu', 'on', 'N', 'L', 'P</w>', 'is</w>', 'great</w>',
                         '!</w>', '!</w>', '!</w>'],
                        ['G', 'lu', 'on', 'N', 'L', 'P</w>', '-</w>', 'Amaz', 'on</w>',
                         '-</w>', 'Ha', 'i', 'bin</w>', '-</w>', 'Leon', 'ard</w>', '-</w>',
                         'Sh', 'eng</w>', '-</w>', 'S', 'hu', 'ai</w>', '-</w>', 'X', 'ing',
                         'j', 'ian</w>', '.</w>', '.</w>', '.</w>', '.</w>', '.</w>', '/</w>',
                         ':</w>', '!</w>', '@</w>', '#</w>', 'ab', 'c</w>']]
        gt_offsets = [[(0, 1), (1, 5), (5, 6), (7, 8), (9, 12), (12, 13), (14, 17),
                       (18, 21), (22, 25), (34, 35)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
                       (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 15),
                       (15, 16), (16, 18), (18, 19), (19, 22), (22, 23), (23, 27), (27, 30),
                       (30, 31), (31, 33), (33, 36), (36, 37), (37, 38), (38, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53),
                       (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60),
                       (60, 61), (63, 65), (65, 66)]]
        gt_decode = ['Hello , y all ! How are you ?',
                     'GluonNLP is great ! ! !',
                     'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian'
                     ' . . . . . / : ! @ # abc']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bytebpe_tokenizer_v08():
    """Byte-level BPE model serialized by huggingface `tokenizers` 0.8.x, loaded
    via the generic HuggingFaceTokenizer wrapper (skipped on tokenizers >= 0.9).
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.model',
                 path=model_path,
                 sha1_hash='a1c4da1f6c21df923e150f56dbb5b7a53c61808b')
        vocab_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.vocab',
                 path=vocab_path,
                 sha1_hash='7831b19078a3222f450e65b2188dc0770473123b')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # Byte-level tokens: 'Ġ' encodes a leading space; multi-byte characters
        # (emoji, fullwidth '!') are split into one token per byte.
        gt_tokenized = [['He', 'llo', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
                         'Ġâ', 'ħ', '§', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ', 'ð', 'Ł', 'ĺ',
                         'ģ', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ?'],
                        ['G', 'l', 'u', 'on', 'N', 'L', 'P', 'Ġis', 'Ġgreat', 'ï', '¼', 'ģ',
                         'ï', '¼', 'ģ', 'ï', '¼', 'ģ', '!', '!', '!'],
                        ['G', 'l', 'u', 'on', 'N', 'L', 'P', '-', 'Am', 'az', 'on', '-',
                         'Ha', 'ib', 'in', '-', 'Le', 'on', 'ard', '-', 'S', 'hen', 'g', '-',
                         'Sh', 'u', 'ai', '-', 'X', 'ing', 'j', 'ian',
                         '..', '...', '/', ':', '!', '@', '#', 'Ġ', "'", 'ab', 'c', "'"]]
        gt_offsets = [[(0, 2), (2, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
                       (17, 21), (21, 25), (25, 27), (26, 27), (26, 27), (27, 28), (28, 29),
                       (28, 29), (28, 29), (28, 29), (29, 30), (30, 31), (30, 31), (30, 31),
                       (30, 31), (31, 32), (32, 33), (32, 33), (32, 33), (32, 33), (33, 35)],
                      [(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17),
                       (17, 18), (17, 18), (17, 18), (18, 19), (18, 19), (18, 19), (19, 20),
                       (19, 20), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 11),
                       (11, 13), (13, 15), (15, 16), (16, 18), (18, 20), (20, 22), (22, 23),
                       (23, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36),
                       (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
                       (44, 47), (47, 48), (48, 51), (51, 53), (53, 56), (56, 57),
                       (57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63),
                       (63, 65), (65, 66), (66, 67)]]
        gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                     'GluonNLP is great!!!!!!',
                     "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
def test_tokenizers_create():
    """Smoke-test the tokenizer registry: build a Moses tokenizer by name and use it."""
    moses_tok = gluonnlp.data.tokenizers.create('moses', 'en')
    moses_tok.encode('hello world!')
| 62.318987
| 133
| 0.50518
|
import pytest
import random
import collections
import pickle
from uuid import uuid4
import os
import unicodedata
import tempfile
from pkg_resources import parse_version
import gluonnlp
from gluonnlp.data.tokenizers import WhitespaceTokenizer, MosesTokenizer, JiebaTokenizer,\
SpacyTokenizer, SubwordNMTTokenizer, YTTMTokenizer, SentencepieceTokenizer, \
HuggingFaceBPETokenizer, HuggingFaceByteBPETokenizer, HuggingFaceWordPieceTokenizer, \
HuggingFaceTokenizer
from gluonnlp.base import get_repo_url
from gluonnlp.data import Vocab
from gluonnlp.utils.misc import download
# English samples (Gettysburg Address opening + a news sentence containing
# non-ASCII 'Özil') used for word-level tokenizer tests.
EN_SAMPLES = ['Four score and seven years ago our fathers brought forth on this continent, '
              'a new nation, conceived in Liberty, and dedicated to the proposition '
              'that all men are created equal.',
              'In spite of the debate going on for months about the photos of Özil with the '
              'Turkish President Recep Tayyip Erdogan, he regrets the return of '
              'the 92-match national player Özil.']
# German samples with umlauts, quotes and hyphenated compounds.
DE_SAMPLES = ['Goethe stammte aus einer angesehenen bürgerlichen Familie; sein Großvater'
              ' mütterlicherseits war als Stadtschultheiß höchster Justizbeamter der'
              ' Stadt Frankfurt, sein Vater Doktor der Rechte und kaiserlicher Rat.',
              '"Das ist eine Frage, die natürlich davon abhängt, dass man einmal ins '
              'Gespräch kommt, dass man mit ihm auch darüber spricht, warum er das eine '
              'oder andere offenbar so empfunden hat, wie das in seinem Statement niedergelegt'
              ' ist", sagte Grindel im Fußball-Podcast "Phrasenmäher" der "Bild-Zeitung.']
# Chinese samples (no whitespace word boundaries) for the Jieba tokenizer.
ZH_SAMPLES = ['苟活者在淡红的血色中,会依稀看见微茫的希望;真的猛士,将更奋然而前行。',
              '参加工作,哈尔滨工业大学无线电工程系电子仪器及测量技术专业毕业。']
# Deliberately tricky inputs for subword models: apostrophes, a Roman numeral,
# emoji, repeated fullwidth '!', and dense punctuation.
SUBWORD_TEST_SAMPLES = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                        'GluonNLP is great!!!!!!',
                        "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
def random_inject_space(sentence):
    """Rejoin the words of *sentence* with 1-10 random whitespace characters
    (space/tab/CR/LF) between consecutive words."""
    words = sentence.split()
    pieces = []
    last = len(words) - 1
    for idx, word in enumerate(words):
        pieces.append(word)
        if idx < last:
            # Same RNG draw sequence as the straightforward loop version:
            # one randint for the count, then that many choice() calls.
            for _ in range(random.randint(1, 10)):
                pieces.append(random.choice([' ', '\t', '\r', '\n']))
    return ''.join(pieces)
def verify_encode_token_with_offsets(tokenizer, all_sentences, gt_offsets=None):
    """Check encode_with_offsets for both a single sentence and a batch.

    When ``gt_offsets`` is None, each offset pair must slice the original
    sentence back to exactly its token (works for tokenizers whose tokens are
    literal substrings). Otherwise tokens/offsets are compared against the
    supplied ground truth (needed when tokens carry subword markers).
    In both modes, encode() and encode_with_offsets() must agree on tokens.
    """
    if gt_offsets is None:
        for sentences in [all_sentences[0], all_sentences]:
            enc_tokens = tokenizer.encode(sentences, str)
            tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
            if isinstance(sentences, list):
                for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence in\
                        zip(tokens, enc_tokens, offsets, sentences):
                    for tok, offset, enc_tok in zip(ele_tokens, ele_offsets, ele_enc_tokens):
                        assert ele_sentence[offset[0]:offset[1]] == tok
                        assert tok == enc_tok
            else:
                for tok, offset, enc_tok in zip(tokens, offsets, enc_tokens):
                    assert sentences[offset[0]:offset[1]] == tok
                    assert tok == enc_tok
    else:
        for sentences, ele_gt_offsets in [(all_sentences[0], gt_offsets[0]),
                                          (all_sentences, gt_offsets)]:
            enc_tokens = tokenizer.encode(sentences, str)
            tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
            assert ele_gt_offsets == offsets
            assert enc_tokens == tokens
def verify_sentencepiece_tokenizer_with_offsets(tokenizer, all_sentences):
    """Check sentencepiece offsets: the NFKC-normalized, stripped slice of the
    sentence at each offset must equal the piece, with its first character
    removed when the tokenizer flags it as a word-initial subword."""
    for sentences in [all_sentences[0], all_sentences]:
        enc_tokens = tokenizer.encode(sentences, str)
        tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
        if isinstance(sentences, list):
            for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence\
                    in zip(tokens, enc_tokens, offsets, sentences):
                for i, (tok, offset, enc_tok) in enumerate(zip(ele_tokens, ele_offsets,
                                                               ele_enc_tokens)):
                    assert tok == enc_tok
                    ele_sel_tok = unicodedata.normalize('NFKC',
                                                        ele_sentence[offset[0]:offset[1]]).strip()
                    if tokenizer.is_first_subword(tok):
                        # Drop the leading subword marker before comparing.
                        real_tok = tok[1:]
                    else:
                        real_tok = tok
                    assert ele_sel_tok == real_tok,\
                        'ele_sel_tok={}, real_tok={}'.format(ele_sel_tok, real_tok)
def verify_encode_with_offsets_consistency(tokenizer, all_sentences):
    """Check that encode() agrees with encode_with_offsets(), and that the
    returned offsets are identical for int and str output types."""
    for case in (all_sentences[0], all_sentences):
        plain_ids = tokenizer.encode(case, int)
        ids, int_offsets = tokenizer.encode_with_offsets(case, int)
        _, str_offsets = tokenizer.encode_with_offsets(case, str)
        assert int_offsets == str_offsets
        assert ids == plain_ids
def verify_encode_token(tokenizer, all_sentences, all_gt_tokens):
    """Compare tokenizer.encode against ground-truth tokens for both a single
    sentence and the full batch."""
    cases = [(all_sentences[0], all_gt_tokens[0]), (all_sentences, all_gt_tokens)]
    for sentences, gt_tokens in cases:
        encoded = tokenizer.encode(sentences)
        assert encoded == gt_tokens,\
            'Whole Encoded: {}, \nWhole GT: {}'.format(encoded, gt_tokens)
def verify_decode(tokenizer, all_sentences, out_type=str):
    """Round-trip check: decoding the encoding must recover the input exactly,
    for both a single sentence and the full batch."""
    for case in (all_sentences[0], all_sentences):
        encoded = tokenizer.encode(case, out_type)
        assert tokenizer.decode(encoded) == case
def verify_decode_spm(tokenizer, all_sentences, gt_int_decode_sentences):
    """Check sentencepiece decoding for str and int encodings.

    The str round trip must reproduce the input up to optional lowercasing and
    NFKC normalization (applied here to build the expected string); the int
    round trip is compared to explicit ground truth since OOV ids decode to
    placeholder text.
    """
    for sentences, case_gt_int_decode in [(all_sentences[0], gt_int_decode_sentences[0]),
                                          (all_sentences, gt_int_decode_sentences)]:
        if isinstance(sentences, str):
            gt_str_decode_sentences = sentences
            if tokenizer.lowercase:
                gt_str_decode_sentences = gt_str_decode_sentences.lower()
            gt_str_decode_sentences = unicodedata.normalize('NFKC', gt_str_decode_sentences)
        elif isinstance(sentences, list):
            gt_str_decode_sentences = []
            for ele in sentences:
                ele_gt_decode = ele
                if tokenizer.lowercase:
                    ele_gt_decode = ele_gt_decode.lower()
                ele_gt_decode = unicodedata.normalize('NFKC', ele_gt_decode)
                gt_str_decode_sentences.append(ele_gt_decode)
        else:
            raise NotImplementedError
        assert tokenizer.decode(tokenizer.encode(sentences, str)) == gt_str_decode_sentences
        assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_subword_nmt(tokenizer, all_sentences, gt_int_decode, gt_str_decode):
    """Check subword-nmt decoding against ground truth for both int and str
    encodings, on a single sentence and on the full batch."""
    single_case = (all_sentences[0], gt_int_decode[0], gt_str_decode[0])
    batch_case = (all_sentences, gt_int_decode, gt_str_decode)
    for sentences, case_int_gt, case_str_gt in (single_case, batch_case):
        assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_str_gt
        assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_int_gt
def verify_decode_hf(tokenizer, all_sentences, gt_decode_sentences):
    """Check HF-style decoding against ground truth for both int and str
    encodings, and check that the attached vocab's token<->id mappings agree
    with the tokenizer's own encode output."""
    for sentences, case_gt_decode in [(all_sentences[0], gt_decode_sentences[0]),
                                      (all_sentences, gt_decode_sentences)]:
        assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_decode
        assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_decode
        if isinstance(sentences, list):
            for sentence in sentences:
                assert tokenizer.vocab.to_tokens(tokenizer.encode(sentence, int))\
                    == tokenizer.encode(sentence, str)
                assert tokenizer.vocab[tokenizer.encode(sentence, str)]\
                    == tokenizer.encode(sentence, int)
        else:
            assert tokenizer.vocab.to_tokens(tokenizer.encode(sentences, int)) \
                   == tokenizer.encode(sentences, str)
            assert tokenizer.vocab[tokenizer.encode(sentences, str)] \
                   == tokenizer.encode(sentences, int)
def verify_decode_no_vocab_raise(tokenizer):
    """Without a vocab attached, int-typed encode and any decode of token ids
    must raise ValueError."""
    for sample in (EN_SAMPLES[0], EN_SAMPLES):
        with pytest.raises(ValueError):
            tokenizer.encode(sample, int)
    with pytest.raises(ValueError):
        tokenizer.decode([0])
    with pytest.raises(ValueError):
        tokenizer.decode([[0], [1]])
def verify_pickleble(tokenizer, cls):
    """Ensure the tokenizer survives a pickle round trip: the restored object
    has the expected type and encodes identically to the original."""
    # print also exercises the tokenizer's __repr__.
    print(tokenizer)
    restored = pickle.loads(pickle.dumps(tokenizer))
    assert isinstance(restored, cls)
    assert tokenizer.encode(SUBWORD_TEST_SAMPLES, str) == restored.encode(SUBWORD_TEST_SAMPLES, str)
def test_whitespace_tokenizer():
    """WhitespaceTokenizer: robust to arbitrary runs of whitespace, round-trips
    text, and supports int output once a vocab is attached."""
    tokenizer = WhitespaceTokenizer()
    gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers', 'brought',
                        'forth', 'on', 'this', 'continent,', 'a', 'new', 'nation,', 'conceived',
                        'in', 'Liberty,', 'and', 'dedicated', 'to', 'the', 'proposition', 'that',
                        'all', 'men', 'are', 'created', 'equal.'],
                       ['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
                        'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
                        'President', 'Recep', 'Tayyip', 'Erdogan,', 'he', 'regrets', 'the',
                        'return', 'of', 'the', '92-match', 'national', 'player', 'Özil.']]
    gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
                        'Familie;', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
                        'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
                        'Frankfurt,', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
                        'kaiserlicher', 'Rat.'],
                       ['"Das', 'ist', 'eine', 'Frage,', 'die', 'natürlich', 'davon', 'abhängt,',
                        'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt,', 'dass', 'man', 'mit',
                        'ihm', 'auch', 'darüber', 'spricht,', 'warum', 'er', 'das', 'eine', 'oder',
                        'andere', 'offenbar', 'so', 'empfunden', 'hat,', 'wie', 'das', 'in',
                        'seinem', 'Statement', 'niedergelegt', 'ist",', 'sagte', 'Grindel', 'im',
                        'Fußball-Podcast', '"Phrasenmäher"', 'der', '"Bild-Zeitung.']]
    # Two rounds with freshly randomized whitespace each time.
    for _ in range(2):
        # Inject noise and test for encode
        noisy_en_samples = [random_inject_space(ele) for ele in EN_SAMPLES]
        noisy_de_samples = [random_inject_space(ele) for ele in DE_SAMPLES]
        verify_encode_token(tokenizer, noisy_en_samples + noisy_de_samples,
                            gt_en_tokenized + gt_de_tokenized)
        # Test for decode
        verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, str)
        # Test for encode_with_offsets
        verify_encode_token_with_offsets(tokenizer, noisy_en_samples + noisy_de_samples)
    verify_decode_no_vocab_raise(tokenizer)
    # Test for output_type = int
    vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized,
                                          [])))
    tokenizer.set_vocab(vocab)
    verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, int)
    verify_pickleble(tokenizer, WhitespaceTokenizer)
    verify_encode_token_with_offsets(tokenizer, EN_SAMPLES + DE_SAMPLES)
def test_moses_tokenizer():
    """MosesTokenizer for English and German: ground-truth encode plus
    round-trip decode in str and, after attaching a vocab, int output types."""
    en_tokenizer = MosesTokenizer('en')
    de_tokenizer = MosesTokenizer('de')
    gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
                        'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
                        ',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
                        'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
                       ['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
                        'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
                        'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
                        'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
    gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
                        'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
                        'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
                        'Frankfurt', ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
                        'kaiserlicher', 'Rat', '.'],
                       ['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
                        'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
                        'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
                        'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden',
                        'hat', ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt',
                        'ist', '"', ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast',
                        '"', 'Phrasenmäher', '"', 'der', '"', 'Bild-Zeitung', '.']]
    verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
    verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
    verify_decode(en_tokenizer, EN_SAMPLES, str)
    verify_decode(de_tokenizer, DE_SAMPLES, str)
    # Shared vocab over both languages' ground-truth tokens.
    vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
    verify_decode_no_vocab_raise(en_tokenizer)
    verify_decode_no_vocab_raise(de_tokenizer)
    en_tokenizer.set_vocab(vocab)
    de_tokenizer.set_vocab(vocab)
    verify_decode(en_tokenizer, EN_SAMPLES, int)
    verify_decode(de_tokenizer, DE_SAMPLES, int)
    verify_pickleble(en_tokenizer, MosesTokenizer)
    verify_pickleble(de_tokenizer, MosesTokenizer)
def test_jieba_tokenizer():
    """JiebaTokenizer on Chinese text: ground-truth encode, round-trip decode,
    int output with an attached vocab, and pickling."""
    tokenizer = JiebaTokenizer()
    gt_zh_tokenized = [['苟活', '者', '在', '淡红', '的', '血色', '中', ',',
                        '会', '依稀', '看见', '微茫', '的', '希望', ';', '真的',
                        '猛士', ',', '将', '更奋', '然而', '前行', '。'],
                       ['参加', '工作', ',', '哈尔滨工业大学', '无线电', '工程系', '电子仪器',
                        '及', '测量', '技术', '专业', '毕业', '。']]
    verify_encode_token(tokenizer, ZH_SAMPLES, gt_zh_tokenized)
    verify_decode(tokenizer, ZH_SAMPLES, str)
    vocab = Vocab(collections.Counter(sum(gt_zh_tokenized, [])))
    verify_decode_no_vocab_raise(tokenizer)
    tokenizer.set_vocab(vocab)
    verify_decode(tokenizer, ZH_SAMPLES, int)
    verify_pickleble(tokenizer, JiebaTokenizer)
def test_spacy_tokenizer():
    """SpacyTokenizer for English and German: ground-truth encode, pickling,
    offsets, and loading a tokenizer by explicit spaCy model name."""
    en_tokenizer = SpacyTokenizer('en')
    de_tokenizer = SpacyTokenizer('de')
    gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
                        'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
                        ',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
                        'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
                       ['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
                        'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
                        'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
                        'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
    gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
                        'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
                        'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt', 'Frankfurt',
                        ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und', 'kaiserlicher',
                        'Rat', '.'],
                       ['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
                        'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
                        'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
                        'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden', 'hat',
                        ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt', 'ist', '"',
                        ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast', '"', 'Phrasenmäher', '"',
                        'der', '"', 'Bild-Zeitung', '.']]
    verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
    verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
    vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
    en_tokenizer.set_vocab(vocab)
    de_tokenizer.set_vocab(vocab)
    verify_pickleble(en_tokenizer, SpacyTokenizer)
    verify_pickleble(de_tokenizer, SpacyTokenizer)
    verify_encode_token_with_offsets(en_tokenizer, EN_SAMPLES)
    verify_encode_token_with_offsets(de_tokenizer, DE_SAMPLES)
    # Test for loading spacy tokenizer from specifying the "model" flag
    en_tokenizer = SpacyTokenizer(model='en_core_web_lg')
    out = en_tokenizer.encode(EN_SAMPLES)  # smoke check only; output not asserted
def test_yttm_tokenizer():
    """YouTokenToMe BPE tokenizer: ground-truth encode/offsets, int/str decode
    (int decode maps OOV pieces to <UNK>), and empty-input decoding."""
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'yttm.model')
        download(url=get_repo_url() + 'tokenizer_test_models/yttm/test_ende_yttm-6f2c39.model',
                 path=model_path)
        tokenizer = YTTMTokenizer(model_path=model_path)
        # '▁' (U+2581) marks a word-initial piece.
        gt_tokenized = [['▁He', 'll', 'o', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁',
                         'Ⅷ', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
                        ['▁Gl', 'u', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
                         '!', '!'],
                        ['▁Gl', 'u', 'on', 'N', 'L', 'P', '-A', 'm', 'az', 'on', '-H', 'a', 'ib',
                         'in', '-L', 'e', 'on', 'ard', '-S', 'hen', 'g', '-S', 'h', 'u', 'ai',
                         '-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!',
                         '@', '#', '▁', "'", 'ab', 'c', "'"]]
        gt_offsets = [[(0, 2), (2, 4), (4, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
                       (17, 21), (21, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
                       (31, 32), (32, 33), (33, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 10), (10, 11), (11, 13),
                       (13, 15), (15, 17), (17, 18), (18, 20), (20, 22), (22, 24), (24, 25), (25, 27),
                       (27, 30), (30, 32), (32, 35), (35, 36), (36, 38), (38, 39), (39, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53), (53, 54),
                       (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62),
                       (62, 63), (63, 65), (65, 66), (66, 67)]]
        gt_int_decode = ['Hello, y<UNK>all! How are you <UNK> <UNK> <UNK> <UNK> ?',
                         'GluonNLP is great!!!!!!',
                         'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# <UNK>abc<UNK>']
        gt_str_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                         'GluonNLP is great!!!!!!',
                         "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, YTTMTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        # Begin to verify decode
        for sample_sentences, ele_gt_int_decode, ele_gt_str_decode in [(SUBWORD_TEST_SAMPLES[0], gt_int_decode[0], gt_str_decode[0]),
                                                                       (SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)]:
            int_decode = tokenizer.decode(tokenizer.encode(sample_sentences, int))
            str_decode = tokenizer.decode(tokenizer.encode(sample_sentences, str))
            assert int_decode == ele_gt_int_decode
            assert str_decode == ele_gt_str_decode
        os.remove(model_path)
        # Decoding empty inputs must produce empty outputs of matching structure.
        assert tokenizer.decode([]) == ''
        assert tokenizer.decode([[]]) == ['']
@pytest.mark.seed(123)
def test_sentencepiece_tokenizer():
    """SentencepieceTokenizer: ground-truth encode/offsets/decode, lowercasing,
    and subword regularization (sampling must yield varying segmentations)."""
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'spm.model')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/sentencepiece/case1/test_ende-a9bee4.model',
                 path=model_path)
        # Case1
        tokenizer = SentencepieceTokenizer(model_path)
        gt_tokenized = [['▁Hel', 'lo', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you',
                         '▁', 'VI', 'II', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
                        ['▁G', 'lu', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
                         '!', '!'],
                        ['▁G', 'lu', 'on', 'N', 'L', 'P', '-', 'A', 'ma', 'zo', 'n', '-', 'H', 'ai',
                         'bin', '-', 'L', 'e', 'on', 'ard', '-', 'S', 'hen', 'g', '-', 'S', 'hu', 'ai',
                         '-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!', '@',
                         '#', '▁', "'", 'ab', 'c', "'"]]
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
                       (21, 25), (25, 26), (26, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
                       (31, 32), (32, 33), (33, 35)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 12),
                       (12, 14), (14, 15), (15, 16), (16, 17), (17, 19), (19, 22), (22, 23), (23, 24),
                       (24, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36), (36, 37),
                       (37, 38), (38, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48), (48, 51),
                       (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59),
                       (59, 60), (60, 61), (61, 62), (62, 63), (63, 65), (65, 66), (66, 67)]]
        # ' ⁇ ' is sentencepiece's decode placeholder for OOV pieces.
        gt_int_decode = ['Hello, y ⁇ all! How are you VIII ⁇ ⁇ ⁇ ?',
                         'GluonNLP is great!!!!!!',
                         'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:! ⁇ # ⁇ abc ⁇ ']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, SentencepieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
        # Case2, lower_case
        gt_lower_case_int_decode = ['hello, y ⁇ all! how are you viii ⁇ ⁇ ⁇ ?',
                                    'gluonnlp is great!!!!!!',
                                    'gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:! ⁇ # ⁇ abc ⁇ ']
        tokenizer = SentencepieceTokenizer(model_path, lowercase=True)
        verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_lower_case_int_decode)
        # Case3, Use the sentencepiece regularization commands, we test whether we can obtain different encoding results
        tokenizer = SentencepieceTokenizer(model_path, lowercase=True, nbest=-1, alpha=1.0)
        has_different_encode_out = False
        encode_out = None
        for _ in range(10):
            if encode_out is None:
                encode_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
            else:
                ele_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
                if ele_out != encode_out:
                    has_different_encode_out = True
                    break
        assert has_different_encode_out
        os.remove(model_path)
def test_subword_nmt_tokenizer():
    """SubwordNMTTokenizer: ground-truth encode/offsets/decode plus a
    BPE-dropout round trip.

    FIX: the ``gt_int_decode`` third element was truncated mid-string (it
    previously ended at ``...../:!@`` with no closing quote or bracket — a
    SyntaxError, apparently from a tool that treated the '#' inside the string
    as a comment). Restored to match the corresponding sample sentence in
    SUBWORD_TEST_SAMPLES.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'subword_nmt.model')
        download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende-d189ff.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'subword_nmt.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende_vocab-900f81.json',
                 path=vocab_path)
        # Case 1
        tokenizer = SubwordNMTTokenizer(model_path, vocab_path)
        # '</w>' marks word-final pieces in the subword-nmt BPE codes.
        gt_tokenized = [["Hel", "lo", ",</w>", "y", "\'", "all", "!</w>", "How</w>", "are</w>", "you</w>",
                         "Ⅷ</w>", "😁</w>", "😁</w>", "😁</w>", "?</w>"],
                        ["Gl", "u", "on", "N", "L", "P</w>", "is</w>", "great", "!", "!", "!", "!!",
                         "!</w>"],
                        ["Gl", "u", "on", "N", "L", "P", "-", "Amaz", "on-", "H", "ai", "b", "in-", "Le",
                         "on", "ard", "-", "Sh", "eng", "-", "Sh", "u", "ai", "-", "X", "ing", "ji",
                         "an", "..", "...", "/", ":", "!", "@", "#</w>", "\'", "ab", "c", "\'</w>"]]
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
                       (22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
                       (18, 19), (19, 20), (20, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 16),
                       (16, 17), (17, 19), (19, 20), (20, 23), (23, 25), (25, 27), (27, 30), (30, 31),
                       (31, 33), (33, 36), (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
                       (44, 47), (47, 49), (49, 51), (51, 53), (53, 56), (56, 57), (57, 58), (58, 59),
                       (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
        gt_int_decode = ["Hello, y\'all! How are you Ⅷ 😁 😁 😁 ?",
                         "GluonNLP is great!!!!!!",
                         "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        gt_str_decode = SUBWORD_TEST_SAMPLES
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, SubwordNMTTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_subword_nmt(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)
        # Case 2, bpe_dropout
        # We use str decode here because we may not perfectly recover the original
        # sentence with int decode.
        tokenizer = SubwordNMTTokenizer(model_path, vocab_path, bpe_dropout=0.5)
        verify_decode(tokenizer, SUBWORD_TEST_SAMPLES, out_type=str)
        os.remove(model_path)
        os.remove(vocab_path)
def test_huggingface_bpe_tokenizer():
    """Check HuggingFaceBPETokenizer encoding/decoding against pinned ground truth.

    Covers three setups: the default (cased) tokenizer, ``lowercase=True``,
    and loading the tokenizer from the original HuggingFace vocabulary file.
    Requires network access to download the pretrained test model.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'test_hf_bpe.model')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'test_hf_bpe.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'test_hf_bpe.hf_vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, default lowercase=False
        tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path)
        gt_tokenized = [['Hello</w>', ',</w>', 'y</w>', "'</w>", 'all</w>', '!</w>', 'How</w>',
                         'are</w>', 'you</w>', '<unk>', '<unk>', '<unk>', '<unk>', '?</w>'],
                        ['Gl', 'u', 'on', 'N', 'LP</w>', 'is</w>', 'great</w>', '!</w>', '!</w>',
                         '!</w>', '!</w>', '!</w>', '!</w>'],
                        ['Gl', 'u', 'on', 'N', 'LP</w>', '-</w>', 'Amazon</w>', '-</w>', 'H', 'ai',
                         'bin</w>', '-</w>', 'Leonard</w>', '-</w>', 'Sh', 'en', 'g</w>', '-</w>',
                         'Sh', 'u', 'ai</w>', '-</w>', 'X', 'ing', 'j', 'ian</w>', '.</w>', '.</w>',
                         '.</w>', '.</w>', '.</w>', '/</w>', ':</w>', '!</w>', '@</w>', '#</w>',
                         "'</w>", 'ab', 'c</w>', "'</w>"]]
        gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21), (22, 25),
                       (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (9, 11), (12, 17), (17, 18), (18, 19),
                       (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16), (16, 17),
                       (17, 19), (19, 22), (22, 23), (23, 30), (30, 31), (31, 33), (33, 35), (35, 36),
                       (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48),
                       (48, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
                       (58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
        # gt_int_decode = gt_str_decode for hf
        # hf removed the unk tokens in decode result
        # Bug fix: the third literal below had been truncated at the '#' character
        # (a comment-stripping artifact left an unterminated string); it is
        # reconstructed here from the intact lowercase variant used in Case 2.
        gt_decode = ["Hello , y ' all ! How are you ?",
                     'GluonNLP is great ! ! ! ! ! !',
                     "GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian"
                     " . . . . . / : ! @ # ' abc '"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=True
        gt_lowercase_decode = ["hello , y ' all ! how are you ?",
                               'gluonnlp is great ! ! ! ! ! !',
                               "gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian . . . . . / : ! @ # ' abc '"]
        tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path, lowercase=True)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceBPETokenizer(model_path, hf_vocab_path)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)

        os.remove(model_path)
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_bytebpe_tokenizer():
    """Check HuggingFaceByteBPETokenizer encoding/decoding against pinned ground truth.

    Covers the default (cased) tokenizer, ``lowercase=True``, and loading the
    tokenizer from the original HuggingFace vocabulary file. Requires network
    access to download the pretrained test model.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bytebpe.model')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.model',
                 path=model_path)
        vocab_path = os.path.join(dir_path, 'hf_bytebpe.vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'hf_bytebpe.hf_vocab')
        download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, default lowercase=False
        tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path)
        gt_tokenized = [['Hello', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
                         'Ġâ', 'ħ', '§', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'Ġ?'],
                        ['Gl', 'u', 'on', 'N', 'LP', 'Ġis', 'Ġgreat', 'ï¼', 'ģ', 'ï¼',
                         'ģ', 'ï¼', 'ģ', '!!!'],
                        ['Gl', 'u', 'on', 'N', 'LP', '-', 'Amazon', '-', 'Ha', 'ib', 'in',
                         '-', 'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u',
                         'ai', '-', 'X', 'ing', 'j', 'ian', '.....', '/', ':', '!', '@',
                         '#', "Ġ'", 'ab', 'c', "'"]]
        # NOTE: the definition of offsets at the byte level is not entirely clear;
        # these values pin the current behaviour of the tokenizers backend.
        gt_offsets = [[(0, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
                       (21, 25), (25, 27), (26, 27), (26, 27), (27, 29), (28, 29), (29, 31),
                       (30, 31), (31, 33), (32, 33), (33, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18),
                       (17, 18), (18, 19), (18, 19), (19, 20), (19, 20), (20, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16),
                       (16, 18), (18, 20), (20, 22), (22, 23), (23, 25), (25, 27), (27, 30),
                       (30, 31), (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 56),
                       (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 63),
                       (63, 65), (65, 66), (66, 67)]]
        gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                     'GluonNLP is great!!!!!!',
                     "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=True
        # Bug fix: the third literal below had been truncated at the '#' character
        # (a comment-stripping artifact left an unterminated string); it is
        # reconstructed as the lowercased form of gt_decode above.
        gt_lowercase_int_decode = ["hello, y'all! how are you ⅷ 😁 😁 😁 ?",
                                   'gluonnlp is great!!!!!!',
                                   "gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian"
                                   "...../:!@# 'abc'"]
        tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path, lowercase=True)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_int_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceByteBPETokenizer(model_path, hf_vocab_path)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)

        os.remove(model_path)
        os.remove(vocab_path)
        os.remove(hf_vocab_path)
def test_huggingface_wordpiece_tokenizer():
    """Check HuggingFaceWordPieceTokenizer encoding/decoding against pinned ground truth.

    Covers ``lowercase=True``, ``lowercase=False``, and loading the tokenizer
    from the original HuggingFace vocabulary file. Requires network access to
    download the pretrained test vocabulary.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        vocab_path = os.path.join(dir_path, 'hf_wordpiece.vocab')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.vocab',
                 path=vocab_path)
        hf_vocab_path = os.path.join(dir_path, 'hf_wordpiece.hf_vocab')
        download(url=get_repo_url()
                     + 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.hf_vocab',
                 path=hf_vocab_path)
        # Case 1, lowercase=True
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=True)
        gt_tokenized = [["hello", ",", "y", "'", "all", "!", "how", "are", "you",
                         "<unk>", "<unk>", "<unk>", "<unk>", "?"],
                        ["gl", "##uo", "##nn", "##l", "##p", "is", "great", "\uff01",
                         "\uff01", "\uff01", "!", "!", "!"],
                        ["gl", "##uo", "##nn", "##l", "##p", "-", "amazon", "-", "hai",
                         "##bin", "-", "leonard", "-", "shen", "##g", "-", "shu", "##ai", "-",
                         "xin", "##g", "##ji", "##an", ".", ".", ".", ".", ".", "/", ":", "!",
                         "@", "#", "'", "abc", "'"]]
        gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
                       (22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (8, 9), (9, 15), (15, 16), (16, 19),
                       (19, 22), (22, 23), (23, 30), (30, 31), (31, 35), (35, 36), (36, 37), (37, 40),
                       (40, 42), (42, 43), (43, 46), (46, 47), (47, 49), (49, 51), (51, 52), (52, 53),
                       (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61),
                       (62, 63), (63, 66), (66, 67)]]
        # Bug fix: the third literal below had been truncated at the '#' character
        # (a comment-stripping artifact left an unterminated string); it is
        # reconstructed from the intact lowercase=False variant used in Case 2.
        gt_decode = ["hello, y'all! how are you?",
                     "gluonnlp is great ! ! !!!!",
                     "gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian"
                     "..... / :! @ #'abc '"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
        # Case 2, lowercase=False
        gt_lowercase_decode = [", y'all! are you?",
                               "is great ! ! !!!!",
                               "- - - - - -..... / :! @ #'abc '"]
        tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=False)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
        # Case 3, using original hf vocab
        tokenizer = HuggingFaceWordPieceTokenizer(hf_vocab_path, lowercase=True)
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)

        os.remove(vocab_path)
        os.remove(hf_vocab_path)
# Only valid for the 0.8.x line of the `tokenizers` backend; newer releases
# changed the model/vocab file format exercised here.
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_wordpiece_tokenizer_v08():
    """Check HuggingFaceTokenizer with a WordPiece model produced by tokenizers 0.8.x.

    Requires network access to download the pretrained test model and vocab.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.model',
                 path=model_path,
                 sha1_hash='66ccadf6e5e354ff9604e4a82f107a2ac873abd5')
        vocab_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.vocab',
                 path=vocab_path,
                 sha1_hash='dd6fdf4bbc74eaa8806d12cb3d38a4d9a306aea8')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # Expected tokens, character offsets, and round-trip decodes for
        # SUBWORD_TEST_SAMPLES, pinned against the 0.8.x backend.
        gt_tokenized = [['Hel', '##lo', ',', 'y', '[UNK]', 'all', '!',
                         'How', 'are', 'you', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '?'],
                        ['Gl', '##u', '##on', '##N', '##L', '##P', 'is', 'great', '[UNK]',
                         '[UNK]', '[UNK]', '!', '!', '!'],
                        ['Gl', '##u', '##on', '##N', '##L', '##P', '-',
                         'Am', '##az', '##on', '-', 'Ha', '##ibi', '##n', '-', 'Leon', '##ard',
                         '-', 'She', '##n', '##g', '-', 'Sh', '##ua', '##i', '-', 'X',
                         '##ing', '##j', '##ian', '.', '.', '.', '.', '.', '/', ':', '!',
                         '@', '#', '[UNK]', 'ab', '##c', '[UNK]']]
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13),
                       (14, 17), (18, 21), (22, 25), (26, 27), (28, 29), (30, 31),
                       (32, 33), (34, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
                       (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9),
                       (9, 11), (11, 13), (13, 15), (15, 16), (16, 18), (18, 21),
                       (21, 22), (22, 23), (23, 27), (27, 30), (30, 31), (31, 34),
                       (34, 35), (35, 36), (36, 37), (37, 39), (39, 41), (41, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52),
                       (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
                       (58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66),
                       (66, 67)]]
        gt_decode = ['Hello, y all! How are you?',
                     'GluonNLP is great!!!',
                     'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian..... / '
                     ':! @ # abc']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Only valid for the 0.8.x line of the `tokenizers` backend; newer releases
# changed the model/vocab file format exercised here.
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bpe_tokenizer_v08():
    """Check HuggingFaceTokenizer with a BPE model produced by tokenizers 0.8.x.

    Requires network access to download the pretrained test model and vocab.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bpe_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.model',
                 path=model_path,
                 sha1_hash='ecda90979561ca4c5a8d769b5e3c9fa2270d5317')
        vocab_path = os.path.join(dir_path, 'hf_bpe_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.vocab',
                 path=vocab_path,
                 sha1_hash='b92dde0b094f405208f3ec94b5eae88430bf4262')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # Expected tokens, character offsets, and round-trip decodes for
        # SUBWORD_TEST_SAMPLES, pinned against the 0.8.x backend.
        gt_tokenized = [['H', 'ello</w>', ',</w>', 'y</w>', 'all</w>', '!</w>',
                         'How</w>', 'are</w>', 'you</w>', '?</w>'],
                        ['G', 'lu', 'on', 'N', 'L', 'P</w>', 'is</w>', 'great</w>',
                         '!</w>', '!</w>', '!</w>'],
                        ['G', 'lu', 'on', 'N', 'L', 'P</w>', '-</w>', 'Amaz', 'on</w>',
                         '-</w>', 'Ha', 'i', 'bin</w>', '-</w>', 'Leon', 'ard</w>', '-</w>',
                         'Sh', 'eng</w>', '-</w>', 'S', 'hu', 'ai</w>', '-</w>', 'X', 'ing',
                         'j', 'ian</w>', '.</w>', '.</w>', '.</w>', '.</w>', '.</w>', '/</w>',
                         ':</w>', '!</w>', '@</w>', '#</w>', 'ab', 'c</w>']]
        gt_offsets = [[(0, 1), (1, 5), (5, 6), (7, 8), (9, 12), (12, 13), (14, 17),
                       (18, 21), (22, 25), (34, 35)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
                       (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 15),
                       (15, 16), (16, 18), (18, 19), (19, 22), (22, 23), (23, 27), (27, 30),
                       (30, 31), (31, 33), (33, 36), (36, 37), (37, 38), (38, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53),
                       (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60),
                       (60, 61), (63, 65), (65, 66)]]
        gt_decode = ['Hello , y all ! How are you ?',
                     'GluonNLP is great ! ! !',
                     'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian'
                     ' . . . . . / : ! @ # abc']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Only valid for the 0.8.x line of the `tokenizers` backend; newer releases
# changed the model/vocab file format exercised here.
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
                    >= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bytebpe_tokenizer_v08():
    """Check HuggingFaceTokenizer with a byte-level BPE model produced by tokenizers 0.8.x.

    Requires network access to download the pretrained test model and vocab.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.model')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.model',
                 path=model_path,
                 sha1_hash='a1c4da1f6c21df923e150f56dbb5b7a53c61808b')
        vocab_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.vocab')
        download(url=get_repo_url() +
                     'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.vocab',
                 path=vocab_path,
                 sha1_hash='7831b19078a3222f450e65b2188dc0770473123b')
        tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
        # Expected tokens (byte-level surface forms), character offsets, and
        # round-trip decodes for SUBWORD_TEST_SAMPLES, pinned against 0.8.x.
        gt_tokenized = [['He', 'llo', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
                         'Ġâ', 'ħ', '§', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ', 'ð', 'Ł', 'ĺ',
                         'ģ', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ?'],
                        ['G', 'l', 'u', 'on', 'N', 'L', 'P', 'Ġis', 'Ġgreat', 'ï', '¼', 'ģ',
                         'ï', '¼', 'ģ', 'ï', '¼', 'ģ', '!', '!', '!'],
                        ['G', 'l', 'u', 'on', 'N', 'L', 'P', '-', 'Am', 'az', 'on', '-',
                         'Ha', 'ib', 'in', '-', 'Le', 'on', 'ard', '-', 'S', 'hen', 'g', '-',
                         'Sh', 'u', 'ai', '-', 'X', 'ing', 'j', 'ian',
                         '..', '...', '/', ':', '!', '@', '#', 'Ġ', "'", 'ab', 'c', "'"]]
        gt_offsets = [[(0, 2), (2, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
                       (17, 21), (21, 25), (25, 27), (26, 27), (26, 27), (27, 28), (28, 29),
                       (28, 29), (28, 29), (28, 29), (29, 30), (30, 31), (30, 31), (30, 31),
                       (30, 31), (31, 32), (32, 33), (32, 33), (32, 33), (32, 33), (33, 35)],
                      [(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17),
                       (17, 18), (17, 18), (17, 18), (18, 19), (18, 19), (18, 19), (19, 20),
                       (19, 20), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 11),
                       (11, 13), (13, 15), (15, 16), (16, 18), (18, 20), (20, 22), (22, 23),
                       (23, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36),
                       (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
                       (44, 47), (47, 48), (48, 51), (51, 53), (53, 56), (56, 57),
                       (57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63),
                       (63, 65), (65, 66), (66, 67)]]
        gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                     'GluonNLP is great!!!!!!',
                     "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, HuggingFaceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
def test_tokenizers_create():
    """Smoke test: the registry builds a tokenizer by name and it can encode."""
    moses_tokenizer = gluonnlp.data.tokenizers.create('moses', 'en')
    moses_tokenizer.encode('hello world!')
| true
| true
|
1c475796efa58d436a4aeaa031170fd8364ddc7a
| 256
|
py
|
Python
|
09/01/01/5.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
09/01/01/5.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | 46
|
2017-06-30T22:19:07.000Z
|
2017-07-31T22:51:31.000Z
|
10/01/01/5.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
class Base1:
    """First base class; announces its initializer when it runs."""

    def __init__(self):
        print('Base1.__init__')


class Base2:
    """Second base class; announces its initializer when it runs."""

    def __init__(self):
        print('Base2.__init__')


class Super(Base1, Base2):
    """Derived class that explicitly invokes both base initializers in order."""

    def __init__(self):
        print('Super.__init__')
        Base1.__init__(self)
        Base2.__init__(self)


c = Super()
| 28.444444
| 91
| 0.699219
|
class Base1:
    """First base class; announces its initializer when it runs."""

    def __init__(self):
        print('Base1.__init__')


class Base2:
    """Second base class; announces its initializer when it runs."""

    def __init__(self):
        print('Base2.__init__')


class Super(Base1, Base2):
    """Derived class that explicitly invokes both base initializers in order."""

    def __init__(self):
        print('Super.__init__')
        Base1.__init__(self)
        Base2.__init__(self)


c = Super()
| true
| true
|
1c4757eb287bb3f279ee1609d1ef569abd806f07
| 156
|
py
|
Python
|
tests/models.py
|
rtidatascience/django-postgres-power
|
cf3f714ab9d8919187dc478f1d0679945017ae17
|
[
"BSD-3-Clause"
] | 16
|
2015-12-10T06:37:49.000Z
|
2021-07-16T00:02:41.000Z
|
tests/models.py
|
rtidatascience/django-postgres-power
|
cf3f714ab9d8919187dc478f1d0679945017ae17
|
[
"BSD-3-Clause"
] | 4
|
2016-08-23T13:31:33.000Z
|
2019-04-08T15:47:38.000Z
|
tests/models.py
|
rtidatascience/django-postgres-power
|
cf3f714ab9d8919187dc478f1d0679945017ae17
|
[
"BSD-3-Clause"
] | 7
|
2016-08-23T12:57:55.000Z
|
2020-11-14T21:08:53.000Z
|
from django.db import models
class Checkin(models.Model):
    # Timestamp of the check-in event. Presumably timezone-aware when
    # USE_TZ is enabled -- confirm against the project settings.
    logged_at = models.DateTimeField()


class Number(models.Model):
    # A single integer value; looks like a minimal fixture model for
    # exercising numeric queries -- confirm against the test suite.
    n = models.IntegerField()
| 22.285714
| 38
| 0.74359
|
from django.db import models
class Checkin(models.Model):
    # Timestamp of the check-in event. Presumably timezone-aware when
    # USE_TZ is enabled -- confirm against the project settings.
    logged_at = models.DateTimeField()


class Number(models.Model):
    # A single integer value; looks like a minimal fixture model for
    # exercising numeric queries -- confirm against the test suite.
    n = models.IntegerField()
| true
| true
|
1c475960ea7c505c741557bae2f651bd3511c226
| 2,710
|
py
|
Python
|
cimcb_lite/utils/table_check.py
|
RuibingS/cimcb
|
382f7d8fff30d3d276f18ac8c7dc686e0e643fa9
|
[
"MIT"
] | 3
|
2019-05-19T10:36:50.000Z
|
2020-10-12T08:13:04.000Z
|
cimcb_lite/utils/table_check.py
|
RuibingS/cimcb
|
382f7d8fff30d3d276f18ac8c7dc686e0e643fa9
|
[
"MIT"
] | 1
|
2019-03-24T11:04:39.000Z
|
2019-03-26T03:54:51.000Z
|
cimcb_lite/utils/table_check.py
|
RuibingS/cimcb
|
382f7d8fff30d3d276f18ac8c7dc686e0e643fa9
|
[
"MIT"
] | 3
|
2019-05-19T10:37:03.000Z
|
2020-10-12T08:13:05.000Z
|
import numpy as np
def table_check(DataTable, PeakTable, print_statement=True):
    """Error checking for DataTable and PeakTable (used in load_dataXL).

    Parameters
    ----------
    DataTable: DataFrame
        Data sheet with the required columns.
    PeakTable: DataFrame
        Peak sheet with the required columns.
    print_statement: boolean (default True)
        If the error checks are successful and print_statement is True, the following is printed: "Data Table & Peak Table is suitable."

    Raises
    ------
    ValueError
        If either table is missing a required column, contains missing
        values, or contains non-unique identifiers.
    """
    # Check DataTable for Idx, Class and SampleID
    data_columns = DataTable.columns.values
    if "Idx" not in data_columns:
        raise ValueError("Data Table does not contain the required 'Idx' column")
    if DataTable.Idx.isnull().values.any():
        raise ValueError("Data Table Idx column cannot contain missing values")
    if len(np.unique(DataTable.Idx)) != len(DataTable.Idx):
        raise ValueError("Data Table Idx numbers are not unique. Please change")
    if "Class" not in data_columns:
        raise ValueError("Data Table does not contain the required 'Class' column")
    if "SampleID" not in data_columns:
        raise ValueError("Data Table does not contain the required 'SampleID' column")

    # Check PeakTable for Idx, Name, Label
    peak_columns = PeakTable.columns.values
    if "Idx" not in peak_columns:
        raise ValueError("Peak Table does not contain the required 'Idx' column")
    if PeakTable.Idx.isnull().values.any():
        raise ValueError("Peak Table Idx column cannot contain missing values")
    if len(np.unique(PeakTable.Idx)) != len(PeakTable.Idx):
        raise ValueError("Peak Table Idx numbers are not unique. Please change")
    if "Name" not in peak_columns:
        raise ValueError("Peak Table does not contain the required 'Name' column")
    # Bug fix: these two checks previously re-tested PeakTable.Idx, so missing
    # or duplicated peak *names* were never detected; they must inspect Name.
    if PeakTable.Name.isnull().values.any():
        raise ValueError("Peak Table Name column cannot contain missing values")
    if len(np.unique(PeakTable.Name)) != len(PeakTable.Name):
        raise ValueError("Peak Table Name numbers are not unique. Please change")
    if "Label" not in peak_columns:
        raise ValueError("Data Table does not contain the required 'Label' column")

    # Check that Peak Names in PeakTable & DataTable match
    peak_list = PeakTable.Name
    data_columns = DataTable.columns.values
    temp = np.intersect1d(data_columns, peak_list)
    if len(temp) != len(peak_list):
        raise ValueError("The Peak Names in Data Table should exactly match the Peak Names in Peak Table. Remember that all Peak Names should be unique.")

    if print_statement is True:
        print("Data Table & Peak Table is suitable.")
| 41.692308
| 154
| 0.700738
|
import numpy as np
def table_check(DataTable, PeakTable, print_statement=True):
    """Validate DataTable and PeakTable; raise ValueError on the first problem.

    Checks that the Data sheet has Idx/Class/SampleID, the Peak sheet has
    Idx/Name/Label, identifiers are unique and non-missing, and every peak
    name appears as a column in the Data sheet.
    """
    # Check DataTable for Idx, Class and SampleID
    data_columns = DataTable.columns.values
    if "Idx" not in data_columns:
        raise ValueError("Data Table does not contain the required 'Idx' column")
    if DataTable.Idx.isnull().values.any():
        raise ValueError("Data Table Idx column cannot contain missing values")
    if len(np.unique(DataTable.Idx)) != len(DataTable.Idx):
        raise ValueError("Data Table Idx numbers are not unique. Please change")
    if "Class" not in data_columns:
        raise ValueError("Data Table does not contain the required 'Class' column")
    if "SampleID" not in data_columns:
        raise ValueError("Data Table does not contain the required 'SampleID' column")

    # Check PeakTable for Idx, Name, Label
    peak_columns = PeakTable.columns.values
    if "Idx" not in peak_columns:
        raise ValueError("Peak Table does not contain the required 'Idx' column")
    if PeakTable.Idx.isnull().values.any():
        raise ValueError("Peak Table Idx column cannot contain missing values")
    if len(np.unique(PeakTable.Idx)) != len(PeakTable.Idx):
        raise ValueError("Peak Table Idx numbers are not unique. Please change")
    if "Name" not in peak_columns:
        raise ValueError("Peak Table does not contain the required 'Name' column")
    # Bug fix: these two checks previously re-tested PeakTable.Idx, so missing
    # or duplicated peak *names* were never detected; they must inspect Name.
    if PeakTable.Name.isnull().values.any():
        raise ValueError("Peak Table Name column cannot contain missing values")
    if len(np.unique(PeakTable.Name)) != len(PeakTable.Name):
        raise ValueError("Peak Table Name numbers are not unique. Please change")
    if "Label" not in peak_columns:
        raise ValueError("Data Table does not contain the required 'Label' column")

    # Check that Peak Names in PeakTable & DataTable match
    peak_list = PeakTable.Name
    data_columns = DataTable.columns.values
    temp = np.intersect1d(data_columns, peak_list)
    if len(temp) != len(peak_list):
        raise ValueError("The Peak Names in Data Table should exactly match the Peak Names in Peak Table. Remember that all Peak Names should be unique.")

    if print_statement is True:
        print("Data Table & Peak Table is suitable.")
| true
| true
|
1c475968ebbd39e752c755cb7b4598bf947a6220
| 556
|
py
|
Python
|
src/log.py
|
ENDERZOMBI102/endc-lang
|
554c540111adae52c3ec23c75474d2121d339df4
|
[
"MIT"
] | null | null | null |
src/log.py
|
ENDERZOMBI102/endc-lang
|
554c540111adae52c3ec23c75474d2121d339df4
|
[
"MIT"
] | null | null | null |
src/log.py
|
ENDERZOMBI102/endc-lang
|
554c540111adae52c3ec23c75474d2121d339df4
|
[
"MIT"
] | null | null | null |
import sys
from typing import TextIO
from cli import args
def _log(level: int, msg: str, file: TextIO) -> None:
    """Print *msg* to *file* when the CLI verbosity threshold admits *level*."""
    if level >= args.verboseLevel:
        print(msg, file=file)


def debug(msg: str, file: TextIO = sys.stdout) -> None:
    """Emit a debug message; suppressed entirely unless debug mode is on."""
    if args.debug:
        _log(0, f'[DEBUG] {msg}', file)


def info(msg: str, file: TextIO = sys.stdout) -> None:
    """Emit an informational message to stdout."""
    _log(1, f'[INFO] {msg}', file)


def warn(msg: str, file: TextIO = sys.stderr) -> None:
    """Emit a warning to stderr."""
    _log(2, f'[WARN] {msg}', file)


def error(msg: str, file: TextIO = sys.stderr) -> None:
    """Emit an error to stderr."""
    _log(3, f'[ERROR] {msg}', file)
| 20.592593
| 55
| 0.627698
|
import sys
from typing import TextIO
from cli import args
def _log(level: int, msg: str, file: TextIO) -> None:
    """Print *msg* to *file* when the CLI verbosity threshold admits *level*."""
    if level >= args.verboseLevel:
        print(msg, file=file)


def debug(msg: str, file: TextIO = sys.stdout) -> None:
    """Emit a debug message; suppressed entirely unless debug mode is on."""
    if args.debug:
        _log(0, f'[DEBUG] {msg}', file)


def info(msg: str, file: TextIO = sys.stdout) -> None:
    """Emit an informational message to stdout."""
    _log(1, f'[INFO] {msg}', file)


def warn(msg: str, file: TextIO = sys.stderr) -> None:
    """Emit a warning to stderr."""
    _log(2, f'[WARN] {msg}', file)


def error(msg: str, file: TextIO = sys.stderr) -> None:
    """Emit an error to stderr."""
    _log(3, f'[ERROR] {msg}', file)
| true
| true
|
1c47596b8a5035d0ebdff520ba15dc9448d843dc
| 7,887
|
py
|
Python
|
sphinx/builders/singlehtml.py
|
choldgraf/sphinx
|
97d2f9fbf8eab478908af981c1a36aed1d75a4ce
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/builders/singlehtml.py
|
choldgraf/sphinx
|
97d2f9fbf8eab478908af981c1a36aed1d75a4ce
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/builders/singlehtml.py
|
choldgraf/sphinx
|
97d2f9fbf8eab478908af981c1a36aed1d75a4ce
|
[
"BSD-2-Clause"
] | null | null | null |
"""
sphinx.builders.singlehtml
~~~~~~~~~~~~~~~~~~~~~~~~~~
Single HTML builders.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from typing import Any, Dict, List, Tuple, Union
from docutils import nodes
from docutils.nodes import Node
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
from sphinx.environment.adapters.toctree import TocTree
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util import progress_message
from sphinx.util.console import darkgreen # type: ignore
from sphinx.util.nodes import inline_all_toctrees
logger = logging.getLogger(__name__)
class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
    """
    A StandaloneHTMLBuilder subclass that puts the whole document tree on one
    HTML page.
    """
    name = 'singlehtml'
    epilog = __('The HTML page is in %(outdir)s.')

    # Source files are never copied alongside the output for this builder.
    copysource = False

    def get_outdated_docs(self) -> Union[str, List[str]]:  # type: ignore
        """Always rebuild: the single output page depends on every document."""
        return 'all documents'

    def get_target_uri(self, docname: str, typ: str = None) -> str:
        """Return the URI for *docname*; in-tree docs become anchors on the master page."""
        if docname in self.env.all_docs:
            # all references are on the same page...
            return self.config.master_doc + self.out_suffix + \
                '#document-' + docname
        else:
            # chances are this is a html_additional_page
            return docname + self.out_suffix

    def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
        """Relative URIs collapse to plain target URIs: everything lives on one page."""
        # ignore source
        return self.get_target_uri(to, typ)

    def fix_refuris(self, tree: Node) -> None:
        """Rewrite reference URIs that carry a double anchor.

        After toctree inlining a refuri can look like
        ``master.html#document-foo#section``; keep only the last anchor and
        point it at the master output file.
        """
        # fix refuris with double anchor
        fname = self.config.master_doc + self.out_suffix
        for refnode in tree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            refuri = refnode['refuri']
            hashindex = refuri.find('#')
            if hashindex < 0:
                continue
            hashindex = refuri.find('#', hashindex + 1)
            if hashindex >= 0:
                refnode['refuri'] = fname + refuri[hashindex:]

    def _get_local_toctree(self, docname: str, collapse: bool = True, **kwds: Any) -> str:
        """Render the local toctree of *docname* as an HTML fragment."""
        if 'includehidden' not in kwds:
            kwds['includehidden'] = False
        toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
        if toctree is not None:
            self.fix_refuris(toctree)
        # NOTE(review): render_partial is reached even when toctree is None --
        # confirm that render_partial tolerates a None input.
        return self.render_partial(toctree)['fragment']

    def assemble_doctree(self) -> nodes.document:
        """Inline every document into one doctree rooted at the master document."""
        master = self.config.master_doc
        tree = self.env.get_doctree(master)
        tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master])
        tree['docname'] = master
        self.env.resolve_references(tree, master, self)
        self.fix_refuris(tree)
        return tree

    def assemble_toc_secnumbers(self) -> Dict[str, Dict[str, Tuple[int, ...]]]:
        """Merge per-document section numbers into one mapping for the master doc."""
        # Assemble toc_secnumbers to resolve section numbers on SingleHTML.
        # Merge all secnumbers to single secnumber.
        #
        # Note: current Sphinx has refid confliction in singlehtml mode.
        #       To avoid the problem, it replaces key of secnumbers to
        #       tuple of docname and refid.
        #
        #       There are related codes in inline_all_toctres() and
        #       HTMLTranslter#add_secnumber().
        new_secnumbers = {}  # type: Dict[str, Tuple[int, ...]]
        for docname, secnums in self.env.toc_secnumbers.items():
            for id, secnum in secnums.items():
                alias = "%s/%s" % (docname, id)
                new_secnumbers[alias] = secnum

        return {self.config.master_doc: new_secnumbers}

    def assemble_toc_fignumbers(self) -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]:
        """Merge per-document figure numbers into one mapping for the master doc."""
        # Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
        # Merge all fignumbers to single fignumber.
        #
        # Note: current Sphinx has refid confliction in singlehtml mode.
        #       To avoid the problem, it replaces key of secnumbers to
        #       tuple of docname and refid.
        #
        #       There are related codes in inline_all_toctres() and
        #       HTMLTranslter#add_fignumber().
        new_fignumbers = {}  # type: Dict[str, Dict[str, Tuple[int, ...]]]
        # {'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, 'bar': {'figure': {'id1': (3,)}}}
        for docname, fignumlist in self.env.toc_fignumbers.items():
            for figtype, fignums in fignumlist.items():
                alias = "%s/%s" % (docname, figtype)
                new_fignumbers.setdefault(alias, {})
                for id, fignum in fignums.items():
                    new_fignumbers[alias][id] = fignum

        return {self.config.master_doc: new_fignumbers}

    def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict:
        """Build the template context for the single page (no prev/next relations)."""
        # no relation links...
        toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
        # if there is no toctree, toc is None
        if toctree:
            self.fix_refuris(toctree)
            toc = self.render_partial(toctree)['fragment']
            display_toc = True
        else:
            toc = ''
            display_toc = False
        return {
            'parents': [],
            'prev': None,
            'next': None,
            'docstitle': None,
            'title': self.config.html_title,
            'meta': None,
            'body': body,
            'metatags': metatags,
            'rellinks': [],
            'sourcename': '',
            'toc': toc,
            'display_toc': display_toc,
        }

    def write(self, *ignored: Any) -> None:
        """Assemble the whole project into one doctree and write it out."""
        docnames = self.env.all_docs

        with progress_message(__('preparing documents')):
            self.prepare_writing(docnames)  # type: ignore

        with progress_message(__('assembling single document')):
            doctree = self.assemble_doctree()
            self.env.toc_secnumbers = self.assemble_toc_secnumbers()
            self.env.toc_fignumbers = self.assemble_toc_fignumbers()

        with progress_message(__('writing')):
            self.write_doc_serialized(self.config.master_doc, doctree)
            self.write_doc(self.config.master_doc, doctree)

    def finish(self) -> None:
        """Write auxiliary outputs (static files, images, inventory, ...)."""
        self.write_additional_files()
        self.copy_image_files()
        self.copy_download_files()
        self.copy_static_files()
        self.copy_extra_files()
        self.write_buildinfo()
        self.dump_inventory()

    @progress_message(__('writing additional files'))
    def write_additional_files(self) -> None:
        """Render html_additional_pages and, if configured, the opensearch page."""
        # no indices or search pages are supported

        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            logger.info(' ' + pagename, nonl=True)
            self.handle_page(pagename, {}, template)

        if self.config.html_use_opensearch:
            logger.info(' opensearch', nonl=True)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
# for compatibility
# SingleFileHTMLBuilder used to live in sphinx.builders.html; keep an import
# alias there (emitting a deprecation warning) until Sphinx 4.0 so existing
# `from sphinx.builders.html import SingleFileHTMLBuilder` imports keep working.
deprecated_alias('sphinx.builders.html',
                 {
                     'SingleFileHTMLBuilder': SingleFileHTMLBuilder,
                 },
                 RemovedInSphinx40Warning)
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the single-file HTML builder and its configuration with *app*."""
    app.setup_extension('sphinx.builders.html')

    app.add_builder(SingleFileHTMLBuilder)
    app.add_config_value('singlehtml_sidebars', lambda self: self.html_sidebars, 'html')

    extension_metadata = {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return extension_metadata
| 37.557143
| 91
| 0.613034
|
from os import path
from typing import Any, Dict, List, Tuple, Union
from docutils import nodes
from docutils.nodes import Node
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
from sphinx.environment.adapters.toctree import TocTree
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util import progress_message
from sphinx.util.console import darkgreen
from sphinx.util.nodes import inline_all_toctrees
logger = logging.getLogger(__name__)
class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
    """An HTML builder that combines the whole project into one file.

    Cross-document references are rewritten to fragment anchors of the
    form ``#document-<docname>`` inside the single master page.
    """
    name = 'singlehtml'
    epilog = __('The HTML page is in %(outdir)s.')
    copysource = False

    def get_outdated_docs(self) -> Union[str, List[str]]:
        # The single page depends on every document, so always rebuild all.
        return 'all documents'

    def get_target_uri(self, docname: str, typ: str = None) -> str:
        if docname in self.env.all_docs:
            # All documents live on the master page; link to its anchor.
            return self.config.master_doc + self.out_suffix + \
                '#document-' + docname
        else:
            # Not a known document -- presumably an additional page.
            return docname + self.out_suffix

    def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
        # Everything ends up in the same output file; the source is irrelevant.
        return self.get_target_uri(to, typ)

    def fix_refuris(self, tree: Node) -> None:
        """Rewrite ``page#anchor`` refuris to point into the master page."""
        fname = self.config.master_doc + self.out_suffix
        for refnode in tree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            refuri = refnode['refuri']
            hashindex = refuri.find('#')
            if hashindex < 0:
                continue
            # Keep only the second fragment (the in-document anchor).
            hashindex = refuri.find('#', hashindex + 1)
            if hashindex >= 0:
                refnode['refuri'] = fname + refuri[hashindex:]

    def _get_local_toctree(self, docname: str, collapse: bool = True, **kwds: Any) -> str:
        if 'includehidden' not in kwds:
            kwds['includehidden'] = False
        toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
        if toctree is not None:
            self.fix_refuris(toctree)
        return self.render_partial(toctree)['fragment']

    def assemble_doctree(self) -> nodes.document:
        """Inline every toctree into the master document and resolve refs."""
        master = self.config.master_doc
        tree = self.env.get_doctree(master)
        tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master])
        tree['docname'] = master
        self.env.resolve_references(tree, master, self)
        self.fix_refuris(tree)
        return tree

    def assemble_toc_secnumbers(self) -> Dict[str, Dict[str, Tuple[int, ...]]]:
        # Merge per-document section numbers into one mapping keyed by
        # "docname/id" so anchors stay unique on the single page.
        # Fix: the accumulator's name had been truncated to "umbers",
        # leaving new_secnumbers unbound and raising NameError below.
        new_secnumbers = {}  # type: Dict[str, Tuple[int, ...]]
        for docname, secnums in self.env.toc_secnumbers.items():
            for id, secnum in secnums.items():
                alias = "%s/%s" % (docname, id)
                new_secnumbers[alias] = secnum
        return {self.config.master_doc: new_secnumbers}

    def assemble_toc_fignumbers(self) -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]:
        # Same merge as assemble_toc_secnumbers, for figure numbers;
        # aliases are keyed by "docname/figtype".  (Same truncated-name
        # fix as above: restore the new_fignumbers binding.)
        new_fignumbers = {}  # type: Dict[str, Dict[str, Tuple[int, ...]]]
        for docname, fignumlist in self.env.toc_fignumbers.items():
            for figtype, fignums in fignumlist.items():
                alias = "%s/%s" % (docname, figtype)
                new_fignumbers.setdefault(alias, {})
                for id, fignum in fignums.items():
                    new_fignumbers[alias][id] = fignum
        return {self.config.master_doc: new_fignumbers}

    def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict:
        """Template context for the one output page (global TOC included)."""
        toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
        if toctree:
            self.fix_refuris(toctree)
            toc = self.render_partial(toctree)['fragment']
            display_toc = True
        else:
            toc = ''
            display_toc = False
        return {
            'parents': [],
            'prev': None,
            'next': None,
            'docstitle': None,
            'title': self.config.html_title,
            'meta': None,
            'body': body,
            'metatags': metatags,
            'rellinks': [],
            'sourcename': '',
            'toc': toc,
            'display_toc': display_toc,
        }

    def write(self, *ignored: Any) -> None:
        docnames = self.env.all_docs
        with progress_message(__('preparing documents')):
            self.prepare_writing(docnames)  # type: ignore
        with progress_message(__('assembling single document')):
            doctree = self.assemble_doctree()
            self.env.toc_secnumbers = self.assemble_toc_secnumbers()
            self.env.toc_fignumbers = self.assemble_toc_fignumbers()
        with progress_message(__('writing')):
            self.write_doc_serialized(self.config.master_doc, doctree)
            self.write_doc(self.config.master_doc, doctree)

    def finish(self) -> None:
        self.write_additional_files()
        self.copy_image_files()
        self.copy_download_files()
        self.copy_static_files()
        self.copy_extra_files()
        self.write_buildinfo()
        self.dump_inventory()

    @progress_message(__('writing additional files'))
    def write_additional_files(self) -> None:
        # No indices or search pages are supported; render only the pages
        # explicitly configured via html_additional_pages and opensearch.
        for pagename, template in self.config.html_additional_pages.items():
            logger.info(' ' + pagename, nonl=True)
            self.handle_page(pagename, {}, template)
        if self.config.html_use_opensearch:
            logger.info(' opensearch', nonl=True)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
# Backward-compatibility shim: old import path, removed in Sphinx 4.0.
deprecated_alias('sphinx.builders.html',
                 {'SingleFileHTMLBuilder': SingleFileHTMLBuilder},
                 RemovedInSphinx40Warning)
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the builder with Sphinx and declare parallel safety."""
    app.setup_extension('sphinx.builders.html')
    app.add_builder(SingleFileHTMLBuilder)
    app.add_config_value('singlehtml_sidebars', lambda self: self.html_sidebars, 'html')
    metadata = {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
| true
| true
|
1c4759c0cc109175a0ac69b07dc02aafad9b54f6
| 26,867
|
py
|
Python
|
src/test/isolation2/sql_isolation_testcase.py
|
kalensk/gpdb
|
52d17ad2057c0b74360e4693f683cc537178d86a
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
src/test/isolation2/sql_isolation_testcase.py
|
kalensk/gpdb
|
52d17ad2057c0b74360e4693f683cc537178d86a
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
src/test/isolation2/sql_isolation_testcase.py
|
kalensk/gpdb
|
52d17ad2057c0b74360e4693f683cc537178d86a
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
    """Return True if *n* parses as a base-10 integer, False otherwise."""
    try:
        int(n)
    except ValueError:
        return False
    return True
def load_helper_file(helper_file):
    """Read *helper_file* and return its contents with outer whitespace stripped."""
    with open(helper_file) as fh:
        return fh.read().strip()
def parse_include_statement(sql):
    """Parse an ``include: <path>;`` directive and return the file path.

    Raises SyntaxError when the directive does not end with a semicolon.
    """
    _keyword, command = sql.split(None, 1)
    stripped_command = command.strip()
    if not stripped_command.endswith(";"):
        raise SyntaxError("expected 'include: %s' to end with a semicolon." % stripped_command)
    # Strip only the terminating semicolon.  The previous code used
    # replace(";", ""), which also removed semicolons *inside* the path
    # and would silently corrupt such filenames.
    return stripped_command[:-1]
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
    """Create an executor; *dbname* falls back to $PGDATABASE when empty."""
    self.processes = {}
    # re.S makes "." match newlines, so a multi-line command is matched
    # whole and sent as one SQL query by process_command().
    self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USIq]*?)\:(.*)", re.S)
    self.dbname = dbname if dbname else os.environ.get('PGDATABASE')
class SQLConnection(object):
"""One test session, backed by a child process that owns the DB connection."""
def __init__(self, out_file, name, mode, dbname):
self.name = name
self.mode = mode
self.out_file = out_file
self.dbname = dbname
# Duplex pipe: parent sends (command, is_fork) tuples, child replies
# with the formatted query result (or None on failure).
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
# True while a forked (backgrounded) command has not been joined yet.
self.has_open = False
self.p.start()
# Close "our" copy of the child's handle, so that if the child dies,
# recv() on the pipe will fail.
child_conn.close();
# NOTE(review): duplicate assignment -- out_file was already stored above.
self.out_file = out_file
def session_process(self, pipe):
# Child-process entry point: run the session's receive/execute loop.
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.mode, pipe, self.dbname)
sp.do()
def query(self, command):
# Execute *command* synchronously and append the result to out_file.
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.rstrip()
def fork(self, command, blocking):
# Start *command* in the background; with blocking=True, verify that
# it actually blocks (no reply arrives within the grace period).
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
p = self.pipe.recv()
raise Exception("Forked command is not blocking; got output: %s" % p.strip())
self.has_open = True
def join(self):
# Wait for the backgrounded command's result and print it.
r = None
print >>self.out_file, " <... completed>"
if self.has_open:
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.rstrip()
self.has_open = False
def stop(self):
# Orderly shutdown: an empty command ends the child's do() loop.
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
# Explicit mid-test session quit (the "Nq:" flag).
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
# Hard kill; used on error paths where stop() may hang.
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, mode, pipe, dbname):
"""
Open the session's database connection according to *mode*:
"utility" connects directly to the primary segment whose content id
is *name*; "standby" connects to the mirror recorded for *name*;
anything else is a normal master connection.
"""
self.name = name
self.mode = mode
self.pipe = pipe
self.dbname = dbname
if self.mode == "utility":
(hostname, port) = self.get_hostname_port(name, 'p')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_session_role=utility")
elif self.mode == "standby":
# Connect to standby even when it's role is recorded
# as mirror. This is useful for scenarios where a
# test needs to promote a standby without using
# gpactivatestandby.
(hostname, port) = self.get_hostname_port(name, 'm')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port)
else:
self.con = self.connectdb(self.dbname)
def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
    """Open a pygresql connection, retrying while the server is starting
    up or in recovery (up to ~100 seconds at 0.1s intervals)."""
    con = None
    attempts_left = 1000
    while attempts_left:
        try:
            if given_port is None:
                con = pygresql.pg.connect(host=given_host,
                                          opt=given_opt,
                                          dbname=given_dbname)
            else:
                con = pygresql.pg.connect(host=given_host,
                                          port=given_port,
                                          opt=given_opt,
                                          dbname=given_dbname)
            break
        except Exception as e:
            transient = ("the database system is starting up" in str(e) or
                         "the database system is in recovery mode" in str(e))
            if transient and attempts_left > 1:
                attempts_left -= 1
                time.sleep(0.1)
            else:
                raise
    return con
def get_hostname_port(self, contentid, role):
    """Return (hostname, port) for the segment with *contentid* and *role*.

    A local segment is reported with hostname None so the default
    (socket) connection path is used.
    """
    query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
             " content = %s AND role = '%s'") % (contentid, role)
    rows = self.connectdb(self.dbname).query(query).getresult()
    if not rows:
        raise Exception("Invalid content %s" % contentid)
    hostname = rows[0][0]
    port = int(rows[0][1])
    if hostname == socket.gethostname():
        return (None, port)
    return (hostname, port)
# Render a pygresql result set (a Query object, after execution) in a
# format that imitates psql's default output.  Not a perfect imitation:
# we left-justify all fields and headers, whereas psql centers headers
# and right-justifies numerics -- but it is close enough for gpdiff.pl
# to recognize the result sets.  (We used to rely on str(r) / PyGreSQL's
# own formatting, which was not close enough.)
def printout_result(self, r):
    """Return *r* formatted as a psql-style table, ending with a row count."""
    fields = r.listfields()
    rset = r.getresult()
    # Column widths: start from the header widths, widen per cell.
    widths = [len(str(f)) for f in fields]
    for row in rset:
        for colno, col in enumerate(row):
            cell = "" if col is None else col
            widths[colno] = max(widths[colno], len(str(cell)))
    lines = []
    # Header row.
    lines.append("|".join(" " + f.ljust(w) + " " for f, w in zip(fields, widths)))
    # Separator bar ("----+----").
    lines.append("+".join("".ljust(w + 2, "-") for w in widths))
    # Data rows; NULLs print as empty strings.
    for row in rset:
        cells = []
        for colno, col in enumerate(row):
            cell = "" if col is None else col
            cells.append(" " + str(cell).ljust(widths[colno]) + " ")
        lines.append("|".join(cells))
    # Row-count footer.
    if len(rset) == 1:
        lines.append("(1 row)")
    else:
        lines.append("(" + str(len(rset)) + " rows)")
    return "\n".join(lines) + "\n"
def execute_command(self, command):
    """Run *command* on this session's connection and return printable text.

    Query results are formatted like psql output; status-string results
    (e.g. INSERT) are echoed as "VERB status"; errors are returned as
    their message text so the harness can diff them.
    """
    try:
        r = self.con.query(command)
        if r and type(r) == str:
            # pygresql returns a status string for DML; prefix the verb.
            echo_content = command[:-1].partition(" ")[0].upper()
            return "%s %s" % (echo_content, r)
        if r:
            return self.printout_result(r)
        # No result object (e.g. DDL): just echo the command verb.
        echo_content = command[:-1].partition(" ")[0].upper()
        return echo_content
    except Exception as e:
        return str(e)
def do(self):
    """Receive-and-execute loop; an empty/None command ends the process."""
    command, wait = self.pipe.recv()
    while command:
        if wait:
            # Give a presumably-blocking statement a moment to start
            # before executing, so the parent's poll sees no early reply.
            time.sleep(0.1)
        self.pipe.send(self.execute_command(command))
        command, wait = self.pipe.recv()
def get_process(self, out_file, name, mode="", dbname=""):
    """Return the session registered under (name, mode), creating it lazily."""
    if name and not is_digit(name):
        raise Exception("Name should be a number")
    if name and mode != "utility" and int(name) >= 1024:
        raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
    key = (name, mode)
    if key not in self.processes:
        self.processes[key] = SQLIsolationExecutor.SQLConnection(
            out_file, name, mode, dbname if dbname else self.dbname)
    return self.processes[key]
def quit_process(self, out_file, name, mode="", dbname=""):
    """Terminate and deregister the session under (name, mode)."""
    if name and not is_digit(name):
        raise Exception("Name should be a number")
    if name and mode != "utility" and int(name) >= 1024:
        raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
    key = (name, mode)
    if key not in self.processes:
        raise Exception("Sessions not started cannot be quit")
    self.processes[key].quit()
    del self.processes[key]
def get_all_primary_contentids(self, dbname):
    """Content ids of every primary segment (master included).

    Intended for *U queries that fan out to all primaries.
    """
    con = pygresql.pg.connect(dbname=dbname or self.dbname)
    rows = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
    if not rows:
        raise Exception("Invalid gp_segment_configuration contents")
    return [int(row[0]) for row in rows]
def process_command(self, command, output_file):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use, whether to fork/join/quit, and
an optional per-session @db_name override.
"""
process_name = ""
sql = command
flag = ""
con_mode = ""
dbname = ""
# Split "<session><flags>: <sql>" into its parts; a non-match means
# the whole line is plain SQL for the default session.
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
if flag and flag[0] == "U":
con_mode = "utility"
elif flag and flag[0] == "S":
if len(flag) > 1:
flag = flag[1:]
con_mode = "standby"
sql = m.groups()[2]
sql = sql.lstrip()
# If db_name is specified, it should be of the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
if sql.startswith('!'):
sql = sql[1:]
# Check for execution mode. E.g.
# !\retcode path/to/executable --option1 --option2 ...
#
# At the moment, we only recognize the \retcode mode, which
# ignores all program output in the diff (it's still printed)
# and adds the return code.
mode = None
if sql.startswith('\\'):
mode, sql = sql.split(None, 1)
if mode != '\\retcode':
raise Exception('Invalid execution mode: {}'.format(mode))
cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
stdout, _ = cmd_output.communicate()
print >> output_file
if mode == '\\retcode':
print >> output_file, '-- start_ignore'
print >> output_file, stdout
if mode == '\\retcode':
print >> output_file, '-- end_ignore'
print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
elif sql.startswith('include:'):
# Load and run the contents of another SQL file in this session.
helper_file = parse_include_statement(sql)
self.get_process(
output_file,
process_name,
dbname=dbname
).query(
load_helper_file(helper_file)
)
else:
self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
elif flag == "&":
# Background the command and require that it blocks.
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
# Background the command without the blocking check.
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, con_mode, dbname=dbname)
elif flag == "U":
# '*' fans the utility-mode query out to the master and all primaries.
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip())
elif flag == "U&":
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
elif flag == "U<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
elif flag == "Uq":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, con_mode, dbname=dbname)
elif flag == "S":
self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
"""
Processes the given sql file and writes the output
to output file.  Statements accumulate line by line until a
terminator (";\n", or a bare "Nq:"/"N<:"-style control line) is
seen, then the whole statement is dispatched to process_command().
"""
try:
command = ""
for line in sql_file:
#tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
# Echo the input line into the output (trailing comma: no newline).
print >>output_file, line.strip(),
if line[0] == "!":
command_part = line # shell commands can use -- for multichar options like --include
else:
command_part = line.partition("--")[0] # remove comment from line
if command_part == "" or command_part == "\n":
print >>output_file
elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line):
# Statement complete: execute it; failures are reported inline
# so the rest of the file still runs.
command += command_part
try:
self.process_command(command, output_file)
except Exception as e:
print >>output_file, "FAILED: ", e
command = ""
else:
# Statement continues on the next line.
command += command_part
for process in self.processes.values():
process.stop()
except:
for process in self.processes.values():
process.terminate()
raise
finally:
# terminate() after a clean stop() is a harmless no-op on a dead child.
for process in self.processes.values():
process.terminate()
class SQLIsolationTestCase:
"""
The isolation test case allows a fine grained control of interleaved
executing transactions. This is mainly used to test isolation behavior.
[<#>[flag]:] <sql> | ! <shell scripts or command>
#: either an integer indicating a unique session, or a content-id if
followed by U (for utility-mode connections). In 'U' mode, the
content-id can alternatively be an asterisk '*' to perform a
utility-mode query on the master and all primaries.
flag:
&: expect blocking behavior
>: running in background without blocking
<: join an existing session
q: quit the given session
U: connect in utility mode to primary contentid from gp_segment_configuration
U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
U<: join an existing utility mode session (does not currently support an asterisk target)
I: include a file of sql statements (useful for loading reusable functions)
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
The isolation tests are specified identical to sql-scripts in normal
SQLTestCases. However, it is possible to prefix a SQL line with
a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to port 40000.
One difference to SQLTestCase is the output of INSERT.
SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
SQLIsolationTestCase would output "INSERT 1". As the
SQLIsolationTestCase needs to have a more fine-grained control
over the execution order than possible with PSQL, it uses
the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
2. If you want a specific session to be connected to a specific database , specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For eg:, following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
in the middle of the test execution, you can specify a flag 'q' with the session identifier. For eg:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
Catalog Modification:
Some tests are easier to write if it's possible to modify a system
catalog across the *entire* cluster. To perform a utility-mode query on
all segments and the master, you can use *U commands:
*U: SET allow_system_table_mods = true;
*U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;
Since the number of query results returned by a *U command depends on
the developer's cluster configuration, it can be useful to wrap them in
a start_/end_ignore block. (Unfortunately, this also hides legitimate
failures; a better long-term solution is needed.)
Block/join flags are not currently supported with *U.
Including files:
-- example contents for file.sql: create function some_test_function() returning void ...
include: path/to/some/file.sql;
select some_helper_function();
"""
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
against the test case database (self.db_name) and verifies the output with the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.

Returns the path of the produced output file.
NOTE(review): relies on TINC framework members (TINCSystem, self.gucs,
self.get_out_dir, self._add_gucs_to_sql_file, self._optimizer_suffix,
self.test_artifacts) defined outside this file -- verify against the
framework base class.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
# Run the isolation spec and capture all session output.
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f)
f.flush()
# presumably strips a '.t' template suffix from the out file name -- TODO confirm
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
if __name__ == "__main__":
    # CLI entry point: read an isolation spec on stdin, write results to stdout.
    parser = OptionParser()
    parser.add_option("--dbname", dest="dbname",
                      help="connect to database DBNAME", metavar="DBNAME")
    options, _args = parser.parse_args()
    executor = SQLIsolationExecutor(dbname=options.dbname)
    executor.process_isolation_file(sys.stdin, sys.stdout)
| 40.1
| 193
| 0.544274
|
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
    """Report whether *n* can be converted with int()."""
    try:
        int(n)
    except ValueError:
        return False
    else:
        return True
def load_helper_file(helper_file):
    """Return the whitespace-stripped text content of *helper_file*."""
    with open(helper_file) as handle:
        contents = handle.readlines()
    return "".join(contents).strip()
def parse_include_statement(sql):
    """Parse an ``include: <path>;`` directive and return the file path.

    Raises SyntaxError when the directive does not end with a semicolon.
    """
    _keyword, command = sql.split(None, 1)
    stripped_command = command.strip()
    if not stripped_command.endswith(";"):
        raise SyntaxError("expected 'include: %s' to end with a semicolon." % stripped_command)
    # Remove only the terminating semicolon; replace(";", "") would also
    # delete semicolons embedded in the path and corrupt it.
    return stripped_command[:-1]
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USIq]*?)\:(.*)", re.S)
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, mode, dbname):
self.name = name
self.mode = mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
# recv() on the pipe will fail.
child_conn.close();
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.mode, pipe, self.dbname)
sp.do()
def query(self, command):
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.rstrip()
def fork(self, command, blocking):
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
p = self.pipe.recv()
raise Exception("Forked command is not blocking; got output: %s" % p.strip())
self.has_open = True
def join(self):
r = None
print >>self.out_file, " <... completed>"
if self.has_open:
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.rstrip()
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, mode, pipe, dbname):
self.name = name
self.mode = mode
self.pipe = pipe
self.dbname = dbname
if self.mode == "utility":
(hostname, port) = self.get_hostname_port(name, 'p')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_session_role=utility")
elif self.mode == "standby":
# Connect to standby even when it's role is recorded
(hostname, port) = self.get_hostname_port(name, 'm')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port)
else:
self.con = self.connectdb(self.dbname)
def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
con = None
retry = 1000
while retry:
try:
if (given_port is None):
con = pygresql.pg.connect(host= given_host,
opt= given_opt,
dbname= given_dbname)
else:
con = pygresql.pg.connect(host= given_host,
port= given_port,
opt= given_opt,
dbname= given_dbname)
break
except Exception as e:
if (("the database system is starting up" in str(e) or
"the database system is in recovery mode" in str(e)) and
retry > 1):
retry -= 1
time.sleep(0.1)
else:
raise
return con
def get_hostname_port(self, contentid, role):
query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
" content = %s AND role = '%s'") % (contentid, role)
con = self.connectdb(self.dbname)
r = con.query(query).getresult()
if len(r) == 0:
raise Exception("Invalid content %s" % contentid)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
# all the fields and headers, whereas psql centers the header, and
# right-justifies numeric fields. But this is close enough, to make
# gpdiff.pl recognize the result sets as such. (We used to just call
# str(r), and let PyGreSQL do the formatting. But even though
# PyGreSQL's default formatting is close to psql's, it's not close
def printout_result(self, r):
widths = []
fields = r.listfields()
for f in fields:
widths.append(len(str(f)))
rset = r.getresult()
for row in rset:
colno = 0
for col in row:
if col is None:
col = ""
widths[colno] = max(widths[colno], len(str(col)))
colno = colno + 1
result = ""
colno = 0
for f in fields:
if colno > 0:
result += "|"
result += " " + f.ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
colno = 0
for f in fields:
if colno > 0:
result += "+"
result += "".ljust(widths[colno] + 2, "-")
colno = colno + 1
result += "\n"
for row in rset:
colno = 0
for col in row:
if colno > 0:
result += "|"
if col is None:
col = ""
result += " " + str(col).ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
if len(rset) == 1:
result += "(1 row)\n"
else:
result += "(" + str(len(rset)) +" rows)\n"
return result
def execute_command(self, command):
try:
r = self.con.query(command)
if r and type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, r)
elif r:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
def get_process(self, out_file, name, mode="", dbname=""):
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and mode != "utility" and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname)
return self.processes[(name, mode)]
def quit_process(self, out_file, name, mode="", dbname=""):
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and mode != "utility" and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, mode)].quit()
del self.processes[(name, mode)]
def get_all_primary_contentids(self, dbname):
if not dbname:
dbname = self.dbname
con = pygresql.pg.connect(dbname=dbname)
result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
if len(result) == 0:
raise Exception("Invalid gp_segment_configuration contents")
return [int(content[0]) for content in result]
def process_command(self, command, output_file):
    """Parse one isolation-spec statement and dispatch it.

    A statement looks like ``[<name>[<flag>]:] <sql>`` where ``<name>``
    is a numeric session id and ``<flag>`` picks the execution style:

    - (none)  run synchronously on the named session
    - ``&``   fork the query asynchronously (join later with ``<``)
    - ``>``   fork the query without waiting on send
    - ``<``   join a previously forked query (no SQL allowed)
    - ``q``   quit the session (no SQL allowed)
    - ``U`` / ``U&`` / ``U<`` / ``Uq``  same against a utility-mode
      connection; ``*U`` fans the query out to every primary segment
    - ``S``   run on a standby-mode connection

    With no name/flag prefix, a leading ``!`` runs a shell command
    (optionally ``\\retcode`` to also capture the exit code) and
    ``include:`` loads helper SQL from a file.  A leading
    ``@db_name <db>:`` selects the target database for the session.
    """
    process_name = ""
    sql = command
    flag = ""
    con_mode = ""
    dbname = ""
    m = self.command_pattern.match(command)
    if m:
        process_name = m.groups()[0]
        flag = m.groups()[1]
        # A "U" prefix selects utility mode; "S" selects standby mode and
        # is stripped so the remaining flag char drives the dispatch below.
        if flag and flag[0] == "U":
            con_mode = "utility"
        elif flag and flag[0] == "S":
            if len(flag) > 1:
                flag = flag[1:]
            con_mode = "standby"
        sql = m.groups()[2]
    sql = sql.lstrip()
    # Optional "@db_name <db>:" prefix overrides the target database.
    if sql.startswith('@db_name'):
        sql_parts = sql.split(':', 2)
        if not len(sql_parts) == 2:
            raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
        if not sql_parts[0].startswith('@db_name'):
            raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
        if not len(sql_parts[0].split()) == 2:
            raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
        dbname = sql_parts[0].split()[1].strip()
        if not dbname:
            raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
        sql = sql_parts[1]
    if not flag:
        if sql.startswith('!'):
            sql = sql[1:]

            # "\retcode" mode wraps the command's output in ignore markers
            # and adds the return code.
            mode = None
            if sql.startswith('\\'):
                mode, sql = sql.split(None, 1)
                if mode != '\\retcode':
                    raise Exception('Invalid execution mode: {}'.format(mode))

            # Run the shell command, capturing stdout and stderr together.
            cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
            stdout, _ = cmd_output.communicate()
            print >> output_file
            if mode == '\\retcode':
                print >> output_file, '-- start_ignore'
            print >> output_file, stdout
            if mode == '\\retcode':
                print >> output_file, '-- end_ignore'
                print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
        elif sql.startswith('include:'):
            # Load and execute helper SQL from another file on the
            # (unnamed) default session.
            helper_file = parse_include_statement(sql)
            self.get_process(
                output_file,
                process_name,
                dbname=dbname
            ).query(
                load_helper_file(helper_file)
            )
        else:
            self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
    elif flag == "&":
        self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
    elif flag == ">":
        self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False)
    elif flag == "<":
        if len(sql) > 0:
            raise Exception("No query should be given on join")
        self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
    elif flag == "q":
        if len(sql) > 0:
            raise Exception("No query should be given on quit")
        self.quit_process(output_file, process_name, con_mode, dbname=dbname)
    elif flag == "U":
        # "*U" fans the statement out to a utility connection on every
        # primary segment; otherwise run on the one named segment.
        if process_name == '*':
            process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
        else:
            process_names = [process_name]
        for name in process_names:
            self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip())
    elif flag == "U&":
        self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
    elif flag == "U<":
        if len(sql) > 0:
            raise Exception("No query should be given on join")
        self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
    elif flag == "Uq":
        if len(sql) > 0:
            raise Exception("No query should be given on quit")
        self.quit_process(output_file, process_name, con_mode, dbname=dbname)
    elif flag == "S":
        self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
    else:
        raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
    """Feed each statement of an isolation file to process_command().

    Lines are echoed to *output_file* and accumulated into a statement
    until it is terminated by ";" or a bare join/quit marker (e.g.
    "1<:", "2q:", "-1U<:").  "--" comments are stripped, except on
    shell ("!") lines where "--" may introduce a long option.  All
    sessions are stopped on normal completion; on any error they are
    terminated and the exception re-raised.
    """
    try:
        command = ""
        for line in sql_file:
            #tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
            # Echo the input line (no trailing newline yet).
            print >>output_file, line.strip(),
            if line[0] == "!":
                command_part = line # shell commands can use -- for multichar options like --include
            else:
                command_part = line.partition("--")[0] # remove comment from line
            if command_part == "" or command_part == "\n":
                # Blank/comment-only line: just terminate the echoed line.
                print >>output_file
            elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line):
                # Statement complete: run it; failures are reported inline
                # so the rest of the file still executes.
                command += command_part
                try:
                    self.process_command(command, output_file)
                except Exception as e:
                    print >>output_file, "FAILED: ", e
                command = ""
            else:
                # Statement continues on the next line.
                command += command_part
        for process in self.processes.values():
            process.stop()
    except:
        for process in self.processes.values():
            process.terminate()
        raise
    finally:
        # terminate() is also reached on the success path; presumably it is
        # a no-op for already-stopped sessions -- TODO confirm.
        for process in self.processes.values():
            process.terminate()
class SQLIsolationTestCase:
    # Test-case mixin: runs one isolation .sql file through
    # SQLIsolationExecutor after injecting GUCs, producing an .out file.

    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """Run *sql_file* as an isolation spec and return the output path.

        :param sql_file: path of the isolation spec to execute
        :param out_file: output path; derived from *sql_file* when None
        :param out_dir: directory for the GUC-augmented sql copy; defaults
            to this test's output directory
        :param optimizer: None, or a flag whose suffix (via
            _optimizer_suffix) is appended to the generated file names
        """
        # Add gucs to the test sql and form the actual sql file to be run
        if not out_dir:
            out_dir = self.get_out_dir()

        if not os.path.exists(out_dir):
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)

        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)

        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))

        self.test_artifacts.append(out_file)

        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            executor.process_isolation_file(open(sql_file), f)
            f.flush()

        # Strip a trailing ".t" (template) suffix from the reported path.
        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        return out_file
if __name__ == "__main__":
    # Command-line entry point: read an isolation spec from stdin and
    # write the annotated results to stdout.
    option_parser = OptionParser()
    option_parser.add_option(
        "--dbname", dest="dbname", metavar="DBNAME",
        help="connect to database DBNAME")
    options, _ = option_parser.parse_args()
    SQLIsolationExecutor(dbname=options.dbname).process_isolation_file(
        sys.stdin, sys.stdout)
| true
| true
|
1c475a28b1d83edba4b3c614df0405e3f55f79f0
| 53,813
|
py
|
Python
|
lib/sqlalchemy/sql/sqltypes.py
|
mjpieters/sqlalchemy
|
a8efeb6c052330b7b8d44960132d638b08d42d18
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/sqltypes.py
|
mjpieters/sqlalchemy
|
a8efeb6c052330b7b8d44960132d638b08d42d18
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/sqltypes.py
|
mjpieters/sqlalchemy
|
a8efeb6c052330b7b8d44960132d638b08d42d18
|
[
"MIT"
] | null | null | null |
# sql/sqltypes.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL specific types.
"""
import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
from .elements import quoted_name, type_coerce
from .default_comparator import _DefaultColumnComparator
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
from .. import event
from ..util import pickle
import decimal
if util.jython:
import array
class _DateAffinity(object):
    """Mixin date/time specific expression adaptations.

    Rules are implemented within Date,Time,Interval,DateTime, Numeric,
    Integer. Based on http://www.postgresql.org/docs/current/static
    /functions-datetime.html.
    """

    @property
    def _expression_adaptations(self):
        # Subclasses supply a mapping:
        #   {operator: {other operand's type affinity: result type}}
        raise NotImplementedError()

    class Comparator(TypeEngine.Comparator):
        # Shared empty fallback so .get() never allocates a new dict.
        _blank_dict = util.immutabledict()

        def _adapt_expression(self, op, other_comparator):
            # Look up the result type for (op, other operand's affinity);
            # fall back to NULLTYPE when no adaptation rule exists.
            othertype = other_comparator.type._type_affinity
            return op, \
                to_instance(self.type._expression_adaptations.get(op, self._blank_dict).\
                get(othertype, NULLTYPE))
    comparator_factory = Comparator
class Concatenable(object):
    """A mixin that marks a type as supporting 'concatenation',
    typically strings."""

    class Comparator(TypeEngine.Comparator):
        def _adapt_expression(self, op, other_comparator):
            # "+" between two concatenable (or NULL-typed) operands becomes
            # the SQL concatenation operator; anything else passes through.
            other_is_concatenable = isinstance(
                other_comparator,
                (Concatenable.Comparator, NullType.Comparator))
            if op is operators.add and other_is_concatenable:
                return operators.concat_op, self.expr.type
            return op, self.expr.type

    comparator_factory = Comparator
class String(Concatenable, TypeEngine):
    """The base for all string and character types.

    In SQL, corresponds to VARCHAR. Can also take Python unicode objects
    and encode to the database's encoding in bind params (and the reverse for
    result sets.)

    The `length` field is usually required when the `String` type is
    used within a CREATE TABLE statement, as VARCHAR requires a length
    on most databases.
    """

    __visit_name__ = 'string'

    def __init__(self, length=None, collation=None,
                 convert_unicode=False,
                 unicode_error=None,
                 _warn_on_bytestring=False
                 ):
        """
        Create a string-holding type.

        :param length: optional, a length for the column for use in
          DDL and CAST expressions. May be safely omitted if no ``CREATE
          TABLE`` will be issued. Certain databases may require a
          ``length`` for use in DDL, and will raise an exception when
          the ``CREATE TABLE`` DDL is issued if a ``VARCHAR``
          with no length is included. Whether the value is
          interpreted as bytes or characters is database specific.

        :param collation: Optional, a column-level collation for
          use in DDL and CAST expressions. Renders using the
          COLLATE keyword supported by SQLite, MySQL, and Postgresql.
          E.g.::

            >>> from sqlalchemy import cast, select, String
            >>> print select([cast('some string', String(collation='utf8'))])
            SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

          .. versionadded:: 0.8 Added support for COLLATE to all
             string types.

        :param convert_unicode: When set to ``True``, the
          :class:`.String` type will assume that
          input is to be passed as Python ``unicode`` objects,
          and results returned as Python ``unicode`` objects.
          If the DBAPI in use does not support Python unicode
          (which is fewer and fewer these days), SQLAlchemy
          will encode/decode the value, using the
          value of the ``encoding`` parameter passed to
          :func:`.create_engine` as the encoding.

          When using a DBAPI that natively supports Python
          unicode objects, this flag generally does not
          need to be set. For columns that are explicitly
          intended to store non-ASCII data, the :class:`.Unicode`
          or :class:`.UnicodeText`
          types should be used regardless, which feature
          the same behavior of ``convert_unicode`` but
          also indicate an underlying column type that
          directly supports unicode, such as ``NVARCHAR``.

          For the extremely rare case that Python ``unicode``
          is to be encoded/decoded by SQLAlchemy on a backend
          that does natively support Python ``unicode``,
          the value ``force`` can be passed here which will
          cause SQLAlchemy's encode/decode services to be
          used unconditionally.

        :param unicode_error: Optional, a method to use to handle Unicode
          conversion errors. Behaves like the ``errors`` keyword argument to
          the standard library's ``string.decode()`` functions. This flag
          requires that ``convert_unicode`` is set to ``force`` - otherwise,
          SQLAlchemy is not guaranteed to handle the task of unicode
          conversion. Note that this flag adds significant performance
          overhead to row-fetching operations for backends that already
          return unicode objects natively (which most DBAPIs do). This
          flag should only be used as a last resort for reading
          strings from a column with varied or corrupted encodings.

        """
        if unicode_error is not None and convert_unicode != 'force':
            raise exc.ArgumentError("convert_unicode must be 'force' "
                                    "when unicode_error is set.")

        self.length = length
        self.collation = collation
        self.convert_unicode = convert_unicode
        self.unicode_error = unicode_error
        self._warn_on_bytestring = _warn_on_bytestring

    def literal_processor(self, dialect):
        # Render an inline SQL string literal, doubling embedded quotes.
        def process(value):
            value = value.replace("'", "''")
            return "'%s'" % value
        return process

    def bind_processor(self, dialect):
        # Returns None when the DBAPI can take the value as-is; otherwise a
        # callable that encodes (and/or warns about) outgoing values.
        if self.convert_unicode or dialect.convert_unicode:
            if dialect.supports_unicode_binds and \
                    self.convert_unicode != 'force':
                if self._warn_on_bytestring:
                    def process(value):
                        if isinstance(value, util.binary_type):
                            util.warn("Unicode type received non-unicode bind "
                                      "param value.")
                        return value
                    return process
                else:
                    return None
            else:
                # DBAPI needs encoded bytes: encode text, warn on bytestrings.
                encoder = codecs.getencoder(dialect.encoding)
                warn_on_bytestring = self._warn_on_bytestring

                def process(value):
                    if isinstance(value, util.text_type):
                        return encoder(value, self.unicode_error)[0]
                    elif warn_on_bytestring and value is not None:
                        util.warn("Unicode type received non-unicode bind "
                                  "param value")
                    return value
                return process
        else:
            return None

    def result_processor(self, dialect, coltype):
        # Decode incoming rows to unicode only when conversion was requested
        # and the DBAPI doesn't already return unicode reliably.
        wants_unicode = self.convert_unicode or dialect.convert_unicode
        needs_convert = wants_unicode and \
            (dialect.returns_unicode_strings is not True or
             self.convert_unicode == 'force')

        if needs_convert:
            to_unicode = processors.to_unicode_processor_factory(
                dialect.encoding, self.unicode_error)

            if dialect.returns_unicode_strings:
                # we wouldn't be here unless convert_unicode='force'
                # was specified, or the driver has erratic unicode-returning
                # habits. since we will be getting back unicode
                # in most cases, we check for it (decode will fail).
                def process(value):
                    if isinstance(value, util.text_type):
                        return value
                    else:
                        return to_unicode(value)
                return process
            else:
                # here, we assume that the object is not unicode,
                # avoiding expensive isinstance() check.
                return to_unicode
        else:
            return None

    @property
    def python_type(self):
        if self.convert_unicode:
            return util.text_type
        else:
            return str

    def get_dbapi_type(self, dbapi):
        return dbapi.STRING
class Text(String):
    """An arbitrarily long string type.

    Usually renders as CLOB or TEXT in SQL.  Like :class:`.String`,
    Python unicode values can be encoded to the database's encoding in
    bind parameters (and decoded on the way back out).  TEXT columns
    generally carry no length; a few databases tolerate a length
    argument here, but others will reject it.
    """

    __visit_name__ = 'text'
class Unicode(String):
    """A variable length Unicode string type.

    :class:`.Unicode` is a :class:`.String` subclass that assumes Python
    ``unicode`` input and output — equivalent to using the
    ``convert_unicode`` flag with :class:`.String` — while additionally
    implying a database-side type explicitly supporting non-ASCII data,
    such as ``NVARCHAR`` on Oracle and SQL Server.  This can affect the
    output of ``CREATE TABLE`` statements and ``CAST`` functions at the
    dialect level, as well as bound-parameter handling in some DBAPI
    scenarios.

    The encoding used is normally determined by the DBAPI itself; most
    modern DBAPIs handle Python ``unicode`` bound values and results
    natively, configured as described for the target DBAPI in the
    :ref:`dialect_toplevel` section.  For DBAPIs that do not accommodate
    Python ``unicode`` objects directly, SQLAlchemy performs the
    encoding/decoding itself, using the ``encoding`` flag passed to
    :func:`.create_engine`.

    Only Python ``unicode`` objects should be passed to this type; a
    plain ``str`` under Python 2 causes a warning to be emitted.  The
    standard ``warnings`` filter
    (http://docs.python.org/library/warnings.html) can promote these
    warnings to exceptions to expose a stack trace::

      import warnings
      warnings.simplefilter('error')

    Applications that need to pass both bytestrings and ``unicode``
    should decode the bytestrings first; see the recipe at
    :ref:`coerce_to_unicode`.

    See also:

        :class:`.UnicodeText` - unlengthed textual counterpart
        to :class:`.Unicode`.

    """

    __visit_name__ = 'unicode'

    def __init__(self, length=None, **kwargs):
        """Create a :class:`.Unicode` object.

        Accepts the same parameters as :class:`.String`, except that
        ``convert_unicode`` defaults to ``True``.
        """
        kwargs.setdefault('convert_unicode', True)
        kwargs.setdefault('_warn_on_bytestring', True)
        super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
    """An unbounded-length Unicode string type.

    See :class:`.Unicode` for details on the unicode behavior of this
    object.  As with :class:`.Unicode`, use of :class:`.UnicodeText`
    implies a unicode-capable backend type such as ``NCLOB`` or
    ``NTEXT``.
    """

    __visit_name__ = 'unicode_text'

    def __init__(self, length=None, **kwargs):
        """Create a Unicode-converting Text type.

        Accepts the same parameters as :class:`.Text`, except that
        ``convert_unicode`` defaults to ``True``.
        """
        kwargs.setdefault('convert_unicode', True)
        kwargs.setdefault('_warn_on_bytestring', True)
        super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(_DateAffinity, TypeEngine):
    """A type for ``int`` integers."""

    __visit_name__ = 'integer'

    def get_dbapi_type(self, dbapi):
        return dbapi.NUMBER

    @property
    def python_type(self):
        return int

    def literal_processor(self, dialect):
        # Integers render directly via str() in literal SQL.
        def render(value):
            return str(value)
        return render

    @util.memoized_property
    def _expression_adaptations(self):
        # TODO: need a dictionary object that will
        # handle operators generically here, this is incomplete
        same = self.__class__
        return {
            operators.add: {
                Date: Date,
                Integer: same,
                Numeric: Numeric,
            },
            operators.mul: {
                Interval: Interval,
                Integer: same,
                Numeric: Numeric,
            },
            operators.div: {
                Integer: same,
                Numeric: Numeric,
            },
            operators.truediv: {
                Integer: same,
                Numeric: Numeric,
            },
            operators.sub: {
                Integer: same,
                Numeric: Numeric,
            },
        }
class SmallInteger(Integer):
    """A type for smaller ``int`` integers.

    Generates ``SMALLINT`` in DDL; otherwise behaves exactly like a
    plain :class:`.Integer` on the Python side.
    """

    __visit_name__ = 'small_integer'
class BigInteger(Integer):
    """A type for bigger ``int`` integers.

    Generates ``BIGINT`` in DDL; otherwise behaves exactly like a plain
    :class:`.Integer` on the Python side.
    """

    __visit_name__ = 'big_integer'
class Numeric(_DateAffinity, TypeEngine):
    """A type for fixed precision numbers.

    Typically generates DECIMAL or NUMERIC. Returns
    ``decimal.Decimal`` objects by default, applying
    conversion as needed.

    .. note::

       The `cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library
       is a high performing alternative to Python's built-in
       ``decimal.Decimal`` type, which performs very poorly in high volume
       situations. SQLAlchemy 0.7 is tested against ``cdecimal`` and supports
       it fully. The type is not necessarily supported by DBAPI
       implementations however, most of which contain an import for plain
       ``decimal`` in their source code, even though some such as psycopg2
       provide hooks for alternate adapters. SQLAlchemy imports ``decimal``
       globally as well. The most straightforward and
       foolproof way to use "cdecimal" given current DBAPI and Python support
       is to patch it directly into sys.modules before anything else is
       imported::

           import sys
           import cdecimal
           sys.modules["decimal"] = cdecimal

       While the global patch is a little ugly, it's particularly
       important to use just one decimal library at a time since
       Python Decimal and cdecimal Decimal objects
       are not currently compatible *with each other*::

           >>> import cdecimal
           >>> import decimal
           >>> decimal.Decimal("10") == cdecimal.Decimal("10")
           False

       SQLAlchemy will provide more natural support of
       cdecimal if and when it becomes a standard part of Python
       installations and is supported by all DBAPIs.

    """

    __visit_name__ = 'numeric'

    # Number of decimal places retained when converting a float result
    # to Decimal and no scale/decimal_return_scale is configured.
    _default_decimal_return_scale = 10

    def __init__(self, precision=None, scale=None,
                 decimal_return_scale=None, asdecimal=True):
        """
        Construct a Numeric.

        :param precision: the numeric precision for use in DDL ``CREATE
          TABLE``.

        :param scale: the numeric scale for use in DDL ``CREATE TABLE``.

        :param asdecimal: default True. Return whether or not
          values should be sent as Python Decimal objects, or
          as floats. Different DBAPIs send one or the other based on
          datatypes - the Numeric type will ensure that return values
          are one or the other across DBAPIs consistently.

        :param decimal_return_scale: Default scale to use when converting
          from floats to Python decimals. Floating point values will typically
          be much longer due to decimal inaccuracy, and most floating point
          database types don't have a notion of "scale", so by default the
          float type looks for the first ten decimal places when converting.
          Specfiying this value will override that length. Types which
          do include an explicit ".scale" value, such as the base :class:`.Numeric`
          as well as the MySQL float types, will use the value of ".scale"
          as the default for decimal_return_scale, if not otherwise specified.

          .. versionadded:: 0.9.0

        When using the ``Numeric`` type, care should be taken to ensure
        that the asdecimal setting is apppropriate for the DBAPI in use -
        when Numeric applies a conversion from Decimal->float or float->
        Decimal, this conversion incurs an additional performance overhead
        for all result columns received.

        DBAPIs that return Decimal natively (e.g. psycopg2) will have
        better accuracy and higher performance with a setting of ``True``,
        as the native translation to Decimal reduces the amount of floating-
        point issues at play, and the Numeric type itself doesn't need
        to apply any further conversions. However, another DBAPI which
        returns floats natively *will* incur an additional conversion
        overhead, and is still subject to floating point data loss - in
        which case ``asdecimal=False`` will at least remove the extra
        conversion overhead.

        """
        self.precision = precision
        self.scale = scale
        self.decimal_return_scale = decimal_return_scale
        self.asdecimal = asdecimal

    @property
    def _effective_decimal_return_scale(self):
        # Precedence: explicit decimal_return_scale, then the type's own
        # scale, then the module-wide default of 10 places.
        if self.decimal_return_scale is not None:
            return self.decimal_return_scale
        elif getattr(self, "scale", None) is not None:
            return self.scale
        else:
            return self._default_decimal_return_scale

    def get_dbapi_type(self, dbapi):
        return dbapi.NUMBER

    def literal_processor(self, dialect):
        def process(value):
            return str(value)
        return process

    @property
    def python_type(self):
        if self.asdecimal:
            return decimal.Decimal
        else:
            return float

    def bind_processor(self, dialect):
        # DBAPIs without native Decimal support receive plain floats.
        if dialect.supports_native_decimal:
            return None
        else:
            return processors.to_float

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if dialect.supports_native_decimal:
                # we're a "numeric", DBAPI will give us Decimal directly
                return None
            else:
                util.warn('Dialect %s+%s does *not* support Decimal '
                          'objects natively, and SQLAlchemy must '
                          'convert from floating point - rounding '
                          'errors and other issues may occur. Please '
                          'consider storing Decimal numbers as strings '
                          'or integers on this platform for lossless '
                          'storage.' % (dialect.name, dialect.driver))

                # we're a "numeric", DBAPI returns floats, convert.
                return processors.to_decimal_processor_factory(
                    decimal.Decimal,
                    self.scale if self.scale is not None
                    else self._default_decimal_return_scale)
        else:
            if dialect.supports_native_decimal:
                # Caller wants floats but DBAPI returns Decimal: downcast.
                return processors.to_float
            else:
                return None

    @util.memoized_property
    def _expression_adaptations(self):
        return {
            operators.mul: {
                Interval: Interval,
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.div: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.truediv: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.add: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.sub: {
                Numeric: self.__class__,
                Integer: self.__class__,
            }
        }
class Float(Numeric):
    """A type for ``float`` numbers.

    Returns Python ``float`` objects by default, applying conversion as
    needed.
    """

    __visit_name__ = 'float'

    # Floats have no fixed scale; Numeric's scale attribute is unset here.
    scale = None

    def __init__(self, precision=None, asdecimal=False,
                 decimal_return_scale=None, **kwargs):
        r"""Construct a Float.

        :param precision: the numeric precision for use in DDL ``CREATE
         TABLE``.

        :param asdecimal: the same flag as that of :class:`.Numeric`, but
         defaults to ``False``.  Setting it to ``True`` results in
         floating point conversion.

        :param decimal_return_scale: default scale to use when converting
         floats to Python decimals.  Float values are typically much
         longer due to decimal inaccuracy, and since most floating point
         database types have no notion of "scale", the first ten decimal
         places are used by default; this value overrides that length.
         The MySQL float types, which do include "scale", use it as the
         default for decimal_return_scale when not otherwise specified.

         .. versionadded:: 0.9.0

        :param \**kwargs: deprecated.  Additional arguments here are
         ignored by the default :class:`.Float` type; database-specific
         float types that support more arguments document them in the
         corresponding dialect, e.g.
         :class:`sqlalchemy.dialects.mysql.FLOAT`.
        """
        self.precision = precision
        self.asdecimal = asdecimal
        self.decimal_return_scale = decimal_return_scale
        if kwargs:
            util.warn_deprecated("Additional keyword arguments "
                                 "passed to Float ignored.")

    def result_processor(self, dialect, coltype):
        # Only convert when Decimal results were explicitly requested;
        # otherwise the DBAPI's float passes through untouched.
        if not self.asdecimal:
            return None
        return processors.to_decimal_processor_factory(
            decimal.Decimal,
            self._effective_decimal_return_scale)

    @util.memoized_property
    def _expression_adaptations(self):
        same = self.__class__
        return {
            operators.mul: {Interval: Interval, Numeric: same},
            operators.div: {Numeric: same},
            operators.truediv: {Numeric: same},
            operators.add: {Numeric: same},
            operators.sub: {Numeric: same},
        }
class DateTime(_DateAffinity, TypeEngine):
    """A type for ``datetime.datetime()`` objects.

    Date and time types return objects from the Python ``datetime``
    module.  Most DBAPIs support that module natively, with SQLite as
    the noted exception: there, date and time values are stored as
    strings and converted back to datetime objects as rows are
    returned.
    """

    __visit_name__ = 'datetime'

    def __init__(self, timezone=False):
        """Construct a new :class:`.DateTime`.

        :param timezone: boolean.  When True and supported by the
         backend, produces 'TIMESTAMP WITH TIMEZONE'; has no effect on
         backends without timezone-aware timestamps.
        """
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.datetime

    @util.memoized_property
    def _expression_adaptations(self):
        # datetime +/- interval -> datetime; datetime - datetime -> interval
        return {
            operators.add: {Interval: self.__class__},
            operators.sub: {Interval: self.__class__, DateTime: Interval},
        }
class Date(_DateAffinity, TypeEngine):
    """A type for ``datetime.date()`` objects."""

    __visit_name__ = 'date'

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.date

    @util.memoized_property
    def _expression_adaptations(self):
        return {
            operators.add: {
                Integer: self.__class__,
                Interval: DateTime,
                Time: DateTime,
            },
            operators.sub: {
                # date - integer = date
                Integer: self.__class__,
                # date - date = integer
                Date: Integer,
                Interval: DateTime,
                # date - datetime = interval; not in the PG docs but works
                DateTime: Interval,
            },
        }
class Time(_DateAffinity, TypeEngine):
    """A type for ``datetime.time()`` objects."""

    __visit_name__ = 'time'

    def __init__(self, timezone=False):
        # Mirrors DateTime: when supported, requests a timezone-aware
        # time type from the backend.
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.time

    @util.memoized_property
    def _expression_adaptations(self):
        # time + date -> datetime; time - time -> interval;
        # time +/- interval -> time
        return {
            operators.add: {Date: DateTime, Interval: self.__class__},
            operators.sub: {Time: Interval, Interval: self.__class__},
        }
class _Binary(TypeEngine):
    """Define base behavior for binary types."""

    def __init__(self, length=None):
        # Optional length for DDL rendering (e.g. VARBINARY(length)).
        self.length = length

    def literal_processor(self, dialect):
        """Render an inline binary literal as a quoted string.

        The bytes are decoded using the dialect's configured encoding and
        embedded single quotes are doubled.
        """
        def process(value):
            # Bug fix: the dialect is the argument to literal_processor();
            # type instances have no ``self.dialect`` attribute, so the
            # previous ``self.dialect.encoding`` raised AttributeError.
            value = value.decode(dialect.encoding).replace("'", "''")
            return "'%s'" % value
        return process

    @property
    def python_type(self):
        return util.binary_type

    # Python 3 - sqlite3 doesn't need the `Binary` conversion
    # here, though pg8000 does to indicate "bytea"
    def bind_processor(self, dialect):
        # Wrap outgoing values with the DBAPI's Binary() constructor
        # (PEP 249) so the driver treats them as binary data.
        DBAPIBinary = dialect.dbapi.Binary

        def process(value):
            if value is not None:
                return DBAPIBinary(value)
            else:
                return None
        return process

    # Python 3 has native bytes() type
    # both sqlite3 and pg8000 seem to return it,
    # psycopg2 as of 2.5 returns 'memoryview'
    if util.py2k:
        def result_processor(self, dialect, coltype):
            if util.jython:
                # Jython may hand back array.array instances; flatten to str.
                def process(value):
                    if value is not None:
                        if isinstance(value, array.array):
                            return value.tostring()
                        return str(value)
                    else:
                        return None
            else:
                process = processors.to_str
            return process
    else:
        def result_processor(self, dialect, coltype):
            # Normalize buffer/memoryview results to bytes.
            def process(value):
                if value is not None:
                    value = bytes(value)
                return value
            return process

    def coerce_compared_value(self, op, value):
        """See :meth:`.TypeEngine.coerce_compared_value` for a description."""
        # Keep binary affinity when compared against plain strings so the
        # bound value is processed as binary data.
        if isinstance(value, util.string_types):
            return self
        else:
            return super(_Binary, self).coerce_compared_value(op, value)

    def get_dbapi_type(self, dbapi):
        return dbapi.BINARY
class LargeBinary(_Binary):
    """A type for large binary byte data.

    Generates BLOB or BYTEA when tables are created, and converts
    incoming values using the ``Binary`` callable provided by each
    DB-API.
    """

    __visit_name__ = 'large_binary'

    def __init__(self, length=None):
        """Construct a LargeBinary type.

        :param length: optional column length for use in DDL statements,
          for BLOB types that accept one (i.e. MySQL).  It does *not*
          produce a small BINARY/VARBINARY type - use those types
          specifically instead.  May be safely omitted if no ``CREATE
          TABLE`` will be issued; certain databases require a *length*
          for DDL and raise when ``CREATE TABLE`` is issued without one.
        """
        _Binary.__init__(self, length=length)
class Binary(LargeBinary):
    """Deprecated. Renamed to LargeBinary."""

    def __init__(self, *arg, **kw):
        # Emit the deprecation notice, then defer entirely to LargeBinary.
        util.warn_deprecated(
            'The Binary type has been renamed to '
            'LargeBinary.')
        LargeBinary.__init__(self, *arg, **kw)
class SchemaType(SchemaEventTarget):
    """Mark a type as possibly requiring schema-level DDL for usage.

    Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
    as well as types that are complimented by table or schema level
    constraints, triggers, and other rules.

    :class:`.SchemaType` classes can also be targets for the
    :meth:`.DDLEvents.before_parent_attach` and
    :meth:`.DDLEvents.after_parent_attach` events, where the events fire off
    surrounding the association of the type object with a parent
    :class:`.Column`.

    .. seealso::

        :class:`.Enum`

        :class:`.Boolean`

    """

    def __init__(self, **kw):
        # Accepted keywords: name, quote, schema, metadata, inherit_schema.
        name = kw.pop('name', None)
        if name is not None:
            self.name = quoted_name(name, kw.pop('quote', None))
        else:
            self.name = None
        self.schema = kw.pop('schema', None)
        self.metadata = kw.pop('metadata', None)
        self.inherit_schema = kw.pop('inherit_schema', False)
        # When bound to a MetaData, create/drop this type's DDL around
        # metadata-level create/drop events.
        if self.metadata:
            event.listen(
                self.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                self.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def _set_parent(self, column):
        # Defer table-level event wiring until the column joins a Table.
        column._on_table_attach(util.portable_instancemethod(self._set_table))

    def _set_table(self, column, table):
        # Hook this type's DDL into the owning table's lifecycle, and --
        # when not already tied to a MetaData -- into the table's MetaData
        # lifecycle as well.
        if self.inherit_schema:
            self.schema = table.schema

        event.listen(
            table,
            "before_create",
            util.portable_instancemethod(
                self._on_table_create)
        )
        event.listen(
            table,
            "after_drop",
            util.portable_instancemethod(self._on_table_drop)
        )
        if self.metadata is None:
            # TODO: what's the difference between self.metadata
            # and table.metadata here ?
            event.listen(
                table.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                table.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def copy(self, **kw):
        return self.adapt(self.__class__)

    def adapt(self, impltype, **kw):
        # Build a new instance of *impltype* carrying over schema-level
        # configuration; callers may override schema/metadata via kw.
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        return impltype(name=self.name,
                        schema=schema,
                        metadata=metadata,
                        inherit_schema=self.inherit_schema,
                        **kw
                        )

    @property
    def bind(self):
        return self.metadata and self.metadata.bind or None

    def create(self, bind=None, checkfirst=False):
        """Issue CREATE ddl for this type, if applicable."""

        if bind is None:
            bind = _bind_or_error(self)
        # Delegate to the dialect-specific implementation when it differs
        # (e.g. the PG ENUM impl); the generic type itself emits no DDL.
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.create(bind=bind, checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=False):
        """Issue DROP ddl for this type, if applicable."""

        if bind is None:
            bind = _bind_or_error(self)
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.drop(bind=bind, checkfirst=checkfirst)

    def _on_table_create(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_create(target, bind, **kw)

    def _on_table_drop(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_drop(target, bind, **kw)

    def _on_metadata_create(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_create(target, bind, **kw)

    def _on_metadata_drop(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_drop(target, bind, **kw)
class Enum(String, SchemaType):
    """Generic Enum Type.

    The Enum type provides a set of possible string values which the
    column is constrained towards.

    By default, uses the backend's native ENUM type if available,
    else uses VARCHAR + a CHECK constraint.

    .. seealso::

        :class:`~.postgresql.ENUM` - PostgreSQL-specific type,
        which has additional functionality.

    """

    __visit_name__ = 'enum'

    def __init__(self, *enums, **kw):
        """Construct an enum.

        Keyword arguments which don't apply to a specific backend are ignored
        by that backend.

        :param \*enums: string or unicode enumeration labels. If unicode
           labels are present, the `convert_unicode` flag is auto-enabled.

        :param convert_unicode: Enable unicode-aware bind parameter and
           result-set processing for this Enum's data. This is set
           automatically based on the presence of unicode label strings.

        :param metadata: Associate this type directly with a ``MetaData``
           object. For types that exist on the target database as an
           independent schema construct (Postgresql), this type will be
           created and dropped within ``create_all()`` and ``drop_all()``
           operations. If the type is not associated with any ``MetaData``
           object, it will associate itself with each ``Table`` in which it is
           used, and will be created when any of those individual tables are
           created, after a check is performed for it's existence. The type is
           only dropped when ``drop_all()`` is called for that ``Table``
           object's metadata, however.

        :param name: The name of this type. This is required for Postgresql
           and any future supported database which requires an explicitly
           named type, or an explicitly named constraint in order to generate
           the type and/or a table that uses it.

        :param native_enum: Use the database's native ENUM type when
           available. Defaults to True. When False, uses VARCHAR + check
           constraint for all backends.

        :param schema: Schema name of this type. For types that exist on the
           target database as an independent schema construct (Postgresql),
           this parameter specifies the named schema in which the type is
           present.

           .. note::

              The ``schema`` of the :class:`.Enum` type does not
              by default make use of the ``schema`` established on the
              owning :class:`.Table`.  If this behavior is desired,
              set the ``inherit_schema`` flag to ``True``.

        :param quote: Set explicit quoting preferences for the type's name.

        :param inherit_schema: When ``True``, the "schema" from the owning
           :class:`.Table` will be copied to the "schema" attribute of this
           :class:`.Enum`, replacing whatever value was passed for the
           ``schema`` attribute.   This also takes effect when using the
           :meth:`.Table.tometadata` operation.

           .. versionadded:: 0.8

        """
        self.enums = enums
        self.native_enum = kw.pop('native_enum', True)
        convert_unicode = kw.pop('convert_unicode', None)
        if convert_unicode is None:
            # Auto-enable unicode conversion if any label is a unicode string.
            for e in enums:
                if isinstance(e, util.text_type):
                    convert_unicode = True
                    break
            else:
                convert_unicode = False
        if self.enums:
            # Size the underlying VARCHAR to the longest label.
            length = max(len(x) for x in self.enums)
        else:
            length = 0
        String.__init__(self,
                        length=length,
                        convert_unicode=convert_unicode,
                        )
        SchemaType.__init__(self, **kw)

    def __repr__(self):
        return util.generic_repr(self, [
            ("native_enum", True),
            ("name", None)
        ])

    def _should_create_constraint(self, compiler):
        # Emit the CHECK constraint only when a native ENUM is not in play.
        return not self.native_enum or \
            not compiler.dialect.supports_native_enum

    @util.dependencies("sqlalchemy.sql.schema")
    def _set_table(self, schema, column, table):
        if self.native_enum:
            SchemaType._set_table(self, column, table)
        # The constraint is always attached; _create_rule decides at DDL
        # time whether it is actually rendered.
        e = schema.CheckConstraint(
            type_coerce(column, self).in_(self.enums),
            name=self.name,
            _create_rule=util.portable_instancemethod(
                self._should_create_constraint)
        )
        table.append_constraint(e)

    def adapt(self, impltype, **kw):
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        if issubclass(impltype, Enum):
            return impltype(name=self.name,
                            schema=schema,
                            metadata=metadata,
                            convert_unicode=self.convert_unicode,
                            native_enum=self.native_enum,
                            inherit_schema=self.inherit_schema,
                            *self.enums,
                            **kw
                            )
        else:
            return super(Enum, self).adapt(impltype, **kw)
class PickleType(TypeDecorator):
    """Holds Python objects, which are serialized using pickle.

    PickleType builds upon the LargeBinary type to apply Python's
    ``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
    the way out, allowing any pickleable Python object to be stored as
    a serialized binary field.

    To allow ORM change events to propagate for elements associated
    with :class:`.PickleType`, see :ref:`mutable_toplevel`.

    """

    impl = LargeBinary

    def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
                 pickler=None, comparator=None):
        """
        Construct a PickleType.

        :param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.

        :param pickler: defaults to cPickle.pickle or pickle.pickle if
          cPickle is not available.  May be any object with
          pickle-compatible ``dumps`` and ``loads`` methods.

        :param comparator: a 2-arg callable predicate used
          to compare values of this type.  If left as ``None``,
          the Python "equals" operator is used to compare values.

        """
        self.protocol = protocol
        self.pickler = pickler or pickle
        self.comparator = comparator
        super(PickleType, self).__init__()

    def __reduce__(self):
        # NOTE: pickling the type itself drops any custom ``pickler``
        # (passed as None); __init__ restores the default on load.
        return PickleType, (self.protocol,
                            None,
                            self.comparator)

    def bind_processor(self, dialect):
        # Serialize the value, then hand off to the impl's processor
        # (if any) for DBAPI-level conversion.
        impl_processor = self.impl.bind_processor(dialect)
        dumps = self.pickler.dumps
        protocol = self.protocol
        if impl_processor:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return impl_processor(value)
        else:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return value
        return process

    def result_processor(self, dialect, coltype):
        # Run the impl's processor first (if any), then deserialize.
        impl_processor = self.impl.result_processor(dialect, coltype)
        loads = self.pickler.loads
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is None:
                    return None
                return loads(value)
        else:
            def process(value):
                if value is None:
                    return None
                return loads(value)
        return process

    def compare_values(self, x, y):
        # Custom comparator takes precedence; otherwise plain equality.
        if self.comparator:
            return self.comparator(x, y)
        else:
            return x == y
class Boolean(TypeEngine, SchemaType):
    """A bool datatype.

    Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on
    the Python side deals in ``True`` or ``False``.

    """

    __visit_name__ = 'boolean'

    def __init__(self, create_constraint=True, name=None):
        """Construct a Boolean.

        :param create_constraint: defaults to True.  When the boolean is
          generated as an int/smallint, also create a CHECK constraint on
          the table ensuring 1 or 0 as a value.

        :param name: optional name for the generated CHECK constraint.

        """
        self.create_constraint = create_constraint
        self.name = name

    def _should_create_constraint(self, compiler):
        # The CHECK constraint is only needed when the backend lacks a
        # native boolean type.
        return not compiler.dialect.supports_native_boolean

    @util.dependencies("sqlalchemy.sql.schema")
    def _set_table(self, schema, column, table):
        if not self.create_constraint:
            return
        constraint = schema.CheckConstraint(
            type_coerce(column, self).in_([0, 1]),
            name=self.name,
            _create_rule=util.portable_instancemethod(
                self._should_create_constraint)
        )
        table.append_constraint(constraint)

    @property
    def python_type(self):
        return bool

    def bind_processor(self, dialect):
        # Native-boolean backends take Python bools directly.
        if dialect.supports_native_boolean:
            return None
        return processors.boolean_to_int

    def result_processor(self, dialect, coltype):
        # Native-boolean backends already return Python bools.
        if dialect.supports_native_boolean:
            return None
        return processors.int_to_boolean
class Interval(_DateAffinity, TypeDecorator):
    """A type for ``datetime.timedelta()`` objects.

    The Interval type deals with ``datetime.timedelta`` objects.  In
    PostgreSQL, the native ``INTERVAL`` type is used; for others, the
    value is stored as a date which is relative to the "epoch"
    (Jan. 1, 1970).

    Note that the ``Interval`` type does not currently provide date arithmetic
    operations on platforms which do not support interval types natively. Such
    operations usually require transformation of both sides of the expression
    (such as, conversion of both sides into integer epoch values first) which
    currently is a manual procedure (such as via
    :attr:`~sqlalchemy.sql.expression.func`).

    """

    impl = DateTime
    # Reference point used when emulating INTERVAL on top of a DATETIME
    # column: timedeltas are stored as epoch + delta.
    epoch = dt.datetime.utcfromtimestamp(0)

    def __init__(self, native=True,
                 second_precision=None,
                 day_precision=None):
        """Construct an Interval object.

        :param native: when True, use the actual
          INTERVAL type provided by the database, if
          supported (currently Postgresql, Oracle).
          Otherwise, represent the interval data as
          an epoch value regardless.

        :param second_precision: For native interval types
          which support a "fractional seconds precision" parameter,
          i.e. Oracle and Postgresql

        :param day_precision: for native interval types which
          support a "day precision" parameter, i.e. Oracle.

        """
        super(Interval, self).__init__()
        self.native = native
        self.second_precision = second_precision
        self.day_precision = day_precision

    def adapt(self, cls, **kw):
        # Native interval dialect types advertise a dedicated adaptation
        # hook; fall back to a plain re-construction otherwise.
        if self.native and hasattr(cls, '_adapt_from_generic_interval'):
            return cls._adapt_from_generic_interval(self, **kw)
        else:
            return self.__class__(
                native=self.native,
                second_precision=self.second_precision,
                day_precision=self.day_precision,
                **kw)

    @property
    def python_type(self):
        return dt.timedelta

    def bind_processor(self, dialect):
        impl_processor = self.impl.bind_processor(dialect)
        epoch = self.epoch
        if impl_processor:
            def process(value):
                if value is not None:
                    # timedelta -> absolute datetime relative to epoch
                    value = epoch + value
                return impl_processor(value)
        else:
            def process(value):
                if value is not None:
                    value = epoch + value
                return value
        return process

    def result_processor(self, dialect, coltype):
        impl_processor = self.impl.result_processor(dialect, coltype)
        epoch = self.epoch
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is None:
                    return None
                # absolute datetime -> timedelta relative to epoch
                return value - epoch
        else:
            def process(value):
                if value is None:
                    return None
                return value - epoch
        return process

    @util.memoized_property
    def _expression_adaptations(self):
        # Result types for arithmetic against other type affinities.
        return {
            operators.add: {
                Date: DateTime,
                Interval: self.__class__,
                DateTime: DateTime,
                Time: Time,
            },
            operators.sub: {
                Interval: self.__class__
            },
            operators.mul: {
                Numeric: self.__class__
            },
            operators.truediv: {
                Numeric: self.__class__
            },
            operators.div: {
                Numeric: self.__class__
            }
        }

    @property
    def _type_affinity(self):
        return Interval

    def coerce_compared_value(self, op, value):
        """See :meth:`.TypeEngine.coerce_compared_value` for a description."""
        return self.impl.coerce_compared_value(op, value)
# ---------------------------------------------------------------------------
# SQL-standard "uppercase" types: thin subclasses of the generic types
# above, each carrying its own __visit_name__ so the compiler emits the
# exact SQL keyword.
# ---------------------------------------------------------------------------

class REAL(Float):
    """The SQL REAL type."""

    __visit_name__ = 'REAL'


class FLOAT(Float):
    """The SQL FLOAT type."""

    __visit_name__ = 'FLOAT'


class NUMERIC(Numeric):
    """The SQL NUMERIC type."""

    __visit_name__ = 'NUMERIC'


class DECIMAL(Numeric):
    """The SQL DECIMAL type."""

    __visit_name__ = 'DECIMAL'


class INTEGER(Integer):
    """The SQL INT or INTEGER type."""

    __visit_name__ = 'INTEGER'

# Synonym for INTEGER.
INT = INTEGER


class SMALLINT(SmallInteger):
    """The SQL SMALLINT type."""

    __visit_name__ = 'SMALLINT'


class BIGINT(BigInteger):
    """The SQL BIGINT type."""

    __visit_name__ = 'BIGINT'


class TIMESTAMP(DateTime):
    """The SQL TIMESTAMP type."""

    __visit_name__ = 'TIMESTAMP'

    def get_dbapi_type(self, dbapi):
        return dbapi.TIMESTAMP


class DATETIME(DateTime):
    """The SQL DATETIME type."""

    __visit_name__ = 'DATETIME'


class DATE(Date):
    """The SQL DATE type."""

    __visit_name__ = 'DATE'


class TIME(Time):
    """The SQL TIME type."""

    __visit_name__ = 'TIME'


class TEXT(Text):
    """The SQL TEXT type."""

    __visit_name__ = 'TEXT'


class CLOB(Text):
    """The CLOB type.

    This type is found in Oracle and Informix.
    """

    __visit_name__ = 'CLOB'


class VARCHAR(String):
    """The SQL VARCHAR type."""

    __visit_name__ = 'VARCHAR'


class NVARCHAR(Unicode):
    """The SQL NVARCHAR type."""

    __visit_name__ = 'NVARCHAR'


class CHAR(String):
    """The SQL CHAR type."""

    __visit_name__ = 'CHAR'


class NCHAR(Unicode):
    """The SQL NCHAR type."""

    __visit_name__ = 'NCHAR'


class BLOB(LargeBinary):
    """The SQL BLOB type."""

    __visit_name__ = 'BLOB'


class BINARY(_Binary):
    """The SQL BINARY type."""

    __visit_name__ = 'BINARY'


class VARBINARY(_Binary):
    """The SQL VARBINARY type."""

    __visit_name__ = 'VARBINARY'


class BOOLEAN(Boolean):
    """The SQL BOOLEAN type."""

    __visit_name__ = 'BOOLEAN'
class NullType(TypeEngine):
    """An unknown type.

    :class:`.NullType` is used as a default type for those cases where
    a type cannot be determined, including:

    * During table reflection, when the type of a column is not recognized
      by the :class:`.Dialect`

    * When constructing SQL expressions using plain Python objects of
      unknown types (e.g. ``somecolumn == my_special_object``)

    * When a new :class:`.Column` is created, and the given type is passed
      as ``None`` or is not passed at all.

    The :class:`.NullType` can be used within SQL expression invocation
    without issue, it just has no behavior either at the expression construction
    level or at the bind-parameter/result processing level.  :class:`.NullType`
    will result in a :exc:`.CompileError` if the compiler is asked to render
    the type itself, such as if it is used in a :func:`.cast` operation
    or within a schema creation operation such as that invoked by
    :meth:`.MetaData.create_all` or the :class:`.CreateTable` construct.

    """

    __visit_name__ = 'null'

    # Marker flag: this type represents "no type was determined".
    _isnull = True

    def literal_processor(self, dialect):
        # Render any inline literal of unknown type as SQL NULL.
        def process(value):
            return "NULL"
        return process

    class Comparator(TypeEngine.Comparator):
        def _adapt_expression(self, op, other_comparator):
            # Keep NULL typing for NULL-vs-NULL comparisons and for
            # non-commutative operators; otherwise let the other side's
            # comparator determine the result type.
            if isinstance(other_comparator, NullType.Comparator) or \
                    not operators.is_commutative(op):
                return op, self.expr.type
            else:
                return other_comparator._adapt_expression(op, self)
    comparator_factory = Comparator
# Shared singleton instances for the most common generic types.
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()

# Default mapping from Python types to SQL type instances, used when
# coercing plain Python values into SQL expressions.
_type_map = {
    int: Integer(),
    float: Numeric(),
    bool: BOOLEANTYPE,
    decimal.Decimal: Numeric(),
    dt.date: Date(),
    dt.datetime: DateTime(),
    dt.time: Time(),
    dt.timedelta: Interval(),
    util.NoneType: NULLTYPE
}

# Python 2/3: ``str`` is unicode on py3k; ``unicode`` only exists on py2.
if util.py3k:
    _type_map[bytes] = LargeBinary()
    _type_map[str] = Unicode()
else:
    _type_map[unicode] = Unicode()
    _type_map[str] = String()

# back-assign to type_api
from . import type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api._type_map = _type_map

# this one, there's all kinds of ways to play it, but at the EOD
# there's just a giant dependency cycle between the typing system and
# the expression element system, as you might expect.   We can use
# importlaters or whatnot, but the typing system just necessarily has
# to have some kind of connection like this.  right now we're injecting the
# _DefaultColumnComparator implementation into the TypeEngine.Comparator
# interface.  Alternatively TypeEngine.Comparator could have an "impl"
# injected, though just injecting the base is simpler, error free, and more
# performant.
class Comparator(_DefaultColumnComparator):
    BOOLEANTYPE = BOOLEANTYPE

TypeEngine.Comparator.__bases__ = (Comparator, ) + TypeEngine.Comparator.__bases__
| 33.115692
| 93
| 0.607474
|
import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
from .elements import quoted_name, type_coerce
from .default_comparator import _DefaultColumnComparator
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
from .. import event
from ..util import pickle
import decimal
if util.jython:
import array
class _DateAffinity(object):
    """Mixin providing operator result-type adaptation for date/interval
    arithmetic, driven by the subclass's ``_expression_adaptations`` table."""

    @property
    def _expression_adaptations(self):
        raise NotImplementedError()

    class Comparator(TypeEngine.Comparator):
        _blank_dict = util.immutabledict()

        def _adapt_expression(self, op, other_comparator):
            # Look up the result type keyed by (operator, other side's type
            # affinity); fall back to NULLTYPE if no adaptation is defined.
            othertype = other_comparator.type._type_affinity
            return op, \
                to_instance(self.type._expression_adaptations.get(op, self._blank_dict).\
                get(othertype, NULLTYPE))
    comparator_factory = Comparator


class Concatenable(object):
    """Mixin marking a type as supporting 'concatenation'."""

    class Comparator(TypeEngine.Comparator):
        def _adapt_expression(self, op, other_comparator):
            # Map "+" to the SQL concatenation operator when the other side
            # is also concatenable (or NULL-typed).
            if op is operators.add and isinstance(other_comparator,
                    (Concatenable.Comparator, NullType.Comparator)):
                return operators.concat_op, self.expr.type
            else:
                return op, self.expr.type
    comparator_factory = Comparator
class String(Concatenable, TypeEngine):
    """The base for all string and character types."""

    __visit_name__ = 'string'

    def __init__(self, length=None, collation=None,
                 convert_unicode=False,
                 unicode_error=None,
                 _warn_on_bytestring=False
                 ):
        """Create a string-holding type.

        :param length: optional length for use in DDL.
        :param collation: optional collation for use in DDL.
        :param convert_unicode: when True (or 'force'), apply unicode
          encode/decode on bind and result values as needed.
        :param unicode_error: decoder error handling; only legal together
          with ``convert_unicode='force'``.
        :param _warn_on_bytestring: warn when a non-unicode bytestring is
          passed as a bind value.
        """
        if unicode_error is not None and convert_unicode != 'force':
            raise exc.ArgumentError("convert_unicode must be 'force' "
                                    "when unicode_error is set.")
        self.length = length
        self.collation = collation
        self.convert_unicode = convert_unicode
        self.unicode_error = unicode_error
        self._warn_on_bytestring = _warn_on_bytestring

    def literal_processor(self, dialect):
        def process(value):
            # Escape embedded single quotes for inline SQL literals.
            value = value.replace("'", "''")
            return "'%s'" % value
        return process

    def bind_processor(self, dialect):
        if self.convert_unicode or dialect.convert_unicode:
            if dialect.supports_unicode_binds and \
                    self.convert_unicode != 'force':
                # Dialect accepts unicode directly; at most warn on bytes.
                if self._warn_on_bytestring:
                    def process(value):
                        if isinstance(value, util.binary_type):
                            util.warn("Unicode type received non-unicode bind "
                                      "param value.")
                        return value
                    return process
                else:
                    return None
            else:
                # DBAPI requires encoded bytestrings: encode here.
                encoder = codecs.getencoder(dialect.encoding)
                warn_on_bytestring = self._warn_on_bytestring

                def process(value):
                    if isinstance(value, util.text_type):
                        return encoder(value, self.unicode_error)[0]
                    elif warn_on_bytestring and value is not None:
                        util.warn("Unicode type received non-unicode bind "
                                  "param value")
                    return value
                return process
        else:
            return None

    def result_processor(self, dialect, coltype):
        wants_unicode = self.convert_unicode or dialect.convert_unicode
        needs_convert = wants_unicode and \
            (dialect.returns_unicode_strings is not True or
             self.convert_unicode == 'force')
        if needs_convert:
            to_unicode = processors.to_unicode_processor_factory(
                dialect.encoding, self.unicode_error)
            if dialect.returns_unicode_strings:
                # we wouldn't be here unless convert_unicode='force'
                def process(value):
                    if isinstance(value, util.text_type):
                        return value
                    else:
                        return to_unicode(value)
                return process
            else:
                return to_unicode
        else:
            return None

    @property
    def python_type(self):
        if self.convert_unicode:
            return util.text_type
        else:
            return str

    def get_dbapi_type(self, dbapi):
        return dbapi.STRING
class Text(String):
    """A variably sized string type, for unbounded text."""

    __visit_name__ = 'text'


class Unicode(String):
    """A variable length Unicode string type: enables unicode conversion
    and warns on non-unicode bind values by default."""

    __visit_name__ = 'unicode'

    def __init__(self, length=None, **kwargs):
        # Default to unicode conversion / bytestring warnings unless the
        # caller explicitly overrides them.
        kwargs.setdefault('convert_unicode', True)
        kwargs.setdefault('_warn_on_bytestring', True)
        super(Unicode, self).__init__(length=length, **kwargs)


class UnicodeText(Text):
    """An unbounded-length Unicode variant of :class:`.Text`."""

    __visit_name__ = 'unicode_text'

    def __init__(self, length=None, **kwargs):
        # Same defaults as Unicode, layered on Text.
        kwargs.setdefault('convert_unicode', True)
        kwargs.setdefault('_warn_on_bytestring', True)
        super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(_DateAffinity, TypeEngine):
    """A type for ``int`` integers."""

    __visit_name__ = 'integer'

    def get_dbapi_type(self, dbapi):
        return dbapi.NUMBER

    @property
    def python_type(self):
        return int

    def literal_processor(self, dialect):
        # Integers render inline via plain str().
        def process(value):
            return str(value)
        return process

    @util.memoized_property
    def _expression_adaptations(self):
        # Result types for arithmetic against other type affinities.
        return {
            operators.add: {
                Date: Date,
                Integer: self.__class__,
                Numeric: Numeric,
            },
            operators.mul: {
                Interval: Interval,
                Integer: self.__class__,
                Numeric: Numeric,
            },
            operators.div: {
                Integer: self.__class__,
                Numeric: Numeric,
            },
            operators.truediv: {
                Integer: self.__class__,
                Numeric: Numeric,
            },
            operators.sub: {
                Integer: self.__class__,
                Numeric: Numeric,
            },
        }
class SmallInteger(Integer):
    """A type for smaller ``int`` integers."""

    __visit_name__ = 'small_integer'


class BigInteger(Integer):
    """A type for bigger ``int`` integers."""

    __visit_name__ = 'big_integer'
class Numeric(_DateAffinity, TypeEngine):
    """A type for fixed precision numbers, returning ``decimal.Decimal``
    objects by default (``asdecimal=True``)."""

    __visit_name__ = 'numeric'

    # Scale used when coercing DBAPI floats back to Decimal and neither
    # ``scale`` nor ``decimal_return_scale`` is configured.
    _default_decimal_return_scale = 10

    def __init__(self, precision=None, scale=None,
                 decimal_return_scale=None, asdecimal=True):
        """Construct a Numeric.

        :param precision: numeric precision for use in DDL.
        :param scale: numeric scale for use in DDL.
        :param decimal_return_scale: scale applied when converting floats
          to ``Decimal`` on the result side; defaults to ``scale``.
        :param asdecimal: when True (default) return ``Decimal`` objects,
          otherwise floats.
        """
        self.precision = precision
        self.scale = scale
        self.decimal_return_scale = decimal_return_scale
        self.asdecimal = asdecimal

    @property
    def _effective_decimal_return_scale(self):
        # Priority: explicit decimal_return_scale, then scale, then default.
        if self.decimal_return_scale is not None:
            return self.decimal_return_scale
        elif getattr(self, "scale", None) is not None:
            return self.scale
        else:
            return self._default_decimal_return_scale

    def get_dbapi_type(self, dbapi):
        return dbapi.NUMBER

    def literal_processor(self, dialect):
        def process(value):
            return str(value)
        return process

    @property
    def python_type(self):
        if self.asdecimal:
            return decimal.Decimal
        else:
            return float

    def bind_processor(self, dialect):
        if dialect.supports_native_decimal:
            return None
        else:
            return processors.to_float

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if dialect.supports_native_decimal:
                # DBAPI already returns Decimal; no conversion required.
                return None
            else:
                util.warn('Dialect %s+%s does *not* support Decimal '
                          'objects natively, and SQLAlchemy must '
                          'convert from floating point - rounding '
                          'errors and other issues may occur. Please '
                          'consider storing Decimal numbers as strings '
                          'or integers on this platform for lossless '
                          'storage.' % (dialect.name, dialect.driver))
                # we're a "numeric", DBAPI returns floats, convert.
                return processors.to_decimal_processor_factory(
                    decimal.Decimal,
                    self.scale if self.scale is not None
                    else self._default_decimal_return_scale)
        else:
            if dialect.supports_native_decimal:
                return processors.to_float
            else:
                return None

    @util.memoized_property
    def _expression_adaptations(self):
        # Result types for arithmetic against other type affinities.
        return {
            operators.mul: {
                Interval: Interval,
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.div: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.truediv: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.add: {
                Numeric: self.__class__,
                Integer: self.__class__,
            },
            operators.sub: {
                Numeric: self.__class__,
                Integer: self.__class__,
            }
        }
class Float(Numeric):
    """A type for ``float`` numbers; returns Python floats by default
    (``asdecimal=False``)."""

    __visit_name__ = 'float'

    # Floats carry no DDL scale.
    scale = None

    def __init__(self, precision=None, asdecimal=False,
                 decimal_return_scale=None, **kwargs):
        """Construct a Float.

        :param precision: numeric precision for use in DDL.
        :param asdecimal: return ``Decimal`` objects when True; defaults
          to False.
        :param decimal_return_scale: scale used when ``asdecimal`` is True.
        """
        self.precision = precision
        self.asdecimal = asdecimal
        self.decimal_return_scale = decimal_return_scale
        if kwargs:
            # Extra keyword arguments are accepted for backwards
            # compatibility but no longer have any effect.
            util.warn_deprecated("Additional keyword arguments "
                                 "passed to Float ignored.")

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            return processors.to_decimal_processor_factory(
                decimal.Decimal,
                self._effective_decimal_return_scale)
        else:
            return None

    @util.memoized_property
    def _expression_adaptations(self):
        # Result types for arithmetic against other type affinities.
        return {
            operators.mul: {
                Interval: Interval,
                Numeric: self.__class__,
            },
            operators.div: {
                Numeric: self.__class__,
            },
            operators.truediv: {
                Numeric: self.__class__,
            },
            operators.add: {
                Numeric: self.__class__,
            },
            operators.sub: {
                Numeric: self.__class__,
            }
        }
class DateTime(_DateAffinity, TypeEngine):
    """A type for ``datetime.datetime()`` objects."""

    __visit_name__ = 'datetime'

    def __init__(self, timezone=False):
        # timezone=True requests the timezone-aware variant where the
        # dialect supports one.
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.datetime

    @util.memoized_property
    def _expression_adaptations(self):
        # Result types for arithmetic against other type affinities.
        return {
            operators.add: {
                Interval: self.__class__,
            },
            operators.sub: {
                Interval: self.__class__,
                DateTime: Interval,
            },
        }


class Date(_DateAffinity, TypeEngine):
    """A type for ``datetime.date()`` objects."""

    __visit_name__ = 'date'

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.date

    @util.memoized_property
    def _expression_adaptations(self):
        # Result types for arithmetic against other type affinities.
        return {
            operators.add: {
                Integer: self.__class__,
                Interval: DateTime,
                Time: DateTime,
            },
            operators.sub: {
                Integer: self.__class__,
                Date: Integer,
                Interval: DateTime,
                DateTime: Interval,
            },
        }


class Time(_DateAffinity, TypeEngine):
    """A type for ``datetime.time()`` objects."""

    __visit_name__ = 'time'

    def __init__(self, timezone=False):
        # timezone=True requests the timezone-aware variant where the
        # dialect supports one.
        self.timezone = timezone

    def get_dbapi_type(self, dbapi):
        return dbapi.DATETIME

    @property
    def python_type(self):
        return dt.time

    @util.memoized_property
    def _expression_adaptations(self):
        # Result types for arithmetic against other type affinities.
        return {
            operators.add: {
                Date: DateTime,
                Interval: self.__class__
            },
            operators.sub: {
                Time: Interval,
                Interval: self.__class__,
            },
        }
class _Binary(TypeEngine):
    """Define base behavior for binary types."""

    def __init__(self, length=None):
        self.length = length

    def literal_processor(self, dialect):
        """Return an inline-literal renderer for binary values."""
        def process(value):
            # Bugfix: decode using the *dialect* argument's encoding.
            # Type instances have no ``self.dialect`` attribute, so the
            # previous ``self.dialect.encoding`` raised AttributeError
            # whenever a binary literal was rendered inline.
            value = value.decode(dialect.encoding).replace("'", "''")
            return "'%s'" % value
        return process

    @property
    def python_type(self):
        return util.binary_type

    # Python 3 - sqlite3 doesn't need the `Binary` conversion
    def bind_processor(self, dialect):
        # Wrap outgoing values in the DBAPI's Binary construct.
        DBAPIBinary = dialect.dbapi.Binary

        def process(value):
            if value is not None:
                return DBAPIBinary(value)
            else:
                return None
        return process

    if util.py2k:
        def result_processor(self, dialect, coltype):
            # Py2 DBAPIs may return buffer/array objects; coerce to str.
            if util.jython:
                def process(value):
                    if value is not None:
                        if isinstance(value, array.array):
                            return value.tostring()
                        return str(value)
                    else:
                        return None
            else:
                process = processors.to_str
            return process
    else:
        def result_processor(self, dialect, coltype):
            # Py3: normalize whatever the DBAPI returns to ``bytes``.
            def process(value):
                if value is not None:
                    value = bytes(value)
                return value
            return process

    def coerce_compared_value(self, op, value):
        """Coerce a compared string literal to this binary type."""
        if isinstance(value, util.string_types):
            return self
        else:
            return super(_Binary, self).coerce_compared_value(op, value)

    def get_dbapi_type(self, dbapi):
        return dbapi.BINARY
class LargeBinary(_Binary):
    """A type for large binary byte data."""

    __visit_name__ = 'large_binary'

    def __init__(self, length=None):
        # Delegates entirely to _Binary; exists to carry the distinct
        # __visit_name__.
        _Binary.__init__(self, length=length)


class Binary(LargeBinary):
    """Deprecated.  Renamed to :class:`.LargeBinary`."""

    def __init__(self, *arg, **kw):
        util.warn_deprecated('The Binary type has been renamed to '
                             'LargeBinary.')
        LargeBinary.__init__(self, *arg, **kw)
class SchemaType(SchemaEventTarget):
    """Mark a type as possibly requiring schema-level DDL (e.g. PG ENUM),
    wiring CREATE/DROP event hooks to its owning Table and/or MetaData."""

    def __init__(self, **kw):
        # An explicit name allows named DDL constructs; 'quote' controls
        # identifier quoting for that name.
        name = kw.pop('name', None)
        if name is not None:
            self.name = quoted_name(name, kw.pop('quote', None))
        else:
            self.name = None
        self.schema = kw.pop('schema', None)
        self.metadata = kw.pop('metadata', None)
        self.inherit_schema = kw.pop('inherit_schema', False)
        if self.metadata:
            # Bound directly to a MetaData: create/drop with metadata-wide
            # DDL runs.
            event.listen(
                self.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                self.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def _set_parent(self, column):
        # Defer table-level event wiring until the column is attached.
        column._on_table_attach(util.portable_instancemethod(self._set_table))

    def _set_table(self, column, table):
        if self.inherit_schema:
            self.schema = table.schema
        event.listen(
            table,
            "before_create",
            util.portable_instancemethod(
                self._on_table_create)
        )
        event.listen(
            table,
            "after_drop",
            util.portable_instancemethod(self._on_table_drop)
        )
        if self.metadata is None:
            # TODO: what's the difference between self.metadata
            # and table.metadata here ?
            event.listen(
                table.metadata,
                "before_create",
                util.portable_instancemethod(self._on_metadata_create)
            )
            event.listen(
                table.metadata,
                "after_drop",
                util.portable_instancemethod(self._on_metadata_drop)
            )

    def copy(self, **kw):
        return self.adapt(self.__class__)

    def adapt(self, impltype, **kw):
        # Carry schema / metadata / inherit_schema over to the adapted type.
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        return impltype(name=self.name,
                        schema=schema,
                        metadata=metadata,
                        inherit_schema=self.inherit_schema,
                        **kw
                        )

    @property
    def bind(self):
        return self.metadata and self.metadata.bind or None

    def create(self, bind=None, checkfirst=False):
        """Issue CREATE ddl for this type, if applicable."""
        if bind is None:
            bind = _bind_or_error(self)
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.create(bind=bind, checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=False):
        """Issue DROP ddl for this type, if applicable."""
        if bind is None:
            bind = _bind_or_error(self)
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t.drop(bind=bind, checkfirst=checkfirst)

    def _on_table_create(self, target, bind, **kw):
        # Delegate to the dialect-specific implementation when distinct.
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_create(target, bind, **kw)

    def _on_table_drop(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_table_drop(target, bind, **kw)

    def _on_metadata_create(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_create(target, bind, **kw)

    def _on_metadata_drop(self, target, bind, **kw):
        t = self.dialect_impl(bind.dialect)
        if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
            t._on_metadata_drop(target, bind, **kw)
class Enum(String, SchemaType):
    """Generic Enum Type: constrains a string column to a fixed set of
    labels, using native ENUM when available, else VARCHAR + CHECK."""

    __visit_name__ = 'enum'

    def __init__(self, *enums, **kw):
        """Construct an enum from string/unicode labels; remaining keyword
        arguments are forwarded to :class:`.SchemaType`."""
        self.enums = enums
        self.native_enum = kw.pop('native_enum', True)
        convert_unicode = kw.pop('convert_unicode', None)
        if convert_unicode is None:
            # Auto-enable unicode conversion when any label is unicode.
            for e in enums:
                if isinstance(e, util.text_type):
                    convert_unicode = True
                    break
            else:
                convert_unicode = False
        if self.enums:
            # Size the underlying VARCHAR to the longest label.
            length = max(len(x) for x in self.enums)
        else:
            length = 0
        String.__init__(self,
                        length=length,
                        convert_unicode=convert_unicode,
                        )
        SchemaType.__init__(self, **kw)

    def __repr__(self):
        return util.generic_repr(self, [
            ("native_enum", True),
            ("name", None)
        ])

    def _should_create_constraint(self, compiler):
        # CHECK constraint only when a native ENUM is not used.
        return not self.native_enum or \
            not compiler.dialect.supports_native_enum

    @util.dependencies("sqlalchemy.sql.schema")
    def _set_table(self, schema, column, table):
        if self.native_enum:
            SchemaType._set_table(self, column, table)
        # Constraint is always attached; _create_rule decides at DDL time
        # whether it is rendered.
        e = schema.CheckConstraint(
            type_coerce(column, self).in_(self.enums),
            name=self.name,
            _create_rule=util.portable_instancemethod(
                self._should_create_constraint)
        )
        table.append_constraint(e)

    def adapt(self, impltype, **kw):
        schema = kw.pop('schema', self.schema)
        metadata = kw.pop('metadata', self.metadata)
        if issubclass(impltype, Enum):
            return impltype(name=self.name,
                            schema=schema,
                            metadata=metadata,
                            convert_unicode=self.convert_unicode,
                            native_enum=self.native_enum,
                            inherit_schema=self.inherit_schema,
                            *self.enums,
                            **kw
                            )
        else:
            return super(Enum, self).adapt(impltype, **kw)
class PickleType(TypeDecorator):
    """Holds Python objects serialized with pickle, stored via
    :class:`.LargeBinary`."""

    impl = LargeBinary

    def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
                 pickler=None, comparator=None):
        """Construct a PickleType.

        :param protocol: pickle protocol; defaults to
          ``pickle.HIGHEST_PROTOCOL``.
        :param pickler: object with pickle-compatible ``dumps``/``loads``
          methods; defaults to the pickle module.
        :param comparator: optional 2-arg equality predicate; when None,
          ``==`` is used.
        """
        self.protocol = protocol
        self.pickler = pickler or pickle
        self.comparator = comparator
        super(PickleType, self).__init__()

    def __reduce__(self):
        # The custom pickler is dropped (passed as None) when the type
        # itself is pickled; __init__ restores the default on load.
        return PickleType, (self.protocol,
                            None,
                            self.comparator)

    def bind_processor(self, dialect):
        # Serialize, then hand off to the impl's processor if present.
        impl_processor = self.impl.bind_processor(dialect)
        dumps = self.pickler.dumps
        protocol = self.protocol
        if impl_processor:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return impl_processor(value)
        else:
            def process(value):
                if value is not None:
                    value = dumps(value, protocol)
                return value
        return process

    def result_processor(self, dialect, coltype):
        # Run the impl's processor first (if any), then deserialize.
        impl_processor = self.impl.result_processor(dialect, coltype)
        loads = self.pickler.loads
        if impl_processor:
            def process(value):
                value = impl_processor(value)
                if value is None:
                    return None
                return loads(value)
        else:
            def process(value):
                if value is None:
                    return None
                return loads(value)
        return process

    def compare_values(self, x, y):
        # Custom comparator takes precedence; otherwise plain equality.
        if self.comparator:
            return self.comparator(x, y)
        else:
            return x == y
class Boolean(TypeEngine, SchemaType):
__visit_name__ = 'boolean'
def __init__(self, create_constraint=True, name=None):
self.create_constraint = create_constraint
self.name = name
def _should_create_constraint(self, compiler):
return not compiler.dialect.supports_native_boolean
@util.dependencies("sqlalchemy.sql.schema")
def _set_table(self, schema, column, table):
if not self.create_constraint:
return
e = schema.CheckConstraint(
type_coerce(column, self).in_([0, 1]),
name=self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint)
)
table.append_constraint(e)
@property
def python_type(self):
return bool
def bind_processor(self, dialect):
if dialect.supports_native_boolean:
return None
else:
return processors.boolean_to_int
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
class Interval(_DateAffinity, TypeDecorator):
impl = DateTime
epoch = dt.datetime.utcfromtimestamp(0)
def __init__(self, native=True,
second_precision=None,
day_precision=None):
super(Interval, self).__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
def adapt(self, cls, **kw):
if self.native and hasattr(cls, '_adapt_from_generic_interval'):
return cls._adapt_from_generic_interval(self, **kw)
else:
return self.__class__(
native=self.native,
second_precision=self.second_precision,
day_precision=self.day_precision,
**kw)
@property
def python_type(self):
return dt.timedelta
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
def process(value):
if value is not None:
value = epoch + value
return impl_processor(value)
else:
def process(value):
if value is not None:
value = epoch + value
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return value - epoch
else:
def process(value):
if value is None:
return None
return value - epoch
return process
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: DateTime,
Interval: self.__class__,
DateTime: DateTime,
Time: Time,
},
operators.sub: {
Interval: self.__class__
},
operators.mul: {
Numeric: self.__class__
},
operators.truediv: {
Numeric: self.__class__
},
operators.div: {
Numeric: self.__class__
}
}
@property
def _type_affinity(self):
return Interval
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
class REAL(Float):
__visit_name__ = 'REAL'
class FLOAT(Float):
__visit_name__ = 'FLOAT'
class NUMERIC(Numeric):
__visit_name__ = 'NUMERIC'
class DECIMAL(Numeric):
__visit_name__ = 'DECIMAL'
class INTEGER(Integer):
__visit_name__ = 'INTEGER'
INT = INTEGER
class SMALLINT(SmallInteger):
__visit_name__ = 'SMALLINT'
class BIGINT(BigInteger):
__visit_name__ = 'BIGINT'
class TIMESTAMP(DateTime):
__visit_name__ = 'TIMESTAMP'
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
class DATETIME(DateTime):
__visit_name__ = 'DATETIME'
class DATE(Date):
__visit_name__ = 'DATE'
class TIME(Time):
__visit_name__ = 'TIME'
class TEXT(Text):
__visit_name__ = 'TEXT'
class CLOB(Text):
__visit_name__ = 'CLOB'
class VARCHAR(String):
__visit_name__ = 'VARCHAR'
class NVARCHAR(Unicode):
__visit_name__ = 'NVARCHAR'
class CHAR(String):
__visit_name__ = 'CHAR'
class NCHAR(Unicode):
__visit_name__ = 'NCHAR'
class BLOB(LargeBinary):
__visit_name__ = 'BLOB'
class BINARY(_Binary):
__visit_name__ = 'BINARY'
class VARBINARY(_Binary):
__visit_name__ = 'VARBINARY'
class BOOLEAN(Boolean):
__visit_name__ = 'BOOLEAN'
class NullType(TypeEngine):
__visit_name__ = 'null'
_isnull = True
def literal_processor(self, dialect):
def process(value):
return "NULL"
return process
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if isinstance(other_comparator, NullType.Comparator) or \
not operators.is_commutative(op):
return op, self.expr.type
else:
return other_comparator._adapt_expression(op, self)
comparator_factory = Comparator
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()
_type_map = {
int: Integer(),
float: Numeric(),
bool: BOOLEANTYPE,
decimal.Decimal: Numeric(),
dt.date: Date(),
dt.datetime: DateTime(),
dt.time: Time(),
dt.timedelta: Interval(),
util.NoneType: NULLTYPE
}
if util.py3k:
_type_map[bytes] = LargeBinary()
_type_map[str] = Unicode()
else:
_type_map[unicode] = Unicode()
_type_map[str] = String()
# back-assign to type_api
from . import type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api._type_map = _type_map
# this one, there's all kinds of ways to play it, but at the EOD
# the expression element system, as you might expect. We can use
# importlaters or whatnot, but the typing system just necessarily has
# to have some kind of connection like this. right now we're injecting the
class Comparator(_DefaultColumnComparator):
BOOLEANTYPE = BOOLEANTYPE
TypeEngine.Comparator.__bases__ = (Comparator, ) + TypeEngine.Comparator.__bases__
| true
| true
|
1c475b01d3f2a15d38e7166284a6e4891d718fa6
| 4,466
|
py
|
Python
|
tinkt/cmap_utils.py
|
claydodo/tinkt
|
dfd07fe7cad34c0d5a1ec0e03a6437a502410918
|
[
"Unlicense"
] | null | null | null |
tinkt/cmap_utils.py
|
claydodo/tinkt
|
dfd07fe7cad34c0d5a1ec0e03a6437a502410918
|
[
"Unlicense"
] | null | null | null |
tinkt/cmap_utils.py
|
claydodo/tinkt
|
dfd07fe7cad34c0d5a1ec0e03a6437a502410918
|
[
"Unlicense"
] | null | null | null |
# -*- coding:utf-8 -*-
# cmap utils
import six
import numpy as np
from matplotlib import cm as mpl_cm
from matplotlib import colors as mpl_colors
from . import cm as tinkt_cm
CM_FAMILIES = {
'mpl': mpl_cm,
'tinkt': tinkt_cm
}
def set_under_over_bad_colors(cmap, under=None, over=None, bad=None):
if under is not None:
cmap.set_under(under)
if over is not None:
cmap.set_over(over)
if bad is not None:
cmap.set_bad(bad)
return cmap
def get_cmap(base_cmap,
clip_min=None, clip_max=None,
N=None,
sample_points=None,
bad=None, over=None, under=None,
*args, **kwargs):
"""
Get cmap object by name, and optionally tweak it into a new one.
Currently only supports tweaking of continuous cmaps.
:param base_cmap: either a name or a cmap object.
:param clip_min: lower clip point, valid range: 0.0~1.0, default: None.
:param clip_max: upper clip point, valid range: 0.0~1.0, default: None.
:param N: new cmap's color number, default: None (inherits from base_cmap).
:param sample_points: a series of sampling points (0.0~1.0) on the base_cmap. When using this arg, clip_min, clip_max and N are ignored.
:param bad: bad color, default None (inherits from base_cmap)
:param over: over color, default None (inherits from base_cmap)
:param under: under color, default None (inherits from base_cmap)
:return: a cmap object (matplotlib.colors.Colormap)
"""
if isinstance(base_cmap, tuple):
# The tuple-form is for compatibility of old codes using metlib.color.cmap_utils.get_cmap , which read opts from json file.
# Please neglect the complex logics and use named args whenever possible.
return _parse_tuple_form_args_for_get_cmap(base_cmap)
if isinstance(base_cmap, six.string_types):
for cm_family in CM_FAMILIES.values():
try:
base_cmap = getattr(cm_family, base_cmap)
break
except AttributeError:
pass
if not isinstance(base_cmap, mpl_colors.Colormap):
raise RuntimeError(u'Cannot find base_cmap: {}'.format(base_cmap))
if sample_points is not None:
new_name = u'Resampled from {}'.format(base_cmap.name)
new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
elif clip_min is not None or clip_max is not None:
clip_min = 0.0 if clip_min is None else float(clip_min)
clip_max = 0.0 if clip_max is None else float(clip_max)
N = base_cmap.N if N is None else int(N)
sample_points = np.linspace(clip_min, clip_max, N)
new_name = u'Clipped from {}'.format(base_cmap.name)
new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
else:
N = int(N) if N is not None else base_cmap.N
new_cmap = base_cmap._resample(N)
if bad is not None:
new_cmap.set_bad(bad)
elif base_cmap._rgba_bad:
new_cmap.set_bad(base_cmap._rgba_bad)
if over is not None:
new_cmap.set_over(over)
elif base_cmap._rgba_over:
new_cmap.set_over(base_cmap._rgba_over)
if under is not None:
new_cmap.set_under(under)
elif base_cmap._rgba_under:
new_cmap.set_under(base_cmap._rgba_under)
return new_cmap
def _parse_tuple_form_args_for_get_cmap(opts):
# The tuple-form is for compatibility of old codes using metlib.color.cmap_utils.get_cmap, which read opts from json file.
if len(opts) == 1:
return get_cmap(opts[0])
elif len(opts) == 2:
if isinstance(opts[1], (tuple, list, np.ndarray)):
if len(opts[1]) == 0:
return get_cmap(opts[0])
elif len(opts[1]) == 1:
if isinstance(opts[1][0], (tuple, list, np.ndarray)):
return get_cmap(opts[0], sample_points=opts[1][0])
else:
raise ValueError("")
elif len(opts[1]) == 2:
clip_min, clip_max = opts[1]
N = None
elif len(opts[1]) == 3:
clip_min, clip_max, N = opts[1]
else:
return get_cmap(opts[0], sample_points=opts[1])
return get_cmap(opts[0], clip_min=clip_min, clip_max=clip_max, N=N)
else:
raise ValueError("")
else:
raise ValueError("")
| 36.606557
| 140
| 0.638155
|
import six
import numpy as np
from matplotlib import cm as mpl_cm
from matplotlib import colors as mpl_colors
from . import cm as tinkt_cm
CM_FAMILIES = {
'mpl': mpl_cm,
'tinkt': tinkt_cm
}
def set_under_over_bad_colors(cmap, under=None, over=None, bad=None):
if under is not None:
cmap.set_under(under)
if over is not None:
cmap.set_over(over)
if bad is not None:
cmap.set_bad(bad)
return cmap
def get_cmap(base_cmap,
clip_min=None, clip_max=None,
N=None,
sample_points=None,
bad=None, over=None, under=None,
*args, **kwargs):
if isinstance(base_cmap, tuple):
return _parse_tuple_form_args_for_get_cmap(base_cmap)
if isinstance(base_cmap, six.string_types):
for cm_family in CM_FAMILIES.values():
try:
base_cmap = getattr(cm_family, base_cmap)
break
except AttributeError:
pass
if not isinstance(base_cmap, mpl_colors.Colormap):
raise RuntimeError(u'Cannot find base_cmap: {}'.format(base_cmap))
if sample_points is not None:
new_name = u'Resampled from {}'.format(base_cmap.name)
new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
elif clip_min is not None or clip_max is not None:
clip_min = 0.0 if clip_min is None else float(clip_min)
clip_max = 0.0 if clip_max is None else float(clip_max)
N = base_cmap.N if N is None else int(N)
sample_points = np.linspace(clip_min, clip_max, N)
new_name = u'Clipped from {}'.format(base_cmap.name)
new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))
else:
N = int(N) if N is not None else base_cmap.N
new_cmap = base_cmap._resample(N)
if bad is not None:
new_cmap.set_bad(bad)
elif base_cmap._rgba_bad:
new_cmap.set_bad(base_cmap._rgba_bad)
if over is not None:
new_cmap.set_over(over)
elif base_cmap._rgba_over:
new_cmap.set_over(base_cmap._rgba_over)
if under is not None:
new_cmap.set_under(under)
elif base_cmap._rgba_under:
new_cmap.set_under(base_cmap._rgba_under)
return new_cmap
def _parse_tuple_form_args_for_get_cmap(opts):
if len(opts) == 1:
return get_cmap(opts[0])
elif len(opts) == 2:
if isinstance(opts[1], (tuple, list, np.ndarray)):
if len(opts[1]) == 0:
return get_cmap(opts[0])
elif len(opts[1]) == 1:
if isinstance(opts[1][0], (tuple, list, np.ndarray)):
return get_cmap(opts[0], sample_points=opts[1][0])
else:
raise ValueError("")
elif len(opts[1]) == 2:
clip_min, clip_max = opts[1]
N = None
elif len(opts[1]) == 3:
clip_min, clip_max, N = opts[1]
else:
return get_cmap(opts[0], sample_points=opts[1])
return get_cmap(opts[0], clip_min=clip_min, clip_max=clip_max, N=N)
else:
raise ValueError("")
else:
raise ValueError("")
| true
| true
|
1c475e064511372aa11c413ea6aad9da5ab26d2e
| 10,185
|
py
|
Python
|
test_nfc.py
|
tnoumar/ST-M24SR64-NFC
|
6f5b2ec574fb51d3ffc458b562eb0f6df657a6a4
|
[
"MIT"
] | null | null | null |
test_nfc.py
|
tnoumar/ST-M24SR64-NFC
|
6f5b2ec574fb51d3ffc458b562eb0f6df657a6a4
|
[
"MIT"
] | null | null | null |
test_nfc.py
|
tnoumar/ST-M24SR64-NFC
|
6f5b2ec574fb51d3ffc458b562eb0f6df657a6a4
|
[
"MIT"
] | null | null | null |
# Author: Taha NOUMAR tnoumar@enseirb-matmeca.fr
# DATA SHEETS
# https://www.st.com/resource/en/datasheet/m24sr64-y.pdf
# CONFIGURATION
# tag type: M24SR64Y
# eeprom size: 64KBit
# I2C address: 0x56
import machine
import binascii
import utime
def byte0(b):
return b & 0x00FF
def byte1(b):
return (b & 0xFF00) >> 8
class NFCTag():
I2C_ADDRESS_7BIT = 0x56
SYSTEM = 0xE101
CC = 0xE103
NDEF = 0x0001
NDEF_HEADER=[0xd1, 0x01, 0x00, 0x54, 0x02, 0x65, 0x6e]
verbose = True # not to supercharge the user's console
def __init__(self, i2c):
self.i2c = i2c
self.addr = self.I2C_ADDRESS_7BIT
def wait(self, msg):
''' Wait a certain amount of time between operations'''
utime.sleep_ms(500)
if self.verbose:
print("\n" + str(msg))
def write(self, data, crc=False):
"""Write a string of data bytes, with optional CRC"""
if crc:
crc0, crc1 = CRC.compute(data)
data.append(crc0)
data.append(crc1)
data_hex = ""
for i in range(len(data)):
data_hex += hex(data[i]) + " "
print("i2c write: [AC] " + data_hex)
result = self.i2c.writeto(self.addr, bytes(data))
print("write:" + str(result))
if result == 0:
raise RuntimeError("write result:" + str(result))
def read(self, len, checkCrc=False):
"""read a string of data bytes, with optional CRC checking"""
data = bytearray(len)
result = self.i2c.readfrom_into(0x56, data)
if checkCrc:
raise RuntimeError("CRC checking not yet written")
#print("read:" + str(data))
# print('type of data is'+type(data))
# if len(data) == 0:
# raise RuntimeError("read result:" + len(str(data)))
return data
def killRFSelectI2C(self):
"""Kill off any RF session and open an I2C session"""
# tx: [0xAC] 0x52
# rx: TODO
self.wait("Selecting I2C, deselecting RF ...")
self.write([0x52])
def selectNFCT4Application(self, pcb=0x02):
"""Select the NFC app"""
# tx: [0xAC] 0x02 0x00 0xA4 0x04 0x00 0x07 0xD2 0x76 0x00 0x00 0x85 0x01 0x01 0x00 [0x35 0xC0]
# rx: [0xAD] 0x02 0x90 0x00 [0xF1 0x09]
self.write([pcb, 0x00, 0xA4, 0x04, 0x00, 0x07, 0xD2, 0x76,
0x00, 0x00, 0x85, 0x01, 0x01, 0x00], crc=True)
self.wait('Selecting NFC APP ...')
result = self.read(5)
return result
def selectFile(self, fileId, pcb=0x02):
"""Select a nominated file"""
# tx: [0xAC] 0x03 0x00 0xA4 0x00 0x0c 0x02 (0xE101) 0xCCCC
# rx: TODO
self.write([pcb, 0x00, 0xA4, 0x00, 0x0C, 0x02,
byte1(fileId), byte0(fileId)], crc=True)
self.wait('Selecting file ...')
result = self.read(5)
return result
def readBinary(self, offset, length, pcb=0x02):
"""Read binary from the currently selected file"""
# read length
# tx: [0xAD] 0x03 0x00 0xB0 (0x00 0x00) (0x02) 0xCCCC
# rx: TODO
self.write([pcb, 0x00, 0xB0, byte1(offset),
byte0(offset), byte0(length)], crc=True)
self.wait('Reading binary ...')
result = self.read(length+5)
print("readBinary:" + str(result))
return result
def updateBinaryLength(self, data, pcb=0x03):
""" Update binary length in the currently selected file"""
# tx: ERASE BINARY [AC] 03 00 D6 00 00 02 00 00 6B 37
# rx:
self.write([pcb, 0x00, 0xD6, 0x00, 0x00, 0x02,
byte1(data), byte0(data)], crc=True)
utime.sleep(1)
result = self.read(5)
print("updateBinaryLength:"+str(result))
return result
def updateBinary(self, offset, length, data, pcb=0x02):
""" Update binary data in the currently selected file"""
# UPDATE BINARY with HELLO WORLD e.g.
# tx: 0xAC 0x02 0x00 0xD6 0x00 0x02 0x0B 0x68 0x65 0x6C 0x6C 0x6F 0x20 0x77 0x6F 0x72 0x6C 0x64 0x2F 0xFC
# rx:
payload = self.NDEF_HEADER + data
payload[2] = length - 4
self.write([pcb, 0x00, 0xD6, byte1(offset), byte0(
offset), byte0(length)]+payload, crc=True)
self.wait('Updating Binary ...')
result = self.read(5)
print("updateBinary: "+str(result))
return result
def deselect(self):
"""Deselect the I2C (allow RF to come in again)"""
# deselect
# tx: [0xAC] 0xC2 0xE0 B4
# rx: 0xC2 0xE0 0xB4
self.write([0xC2], crc=True)
self.wait('Deselecting I2C, selecting RF ')
result = self.read(3)
return result
def readNDEFFile(self):
'''
select I2C
select NFC application
select CC
read CC file and length
select NDEF file
read NDEF length
read NDEF file
'''
self.killRFSelectI2C()
self.selectNFCT4Application()
self.selectFile(self.CC, pcb=0x03)
data = self.readBinary(0x0000, 0x02, pcb=0x02)
data = self.readBinary(0x0000, 0x0F, pcb=0x03)
self.selectFile(self.NDEF, pcb=0x02)
data = self.readBinary(0x0000, 0x02, pcb=0x03)
ndef_len = (data[1]*256) + data[2]
print("NDEF len:" + str(ndef_len))
data = self.readBinary(0x0002, ndef_len, pcb=0x02)
ndef = data[8:-4]
s = ""
for i in range(len(ndef)):
s += chr(ndef[i])
print("ndef message:" + s)
return s
def eraseNDEFFile(self):
'''
select I2C
select NFC application
select CC
read CC file and length
select NDEF file
set NDEF length to 0
'''
self.killRFSelectI2C()
self.selectNFCT4Application()
self.selectFile(self.CC, pcb=0x03)
data = self.readBinary(0x0000, 0x02, pcb=0x02)
data = self.readBinary(0x0000, 0x0F, pcb=0x03)
self.selectFile(self.NDEF, pcb=0x02)
try:
data = self.updateBinaryLength(0)
print("File erased successfully")
except:
print("error while erasing file")
def writeNDEFFile(self, text):
'''
erase NDEF length
update NDEF message
set new NDEF length
deselect I2C
'''
self.eraseNDEFFile()
# Write hello world in the tag
print("Storing " + text + " in NDEF message")
hex_text = binascii.hexlify(text.encode('utf8'))
hex_list = [0x00 for i in range(0, int((len(hex_text)/2)))]
for i in range(0, int((len(hex_text)/2))):
hex_list[i] = int("0x"+str(hex_text[2*i:2*(i+1)]
).replace("b'", "").replace("'", ""))
data = self.updateBinary(0x0002, len(text), hex_list)
utime.sleep(1)
try:
data = self.updateBinaryLength(len(text))
print("File written successfully")
except:
print("error while writing file")
print("deselecting I2C")
self.deselect()
utime.sleep(2)
# PCB means "protocol control byte",
# Takes 0x02 or 0x03
# CLA is class byte (always 0x00 for these apps)
# INS is the instruction to send
# P1 P2 are parameter 1 and 2,
# Lc is length of command
# Data is the payload of the command
# Le is the length of expected response
# CRC2 is the cyclic redundancy check bytes
#Structure of NDEF message (NFC Data Exchange Format) ########################################################
# Byte 0 Byte 1 Byte 2 Byte 3
# 0x0000 NDEF message length User data User data
# 0x0004 User data User data User data User data
# ... ... ... ... ...
##############################################################################################################
# COMMANDS
# SEL PCB CLA INS P1 P2 Lc Data Le CRC2
# kill RF session, open I2C 0xAC 0x52
# select system file 0xAC 0x02 0x00 0xA4 0x00 0x0c 0x02 0xE101 0xCCCC
# read length 0xAD 0x03 0x00 0xB0 0x00 0x00 0x02 0xCCCC
# read memsize 0xAD 0x03 0x00 0xB0 0x00 0x0F 0x02 0xCCCC
# deselect (Kill I2C, open RF) 0xAC 0xC2 0xE0 0xB4
# erase NDEF len 0xAC 0x03 0x00 0xD6 0x00 0x00 0x02 0x00 0x00 0x6B 0x37
# write HELLO WORLD in tag 0xAC 0x02 0x00 0xD6 0x00 0x02 0x0B 0x68 0x65 0x6C 0x6C 0x6F 0x20 0x77 0x6F 0x72 0x6C 0x64 0x2F 0xFC
#####################################################################################################################################################
class CRC():
def __init__(self, initial=0x6363):
# initialize CRC OBJ
self.initial = initial
def start(self):
self.crc = self.initial
def update(self, data):
# update hex entries for CRC computation
datain = data
data = data ^ ((self.crc) & 0x00FF)
data = data ^ ((data << 4) & 0x00FF)
self.crc = (self.crc >> 8) \
^ (data << 8) \
^ (data << 3) \
^ (data >> 4)
self.crc = self.crc & 0xFFFF
return self.crc
def getCRC(self):
return (self.crc & 0xFF), ((self.crc & 0xFF00) >> 8)
def compute(block):
c = CRC()
c.start()
for i in range(len(block)):
c.update(block[i])
crc0, crc1 = c.getCRC()
return crc0, crc1
tag = NFCTag(machine.I2C(1))
print('(before) text in the tag is '+tag.readNDEFFile())
tag.eraseNDEFFile()
print('text in the tag is '+tag.readNDEFFile())
while True:
pass
| 34.880137
| 149
| 0.525282
|
import machine
import binascii
import utime
def byte0(b):
return b & 0x00FF
def byte1(b):
return (b & 0xFF00) >> 8
class NFCTag():
I2C_ADDRESS_7BIT = 0x56
SYSTEM = 0xE101
CC = 0xE103
NDEF = 0x0001
NDEF_HEADER=[0xd1, 0x01, 0x00, 0x54, 0x02, 0x65, 0x6e]
verbose = True
def __init__(self, i2c):
self.i2c = i2c
self.addr = self.I2C_ADDRESS_7BIT
def wait(self, msg):
utime.sleep_ms(500)
if self.verbose:
print("\n" + str(msg))
def write(self, data, crc=False):
if crc:
crc0, crc1 = CRC.compute(data)
data.append(crc0)
data.append(crc1)
data_hex = ""
for i in range(len(data)):
data_hex += hex(data[i]) + " "
print("i2c write: [AC] " + data_hex)
result = self.i2c.writeto(self.addr, bytes(data))
print("write:" + str(result))
if result == 0:
raise RuntimeError("write result:" + str(result))
def read(self, len, checkCrc=False):
data = bytearray(len)
result = self.i2c.readfrom_into(0x56, data)
if checkCrc:
raise RuntimeError("CRC checking not yet written")
#print("read:" + str(data))
# print('type of data is'+type(data))
# if len(data) == 0:
# raise RuntimeError("read result:" + len(str(data)))
return data
def killRFSelectI2C(self):
# tx: [0xAC] 0x52
# rx: TODO
self.wait("Selecting I2C, deselecting RF ...")
self.write([0x52])
def selectNFCT4Application(self, pcb=0x02):
# tx: [0xAC] 0x02 0x00 0xA4 0x04 0x00 0x07 0xD2 0x76 0x00 0x00 0x85 0x01 0x01 0x00 [0x35 0xC0]
# rx: [0xAD] 0x02 0x90 0x00 [0xF1 0x09]
self.write([pcb, 0x00, 0xA4, 0x04, 0x00, 0x07, 0xD2, 0x76,
0x00, 0x00, 0x85, 0x01, 0x01, 0x00], crc=True)
self.wait('Selecting NFC APP ...')
result = self.read(5)
return result
def selectFile(self, fileId, pcb=0x02):
# tx: [0xAC] 0x03 0x00 0xA4 0x00 0x0c 0x02 (0xE101) 0xCCCC
# rx: TODO
self.write([pcb, 0x00, 0xA4, 0x00, 0x0C, 0x02,
byte1(fileId), byte0(fileId)], crc=True)
self.wait('Selecting file ...')
result = self.read(5)
return result
def readBinary(self, offset, length, pcb=0x02):
# read length
# tx: [0xAD] 0x03 0x00 0xB0 (0x00 0x00) (0x02) 0xCCCC
# rx: TODO
self.write([pcb, 0x00, 0xB0, byte1(offset),
byte0(offset), byte0(length)], crc=True)
self.wait('Reading binary ...')
result = self.read(length+5)
print("readBinary:" + str(result))
return result
def updateBinaryLength(self, data, pcb=0x03):
# tx: ERASE BINARY [AC] 03 00 D6 00 00 02 00 00 6B 37
# rx:
self.write([pcb, 0x00, 0xD6, 0x00, 0x00, 0x02,
byte1(data), byte0(data)], crc=True)
utime.sleep(1)
result = self.read(5)
print("updateBinaryLength:"+str(result))
return result
def updateBinary(self, offset, length, data, pcb=0x02):
# UPDATE BINARY with HELLO WORLD e.g.
# tx: 0xAC 0x02 0x00 0xD6 0x00 0x02 0x0B 0x68 0x65 0x6C 0x6C 0x6F 0x20 0x77 0x6F 0x72 0x6C 0x64 0x2F 0xFC
# rx:
payload = self.NDEF_HEADER + data
payload[2] = length - 4
self.write([pcb, 0x00, 0xD6, byte1(offset), byte0(
offset), byte0(length)]+payload, crc=True)
self.wait('Updating Binary ...')
result = self.read(5)
print("updateBinary: "+str(result))
return result
def deselect(self):
# deselect
# tx: [0xAC] 0xC2 0xE0 B4
# rx: 0xC2 0xE0 0xB4
self.write([0xC2], crc=True)
self.wait('Deselecting I2C, selecting RF ')
result = self.read(3)
return result
def readNDEFFile(self):
self.killRFSelectI2C()
self.selectNFCT4Application()
self.selectFile(self.CC, pcb=0x03)
data = self.readBinary(0x0000, 0x02, pcb=0x02)
data = self.readBinary(0x0000, 0x0F, pcb=0x03)
self.selectFile(self.NDEF, pcb=0x02)
data = self.readBinary(0x0000, 0x02, pcb=0x03)
ndef_len = (data[1]*256) + data[2]
print("NDEF len:" + str(ndef_len))
data = self.readBinary(0x0002, ndef_len, pcb=0x02)
ndef = data[8:-4]
s = ""
for i in range(len(ndef)):
s += chr(ndef[i])
print("ndef message:" + s)
return s
def eraseNDEFFile(self):
self.killRFSelectI2C()
self.selectNFCT4Application()
self.selectFile(self.CC, pcb=0x03)
data = self.readBinary(0x0000, 0x02, pcb=0x02)
data = self.readBinary(0x0000, 0x0F, pcb=0x03)
self.selectFile(self.NDEF, pcb=0x02)
try:
data = self.updateBinaryLength(0)
print("File erased successfully")
except:
print("error while erasing file")
def writeNDEFFile(self, text):
self.eraseNDEFFile()
# Write hello world in the tag
print("Storing " + text + " in NDEF message")
hex_text = binascii.hexlify(text.encode('utf8'))
hex_list = [0x00 for i in range(0, int((len(hex_text)/2)))]
for i in range(0, int((len(hex_text)/2))):
hex_list[i] = int("0x"+str(hex_text[2*i:2*(i+1)]
).replace("b'", "").replace("'", ""))
data = self.updateBinary(0x0002, len(text), hex_list)
utime.sleep(1)
try:
data = self.updateBinaryLength(len(text))
print("File written successfully")
except:
print("error while writing file")
print("deselecting I2C")
self.deselect()
utime.sleep(2)
# PCB means "protocol control byte",
# Takes 0x02 or 0x03
# CLA is class byte (always 0x00 for these apps)
# INS is the instruction to send
# P1 P2 are parameter 1 and 2,
# Lc is length of command
# Data is the payload of the command
# Le is the length of expected response
# CRC2 is the cyclic redundancy check bytes
#Structure of NDEF message (NFC Data Exchange Format) ########################################################
# Byte 0 Byte 1 Byte 2 Byte 3
# 0x0000 NDEF message length User data User data
# 0x0004 User data User data User data User data
# ... ... ... ... ...
##############################################################################################################
# COMMANDS
# SEL PCB CLA INS P1 P2 Lc Data Le CRC2
# kill RF session, open I2C 0xAC 0x52
# select system file 0xAC 0x02 0x00 0xA4 0x00 0x0c 0x02 0xE101 0xCCCC
# read length 0xAD 0x03 0x00 0xB0 0x00 0x00 0x02 0xCCCC
# read memsize 0xAD 0x03 0x00 0xB0 0x00 0x0F 0x02 0xCCCC
# deselect (Kill I2C, open RF) 0xAC 0xC2 0xE0 0xB4
# erase NDEF len 0xAC 0x03 0x00 0xD6 0x00 0x00 0x02 0x00 0x00 0x6B 0x37
# write HELLO WORLD in tag 0xAC 0x02 0x00 0xD6 0x00 0x02 0x0B 0x68 0x65 0x6C 0x6C 0x6F 0x20 0x77 0x6F 0x72 0x6C 0x64 0x2F 0xFC
#####################################################################################################################################################
class CRC():
def __init__(self, initial=0x6363):
# initialize CRC OBJ
self.initial = initial
def start(self):
self.crc = self.initial
def update(self, data):
# update hex entries for CRC computation
datain = data
data = data ^ ((self.crc) & 0x00FF)
data = data ^ ((data << 4) & 0x00FF)
self.crc = (self.crc >> 8) \
^ (data << 8) \
^ (data << 3) \
^ (data >> 4)
self.crc = self.crc & 0xFFFF
return self.crc
def getCRC(self):
return (self.crc & 0xFF), ((self.crc & 0xFF00) >> 8)
def compute(block):
c = CRC()
c.start()
for i in range(len(block)):
c.update(block[i])
crc0, crc1 = c.getCRC()
return crc0, crc1
tag = NFCTag(machine.I2C(1))
print('(before) text in the tag is '+tag.readNDEFFile())
tag.eraseNDEFFile()
print('text in the tag is '+tag.readNDEFFile())
while True:
pass
| true
| true
|
1c475e204df91f662e807804eaf4a475b120362c
| 18,766
|
py
|
Python
|
OgreVertexBuffer.py
|
lamogui/ogre_blender_importer
|
4742e27909f57598889bdfa8a956001c6776d056
|
[
"MIT"
] | 13
|
2016-01-23T08:00:34.000Z
|
2022-02-16T10:27:08.000Z
|
OgreVertexBuffer.py
|
lamogui/ogre_blender_importer
|
4742e27909f57598889bdfa8a956001c6776d056
|
[
"MIT"
] | 3
|
2016-09-20T15:22:28.000Z
|
2021-05-31T01:25:05.000Z
|
OgreVertexBuffer.py
|
lamogui/ogre_blender_importer
|
4742e27909f57598889bdfa8a956001c6776d056
|
[
"MIT"
] | 9
|
2016-07-13T23:23:55.000Z
|
2022-03-24T21:22:53.000Z
|
from enum import IntEnum;
from struct import unpack_from;
try:
from OgreHardwareBuffer import OgreFakeHardwareBuffer
except ImportError as e:
directory = os.path.dirname(os.path.realpath(__file__));
print("Import error: " + str(e) + " manual compilation" );
srcfile="OgreHardwareBuffer.py"; exec(compile(open(os.path.join(directory,srcfile)).read(), srcfile, 'exec'))
class OgreVertexBuffer(OgreFakeHardwareBuffer):
"""
Just a class to simulate a graphic card memory buffer
"""
def __init__(self, vertexSize, numVertices):
OgreFakeHardwareBuffer.__init__(self);
self._vertexSize = vertexSize;
self._numVertices = numVertices;
@property
def vertexSize(self):
return self._vertexSize;
@property
def numVertices(self):
return self._numVertices;
@property
def sizeInBytes(self):
return self.vertexSize * self.numVertices;
class OgreVertexElementSemantic(IntEnum):
"""
Vertex element semantics, used to identify the meaning of vertex buffer contents
"""
VES_UNKNOWN = 0;
# Position, 3 reals per vertex
VES_POSITION = 1;
# Blending weights
VES_BLEND_WEIGHTS = 2;
# Blending indices
VES_BLEND_INDICES = 3;
# Normal, 3 reals per vertex
VES_NORMAL = 4;
# Diffuse colours
VES_DIFFUSE = 5;
# Specular colours
VES_SPECULAR = 6;
# Texture coordinates
VES_TEXTURE_COORDINATES = 7;
# Binormal (Y axis if normal is Z)
VES_BINORMAL = 8;
# Tangent (X axis if normal is Z)
VES_TANGENT = 9;
# The number of VertexElementSemantic elements (note - the first value VES_POSITION is 1)
VES_COUNT = 9;
def toStr(ves):
if (ves==OgreVertexElementSemantic.VES_UNKNOWN):
return "VES_UNKNOWN";
elif (ves==OgreVertexElementSemantic.VES_POSITION):
return "VES_POSITION";
elif (ves==OgreVertexElementSemantic.VES_BLEND_WEIGHTS):
return "VES_BLEND_WEIGHTS";
elif (ves==OgreVertexElementSemantic.VES_BLEND_INDICES):
return "VES_BLEND_INDICES";
elif (ves==OgreVertexElementSemantic.VES_NORMAL):
return "VES_NORMAL";
elif (ves==OgreVertexElementSemantic.VES_DIFFUSE):
return "VES_DIFFUSE";
elif (ves==OgreVertexElementSemantic.VES_SPECULAR):
return "VES_SPECULAR";
elif (ves==OgreVertexElementSemantic.VES_TEXTURE_COORDINATES):
return "VES_TEXTURE_COORDINATES";
elif (ves==OgreVertexElementSemantic.VES_BINORMAL):
return "VES_BINORMAL";
elif (ves==OgreVertexElementSemantic.VES_TANGENT):
return "VES_TANGENT";
elif (ves==OgreVertexElementSemantic.VES_COUNT):
return "VES_COUNT";
class OgreVertexElementType(IntEnum):
"""
Vertex element type, used to identify the base types of the vertex contents
"""
VET_FLOAT1 = 0;
VET_FLOAT2 = 1;
VET_FLOAT3 = 2;
VET_FLOAT4 = 3;
# alias to more specific colour type - use the current rendersystem's colour packing
VET_COLOUR = 4;
VET_SHORT1 = 5;
VET_SHORT2 = 6;
VET_SHORT3 = 7;
VET_SHORT4 = 8;
VET_UBYTE4 = 9;
# D3D style compact colour
VET_COLOUR_ARGB = 10;
# GL style compact colour
VET_COLOUR_ABGR = 11;
VET_DOUBLE1 = 12;
VET_DOUBLE2 = 13;
VET_DOUBLE3 = 14;
VET_DOUBLE4 = 15;
VET_USHORT1 = 16;
VET_USHORT2 = 17;
VET_USHORT3 = 18;
VET_USHORT4 = 19;
VET_INT1 = 20;
VET_INT2 = 21;
VET_INT3 = 22;
VET_INT4 = 23;
VET_UINT1 = 24;
VET_UINT2 = 25;
VET_UINT3 = 26;
VET_UINT4 = 27;
def toStr(vet):
if (vet==OgreVertexElementType.VET_FLOAT1):
return "VET_FLOAT1";
elif (vet==OgreVertexElementType.VET_FLOAT2):
return "VET_FLOAT2";
elif (vet==OgreVertexElementType.VET_FLOAT3):
return "VET_FLOAT3";
elif (vet==OgreVertexElementType.VET_FLOAT4):
return "VET_FLOAT4";
elif (vet==OgreVertexElementType.VET_COLOUR):
return "VET_COLOUR";
elif (vet==OgreVertexElementType.VET_SHORT1):
return "VET_SHORT1";
elif (vet==OgreVertexElementType.VET_SHORT2):
return "VET_SHORT2";
elif (vet==OgreVertexElementType.VET_SHORT3):
return "VET_SHORT3";
elif (vet==OgreVertexElementType.VET_SHORT4):
return "VET_SHORT4";
elif (vet==OgreVertexElementType.VET_USHORT1):
return "VET_USHORT1";
elif (vet==OgreVertexElementType.VET_USHORT2):
return "VET_USHORT2";
elif (vet==OgreVertexElementType.VET_USHORT3):
return "VET_USHORT3";
elif (vet==OgreVertexElementType.VET_USHORT4):
return "VET_USHORT4";
elif (vet==OgreVertexElementType.VET_UBYTE4):
return "VET_UBYTE4";
elif (vet==OgreVertexElementType.VET_COLOUR_ABGR):
return "VET_COLOUR_ABGR";
elif (vet==OgreVertexElementType.VET_COLOUR_ARGB):
return "VET_COLOUR_ARGB";
elif (vet==OgreVertexElementType.VET_DOUBLE1):
return "VET_COLOUR_DOUBLE1";
elif (vet==OgreVertexElementType.VET_DOUBLE2):
return "VET_COLOUR_DOUBLE2";
elif (vet==OgreVertexElementType.VET_DOUBLE3):
return "VET_COLOUR_DOUBLE3";
elif (vet==OgreVertexElementType.VET_DOUBLE4):
return "VET_COLOUR_DOUBLE4";
elif (vet==OgreVertexElementType.VET_INT1):
return "VET_COLOUR_INT1";
elif (vet==OgreVertexElementType.VET_INT2):
return "VET_COLOUR_INT2";
elif (vet==OgreVertexElementType.VET_INT3):
return "VET_COLOUR_INT3";
elif (vet==OgreVertexElementType.VET_INT4):
return "VET_COLOUR_INT4";
elif (vet==OgreVertexElementType.VET_UINT1):
return "VET_COLOUR_UINT1";
elif (vet==OgreVertexElementType.VET_UINT2):
return "VET_COLOUR_UINT2";
elif (vet==OgreVertexElementType.VET_UINT3):
return "VET_COLOUR_UINT3";
elif (vet==OgreVertexElementType.VET_UINT4):
return "VET_COLOUR_UINT4";
class OgreVertexElement:
    """
    Declares the usage of a single vertex buffer as a component of a
    complete VertexDeclaration.
    @remarks
        Several vertex buffers can be used to supply the input geometry for
        a rendering operation; the buffer itself does not define the
        semantics (position, normal etc), this VertexElement class does.
    """
    def __init__(self, source, offset, theType, semantic, index):
        # source:   index of the vertex buffer this element reads from.
        # offset:   byte offset of the element inside one vertex.
        # theType:  an OgreVertexElementType member.
        # semantic: an OgreVertexElementSemantic member.
        # index:    semantic index (e.g. texture coordinate set number).
        # Fixed bug: the original assert tested ``source`` twice and never
        # validated ``offset``.
        assert(type(source) is int and type(offset) is int and type(index) is int);
        self._source = source;
        self._offset = offset;
        self._type = theType;
        self._semantic = semantic;
        self._index = index;
    def getType(self):
        """Return the OgreVertexElementType of this element."""
        return self._type;
    @property
    def semantic(self):
        return self._semantic;
    @property
    def index(self):
        return self._index;
    @property
    def offset(self):
        return self._offset;
    @property
    def source(self):
        return self._source;
    def getTypeSize(t):
        """Return the size in bytes of one element of type ``t`` (0 if unknown)."""
        VET = OgreVertexElementType;
        # packed colours and ubyte4 always occupy 4 bytes
        if (t in (VET.VET_COLOUR, VET.VET_COLOUR_ABGR, VET.VET_COLOUR_ARGB, VET.VET_UBYTE4)):
            return 4;
        if (t in (VET.VET_FLOAT1, VET.VET_FLOAT2, VET.VET_FLOAT3, VET.VET_FLOAT4,
                  VET.VET_INT1, VET.VET_INT2, VET.VET_INT3, VET.VET_INT4,
                  VET.VET_UINT1, VET.VET_UINT2, VET.VET_UINT3, VET.VET_UINT4)):
            return 4 * OgreVertexElement.getTypeCount(t);
        if (t in (VET.VET_DOUBLE1, VET.VET_DOUBLE2, VET.VET_DOUBLE3, VET.VET_DOUBLE4)):
            return 8 * OgreVertexElement.getTypeCount(t);
        if (t in (VET.VET_SHORT1, VET.VET_SHORT2, VET.VET_SHORT3, VET.VET_SHORT4,
                  VET.VET_USHORT1, VET.VET_USHORT2, VET.VET_USHORT3, VET.VET_USHORT4)):
            return 2 * OgreVertexElement.getTypeCount(t);
        # unknown type: keep the original "return 0" contract
        return 0;
    def getTypeCount(t):
        """Return the number of components of type ``t``; raise ValueError if invalid."""
        VET = OgreVertexElementType;
        # packed colour formats count as a single 32-bit component
        if (t in (VET.VET_COLOUR, VET.VET_COLOUR_ABGR, VET.VET_COLOUR_ARGB)):
            return 1;
        counts = {
            VET.VET_FLOAT1: 1, VET.VET_DOUBLE1: 1, VET.VET_SHORT1: 1,
            VET.VET_USHORT1: 1, VET.VET_INT1: 1, VET.VET_UINT1: 1,
            VET.VET_FLOAT2: 2, VET.VET_DOUBLE2: 2, VET.VET_SHORT2: 2,
            VET.VET_USHORT2: 2, VET.VET_INT2: 2, VET.VET_UINT2: 2,
            VET.VET_FLOAT3: 3, VET.VET_DOUBLE3: 3, VET.VET_SHORT3: 3,
            VET.VET_USHORT3: 3, VET.VET_INT3: 3, VET.VET_UINT3: 3,
            VET.VET_FLOAT4: 4, VET.VET_DOUBLE4: 4, VET.VET_SHORT4: 4,
            VET.VET_USHORT4: 4, VET.VET_INT4: 4, VET.VET_UINT4: 4,
        };
        try:
            return counts[t];
        except KeyError:
            # VET_UBYTE4 and unknown values fall through here, as before
            raise ValueError("OgreVertexElement.getTypeCount(type): Invalid type");
    def getTypePythonUnpackStr(t):
        """Return the ``struct`` format characters for one element of type ``t``."""
        VET = OgreVertexElementType;
        if (t in (VET.VET_COLOUR, VET.VET_COLOUR_ABGR, VET.VET_COLOUR_ARGB)):
            raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Color unsupported yet");
        # one struct character per component, repeated getTypeCount times
        families = (
            ('f', (VET.VET_FLOAT1, VET.VET_FLOAT2, VET.VET_FLOAT3, VET.VET_FLOAT4)),
            ('d', (VET.VET_DOUBLE1, VET.VET_DOUBLE2, VET.VET_DOUBLE3, VET.VET_DOUBLE4)),
            ('h', (VET.VET_SHORT1, VET.VET_SHORT2, VET.VET_SHORT3, VET.VET_SHORT4)),
            ('H', (VET.VET_USHORT1, VET.VET_USHORT2, VET.VET_USHORT3, VET.VET_USHORT4)),
            ('i', (VET.VET_INT1, VET.VET_INT2, VET.VET_INT3, VET.VET_INT4)),
            ('I', (VET.VET_UINT1, VET.VET_UINT2, VET.VET_UINT3, VET.VET_UINT4)),
        );
        for ch, members in families:
            if (t in members):
                return ch * OgreVertexElement.getTypeCount(t);
        raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Invalid type");
    def getBestCoulourVertexElementType():
        #Blender use opengl
        return OgreVertexElementType.VET_COLOUR_ABGR;
    def __eq__(self, other):
        # Fixed bug: the original compared ``self._offet`` (typo), which
        # raised AttributeError on every comparison.
        if (self._source == other._source and \
            self._index == other._index and \
            self._offset == other._offset and \
            self._semantic == other._semantic and \
            self._type == other._type):
            return True;
        else:
            return False;
    def getSize(self):
        """Return the size in bytes of this element."""
        return OgreVertexElement.getTypeSize(self._type);
    def extractFromBuffer(self, vertexBufferBinding, dest, endianess):
        """
        Unpack this element's value for every vertex of the bound buffer and
        append one tuple per vertex to ``dest``.
        """
        buf = vertexBufferBinding.getBuffer(self.source);
        #FIXME: the ``endianess`` argument is currently ignored; native
        # byte order ("=") is always used.
        cmd = "=" + OgreVertexElement.getTypePythonUnpackStr(self.getType());
        print(cmd);
        data = buf.data[self.offset:]
        for i in range(buf.numVertices):
            v = unpack_from(cmd, data, i * buf.vertexSize);
            dest.append(v);
class OgreVertexDeclaration:
    """
    Declares the format of a set of vertex inputs, which can be issued to
    the rendering API through a RenderOperation.
    @remarks
        Elements should be ordered (position, blend weights, normals,
        colours, texture coordinates), buffers must have no unused gaps and
        element buffer/offset settings must not overlap, for maximum
        compatibility across render systems.
    """
    def __init__(self):
        self._elementList = [];
    def getElements(self):
        """Return the internal list of OgreVertexElement objects."""
        return self._elementList;
    def addElement(self, source, offset, theType, semantic, index):
        """Append a new element; VET_COLOUR is remapped to the best packed format."""
        if (theType == OgreVertexElementType.VET_COLOUR):
            theType = OgreVertexElement.getBestCoulourVertexElementType();
        self._elementList.append(OgreVertexElement(source,offset,theType,semantic,index));
        return self._elementList[-1];
    def insertElement(self, atPosition, source, offset, theType, semantic, index):
        """Insert a new element at ``atPosition`` (appends when out of range)."""
        # Fixed bugs: the original referenced ``_elementList`` without
        # ``self.`` (NameError at runtime) and returned the *last* list
        # entry instead of the freshly inserted element.
        if (atPosition >= len(self._elementList)):
            return self.addElement(source,offset,theType,semantic,index);
        element = OgreVertexElement(source,offset,theType,semantic,index);
        self._elementList.insert(atPosition, element);
        return element;
    def getElement(self, index):
        return self._elementList[index];
    def removeElement(self, index):
        del self._elementList[index];
    def removeElementWithSemantic(self, semantic, index):
        """Remove the first element matching (semantic, index), if any."""
        # Fixed bug: the original iterated ``range(self._elementList)``,
        # which raises TypeError (range expects an int, not a list).
        for i in range(len(self._elementList)):
            if (self._elementList[i].semantic == semantic and self._elementList[i].index == index):
                del self._elementList[i];
                break;
    def removeAllElements(self):
        self._elementList = [];
    def findElementBySemantic(self, sem, index):
        """Return the first element with (sem, index), or None."""
        for e in self._elementList:
            if (e.semantic == sem and e.index == index):
                return e;
        return None;
    def findElementsBySemantic(self,sem):
        """Return every element carrying semantic ``sem``."""
        return [e for e in self._elementList if e.semantic == sem];
    def findElementBySource(self,source):
        """Return every element reading from vertex buffer ``source``."""
        return [e for e in self._elementList if e.source == source];
    def getVertexSize(self, source):
        """Return the total byte size of one vertex in buffer ``source``."""
        sz = 0;
        for e in self._elementList:
            if (e.source == source):
                sz += e.getSize();
        return sz;
    def vertexElementLess(e1, e2):
        """Strict "less than" predicate ordering by (source, semantic, index)."""
        if (e1.source != e2.source):
            return e1.source < e2.source;
        if (e1.semantic != e2.semantic):
            return e1.semantic < e2.semantic;
        return e1.index < e2.index;
    def sort(self):
        """Sort elements by (source, semantic, index)."""
        # Fixed bug: ``list.sort(cmp=...)`` was removed in Python 3; use a
        # key tuple that produces the same ordering as vertexElementLess.
        self._elementList.sort(key=lambda e: (e.source, e.semantic, e.index));
    def closeGapInSource(self):
        if (not self._elementList):
            return;
        self.sort();
        raise NotImplementedError;
class OgreVertexBufferBinding:
    """
    Associates vertex-buffer source indices with the buffer objects
    themselves.  This mirrors Ogre's on-disk layout, where vertex
    declarations are stored separately from the buffers they reference.
    """
    def __init__(self):
        # mapping from str(source index) -> buffer object
        self._bindingMap = {};
    def setBinding(self, index, vbuffer):
        """Bind ``vbuffer`` to the given source ``index`` (replacing any previous one)."""
        key = str(index);
        self._bindingMap[key] = vbuffer;
    def getBuffer(self, source):
        """Return the buffer bound to ``source``; raises KeyError when unbound."""
        return self._bindingMap[str(source)];
    def unsetAllBindings(self):
        """Forget every binding."""
        self._bindingMap = {};
| 37.013807
| 113
| 0.632154
|
# Fixed bug: the except branch below used ``os.path`` but ``os`` was never
# imported, so the manual-compilation fallback raised NameError.
import os
from enum import IntEnum;
from struct import unpack_from;
try:
    from OgreHardwareBuffer import OgreFakeHardwareBuffer
except ImportError as e:
    # Fall back to compiling the sibling module by hand from this file's
    # directory (used when the package is run outside its normal layout).
    directory = os.path.dirname(os.path.realpath(__file__));
    print("Import error: " + str(e) + " manual compilation" );
    srcfile="OgreHardwareBuffer.py"; exec(compile(open(os.path.join(directory,srcfile)).read(), srcfile, 'exec'))
class OgreVertexBuffer(OgreFakeHardwareBuffer):
    """
    A hardware vertex buffer holding ``numVertices`` vertices of
    ``vertexSize`` bytes each.  Both dimensions are fixed at construction
    and exposed read-only.
    """
    def __init__(self, vertexSize, numVertices):
        OgreFakeHardwareBuffer.__init__(self);
        # buffer geometry, immutable after construction
        self._vertexSize = vertexSize;
        self._numVertices = numVertices;
    @property
    def numVertices(self):
        """Number of vertices stored in this buffer."""
        return self._numVertices;
    @property
    def vertexSize(self):
        """Size in bytes of a single vertex."""
        return self._vertexSize;
    @property
    def sizeInBytes(self):
        """Total byte size of the buffer (vertexSize * numVertices)."""
        return self._numVertices * self._vertexSize;
class OgreVertexElementSemantic(IntEnum):
    """Vertex element semantics, mirroring Ogre's VertexElementSemantic enum."""
    VES_UNKNOWN = 0;
    VES_POSITION = 1;
    VES_BLEND_WEIGHTS = 2;
    VES_BLEND_INDICES = 3;
    VES_NORMAL = 4;
    VES_DIFFUSE = 5;
    VES_SPECULAR = 6;
    VES_TEXTURE_COORDINATES = 7;
    VES_BINORMAL = 8;
    VES_TANGENT = 9;
    # NOTE: VES_COUNT aliases VES_TANGENT (both 9), so toStr(VES_COUNT)
    # returns "VES_TANGENT" and the final branch below is unreachable.
    VES_COUNT = 9;
    def toStr(ves):
        """Return the name of semantic ``ves`` as a string (None if unknown)."""
        if (ves==OgreVertexElementSemantic.VES_UNKNOWN):
            return "VES_UNKNOWN";
        elif (ves==OgreVertexElementSemantic.VES_POSITION):
            return "VES_POSITION";
        elif (ves==OgreVertexElementSemantic.VES_BLEND_WEIGHTS):
            return "VES_BLEND_WEIGHTS";
        elif (ves==OgreVertexElementSemantic.VES_BLEND_INDICES):
            return "VES_BLEND_INDICES";
        elif (ves==OgreVertexElementSemantic.VES_NORMAL):
            return "VES_NORMAL";
        elif (ves==OgreVertexElementSemantic.VES_DIFFUSE):
            return "VES_DIFFUSE";
        elif (ves==OgreVertexElementSemantic.VES_SPECULAR):
            return "VES_SPECULAR";
        elif (ves==OgreVertexElementSemantic.VES_TEXTURE_COORDINATES):
            return "VES_TEXTURE_COORDINATES";
        elif (ves==OgreVertexElementSemantic.VES_BINORMAL):
            return "VES_BINORMAL";
        elif (ves==OgreVertexElementSemantic.VES_TANGENT):
            return "VES_TANGENT";
        elif (ves==OgreVertexElementSemantic.VES_COUNT):
            return "VES_COUNT";
class OgreVertexElementType(IntEnum):
    """Vertex element data types, mirroring Ogre's VertexElementType enum."""
    VET_FLOAT1 = 0;
    VET_FLOAT2 = 1;
    VET_FLOAT3 = 2;
    VET_FLOAT4 = 3;
    VET_COLOUR = 4;
    VET_SHORT1 = 5;
    VET_SHORT2 = 6;
    VET_SHORT3 = 7;
    VET_SHORT4 = 8;
    VET_UBYTE4 = 9;
    # D3D style compact colour
    VET_COLOUR_ARGB = 10;
    # GL style compact colour
    VET_COLOUR_ABGR = 11;
    VET_DOUBLE1 = 12;
    VET_DOUBLE2 = 13;
    VET_DOUBLE3 = 14;
    VET_DOUBLE4 = 15;
    VET_USHORT1 = 16;
    VET_USHORT2 = 17;
    VET_USHORT3 = 18;
    VET_USHORT4 = 19;
    VET_INT1 = 20;
    VET_INT2 = 21;
    VET_INT3 = 22;
    VET_INT4 = 23;
    VET_UINT1 = 24;
    VET_UINT2 = 25;
    VET_UINT3 = 26;
    VET_UINT4 = 27;
    def toStr(vet):
        """
        Return the name of type ``vet`` as a string, or None if unknown.

        Fixed bug: the original elif chain mislabelled the VET_DOUBLE*,
        VET_INT* and VET_UINT* members as "VET_COLOUR_DOUBLE1",
        "VET_COLOUR_INT1", "VET_COLOUR_UINT1" etc.
        """
        # Member names are exactly the intended strings, so a lookup
        # replaces the 28-branch chain (and guarantees name consistency).
        for member in OgreVertexElementType:
            if (vet == member):
                return member.name;
class OgreVertexElement:
    """
    Declares the usage of a single vertex buffer as a component of a
    complete VertexDeclaration.
    @remarks
        Several vertex buffers can be used to supply the input geometry for
        a rendering operation; the buffer itself does not define the
        semantics (position, normal etc), this VertexElement class does.
    """
    def __init__(self, source, offset, theType, semantic, index):
        # source:   index of the vertex buffer this element reads from.
        # offset:   byte offset of the element inside one vertex.
        # theType:  an OgreVertexElementType member.
        # semantic: an OgreVertexElementSemantic member.
        # index:    semantic index (e.g. texture coordinate set number).
        # Fixed bug: the original assert tested ``source`` twice and never
        # validated ``offset``.
        assert(type(source) is int and type(offset) is int and type(index) is int);
        self._source = source;
        self._offset = offset;
        self._type = theType;
        self._semantic = semantic;
        self._index = index;
    def getType(self):
        """Return the OgreVertexElementType of this element."""
        return self._type;
    @property
    def semantic(self):
        return self._semantic;
    @property
    def index(self):
        return self._index;
    @property
    def offset(self):
        return self._offset;
    @property
    def source(self):
        return self._source;
    def getTypeSize(t):
        """Return the size in bytes of one element of type ``t`` (0 if unknown)."""
        VET = OgreVertexElementType;
        # packed colours and ubyte4 always occupy 4 bytes
        if (t in (VET.VET_COLOUR, VET.VET_COLOUR_ABGR, VET.VET_COLOUR_ARGB, VET.VET_UBYTE4)):
            return 4;
        if (t in (VET.VET_FLOAT1, VET.VET_FLOAT2, VET.VET_FLOAT3, VET.VET_FLOAT4,
                  VET.VET_INT1, VET.VET_INT2, VET.VET_INT3, VET.VET_INT4,
                  VET.VET_UINT1, VET.VET_UINT2, VET.VET_UINT3, VET.VET_UINT4)):
            return 4 * OgreVertexElement.getTypeCount(t);
        if (t in (VET.VET_DOUBLE1, VET.VET_DOUBLE2, VET.VET_DOUBLE3, VET.VET_DOUBLE4)):
            return 8 * OgreVertexElement.getTypeCount(t);
        if (t in (VET.VET_SHORT1, VET.VET_SHORT2, VET.VET_SHORT3, VET.VET_SHORT4,
                  VET.VET_USHORT1, VET.VET_USHORT2, VET.VET_USHORT3, VET.VET_USHORT4)):
            return 2 * OgreVertexElement.getTypeCount(t);
        # unknown type: keep the original "return 0" contract
        return 0;
    def getTypeCount(t):
        """Return the number of components of type ``t``; raise ValueError if invalid."""
        VET = OgreVertexElementType;
        # packed colour formats count as a single 32-bit component
        if (t in (VET.VET_COLOUR, VET.VET_COLOUR_ABGR, VET.VET_COLOUR_ARGB)):
            return 1;
        counts = {
            VET.VET_FLOAT1: 1, VET.VET_DOUBLE1: 1, VET.VET_SHORT1: 1,
            VET.VET_USHORT1: 1, VET.VET_INT1: 1, VET.VET_UINT1: 1,
            VET.VET_FLOAT2: 2, VET.VET_DOUBLE2: 2, VET.VET_SHORT2: 2,
            VET.VET_USHORT2: 2, VET.VET_INT2: 2, VET.VET_UINT2: 2,
            VET.VET_FLOAT3: 3, VET.VET_DOUBLE3: 3, VET.VET_SHORT3: 3,
            VET.VET_USHORT3: 3, VET.VET_INT3: 3, VET.VET_UINT3: 3,
            VET.VET_FLOAT4: 4, VET.VET_DOUBLE4: 4, VET.VET_SHORT4: 4,
            VET.VET_USHORT4: 4, VET.VET_INT4: 4, VET.VET_UINT4: 4,
        };
        try:
            return counts[t];
        except KeyError:
            # VET_UBYTE4 and unknown values fall through here, as before
            raise ValueError("OgreVertexElement.getTypeCount(type): Invalid type");
    def getTypePythonUnpackStr(t):
        """Return the ``struct`` format characters for one element of type ``t``."""
        VET = OgreVertexElementType;
        if (t in (VET.VET_COLOUR, VET.VET_COLOUR_ABGR, VET.VET_COLOUR_ARGB)):
            raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Color unsupported yet");
        # one struct character per component, repeated getTypeCount times
        families = (
            ('f', (VET.VET_FLOAT1, VET.VET_FLOAT2, VET.VET_FLOAT3, VET.VET_FLOAT4)),
            ('d', (VET.VET_DOUBLE1, VET.VET_DOUBLE2, VET.VET_DOUBLE3, VET.VET_DOUBLE4)),
            ('h', (VET.VET_SHORT1, VET.VET_SHORT2, VET.VET_SHORT3, VET.VET_SHORT4)),
            ('H', (VET.VET_USHORT1, VET.VET_USHORT2, VET.VET_USHORT3, VET.VET_USHORT4)),
            ('i', (VET.VET_INT1, VET.VET_INT2, VET.VET_INT3, VET.VET_INT4)),
            ('I', (VET.VET_UINT1, VET.VET_UINT2, VET.VET_UINT3, VET.VET_UINT4)),
        );
        for ch, members in families:
            if (t in members):
                return ch * OgreVertexElement.getTypeCount(t);
        raise ValueError("OgreVertexElement.getTypePythonUnpackStr(type): Invalid type");
    def getBestCoulourVertexElementType():
        #Blender use opengl
        return OgreVertexElementType.VET_COLOUR_ABGR;
    def __eq__(self, other):
        # Fixed bug: the original compared ``self._offet`` (typo), which
        # raised AttributeError on every comparison.
        if (self._source == other._source and \
            self._index == other._index and \
            self._offset == other._offset and \
            self._semantic == other._semantic and \
            self._type == other._type):
            return True;
        else:
            return False;
    def getSize(self):
        """Return the size in bytes of this element."""
        return OgreVertexElement.getTypeSize(self._type);
    def extractFromBuffer(self, vertexBufferBinding, dest, endianess):
        """
        Unpack this element's value for every vertex of the bound buffer and
        append one tuple per vertex to ``dest``.
        """
        buf = vertexBufferBinding.getBuffer(self.source);
        #FIXME: the ``endianess`` argument is currently ignored; native
        # byte order ("=") is always used.
        cmd = "=" + OgreVertexElement.getTypePythonUnpackStr(self.getType());
        print(cmd);
        data = buf.data[self.offset:]
        for i in range(buf.numVertices):
            v = unpack_from(cmd, data, i * buf.vertexSize);
            dest.append(v);
class OgreVertexDeclaration:
    """
    Declares the format of a set of vertex inputs, which can be issued to
    the rendering API through a RenderOperation.
    @remarks
        Elements should be ordered (position, blend weights, normals,
        colours, texture coordinates), buffers must have no unused gaps and
        element buffer/offset settings must not overlap, for maximum
        compatibility across render systems.
    """
    def __init__(self):
        self._elementList = [];
    def getElements(self):
        """Return the internal list of OgreVertexElement objects."""
        return self._elementList;
    def addElement(self, source, offset, theType, semantic, index):
        """Append a new element; VET_COLOUR is remapped to the best packed format."""
        if (theType == OgreVertexElementType.VET_COLOUR):
            theType = OgreVertexElement.getBestCoulourVertexElementType();
        self._elementList.append(OgreVertexElement(source,offset,theType,semantic,index));
        return self._elementList[-1];
    def insertElement(self, atPosition, source, offset, theType, semantic, index):
        """Insert a new element at ``atPosition`` (appends when out of range)."""
        # Fixed bugs: the original referenced ``_elementList`` without
        # ``self.`` (NameError at runtime) and returned the *last* list
        # entry instead of the freshly inserted element.
        if (atPosition >= len(self._elementList)):
            return self.addElement(source,offset,theType,semantic,index);
        element = OgreVertexElement(source,offset,theType,semantic,index);
        self._elementList.insert(atPosition, element);
        return element;
    def getElement(self, index):
        return self._elementList[index];
    def removeElement(self, index):
        del self._elementList[index];
    def removeElementWithSemantic(self, semantic, index):
        """Remove the first element matching (semantic, index), if any."""
        # Fixed bug: the original iterated ``range(self._elementList)``,
        # which raises TypeError (range expects an int, not a list).
        for i in range(len(self._elementList)):
            if (self._elementList[i].semantic == semantic and self._elementList[i].index == index):
                del self._elementList[i];
                break;
    def removeAllElements(self):
        self._elementList = [];
    def findElementBySemantic(self, sem, index):
        """Return the first element with (sem, index), or None."""
        for e in self._elementList:
            if (e.semantic == sem and e.index == index):
                return e;
        return None;
    def findElementsBySemantic(self,sem):
        """Return every element carrying semantic ``sem``."""
        return [e for e in self._elementList if e.semantic == sem];
    def findElementBySource(self,source):
        """Return every element reading from vertex buffer ``source``."""
        return [e for e in self._elementList if e.source == source];
    def getVertexSize(self, source):
        """Return the total byte size of one vertex in buffer ``source``."""
        sz = 0;
        for e in self._elementList:
            if (e.source == source):
                sz += e.getSize();
        return sz;
    def vertexElementLess(e1, e2):
        """Strict "less than" predicate ordering by (source, semantic, index)."""
        if (e1.source != e2.source):
            return e1.source < e2.source;
        if (e1.semantic != e2.semantic):
            return e1.semantic < e2.semantic;
        return e1.index < e2.index;
    def sort(self):
        """Sort elements by (source, semantic, index)."""
        # Fixed bug: ``list.sort(cmp=...)`` was removed in Python 3; use a
        # key tuple that produces the same ordering as vertexElementLess.
        self._elementList.sort(key=lambda e: (e.source, e.semantic, e.index));
    def closeGapInSource(self):
        if (not self._elementList):
            return;
        self.sort();
        raise NotImplementedError;
class OgreVertexBufferBinding:
    """
    Associates vertex-buffer source indices with the buffer objects
    themselves.  This mirrors Ogre's on-disk layout, where vertex
    declarations are stored separately from the buffers they reference.
    """
    def __init__(self):
        # mapping from str(source index) -> buffer object
        self._bindingMap = {};
    def setBinding(self, index, vbuffer):
        """Bind ``vbuffer`` to the given source ``index`` (replacing any previous one)."""
        key = str(index);
        self._bindingMap[key] = vbuffer;
    def getBuffer(self, source):
        """Return the buffer bound to ``source``; raises KeyError when unbound."""
        return self._bindingMap[str(source)];
    def unsetAllBindings(self):
        """Forget every binding."""
        self._bindingMap = {};
| true
| true
|
1c475e3625b49e36e394562fd00fe1877c86b2a5
| 4,692
|
py
|
Python
|
env/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py
|
aammjian/cotton
|
f72b814f795f79a4054688e465c8b0ae5560f3b7
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
env/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py
|
aammjian/cotton
|
f72b814f795f79a4054688e465c8b0ae5560f3b7
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
env/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py
|
aammjian/cotton
|
f72b814f795f79a4054688e465c8b0ae5560f3b7
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# sqlite/pysqlcipher.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlcipher
:name: pysqlcipher
:dbapi: pysqlcipher
:connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
:url: https://pypi.python.org/pypi/pysqlcipher
``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
``pysqlcipher3`` is a fork of ``pysqlcipher`` for Python 3. This dialect
will attempt to import it if ``pysqlcipher`` is non-present.
.. versionadded:: 1.1.4 - added fallback import for pysqlcipher3
.. versionadded:: 0.9.9 - added pysqlcipher dialect
Driver
------
The driver here is the
`pysqlcipher <https://pypi.python.org/pypi/pysqlcipher>`_
driver, which makes use of the SQLCipher engine. This system essentially
introduces new PRAGMA commands to SQLite which allows the setting of a
passphrase and other encryption parameters, allowing the database
file to be encrypted.
`pysqlcipher3` is a fork of `pysqlcipher` with support for Python 3,
the driver is the same.
Connect Strings
---------------
The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``
``cipher_page_size`` and ``cipher_use_hmac`` are supported::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
Pooling Behavior
----------------
The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation,
instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
implementation is entirely configurable using the
:paramref:`_sa.create_engine.poolclass` parameter; the :class:`.StaticPool`
may
be more feasible for single-threaded use, or :class:`.NullPool` may be used
to prevent unencrypted connections from being held open for long periods of
time, at the expense of slower startup time for new connections.
""" # noqa
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ...engine import url as _url
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
    """SQLite dialect for the SQLCipher-enabled ``pysqlcipher`` DBAPI."""
    driver = "pysqlcipher"
    # Encryption-related PRAGMAs accepted in the connect URL's query string;
    # each one found is issued once on every new connection.
    pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
    @classmethod
    def dbapi(cls):
        """Import the DBAPI module: ``pysqlcipher`` first, ``pysqlcipher3`` as fallback."""
        try:
            from pysqlcipher import dbapi2 as sqlcipher
        except ImportError as e:
            try:
                from pysqlcipher3 import dbapi2 as sqlcipher
            except ImportError:
                # re-raise the *original* error so the primary package name
                # shows up in the failure message
                raise e
        return sqlcipher
    @classmethod
    def get_pool_class(cls, url):
        # SQLCipher connections are slow to open (encryption overhead), so
        # keep one per thread instead of pysqlite's NullPool default.
        return pool.SingletonThreadPool
    def connect(self, *cargs, **cparams):
        """Open a connection, then apply the passphrase and encryption PRAGMAs."""
        passphrase = cparams.pop("passphrase", "")
        # pull our pragma options out of cparams before the DBAPI sees them
        pragmas = dict((key, cparams.pop(key, None)) for key in self.pragmas)
        conn = super(SQLiteDialect_pysqlcipher, self).connect(
            *cargs, **cparams
        )
        # the key pragma must be the first statement on the connection
        conn.execute('pragma key="%s"' % passphrase)
        for prag, value in pragmas.items():
            if value is not None:
                conn.execute('pragma %s="%s"' % (prag, value))
        return conn
    def create_connect_args(self, url):
        """Strip the password from the URL and pass it along as ``passphrase``."""
        # rebuild the URL without the password so the parent dialect never
        # sees it; the passphrase travels separately in the connect opts
        super_url = _url.URL(
            url.drivername,
            username=url.username,
            host=url.host,
            database=url.database,
            query=url.query,
        )
        c_args, opts = super(
            SQLiteDialect_pysqlcipher, self
        ).create_connect_args(super_url)
        opts["passphrase"] = url.password
        return c_args, opts
# dialect class SQLAlchemy resolves for "sqlite+pysqlcipher://" URLs
dialect = SQLiteDialect_pysqlcipher
| 33.755396
| 96
| 0.702472
|
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ...engine import url as _url
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
    """SQLite dialect for the SQLCipher-enabled ``pysqlcipher`` DBAPI."""
    driver = "pysqlcipher"
    # Encryption-related PRAGMAs accepted in the connect URL's query string;
    # each one found is issued once on every new connection.
    pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
    @classmethod
    def dbapi(cls):
        """Import the DBAPI module: ``pysqlcipher`` first, ``pysqlcipher3`` as fallback."""
        try:
            from pysqlcipher import dbapi2 as sqlcipher
        except ImportError as e:
            try:
                from pysqlcipher3 import dbapi2 as sqlcipher
            except ImportError:
                # re-raise the *original* error so the primary package name
                # shows up in the failure message
                raise e
        return sqlcipher
    @classmethod
    def get_pool_class(cls, url):
        # SQLCipher connections are slow to open (encryption overhead), so
        # keep one per thread instead of pysqlite's NullPool default.
        return pool.SingletonThreadPool
    def connect(self, *cargs, **cparams):
        """Open a connection, then apply the passphrase and encryption PRAGMAs."""
        passphrase = cparams.pop("passphrase", "")
        # pull our pragma options out of cparams before the DBAPI sees them
        pragmas = dict((key, cparams.pop(key, None)) for key in self.pragmas)
        conn = super(SQLiteDialect_pysqlcipher, self).connect(
            *cargs, **cparams
        )
        # the key pragma must be the first statement on the connection
        conn.execute('pragma key="%s"' % passphrase)
        for prag, value in pragmas.items():
            if value is not None:
                conn.execute('pragma %s="%s"' % (prag, value))
        return conn
    def create_connect_args(self, url):
        """Strip the password from the URL and pass it along as ``passphrase``."""
        # rebuild the URL without the password so the parent dialect never
        # sees it; the passphrase travels separately in the connect opts
        super_url = _url.URL(
            url.drivername,
            username=url.username,
            host=url.host,
            database=url.database,
            query=url.query,
        )
        c_args, opts = super(
            SQLiteDialect_pysqlcipher, self
        ).create_connect_args(super_url)
        opts["passphrase"] = url.password
        return c_args, opts
# dialect class SQLAlchemy resolves for "sqlite+pysqlcipher://" URLs
dialect = SQLiteDialect_pysqlcipher
| true
| true
|
1c475e7b96a4c7661d55f944dc305ea0b892c612
| 2,727
|
py
|
Python
|
facerec_py/facerec/svm.py
|
idf/FaceReader
|
d649bf7ca7f9cf66ac99e81a5187cfcc2b54f49d
|
[
"MIT"
] | 7
|
2015-04-17T02:12:32.000Z
|
2018-08-08T01:29:24.000Z
|
facerec_py/facerec/svm.py
|
idf/FaceReader
|
d649bf7ca7f9cf66ac99e81a5187cfcc2b54f49d
|
[
"MIT"
] | null | null | null |
facerec_py/facerec/svm.py
|
idf/FaceReader
|
d649bf7ca7f9cf66ac99e81a5187cfcc2b54f49d
|
[
"MIT"
] | 4
|
2017-08-26T11:44:20.000Z
|
2021-06-13T11:50:11.000Z
|
from facerec_py.facerec.classifier import SVM
from facerec_py.facerec.validation import KFoldCrossValidation
from facerec_py.facerec.model import PredictableModel
from svmutil import *
from itertools import product
import numpy as np
import logging
def range_f(begin, end, step):
    """
    Build an inclusive arithmetic sequence from ``begin`` towards ``end``
    with the given ``step`` (works with floats and negative steps).
    A zero step yields an empty list.
    """
    if step == 0:
        return []
    values = []
    current = begin
    # continue while ``current`` has not yet passed ``end`` in the
    # direction of travel
    while (step > 0 and current <= end) or (step < 0 and current >= end):
        values.append(current)
        current = current + step
    return values
def grid(grid_parameters):
    """
    Return the cartesian product of the sequences described by
    ``grid_parameters``, where each entry is a (begin, end, step) triple
    expanded via ``range_f``.
    """
    axes = [range_f(begin, end, step) for (begin, end, step) in grid_parameters]
    return product(*axes)
def grid_search(model, X, y, C_range=(-5, 15, 2), gamma_range=(3, -15, -2), k=5, num_cores=1):
    """
    Exhaustive grid search over (C, gamma) for an SVM-based PredictableModel.

    ``C_range`` and ``gamma_range`` are (begin, end, step) triples of
    base-2 exponents: each candidate pair is (2**C, 2**gamma).  Every
    combination is scored with k-fold cross validation on (X, y).

    Returns (best_parameter, results) where ``results`` is a list of
    [C, gamma, accuracy] rows for every combination tried.

    NOTE(review): ``num_cores`` is accepted but never used -- the search
    runs serially.
    """
    if not isinstance(model, PredictableModel):
        raise TypeError("GridSearch expects a PredictableModel. If you want to perform optimization on raw data use facerec.feature.Identity to pass unpreprocessed data!")
    if not isinstance(model.classifier, SVM):
        raise TypeError("GridSearch expects a SVM as classifier. Please use a facerec.classifier.SVM!")
    logger = logging.getLogger("facerec.svm.gridsearch")
    logger.info("Performing a Grid Search.")
    # best parameter combination to return (seeded from the model's SVM)
    best_parameter = svm_parameter("-q")
    best_parameter.kernel_type = model.classifier.param.kernel_type
    best_parameter.nu = model.classifier.param.nu
    best_parameter.coef0 = model.classifier.param.coef0
    # either no gamma given or kernel is linear (only C to optimize);
    # collapse the gamma axis to a single dummy value
    if (gamma_range is None) or (model.classifier.param.kernel_type == LINEAR):
        gamma_range = (0, 0, 1)
    # best accuracy seen so far (initialised to -inf so any result wins)
    best_accuracy = np.finfo('float').min
    # create grid (cartesian product of exponent ranges)
    g = grid([C_range, gamma_range])
    results = []
    for p in g:
        C, gamma = p
        # candidates are powers of two of the grid exponents
        C, gamma = 2**C, 2**gamma
        model.classifier.param.C, model.classifier.param.gamma = C, gamma
        # perform a k-fold cross validation
        cv = KFoldCrossValidation(model=model,k=k)
        cv.validate(X,y)
        # append parameter into list with accuracies for all parameter combinations
        results.append([C, gamma, cv.accuracy])
        # store best parameter combination
        if cv.accuracy > best_accuracy:
            logger.info("best_accuracy=%s" % (cv.accuracy))
            best_accuracy = cv.accuracy
            best_parameter.C, best_parameter.gamma = C, gamma
        logger.info("%d-CV Result = %.2f." % (k, cv.accuracy))
    # set best parameter combination to best found
    return best_parameter, results
| 35.881579
| 171
| 0.6641
|
from facerec_py.facerec.classifier import SVM
from facerec_py.facerec.validation import KFoldCrossValidation
from facerec_py.facerec.model import PredictableModel
from svmutil import *
from itertools import product
import numpy as np
import logging
def range_f(begin, end, step):
seq = []
while True:
if step == 0: break
if step > 0 and begin > end: break
if step < 0 and begin < end: break
seq.append(begin)
begin = begin + step
return seq
def grid(grid_parameters):
grid = []
for parameter in grid_parameters:
begin, end, step = parameter
grid.append(range_f(begin, end, step))
return product(*grid)
def grid_search(model, X, y, C_range=(-5, 15, 2), gamma_range=(3, -15, -2), k=5, num_cores=1):
if not isinstance(model, PredictableModel):
raise TypeError("GridSearch expects a PredictableModel. If you want to perform optimization on raw data use facerec.feature.Identity to pass unpreprocessed data!")
if not isinstance(model.classifier, SVM):
raise TypeError("GridSearch expects a SVM as classifier. Please use a facerec.classifier.SVM!")
logger = logging.getLogger("facerec.svm.gridsearch")
logger.info("Performing a Grid Search.")
best_parameter = svm_parameter("-q")
best_parameter.kernel_type = model.classifier.param.kernel_type
best_parameter.nu = model.classifier.param.nu
best_parameter.coef0 = model.classifier.param.coef0
if (gamma_range is None) or (model.classifier.param.kernel_type == LINEAR):
gamma_range = (0, 0, 1)
best_accuracy = np.finfo('float').min
g = grid([C_range, gamma_range])
results = []
for p in g:
C, gamma = p
C, gamma = 2**C, 2**gamma
model.classifier.param.C, model.classifier.param.gamma = C, gamma
cv = KFoldCrossValidation(model=model,k=k)
cv.validate(X,y)
results.append([C, gamma, cv.accuracy])
if cv.accuracy > best_accuracy:
logger.info("best_accuracy=%s" % (cv.accuracy))
best_accuracy = cv.accuracy
best_parameter.C, best_parameter.gamma = C, gamma
logger.info("%d-CV Result = %.2f." % (k, cv.accuracy))
return best_parameter, results
| true
| true
|
1c475ea363209a3a683098d4d7dce556761ceb57
| 7,113
|
py
|
Python
|
app/main.py
|
ri10073/tracardi-api
|
828bc0939b3915af4c32906c65769c5b5fd992c3
|
[
"MIT"
] | null | null | null |
app/main.py
|
ri10073/tracardi-api
|
828bc0939b3915af4c32906c65769c5b5fd992c3
|
[
"MIT"
] | null | null | null |
app/main.py
|
ri10073/tracardi-api
|
828bc0939b3915af4c32906c65769c5b5fd992c3
|
[
"MIT"
] | null | null | null |
import logging
import os
import asyncio
from time import time
import elasticsearch
from fastapi.middleware.cors import CORSMiddleware
from fastapi import FastAPI, Request, Depends
from starlette.staticfiles import StaticFiles
from app.api import token_endpoint, rule_endpoint, resource_endpoint, event_endpoint, \
profile_endpoint, flow_endpoint, generic_endpoint, project_endpoint, \
credentials_endpoint, segments_endpoint, \
tql_endpoint, health_endpoint, session_endpoint, instance_endpoint, plugins_endpoint, test_endpoint, \
settings_endpoint, \
purchases_endpoint, event_tag_endpoint, consent_type_endpoint
from app.api.auth.authentication import get_current_user
from app.api.graphql.profile import graphql_profiles
from app.api.scheduler import tasks_endpoint
from app.api.track import event_server_endpoint
from app.config import server
from app.setup.on_start import add_plugins, update_api_instance
from tracardi.config import tracardi
from tracardi.service.storage.elastic_client import ElasticClient
from app.setup.indices_setup import create_indices
from tracardi.service.storage.index import resources
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger('app.main')
logger.setLevel(tracardi.logging_level)
_local_dir = os.path.dirname(__file__)
tags_metadata = [
{
"name": "profile",
"description": "Manage profiles. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Profile external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "resource",
"description": "Manage data resources. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Resource external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "rule",
"description": "Manage flow rule triggers. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Rule external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "flow",
"description": "Manage flows. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Flows external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "event",
"description": "Manage events. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Events external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "authorization",
"description": "OAuth authorization.",
},
{
"name": "tracker",
"description": "Read more about TRACARDI event server in documentation. http://localhost:8686/manual/en/site",
"externalDocs": {
"description": "External docs",
"url": "https://github/atompie/docs/en/docs",
},
}
]
application = FastAPI(
title="Tracardi Customer Data Platform Project",
description="TRACARDI open-source customer data platform offers you excellent control over your customer data with its broad set of features",
version="0.6.0",
openapi_tags=tags_metadata if server.expose_gui_api else None,
contact={
"name": "Risto Kowaczewski",
"url": "http://github.com/atompie/tracardi",
"email": "office@tracardi.com",
},
)
application.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
application.mount("/tracker",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "tracker")),
name="tracker")
application.mount("/manual",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "../manual")),
name="manual")
application.include_router(event_server_endpoint.router)
application.include_router(tql_endpoint.router)
application.include_router(segments_endpoint.router)
application.include_router(credentials_endpoint.router)
application.include_router(project_endpoint.router)
application.include_router(resource_endpoint.router)
application.include_router(rule_endpoint.router)
application.include_router(flow_endpoint.router)
application.include_router(event_endpoint.router)
application.include_router(profile_endpoint.router)
application.include_router(token_endpoint.router)
application.include_router(generic_endpoint.router)
application.include_router(health_endpoint.router)
application.include_router(session_endpoint.router)
application.include_router(tasks_endpoint.router)
application.include_router(instance_endpoint.router)
application.include_router(plugins_endpoint.router)
application.include_router(test_endpoint.router)
application.include_router(settings_endpoint.router)
application.include_router(purchases_endpoint.router)
application.include_router(event_tag_endpoint.router)
application.include_router(consent_type_endpoint.router)
# GraphQL
application.include_router(graphql_profiles,
prefix="/graphql/profile",
# dependencies=[Depends(get_current_user)],
tags=["graphql"])
@application.on_event("startup")
async def app_starts():
while True:
try:
if server.reset_plugins is True:
es = ElasticClient.instance()
index = resources.resources['action']
if await es.exists_index(index.get_write_index()):
await es.remove_index(index.get_read_index())
await create_indices()
await update_api_instance()
if server.update_plugins_on_start_up is not False:
await add_plugins()
break
except elasticsearch.exceptions.ConnectionError:
await asyncio.sleep(5)
report_i_am_alive()
logger.info("START UP exits.")
@application.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time()
if server.make_slower_responses > 0:
await asyncio.sleep(server.make_slower_responses)
response = await call_next(request)
process_time = time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
@application.on_event("shutdown")
async def app_shutdown():
elastic = ElasticClient.instance()
await elastic.close()
def report_i_am_alive():
async def heartbeat():
while True:
await asyncio.sleep(server.heartbeat_every)
await update_api_instance()
asyncio.create_task(heartbeat())
if __name__ == "__main__":
import uvicorn
uvicorn.run("app.main:application", host="0.0.0.0", port=8686, log_level="info")
| 34.529126
| 146
| 0.685505
|
import logging
import os
import asyncio
from time import time
import elasticsearch
from fastapi.middleware.cors import CORSMiddleware
from fastapi import FastAPI, Request, Depends
from starlette.staticfiles import StaticFiles
from app.api import token_endpoint, rule_endpoint, resource_endpoint, event_endpoint, \
profile_endpoint, flow_endpoint, generic_endpoint, project_endpoint, \
credentials_endpoint, segments_endpoint, \
tql_endpoint, health_endpoint, session_endpoint, instance_endpoint, plugins_endpoint, test_endpoint, \
settings_endpoint, \
purchases_endpoint, event_tag_endpoint, consent_type_endpoint
from app.api.auth.authentication import get_current_user
from app.api.graphql.profile import graphql_profiles
from app.api.scheduler import tasks_endpoint
from app.api.track import event_server_endpoint
from app.config import server
from app.setup.on_start import add_plugins, update_api_instance
from tracardi.config import tracardi
from tracardi.service.storage.elastic_client import ElasticClient
from app.setup.indices_setup import create_indices
from tracardi.service.storage.index import resources
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger('app.main')
logger.setLevel(tracardi.logging_level)
_local_dir = os.path.dirname(__file__)
tags_metadata = [
{
"name": "profile",
"description": "Manage profiles. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Profile external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "resource",
"description": "Manage data resources. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Resource external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "rule",
"description": "Manage flow rule triggers. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Rule external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "flow",
"description": "Manage flows. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Flows external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "event",
"description": "Manage events. Read more about core concepts of TRACARDI in documentation.",
"externalDocs": {
"description": "Events external docs",
"url": "https://github/atompie/docs/en/docs",
},
},
{
"name": "authorization",
"description": "OAuth authorization.",
},
{
"name": "tracker",
"description": "Read more about TRACARDI event server in documentation. http://localhost:8686/manual/en/site",
"externalDocs": {
"description": "External docs",
"url": "https://github/atompie/docs/en/docs",
},
}
]
application = FastAPI(
title="Tracardi Customer Data Platform Project",
description="TRACARDI open-source customer data platform offers you excellent control over your customer data with its broad set of features",
version="0.6.0",
openapi_tags=tags_metadata if server.expose_gui_api else None,
contact={
"name": "Risto Kowaczewski",
"url": "http://github.com/atompie/tracardi",
"email": "office@tracardi.com",
},
)
application.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
application.mount("/tracker",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "tracker")),
name="tracker")
application.mount("/manual",
StaticFiles(
html=True,
directory=os.path.join(_local_dir, "../manual")),
name="manual")
application.include_router(event_server_endpoint.router)
application.include_router(tql_endpoint.router)
application.include_router(segments_endpoint.router)
application.include_router(credentials_endpoint.router)
application.include_router(project_endpoint.router)
application.include_router(resource_endpoint.router)
application.include_router(rule_endpoint.router)
application.include_router(flow_endpoint.router)
application.include_router(event_endpoint.router)
application.include_router(profile_endpoint.router)
application.include_router(token_endpoint.router)
application.include_router(generic_endpoint.router)
application.include_router(health_endpoint.router)
application.include_router(session_endpoint.router)
application.include_router(tasks_endpoint.router)
application.include_router(instance_endpoint.router)
application.include_router(plugins_endpoint.router)
application.include_router(test_endpoint.router)
application.include_router(settings_endpoint.router)
application.include_router(purchases_endpoint.router)
application.include_router(event_tag_endpoint.router)
application.include_router(consent_type_endpoint.router)
application.include_router(graphql_profiles,
prefix="/graphql/profile",
tags=["graphql"])
@application.on_event("startup")
async def app_starts():
while True:
try:
if server.reset_plugins is True:
es = ElasticClient.instance()
index = resources.resources['action']
if await es.exists_index(index.get_write_index()):
await es.remove_index(index.get_read_index())
await create_indices()
await update_api_instance()
if server.update_plugins_on_start_up is not False:
await add_plugins()
break
except elasticsearch.exceptions.ConnectionError:
await asyncio.sleep(5)
report_i_am_alive()
logger.info("START UP exits.")
@application.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time()
if server.make_slower_responses > 0:
await asyncio.sleep(server.make_slower_responses)
response = await call_next(request)
process_time = time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
@application.on_event("shutdown")
async def app_shutdown():
elastic = ElasticClient.instance()
await elastic.close()
def report_i_am_alive():
async def heartbeat():
while True:
await asyncio.sleep(server.heartbeat_every)
await update_api_instance()
asyncio.create_task(heartbeat())
if __name__ == "__main__":
import uvicorn
uvicorn.run("app.main:application", host="0.0.0.0", port=8686, log_level="info")
| true
| true
|
1c475ed89de55cb2f813d13f5130ed38d968d27a
| 3,572
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/sulfurospirillumhalorespiransdsm13726.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/sulfurospirillumhalorespiransdsm13726.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/sulfurospirillumhalorespiransdsm13726.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Sulfurospirillum halorespirans DSM 13726.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def SulfurospirillumHalorespiransDsm13726(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Sulfurospirillum halorespirans DSM 13726 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Sulfurospirillum halorespirans DSM 13726 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="SulfurospirillumHalorespiransDsm13726",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 34.019048
| 223
| 0.68505
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def SulfurospirillumHalorespiransDsm13726(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="SulfurospirillumHalorespiransDsm13726",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
1c475eea3e539ba4d1a9a72d6264384d25b277e3
| 204
|
py
|
Python
|
book/recursion/base_conversion.py
|
Web-Dev-Collaborative/algos
|
d280581d74ded382094283d931a202eb55fd8369
|
[
"CC0-1.0"
] | 153
|
2015-12-24T00:32:23.000Z
|
2022-02-24T06:00:29.000Z
|
book/recursion/base_conversion.py
|
Web-Dev-Collaborative/algos
|
d280581d74ded382094283d931a202eb55fd8369
|
[
"CC0-1.0"
] | 78
|
2015-11-17T11:46:15.000Z
|
2021-06-28T18:37:58.000Z
|
book/recursion/base_conversion.py
|
rhivent/algo-books-python
|
c4fa29616ca9a8a15ba40fa12d21fd8f35096d40
|
[
"CC0-1.0"
] | 66
|
2015-11-02T03:38:02.000Z
|
2022-03-05T17:36:26.000Z
|
CHAR_FOR_INT = '0123456789abcdef'
def to_string(n, base):
if n < base:
return CHAR_FOR_INT[n]
return to_string(n // base, base) + CHAR_FOR_INT[n % base]
to_string(1453, 16) # => 5Ad
| 17
| 62
| 0.637255
|
CHAR_FOR_INT = '0123456789abcdef'
def to_string(n, base):
if n < base:
return CHAR_FOR_INT[n]
return to_string(n // base, base) + CHAR_FOR_INT[n % base]
to_string(1453, 16)
| true
| true
|
1c475efe695ee9d1a051a1330fe3636e05ac3b4c
| 579
|
py
|
Python
|
setup.py
|
tijko/shadow
|
8ba9a8c2de2be51fa4eb387a179dbc0ac4641575
|
[
"MIT"
] | null | null | null |
setup.py
|
tijko/shadow
|
8ba9a8c2de2be51fa4eb387a179dbc0ac4641575
|
[
"MIT"
] | null | null | null |
setup.py
|
tijko/shadow
|
8ba9a8c2de2be51fa4eb387a179dbc0ac4641575
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup, Extension, find_packages
except ImportError:
from distutils.core import setup, Extension
setup(
name = 'shadow',
version = '0.0.1',
author='Tim Konick',
author_email='konick781@gmail.com',
url='',
description='Provides auxillary data on processes',
long_description=open('README.md').read(),
license=open('LICENSE').read(),
packages=['shadow', 'shadow.taskstats'],
ext_modules=[Extension('libshadow', sources=['shadow/libshadow/libshadow.c'])]
)
| 26.318182
| 82
| 0.670121
|
try:
from setuptools import setup, Extension, find_packages
except ImportError:
from distutils.core import setup, Extension
setup(
name = 'shadow',
version = '0.0.1',
author='Tim Konick',
author_email='konick781@gmail.com',
url='',
description='Provides auxillary data on processes',
long_description=open('README.md').read(),
license=open('LICENSE').read(),
packages=['shadow', 'shadow.taskstats'],
ext_modules=[Extension('libshadow', sources=['shadow/libshadow/libshadow.c'])]
)
| true
| true
|
1c475f7fce5478d597fc5b92d7692cf01e58b4c5
| 1,559
|
py
|
Python
|
thenewboston_node/business_logic/models/signed_change_request/base.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | null | null | null |
thenewboston_node/business_logic/models/signed_change_request/base.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | null | null | null |
thenewboston_node/business_logic/models/signed_change_request/base.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | null | null | null |
import copy
import logging
from dataclasses import dataclass
from typing import ClassVar, Type, TypeVar
from thenewboston_node.business_logic.models.base import BaseDataclass
from thenewboston_node.core.logging import validates
from thenewboston_node.core.utils.cryptography import derive_public_key
from thenewboston_node.core.utils.dataclass import cover_docstring, revert_docstring
from thenewboston_node.core.utils.types import hexstr
from ..mixins.signable import SignableMixin
from ..signed_change_request_message import SignedChangeRequestMessage
T = TypeVar('T', bound='SignedChangeRequest')
logger = logging.getLogger(__name__)
@revert_docstring
@dataclass
@cover_docstring
class SignedChangeRequest(SignableMixin, BaseDataclass):
block_type: ClassVar[str]
message: SignedChangeRequestMessage
@classmethod
def create_from_signed_change_request_message(
cls: Type[T], message: SignedChangeRequestMessage, signing_key: hexstr
) -> T:
request = cls(signer=derive_public_key(signing_key), message=copy.deepcopy(message))
request.sign(signing_key)
return request
@validates('signed request')
def validate(self, blockchain, block_number: int):
self.validate_message()
with validates('block signature'):
self.validate_signature()
@validates('signed request message')
def validate_message(self):
self.message.validate()
def get_updated_account_states(self, blockchain):
raise NotImplementedError('Must be implemented in subclass')
| 32.479167
| 92
| 0.77678
|
import copy
import logging
from dataclasses import dataclass
from typing import ClassVar, Type, TypeVar
from thenewboston_node.business_logic.models.base import BaseDataclass
from thenewboston_node.core.logging import validates
from thenewboston_node.core.utils.cryptography import derive_public_key
from thenewboston_node.core.utils.dataclass import cover_docstring, revert_docstring
from thenewboston_node.core.utils.types import hexstr
from ..mixins.signable import SignableMixin
from ..signed_change_request_message import SignedChangeRequestMessage
T = TypeVar('T', bound='SignedChangeRequest')
logger = logging.getLogger(__name__)
@revert_docstring
@dataclass
@cover_docstring
class SignedChangeRequest(SignableMixin, BaseDataclass):
block_type: ClassVar[str]
message: SignedChangeRequestMessage
@classmethod
def create_from_signed_change_request_message(
cls: Type[T], message: SignedChangeRequestMessage, signing_key: hexstr
) -> T:
request = cls(signer=derive_public_key(signing_key), message=copy.deepcopy(message))
request.sign(signing_key)
return request
@validates('signed request')
def validate(self, blockchain, block_number: int):
self.validate_message()
with validates('block signature'):
self.validate_signature()
@validates('signed request message')
def validate_message(self):
self.message.validate()
def get_updated_account_states(self, blockchain):
raise NotImplementedError('Must be implemented in subclass')
| true
| true
|
1c475f9553b3a997c5e9fa81cedd6cc86997d3a6
| 4,621
|
py
|
Python
|
ProjectFiles/UMKCEntrepreneurialLegalServicesClinicDocuments/IntakeForm.py
|
KCLegalHackers/2016-Coding-For-Lawyers
|
0e7aeaf3b446defcfa60c862dfac5627cedd1560
|
[
"MIT"
] | 1
|
2021-01-15T00:34:54.000Z
|
2021-01-15T00:34:54.000Z
|
ProjectFiles/UMKCEntrepreneurialLegalServicesClinicDocuments/IntakeForm.py
|
KCLegalHackers/2016-Coding-For-Lawyers
|
0e7aeaf3b446defcfa60c862dfac5627cedd1560
|
[
"MIT"
] | null | null | null |
ProjectFiles/UMKCEntrepreneurialLegalServicesClinicDocuments/IntakeForm.py
|
KCLegalHackers/2016-Coding-For-Lawyers
|
0e7aeaf3b446defcfa60c862dfac5627cedd1560
|
[
"MIT"
] | null | null | null |
print('Application for Services: To be considered for acceptance as a client, you must complete this form and return it to the Entrepreneurial Legal Services Clinic. Acceptance as a client of the UMKC Entrepreneurial Legal Services Clinic is not guaranteed, and is ultimately based upon available of resources and time to provide services, absence of conflicts of interest, financial need of the client, and educational value for our students. What is your full name?')
clientName = input()
print('What is the date? (dd/mm/yyyy)')
date = input()
print('What is the name of the entity?')
companyName = input()
print('What is your mailing address')
clientAddress = input()
print('What city do you live in?')
clientCity = input()
print('What state do you live in?')
clientState = input()
print('What zip code do you live in?')
clientZip = input()
print('What is your telephone number?')
clientPhone = input()
print('What is your email address? By providing your email address you are giving the Entrepreneurial Legal Services Clinic express permission to contact you via email with matters regarding your business and to contact you regarding other information that may be of interest to you. If you do not want the Entrepreneurial Legal Services Clinic type N/A')
clientEmail = input()
print('Applicants for services are hereby notified that the University of Missouri-Kansas City and the Entrepreneurial Legal Services Clinic do not discriminate on the basis of race, color, creed, sex, sexual orientation, age, national origin, disability or Vietnam era veterans status in admission or access to, or treatment or employment in, its programs and activities. Financial Information (required for means testing): What is your total expected income for this year?')
expectedAnnualIncome = input()
print('What was your total expected income for last year?')
pastAnnualIncome = input()
print('How much available capital do you have to spend for your entity?')
availableCapital = input()
print('Are you currently employed?')
clientEmployment = input()
if str(clientEmployment) == 'yes': #I'm not sure if these next few lines are properly formatted
print('If so, where?')
employmentLocation = input()
else:
print('Demographic Information. This information is collected for demographic purposes only; it is anonymous and does not affect your acceptance as a client. What is your race?')
print('Demographic Information. This information is collected for demographic purposes only; it is anonymous and does not affect your acceptance as a client. What is your race?')
clientRace = input()
print('What is your gender?')
clientGender = input()
print('What is your marital status?')
clientMaritalStatus = input()
print('What is your highest level of education?')
clientEducation = input()
print('Required for conflicts check List any person or company, if any, who may have a claim against you or your business. If none, type n/a')
clientClaimants = input()
print('Are you currently a student at the University of Missouri at Kansas City or any other U- System campus?')
clientUMKCStudent = input()
print('Do you currently have or expect to have any contracts, employment, or other business relationship with the University of Missouri-Kansas City or any other campus, office or operation of the University of Missouri System?')
clientUMKCContracts = input()
print('Briefly state your legal question or problem/type of legal advice sought. If unsure, type n/a')
legalAdivceSought = input()
print('Please list any deadlines under which you are operating (court dates, etc. if any). If none, type n/a')
clientDeadlines = input()
print('I hereby state the above information is true to the best of my knowledge, and give permission to the Entrepreneurial Legal Services Clinic to check for potential conflicts of interests between myself and affiliates, and with current and former clients of the clinic, clients of firms at which students may be working, UMKC ,and the University of Missouri. I further confirm that I understand that work in the Entrepreneurial Legal Services Clinic is performed by law students under the supervision of licensed attorneys and therefore I may experience a delay due to the work being completed by said students. Type your signature in the following box to confirm that you are comfortable with the preceding obligations.')
clientSignature = input()
print('Type the date in the following box to confirm that you are comfortable with the preceding obligations')
clientDate = input()
# [Client Intake Form](http://www1.law.umkc.edu/clinics/els/application.pdf)
| 78.322034
| 726
| 0.781649
|
print('Application for Services: To be considered for acceptance as a client, you must complete this form and return it to the Entrepreneurial Legal Services Clinic. Acceptance as a client of the UMKC Entrepreneurial Legal Services Clinic is not guaranteed, and is ultimately based upon available of resources and time to provide services, absence of conflicts of interest, financial need of the client, and educational value for our students. What is your full name?')
clientName = input()
print('What is the date? (dd/mm/yyyy)')
date = input()
print('What is the name of the entity?')
companyName = input()
print('What is your mailing address')
clientAddress = input()
print('What city do you live in?')
clientCity = input()
print('What state do you live in?')
clientState = input()
print('What zip code do you live in?')
clientZip = input()
print('What is your telephone number?')
clientPhone = input()
print('What is your email address? By providing your email address you are giving the Entrepreneurial Legal Services Clinic express permission to contact you via email with matters regarding your business and to contact you regarding other information that may be of interest to you. If you do not want the Entrepreneurial Legal Services Clinic type N/A')
clientEmail = input()
print('Applicants for services are hereby notified that the University of Missouri-Kansas City and the Entrepreneurial Legal Services Clinic do not discriminate on the basis of race, color, creed, sex, sexual orientation, age, national origin, disability or Vietnam era veterans status in admission or access to, or treatment or employment in, its programs and activities. Financial Information (required for means testing): What is your total expected income for this year?')
expectedAnnualIncome = input()
print('What was your total expected income for last year?')
pastAnnualIncome = input()
print('How much available capital do you have to spend for your entity?')
availableCapital = input()
print('Are you currently employed?')
clientEmployment = input()
if str(clientEmployment) == 'yes':
print('If so, where?')
employmentLocation = input()
else:
print('Demographic Information. This information is collected for demographic purposes only; it is anonymous and does not affect your acceptance as a client. What is your race?')
print('Demographic Information. This information is collected for demographic purposes only; it is anonymous and does not affect your acceptance as a client. What is your race?')
clientRace = input()
print('What is your gender?')
clientGender = input()
print('What is your marital status?')
clientMaritalStatus = input()
print('What is your highest level of education?')
clientEducation = input()
print('Required for conflicts check List any person or company, if any, who may have a claim against you or your business. If none, type n/a')
clientClaimants = input()
print('Are you currently a student at the University of Missouri at Kansas City or any other U- System campus?')
clientUMKCStudent = input()
print('Do you currently have or expect to have any contracts, employment, or other business relationship with the University of Missouri-Kansas City or any other campus, office or operation of the University of Missouri System?')
clientUMKCContracts = input()
print('Briefly state your legal question or problem/type of legal advice sought. If unsure, type n/a')
legalAdivceSought = input()
print('Please list any deadlines under which you are operating (court dates, etc. if any). If none, type n/a')
clientDeadlines = input()
print('I hereby state the above information is true to the best of my knowledge, and give permission to the Entrepreneurial Legal Services Clinic to check for potential conflicts of interests between myself and affiliates, and with current and former clients of the clinic, clients of firms at which students may be working, UMKC ,and the University of Missouri. I further confirm that I understand that work in the Entrepreneurial Legal Services Clinic is performed by law students under the supervision of licensed attorneys and therefore I may experience a delay due to the work being completed by said students. Type your signature in the following box to confirm that you are comfortable with the preceding obligations.')
clientSignature = input()
print('Type the date in the following box to confirm that you are comfortable with the preceding obligations')
clientDate = input()
# [Client Intake Form](http://www1.law.umkc.edu/clinics/els/application.pdf)
| true
| true
|
1c475fd0731889687d14b2130b367eb0ec6cbbcf
| 1,749
|
py
|
Python
|
setup.py
|
gaussian/django-sql-explorer
|
844c8f59f8a3de31ef445e18356e97afded50dfc
|
[
"MIT"
] | null | null | null |
setup.py
|
gaussian/django-sql-explorer
|
844c8f59f8a3de31ef445e18356e97afded50dfc
|
[
"MIT"
] | null | null | null |
setup.py
|
gaussian/django-sql-explorer
|
844c8f59f8a3de31ef445e18356e97afded50dfc
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
from explorer import __version__
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py."""
    # Use a context manager so the handle is closed deterministically
    # (the original leaked it until garbage collection).
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
setup(
    name="django-sql-explorer",
    version=__version__,
    author="Chris Clark",
    author_email="chris@untrod.com",
    description=("A pluggable app that allows users (admins) to execute SQL,"
                 " view, and export the results."),
    license="MIT",
    keywords="django sql explorer reports reporting csv database query",
    url="https://github.com/groveco/django-sql-explorer",
    packages=['explorer'],
    long_description=read('README.rst'),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Utilities',
        # NOTE(review): these Django classifiers (1.10-2.1) contradict the
        # Django>=2.2.14 pin in install_requires below — confirm the actual
        # supported versions and update one side or the other.
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.0',
        'Framework :: Django :: 2.1',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    install_requires=[
        'Django>=2.2.14',
        'sqlparse>=0.1.18',
        'unicodecsv>=0.14.1',
        'six>=1.10.0',
    ],
    include_package_data=True,
    zip_safe=False,
)
| 33.634615
| 79
| 0.612922
|
import os
from setuptools import setup
from explorer import __version__
# README file and 2) it's easier to type in the README file than to put a raw
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="django-sql-explorer",
version=__version__,
author="Chris Clark",
author_email="chris@untrod.com",
description=("A pluggable app that allows users (admins) to execute SQL,"
" view, and export the results."),
license="MIT",
keywords="django sql explorer reports reporting csv database query",
url="https://github.com/groveco/django-sql-explorer",
packages=['explorer'],
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
install_requires=[
'Django>=2.2.14',
'sqlparse>=0.1.18',
'unicodecsv>=0.14.1',
'six>=1.10.0',
],
include_package_data=True,
zip_safe=False,
)
| true
| true
|
1c4760d27cf1f4616f2f9ae082e15fd487249b5e
| 3,074
|
py
|
Python
|
tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/keras_evaluation_test.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/keras_evaluation_test.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/keras_evaluation_test.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import keras_evaluation
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackResults
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
class UtilsTest(absltest.TestCase):
  """Tests for the Keras membership-inference evaluation helpers."""

  def __init__(self, methodname):
    """Build random data and a small classifier shared by every test."""
    super().__init__(methodname)
    # Problem dimensions: split sizes, number of classes, feature width.
    self.ntrain = 50
    self.ntest = 100
    self.nclass = 5
    self.ndim = 10
    # Random features with integer labels for both splits.
    self.train_data = np.random.rand(self.ntrain, self.ndim)
    self.test_data = np.random.rand(self.ntest, self.ndim)
    self.train_labels = np.random.randint(self.nclass, size=self.ntrain)
    self.test_labels = np.random.randint(self.nclass, size=self.ntest)
    # One dense layer emitting a logit per class.
    self.model = tf.keras.Sequential([tf.keras.layers.Dense(self.nclass)])
    self.model.compile(
        optimizer='Adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

  def test_calculate_losses(self):
    """Predictions and losses have the expected shapes on both splits."""
    for features, labels, count in (
        (self.train_data, self.train_labels, self.ntrain),
        (self.test_data, self.test_labels, self.ntest)):
      pred, loss = keras_evaluation.calculate_losses(
          self.model, features, labels)
      self.assertEqual(pred.shape, (count, self.nclass))
      self.assertEqual(loss.shape, (count,))

  def test_run_attack_on_keras_model(self):
    """A threshold attack runs end to end and yields flattened metrics."""
    results = keras_evaluation.run_attack_on_keras_model(
        self.model, (self.train_data, self.train_labels),
        (self.test_data, self.test_labels),
        attack_types=[AttackType.THRESHOLD_ATTACK])
    self.assertIsInstance(results, AttackResults)
    att_types, att_slices, att_metrics, att_values = (
        get_flattened_attack_metrics(results))
    for flattened in (att_types, att_slices, att_metrics, att_values):
      self.assertLen(flattened, 2)
| 41.540541
| 125
| 0.737801
|
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import keras_evaluation
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackResults
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
class UtilsTest(absltest.TestCase):
def __init__(self, methodname):
super().__init__(methodname)
self.ntrain, self.ntest = 50, 100
self.nclass = 5
self.ndim = 10
self.train_data = np.random.rand(self.ntrain, self.ndim)
self.test_data = np.random.rand(self.ntest, self.ndim)
self.train_labels = np.random.randint(self.nclass, size=self.ntrain)
self.test_labels = np.random.randint(self.nclass, size=self.ntest)
self.model = tf.keras.Sequential([tf.keras.layers.Dense(self.nclass)])
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.model.compile(optimizer='Adam', loss=loss, metrics=['accuracy'])
def test_calculate_losses(self):
pred, loss = keras_evaluation.calculate_losses(self.model, self.train_data,
self.train_labels)
self.assertEqual(pred.shape, (self.ntrain, self.nclass))
self.assertEqual(loss.shape, (self.ntrain,))
pred, loss = keras_evaluation.calculate_losses(self.model, self.test_data,
self.test_labels)
self.assertEqual(pred.shape, (self.ntest, self.nclass))
self.assertEqual(loss.shape, (self.ntest,))
def test_run_attack_on_keras_model(self):
results = keras_evaluation.run_attack_on_keras_model(
self.model, (self.train_data, self.train_labels),
(self.test_data, self.test_labels),
attack_types=[AttackType.THRESHOLD_ATTACK])
self.assertIsInstance(results, AttackResults)
att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(
results)
self.assertLen(att_types, 2)
self.assertLen(att_slices, 2)
self.assertLen(att_metrics, 2)
self.assertLen(att_values, 2)
if __name__ == '__main__':
absltest.main()
| true
| true
|
1c47629a3fff6341d9f92bd348f85e77bc92bff9
| 282
|
py
|
Python
|
html_downloader.py
|
etworker/TinySpider
|
b3e3c67451d361d064d915875582341b84f0d49d
|
[
"MIT"
] | null | null | null |
html_downloader.py
|
etworker/TinySpider
|
b3e3c67451d361d064d915875582341b84f0d49d
|
[
"MIT"
] | null | null | null |
html_downloader.py
|
etworker/TinySpider
|
b3e3c67451d361d064d915875582341b84f0d49d
|
[
"MIT"
] | null | null | null |
__author__ = 'worker'
import urllib2
class HtmlDownloader(object):
    """Fetches the raw HTML body of a URL."""

    def download(self, url):
        """Return the response body for *url*, or None on failure.

        Failure means a missing URL or a non-200 HTTP status.
        """
        if url is None:
            return None
        response = urllib2.urlopen(url)
        try:
            # Anything other than HTTP 200 counts as a failed download.
            if response.getcode() != 200:
                return None
            return response.read()
        finally:
            # Always release the connection; the original leaked it on the
            # non-200 path and relied on GC on the success path.
            response.close()
| 20.142857
| 39
| 0.588652
|
__author__ = 'worker'
import urllib2
class HtmlDownloader(object):
def download(self, url):
if url is None:
return None
response = urllib2.urlopen(url)
if response.getcode() != 200:
return None
return response.read()
| true
| true
|
1c4762e3f34e2ed7a22ada6411f795fe540463d8
| 18,315
|
py
|
Python
|
pfp/native/compat_io.py
|
krx/pfp-construct
|
248c43781e15ba6eb0a9a6c0982a40c0e380d9b6
|
[
"MIT"
] | null | null | null |
pfp/native/compat_io.py
|
krx/pfp-construct
|
248c43781e15ba6eb0a9a6c0982a40c0e380d9b6
|
[
"MIT"
] | null | null | null |
pfp/native/compat_io.py
|
krx/pfp-construct
|
248c43781e15ba6eb0a9a6c0982a40c0e380d9b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
This module of native functions is implemented for
compatability with 010 editor functions. Some of these functions
are nops, some are fully implemented.
"""
from pytest import skip
import six
import sys
from pfp.native import native
import pfp.interp
import pfp.errors as errors
import pfp.bitwrap as bitwrap
from .. import utils
import construct as C
# http://www.sweetscape.com/010editor/manual/FuncIO.htm
# void BigEndian()
@native(name="BigEndian", ret=None)
def BigEndian(params, ctxt, scope, stream, coord):
    """Switch the interpreter's current endianness to big-endian."""
    # 010's BigEndian() takes no arguments.
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params)))
    pfp.interp.Endian.current = pfp.interp.Endian.BIG
# void BitfieldDisablePadding()
@native(name="BitfieldDisablePadding", ret=None, send_interp=True)
def BitfieldDisablePadding(params, ctxt, scope, stream, coord, interp):
    """Disable padding between adjacent bitfields. Takes no arguments."""
    if len(params) > 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params))
        )
    interp.set_bitfield_padded(False)
# void BitfieldEnablePadding()
@native(name="BitfieldEnablePadding", ret=None, send_interp=True)
def BitfieldEnablePadding(params, ctxt, scope, stream, coord, interp):
    """Enable padding between adjacent bitfields. Takes no arguments."""
    if len(params) > 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params))
        )
    interp.set_bitfield_padded(True)
# void BitfieldLeftToRight()
@native(name="BitfieldLeftToRight", ret=None, send_interp=True)
def BitfieldLeftToRight(params, ctxt, scope, stream, coord, interp):
    """Lay out bitfields left-to-right within their storage unit."""
    if len(params) > 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params))
        )
    interp.set_bitfield_direction(interp.BITFIELD_DIR_LEFT_RIGHT)
# void BitfieldRightToLeft()
@native(name="BitfieldRightToLeft", ret=None, send_interp=True)
def BitfieldRightToLeft(params, ctxt, scope, stream, coord, interp):
    """Lay out bitfields right-to-left within their storage unit."""
    if len(params) > 0:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params))
        )
    interp.set_bitfield_direction(interp.BITFIELD_DIR_RIGHT_LEFT)
# double ConvertBytesToDouble( uchar byteArray[] )
# NOTE: the following 010 editor builtins are declared for API
# compatibility only — this interpreter does not implement them and each
# raises NotImplementedError when called.
@native(name="ConvertBytesToDouble", ret=C.Double)
def ConvertBytesToDouble(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# float ConvertBytesToFloat( uchar byteArray[] )
@native(name="ConvertBytesToFloat", ret=C.Single)
def ConvertBytesToFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# hfloat ConvertBytesToHFloat( uchar byteArray[] )
@native(name="ConvertBytesToHFloat", ret=C.Single)
def ConvertBytesToHFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int ConvertDataToBytes( data_type value, uchar byteArray[] )
@native(name="ConvertDataToBytes", ret=C.Int)
def ConvertDataToBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void DeleteBytes( int64 start, int64 size )
@native(name="DeleteBytes", ret=None)
def DeleteBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int DirectoryExists( string dir )
@native(name="DirectoryExists", ret=C.Int)
def DirectoryExists(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int FEof()
@native(name="FEof", ret=bool)
def FEof(params, ctxt, scope, stream, coord):
    """Return True when the data stream is at end-of-file."""
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params)))
    # Delegate the EOF probe to construct's stream helper.
    return C.stream_iseof(ctxt._io)
# int64 FileSize()
@native(name="FileSize", ret=int)
def FileSize(params, ctxt, scope, stream, coord):
    """Return the total size of the data stream, per ``ctxt._io.size()``."""
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params)))
    return ctxt._io.size()
# TFileList FindFiles( string dir, string filter )
# NOTE: FindFiles and FPrintf are declared for 010 API compatibility only
# and are not implemented by this interpreter.
@native(name="FindFiles", ret=None)
def FindFiles(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int FPrintf( int fileNum, char format[], ... )
@native(name="FPrintf", ret=C.Int)
def FPrintf(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int FSeek( int64 pos )
@native(name="FSeek", ret=int)
def FSeek(params, ctxt, scope, stream, coord):
    """Seek the data stream to an absolute position.

    Mirrors 010 editor's FSeek: returns 0 on success or -1 if the
    requested address is out of range.
    """
    if len(params) != 1:
        raise errors.InvalidArguments(
            coord,
            "{} args".format(len(params)),
            "FSeek accepts only one argument",
        )
    # The position may be a lazily-evaluated expression; resolve it first.
    pos = utils.evaluate(params[0], ctxt)
    # Report out-of-range addresses instead of seeking. The original only
    # rejected positions past EOF; negative positions are now rejected too
    # so the documented "-1 if the address is out of range" contract holds.
    if pos < 0 or pos > ctxt._io.size():
        return -1
    C.stream_seek(ctxt._io, pos, 0, "")
    return 0
# int FSkip( int64 offset )
@native(name="FSkip", ret=int)
def FSkip(params, ctxt, scope, stream, coord):
    """Move the data stream by a relative offset (010's FSkip).

    Returns whatever construct's ``stream_seek`` returns for the
    relative move. (The previous docstring claimed a 0/-1 return, which
    the code never produced.)
    """
    if len(params) != 1:
        raise errors.InvalidArguments(
            coord,
            "{} args".format(len(params)),
            "FSkip accepts only one argument",
        )
    skip_amt = params[0]
    # The offset may arrive as a lazily-evaluated construct expression;
    # keep resolving until a concrete number is obtained.
    while callable(skip_amt):
        skip_amt = skip_amt(ctxt)
    # whence=1: seek relative to the current position.
    return C.stream_seek(ctxt._io, skip_amt, whence=1, path="")
# int64 FTell()
@native(name="FTell", ret=int)
def FTell(params, ctxt, scope, stream, coord):
    """Return the current position of the data stream."""
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params)))
    return C.stream_tell(ctxt._io, None)
# void InsertBytes( int64 start, int64 size, uchar value=0 )
@native(name="InsertBytes", ret=None)
def InsertBytes(params, ctxt, scope, stream, coord):
    """Not implemented; declared for 010 API compatibility only."""
    raise NotImplementedError()
# int IsBigEndian()
@native(name="IsBigEndian", ret=bool)
def IsBigEndian(params, ctxt, scope, stream, coord):
    """Return True when the interpreter is currently big-endian."""
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params)))
    return pfp.interp.Endian.current == pfp.interp.Endian.BIG
# int IsLittleEndian()
@native(name="IsLittleEndian", ret=bool)
def IsLittleEndian(params, ctxt, scope, stream, coord):
    """Return True when the interpreter is currently little-endian."""
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params)))
    return pfp.interp.Endian.current == pfp.interp.Endian.LITTLE
# void LittleEndian()
@native(name="LittleEndian", ret=None)
def LittleEndian(params, ctxt, scope, stream, coord):
    """Switch the interpreter's current endianness to little-endian."""
    # 010's LittleEndian() takes no arguments.
    if params:
        raise errors.InvalidArguments(
            coord, "0 arguments", "{} args".format(len(params)))
    pfp.interp.Endian.current = pfp.interp.Endian.LITTLE
# int MakeDir( string dir )
# NOTE: MakeDir and OverwriteBytes are declared for 010 API compatibility
# only and are not implemented by this interpreter.
@native(name="MakeDir", ret=C.Int)
def MakeDir(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void OverwriteBytes( int64 start, int64 size, uchar value=0 )
@native(name="OverwriteBytes", ret=None)
def OverwriteBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
def _read_data(params, ctxt, cls, coord):
    """Parse a single value of construct type ``cls`` from the data stream.

    Reads at the current position, or at ``params[0]`` when a position
    argument is supplied, then restores the stream position and its
    bit-read state so the caller's parse is unaffected.
    """
    stream = ctxt._io
    # Save bit-read state and byte position so the peek is side-effect free.
    bits = stream._bits
    curr_pos = stream.tell()
    if len(params) == 1:
        pos = utils.evaluate(params[0], ctxt)
        stream.seek(pos, 0)
    elif len(params) > 1:
        raise errors.InvalidArguments(
            coord, "at most 1 arguments", "{} args".format(len(params))
        )
    # Make sure to use the right endianness
    # NOTE(review): this rewrites the *class-level* format string, so the
    # endianness switch leaks to every other user of ``cls`` — confirm this
    # global mutation is intended.
    cls.fmtstr = pfp.interp.Endian.current + cls.fmtstr[1:]
    res = cls.parse_stream(stream)
    # reset the stream
    stream.seek(curr_pos, 0)
    stream._bits = bits
    return res
# char ReadByte( int64 pos=FTell() )
# The Read* builtins below peek one value from the data stream via
# _read_data: they honor the interpreter's current endianness (the
# big-endian construct base types get their format rewritten there) and
# leave the stream position untouched.
@native(name="ReadByte", ret=int)
def ReadByte(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Int8sb, coord)
# double ReadDouble( int64 pos=FTell() )
@native(name="ReadDouble", ret=float)
def ReadDouble(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Double, coord)
# float ReadFloat( int64 pos=FTell() )
@native(name="ReadFloat", ret=float)
def ReadFloat(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Single, coord)
# hfloat ReadHFloat( int64 pos=FTell() )
@native(name="ReadHFloat", ret=float)
def ReadHFloat(params, ctxt, scope, stream, coord):
    # NOTE(review): half-floats are parsed as 32-bit Single here — confirm
    # this matches the intended hfloat semantics.
    return _read_data(params, ctxt, C.Single, coord)
# int ReadInt( int64 pos=FTell() )
@native(name="ReadInt", ret=int)
def ReadInt(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Int32sb, coord)
# int64 ReadInt64( int64 pos=FTell() )
@native(name="ReadInt64", ret=int)
def ReadInt64(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Int64sb, coord)
# int64 ReadQuad( int64 pos=FTell() )
@native(name="ReadQuad", ret=int)
def ReadQuad(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Int64sb, coord)
# short ReadShort( int64 pos=FTell() )
@native(name="ReadShort", ret=int)
def ReadShort(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Int16sb, coord)
# uchar ReadUByte( int64 pos=FTell() )
@native(name="ReadUByte", ret=int)
def ReadUByte(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Byte, coord)
# uint ReadUInt( int64 pos=FTell() )
@native(name="ReadUInt", ret=int)
def ReadUInt(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Int32ub, coord)
# uint64 ReadUInt64( int64 pos=FTell() )
@native(name="ReadUInt64", ret=int)
def ReadUInt64(params, ctxt, scope, stream, coord):
    return _read_data(params, ctxt, C.Int64ub, coord)
# uint64 ReadUQuad( int64 pos=FTell() )
@native(name="ReadUQuad", ret=int)
def ReadUQuad(params, ctxt, scope, stream, coord):
    """Peek an unsigned 64-bit value from the data stream."""
    return _read_data(params, ctxt, C.Int64ub, coord)
# ushort ReadUShort( int64 pos=FTell() )
@native(name="ReadUShort", ret=int)
def ReadUShort(params, ctxt, scope, stream, coord):
    """Peek an unsigned 16-bit value from the data stream."""
    return _read_data(params, ctxt, C.Int16ub, coord)
# char[] ReadLine( int64 pos, int maxLen=-1, int includeLinefeeds=true )
@native(name="ReadLine", ret=C.CString)
def ReadLine(params, ctxt, scope, stream, coord):
    """Not implemented; declared for 010 API compatibility only."""
    raise NotImplementedError()
# void ReadBytes( uchar buffer[], int64 pos, int n )
@native(name="ReadBytes", ret=None)
def ReadBytes(params, ctxt, scope, stream, coord):
    """Fill ``buffer`` (params[0]) with ``n`` (params[2]) values read from
    the stream, restoring the stream position and bit state afterwards.

    NOTE(review): ``pos`` (params[1]) is validated but never used — the
    read starts at the *current* stream position, whereas 010's ReadBytes
    reads at ``pos``. Confirm which behavior is intended.
    NOTE(review): this function mixes the pfp.fields API (UChar,
    _pfp__set_value) with construct types (C.Bytes, C.IntBase), and reads
    from the ``stream`` argument rather than ``ctxt._io`` like the other
    builtins here — verify both against the rest of the port.
    """
    if len(params) != 3:
        raise errors.InvalidArguments(
            coord,
            "3 arguments (buffer, pos, n)",
            "{} args".format(len(params)),
        )
    if not isinstance(params[0], C.Bytes):
        raise errors.InvalidArguments(
            coord, "buffer must be Bytes", params[0].__class__.__name__
        )
    if params[0].field_cls not in [pfp.fields.UChar, C.Byte]:
        raise errors.InvalidArguments(
            coord,
            "buffer must be an array of uchar or char",
            params[0].field_cls.__name__,
        )
    if not isinstance(params[1], C.IntBase):
        raise errors.InvalidArguments(
            coord, "pos must be an integer", params[1].__class__.__name__
        )
    if not isinstance(params[2], C.IntBase):
        raise errors.InvalidArguments(
            coord, "n must be an integer", params[2].__class__.__name__
        )
    # Save state so the bulk read does not disturb the surrounding parse.
    bits = stream._bits
    curr_pos = stream.tell()
    vals = [
        params[0].field_cls(stream) for x in six.moves.range(utils.evaluate(params[2], ctxt))
    ]
    stream.seek(curr_pos, 0)
    stream._bits = bits
    params[0]._pfp__set_value(vals)
# char[] ReadString( int64 pos, int maxLen=-1 )
# NOTE: the string/wide-string readers below are declared for 010 API
# compatibility only and are not implemented by this interpreter.
@native(name="ReadString", ret=C.CString)
def ReadString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int ReadStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadStringLength", ret=C.Int)
def ReadStringLength(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# wstring ReadWLine( int64 pos, int maxLen=-1 )
@native(name="ReadWLine", ret=C.CString)
def ReadWLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# wstring ReadWString( int64 pos, int maxLen=-1 )
@native(name="ReadWString", ret=C.CString)
def ReadWString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int ReadWStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadWStringLength", ret=C.Int)
def ReadWStringLength(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int64 TextAddressToLine( int64 address )
# NOTE: the Text* builtins below are declared for 010 API compatibility
# only and are not implemented by this interpreter.
@native(name="TextAddressToLine", ret=C.Long)
def TextAddressToLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int TextAddressToColumn( int64 address )
@native(name="TextAddressToColumn", ret=C.Int)
def TextAddressToColumn(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int64 TextColumnToAddress( int64 line, int column )
@native(name="TextColumnToAddress", ret=C.Long)
def TextColumnToAddress(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int64 TextGetNumLines()
@native(name="TextGetNumLines", ret=C.Long)
def TextGetNumLines(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int TextGetLineSize( int64 line, int includeLinefeeds=true )
@native(name="TextGetLineSize", ret=C.Int)
def TextGetLineSize(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int64 TextLineToAddress( int64 line )
@native(name="TextLineToAddress", ret=C.Long)
def TextLineToAddress(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int TextReadLine( char buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLine", ret=C.Int)
def TextReadLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# int TextReadLineW( wchar_t buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLineW", ret=C.Int)
def TextReadLineW(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void TextWriteLine( const char buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLine", ret=None)
def TextWriteLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void TextWriteLineW( const wchar_t buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLineW", ret=None)
def TextWriteLineW(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteByte( int64 pos, char value )
# NOTE: the Write* builtins below are declared for 010 API compatibility
# only and are not implemented by this interpreter.
@native(name="WriteByte", ret=None)
def WriteByte(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteDouble( int64 pos, double value )
@native(name="WriteDouble", ret=None)
def WriteDouble(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteFloat( int64 pos, float value )
@native(name="WriteFloat", ret=None)
def WriteFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteHFloat( int64 pos, float value )
@native(name="WriteHFloat", ret=None)
def WriteHFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteInt( int64 pos, int value )
@native(name="WriteInt", ret=None)
def WriteInt(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteInt64( int64 pos, int64 value )
@native(name="WriteInt64", ret=None)
def WriteInt64(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteQuad( int64 pos, int64 value )
@native(name="WriteQuad", ret=None)
def WriteQuad(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteShort( int64 pos, short value )
@native(name="WriteShort", ret=None)
def WriteShort(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteUByte( int64 pos, uchar value )
@native(name="WriteUByte", ret=None)
def WriteUByte(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteUInt( int64 pos, uint value )
@native(name="WriteUInt", ret=None)
def WriteUInt(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteUInt64( int64 pos, uint64 value )
@native(name="WriteUInt64", ret=None)
def WriteUInt64(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteUQuad( int64 pos, uint64 value )
@native(name="WriteUQuad", ret=None)
def WriteUQuad(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteUShort( int64 pos, ushort value )
@native(name="WriteUShort", ret=None)
def WriteUShort(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteBytes( const uchar buffer[], int64 pos, int n )
@native(name="WriteBytes", ret=None)
def WriteBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteString( int64 pos, const char value[] )
@native(name="WriteString", ret=None)
def WriteString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
# void WriteWString( int64 pos, const wstring value )
@native(name="WriteWString", ret=None)
def WriteWString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
| 29.82899
| 93
| 0.690527
|
from pytest import skip
import six
import sys
from pfp.native import native
import pfp.interp
import pfp.errors as errors
import pfp.bitwrap as bitwrap
from .. import utils
import construct as C
@native(name="BigEndian", ret=None)
def BigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.interp.Endian.current = pfp.interp.Endian.BIG
@native(name="BitfieldDisablePadding", ret=None, send_interp=True)
def BitfieldDisablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(False)
@native(name="BitfieldEnablePadding", ret=None, send_interp=True)
def BitfieldEnablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(True)
@native(name="BitfieldLeftToRight", ret=None, send_interp=True)
def BitfieldLeftToRight(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_LEFT_RIGHT)
@native(name="BitfieldRightToLeft", ret=None, send_interp=True)
def BitfieldRightToLeft(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_RIGHT_LEFT)
@native(name="ConvertBytesToDouble", ret=C.Double)
def ConvertBytesToDouble(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ConvertBytesToFloat", ret=C.Single)
def ConvertBytesToFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ConvertBytesToHFloat", ret=C.Single)
def ConvertBytesToHFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="ConvertDataToBytes", ret=C.Int)
def ConvertDataToBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="DeleteBytes", ret=None)
def DeleteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="DirectoryExists", ret=C.Int)
def DirectoryExists(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="FEof", ret=bool)
def FEof(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return C.stream_iseof(ctxt._io)
@native(name="FileSize", ret=int)
def FileSize(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return ctxt._io.size()
@native(name="FindFiles", ret=None)
def FindFiles(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="FPrintf", ret=C.Int)
def FPrintf(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="FSeek", ret=int)
def FSeek(params, ctxt, scope, stream, coord):
if len(params) != 1:
raise errors.InvalidArguments(
coord,
"{} args".format(len(params)),
"FSeek accepts only one argument",
)
pos = utils.evaluate(params[0], ctxt)
if pos > ctxt._io.size():
return -1
C.stream_seek(ctxt._io, pos, 0, "")
return 0
@native(name="FSkip", ret=int)
def FSkip(params, ctxt, scope, stream, coord):
if len(params) != 1:
raise errors.InvalidArguments(
coord,
"{} args".format(len(params)),
"FSkip accepts only one argument",
)
skip_amt = params[0]
while callable(skip_amt):
skip_amt = skip_amt(ctxt)
return C.stream_seek(ctxt._io, skip_amt, whence=1, path="")
@native(name="FTell", ret=int)
def FTell(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return C.stream_tell(ctxt._io, None)
@native(name="InsertBytes", ret=None)
def InsertBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="IsBigEndian", ret=bool)
def IsBigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return pfp.interp.Endian.current == pfp.interp.Endian.BIG
@native(name="IsLittleEndian", ret=bool)
def IsLittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return pfp.interp.Endian.current == pfp.interp.Endian.LITTLE
@native(name="LittleEndian", ret=None)
def LittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.interp.Endian.current = pfp.interp.Endian.LITTLE
@native(name="MakeDir", ret=C.Int)
def MakeDir(params, ctxt, scope, stream, coord):
raise NotImplementedError()
@native(name="OverwriteBytes", ret=None)
def OverwriteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
def _read_data(params, ctxt, cls, coord):
stream = ctxt._io
bits = stream._bits
curr_pos = stream.tell()
if len(params) == 1:
pos = utils.evaluate(params[0], ctxt)
stream.seek(pos, 0)
elif len(params) > 1:
raise errors.InvalidArguments(
coord, "at most 1 arguments", "{} args".format(len(params))
)
cls.fmtstr = pfp.interp.Endian.current + cls.fmtstr[1:]
res = cls.parse_stream(stream)
stream.seek(curr_pos, 0)
stream._bits = bits
return res
# --- 010 Editor ReadXXX interface functions --------------------------------
# Each wrapper below delegates to _read_data, which parses one value of the
# given construct type at the current (or an explicitly supplied) position
# without permanently moving the stream cursor.
@native(name="ReadByte", ret=int)
def ReadByte(params, ctxt, scope, stream, coord):
    """Read a signed 8-bit integer."""
    return _read_data(params, ctxt, C.Int8sb, coord)
@native(name="ReadDouble", ret=float)
def ReadDouble(params, ctxt, scope, stream, coord):
    """Read a 64-bit floating-point value."""
    return _read_data(params, ctxt, C.Double, coord)
@native(name="ReadFloat", ret=float)
def ReadFloat(params, ctxt, scope, stream, coord):
    """Read a 32-bit floating-point value."""
    return _read_data(params, ctxt, C.Single, coord)
@native(name="ReadHFloat", ret=float)
def ReadHFloat(params, ctxt, scope, stream, coord):
    """Read a half-precision float.

    NOTE(review): parsed with the single-precision construct ``C.Single``,
    not a 16-bit half-float type -- confirm this is intended.
    """
    return _read_data(params, ctxt, C.Single, coord)
@native(name="ReadInt", ret=int)
def ReadInt(params, ctxt, scope, stream, coord):
    """Read a signed 32-bit integer."""
    return _read_data(params, ctxt, C.Int32sb, coord)
@native(name="ReadInt64", ret=int)
def ReadInt64(params, ctxt, scope, stream, coord):
    """Read a signed 64-bit integer."""
    return _read_data(params, ctxt, C.Int64sb, coord)
@native(name="ReadQuad", ret=int)
def ReadQuad(params, ctxt, scope, stream, coord):
    """Read a signed 64-bit integer (alias of ReadInt64)."""
    return _read_data(params, ctxt, C.Int64sb, coord)
@native(name="ReadShort", ret=int)
def ReadShort(params, ctxt, scope, stream, coord):
    """Read a signed 16-bit integer."""
    return _read_data(params, ctxt, C.Int16sb, coord)
@native(name="ReadUByte", ret=int)
def ReadUByte(params, ctxt, scope, stream, coord):
    """Read an unsigned 8-bit integer."""
    return _read_data(params, ctxt, C.Byte, coord)
@native(name="ReadUInt", ret=int)
def ReadUInt(params, ctxt, scope, stream, coord):
    """Read an unsigned 32-bit integer."""
    return _read_data(params, ctxt, C.Int32ub, coord)
@native(name="ReadUInt64", ret=int)
def ReadUInt64(params, ctxt, scope, stream, coord):
    """Read an unsigned 64-bit integer."""
    return _read_data(params, ctxt, C.Int64ub, coord)
@native(name="ReadUQuad", ret=int)
def ReadUQuad(params, ctxt, scope, stream, coord):
    """Read an unsigned 64-bit integer without moving the stream cursor."""
    return _read_data(params, ctxt, C.Int64ub, coord)
@native(name="ReadUShort", ret=int)
def ReadUShort(params, ctxt, scope, stream, coord):
    """Read an unsigned 16-bit integer without moving the stream cursor."""
    return _read_data(params, ctxt, C.Int16ub, coord)
@native(name="ReadLine", ret=C.CString)
def ReadLine(params, ctxt, scope, stream, coord):
    # 010 Editor interface function that is not yet supported by this
    # interpreter; invoking it from a template raises NotImplementedError.
    raise NotImplementedError()
@native(name="ReadBytes", ret=None)
def ReadBytes(params, ctxt, scope, stream, coord):
    """Read ``n`` values into ``buffer`` starting at position ``pos``.

    Mirrors 010 Editor's ``ReadBytes(buffer, pos, n)``.  The values are
    parsed into the supplied uchar/char array without permanently moving
    the stream cursor: position and bit state are restored on return.

    :param params: ``(buffer, pos, n)`` -- a ``C.Bytes`` array of
        uchar/char fields, an integer start position, and an integer count
    :raises errors.InvalidArguments: on a wrong argument count or type
    """
    if len(params) != 3:
        raise errors.InvalidArguments(
            coord,
            "3 arguments (buffer, pos, n)",
            "{} args".format(len(params)),
        )
    if not isinstance(params[0], C.Bytes):
        raise errors.InvalidArguments(
            coord, "buffer must be Bytes", params[0].__class__.__name__
        )
    if params[0].field_cls not in [pfp.fields.UChar, C.Byte]:
        raise errors.InvalidArguments(
            coord,
            "buffer must be an array of uchar or char",
            params[0].field_cls.__name__,
        )
    if not isinstance(params[1], C.IntBase):
        raise errors.InvalidArguments(
            coord, "pos must be an integer", params[1].__class__.__name__
        )
    if not isinstance(params[2], C.IntBase):
        raise errors.InvalidArguments(
            coord, "n must be an integer", params[2].__class__.__name__
        )
    bits = stream._bits
    curr_pos = stream.tell()
    try:
        # BUG FIX: ``pos`` was validated above but never used, so the read
        # silently ignored the requested start position.  Per the 010 Editor
        # semantics, seek there first (mirroring _read_data's behavior).
        stream.seek(utils.evaluate(params[1], ctxt), 0)
        vals = [
            params[0].field_cls(stream)
            for _ in six.moves.range(utils.evaluate(params[2], ctxt))
        ]
    finally:
        # Always restore the cursor and bit state, even if reading raised.
        stream.seek(curr_pos, 0)
        stream._bits = bits
    params[0]._pfp__set_value(vals)
# --- Unimplemented 010 Editor interface functions --------------------------
# The string-reading, text-navigation, and file-writing helpers below are
# registered so that templates referencing them resolve to a native call,
# but none of them are supported yet: each one raises NotImplementedError
# when invoked.
@native(name="ReadString", ret=C.CString)
def ReadString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="ReadStringLength", ret=C.Int)
def ReadStringLength(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="ReadWLine", ret=C.CString)
def ReadWLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="ReadWString", ret=C.CString)
def ReadWString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="ReadWStringLength", ret=C.Int)
def ReadWStringLength(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextAddressToLine", ret=C.Long)
def TextAddressToLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextAddressToColumn", ret=C.Int)
def TextAddressToColumn(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextColumnToAddress", ret=C.Long)
def TextColumnToAddress(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextGetNumLines", ret=C.Long)
def TextGetNumLines(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextGetLineSize", ret=C.Int)
def TextGetLineSize(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextLineToAddress", ret=C.Long)
def TextLineToAddress(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextReadLine", ret=C.Int)
def TextReadLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextReadLineW", ret=C.Int)
def TextReadLineW(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextWriteLine", ret=None)
def TextWriteLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="TextWriteLineW", ret=None)
def TextWriteLineW(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteByte", ret=None)
def WriteByte(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteDouble", ret=None)
def WriteDouble(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteFloat", ret=None)
def WriteFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteHFloat", ret=None)
def WriteHFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteInt", ret=None)
def WriteInt(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteInt64", ret=None)
def WriteInt64(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteQuad", ret=None)
def WriteQuad(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteShort", ret=None)
def WriteShort(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteUByte", ret=None)
def WriteUByte(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteUInt", ret=None)
def WriteUInt(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteUInt64", ret=None)
def WriteUInt64(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteUQuad", ret=None)
def WriteUQuad(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteUShort", ret=None)
def WriteUShort(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteBytes", ret=None)
def WriteBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteString", ret=None)
def WriteString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
@native(name="WriteWString", ret=None)
def WriteWString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
| true
| true
|
1c4763580d072403c8ca37e045aa564412f3085f
| 3,801
|
py
|
Python
|
train_utils.py
|
Jack407/TFCNs_source_code
|
f41466ad18457dd6335287112191e5daacf6d80d
|
[
"MIT"
] | null | null | null |
train_utils.py
|
Jack407/TFCNs_source_code
|
f41466ad18457dd6335287112191e5daacf6d80d
|
[
"MIT"
] | null | null | null |
train_utils.py
|
Jack407/TFCNs_source_code
|
f41466ad18457dd6335287112191e5daacf6d80d
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import random
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import one_hot_encoder
from loss import mixed_focal_loss
from loss import dice_loss as dl
from torchvision import transforms
import os
def train_starter(args, model, snapshot_path):
    """Train ``model`` on the TFCNs dataset and checkpoint it periodically.

    Sets up file+stdout logging, builds the training DataLoader, runs SGD
    with a polynomially decayed learning rate, logs loss/lr to TensorBoard,
    and saves ``model.state_dict()`` snapshots under ``snapshot_path``.

    :param args: parsed CLI namespace; must provide ``base_lr``,
        ``num_classes``, ``batch_size``, ``n_gpu``, ``root_path``,
        ``list_dir``, ``img_size``, ``seed``, ``max_epochs`` and ``dataset``.
    :param model: the network to train (assumed to already be on GPU --
        inputs are moved with ``.cuda()`` below).
    :param snapshot_path: directory for the log file and checkpoints.
    :returns: the string ``"Training Finished!"`` on completion.
    """
    from preprocess import TFCNs_dataset, RandomGenerator
    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    base_lr = args.base_lr
    # Effective batch size scales with the number of GPUs (DataParallel).
    batch_size = args.batch_size * args.n_gpu
    db_train = TFCNs_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
                             transform=transforms.Compose(
                                 [RandomGenerator(output_size=[args.img_size, args.img_size])]))
    print("The length of train set is: {}".format(len(db_train)))

    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct but reproducible seed.
        random.seed(args.seed + worker_id)

    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True,
                             worker_init_fn=worker_init_fn)
    if args.n_gpu > 1:
        model = nn.DataParallel(model)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    writer = SummaryWriter(snapshot_path + '/log')
    iter_num = 0
    max_epoch = args.max_epochs
    max_iterations = args.max_epochs * len(trainloader)
    logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations))
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            image_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
            outputs = model(image_batch)
            label_batch = one_hot_encoder(label_batch, args.dataset, args.num_classes)
            outputs = torch.softmax(outputs, dim=1)
            loss = mixed_focal_loss(label_batch, outputs)
            # Average over the batch dimension (use torch's `dim`, consistent
            # with the softmax call above, instead of the numpy-style `axis`).
            loss = torch.mean(loss, dim=0)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Polynomial learning-rate decay with power 0.9.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))

        # Periodic checkpoints only start after the first half of training.
        save_interval = 50
        if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0:
            save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
            torch.save(model.state_dict(), save_mode_path)
            logging.info("save model to {}".format(save_mode_path))

        # Always save a final checkpoint on the last epoch, then stop.
        if epoch_num >= max_epoch - 1:
            save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
            torch.save(model.state_dict(), save_mode_path)
            logging.info("save model to {}".format(save_mode_path))
            iterator.close()
            break

    writer.close()
    return "Training Finished!"
| 43.689655
| 109
| 0.660353
|
import argparse
import logging
import random
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import one_hot_encoder
from loss import mixed_focal_loss
from loss import dice_loss as dl
from torchvision import transforms
import os
def train_starter(args, model, snapshot_path):
from preprocess import TFCNs_dataset, RandomGenerator
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size * args.n_gpu
db_train = TFCNs_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
transform=transforms.Compose(
[RandomGenerator(output_size=[args.img_size, args.img_size])]))
print("The length of train set is: {}".format(len(db_train)))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True,
worker_init_fn=worker_init_fn)
if args.n_gpu > 1:
model = nn.DataParallel(model)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
writer = SummaryWriter(snapshot_path + '/log')
iter_num = 0
max_epoch = args.max_epochs
max_iterations = args.max_epochs * len(trainloader)
logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations))
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
image_batch, label_batch = sampled_batch['image'], sampled_batch['label']
image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
outputs = model(image_batch)
label_batch = one_hot_encoder(label_batch,args.dataset,args.num_classes)
outputs = torch.softmax(outputs,dim=1)
loss = mixed_focal_loss(label_batch,outputs)
loss = torch.mean(loss,axis=0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))
save_interval = 50
if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0:
save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if epoch_num >= max_epoch - 1:
save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
iterator.close()
break
writer.close()
return "Training Finished!"
| true
| true
|
1c4763d96158d165cbae23a7f534f6cbe67be1a2
| 78,654
|
py
|
Python
|
source/codegen/metadata/nifgen/functions.py
|
zhindes/grpc-device
|
616aa913963098b12d276693895b7eb946f82df4
|
[
"MIT"
] | null | null | null |
source/codegen/metadata/nifgen/functions.py
|
zhindes/grpc-device
|
616aa913963098b12d276693895b7eb946f82df4
|
[
"MIT"
] | 23
|
2021-04-16T06:22:40.000Z
|
2021-06-11T05:51:45.000Z
|
source/codegen/metadata/nifgen/functions.py
|
zhindes/grpc-device
|
616aa913963098b12d276693895b7eb946f82df4
|
[
"MIT"
] | 1
|
2021-10-30T09:23:49.000Z
|
2021-10-30T09:23:49.000Z
|
functions = {
'AbortGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'AdjustSampleClockRelativeDelay':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'adjustmentTime',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'AllocateNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'AllocateWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'CheckAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ClearArbMemory':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32',
'enum':'SequenceHandle'
}
],
'returns':'ViStatus'
},
'ClearArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32',
'enum':'WaveformHandle'
}
],
'returns':'ViStatus'
},
'ClearError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32',
'enum':'FrequencyListOptions'
}
],
'returns':'ViStatus'
},
'ClearInterchangeWarnings':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Close':{
'cname' : 'niFgen_close',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'Commit':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureAmplitude':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureChannels':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channels',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureClockMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'clockMode',
'direction':'in',
'type':'ViInt32',
'enum':'ClockMode'
}
],
'returns':'ViStatus'
},
'ConfigureCustomFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfCoefficients',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'numberOfCoefficients'
}
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalLevelScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerWhen',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureFrequency':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOperationMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'operationMode',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureOutputEnabled':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'enabled',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ConfigureOutputImpedance':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'impedance',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOutputMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'outputMode',
'direction':'in',
'type':'ViInt32',
'enum':'OutputMode'
}
],
'returns':'ViStatus'
},
'ConfigureP2PEndpointFullnessStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'p2pEndpointFullnessLevel',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureReferenceClock':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'referenceClockSource',
'direction':'in',
'type':'ViConstString'
},
{
'name':'referenceClockFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSampleClockSource':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleClockSource',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSampleRate':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleRate',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSynchronization':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'synchronizationSource',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureTriggerMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerMode',
'direction':'in',
'type':'ViInt32',
'enum':'TriggerMode'
}
],
'returns':'ViStatus'
},
'CreateAdvancedArbSequence':{
'codegen_method': 'CustomCode',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sampleCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'markerLocationArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'coercedMarkersArray',
'direction':'out',
'type':'ViInt32[]',
'size':{
'mechanism':'custom-code',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'frequencyListLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'frequencyArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'durationArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'frequencyListHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type': 'struct NIComplexNumber_struct[]',
'grpc_type': 'repeated NIComplexNumber',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileHWS':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'useRateFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'useGainAndOffsetFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformSize',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'waveformDataArray',
'size': {
'mechanism': 'len',
'value': 'waveformSize'
},
'type': 'ViInt16[]',
'use_array': True
},
{
'direction': 'out',
'name': 'waveformHandle',
'type': 'ViInt32'
}
],
'returns': 'ViStatus'
},
'CreateWaveformFromFileI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'DefineUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
}
],
'returns':'ViStatus'
},
'DeleteNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DeleteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'scriptName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Disable':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'DisableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'EnableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'filterCorrectionFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'EnableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'EnableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ErrorHandler':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorMessage':{
'cname' : 'niFgen_error_message',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorQuery': {
'cname' : 'niFgen_error_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'errorCode',
'type': 'ViInt32'
},
{
'direction': 'out',
'name': 'errorMessage',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'ExportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'out',
'type':'ViAddr[]',
'size':{
'mechanism':'ivi-dance',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ExportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ExportSignal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'signal',
'direction':'in',
'enum':'Signal',
'type':'ViInt32'
},
{
'name':'signalIdentifier',
'direction':'in',
'type':'ViConstString'
},
{
'name':'outputTerminal',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'GetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'GetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'GetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'arraySize'
}
}
],
'returns':'ViStatus'
},
'GetChannelName':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'index',
'direction':'in',
'type':'ViInt32'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'channelString',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'out',
'type':'ViStatus'
},
{
'name':'errorDescriptionBufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'errorDescription',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'errorDescriptionBufferSize'
}
}
],
'returns':'ViStatus'
},
'GetExtCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetExtCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetExtCalRecommendedInterval':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'months',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'out',
'type':'ViReal64[]',
'size':{
'mechanism':'ivi-dance-with-a-twist',
'value':'arraySize',
'value_twist':'numberOfCoefficientsRead',
}
},
{
'name':'numberOfCoefficientsRead',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetHardwareState':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'state',
'direction':'out',
'type':'ViInt32',
'enum':'HardwareState'
}
],
'returns':'ViStatus'
},
'GetNextCoercionRecord':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coercionRecord',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetNextInterchangeWarning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'interchangeWarning',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetSelfCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetSelfCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetSelfCalSupported':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfCalSupported',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetStreamEndpointHandle':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'streamEndpoint',
'direction':'in',
'type':'ViConstString'
},
{
'name':'readerHandle',
'direction':'out',
'type':'ViUInt32'
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'in',
'type':'ViAddr[]',
'size':{
'mechanism':'len',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Init': {
'init_method': True,
'cname': 'niFgen_init ',
'parameters': [
{
'direction': 'in',
'name': 'resourceName',
'type': 'ViRsrc'
},
{
'direction': 'in',
'name': 'idQuery',
'type': 'ViBoolean'
},
{
'direction': 'in',
'name': 'resetDevice',
'type': 'ViBoolean'
},
{
'direction': 'out',
'name': 'vi',
'type': 'ViSession'
}
],
'returns': 'ViStatus',
},
'InitWithOptions':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'idQuery',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitializeWithChannels':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitiateGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InvalidateAllAttributes':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'IsDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'done',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'LockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ManualEnableP2PStream':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'QueryArbSeqCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfSequences',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumLoopCount',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryArbWfmCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfWaveforms',
'direction':'out',
'type':'ViInt32'
},
{
'name':'waveformQuantum',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumWaveformSize',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumWaveformSize',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryFreqListCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfFreqLists',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'maximumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'frequencyListDurationQuantum',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ReadCurrentTemperature':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'Reset':{
'cname' : 'niFgen_reset',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetAttribute':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
}
],
'returns':'ViStatus'
},
'ResetDevice':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetInterchangeCheck':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetWithDefaults':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'RevisionQuery': {
'cname' : 'niFgen_revision_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'instrumentDriverRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
},
{
'direction': 'out',
'name': 'firmwareRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'RouteSignalOut':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'routeSignalFrom',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalFrom'
},
{
'name':'routeSignalTo',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalTo'
}
],
'returns':'ViStatus'
},
'SelfCal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SelfTest':{
'cname' : 'niFgen_self_test',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfTestResult',
'direction':'out',
'type':'ViInt16'
},
{
'name':'selfTestMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'SendSoftwareEdgeTrigger': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'enum': 'Trigger',
'name': 'trigger',
'type': 'ViInt32',
},
{
'direction': 'in',
'name': 'triggerId',
'type': 'ViString'
}
],
'returns': 'ViStatus'
},
'SetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'SetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'SetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'SetNamedWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'UnlockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'WaitUntilDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maxTime',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteBinary16Waveform': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformHandle',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteComplexBinary16Waveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteP2PEndpointI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'endpointData',
'direction':'in',
'type':'ViInt16[]',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
}
],
'returns':'ViStatus'
},
'WriteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'script',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'WriteWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
}
}
| 25.729146
| 61
| 0.309902
|
functions = {
'AbortGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'AdjustSampleClockRelativeDelay':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'adjustmentTime',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'AllocateNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'AllocateWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CheckAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'CheckAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'CheckAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ClearArbMemory':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32',
'enum':'SequenceHandle'
}
],
'returns':'ViStatus'
},
'ClearArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32',
'enum':'WaveformHandle'
}
],
'returns':'ViStatus'
},
'ClearError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32',
'enum':'FrequencyListOptions'
}
],
'returns':'ViStatus'
},
'ClearInterchangeWarnings':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ClearUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Close':{
'cname' : 'niFgen_close',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'Commit':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureAmplitude':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'sequenceHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureArbWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'gain',
'direction':'in',
'type':'ViReal64'
},
{
'name':'offset',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureChannels':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channels',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureClockMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'clockMode',
'direction':'in',
'type':'ViInt32',
'enum':'ClockMode'
}
],
'returns':'ViStatus'
},
'ConfigureCustomFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfCoefficients',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'numberOfCoefficients'
}
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'edge',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureDigitalLevelScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
},
{
'name':'source',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerWhen',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequencyListHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureFrequency':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOperationMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'operationMode',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureOutputEnabled':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'enabled',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ConfigureOutputImpedance':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'impedance',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureOutputMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'outputMode',
'direction':'in',
'type':'ViInt32',
'enum':'OutputMode'
}
],
'returns':'ViStatus'
},
'ConfigureP2PEndpointFullnessStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'p2pEndpointFullnessLevel',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureReferenceClock':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'referenceClockSource',
'direction':'in',
'type':'ViConstString'
},
{
'name':'referenceClockFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSampleClockSource':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleClockSource',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSampleRate':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sampleRate',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ConfigureSoftwareEdgeStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ConfigureStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'amplitude',
'direction':'in',
'type':'ViReal64'
},
{
'name':'dcOffset',
'direction':'in',
'type':'ViReal64'
},
{
'name':'frequency',
'direction':'in',
'type':'ViReal64'
},
{
'name':'startPhase',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ConfigureSynchronization':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'synchronizationSource',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'ConfigureTriggerMode':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'triggerMode',
'direction':'in',
'type':'ViInt32',
'enum':'TriggerMode'
}
],
'returns':'ViStatus'
},
'CreateAdvancedArbSequence':{
'codegen_method': 'CustomCode',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sampleCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'markerLocationArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'coercedMarkersArray',
'direction':'out',
'type':'ViInt32[]',
'size':{
'mechanism':'custom-code',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateArbSequence':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sequenceLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformHandlesArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'loopCountsArray',
'direction':'in',
'type':'ViInt32[]',
'size':{
'mechanism':'len',
'value':'sequenceLength'
}
},
{
'name':'sequenceHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateFreqList':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'waveform',
'direction':'in',
'type':'ViInt32',
'enum':'Waveform'
},
{
'name':'frequencyListLength',
'direction':'in',
'type':'ViInt32'
},
{
'name':'frequencyArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'durationArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'frequencyListLength'
}
},
{
'name':'frequencyListHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type': 'struct NIComplexNumber_struct[]',
'grpc_type': 'repeated NIComplexNumber',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformFromFileHWS':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'useRateFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'useGainAndOffsetFromWaveform',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'CreateWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformSize',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'waveformDataArray',
'size': {
'mechanism': 'len',
'value': 'waveformSize'
},
'type': 'ViInt16[]',
'use_array': True
},
{
'direction': 'out',
'name': 'waveformHandle',
'type': 'ViInt32'
}
],
'returns': 'ViStatus'
},
'CreateWaveformFromFileI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'fileName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'byteOrder',
'direction':'in',
'type':'ViInt32',
'enum':'ByteOrder'
},
{
'name':'waveformHandle',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'DefineUserStandardWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'waveformDataArray',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'waveformSize'
}
}
],
'returns':'ViStatus'
},
'DeleteNamedWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DeleteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'scriptName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Disable':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'DisableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableScriptTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'triggerId',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'DisableStartTrigger':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'EnableAnalogFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'filterCorrectionFrequency',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'EnableDigitalFilter':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'EnableDigitalPatterning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ErrorHandler':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorMessage':{
'cname' : 'niFgen_error_message',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'in',
'type':'ViStatus'
},
{
'name':'errorMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'ErrorQuery': {
'cname' : 'niFgen_error_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'errorCode',
'type': 'ViInt32'
},
{
'direction': 'out',
'name': 'errorMessage',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'ExportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'out',
'type':'ViAddr[]',
'size':{
'mechanism':'ivi-dance',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ExportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'ExportSignal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'signal',
'direction':'in',
'enum':'Signal',
'type':'ViInt32'
},
{
'name':'signalIdentifier',
'direction':'in',
'type':'ViConstString'
},
{
'name':'outputTerminal',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'GetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'GetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'GetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'attributeValue',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'arraySize'
}
}
],
'returns':'ViStatus'
},
'GetChannelName':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'index',
'direction':'in',
'type':'ViInt32'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'channelString',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetError':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'errorCode',
'direction':'out',
'type':'ViStatus'
},
{
'name':'errorDescriptionBufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'errorDescription',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'ivi-dance',
'value':'errorDescriptionBufferSize'
}
}
],
'returns':'ViStatus'
},
'GetExtCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetExtCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetExtCalRecommendedInterval':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'months',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetFIRFilterCoefficients':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'arraySize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coefficientsArray',
'direction':'out',
'type':'ViReal64[]',
'size':{
'mechanism':'ivi-dance-with-a-twist',
'value':'arraySize',
'value_twist':'numberOfCoefficientsRead',
}
},
{
'name':'numberOfCoefficientsRead',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetHardwareState':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'state',
'direction':'out',
'type':'ViInt32',
'enum':'HardwareState'
}
],
'returns':'ViStatus'
},
'GetNextCoercionRecord':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'coercionRecord',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetNextInterchangeWarning':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'bufferSize',
'direction':'in',
'type':'ViInt32'
},
{
'name':'interchangeWarning',
'direction':'out',
'type':'ViChar[]',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufferSize'
}
}
],
'returns':'ViStatus'
},
'GetSelfCalLastDateAndTime':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'year',
'direction':'out',
'type':'ViInt32'
},
{
'name':'month',
'direction':'out',
'type':'ViInt32'
},
{
'name':'day',
'direction':'out',
'type':'ViInt32'
},
{
'name':'hour',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minute',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'GetSelfCalLastTemp':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'GetSelfCalSupported':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfCalSupported',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'GetStreamEndpointHandle':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'streamEndpoint',
'direction':'in',
'type':'ViConstString'
},
{
'name':'readerHandle',
'direction':'out',
'type':'ViUInt32'
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationBuffer':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'sizeInBytes',
'direction':'in',
'type':'ViInt32'
},
{
'name':'configuration',
'direction':'in',
'type':'ViAddr[]',
'size':{
'mechanism':'len',
'value':'sizeInBytes'
}
}
],
'returns':'ViStatus'
},
'ImportAttributeConfigurationFile':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'filePath',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'Init': {
'init_method': True,
'cname': 'niFgen_init ',
'parameters': [
{
'direction': 'in',
'name': 'resourceName',
'type': 'ViRsrc'
},
{
'direction': 'in',
'name': 'idQuery',
'type': 'ViBoolean'
},
{
'direction': 'in',
'name': 'resetDevice',
'type': 'ViBoolean'
},
{
'direction': 'out',
'name': 'vi',
'type': 'ViSession'
}
],
'returns': 'ViStatus',
},
'InitWithOptions':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'idQuery',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitializeWithChannels':{
'init_method' : True,
'parameters':[
{
'name':'resourceName',
'direction':'in',
'type':'ViRsrc'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'resetDevice',
'direction':'in',
'type':'ViBoolean'
},
{
'name':'optionString',
'direction':'in',
'type':'ViConstString'
},
{
'name':'vi',
'direction':'out',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InitiateGeneration':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'InvalidateAllAttributes':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'IsDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'done',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'LockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'ManualEnableP2PStream':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'QueryArbSeqCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfSequences',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumSequenceLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumLoopCount',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryArbWfmCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfWaveforms',
'direction':'out',
'type':'ViInt32'
},
{
'name':'waveformQuantum',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumWaveformSize',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumWaveformSize',
'direction':'out',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'QueryFreqListCapabilities':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maximumNumberOfFreqLists',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'maximumFrequencyListLength',
'direction':'out',
'type':'ViInt32'
},
{
'name':'minimumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'maximumFrequencyListDuration',
'direction':'out',
'type':'ViReal64'
},
{
'name':'frequencyListDurationQuantum',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'ReadCurrentTemperature':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'temperature',
'direction':'out',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'Reset':{
'cname' : 'niFgen_reset',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetAttribute':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
}
],
'returns':'ViStatus'
},
'ResetDevice':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetInterchangeCheck':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'ResetWithDefaults':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'RevisionQuery': {
'cname' : 'niFgen_revision_query',
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'out',
'name': 'instrumentDriverRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
},
{
'direction': 'out',
'name': 'firmwareRevision',
'size': {
'mechanism': 'fixed',
'value': 256
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'RouteSignalOut':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'routeSignalFrom',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalFrom'
},
{
'name':'routeSignalTo',
'direction':'in',
'type':'ViInt32',
'enum':'RouteSignalTo'
}
],
'returns':'ViStatus'
},
'SelfCal':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SelfTest':{
'cname' : 'niFgen_self_test',
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'selfTestResult',
'direction':'out',
'type':'ViInt16'
},
{
'name':'selfTestMessage',
'direction':'out',
'type':'ViChar[]',
'size':{
'mechanism':'fixed',
'value':256
}
}
],
'returns':'ViStatus'
},
'SendSoftwareEdgeTrigger': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'enum': 'Trigger',
'name': 'trigger',
'type': 'ViInt32',
},
{
'direction': 'in',
'name': 'triggerId',
'type': 'ViString'
}
],
'returns': 'ViStatus'
},
'SetAttributeViBoolean':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt32':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetAttributeViInt64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViInt64'
}
],
'returns':'ViStatus'
},
'SetAttributeViReal64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViReal64'
}
],
'returns':'ViStatus'
},
'SetAttributeViSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViSession'
}
],
'returns':'ViStatus'
},
'SetAttributeViString':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'attributeId',
'direction':'in',
'type':'ViAttr'
},
{
'name':'attributeValue',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'SetNamedWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'SetWaveformNextWritePosition':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'relativeTo',
'direction':'in',
'type':'ViInt32',
'enum':'RelativeTo'
},
{
'name':'offset',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'UnlockSession':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'callerHasLock',
'direction':'out',
'type':'ViBoolean'
}
],
'returns':'ViStatus'
},
'WaitUntilDone':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'maxTime',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteBinary16Waveform': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformHandle',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteComplexBinary16Waveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformI16': {
'parameters': [
{
'direction': 'in',
'name': 'vi',
'type': 'ViSession'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'waveformName',
'type': 'ViConstString'
},
{
'direction': 'in',
'name': 'size',
'type': 'ViInt32'
},
{
'direction': 'in',
'name': 'data',
'size': {
'mechanism': 'len',
'value': 'size'
},
'type': 'ViInt16[]',
'use_array': True
}
],
'returns': 'ViStatus'
},
'WriteP2PEndpointI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'endpointName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'endpointData',
'direction':'in',
'type':'ViInt16[]',
'size': {
'mechanism': 'len',
'value': 'numberOfSamples'
}
}
],
'returns':'ViStatus'
},
'WriteScript':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'script',
'direction':'in',
'type':'ViConstString'
}
],
'returns':'ViStatus'
},
'WriteWaveform':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'ViReal64[]',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'numberOfSamples',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'numberOfSamples'
}
},
{
'name':'waveformHandle',
'direction':'in',
'type':'ViInt32'
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexF64':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexNumber_struct[]',
'grpc_type':'repeated NIComplexNumber',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
},
'WriteNamedWaveformComplexI16':{
'parameters':[
{
'name':'vi',
'direction':'in',
'type':'ViSession'
},
{
'name':'channelName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'waveformName',
'direction':'in',
'type':'ViConstString'
},
{
'name':'size',
'direction':'in',
'type':'ViInt32'
},
{
'name':'data',
'direction':'in',
'type':'struct NIComplexI16_struct[]',
'grpc_type':'repeated NIComplexInt32',
'size':{
'mechanism':'len',
'value':'size'
}
}
],
'returns':'ViStatus'
}
}
| true
| true
|
1c476528ea9e0ab39dc368d76e84eab32c00fa45
| 724
|
py
|
Python
|
mldp/tests/transformers/test_seq_len_computer.py
|
prashantlv/mltoolkit
|
acc192bafc66b7661d541ef4f604b5e5ab7df5ca
|
[
"MIT"
] | 1
|
2020-10-03T05:23:31.000Z
|
2020-10-03T05:23:31.000Z
|
mldp/tests/transformers/test_seq_len_computer.py
|
prashantlv/mltoolkit
|
acc192bafc66b7661d541ef4f604b5e5ab7df5ca
|
[
"MIT"
] | null | null | null |
mldp/tests/transformers/test_seq_len_computer.py
|
prashantlv/mltoolkit
|
acc192bafc66b7661d541ef4f604b5e5ab7df5ca
|
[
"MIT"
] | null | null | null |
import unittest
from mldp.steps.transformers.nlp import SeqLenComputer
from mldp.utils.tools import DataChunk
from copy import deepcopy
import numpy as np
class TestSeqLenComputer(unittest.TestCase):
def test_output(self):
fn = "dummy"
new_fn = "dummy_len"
data = [[1, 2, 3], [12], ["a", "b", "d", "e"]]
actual_dc = DataChunk(**{fn: np.array(deepcopy(data))})
expected_dc = DataChunk(**{fn: np.array(deepcopy(data)),
new_fn: np.array([3, 1, 4])})
slc = SeqLenComputer(fname=fn, new_len_fname=new_fn)
actual_dc = slc(actual_dc)
self.assertTrue(actual_dc == expected_dc)
if __name__ == '__main__':
unittest.main()
| 27.846154
| 64
| 0.618785
|
import unittest
from mldp.steps.transformers.nlp import SeqLenComputer
from mldp.utils.tools import DataChunk
from copy import deepcopy
import numpy as np
class TestSeqLenComputer(unittest.TestCase):
def test_output(self):
fn = "dummy"
new_fn = "dummy_len"
data = [[1, 2, 3], [12], ["a", "b", "d", "e"]]
actual_dc = DataChunk(**{fn: np.array(deepcopy(data))})
expected_dc = DataChunk(**{fn: np.array(deepcopy(data)),
new_fn: np.array([3, 1, 4])})
slc = SeqLenComputer(fname=fn, new_len_fname=new_fn)
actual_dc = slc(actual_dc)
self.assertTrue(actual_dc == expected_dc)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c4765731326549e159d462a7abaa90cb1582cbf
| 181
|
py
|
Python
|
apps/profile/apps.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2
|
2018-05-18T08:38:29.000Z
|
2018-05-22T08:26:09.000Z
|
apps/profile/apps.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118
|
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/profile/apps.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class ProfileConfig(AppConfig):
name = 'apps.profile'
verbose_name = _('profile')
| 22.625
| 54
| 0.762431
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class ProfileConfig(AppConfig):
name = 'apps.profile'
verbose_name = _('profile')
| true
| true
|
1c47670eaf2832f39b529a294728b4e11a136702
| 629
|
py
|
Python
|
src/create_experiment.py
|
G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways
|
1f3992a529fed70fd488811d68128a1e255fac5f
|
[
"MIT"
] | 4
|
2018-11-09T16:18:28.000Z
|
2019-04-09T11:19:23.000Z
|
src/create_experiment.py
|
G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways
|
1f3992a529fed70fd488811d68128a1e255fac5f
|
[
"MIT"
] | null | null | null |
src/create_experiment.py
|
G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways
|
1f3992a529fed70fd488811d68128a1e255fac5f
|
[
"MIT"
] | 1
|
2020-05-28T18:48:17.000Z
|
2020-05-28T18:48:17.000Z
|
import sys
from utils import write_exp_utils
import pandas as pd
from utils import misc_utils
import psycopg2
from psycopg2.extras import Json, DictCursor
def main(argv):
    """Register an experiment config in the database and echo the newest id.

    argv[1]: experiment config identifier/path passed to ExperimentConfig.
    argv[2]: second ExperimentConfig argument -- presumably a config section
             or schema name; TODO confirm against write_exp_utils.
    """
    print(argv[1])
    w = write_exp_utils.ExperimentConfig(argv[1], argv[2])
    print("writing {} to database".format(argv[1]) )
    w.write_to_db()# write experiment on database
    # Sanity check: re-query the newest experiment_id to confirm the insert.
    # NOTE(review): the connection is never closed here -- rely on process exit.
    q = 'select experiment_id from rws_experiment.experiment_table order by experiment_id desc limit 1;'
    conn = misc_utils.connect_rds()
    print(pd.read_sql(q, conn))
if __name__== '__main__':
main(sys.argv)
| 29.952381
| 105
| 0.732909
|
import sys
from utils import write_exp_utils
import pandas as pd
from utils import misc_utils
import psycopg2
from psycopg2.extras import Json, DictCursor
def main(argv):
print(argv[1])
w = write_exp_utils.ExperimentConfig(argv[1], argv[2])
print("writing {} to database".format(argv[1]) )
w.write_to_db()
q = 'select experiment_id from rws_experiment.experiment_table order by experiment_id desc limit 1;'
conn = misc_utils.connect_rds()
print(pd.read_sql(q, conn))
if __name__== '__main__':
main(sys.argv)
| true
| true
|
1c4767c28a173b87d61645270342bcabb9c6929c
| 7,674
|
py
|
Python
|
setup.py
|
WildbookOrg/wbia-deprecate-tpl-brambox
|
9aa6a69f706d0653a65520c696a7cd66715b6a37
|
[
"MIT"
] | 2
|
2019-03-23T03:14:11.000Z
|
2019-11-21T07:16:13.000Z
|
setup.py
|
WildbookOrg/wbia-deprecate-tpl-brambox
|
9aa6a69f706d0653a65520c696a7cd66715b6a37
|
[
"MIT"
] | null | null | null |
setup.py
|
WildbookOrg/wbia-deprecate-tpl-brambox
|
9aa6a69f706d0653a65520c696a7cd66715b6a37
|
[
"MIT"
] | 1
|
2021-12-01T03:04:53.000Z
|
2021-12-01T03:04:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
from os.path import exists
from collections import OrderedDict
from setuptools import find_packages
from skbuild import setup
def native_mb_python_tag(plat_impl=None, version_info=None):
    """Return the multibuild/wheel python tag for an interpreter.

    Defaults to the currently running interpreter when *plat_impl* /
    *version_info* are not given.  CPython yields tags such as
    ``cp36-cp36m`` or ``cp38-cp38``; PyPy yields ``pypy-<ver>``.
    """
    if plat_impl is None:
        import platform
        plat_impl = platform.python_implementation()
    if version_info is None:
        import sys
        version_info = sys.version_info
    ver = '{}{}'.format(*version_info[:2])
    if plat_impl == 'CPython':
        if ver == '27':
            # TODO: detect cp27m vs cp27mu; assume a wide-unicode build.
            abi = 'mu'
        elif ver == '38':
            # CPython 3.8 dropped the ABI flag from the tag.
            abi = ''
        else:
            abi = 'm'
        return 'cp{0}-cp{0}{1}'.format(ver, abi)
    if plat_impl == 'PyPy':
        return 'pypy-{}'.format(ver)
    raise NotImplementedError(plat_impl)
def parse_version(fpath='brambox/__init__.py'):
    """
    Statically read ``__version__`` from a python file without importing it.
    """
    import ast
    if not exists(fpath):
        raise ValueError('fpath={!r} does not exist'.format(fpath))
    with open(fpath, 'r') as stream:
        tree = ast.parse(stream.read())
    version = None
    # Walk every assignment; the last ``__version__`` binding wins,
    # matching the behaviour of a NodeVisitor that overwrites on each hit.
    for node in ast.walk(tree):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if getattr(target, 'id', None) == '__version__':
                    version = node.value.s
    return version
def parse_long_description(fpath='README.rst'):
    """
    Return the README text, or '' when the file does not exist.
    """
    if not exists(fpath):
        return ''
    with open(fpath, 'r') as handle:
        return handle.read()
def parse_requirements(fname='requirements.txt', with_version=False):
    """
    Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if true include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
        python -c "import setup; print(chr(10).join(setup.parse_requirements(with_version=True)))"
    """
    from os.path import exists
    import re
    require_fpath = fname
    def parse_line(line):
        """
        Parse information from a line in a requirements text file
        """
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip, rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest # NOQA
                    info['version'] = (op, version)
            yield info
    def parse_require_file(fpath):
        # Yield parsed info for every non-blank, non-comment line of the file.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info
    def gen_packages_items():
        # Re-assemble each info dict into a requirement string, optionally
        # keeping the version spec and any environment marker.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item
    packages = list(gen_packages_items())
    return packages
NAME = 'wbia-brambox'
MB_PYTHON_TAG = native_mb_python_tag() # NOQA
AUTHORS = [
'EAVISE',
'Jason Parham',
'WildMe Developers',
]
AUTHOR_EMAIL = 'dev@wildme.org'
URL = 'https://github.com/WildbookOrg/wbia-tpl-brambox'
LICENSE = 'BSD'
DESCRIPTION = 'brambox - Basic Recipes for Annotations and Modeling'
KWARGS = OrderedDict(
name=NAME,
author=', '.join(AUTHORS),
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=parse_long_description('README.rst'),
long_description_content_type='text/x-rst',
url=URL,
license=LICENSE,
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'runtime': parse_requirements('requirements/runtime.txt'),
},
# --- VERSION ---
# The following settings retreive the version from git.
# See https://github.com/pypa/setuptools_scm/ for more information
setup_requires=['setuptools_scm'],
use_scm_version={
'write_to': 'brambox/_version.py',
'write_to_template': '__version__ = "{version}"',
'tag_regex': '^(?P<prefix>v)?(?P<version>[^\\+]+)(?P<suffix>.*)?$',
'local_scheme': 'dirty-tag',
},
packages=find_packages(),
include_package_data=False,
# List of classifiers available at:
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 6 - Mature',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
if __name__ == '__main__':
"""
python -c "import brambox; print(brambox.__file__)"
"""
setup(**KWARGS)
| 31.975
| 125
| 0.572974
|
from __future__ import absolute_import, division, print_function
import sys
from os.path import exists
from collections import OrderedDict
from setuptools import find_packages
from skbuild import setup
def native_mb_python_tag(plat_impl=None, version_info=None):
if plat_impl is None:
import platform
plat_impl = platform.python_implementation()
if version_info is None:
import sys
version_info = sys.version_info
major, minor = version_info[0:2]
ver = '{}{}'.format(major, minor)
if plat_impl == 'CPython':
impl = 'cp'
if ver == '27':
IS_27_BUILT_WITH_UNICODE = True
if IS_27_BUILT_WITH_UNICODE:
abi = 'mu'
else:
abi = 'm'
else:
if ver == '38':
abi = ''
else:
abi = 'm'
mb_tag = '{impl}{ver}-{impl}{ver}{abi}'.format(**locals())
elif plat_impl == 'PyPy':
abi = ''
impl = 'pypy'
ver = '{}{}'.format(major, minor)
mb_tag = '{impl}-{ver}'.format(**locals())
else:
raise NotImplementedError(plat_impl)
return mb_tag
def parse_version(fpath='brambox/__init__.py'):
import ast
if not exists(fpath):
raise ValueError('fpath={!r} does not exist'.format(fpath))
with open(fpath, 'r') as file_:
sourcecode = file_.read()
pt = ast.parse(sourcecode)
class VersionVisitor(ast.NodeVisitor):
def visit_Assign(self, node):
for target in node.targets:
if getattr(target, 'id', None) == '__version__':
self.version = node.value.s
visitor = VersionVisitor()
visitor.visit(pt)
return visitor.version
def parse_long_description(fpath='README.rst'):
if exists(fpath):
with open(fpath, 'r') as file:
return file.read()
return ''
def parse_requirements(fname='requirements.txt', with_version=False):
    """
    Parse the package dependencies listed in a requirements file, optionally
    stripping version specifiers.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if true include version specs

    Returns:
        List[str]: list of requirements items
    """
    from os.path import exists
    import re
    require_fpath = fname

    def parse_line(line):
        # Yield info dicts for one requirements line (recurses into -r files).
        if line.startswith('-r '):
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            else:
                # Split the package name off the version operator.
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # BUG FIX: this line was corrupted to
                        # "m_deps = map(...)", leaving `version` and
                        # `platform_deps` undefined (NameError at runtime).
                        # Restore the tuple unpack of "<version>;<marker>".
                        version, platform_deps = map(str.strip, rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed info for every non-blank, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # platform_deps handling is broken on python 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
NAME = 'wbia-brambox'
MB_PYTHON_TAG = native_mb_python_tag()
AUTHORS = [
'EAVISE',
'Jason Parham',
'WildMe Developers',
]
AUTHOR_EMAIL = 'dev@wildme.org'
URL = 'https://github.com/WildbookOrg/wbia-tpl-brambox'
LICENSE = 'BSD'
DESCRIPTION = 'brambox - Basic Recipes for Annotations and Modeling'
KWARGS = OrderedDict(
name=NAME,
author=', '.join(AUTHORS),
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=parse_long_description('README.rst'),
long_description_content_type='text/x-rst',
url=URL,
license=LICENSE,
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'runtime': parse_requirements('requirements/runtime.txt'),
},
setup_requires=['setuptools_scm'],
use_scm_version={
'write_to': 'brambox/_version.py',
'write_to_template': '__version__ = "{version}"',
'tag_regex': '^(?P<prefix>v)?(?P<version>[^\\+]+)(?P<suffix>.*)?$',
'local_scheme': 'dirty-tag',
},
packages=find_packages(),
include_package_data=False,
classifiers=[
'Development Status :: 6 - Mature',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
if __name__ == '__main__':
setup(**KWARGS)
| true
| true
|
1c476801c70edbae6a98a7915c2d93aa454b9a2d
| 5,022
|
py
|
Python
|
Analysis/SampleVisualization_AE.py
|
melodist/MELTNET
|
47548e4a027ea4e23cdcb5ba1f1d9aa1aa7bbf29
|
[
"MIT"
] | 9
|
2020-03-16T04:17:05.000Z
|
2022-02-08T12:51:45.000Z
|
Analysis/SampleVisualization_AE.py
|
melodist/MELTNET
|
47548e4a027ea4e23cdcb5ba1f1d9aa1aa7bbf29
|
[
"MIT"
] | 1
|
2019-11-26T08:18:16.000Z
|
2020-09-10T15:21:40.000Z
|
Analysis/SampleVisualization_AE.py
|
melodist/MELTNET
|
47548e4a027ea4e23cdcb5ba1f1d9aa1aa7bbf29
|
[
"MIT"
] | 3
|
2020-03-16T04:17:30.000Z
|
2021-12-02T07:10:22.000Z
|
"""
Sample Visualization
Make 2-D image of sample distribution
1-1. Extract Features using initial network
1-2. Extract Features using trained network
2. Using K-means to classify the patches
3. Dimension reduction using PCA
4. Visualize results
"""
import tensorflow as tf
import numpy as np
from Network import NetworkKeras
import os
import time
from Extraction import PatchExtraction
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from datetime import datetime
def SampleVisualization_AE(path_model, path_image):
    """ Visualize sample distribution using PCA.
    The result image will be saved on 'Results_%Y%m%d_%H%M%S'
    Input
    ______
    path_model: path of trained model weights (expects files '<path>CT', '<path>PT')
    path_image: path of test images, one sub-directory per patient
    Output
    ______
    One scatter plot per patient written to the results folder.
    """
    tf.enable_eager_execution()
    time_start = time.time()
    # Build untrained and trained autoencoders for both modalities (CT / PT).
    input_shape = (17 * 17)
    initial_model_CT = NetworkKeras.create_autoencoder(input_shape)
    initial_model_PT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_CT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_CT.load_weights(path_model + 'CT')
    trained_model_PT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_PT.load_weights(path_model + 'PT')
    # Feature extractors: cut each autoencoder at its l2-normalized bottleneck.
    initial_extractor_CT = tf.keras.models.Model(inputs=initial_model_CT.input,
                                                 outputs=initial_model_CT.get_layer('tf_op_layer_l2_normalize').output)
    initial_extractor_PT = tf.keras.models.Model(inputs=initial_model_PT.input,
                                                 outputs=initial_model_PT.get_layer('tf_op_layer_l2_normalize_2').output)
    feature_extractor_CT = tf.keras.models.Model(inputs=trained_model_CT.input,
                                                 outputs=trained_model_CT.get_layer('tf_op_layer_l2_normalize_4').output)
    feature_extractor_PT = tf.keras.models.Model(inputs=trained_model_PT.input,
                                                 outputs=trained_model_PT.get_layer('tf_op_layer_l2_normalize_6').output)
    # Crop windows (row/col ranges) for each modality.
    ind_CT = [[230, 380], [150, 370]]
    ind_PT = [[230, 380], [150, 370]]
    # Timestamped output folder for the plots.
    now = datetime.now()
    path_result = f"./Results_{now.strftime('%Y%m%d_%H%M%S')}/"
    os.makedirs(path_result)
    patient_dir = os.listdir(path_image)
    print(f'Patients Number: {len(patient_dir)}')
    for path_patient in patient_dir:
        # BUG FIX: this line previously ended with a stray backslash,
        # joining it onto the next statement and breaking the file's syntax.
        addr_patient = f'{path_image}/{path_patient}/'
        img_CT, img_PT = PatchExtraction.stackImages(addr_patient, ind_CT, ind_PT)
        patches_CT, patches_PT = PatchExtraction.patch_extraction_thres(img_CT, img_PT, 0)
        # Features from the untrained network (baseline distribution).
        print(f"Extract Features using initial network...")
        features_init_CT = initial_extractor_CT.predict(patches_CT, steps=1)
        features_init_PT = initial_extractor_PT.predict(patches_PT, steps=1)
        features_init = np.hstack((features_init_CT, features_init_PT))
        # Features from the trained network.
        print(f"Extract Features...")
        features_CT = feature_extractor_CT.predict(patches_CT, steps=1)
        features_PT = feature_extractor_PT.predict(patches_PT, steps=1)
        features = np.hstack((features_CT, features_PT))
        # Cluster trained features into 5 groups.
        print(f"K-means Clustering...")
        num_labels = 5
        model_k_means = KMeans(n_clusters=num_labels, random_state=0)
        model_k_means.fit(features)
        # Patch-grid geometry (num_x/num_y patches per slice; stride in px).
        num_x = 44
        num_y = 30
        stride = 5
        label_predict = model_k_means.fit_predict(features)
        label_predict_batch = label_predict.reshape((-1, num_y * num_x))
        # Project both feature sets into the 2-D PCA space fit on trained features.
        pca = PCA(n_components=2)
        features_low = pca.fit_transform(features)
        features_init_low = pca.transform(features_init)
        colors = ['salmon', 'orange', 'steelblue', 'violet', 'khaki']
        fig, ax = plt.subplots(2, figsize=(5, 5), constrained_layout=True)
        for i in range(5):
            # Top: initial-network features; bottom: trained-network features,
            # both colored by the trained k-means label.
            data_init = features_init_low[label_predict == i]
            X_init = data_init[:, 0]
            Y_init = data_init[:, 1]
            ax[0].scatter(X_init, Y_init, color=colors[i], label=i, s=1)
            data = features_low[label_predict == i]
            X = data[:, 0]
            Y = data[:, 1]
            ax[1].scatter(X, Y, color=colors[i], label=i, s=1)
        ax[0].legend(loc='best')
        ax[0].set_xticks([])
        ax[0].set_yticks([])
        ax[1].legend(loc='best')
        ax[1].set_xticks([])
        ax[1].set_yticks([])
        fig.suptitle('Distribution of patches')
        plt.savefig(f"{path_result}Plot_{path_patient}.png", format='png', dpi=300)
    time_end = time.time()
    print(f"Evaluation Finished! Elapsed time: {time_end - time_start}")
| 35.871429
| 121
| 0.660892
|
import tensorflow as tf
import numpy as np
from Network import NetworkKeras
import os
import time
from Extraction import PatchExtraction
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from datetime import datetime
def SampleVisualization_AE(path_model, path_image):
    """Visualize patch distribution in 2-D PCA space, before and after training.

    path_model: path prefix of trained weights (expects '<path>CT', '<path>PT').
    path_image: directory of test images, one sub-directory per patient.
    Saves one scatter plot per patient into a timestamped results folder.
    """
    tf.enable_eager_execution()
    time_start = time.time()
    # Untrained and trained autoencoders for both modalities (CT / PT).
    input_shape = (17 * 17)
    initial_model_CT = NetworkKeras.create_autoencoder(input_shape)
    initial_model_PT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_CT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_CT.load_weights(path_model + 'CT')
    trained_model_PT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_PT.load_weights(path_model + 'PT')
    # Cut each autoencoder at its l2-normalized bottleneck layer.
    initial_extractor_CT = tf.keras.models.Model(inputs=initial_model_CT.input,
                                                 outputs=initial_model_CT.get_layer('tf_op_layer_l2_normalize').output)
    initial_extractor_PT = tf.keras.models.Model(inputs=initial_model_PT.input,
                                                 outputs=initial_model_PT.get_layer('tf_op_layer_l2_normalize_2').output)
    feature_extractor_CT = tf.keras.models.Model(inputs=trained_model_CT.input,
                                                 outputs=trained_model_CT.get_layer('tf_op_layer_l2_normalize_4').output)
    feature_extractor_PT = tf.keras.models.Model(inputs=trained_model_PT.input,
                                                 outputs=trained_model_PT.get_layer('tf_op_layer_l2_normalize_6').output)
    # Crop windows (row/col ranges) per modality.
    ind_CT = [[230, 380], [150, 370]]
    ind_PT = [[230, 380], [150, 370]]
    now = datetime.now()
    path_result = f"./Results_{now.strftime('%Y%m%d_%H%M%S')}/"
    os.makedirs(path_result)
    patient_dir = os.listdir(path_image)
    print(f'Patients Number: {len(patient_dir)}')
    for path_patient in patient_dir:
        # BUG FIX: stray trailing backslash removed; it glued this assignment
        # onto the following statement and made the function a syntax error.
        addr_patient = f'{path_image}/{path_patient}/'
        img_CT, img_PT = PatchExtraction.stackImages(addr_patient, ind_CT, ind_PT)
        patches_CT, patches_PT = PatchExtraction.patch_extraction_thres(img_CT, img_PT, 0)
        print(f"Extract Features using initial network...")
        features_init_CT = initial_extractor_CT.predict(patches_CT, steps=1)
        features_init_PT = initial_extractor_PT.predict(patches_PT, steps=1)
        features_init = np.hstack((features_init_CT, features_init_PT))
        print(f"Extract Features...")
        features_CT = feature_extractor_CT.predict(patches_CT, steps=1)
        features_PT = feature_extractor_PT.predict(patches_PT, steps=1)
        features = np.hstack((features_CT, features_PT))
        print(f"K-means Clustering...")
        num_labels = 5
        model_k_means = KMeans(n_clusters=num_labels, random_state=0)
        model_k_means.fit(features)
        # Patch-grid geometry for reshaping labels back to slices.
        num_x = 44
        num_y = 30
        stride = 5
        label_predict = model_k_means.fit_predict(features)
        label_predict_batch = label_predict.reshape((-1, num_y * num_x))
        # 2-D PCA fit on trained features; initial features use the same axes.
        pca = PCA(n_components=2)
        features_low = pca.fit_transform(features)
        features_init_low = pca.transform(features_init)
        colors = ['salmon', 'orange', 'steelblue', 'violet', 'khaki']
        fig, ax = plt.subplots(2, figsize=(5, 5), constrained_layout=True)
        for i in range(5):
            data_init = features_init_low[label_predict == i]
            X_init = data_init[:, 0]
            Y_init = data_init[:, 1]
            ax[0].scatter(X_init, Y_init, color=colors[i], label=i, s=1)
            data = features_low[label_predict == i]
            X = data[:, 0]
            Y = data[:, 1]
            ax[1].scatter(X, Y, color=colors[i], label=i, s=1)
        ax[0].legend(loc='best')
        ax[0].set_xticks([])
        ax[0].set_yticks([])
        ax[1].legend(loc='best')
        ax[1].set_xticks([])
        ax[1].set_yticks([])
        fig.suptitle('Distribution of patches')
        plt.savefig(f"{path_result}Plot_{path_patient}.png", format='png', dpi=300)
    time_end = time.time()
    print(f"Evaluation Finished! Elapsed time: {time_end - time_start}")
| true
| true
|
1c4768746d5b6ffc5563045f2c062c9a11652afe
| 7,689
|
py
|
Python
|
tests/components/hue/test_init.py
|
sgrzys/AIS-home-assistant
|
7bfc4d6d90de75eea06702c36474d91bf38df3bf
|
[
"Apache-2.0"
] | 1
|
2019-04-22T06:05:09.000Z
|
2019-04-22T06:05:09.000Z
|
tests/components/hue/test_init.py
|
sgrzys/AIS-home-assistant
|
7bfc4d6d90de75eea06702c36474d91bf38df3bf
|
[
"Apache-2.0"
] | 2
|
2022-01-13T04:26:00.000Z
|
2022-03-12T01:05:37.000Z
|
tests/components/hue/test_init.py
|
sgrzys/AIS-home-assistant
|
7bfc4d6d90de75eea06702c36474d91bf38df3bf
|
[
"Apache-2.0"
] | 1
|
2021-09-20T01:52:31.000Z
|
2021-09-20T01:52:31.000Z
|
"""Test Hue setup process."""
from unittest.mock import Mock, patch
from homeassistant.setup import async_setup_component
from homeassistant.components import hue
from tests.common import mock_coro, MockConfigEntry
async def test_setup_with_no_config(hass):
    """Test that we do not discover anything or try to set up a bridge."""
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=[]):
        assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    # No flows started (nothing was discovered or configured)
    assert len(mock_config_entries.flow.mock_calls) == 0
    # No configs stored for the hue domain
    assert hass.data[hue.DOMAIN] == {}
async def test_setup_with_discovery_no_known_auth(hass, aioclient_mock):
"""Test discovering a bridge and not having known auth."""
aioclient_mock.get(hue.API_NUPNP, json=[
{
'internalipaddress': '0.0.0.0',
'id': 'abcd1234'
}
])
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
mock_config_entries.flow.async_init.return_value = mock_coro()
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {}
}) is True
# Flow started for discovered bridge
assert len(mock_config_entries.flow.mock_calls) == 1
assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
'host': '0.0.0.0',
'path': '.hue_abcd1234.conf',
}
# Config stored for domain.
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: '.hue_abcd1234.conf',
hue.CONF_ALLOW_HUE_GROUPS: hue.DEFAULT_ALLOW_HUE_GROUPS,
hue.CONF_ALLOW_UNREACHABLE: hue.DEFAULT_ALLOW_UNREACHABLE,
}
}
async def test_setup_with_discovery_known_auth(hass, aioclient_mock):
"""Test we don't do anything if we discover already configured hub."""
aioclient_mock.get(hue.API_NUPNP, json=[
{
'internalipaddress': '0.0.0.0',
'id': 'abcd1234'
}
])
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {}
}) is True
# Flow started for discovered bridge
assert len(mock_config_entries.flow.mock_calls) == 0
# Config stored for domain.
assert hass.data[hue.DOMAIN] == {}
async def test_setup_defined_hosts_known_auth(hass):
"""Test we don't initiate a config entry if config bridge is known."""
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
# Flow started for discovered bridge
assert len(mock_config_entries.flow.mock_calls) == 0
# Config stored for domain.
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
async def test_setup_defined_hosts_no_known_auth(hass):
"""Test we initiate config entry if config bridge is not known."""
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
mock_config_entries.flow.async_init.return_value = mock_coro()
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
# Flow started for discovered bridge
assert len(mock_config_entries.flow.mock_calls) == 1
assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
'host': '0.0.0.0',
'path': 'bla.conf',
}
# Config stored for domain.
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
async def test_config_passed_to_config_entry(hass):
"""Test that configured options for a host are loaded via config entry."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={
'host': '0.0.0.0',
})
entry.add_to_hass(hass)
mock_registry = Mock()
with patch.object(hue, 'HueBridge') as mock_bridge, \
patch('homeassistant.helpers.device_registry.async_get_registry',
return_value=mock_coro(mock_registry)):
mock_bridge.return_value.async_setup.return_value = mock_coro(True)
mock_bridge.return_value.api.config = Mock(
mac='mock-mac',
bridgeid='mock-bridgeid',
raw={
'modelid': 'mock-modelid',
'swversion': 'mock-swversion',
}
)
# Can't set name via kwargs
mock_bridge.return_value.api.config.name = 'mock-name'
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
assert len(mock_bridge.mock_calls) == 2
p_hass, p_entry, p_allow_unreachable, p_allow_groups = \
mock_bridge.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert p_allow_unreachable is True
assert p_allow_groups is False
assert len(mock_registry.mock_calls) == 1
assert mock_registry.mock_calls[0][2] == {
'config_entry': entry.entry_id,
'connections': {
('mac', 'mock-mac')
},
'identifiers': {
('hue', 'mock-bridgeid')
},
'manufacturer': 'Signify',
'name': 'mock-name',
'model': 'mock-modelid',
'sw_version': 'mock-swversion'
}
async def test_unload_entry(hass):
    """Test being able to unload an entry."""
    entry = MockConfigEntry(domain=hue.DOMAIN, data={
        'host': '0.0.0.0',
    })
    entry.add_to_hass(hass)
    with patch.object(hue, 'HueBridge') as mock_bridge, \
            patch('homeassistant.helpers.device_registry.async_get_registry',
                  return_value=mock_coro(Mock())):
        mock_bridge.return_value.async_setup.return_value = mock_coro(True)
        mock_bridge.return_value.api.config = Mock()
        assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    # Bridge was set up exactly once during component setup.
    assert len(mock_bridge.return_value.mock_calls) == 1
    mock_bridge.return_value.async_reset.return_value = mock_coro(True)
    assert await hue.async_unload_entry(hass, entry)
    # Unloading resets the bridge once and clears per-domain state.
    assert len(mock_bridge.return_value.async_reset.mock_calls) == 1
    assert hass.data[hue.DOMAIN] == {}
| 35.109589
| 78
| 0.613864
|
from unittest.mock import Mock, patch
from homeassistant.setup import async_setup_component
from homeassistant.components import hue
from tests.common import mock_coro, MockConfigEntry
async def test_setup_with_no_config(hass):
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert len(mock_config_entries.flow.mock_calls) == 0
assert hass.data[hue.DOMAIN] == {}
async def test_setup_with_discovery_no_known_auth(hass, aioclient_mock):
aioclient_mock.get(hue.API_NUPNP, json=[
{
'internalipaddress': '0.0.0.0',
'id': 'abcd1234'
}
])
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
mock_config_entries.flow.async_init.return_value = mock_coro()
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 1
assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
'host': '0.0.0.0',
'path': '.hue_abcd1234.conf',
}
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: '.hue_abcd1234.conf',
hue.CONF_ALLOW_HUE_GROUPS: hue.DEFAULT_ALLOW_HUE_GROUPS,
hue.CONF_ALLOW_UNREACHABLE: hue.DEFAULT_ALLOW_UNREACHABLE,
}
}
async def test_setup_with_discovery_known_auth(hass, aioclient_mock):
aioclient_mock.get(hue.API_NUPNP, json=[
{
'internalipaddress': '0.0.0.0',
'id': 'abcd1234'
}
])
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 0
assert hass.data[hue.DOMAIN] == {}
async def test_setup_defined_hosts_known_auth(hass):
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=['0.0.0.0']):
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 0
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
async def test_setup_defined_hosts_no_known_auth(hass):
with patch.object(hass, 'config_entries') as mock_config_entries, \
patch.object(hue, 'configured_hosts', return_value=[]):
mock_config_entries.flow.async_init.return_value = mock_coro()
assert await async_setup_component(hass, hue.DOMAIN, {
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
}) is True
assert len(mock_config_entries.flow.mock_calls) == 1
assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
'host': '0.0.0.0',
'path': 'bla.conf',
}
assert hass.data[hue.DOMAIN] == {
'0.0.0.0': {
hue.CONF_HOST: '0.0.0.0',
hue.CONF_FILENAME: 'bla.conf',
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True
}
}
async def test_config_passed_to_config_entry(hass):
    """YAML options reach the HueBridge constructor and the bridge is
    registered in the device registry with its identifying info."""
    entry = MockConfigEntry(domain=hue.DOMAIN, data={
        'host': '0.0.0.0',
    })
    entry.add_to_hass(hass)
    mock_registry = Mock()
    with patch.object(hue, 'HueBridge') as mock_bridge, \
            patch('homeassistant.helpers.device_registry.async_get_registry',
                  return_value=mock_coro(mock_registry)):
        mock_bridge.return_value.async_setup.return_value = mock_coro(True)
        mock_bridge.return_value.api.config = Mock(
            mac='mock-mac',
            bridgeid='mock-bridgeid',
            raw={
                'modelid': 'mock-modelid',
                'swversion': 'mock-swversion',
            }
        )
        # Set separately: ``name`` is a reserved kwarg of Mock() itself and
        # would not become an attribute if passed to the constructor above.
        mock_bridge.return_value.api.config.name = 'mock-name'
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {
                hue.CONF_BRIDGES: {
                    hue.CONF_HOST: '0.0.0.0',
                    hue.CONF_FILENAME: 'bla.conf',
                    hue.CONF_ALLOW_HUE_GROUPS: False,
                    hue.CONF_ALLOW_UNREACHABLE: True
                }
            }
        }) is True
    assert len(mock_bridge.mock_calls) == 2
    # mock_calls[0][1] is the positional-args tuple of the constructor call.
    p_hass, p_entry, p_allow_unreachable, p_allow_groups = \
        mock_bridge.mock_calls[0][1]
    assert p_hass is hass
    assert p_entry is entry
    assert p_allow_unreachable is True
    assert p_allow_groups is False
    # Exactly one device-registry registration, carrying the bridge identity.
    assert len(mock_registry.mock_calls) == 1
    assert mock_registry.mock_calls[0][2] == {
        'config_entry': entry.entry_id,
        'connections': {
            ('mac', 'mock-mac')
        },
        'identifiers': {
            ('hue', 'mock-bridgeid')
        },
        'manufacturer': 'Signify',
        'name': 'mock-name',
        'model': 'mock-modelid',
        'sw_version': 'mock-swversion'
    }
async def test_unload_entry(hass):
    """Unloading a config entry resets the bridge and clears hass.data."""
    entry = MockConfigEntry(domain=hue.DOMAIN, data={
        'host': '0.0.0.0',
    })
    entry.add_to_hass(hass)
    with patch.object(hue, 'HueBridge') as mock_bridge, \
            patch('homeassistant.helpers.device_registry.async_get_registry',
                  return_value=mock_coro(Mock())):
        mock_bridge.return_value.async_setup.return_value = mock_coro(True)
        mock_bridge.return_value.api.config = Mock()
        assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    # Setup performed exactly one call on the bridge instance (async_setup).
    assert len(mock_bridge.return_value.mock_calls) == 1
    mock_bridge.return_value.async_reset.return_value = mock_coro(True)
    assert await hue.async_unload_entry(hass, entry)
    # Unload must reset the bridge once and drop its entry from hass.data.
    assert len(mock_bridge.return_value.async_reset.mock_calls) == 1
    assert hass.data[hue.DOMAIN] == {}
| true
| true
|
1c476b5d686fb5d71b925dc5ae700b71ab106d76
| 3,587
|
py
|
Python
|
auto_xml.py
|
tdwitham/AutohammerPy
|
1621400fd148f012bc59176ad51aa05c5c879c4f
|
[
"BSD-2-Clause"
] | null | null | null |
auto_xml.py
|
tdwitham/AutohammerPy
|
1621400fd148f012bc59176ad51aa05c5c879c4f
|
[
"BSD-2-Clause"
] | null | null | null |
auto_xml.py
|
tdwitham/AutohammerPy
|
1621400fd148f012bc59176ad51aa05c5c879c4f
|
[
"BSD-2-Clause"
] | null | null | null |
# (c) 2016,2017 - Timothy D. Witham tim.wookie.witham@gmail.com
# Licensed under BSD 2-Clause
__author__ = 'wookie'
import pprint
from components.FileOps import writeLog, initialLogDir, makeLogDir
from components.infrastructure import getSysInfo
from components.MySQL import MySQLOps
# NOTE(review): a ``global`` statement at module scope is a no-op; DBOP is
# actually bound inside connectDB().  Left as-is for fidelity.
global DBOP
runConfig = dict()  # key/value pairs collected from the <config> XML section
secParms = dict()   # parameters of the current <run_sql>/<run_code> section
# Tags that ckheader() recognises as section headers.
# NOTE(review): preserved as in the original elif chain — it accepts
# 'import_code' but not 'load_code', even though finishSection()/doSection()
# handle 'load_code'.  Confirm whether that asymmetry is intentional.
_KNOWN_SECTION_TAGS = frozenset((
    'autohammer',
    'config',
    'connect',
    'import_code',
    'run_code',
    'run_sql',
    'sys_info',
))


def ckheader(cEvent, cTag, cText):
    """Return True when *cTag* names a recognised autohammer XML section.

    Args:
        cEvent: parser event name ('start' or 'end'); unused (the original
            computed an unused local from it — dead code, now removed).
        cTag: the XML tag to check.
        cText: element text; unused.

    Returns:
        bool: whether *cTag* is a known section header.
    """
    return cTag in _KNOWN_SECTION_TAGS
def finishSection(thisSection):
    """Run the wrap-up action for a completed XML section.

    Unknown section names fall through silently, exactly like the original
    if/elif chain.
    """
    def _do_connect():
        writeLog(1, '<connect>')
        connectDB(runConfig)
        writeLog(-1, '</connect>')

    def _do_sys_info():
        writeLog(1, '<sys_info>')
        getSysInfo()
        writeLog(-1, '</sys_info>')

    # Section name -> zero-argument wrap-up callable.
    dispatch = {
        'config': lambda: initialLogDir(runConfig),
        'connect': _do_connect,
        'sys_info': _do_sys_info,
        'run_sql': lambda: runSQL(runConfig, secParms),
        'run_code': lambda: runCode(runConfig, secParms),
        'load_code': loadCode,
        'autohammer': finishIt,
    }
    action = dispatch.get(thisSection)
    if action is not None:
        action()
def doSection(thisSection, cEvent, cTag, cText):
    """Record a parsed tag/text pair into the state dict for *thisSection*.

    Tags seen inside <config> or <load_code> go to the global ``runConfig``;
    tags inside <run_sql> or <run_code> go to the per-section ``secParms``.
    Any other section (or no current section) is ignored.

    Args:
        thisSection: name of the enclosing XML section, or None.
        cEvent: parser event name; unused (kept for interface stability).
        cTag: tag name, used as the dict key.
        cText: element text, stored as the value.
    """
    if thisSection is None:  # idiom fix: identity test instead of '== None'
        return
    # Duplicate branches of the original elif chain merged by target dict.
    if thisSection in ('config', 'load_code'):
        runConfig[cTag] = cText
    elif thisSection in ('run_sql', 'run_code'):
        secParms[cTag] = cText
def validateConfig():
    """Dump the collected configuration and create the run's log directory.

    NOTE(review): ``dbConfig`` is declared global but never assigned in this
    module, and ``copyFiles`` is neither defined nor imported here — as
    written this function raises NameError when called.  Presumably both
    come from a missing import; TODO confirm.
    """
    global runConfig
    global dbConfig
    print("runConfig")
    pprint.pprint(runConfig, width=1)
    print("dbConfig")
    pprint.pprint(dbConfig, width=1)
    print('Validate config')
    # Log-directory naming depends on the benchmark: TPC-C keys on warehouse
    # count, TPC-H on database scale factor.
    if dbConfig['test'].upper() == 'TPCC':
        runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['warehouses'])
        copyFiles(runConfig['logDir'])
    elif dbConfig['test'].upper() == 'TPCH':
        runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['db_scale'])
        copyFiles(runConfig['logDir'])
def validateTest():
    """Placeholder: validate the test definition (currently only logs)."""
    print('Validate test')
def setupCode():
    """Placeholder: prepare a code section (validation call is disabled)."""
    #validateCode()
    print('setting up code section - I think that this is a do not care')
def runCode(runConfig=None, secParms=None):
    """Execute a <run_code> section (placeholder implementation).

    Bug fix: finishSection() invokes ``runCode(runConfig, secParms)``, but
    the original definition took no parameters, so every <run_code> section
    raised TypeError.  Both parameters default to None, so any existing
    zero-argument caller keeps working.

    Args:
        runConfig: run-wide configuration dict (currently unused).
        secParms: parameters of the current section (currently unused).
    """
    print('Running a code section ')
def validateSQL():
    """Placeholder: validate the <run_sql> section (currently only logs)."""
    print('Validate SQL config')
def validateCode():
    """Placeholder: validate the <run_code> section (currently only logs)."""
    print('Validate Code config')
def loadCode():
    """Placeholder: handle a <load_code> section (currently only logs)."""
    print("Inside of load code")
def finishIt():
    """Final wrap-up when the closing <autohammer> tag is reached."""
    print("Done with Autohammer")
def runSQL(runConfig, secParms):
    """Execute a <run_sql> section on either the admin or the user connection.

    ``use_db == 'system'`` selects the administrative connection; anything
    else uses the regular user connection.  Connect / run / disconnect order
    is identical to the original.
    """
    global DBOP
    as_admin = secParms['use_db'] == 'system'
    if as_admin:
        DBOP.connectAdmin(runConfig)
    else:
        DBOP.connectUser(runConfig)
    DBOP.nowDoSQL(runConfig, secParms)
    if as_admin:
        DBOP.disconnectAdmin()
    else:
        DBOP.disconnectUser()
def connectDB(runConfig):
    """Bind the global DBOP database-operations object for the configured RDBMS.

    Bug fixes: the parameter was misspelled ``runCOnfig`` and silently
    ignored in favour of the module global (callers pass that same global,
    so using the parameter is behaviour-preserving); the unknown-RDBMS log
    call passed a single argument while every other writeLog() call in this
    module passes (indent_level, message); the error message contained a
    stray 'i'.

    Args:
        runConfig: run configuration; ``runConfig['rdbms']`` selects the
            database back end.
    """
    global DBOP
    rdbms = runConfig['rdbms'].lower()
    if rdbms == 'oracle':
        # NOTE(review): adminOracle is not defined/imported in this module.
        adminOracle(runConfig)
    elif rdbms == 'mysql':
        DBOP = MySQLOps()
    elif rdbms == 'mssql':
        # NOTE(review): MSSQLDB is not defined/imported in this module.
        DBOP = MSSQLDB()
    elif rdbms == 'pgsql':
        # NOTE(review): adminMSSQL is not defined/imported here — and calling
        # an MSSQL helper for pgsql looks like a copy/paste slip.  Confirm.
        adminMSSQL(runConfig)
    else:
        writeLog(0, "ERROR: Unknown RDBMS {}\n".format(runConfig['rdbms']))
| 26.182482
| 101
| 0.622805
|
__author__ = 'wookie'
import pprint
from components.FileOps import writeLog, initialLogDir, makeLogDir
from components.infrastructure import getSysInfo
from components.MySQL import MySQLOps
global DBOP
runConfig = dict()
secParms = dict()
def ckheader(cEvent, cTag, cText):
if (cEvent == 'start'):
setTo = cTag
else:
setTo = None
if (cTag == 'autohammer'):
return True
elif (cTag == 'config'):
return True
elif (cTag == 'connect'):
return True
elif cTag == 'import_code':
return True
elif cTag == 'run_code':
return True
elif (cTag == 'run_sql'):
return True
elif (cTag == 'sys_info'):
return True
return False
def finishSection(thisSection):
if (thisSection == 'config'):
initialLogDir(runConfig)
elif (thisSection == 'connect'):
writeLog(1, '<connect>')
connectDB(runConfig)
writeLog(-1,'</connect>')
elif (thisSection =='sys_info'):
writeLog(1, '<sys_info>')
getSysInfo()
writeLog(-1, '</sys_info>')
elif (thisSection =='run_sql'):
runSQL(runConfig, secParms)
elif (thisSection =='run_code'):
runCode(runConfig, secParms )
elif (thisSection =='load_code'):
loadCode()
elif (thisSection =='autohammer'):
finishIt()
def doSection(thisSection, cEvent, cTag, cText):
if thisSection == None:
return
elif thisSection == 'config':
runConfig[cTag] = cText
elif thisSection == 'run_sql':
secParms[cTag] = cText
elif thisSection == 'run_code':
secParms[cTag] = cText
elif thisSection == 'load_code':
runConfig[cTag] = cText
def validateConfig():
global runConfig
global dbConfig
print("runConfig")
pprint.pprint(runConfig, width=1)
print("dbConfig")
pprint.pprint(dbConfig, width=1)
print('Validate config')
if dbConfig['test'].upper() == 'TPCC':
runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['warehouses'])
copyFiles(runConfig['logDir'])
elif dbConfig['test'].upper() == 'TPCH':
runConfig['logDir'] = makeLogDir(dbConfig['rdbms'], dbConfig['test'], dbConfig['db_scale'])
copyFiles(runConfig['logDir'])
def validateTest():
print('Validate test')
def setupCode():
print('setting up code section - I think that this is a do not care')
def runCode():
print('Running a code section ')
def validateSQL():
print('Validate SQL config')
def validateCode():
print('Validate Code config')
def loadCode():
print("Inside of load code")
def finishIt():
print("Done with Autohammer")
def runSQL(runConfig, secParms):
global DBOP
if (secParms['use_db'] == 'system'):
DBOP.connectAdmin(runConfig)
DBOP.nowDoSQL(runConfig, secParms)
DBOP.disconnectAdmin()
else:
DBOP.connectUser(runConfig)
DBOP.nowDoSQL(runConfig, secParms)
DBOP.disconnectUser()
def connectDB(runCOnfig):
global DBOP
if runConfig['rdbms'].lower() == 'oracle':
adminOracle(runConfig)
elif runConfig['rdbms'].lower() == 'mysql':
DBOP = MySQLOps()
elif runConfig['rdbms'].lower() == 'mssql':
DBOP = MSSQLDB()
elif runConfig['rdbms'].lower() == 'pgsql':
adminMSSQL(runConfig)
else:
writeLog("ERROR: Unknown RDBMS {}i\n".format(runConfig['rdbms']))
| true
| true
|
1c476bd27893d69e83ccb306a1d2ce80722a4ad1
| 9,534
|
py
|
Python
|
piqa/fsim.py
|
francois-rozet/spiq
|
a2e68c38da9129c85867e77641ed29d88e84c9d7
|
[
"MIT"
] | 19
|
2020-10-12T13:57:21.000Z
|
2020-12-05T12:23:41.000Z
|
piqa/fsim.py
|
francois-rozet/spiq
|
a2e68c38da9129c85867e77641ed29d88e84c9d7
|
[
"MIT"
] | null | null | null |
piqa/fsim.py
|
francois-rozet/spiq
|
a2e68c38da9129c85867e77641ed29d88e84c9d7
|
[
"MIT"
] | null | null | null |
r"""Feature Similarity (FSIM)
This module implements the FSIM in PyTorch.
Original:
https://www4.comp.polyu.edu.hk/~cslzhang/IQA/FSIM/FSIM.htm
References:
.. [Zhang2011] FSIM: A Feature Similarity Index for Image Quality Assessment (Zhang et al., 2011)
.. [Kovesi1999] Image Features From Phase Congruency (Kovesi, 1999)
"""
import math
import torch
import torch.fft as fft
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .utils import _jit, assert_type, reduce_tensor
from .utils import complex as cx
from .utils.color import ColorConv
from .utils.functional import (
scharr_kernel,
gradient_kernel,
filter_grid,
log_gabor,
channel_conv,
l2_norm,
)
@_jit
def fsim(
    x: Tensor,
    y: Tensor,
    pc_x: Tensor,
    pc_y: Tensor,
    kernel: Tensor,
    value_range: float = 1.,
    t1: float = 0.85,
    t2: float = 160. / (255. ** 2),
    t3: float = 200. / (255. ** 2),
    t4: float = 200. / (255. ** 2),
    lmbda: float = 0.03,
) -> Tensor:
    r"""Returns the FSIM between :math:`x` and :math:`y`,
    without color space conversion and downsampling.

    Args:
        x: An input tensor, :math:`(N, 3 \text{ or } 1, H, W)`.
        y: A target tensor, :math:`(N, 3 \text{ or } 1, H, W)`.
        pc_x: The input phase congruency, :math:`(N, H, W)`.
        pc_y: The target phase congruency, :math:`(N, H, W)`.
        kernel: A gradient kernel, :math:`(2, 1, K, K)`.
        value_range: The value range :math:`L` of the inputs (usually `1.` or `255`).

    Note:
        For the remaining arguments, refer to [Zhang2011]_.

    Returns:
        The FSIM vector, :math:`(N,)`.

    Example:
        >>> x = torch.rand(5, 3, 256, 256)
        >>> y = torch.rand(5, 3, 256, 256)
        >>> filters = pc_filters(x)
        >>> pc_x = phase_congruency(x[:, :1], filters)
        >>> pc_y = phase_congruency(y[:, :1], filters)
        >>> kernel = gradient_kernel(scharr_kernel())
        >>> l = fsim(x, y, pc_x, pc_y, kernel)
        >>> l.size()
        torch.Size([5])
    """
    # The t2..t4 constants are defined for L=255; rescale them to the
    # caller's value range.
    t2 *= value_range ** 2
    t3 *= value_range ** 2
    t4 *= value_range ** 2
    # First channel is the luminance (Y) channel of each image.
    y_x, y_y = x[:, :1], y[:, :1]
    # Phase congruency similarity
    pc_m = torch.max(pc_x, pc_y)
    s_pc = (2 * pc_x * pc_y + t1) / (pc_x ** 2 + pc_y ** 2 + t1)
    # Gradient magnitude similarity
    pad = kernel.size(-1) // 2
    g_x = l2_norm(channel_conv(y_x, kernel, padding=pad), dims=[1])
    g_y = l2_norm(channel_conv(y_y, kernel, padding=pad), dims=[1])
    s_g = (2 * g_x * g_y + t2) / (g_x ** 2 + g_y ** 2 + t2)
    # Chrominance similarity
    s_l = s_pc * s_g
    if x.size(1) == 3:
        # Channels 1 and 2 are the chrominance (I, Q) planes.
        i_x, i_y = x[:, 1], y[:, 1]
        q_x, q_y = x[:, 2], y[:, 2]
        s_i = (2 * i_x * i_y + t3) / (i_x ** 2 + i_y ** 2 + t3)
        s_q = (2 * q_x * q_y + t4) / (q_x ** 2 + q_y ** 2 + t4)
        s_iq = s_i * s_q
        # Raised to lmbda via a complex power — presumably so that negative
        # similarity values under a fractional exponent stay defined; confirm.
        s_iq = cx.complx(s_iq, torch.zeros_like(s_iq))
        s_iq_lambda = cx.real(cx.pow(s_iq, lmbda))
        s_l = s_l * s_iq_lambda
    # Feature similarity: PC-weighted average of the combined similarity map.
    fs = (s_l * pc_m).sum(dim=(-1, -2)) / pc_m.sum(dim=(-1, -2))
    return fs
@_jit
def pc_filters(
    x: Tensor,
    scales: int = 4,
    orientations: int = 4,
    wavelength: float = 6.,
    factor: float = 2.,
    sigma_f: float = 0.5978,  # -log(0.55)
    sigma_theta: float = 0.6545,  # pi / (4 * 1.2)
) -> Tensor:
    r"""Returns the log-Gabor filters for :func:`phase_congruency`.

    Args:
        x: An input tensor, :math:`(*, H, W)`.
        scales: The number of scales, :math:`S_1`.
        orientations: The number of orientations, :math:`S_2`.

    Note:
        For the remaining arguments, refer to [Kovesi1999]_.

    Returns:
        The filters tensor, :math:`(S_1, S_2, H, W)`.
    """
    r, theta = filter_grid(x)
    # Low-pass filter (Butterworth-style falloff, cutoff 0.45, order 15)
    lowpass = 1 / (1 + (r / 0.45) ** (2 * 15))
    # Radial component: one log-Gabor band per scale, center frequencies
    # spaced geometrically by `factor` starting from 1/wavelength.
    radial = []
    for i in range(scales):
        f_0 = 1 / (wavelength * factor ** i)
        lg = log_gabor(r, f_0, sigma_f)
        radial.append(lg)
    radial = torch.stack(radial)
    # Angular component: Gaussian falloff around each orientation theta_j.
    cos_theta = torch.cos(theta)
    sin_theta = torch.sin(theta)
    theta_j = math.pi * torch.arange(orientations).to(x) / orientations
    theta_j = theta_j.reshape(orientations, 1, 1)
    ## Measure (theta - theta_j) in the sine/cosine domains
    ## to prevent wrap-around errors
    delta_sin = sin_theta * theta_j.cos() - cos_theta * theta_j.sin()
    delta_cos = cos_theta * theta_j.cos() + sin_theta * theta_j.sin()
    delta_theta = torch.atan2(delta_sin, delta_cos)
    angular = torch.exp(-delta_theta ** 2 / (2 * sigma_theta ** 2))
    # Combination: broadcast scales over dim 0 and orientations over dim 1.
    filters = lowpass * radial[:, None] * angular[None, :]
    return filters
@_jit
def phase_congruency(
    x: Tensor,
    filters: Tensor,
    value_range: float = 1.,
    k: float = 2.,
    rescale: float = 1.7,
    eps: float = 1e-8,
) -> Tensor:
    r"""Returns the Phase Congruency (PC) of :math:`x`.

    Args:
        x: An input tensor, :math:`(N, 1, H, W)`.
        filters: The frequency domain filters, :math:`(S_1, S_2, H, W)`.
        value_range: The value range :math:`L` of the input (usually `1.` or `255`).

    Note:
        For the remaining arguments, refer to [Kovesi1999]_.

    Returns:
        The PC tensor, :math:`(N, H, W)`.

    Example:
        >>> x = torch.rand(5, 1, 256, 256)
        >>> filters = pc_filters(x)
        >>> pc = phase_congruency(x, filters)
        >>> pc.size()
        torch.Size([5, 256, 256])
    """
    # Work on a fixed [0, 255] scale so the noise-threshold constants hold.
    x = x * (255. / value_range)
    # Filters: also needed in the spatial domain (real part) for the
    # expected-energy estimate below.
    M_hat = filters
    M = fft.ifft2(M_hat)
    M = cx.real(torch.view_as_real(M))
    # Even & odd (real and imaginary) responses
    eo = fft.ifft2(fft.fft2(x[:, None]) * M_hat)
    eo = torch.view_as_real(eo)
    # Amplitude
    A = cx.mod(eo)
    # Expected E^2 — noise energy estimated from the median response of the
    # smallest-scale filter bank.
    A2 = A[:, 0] ** 2
    median_A2, _ = A2.flatten(-2).median(dim=-1)
    expect_A2 = median_A2 / math.log(2)
    expect_M2_hat = (M_hat[0] ** 2).mean(dim=(-1, -2))
    expect_MiMj = (M[:, None] * M[None, :]).sum(dim=(0, 1, 3, 4))
    expect_E2 = expect_A2 * expect_MiMj / expect_M2_hat
    # Threshold: mean + k standard deviations of the Rayleigh-distributed
    # noise response.
    sigma_G = expect_E2.sqrt()
    mu_R = sigma_G * (math.pi / 2) ** 0.5
    sigma_R = sigma_G * (2 - math.pi / 2) ** 0.5
    T = mu_R + k * sigma_R
    T = T / rescale  # empirical rescaling
    T = T[..., None, None]
    # Phase deviation: eo projected on the unit mean-phase vector, minus the
    # orthogonal component.
    FH = eo.sum(dim=1, keepdim=True)
    phi_eo = FH / (cx.mod(FH)[..., None] + eps)
    E = cx.dot(eo, phi_eo) - cx.dot(eo, cx.turn(phi_eo)).abs()
    E = E.sum(dim=1)
    # Phase congruency: noise-thresholded energy normalized by amplitude.
    pc = (E - T).relu().sum(dim=1) / (A.sum(dim=(1, 2)) + eps)
    return pc
class FSIM(nn.Module):
    r"""Creates a criterion that measures the FSIM
    between an input and a target.

    Before applying :func:`fsim`, the input and target are converted from
    RGB to Y(IQ) and downsampled by a factor :math:`\frac{\min(H, W)}{256}`.

    Args:
        chromatic: Whether to use the chromatic channels (IQ) or not.
        downsample: Whether downsampling is enabled or not.
        kernel: A gradient kernel, :math:`(2, 1, K, K)`.
            If `None`, use the Scharr kernel instead.
        reduction: Specifies the reduction to apply to the output:
            `'none'` | `'mean'` | `'sum'`.

    Note:
        `**kwargs` are passed to :func:`fsim`.

    Shapes:
        input: :math:`(N, 3, H, W)`
        target: :math:`(N, 3, H, W)`
        output: :math:`(N,)` or :math:`()` depending on `reduction`

    Example:
        >>> criterion = FSIM().cuda()
        >>> x = torch.rand(5, 3, 256, 256, requires_grad=True).cuda()
        >>> y = torch.rand(5, 3, 256, 256).cuda()
        >>> l = 1 - criterion(x, y)
        >>> l.size()
        torch.Size([])
        >>> l.backward()
    """

    def __init__(
        self,
        chromatic: bool = True,
        downsample: bool = True,
        kernel: Tensor = None,
        reduction: str = 'mean',
        **kwargs,
    ):
        super().__init__()
        if kernel is None:
            kernel = gradient_kernel(scharr_kernel())
        self.register_buffer('kernel', kernel)
        # Placeholder buffer; the real log-Gabor filters are built lazily in
        # forward() once the spatial size of the input is known.
        self.register_buffer('filters', torch.zeros((0, 0, 0, 0)))
        self.convert = ColorConv('RGB', 'YIQ' if chromatic else 'Y')
        self.downsample = downsample
        self.reduction = reduction
        self.value_range = kwargs.get('value_range', 1.)
        self.kwargs = kwargs

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Compute the (reduced) FSIM between *input* and *target*."""
        assert_type(
            input, target,
            device=self.kernel.device,
            dim_range=(4, 4),
            n_channels=3,
            value_range=(0., self.value_range),
        )
        # Downsample so that min(H, W) is about 256, as in the reference code.
        if self.downsample:
            _, _, h, w = input.size()
            M = round(min(h, w) / 256)
            if M > 1:
                input = F.avg_pool2d(input, kernel_size=M, ceil_mode=True)
                target = F.avg_pool2d(target, kernel_size=M, ceil_mode=True)
        # RGB to Y(IQ)
        input = self.convert(input)
        target = self.convert(target)
        # Phase congruency — NOTE(review): rebuilding the filters here
        # replaces the registered buffer with a plain tensor assignment and
        # mutates module state during forward(); confirm this is intended
        # when input sizes vary across calls.
        if self.filters.shape[-2:] != input.shape[-2:]:
            self.filters = pc_filters(input)
        pc_input = phase_congruency(input[:, :1], self.filters, self.value_range)
        pc_target = phase_congruency(target[:, :1], self.filters, self.value_range)
        # FSIM
        l = fsim(input, target, pc_input, pc_target, kernel=self.kernel, **self.kwargs)
        return reduce_tensor(l, self.reduction)
| 27.877193
| 101
| 0.560835
|
import math
import torch
import torch.fft as fft
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .utils import _jit, assert_type, reduce_tensor
from .utils import complex as cx
from .utils.color import ColorConv
from .utils.functional import (
scharr_kernel,
gradient_kernel,
filter_grid,
log_gabor,
channel_conv,
l2_norm,
)
@_jit
def fsim(
x: Tensor,
y: Tensor,
pc_x: Tensor,
pc_y: Tensor,
kernel: Tensor,
value_range: float = 1.,
t1: float = 0.85,
t2: float = 160. / (255. ** 2),
t3: float = 200. / (255. ** 2),
t4: float = 200. / (255. ** 2),
lmbda: float = 0.03,
) -> Tensor:
t2 *= value_range ** 2
t3 *= value_range ** 2
t4 *= value_range ** 2
y_x, y_y = x[:, :1], y[:, :1]
pc_m = torch.max(pc_x, pc_y)
s_pc = (2 * pc_x * pc_y + t1) / (pc_x ** 2 + pc_y ** 2 + t1)
pad = kernel.size(-1) // 2
g_x = l2_norm(channel_conv(y_x, kernel, padding=pad), dims=[1])
g_y = l2_norm(channel_conv(y_y, kernel, padding=pad), dims=[1])
s_g = (2 * g_x * g_y + t2) / (g_x ** 2 + g_y ** 2 + t2)
s_l = s_pc * s_g
if x.size(1) == 3:
i_x, i_y = x[:, 1], y[:, 1]
q_x, q_y = x[:, 2], y[:, 2]
s_i = (2 * i_x * i_y + t3) / (i_x ** 2 + i_y ** 2 + t3)
s_q = (2 * q_x * q_y + t4) / (q_x ** 2 + q_y ** 2 + t4)
s_iq = s_i * s_q
s_iq = cx.complx(s_iq, torch.zeros_like(s_iq))
s_iq_lambda = cx.real(cx.pow(s_iq, lmbda))
s_l = s_l * s_iq_lambda
fs = (s_l * pc_m).sum(dim=(-1, -2)) / pc_m.sum(dim=(-1, -2))
return fs
@_jit
def pc_filters(
x: Tensor,
scales: int = 4,
orientations: int = 4,
wavelength: float = 6.,
factor: float = 2.,
sigma_f: float = 0.5978,
sigma_theta: float = 0.6545,
) -> Tensor:
r, theta = filter_grid(x)
lowpass = 1 / (1 + (r / 0.45) ** (2 * 15))
radial = []
for i in range(scales):
f_0 = 1 / (wavelength * factor ** i)
lg = log_gabor(r, f_0, sigma_f)
radial.append(lg)
radial = torch.stack(radial)
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
theta_j = math.pi * torch.arange(orientations).to(x) / orientations
theta_j = theta_j.reshape(orientations, 1, 1)
_cos = cos_theta * theta_j.cos() + sin_theta * theta_j.sin()
delta_theta = torch.atan2(delta_sin, delta_cos)
angular = torch.exp(-delta_theta ** 2 / (2 * sigma_theta ** 2))
filters = lowpass * radial[:, None] * angular[None, :]
return filters
@_jit
def phase_congruency(
x: Tensor,
filters: Tensor,
value_range: float = 1.,
k: float = 2.,
rescale: float = 1.7,
eps: float = 1e-8,
) -> Tensor:
x = x * (255. / value_range)
M_hat = filters
M = fft.ifft2(M_hat)
M = cx.real(torch.view_as_real(M))
eo = fft.ifft2(fft.fft2(x[:, None]) * M_hat)
eo = torch.view_as_real(eo)
A = cx.mod(eo)
A2 = A[:, 0] ** 2
median_A2, _ = A2.flatten(-2).median(dim=-1)
expect_A2 = median_A2 / math.log(2)
expect_M2_hat = (M_hat[0] ** 2).mean(dim=(-1, -2))
expect_MiMj = (M[:, None] * M[None, :]).sum(dim=(0, 1, 3, 4))
expect_E2 = expect_A2 * expect_MiMj / expect_M2_hat
sigma_G = expect_E2.sqrt()
mu_R = sigma_G * (math.pi / 2) ** 0.5
sigma_R = sigma_G * (2 - math.pi / 2) ** 0.5
T = mu_R + k * sigma_R
T = T / rescale
T = T[..., None, None]
FH = eo.sum(dim=1, keepdim=True)
phi_eo = FH / (cx.mod(FH)[..., None] + eps)
E = cx.dot(eo, phi_eo) - cx.dot(eo, cx.turn(phi_eo)).abs()
E = E.sum(dim=1)
pc = (E - T).relu().sum(dim=1) / (A.sum(dim=(1, 2)) + eps)
return pc
class FSIM(nn.Module):
def __init__(
self,
chromatic: bool = True,
downsample: bool = True,
kernel: Tensor = None,
reduction: str = 'mean',
**kwargs,
):
super().__init__()
if kernel is None:
kernel = gradient_kernel(scharr_kernel())
self.register_buffer('kernel', kernel)
self.register_buffer('filters', torch.zeros((0, 0, 0, 0)))
self.convert = ColorConv('RGB', 'YIQ' if chromatic else 'Y')
self.downsample = downsample
self.reduction = reduction
self.value_range = kwargs.get('value_range', 1.)
self.kwargs = kwargs
def forward(self, input: Tensor, target: Tensor) -> Tensor:
assert_type(
input, target,
device=self.kernel.device,
dim_range=(4, 4),
n_channels=3,
value_range=(0., self.value_range),
)
if self.downsample:
_, _, h, w = input.size()
M = round(min(h, w) / 256)
if M > 1:
input = F.avg_pool2d(input, kernel_size=M, ceil_mode=True)
target = F.avg_pool2d(target, kernel_size=M, ceil_mode=True)
input = self.convert(input)
target = self.convert(target)
if self.filters.shape[-2:] != input.shape[-2:]:
self.filters = pc_filters(input)
pc_input = phase_congruency(input[:, :1], self.filters, self.value_range)
pc_target = phase_congruency(target[:, :1], self.filters, self.value_range)
l = fsim(input, target, pc_input, pc_target, kernel=self.kernel, **self.kwargs)
return reduce_tensor(l, self.reduction)
| true
| true
|
1c476c016e38e87c7a75eeb62acb50db4e2d2883
| 1,623
|
py
|
Python
|
tests/test_exceptions.py
|
dobisel/yhttp
|
4396c03905d71b801a92dead3504cc3ef7d98d79
|
[
"MIT"
] | 10
|
2020-01-30T16:23:28.000Z
|
2021-12-12T23:24:37.000Z
|
tests/test_exceptions.py
|
dobisel/yhttp
|
4396c03905d71b801a92dead3504cc3ef7d98d79
|
[
"MIT"
] | 1
|
2021-07-12T21:07:06.000Z
|
2021-08-08T10:42:27.000Z
|
tests/test_exceptions.py
|
dobisel/yhttp
|
4396c03905d71b801a92dead3504cc3ef7d98d79
|
[
"MIT"
] | 1
|
2020-01-26T13:28:35.000Z
|
2020-01-26T13:28:35.000Z
|
import pytest
from bddrest import status, response, when
from yhttp import statuses
def test_httpstatus(app, Given):
    """Raised and returned HTTP statuses produce the same response; debug
    mode controls whether the traceback body is appended."""
    @app.route()
    def get(req):
        raise statuses.badrequest()

    # Rebinding ``get`` is fine: the decorator has already registered the
    # first handler; this one is registered for the '/foo' route.
    @app.route('/foo')
    def get(req):
        return statuses.badrequest()
    with Given():
        assert status == '400 Bad Request'
        # Debug mode: status line followed by CRLF and extra detail.
        assert response.text.startswith('400 Bad Request\r\n')
        assert response.headers['content-type'] == 'text/plain; charset=utf-8'
        app.settings.debug = False
        when()
        assert status == '400 Bad Request'
        # Non-debug: body is exactly the status line.
        assert response.text == '400 Bad Request'
        assert response.headers['content-type'] == 'text/plain; charset=utf-8'
        # Returning (not raising) the status behaves the same way.
        when('/foo')
        assert status == 400
def test_unhandledexception(app, Given):
    """Exceptions that are not HTTP statuses propagate out of the app."""
    class MyException(Exception):
        pass
    @app.route()
    def get(req):
        raise MyException()
    with pytest.raises(MyException), Given():
        pass
def test_redirect(app, Given):
    """statuses.found() yields a 302 with a Location header and empty body."""
    @app.route()
    def get(req):
        raise statuses.found('http://example.com')
    with Given():
        assert status == 302
        assert response.headers['location'] == 'http://example.com'
        assert response.text == ''
def test_modified(app, Given):
    """statuses.notmodified() yields a bodiless 304 response."""
    @app.route()
    def get(req):
        raise statuses.notmodified()
    with Given():
        assert status == 304
        assert response.text == ''
def test_nocontent(app, Given):
    """statuses.nocontent() yields a bodiless 204, here via a custom verb."""
    @app.route()
    def remove(req):
        raise statuses.nocontent()
    with Given(verb='REMOVE'):
        assert status == 204
        assert response == ''
| 21.077922
| 78
| 0.601356
|
import pytest
from bddrest import status, response, when
from yhttp import statuses
def test_httpstatus(app, Given):
@app.route()
def get(req):
raise statuses.badrequest()
@app.route('/foo')
def get(req):
return statuses.badrequest()
with Given():
assert status == '400 Bad Request'
assert response.text.startswith('400 Bad Request\r\n')
assert response.headers['content-type'] == 'text/plain; charset=utf-8'
app.settings.debug = False
when()
assert status == '400 Bad Request'
assert response.text == '400 Bad Request'
assert response.headers['content-type'] == 'text/plain; charset=utf-8'
when('/foo')
assert status == 400
def test_unhandledexception(app, Given):
class MyException(Exception):
pass
@app.route()
def get(req):
raise MyException()
with pytest.raises(MyException), Given():
pass
def test_redirect(app, Given):
@app.route()
def get(req):
raise statuses.found('http://example.com')
with Given():
assert status == 302
assert response.headers['location'] == 'http://example.com'
assert response.text == ''
def test_modified(app, Given):
@app.route()
def get(req):
raise statuses.notmodified()
with Given():
assert status == 304
assert response.text == ''
def test_nocontent(app, Given):
@app.route()
def remove(req):
raise statuses.nocontent()
with Given(verb='REMOVE'):
assert status == 204
assert response == ''
| true
| true
|
1c476cdd7fb60214bfeb7c01ad0034abc05bd191
| 3,585
|
py
|
Python
|
plots/thresholds/vit.py
|
drunkcoding/model-inference
|
02d2240bc7052fa32223a80fa63625fe681db102
|
[
"MIT"
] | 1
|
2021-11-15T19:07:13.000Z
|
2021-11-15T19:07:13.000Z
|
plots/thresholds/vit.py
|
drunkcoding/model-inference
|
02d2240bc7052fa32223a80fa63625fe681db102
|
[
"MIT"
] | null | null | null |
plots/thresholds/vit.py
|
drunkcoding/model-inference
|
02d2240bc7052fa32223a80fa63625fe681db102
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from functools import partial
import itertools
import json
import logging
import os
import time
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import torch
from transformers import AutoModelForImageClassification, ViTForImageClassification
from torchvision.datasets import ImageNet
import datasets
from hfutils.preprocess import (
split_train_test,
vit_collate_fn,
ViTFeatureExtractorTransforms,
)
import pandas as pd
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from hfutils.logger import Logger
from hfutils.pipe.vit import ViTPyTorchPipeForImageClassification
from hfutils.calibration import temperature_scale
import sys
sys.path.append(".")
from plots.thresholds.utils import *
# --- Experiment configuration -------------------------------------------
# Four ViT checkpoints of increasing size, keyed XS/S/M/L, all pinned to
# the same CUDA device for the threshold-profiling sweep.
home_dir = "/mnt/raid0nvme1"
dataset_path = os.path.join(home_dir, "ImageNet")
model_keys = [
    "XS",
    "S",
    "M",
    "L",
]
model_names = [
    "vit-tiny-patch16-224",
    "vit-small-patch16-224",
    "vit-base-patch16-224",
    "vit-large-patch16-224",
]
device_map = [
    "cuda:4",
    "cuda:4",
    "cuda:4",
    "cuda:4",
]
model_paths = [
    f"{home_dir}/HuggingFace/WinKawaks/vit-tiny-patch16-224",
    f"{home_dir}/HuggingFace/WinKawaks/vit-small-patch16-224",
    f"{home_dir}/HuggingFace/google/vit-base-patch16-224",
    f"{home_dir}/HuggingFace/google/vit-large-patch16-224",
]
# Re-key the parallel lists by model key for O(1) lookup in the main loop.
model_paths = dict(zip(model_keys, model_paths))
model_names = dict(zip(model_keys, model_names))
model_device = dict(zip(model_keys, device_map))
def model_inference(model, batch, temperature=None, device="cuda:0"):
    """Run one forward pass on *batch*, optionally temperature-scaling logits.

    The pixel tensor is moved to *device*, the model is called with a
    1-tuple of inputs, and when *temperature* is given the raw logits are
    rescaled before being returned.
    """
    inputs = batch["pixel_values"].to(device)
    out = model((inputs,))
    if temperature is None:
        return out
    return temperature_scale(out, temperature)
# --- Main script body ----------------------------------------------------
# Pre-measured per-model latencies and calibration metadata (temperatures).
with open("tests/kernel_duration/latency.json", "r") as fp:
    model_latency = json.load(fp)
with open("repository/repo_vit/meta.json", "r") as fp:
    model_meta = json.load(fp)
dataset = ImageNet(
    dataset_path,
    split="train",
    transform=ViTFeatureExtractorTransforms(model_paths[model_keys[0]], split="val"),
)
# Keep only 2% of the training split for the threshold search.
dataset, _ = split_train_test(dataset, 0.98)
# NOTE(review): this is the number of retained samples, not of classes —
# the name ``num_labels`` is misleading (and it appears unused below).
num_labels = len(dataset)
dataloader = DataLoader(
    dataset, shuffle=True, collate_fn=vit_collate_fn, batch_size=32, drop_last=True,
)
models = load_models(
    model_keys,
    model_paths,
    model_device,
    ViTForImageClassification,
    ViTPyTorchPipeForImageClassification,
)
n_models = len(model_keys)
model_outputs = dict(zip(model_keys, [list() for _ in range(n_models)]))
m = torch.nn.Softmax(dim=-1)
labels = []
# Collect calibrated logits from every model for every batch.
for batch in tqdm(dataloader, desc="Collect Train Data"):
    label = batch["labels"]
    for i, key in enumerate(model_keys):
        logits = model_inference(
            models[key],
            batch,
            device=model_device[key],
            temperature=model_meta[model_names[key]]["temperature"],
        )
        model_outputs[key].append(logits)
    labels.append(label)
model_probs, model_ans, model_outputs, labels = postprocessing_inference(
    model_keys, model_outputs, labels, m
)
# Cartesian grid of one confidence threshold per non-final model; randomly
# subsample the grid when it exceeds max_size combinations.
all_thresholds = list(
    itertools.product(np.linspace(0, 1, endpoint=True, num=100), repeat=n_models - 1)
)
max_size = 100000
if len(all_thresholds) > max_size:
    rnd_idx = np.random.randint(0, len(all_thresholds), max_size)
    all_thresholds = [all_thresholds[i] for i in rnd_idx]
profile_thresholds(
    model_keys,
    model_probs,
    model_ans,
    model_latency,
    model_names,
    all_thresholds,
    "vit",
)
| 24.724138
| 85
| 0.72106
|
from dataclasses import dataclass, field
from functools import partial
import itertools
import json
import logging
import os
import time
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import torch
from transformers import AutoModelForImageClassification, ViTForImageClassification
from torchvision.datasets import ImageNet
import datasets
from hfutils.preprocess import (
split_train_test,
vit_collate_fn,
ViTFeatureExtractorTransforms,
)
import pandas as pd
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from hfutils.logger import Logger
from hfutils.pipe.vit import ViTPyTorchPipeForImageClassification
from hfutils.calibration import temperature_scale
import sys
sys.path.append(".")
from plots.thresholds.utils import *
home_dir = "/mnt/raid0nvme1"
dataset_path = os.path.join(home_dir, "ImageNet")
model_keys = [
"XS",
"S",
"M",
"L",
]
model_names = [
"vit-tiny-patch16-224",
"vit-small-patch16-224",
"vit-base-patch16-224",
"vit-large-patch16-224",
]
device_map = [
"cuda:4",
"cuda:4",
"cuda:4",
"cuda:4",
]
model_paths = [
f"{home_dir}/HuggingFace/WinKawaks/vit-tiny-patch16-224",
f"{home_dir}/HuggingFace/WinKawaks/vit-small-patch16-224",
f"{home_dir}/HuggingFace/google/vit-base-patch16-224",
f"{home_dir}/HuggingFace/google/vit-large-patch16-224",
]
model_paths = dict(zip(model_keys, model_paths))
model_names = dict(zip(model_keys, model_names))
model_device = dict(zip(model_keys, device_map))
def model_inference(model, batch, temperature=None, device="cuda:0"):
pixel_values = batch["pixel_values"].to(device)
logits = model((pixel_values,))
if temperature is not None:
logits = temperature_scale(logits, temperature)
return logits
with open("tests/kernel_duration/latency.json", "r") as fp:
model_latency = json.load(fp)
with open("repository/repo_vit/meta.json", "r") as fp:
model_meta = json.load(fp)
dataset = ImageNet(
dataset_path,
split="train",
transform=ViTFeatureExtractorTransforms(model_paths[model_keys[0]], split="val"),
)
dataset, _ = split_train_test(dataset, 0.98)
num_labels = len(dataset)
dataloader = DataLoader(
dataset, shuffle=True, collate_fn=vit_collate_fn, batch_size=32, drop_last=True,
)
models = load_models(
model_keys,
model_paths,
model_device,
ViTForImageClassification,
ViTPyTorchPipeForImageClassification,
)
n_models = len(model_keys)
model_outputs = dict(zip(model_keys, [list() for _ in range(n_models)]))
m = torch.nn.Softmax(dim=-1)
labels = []
for batch in tqdm(dataloader, desc="Collect Train Data"):
label = batch["labels"]
for i, key in enumerate(model_keys):
logits = model_inference(
models[key],
batch,
device=model_device[key],
temperature=model_meta[model_names[key]]["temperature"],
)
model_outputs[key].append(logits)
labels.append(label)
model_probs, model_ans, model_outputs, labels = postprocessing_inference(
model_keys, model_outputs, labels, m
)
all_thresholds = list(
itertools.product(np.linspace(0, 1, endpoint=True, num=100), repeat=n_models - 1)
)
max_size = 100000
if len(all_thresholds) > max_size:
rnd_idx = np.random.randint(0, len(all_thresholds), max_size)
all_thresholds = [all_thresholds[i] for i in rnd_idx]
profile_thresholds(
model_keys,
model_probs,
model_ans,
model_latency,
model_names,
all_thresholds,
"vit",
)
| true
| true
|
1c476e3ec222661def123f38fb26ec5839432659
| 1,087
|
py
|
Python
|
src/utils/etc.py
|
slowwavesleep/NeuralMorphemeSegmenter
|
b32f47ecc380262755bf436cf793f35901919f0f
|
[
"MIT"
] | null | null | null |
src/utils/etc.py
|
slowwavesleep/NeuralMorphemeSegmenter
|
b32f47ecc380262755bf436cf793f35901919f0f
|
[
"MIT"
] | null | null | null |
src/utils/etc.py
|
slowwavesleep/NeuralMorphemeSegmenter
|
b32f47ecc380262755bf436cf793f35901919f0f
|
[
"MIT"
] | null | null | null |
import itertools
import json
from typing import Iterable, List, Tuple
def remove_pads(sequences: Iterable[Iterable[int]],
                true_lengths: Iterable[int],
                *,
                pre_pad: bool = False) -> List[List[int]]:
    """Strip padding from each sequence, keeping only its true length.

    Args:
        sequences: padded sequences; must be sized and sliceable
            (e.g. lists or tensors-of-lists), despite the looser hint.
        true_lengths: the unpadded length of each corresponding sequence.
        pre_pad: if True, padding sits at the front, so the tail of each
            sequence is kept; otherwise the head is kept.

    Returns:
        A list of lists with padding removed.

    Raises:
        ValueError: if ``sequences`` and ``true_lengths`` differ in length.
    """
    # Raise explicitly instead of using `assert`, which is stripped under -O.
    if len(sequences) != len(true_lengths):
        raise ValueError(
            f"sequences and true_lengths differ in length: "
            f"{len(sequences)} != {len(true_lengths)}"
        )
    output = []
    for element, true_length in zip(sequences, true_lengths):
        if pre_pad:
            # Keep the last `true_length` items; max() guards against a
            # claimed length longer than the padded sequence itself.
            element = element[max(0, len(element) - true_length):]
        else:
            element = element[:true_length]
        output.append(list(element))
    return output
def flatten_list(list_to_flatten: List[list]) -> list:
    """Concatenate a list of lists into a single flat list (one level deep)."""
    flat: list = []
    for sublist in list_to_flatten:
        flat.extend(sublist)
    return flat
def read_experiment_data(path: str) -> Tuple[List[int], List[str], List[str]]:
    """Read a JSON-lines experiment file into three parallel lists.

    Each line must be a JSON object with "index", "original" and
    "segmented" keys.

    Args:
        path: path to the JSON-lines file.

    Returns:
        Parallel lists of indices, original strings and segmented strings.
    """
    indices: List[int] = []
    original: List[str] = []
    segmented: List[str] = []
    # JSON is defined over UTF-8; be explicit instead of relying on the
    # platform's locale-dependent default encoding.
    with open(path, encoding="utf-8") as file:
        for line in file:
            data = json.loads(line)
            indices.append(data["index"])
            original.append(data["original"])
            segmented.append(data["segmented"])
    return indices, original, segmented
| 26.512195
| 78
| 0.609936
|
import itertools
import json
from typing import Iterable, List, Tuple
def remove_pads(sequences: Iterable[Iterable[int]],
true_lengths: Iterable[int],
*,
pre_pad: bool = False) -> List[List[int]]:
assert len(sequences) == len(true_lengths)
output = []
for element, true_length in zip(sequences, true_lengths):
if pre_pad:
element = element[max(0, len(element) - true_length):]
else:
element = element[:true_length]
output.append(list(element))
return output
def flatten_list(list_to_flatten: List[list]) -> list:
return list(itertools.chain(*list_to_flatten))
def read_experiment_data(path: str) -> Tuple[List[int], List[str], List[str]]:
indices = []
original = []
segmented = []
with open(path) as file:
for line in file:
data = json.loads(line)
indices.append(data["index"])
original.append(data["original"])
segmented.append(data["segmented"])
return indices, original, segmented
| true
| true
|
1c476f371ca7d1b74fa727dff3dcc27f059ba338
| 4,943
|
py
|
Python
|
tiddlyweb/serializations/json.py
|
angeluseve/tiddlyweb
|
d24a45d48faa2b014e1c1598ec176c4c1c98fb07
|
[
"BSD-3-Clause"
] | 1
|
2016-05-09T15:26:17.000Z
|
2016-05-09T15:26:17.000Z
|
tiddlyweb/serializations/json.py
|
angeluseve/tiddlyweb
|
d24a45d48faa2b014e1c1598ec176c4c1c98fb07
|
[
"BSD-3-Clause"
] | null | null | null |
tiddlyweb/serializations/json.py
|
angeluseve/tiddlyweb
|
d24a45d48faa2b014e1c1598ec176c4c1c98fb07
|
[
"BSD-3-Clause"
] | null | null | null |
"""
JSON based serializer.
"""
import simplejson
from base64 import b64encode, b64decode
from tiddlyweb.serializations import SerializationInterface
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import Policy
class Serialization(SerializationInterface):
    """
    Turn various entities to and from JSON.
    """

    # Policy attributes serialized for both recipes and bags.
    _POLICY_KEYS = ['owner', 'read', 'write', 'create', 'delete', 'manage']

    def list_recipes(self, recipes):
        """
        Create a JSON list of recipe names from
        the provided recipes.
        """
        return simplejson.dumps([recipe.name for recipe in recipes])

    def list_bags(self, bags):
        """
        Create a JSON list of bag names from the
        provided bags.
        """
        return simplejson.dumps([bag.name for bag in bags])

    def list_tiddlers(self, bag):
        """
        List the tiddlers in a bag as JSON.
        The format is a list of dicts in
        the form described by self._tiddler_dict.
        """
        return simplejson.dumps([self._tiddler_dict(tiddler) for tiddler in bag.list_tiddlers()])

    def _policy_as_dict(self, policy):
        """
        Reduce a Policy to a plain dict of its constraint attributes.
        """
        policy_dict = {}
        for key in self._POLICY_KEYS:
            policy_dict[key] = getattr(policy, key)
        return policy_dict

    def recipe_as(self, recipe):
        """
        A recipe as a JSON dictionary.
        """
        policy_dict = self._policy_as_dict(recipe.policy)
        return simplejson.dumps(dict(desc=recipe.desc, policy=policy_dict, recipe=recipe.get_recipe()))

    def as_recipe(self, recipe, input_string):
        """
        Turn a JSON dictionary into a Recipe
        if it is in the proper form. Include
        the policy.
        """
        info = simplejson.loads(input_string)
        try:
            recipe.set_recipe(info['recipe'])
            recipe.desc = info['desc']
            if info['policy']:
                recipe.policy = Policy()
                for key, value in info['policy'].items():
                    setattr(recipe.policy, key, value)
        except KeyError:
            # Partial input is tolerated; missing keys leave the recipe as-is.
            pass
        return recipe

    def bag_as(self, bag):
        """
        Create a JSON dictionary representing
        a Bag and Policy.
        """
        info = dict(policy=self._policy_as_dict(bag.policy), desc=bag.desc)
        return simplejson.dumps(info)

    def as_bag(self, bag, input_string):
        """
        Turn a JSON string into a bag.
        """
        info = simplejson.loads(input_string)
        if info['policy']:
            bag.policy = Policy()
            for key, value in info['policy'].items():
                setattr(bag.policy, key, value)
        bag.desc = info.get('desc', '')
        return bag

    def tiddler_as(self, tiddler):
        """
        Create a JSON dictionary representing
        a tiddler, as described by _tiddler_dict
        plus the text of the tiddler.
        """
        tiddler_dict = self._tiddler_dict(tiddler)
        # Non-text (typed) tiddlers are transported base64-encoded.
        if tiddler.type and tiddler.type != 'None':
            tiddler_dict['text'] = b64encode(tiddler.text)
        else:
            tiddler_dict['text'] = tiddler.text
        return simplejson.dumps(tiddler_dict)

    def as_tiddler(self, tiddler, input_string):
        """
        Turn a JSON dictionary into a Tiddler.
        """
        dict_from_input = simplejson.loads(input_string)
        accepted_keys = ['created', 'modified', 'modifier', 'tags', 'fields', 'text', 'type']
        # BUGFIX: was dict.iteritems(), which exists only on Python 2.
        # dict.items() behaves identically here and works on Python 3 too.
        for key, value in dict_from_input.items():
            if value and key in accepted_keys:
                setattr(tiddler, key, value)
        if tiddler.type and tiddler.type != 'None':
            tiddler.text = b64decode(tiddler.text)
        return tiddler

    def _tiddler_dict(self, tiddler):
        """
        Select fields from a tiddler to create
        a dictionary.
        """
        unwanted_keys = ['text', 'store']
        wanted_keys = [attribute for attribute in tiddler.slots if attribute not in unwanted_keys]
        wanted_info = {}
        for attribute in wanted_keys:
            wanted_info[attribute] = getattr(tiddler, attribute, None)
        wanted_info['permissions'] = self._tiddler_permissions(tiddler)
        try:
            # The 'fat' query parameter requests tiddler text inline.
            fat = self.environ['tiddlyweb.query'].get('fat', [None])[0]
            if fat:
                wanted_info['text'] = tiddler.text
        except KeyError:
            pass  # tiddlyweb.query is not there
        return dict(wanted_info)

    def _tiddler_permissions(self, tiddler):
        """
        Make a list of the permissions the current user has
        on this tiddler.
        """
        perms = []
        bag = Bag(tiddler.bag)
        if tiddler.store:
            bag = tiddler.store.get(bag)
        if 'tiddlyweb.usersign' in self.environ:
            perms = bag.policy.user_perms(self.environ['tiddlyweb.usersign'])
        return perms
| 32.519737
| 103
| 0.584665
|
import simplejson
from base64 import b64encode, b64decode
from tiddlyweb.serializations import SerializationInterface
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import Policy
class Serialization(SerializationInterface):
def list_recipes(self, recipes):
return simplejson.dumps([recipe.name for recipe in recipes])
def list_bags(self, bags):
return simplejson.dumps([bag.name for bag in bags])
def list_tiddlers(self, bag):
return simplejson.dumps([self._tiddler_dict(tiddler) for tiddler in bag.list_tiddlers()])
def recipe_as(self, recipe):
policy = recipe.policy
policy_dict = {}
for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
policy_dict[key] = getattr(policy, key)
return simplejson.dumps(dict(desc=recipe.desc, policy=policy_dict, recipe=recipe.get_recipe()))
def as_recipe(self, recipe, input_string):
info = simplejson.loads(input_string)
try:
recipe.set_recipe(info['recipe'])
recipe.desc = info['desc']
if info['policy']:
recipe.policy = Policy()
for key, value in info['policy'].items():
recipe.policy.__setattr__(key, value)
except KeyError:
pass
return recipe
def bag_as(self, bag):
policy = bag.policy
policy_dict = {}
for key in ['owner', 'read', 'write', 'create', 'delete', 'manage']:
policy_dict[key] = getattr(policy, key)
info = dict(policy=policy_dict, desc=bag.desc)
return simplejson.dumps(info)
def as_bag(self, bag, input_string):
info = simplejson.loads(input_string)
if info['policy']:
bag.policy = Policy()
for key, value in info['policy'].items():
bag.policy.__setattr__(key, value)
bag.desc = info.get('desc', '')
return bag
def tiddler_as(self, tiddler):
tiddler_dict = self._tiddler_dict(tiddler)
if tiddler.type and tiddler.type != 'None':
tiddler_dict['text'] = b64encode(tiddler.text)
else:
tiddler_dict['text'] = tiddler.text
return simplejson.dumps(tiddler_dict)
def as_tiddler(self, tiddler, input_string):
dict_from_input = simplejson.loads(input_string)
accepted_keys = ['created', 'modified', 'modifier', 'tags', 'fields', 'text', 'type']
for key, value in dict_from_input.iteritems():
if value and key in accepted_keys:
setattr(tiddler, key, value)
if tiddler.type and tiddler.type != 'None':
tiddler.text = b64decode(tiddler.text)
return tiddler
def _tiddler_dict(self, tiddler):
unwanted_keys = ['text', 'store']
wanted_keys = [attribute for attribute in tiddler.slots if attribute not in unwanted_keys]
wanted_info = {}
for attribute in wanted_keys:
wanted_info[attribute] = getattr(tiddler, attribute, None)
wanted_info['permissions'] = self._tiddler_permissions(tiddler)
try:
fat = self.environ['tiddlyweb.query'].get('fat', [None])[0]
if fat:
wanted_info['text'] = tiddler.text
except KeyError:
pass
return dict(wanted_info)
def _tiddler_permissions(self, tiddler):
perms = []
bag = Bag(tiddler.bag)
if tiddler.store:
bag = tiddler.store.get(bag)
if 'tiddlyweb.usersign' in self.environ:
perms = bag.policy.user_perms(self.environ['tiddlyweb.usersign'])
return perms
| true
| true
|
1c4771447baf8ca0aea72d01cd74569e19c6a862
| 7,917
|
py
|
Python
|
solo/methods/nnsiam.py
|
ludysama/crp
|
08027b67f174426ddac5eef8186349e8337481fc
|
[
"MIT"
] | 2
|
2021-11-02T07:38:33.000Z
|
2021-11-21T12:55:28.000Z
|
solo/methods/nnsiam.py
|
ludysama/crp
|
08027b67f174426ddac5eef8186349e8337481fc
|
[
"MIT"
] | null | null | null |
solo/methods/nnsiam.py
|
ludysama/crp
|
08027b67f174426ddac5eef8186349e8337481fc
|
[
"MIT"
] | null | null | null |
# Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
from typing import Any, Dict, List, Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.simsiam import simsiam_loss_func
from solo.methods.base import BaseMethod
from solo.utils.misc import gather
class NNSiam(BaseMethod):
    """NNSiam: SimSiam augmented with a queue of past features, so each view's
    prediction is matched against the other view's nearest neighbor from the
    queue rather than against the view itself."""

    def __init__(
        self,
        proj_output_dim: int,
        proj_hidden_dim: int,
        pred_hidden_dim: int,
        queue_size: int,
        **kwargs,
    ):
        """Implements NNSiam (https://arxiv.org/abs/2104.14548).

        Args:
            proj_output_dim (int): number of dimensions of projected features.
            proj_hidden_dim (int): number of neurons of the hidden layers of the projector.
            pred_hidden_dim (int): number of neurons of the hidden layers of the predictor.
            queue_size (int): number of samples to keep in the queue.
        """
        super().__init__(**kwargs)

        self.queue_size = queue_size

        # projector: 3-layer MLP; the final BatchNorm has no affine params.
        self.projector = nn.Sequential(
            nn.Linear(self.features_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, proj_output_dim),
            nn.BatchNorm1d(proj_output_dim, affine=False),
        )
        self.projector[6].bias.requires_grad = False  # hack: not use bias as it is followed by BN

        # predictor: 2-layer bottleneck MLP on top of the projector output.
        self.predictor = nn.Sequential(
            nn.Linear(proj_output_dim, pred_hidden_dim, bias=False),
            nn.BatchNorm1d(pred_hidden_dim),
            nn.ReLU(),
            nn.Linear(pred_hidden_dim, proj_output_dim),
        )

        # queue: ring buffer of past projected features, registered as
        # buffers so they move with the module and are checkpointed but are
        # not trained. queue_y keeps labels only to report NN accuracy.
        self.register_buffer("queue", torch.randn(self.queue_size, proj_output_dim))
        self.register_buffer("queue_y", -torch.ones(self.queue_size, dtype=torch.long))
        self.queue = F.normalize(self.queue, dim=1)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    @staticmethod
    def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Adds NNSiam-specific command-line arguments to the parser."""
        parent_parser = super(NNSiam, NNSiam).add_model_specific_args(parent_parser)
        parser = parent_parser.add_argument_group("nnsiam")

        # projector
        parser.add_argument("--proj_output_dim", type=int, default=128)
        parser.add_argument("--proj_hidden_dim", type=int, default=2048)

        # predictor
        parser.add_argument("--pred_hidden_dim", type=int, default=512)

        # queue settings
        parser.add_argument("--queue_size", default=65536, type=int)

        return parent_parser

    @property
    def learnable_params(self) -> List[dict]:
        """Adds projector and predictor parameters to the parent's learnable parameters.

        Returns:
            List[dict]: list of learnable parameters.
        """
        extra_learnable_params: List[dict] = [
            {"params": self.projector.parameters()},
            # "static_lr": the predictor group keeps a fixed learning rate
            # (flag consumed by the optimizer setup in the base method).
            {"params": self.predictor.parameters(), "static_lr": True},
        ]
        return super().learnable_params + extra_learnable_params

    @torch.no_grad()
    def dequeue_and_enqueue(self, z: torch.Tensor, y: torch.Tensor):
        """Adds new samples and removes old samples from the queue in a fifo manner. Also stores
        the labels of the samples.

        Args:
            z (torch.Tensor): batch of projected features.
            y (torch.Tensor): labels of the samples in the batch.
        """
        # gather across distributed processes so all replicas share one queue
        z = gather(z)
        y = gather(y)

        batch_size = z.shape[0]
        ptr = int(self.queue_ptr)  # type: ignore
        # queue must be a multiple of the (gathered) batch size so the ring
        # pointer wraps cleanly without partial overwrites
        assert self.queue_size % batch_size == 0

        self.queue[ptr : ptr + batch_size, :] = z
        self.queue_y[ptr : ptr + batch_size] = y  # type: ignore
        ptr = (ptr + batch_size) % self.queue_size

        self.queue_ptr[0] = ptr  # type: ignore

    @torch.no_grad()
    def find_nn(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Finds the nearest neighbor of a sample.

        Args:
            z (torch.Tensor): a batch of projected features.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                indices and projected features of the nearest neighbors.
        """
        # dot product == cosine similarity, since z and queue rows are
        # L2-normalized before reaching here
        idx = (z @ self.queue.T).max(dim=1)[1]
        nn = self.queue[idx]
        return idx, nn

    def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
        """Performs the forward pass of the encoder, the projector and the predictor.

        Args:
            X (torch.Tensor): a batch of images in the tensor format.

        Returns:
            Dict[str, Any]:
                a dict containing the outputs of the parent
                and the projected and predicted features.
        """
        out = super().forward(X, *args, **kwargs)
        z = self.projector(out["feats"])
        p = self.predictor(z)
        return {**out, "z": z, "p": p}

    def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
        """Training step for NNSiam reusing BaseMethod training step.

        Args:
            batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
                [X] is a list of size self.num_crops containing batches of images
            batch_idx (int): index of the batch

        Returns:
            torch.Tensor: total loss composed of SimSiam loss and classification loss
        """
        targets = batch[-1]

        out = super().training_step(batch, batch_idx)
        class_loss = out["loss"]
        feats1, feats2 = out["feats"]

        z1 = self.projector(feats1)
        z2 = self.projector(feats2)

        p1 = self.predictor(z1)
        p2 = self.predictor(z2)

        z1 = F.normalize(z1, dim=-1)
        z2 = F.normalize(z2, dim=-1)

        # find nn
        idx1, nn1 = self.find_nn(z1)
        _, nn2 = self.find_nn(z2)

        # ------- negative cosine similarity loss -------
        # each view's prediction is compared against the OTHER view's
        # nearest neighbor drawn from the queue
        neg_cos_sim = simsiam_loss_func(p1, nn2) / 2 + simsiam_loss_func(p2, nn1) / 2

        # compute nn accuracy
        b = targets.size(0)
        nn_acc = (targets == self.queue_y[idx1]).sum() / b

        # dequeue and enqueue
        # NOTE: only the first view's (normalized) features enter the queue
        self.dequeue_and_enqueue(z1, targets)

        # calculate std of features
        # (presumably a collapse monitor: std near 0 would indicate
        # degenerate representations -- confirm against solo-learn docs)
        z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()
        z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()
        z_std = (z1_std + z2_std) / 2

        metrics = {
            "train_neg_cos_sim": neg_cos_sim,
            "train_z_std": z_std,
            "train_nn_acc": nn_acc,
        }
        self.log_dict(metrics, on_epoch=True, sync_dist=True)

        return neg_cos_sim + class_loss
| 35.662162
| 99
| 0.630794
|
import argparse
from typing import Any, Dict, List, Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.simsiam import simsiam_loss_func
from solo.methods.base import BaseMethod
from solo.utils.misc import gather
class NNSiam(BaseMethod):
def __init__(
self,
proj_output_dim: int,
proj_hidden_dim: int,
pred_hidden_dim: int,
queue_size: int,
**kwargs,
):
super().__init__(**kwargs)
self.queue_size = queue_size
self.projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim, bias=False),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_hidden_dim, bias=False),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_output_dim),
nn.BatchNorm1d(proj_output_dim, affine=False),
)
self.projector[6].bias.requires_grad = False
self.predictor = nn.Sequential(
nn.Linear(proj_output_dim, pred_hidden_dim, bias=False),
nn.BatchNorm1d(pred_hidden_dim),
nn.ReLU(),
nn.Linear(pred_hidden_dim, proj_output_dim),
)
self.register_buffer("queue", torch.randn(self.queue_size, proj_output_dim))
self.register_buffer("queue_y", -torch.ones(self.queue_size, dtype=torch.long))
self.queue = F.normalize(self.queue, dim=1)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@staticmethod
def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parent_parser = super(NNSiam, NNSiam).add_model_specific_args(parent_parser)
parser = parent_parser.add_argument_group("nnsiam")
parser.add_argument("--proj_output_dim", type=int, default=128)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
parser.add_argument("--pred_hidden_dim", type=int, default=512)
parser.add_argument("--queue_size", default=65536, type=int)
return parent_parser
@property
def learnable_params(self) -> List[dict]:
extra_learnable_params: List[dict] = [
{"params": self.projector.parameters()},
{"params": self.predictor.parameters(), "static_lr": True},
]
return super().learnable_params + extra_learnable_params
@torch.no_grad()
def dequeue_and_enqueue(self, z: torch.Tensor, y: torch.Tensor):
z = gather(z)
y = gather(y)
batch_size = z.shape[0]
ptr = int(self.queue_ptr)
assert self.queue_size % batch_size == 0
self.queue[ptr : ptr + batch_size, :] = z
self.queue_y[ptr : ptr + batch_size] = y
ptr = (ptr + batch_size) % self.queue_size
self.queue_ptr[0] = ptr
@torch.no_grad()
def find_nn(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
idx = (z @ self.queue.T).max(dim=1)[1]
nn = self.queue[idx]
return idx, nn
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
out = super().forward(X, *args, **kwargs)
z = self.projector(out["feats"])
p = self.predictor(z)
return {**out, "z": z, "p": p}
def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
targets = batch[-1]
out = super().training_step(batch, batch_idx)
class_loss = out["loss"]
feats1, feats2 = out["feats"]
z1 = self.projector(feats1)
z2 = self.projector(feats2)
p1 = self.predictor(z1)
p2 = self.predictor(z2)
z1 = F.normalize(z1, dim=-1)
z2 = F.normalize(z2, dim=-1)
idx1, nn1 = self.find_nn(z1)
_, nn2 = self.find_nn(z2)
neg_cos_sim = simsiam_loss_func(p1, nn2) / 2 + simsiam_loss_func(p2, nn1) / 2
b = targets.size(0)
nn_acc = (targets == self.queue_y[idx1]).sum() / b
self.dequeue_and_enqueue(z1, targets)
z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()
z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()
z_std = (z1_std + z2_std) / 2
metrics = {
"train_neg_cos_sim": neg_cos_sim,
"train_z_std": z_std,
"train_nn_acc": nn_acc,
}
self.log_dict(metrics, on_epoch=True, sync_dist=True)
return neg_cos_sim + class_loss
| true
| true
|
1c47724e4746e520c60664378824afb818843692
| 6,421
|
py
|
Python
|
src/create_embedded_tools.py
|
erenon/bazel
|
9bf885afeb01c766d84acf86ca847a7b5e7bd0d8
|
[
"Apache-2.0"
] | null | null | null |
src/create_embedded_tools.py
|
erenon/bazel
|
9bf885afeb01c766d84acf86ca847a7b5e7bd0d8
|
[
"Apache-2.0"
] | null | null | null |
src/create_embedded_tools.py
|
erenon/bazel
|
9bf885afeb01c766d84acf86ca847a7b5e7bd0d8
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=g-direct-third-party-import
# pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the embedded_tools.zip that is part of the Bazel binary."""
import contextlib
import fnmatch
import os
import os.path
import re
import sys
import zipfile
from src.create_embedded_tools_lib import copy_tar_to_zip
from src.create_embedded_tools_lib import copy_zip_to_zip
from src.create_embedded_tools_lib import is_executable
# (glob pattern, path transformer) pairs consulted IN ORDER by
# get_output_path(); the first matching pattern wins.  The final '*' entry is
# a catch-all that strips the bazel-out/<config>/bin/ prefix, so every input
# file maps to some archive path.
output_paths = [
    ('*tools/jdk/BUILD*', lambda x: 'tools/jdk/BUILD'),
    ('*tools/platforms/platforms.BUILD', lambda x: 'platforms/BUILD'),
    ('*tools/platforms/*', lambda x: 'platforms/' + os.path.basename(x)),
    ('*tools/cpp/runfiles/generated_*',
     lambda x: 'tools/cpp/runfiles/' + os.path.basename(x)[len('generated_'):]),
    ('*JavaBuilder*_deploy.jar', lambda x: 'tools/jdk/' + os.path.basename(x)),
    ('*JacocoCoverage*_deploy.jar',
     lambda x: 'tools/jdk/JacocoCoverage_deploy.jar'),
    ('*turbine_deploy.jar', lambda x: 'tools/jdk/turbine_deploy.jar'),
    ('*turbine_direct*', lambda x: 'tools/jdk/' + os.path.basename(x)),
    ('*javac-9+181-r4173-1.jar',
     lambda x: 'third_party/java/jdk/langtools/javac-9+181-r4173-1.jar'),
    ('*bazel-singlejar_deploy.jar',
     lambda x: 'tools/jdk/singlejar/bazel-singlejar_deploy.jar'),
    ('*GenClass_deploy.jar', lambda x: 'tools/jdk/GenClass_deploy.jar'),
    ('*ExperimentalRunner_deploy.jar',
     lambda x: 'tools/jdk/ExperimentalTestRunner_deploy.jar'),
    ('*Runner_deploy.jar', lambda x: 'tools/jdk/TestRunner_deploy.jar'),
    ('*singlejar_local.exe', lambda x: 'tools/jdk/singlejar/singlejar.exe'),
    ('*singlejar_local', lambda x: 'tools/jdk/singlejar/singlejar'),
    ('*launcher.exe', lambda x: 'tools/launcher/launcher.exe'),
    ('*def_parser.exe', lambda x: 'tools/def_parser/def_parser.exe'),
    ('*ijar.exe', lambda x: 'tools/jdk/ijar/ijar.exe'),
    ('*ijar', lambda x: 'tools/jdk/ijar/ijar'),
    ('*zipper.exe', lambda x: 'tools/zip/zipper/zipper.exe'),
    ('*zipper', lambda x: 'tools/zip/zipper/zipper'),
    ('*src/objc_tools/*',
     lambda x: 'tools/objc/precomp_' + os.path.basename(x)),
    ('*xcode*StdRedirect.dylib', lambda x: 'tools/objc/StdRedirect.dylib'),
    ('*xcode*make_hashed_objlist.py',
     lambda x: 'tools/objc/make_hashed_objlist.py'),
    ('*xcode*realpath', lambda x: 'tools/objc/realpath'),
    ('*xcode*xcode-locator', lambda x: 'tools/objc/xcode-locator'),
    # Shell scripts keep their name; other xcode tools get a '.sh' suffix.
    ('*src/tools/xcode/*.sh', lambda x: 'tools/objc/' + os.path.basename(x)),
    ('*src/tools/xcode/*',
     lambda x: 'tools/objc/' + os.path.basename(x) + '.sh'),
    # JDK archives are normalized to fixed names; main() extracts them.
    ('*external/openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
    ('*external/openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
    ('*src/minimal_jdk.tar.gz', lambda x: 'jdk.tar.gz'),
    ('*src/minimal_jdk.zip', lambda x: 'jdk.zip'),
    ('*', lambda x: re.sub(r'^.*bazel-out/[^/]*/bin/', '', x, count=1)),
]
def get_output_path(path):
    """Map an input file path to its destination inside the embedded tools
    archive, using the first matching entry of `output_paths`."""
    normalized = path.replace('\\', '/')
    for pattern, transformer in output_paths:
        if fnmatch.fnmatch(normalized, pattern):
            # BUILD.tools are stored as BUILD files.
            return transformer(path).replace('/BUILD.tools', '/BUILD')
def get_input_files(argsfile):
    """Returns a sorted list of tuples (archive_file, input_file).

    This describes the files that should be put into the generated archive.

    Args:
        argsfile: The file containing the list of input files.
    """
    with open(argsfile, 'r') as f:
        input_files = set(x.strip() for x in f.readlines())

    result = {}
    # NOTE(review): input_files is a set, so iteration order is not the file
    # order -- the "comes last in the list" behavior described below is not
    # actually guaranteed here; verify whether that matters to callers.
    for input_file in input_files:
        # If we have both a BUILD and a BUILD.tools file, take the latter only.
        if (os.path.basename(input_file) == 'BUILD' and
            input_file + '.tools' in input_files):
            continue
        # This gives us the same behavior as the older bash version of this
        # tool: If two input files map to the same output files, the one that
        # comes last in the list of input files overrides all earlier ones.
        result[get_output_path(input_file)] = input_file

    # Sorting the file list keeps the resulting ZIP file reproducible and
    # deterministic.  (The previous comment said "not reproducible", which
    # contradicted the purpose of the sort.)
    return sorted(result.items())
def copy_jdk_into_archive(output_zip, archive_file, input_file):
    """Unpack the JDK archive into the output zip under a top-level jdk/ dir."""

    def _replace_dirname(filename):
        # Bazel looks for the bundled JDK in a folder literally named 'jdk',
        # so the archive's own top-level directory is renamed.
        components = filename.split('/')
        return 'jdk/' + '/'.join(components[1:])

    # Unlike other inputs, the JDK is extracted rather than stored as a
    # nested archive.
    if archive_file.endswith('.zip'):
        copy_zip_to_zip(output_zip, input_file, _replace_dirname)
    elif archive_file.endswith('.tar.gz'):
        copy_tar_to_zip(output_zip, input_file, _replace_dirname)
def main():
    """Build the embedded tools zip named by argv[1] from the file list in
    argv[2]."""
    out_path = os.path.join(os.getcwd(), sys.argv[1])
    input_files = get_input_files(sys.argv[2])

    # Copy all the input_files into the archive.  contextlib.closing keeps
    # this python 2.6 (for centos 6.7) compatible.
    with contextlib.closing(
        zipfile.ZipFile(out_path, 'w', zipfile.ZIP_DEFLATED)) as output_zip:
        # Fixed 1980 timestamp keeps the archive deterministic.
        workspace_info = zipfile.ZipInfo('WORKSPACE', (1980, 1, 1, 0, 0, 0))
        workspace_info.external_attr = 0o644 << 16
        output_zip.writestr(workspace_info, 'workspace(name = "bazel_tools")\n')

        for archive_file, input_file in input_files:
            if os.path.basename(archive_file) in ('jdk.tar.gz', 'jdk.zip'):
                copy_jdk_into_archive(output_zip, archive_file, input_file)
            else:
                entry_info = zipfile.ZipInfo(archive_file, (1980, 1, 1, 0, 0, 0))
                mode = 0o755 if is_executable(input_file) else 0o644
                entry_info.external_attr = mode << 16
                entry_info.compress_type = zipfile.ZIP_DEFLATED
                with open(input_file, 'rb') as f:
                    output_zip.writestr(entry_info, f.read())
# Entry point: build the zip named by argv[1] from the list file in argv[2].
if __name__ == '__main__':
    main()
| 42.523179
| 80
| 0.686653
|
atch
import os
import os.path
import re
import sys
import zipfile
from src.create_embedded_tools_lib import copy_tar_to_zip
from src.create_embedded_tools_lib import copy_zip_to_zip
from src.create_embedded_tools_lib import is_executable
output_paths = [
('*tools/jdk/BUILD*', lambda x: 'tools/jdk/BUILD'),
('*tools/platforms/platforms.BUILD', lambda x: 'platforms/BUILD'),
('*tools/platforms/*', lambda x: 'platforms/' + os.path.basename(x)),
('*tools/cpp/runfiles/generated_*',
lambda x: 'tools/cpp/runfiles/' + os.path.basename(x)[len('generated_'):]),
('*JavaBuilder*_deploy.jar', lambda x: 'tools/jdk/' + os.path.basename(x)),
('*JacocoCoverage*_deploy.jar',
lambda x: 'tools/jdk/JacocoCoverage_deploy.jar'),
('*turbine_deploy.jar', lambda x: 'tools/jdk/turbine_deploy.jar'),
('*turbine_direct*', lambda x: 'tools/jdk/' + os.path.basename(x)),
('*javac-9+181-r4173-1.jar',
lambda x: 'third_party/java/jdk/langtools/javac-9+181-r4173-1.jar'),
('*bazel-singlejar_deploy.jar',
lambda x: 'tools/jdk/singlejar/bazel-singlejar_deploy.jar'),
('*GenClass_deploy.jar', lambda x: 'tools/jdk/GenClass_deploy.jar'),
('*ExperimentalRunner_deploy.jar',
lambda x: 'tools/jdk/ExperimentalTestRunner_deploy.jar'),
('*Runner_deploy.jar', lambda x: 'tools/jdk/TestRunner_deploy.jar'),
('*singlejar_local.exe', lambda x: 'tools/jdk/singlejar/singlejar.exe'),
('*singlejar_local', lambda x: 'tools/jdk/singlejar/singlejar'),
('*launcher.exe', lambda x: 'tools/launcher/launcher.exe'),
('*def_parser.exe', lambda x: 'tools/def_parser/def_parser.exe'),
('*ijar.exe', lambda x: 'tools/jdk/ijar/ijar.exe'),
('*ijar', lambda x: 'tools/jdk/ijar/ijar'),
('*zipper.exe', lambda x: 'tools/zip/zipper/zipper.exe'),
('*zipper', lambda x: 'tools/zip/zipper/zipper'),
('*src/objc_tools/*',
lambda x: 'tools/objc/precomp_' + os.path.basename(x)),
('*xcode*StdRedirect.dylib', lambda x: 'tools/objc/StdRedirect.dylib'),
('*xcode*make_hashed_objlist.py',
lambda x: 'tools/objc/make_hashed_objlist.py'),
('*xcode*realpath', lambda x: 'tools/objc/realpath'),
('*xcode*xcode-locator', lambda x: 'tools/objc/xcode-locator'),
('*src/tools/xcode/*.sh', lambda x: 'tools/objc/' + os.path.basename(x)),
('*src/tools/xcode/*',
lambda x: 'tools/objc/' + os.path.basename(x) + '.sh'),
('*external/openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
('*external/openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
('*src/minimal_jdk.tar.gz', lambda x: 'jdk.tar.gz'),
('*src/minimal_jdk.zip', lambda x: 'jdk.zip'),
('*', lambda x: re.sub(r'^.*bazel-out/[^/]*/bin/', '', x, count=1)),
]
def get_output_path(path):
for pattern, transformer in output_paths:
if fnmatch.fnmatch(path.replace('\\', '/'), pattern):
return transformer(path).replace('/BUILD.tools', '/BUILD')
def get_input_files(argsfile):
with open(argsfile, 'r') as f:
input_files = set(x.strip() for x in f.readlines())
result = {}
for input_file in input_files:
if (os.path.basename(input_file) == 'BUILD' and
input_file + '.tools' in input_files):
continue
result[get_output_path(input_file)] = input_file
return sorted(result.items())
def copy_jdk_into_archive(output_zip, archive_file, input_file):
def _replace_dirname(filename):
return 'jdk/' + '/'.join(filename.split('/')[1:])
if archive_file.endswith('.tar.gz'):
copy_tar_to_zip(output_zip, input_file, _replace_dirname)
elif archive_file.endswith('.zip'):
copy_zip_to_zip(output_zip, input_file, _replace_dirname)
def main():
output_zip = os.path.join(os.getcwd(), sys.argv[1])
input_files = get_input_files(sys.argv[2])
# Copy all the input_files into output_zip.
# Adding contextlib.closing to be python 2.6 (for centos 6.7) compatible
with contextlib.closing(
zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED)) as output_zip:
zipinfo = zipfile.ZipInfo('WORKSPACE', (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o644 << 16
output_zip.writestr(zipinfo, 'workspace(name = "bazel_tools")\n')
for archive_file, input_file in input_files:
if os.path.basename(archive_file) in ('jdk.tar.gz', 'jdk.zip'):
copy_jdk_into_archive(output_zip, archive_file, input_file)
else:
zipinfo = zipfile.ZipInfo(archive_file, (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o755 << 16 if is_executable(
input_file) else 0o644 << 16
zipinfo.compress_type = zipfile.ZIP_DEFLATED
with open(input_file, 'rb') as f:
output_zip.writestr(zipinfo, f.read())
if __name__ == '__main__':
main()
| true
| true
|
1c47729e783feede84d393f9c877b04a40b6c1cf
| 5,680
|
py
|
Python
|
src/morphforgeexamples/exset6_poster_ocns2012/poster1.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | 1
|
2021-01-21T11:31:59.000Z
|
2021-01-21T11:31:59.000Z
|
src/morphforgeexamples/exset6_poster_ocns2012/poster1.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | null | null | null |
src/morphforgeexamples/exset6_poster_ocns2012/poster1.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
""" Simulation of a HodgkinHuxley-type neuron specified through NeuroUnits.
"""
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
from morphforge.stdimports import *
from morphforgecontrib.stdimports import *
eqnset_txt_na = """
define_component hh_na {
i = g * (v-erev) * m**3*h
m_inf = m_alpha_rate / (m_alpha_rate + m_beta_rate)
m_tau = 1.0 / (m_alpha_rate + m_beta_rate)
m' = (m_inf-m) / m_tau
h_inf = h_alpha_rate / (h_alpha_rate + h_beta_rate)
h_tau = 1.0 / (h_alpha_rate + h_beta_rate)
h' = (h_inf-h) / h_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1+a2*V)/(a3+std.math.exp((V+a4)/a5))
m_alpha_rate = StdFormAB(V=v, a1=m_a1, a2=m_a2, a3=m_a3, a4=m_a4, a5=m_a5)
m_beta_rate = StdFormAB(V=v, a1=m_b1, a2=m_b2, a3=m_b3, a4=m_b4, a5=m_b5)
h_alpha_rate = StdFormAB(V=v, a1=h_a1, a2=h_a2, a3=h_a3, a4=h_a4, a5=h_a5)
h_beta_rate = StdFormAB(V=v, a1=h_b1, a2=h_b2, a3=h_b3, a4=h_b4, a5=h_b5)
m_a1={-4.00 ms-1}; m_a2={-0.10 mV-1 ms-1}; m_a3={-1.00}; m_a4={40.00 mV}; m_a5={-10.00 mV};
m_b1={ 4.00 ms-1}; m_b2={ 0.00 mV-1 ms-1}; m_b3={ 0.00}; m_b4={65.00 mV}; m_b5={ 18.00 mV};
h_a1={ 0.07 ms-1}; h_a2={ 0.00 mV-1 ms-1}; h_a3={ 0.00}; h_a4={65.00 mV}; h_a5={ 20.00 mV};
h_b1={ 1.00 ms-1}; h_b2={ 0.00 mV-1 ms-1}; h_b3={ 1.00}; h_b4={35.00 mV}; h_b5={-10.00 mV};
erev = 50.0mV;
<=> PARAMETER g:(S/m2)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_k = """
define_component hh_k {
i = g * (v-erev) * n*n*n*n
n_inf = n_alpha_rate / (n_alpha_rate + n_beta_rate)
n_tau = 1.0 / (n_alpha_rate + n_beta_rate)
n' = (n_inf-n) / n_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1 + a2*V)/(a3+std.math.exp((V+a4)/a5))
n_alpha_rate = StdFormAB(V=v, a1=n_a1, a2=n_a2, a3=n_a3, a4=n_a4, a5=n_a5)
n_beta_rate = StdFormAB(V=v, a1=n_b1, a2=n_b2, a3=n_b3, a4=n_b4, a5=n_b5)
n_a1={-0.55 ms-1}; n_a2={-0.01 mV-1 ms-1}; n_a3={-1.00}; n_a4={55.00 mV}; n_a5={-10.00 mV}
n_b1={0.125 ms-1}; n_b2={ 0.00 mV-1 ms-1}; n_b3={ 0.00}; n_b4={65.00 mV}; n_b5={ 80.00 mV}
g = {36.0mS/cm2}
erev = {-77.0mV}
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_lk = """
define_component hh_lk {
i = {0.3mS/cm2} * (v- {-54.3mV})
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
env = NEURONEnvironment()
sim = env.Simulation()
# Create a cell:
morph_dict = {'root': {'length': 18.8, 'diam': 18.8, 'id':'soma'} }
my_morph = MorphologyTree.fromDictionary(morph_dict)
cell = sim.create_cell(name="Cell1", morphology=my_morph)
#soma = cell.get_location("soma")
# Setup passive channels:
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
# Setup active channels:
na_chl = env.Channel(NeuroUnitEqnsetMechanism, name="NaChl", eqnset=eqnset_txt_na,
default_parameters={"g":qty("120:mS/cm2")}, )
k_chl = env.Channel(NeuroUnitEqnsetMechanism, name="KChl", eqnset=eqnset_txt_k, )
lk_chl = env.Channel(NeuroUnitEqnsetMechanism, name="LKChl", eqnset=eqnset_txt_lk, )
cell.apply_channel( na_chl)
cell.apply_channel( lk_chl)
cell.apply_channel( k_chl)
# Define what to record:
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.record(na_chl, what='m', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(na_chl, what='h', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(k_chl, what='n', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
# Create the stimulus and record the injected current:
cc = sim.create_currentclamp(name="CC1", amp=qty("100:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
sim.record(cc, what=StandardTags.Current)
# run the simulation
results = sim.run()
TagViewer(results, timerange=(50, 250)*units.ms, show=True)
| 40.283688
| 124
| 0.660915
|
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
from morphforge.stdimports import *
from morphforgecontrib.stdimports import *
eqnset_txt_na = """
define_component hh_na {
i = g * (v-erev) * m**3*h
m_inf = m_alpha_rate / (m_alpha_rate + m_beta_rate)
m_tau = 1.0 / (m_alpha_rate + m_beta_rate)
m' = (m_inf-m) / m_tau
h_inf = h_alpha_rate / (h_alpha_rate + h_beta_rate)
h_tau = 1.0 / (h_alpha_rate + h_beta_rate)
h' = (h_inf-h) / h_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1+a2*V)/(a3+std.math.exp((V+a4)/a5))
m_alpha_rate = StdFormAB(V=v, a1=m_a1, a2=m_a2, a3=m_a3, a4=m_a4, a5=m_a5)
m_beta_rate = StdFormAB(V=v, a1=m_b1, a2=m_b2, a3=m_b3, a4=m_b4, a5=m_b5)
h_alpha_rate = StdFormAB(V=v, a1=h_a1, a2=h_a2, a3=h_a3, a4=h_a4, a5=h_a5)
h_beta_rate = StdFormAB(V=v, a1=h_b1, a2=h_b2, a3=h_b3, a4=h_b4, a5=h_b5)
m_a1={-4.00 ms-1}; m_a2={-0.10 mV-1 ms-1}; m_a3={-1.00}; m_a4={40.00 mV}; m_a5={-10.00 mV};
m_b1={ 4.00 ms-1}; m_b2={ 0.00 mV-1 ms-1}; m_b3={ 0.00}; m_b4={65.00 mV}; m_b5={ 18.00 mV};
h_a1={ 0.07 ms-1}; h_a2={ 0.00 mV-1 ms-1}; h_a3={ 0.00}; h_a4={65.00 mV}; h_a5={ 20.00 mV};
h_b1={ 1.00 ms-1}; h_b2={ 0.00 mV-1 ms-1}; h_b3={ 1.00}; h_b4={35.00 mV}; h_b5={-10.00 mV};
erev = 50.0mV;
<=> PARAMETER g:(S/m2)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_k = """
define_component hh_k {
i = g * (v-erev) * n*n*n*n
n_inf = n_alpha_rate / (n_alpha_rate + n_beta_rate)
n_tau = 1.0 / (n_alpha_rate + n_beta_rate)
n' = (n_inf-n) / n_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1 + a2*V)/(a3+std.math.exp((V+a4)/a5))
n_alpha_rate = StdFormAB(V=v, a1=n_a1, a2=n_a2, a3=n_a3, a4=n_a4, a5=n_a5)
n_beta_rate = StdFormAB(V=v, a1=n_b1, a2=n_b2, a3=n_b3, a4=n_b4, a5=n_b5)
n_a1={-0.55 ms-1}; n_a2={-0.01 mV-1 ms-1}; n_a3={-1.00}; n_a4={55.00 mV}; n_a5={-10.00 mV}
n_b1={0.125 ms-1}; n_b2={ 0.00 mV-1 ms-1}; n_b3={ 0.00}; n_b4={65.00 mV}; n_b5={ 80.00 mV}
g = {36.0mS/cm2}
erev = {-77.0mV}
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_lk = """
define_component hh_lk {
i = {0.3mS/cm2} * (v- {-54.3mV})
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
env = NEURONEnvironment()
sim = env.Simulation()
# Create a cell:
morph_dict = {'root': {'length': 18.8, 'diam': 18.8, 'id':'soma'} }
my_morph = MorphologyTree.fromDictionary(morph_dict)
cell = sim.create_cell(name="Cell1", morphology=my_morph)
#soma = cell.get_location("soma")
# Setup passive channels:
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
# Setup active channels:
na_chl = env.Channel(NeuroUnitEqnsetMechanism, name="NaChl", eqnset=eqnset_txt_na,
default_parameters={"g":qty("120:mS/cm2")}, )
k_chl = env.Channel(NeuroUnitEqnsetMechanism, name="KChl", eqnset=eqnset_txt_k, )
lk_chl = env.Channel(NeuroUnitEqnsetMechanism, name="LKChl", eqnset=eqnset_txt_lk, )
cell.apply_channel( na_chl)
cell.apply_channel( lk_chl)
cell.apply_channel( k_chl)
# Define what to record:
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.record(na_chl, what='m', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(na_chl, what='h', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(k_chl, what='n', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
# Create the stimulus and record the injected current:
cc = sim.create_currentclamp(name="CC1", amp=qty("100:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
sim.record(cc, what=StandardTags.Current)
# run the simulation
results = sim.run()
TagViewer(results, timerange=(50, 250)*units.ms, show=True)
| true
| true
|
1c4772bee94a9049c31da5ef09d5c7071e017e16
| 2,599
|
py
|
Python
|
tasrif/processing_pipeline/pandas/convert_to_datetime.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 20
|
2021-12-06T10:41:54.000Z
|
2022-03-13T16:25:43.000Z
|
tasrif/processing_pipeline/pandas/convert_to_datetime.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 33
|
2021-12-06T08:27:18.000Z
|
2022-03-14T05:07:53.000Z
|
tasrif/processing_pipeline/pandas/convert_to_datetime.py
|
qcri/tasrif
|
327bc1eccb8f8e11d8869ba65a7c72ad038aa094
|
[
"BSD-3-Clause"
] | 2
|
2022-02-07T08:06:48.000Z
|
2022-02-14T07:13:42.000Z
|
"""
Operator to convert a column feature from string to datetime
"""
import pandas as pd
from tasrif.processing_pipeline import PandasOperator
from tasrif.processing_pipeline.validators import InputsAreDataFramesValidatorMixin
class ConvertToDatetimeOperator(InputsAreDataFramesValidatorMixin, PandasOperator):
"""
Converts a set of (string) features to datetime using Pandas ``to_datetime``
Examples
--------
>>> import pandas as pd
>>> from tasrif.processing_pipeline.pandas import ConvertToDatetimeOperator
>>>
>>> df0 = pd.DataFrame([[1, "2020-05-01 00:00:00", 1], [1, "2020-05-01 01:00:00", 1],
>>> [1, "2020-05-01 03:00:00", 2], [2, "2020-05-02 00:00:00", 1],
>>> [2, "2020-05-02 01:00:00", 1]],
>>> columns=['logId', 'timestamp', 'sleep_level'])
>>>
>>> operator = ConvertToDatetime(feature_names=["timestamp"], utc=True)
>>> df0 = operator.process(df0)
>>>
>>> print(df0)
. logId timestamp sleep_level
0 1 2020-05-01 00:00:00+00:00 1
1 1 2020-05-01 01:00:00+00:00 1
2 1 2020-05-01 03:00:00+00:00 2
3 2 2020-05-02 00:00:00+00:00 1
4 2 2020-05-02 01:00:00+00:00 1
"""
def __init__(self, feature_names, **kwargs):
"""Convert a set of columns features from string to datetime
Args:
feature_names (str):
Name of the string columns that represent datetime objects
**kwargs:
key word arguments passed to pandas ``to_datetime`` method
"""
self.feature_names = feature_names
super().__init__(kwargs)
self.kwargs = kwargs
def _process(self, *data_frames):
"""Processes the passed data frame as per the configuration define in the constructor.
Args:
*data_frames (list of pd.DataFrame):
Variable number of pandas dataframes to be processed
Returns:
pd.DataFrame -or- list[pd.DataFrame]
Processed dataframe(s) resulting from applying the operator
"""
columns = (
self.feature_names.copy()
if isinstance(self.feature_names, list)
else [self.feature_names]
)
processed = []
for data_frame in data_frames:
for col in columns:
data_frame[col] = pd.to_datetime(
data_frame[col], errors="coerce", **self.kwargs
)
processed.append(data_frame)
return processed
| 33.320513
| 94
| 0.58561
|
import pandas as pd
from tasrif.processing_pipeline import PandasOperator
from tasrif.processing_pipeline.validators import InputsAreDataFramesValidatorMixin
class ConvertToDatetimeOperator(InputsAreDataFramesValidatorMixin, PandasOperator):
def __init__(self, feature_names, **kwargs):
self.feature_names = feature_names
super().__init__(kwargs)
self.kwargs = kwargs
def _process(self, *data_frames):
columns = (
self.feature_names.copy()
if isinstance(self.feature_names, list)
else [self.feature_names]
)
processed = []
for data_frame in data_frames:
for col in columns:
data_frame[col] = pd.to_datetime(
data_frame[col], errors="coerce", **self.kwargs
)
processed.append(data_frame)
return processed
| true
| true
|
1c4772d8628f28ac08f50f8f4e940c76e95bac8c
| 2,757
|
py
|
Python
|
deploy/env/local/lib/python2.7/site-packages/mercurial-3.1-py2.7-linux-x86_64.egg/mercurial/filelog.py
|
wangvictor2012/liuwei
|
0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1
|
[
"BSD-3-Clause"
] | 3
|
2015-11-05T07:42:43.000Z
|
2017-05-29T22:59:47.000Z
|
vendor/lib/python2.7/site-packages/mercurial/filelog.py
|
ddollar/gobuild
|
c1b0e52ab6849a13a95a3fdae4913b925f658272
|
[
"MIT"
] | null | null | null |
vendor/lib/python2.7/site-packages/mercurial/filelog.py
|
ddollar/gobuild
|
c1b0e52ab6849a13a95a3fdae4913b925f658272
|
[
"MIT"
] | null | null | null |
# filelog.py - file history class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import revlog
import re
_mdre = re.compile('\1\n')
def _parsemeta(text):
"""return (metadatadict, keylist, metadatasize)"""
# text can be buffer, so we can't use .startswith or .index
if text[:2] != '\1\n':
return None, None, None
s = _mdre.search(text, 2).start()
mtext = text[2:s]
meta = {}
keys = []
for l in mtext.splitlines():
k, v = l.split(": ", 1)
meta[k] = v
keys.append(k)
return meta, keys, (s + 2)
def _packmeta(meta, keys=None):
if not keys:
keys = sorted(meta.iterkeys())
return "".join("%s: %s\n" % (k, meta[k]) for k in keys)
class filelog(revlog.revlog):
def __init__(self, opener, path):
super(filelog, self).__init__(opener,
"/".join(("data", path + ".i")))
def read(self, node):
t = self.revision(node)
if not t.startswith('\1\n'):
return t
s = t.index('\1\n', 2)
return t[s + 2:]
def add(self, text, meta, transaction, link, p1=None, p2=None):
if meta or text.startswith('\1\n'):
text = "\1\n%s\1\n%s" % (_packmeta(meta), text)
return self.addrevision(text, transaction, link, p1, p2)
def renamed(self, node):
if self.parents(node)[0] != revlog.nullid:
return False
t = self.revision(node)
m = _parsemeta(t)[0]
if m and "copy" in m:
return (m["copy"], revlog.bin(m["copyrev"]))
return False
def size(self, rev):
"""return the size of a given revision"""
# for revisions with renames, we have to go the slow way
node = self.node(rev)
if self.renamed(node):
return len(self.read(node))
# XXX if self.read(node).startswith("\1\n"), this returns (size+4)
return super(filelog, self).size(rev)
def cmp(self, node, text):
"""compare text with a given file revision
returns True if text is different than what is stored.
"""
t = text
if text.startswith('\1\n'):
t = '\1\n\1\n' + text
samehashes = not super(filelog, self).cmp(node, t)
if samehashes:
return False
# renaming a file produces a different hash, even if the data
# remains unchanged. Check if it's the case (slow):
if self.renamed(node):
t2 = self.read(node)
return t2 != text
return True
def _file(self, f):
return filelog(self.opener, f)
| 29.645161
| 74
| 0.564744
|
import revlog
import re
_mdre = re.compile('\1\n')
def _parsemeta(text):
if text[:2] != '\1\n':
return None, None, None
s = _mdre.search(text, 2).start()
mtext = text[2:s]
meta = {}
keys = []
for l in mtext.splitlines():
k, v = l.split(": ", 1)
meta[k] = v
keys.append(k)
return meta, keys, (s + 2)
def _packmeta(meta, keys=None):
if not keys:
keys = sorted(meta.iterkeys())
return "".join("%s: %s\n" % (k, meta[k]) for k in keys)
class filelog(revlog.revlog):
def __init__(self, opener, path):
super(filelog, self).__init__(opener,
"/".join(("data", path + ".i")))
def read(self, node):
t = self.revision(node)
if not t.startswith('\1\n'):
return t
s = t.index('\1\n', 2)
return t[s + 2:]
def add(self, text, meta, transaction, link, p1=None, p2=None):
if meta or text.startswith('\1\n'):
text = "\1\n%s\1\n%s" % (_packmeta(meta), text)
return self.addrevision(text, transaction, link, p1, p2)
def renamed(self, node):
if self.parents(node)[0] != revlog.nullid:
return False
t = self.revision(node)
m = _parsemeta(t)[0]
if m and "copy" in m:
return (m["copy"], revlog.bin(m["copyrev"]))
return False
def size(self, rev):
# for revisions with renames, we have to go the slow way
node = self.node(rev)
if self.renamed(node):
return len(self.read(node))
# XXX if self.read(node).startswith("\1\n"), this returns (size+4)
return super(filelog, self).size(rev)
def cmp(self, node, text):
t = text
if text.startswith('\1\n'):
t = '\1\n\1\n' + text
samehashes = not super(filelog, self).cmp(node, t)
if samehashes:
return False
# renaming a file produces a different hash, even if the data
# remains unchanged. Check if it's the case (slow):
if self.renamed(node):
t2 = self.read(node)
return t2 != text
return True
def _file(self, f):
return filelog(self.opener, f)
| true
| true
|
1c47737253ed550c0b8f08ac8b7f413886c1457e
| 14,684
|
py
|
Python
|
train.py
|
solmn/parallel_wavenet
|
45e9eceb7a2d1982b3d45823332575eb26f333c0
|
[
"MIT"
] | 3
|
2018-10-30T13:45:14.000Z
|
2020-03-29T06:56:10.000Z
|
train.py
|
solmn/parallel_wavenet
|
45e9eceb7a2d1982b3d45823332575eb26f333c0
|
[
"MIT"
] | null | null | null |
train.py
|
solmn/parallel_wavenet
|
45e9eceb7a2d1982b3d45823332575eb26f333c0
|
[
"MIT"
] | null | null | null |
"""Training script for the WaveNet network on the VCTK corpus.
This script trains a network with the WaveNet using data from the VCTK corpus,
which can be freely downloaded at the following site (~10 GB):
http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html
"""
from __future__ import print_function
import argparse
from datetime import datetime
import json
import os
import sys
import time
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, AudioReader, optimizer_factory
BATCH_SIZE = 1
DATA_DIRECTORY = './dataset/LJSpeech/wavs/'
LOGDIR_ROOT = './logdir'
CHECKPOINT_EVERY = 100
NUM_STEPS = int(1e6)
LEARNING_RATE = 2 *1e-5
WAVENET_PARAMS = './wavenet_params.json'
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = 8000
L2_REGULARIZATION_STRENGTH = 0
SILENCE_THRESHOLD = 0.1
EPSILON = 1e-8
MOMENTUM = 0.9
MAX_TO_KEEP = 5
METADATA = False
def get_arguments():
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser(description='WaveNet example network')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
help='The directory containing the VCTK corpus.')
parser.add_argument('--store_metadata', type=bool, default=METADATA,
help='Whether to store advanced debugging information '
'(execution time, memory consumption) for use with '
'TensorBoard. Default: ' + str(METADATA) + '.')
parser.add_argument('--logdir', type=str, default=None,
help='Directory in which to store the logging '
'information for TensorBoard. '
'If the model already exists, it will restore '
'the state and will continue training. '
'Cannot use with --logdir_root and --restore_from.')
parser.add_argument('--logdir_root', type=str, default=None,
help='Root directory to place the logging '
'output and generated model. These are stored '
'under the dated subdirectory of --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--restore_from', type=str, default=None,
help='Directory in which to restore the model from. '
'This creates the new model under the dated directory '
'in --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--checkpoint_every', type=int,
default=CHECKPOINT_EVERY,
help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
parser.add_argument('--wavenet_params', type=str, default=WAVENET_PARAMS,
help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
help='Concatenate and cut audio samples to this many '
'samples. Default: ' + str(SAMPLE_SIZE) + '.')
parser.add_argument('--l2_regularization_strength', type=float,
default=L2_REGULARIZATION_STRENGTH,
help='Coefficient in the L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--lc_channels', type=int, default=None,
help='Number of local condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum amount of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args()
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
# logdir = "logdir/train/2018-09-07T19-20-47/"
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
def validate_directories(args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
with open(args.wavenet_params, 'r') as f:
wavenet_params = json.load(f)
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params["filter_width"],
wavenet_params["dilations"],
wavenet_params["scalar_input"],
wavenet_params["initial_filter_width"]),
sample_size=args.sample_size,
silence_threshold=silence_threshold)
audio_batch = reader.dequeue(args.batch_size)
if gc_enabled:
gc_id_batch = reader.dequeue_gc(args.batch_size)
else:
gc_id_batch = None
# Create network.
net = WaveNetModel(
batch_size=args.batch_size,
dilations=wavenet_params["dilations"],
filter_width=wavenet_params["filter_width"],
residual_channels=wavenet_params["residual_channels"],
dilation_channels=wavenet_params["dilation_channels"],
skip_channels=wavenet_params["skip_channels"],
quantization_channels=wavenet_params["quantization_channels"],
output_channels = wavenet_params["output_channels"],
log_scale_min = wavenet_params["log_scale_min"],
use_biases=wavenet_params["use_biases"],
scalar_input=wavenet_params["scalar_input"],
initial_filter_width=wavenet_params["initial_filter_width"],
histograms=args.histograms,
local_condition_channels = args.lc_channels,
global_condition_channels=args.gc_channels,
global_condition_cardinality=reader.gc_category_cardinality)
if args.l2_regularization_strength == 0:
args.l2_regularization_strength = None
loss = net.loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
optimizer = optimizer_factory[args.optimizer](
learning_rate=args.learning_rate,
momentum=args.momentum)
trainable = tf.trainable_variables()
optim = optimizer.minimize(loss, var_list=trainable)
# Set up logging for TensorBoard.
writer = tf.summary.FileWriter(logdir)
writer.add_graph(tf.get_default_graph())
run_metadata = tf.RunMetadata()
summaries = tf.summary.merge_all()
# Set up session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
try:
saved_global_step = load(saver, sess, restore_from)
if is_overwritten_training or saved_global_step is None:
# The first training step will be saved_global_step + 1,
# therefore we put -1 here for new or overwritten trainings.
saved_global_step = -1
except:
print("Something went wrong while restoring checkpoint. "
"We will terminate training to avoid accidentally overwriting "
"the previous model.")
raise
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
reader.start_threads(sess)
step = None
last_saved_step = saved_global_step
try:
for step in range(saved_global_step + 1, args.num_steps):
start_time = time.time()
if args.store_metadata and step % 50 == 0:
# Slow run that stores extra information for debugging.
print('Storing metadata')
run_options = tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE)
summary, loss_value, _ = sess.run(
[summaries, loss, optim],
options=run_options,
run_metadata=run_metadata)
writer.add_summary(summary, step)
writer.add_run_metadata(run_metadata,
'step_{:04d}'.format(step))
tl = timeline.Timeline(run_metadata.step_stats)
timeline_path = os.path.join(logdir, 'timeline.trace')
with open(timeline_path, 'w') as f:
f.write(tl.generate_chrome_trace_format(show_memory=True))
else:
summary, loss_value, _ = sess.run([summaries, loss, optim])
writer.add_summary(summary, step)
duration = time.time() - start_time
print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'
.format(step, loss_value, duration))
if step % args.checkpoint_every == 0:
save(saver, sess, logdir, step)
last_saved_step = step
except KeyboardInterrupt:
# Introduce a line break after ^C is displayed so save message
# is on its own line.
print()
finally:
if step > last_saved_step:
save(saver, sess, logdir, step)
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
| 42.686047
| 117
| 0.611959
|
from __future__ import print_function
import argparse
from datetime import datetime
import json
import os
import sys
import time
import tensorflow as tf
from tensorflow.python.client import timeline
from wavenet import WaveNetModel, AudioReader, optimizer_factory
BATCH_SIZE = 1
DATA_DIRECTORY = './dataset/LJSpeech/wavs/'
LOGDIR_ROOT = './logdir'
CHECKPOINT_EVERY = 100
NUM_STEPS = int(1e6)
LEARNING_RATE = 2 *1e-5
WAVENET_PARAMS = './wavenet_params.json'
STARTED_DATESTRING = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
SAMPLE_SIZE = 8000
L2_REGULARIZATION_STRENGTH = 0
SILENCE_THRESHOLD = 0.1
EPSILON = 1e-8
MOMENTUM = 0.9
MAX_TO_KEEP = 5
METADATA = False
def get_arguments():
def _str_to_bool(s):
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser(description='WaveNet example network')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
help='The directory containing the VCTK corpus.')
parser.add_argument('--store_metadata', type=bool, default=METADATA,
help='Whether to store advanced debugging information '
'(execution time, memory consumption) for use with '
'TensorBoard. Default: ' + str(METADATA) + '.')
parser.add_argument('--logdir', type=str, default=None,
help='Directory in which to store the logging '
'information for TensorBoard. '
'If the model already exists, it will restore '
'the state and will continue training. '
'Cannot use with --logdir_root and --restore_from.')
parser.add_argument('--logdir_root', type=str, default=None,
help='Root directory to place the logging '
'output and generated model. These are stored '
'under the dated subdirectory of --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--restore_from', type=str, default=None,
help='Directory in which to restore the model from. '
'This creates the new model under the dated directory '
'in --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--checkpoint_every', type=int,
default=CHECKPOINT_EVERY,
help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
parser.add_argument('--wavenet_params', type=str, default=WAVENET_PARAMS,
help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
help='Concatenate and cut audio samples to this many '
'samples. Default: ' + str(SAMPLE_SIZE) + '.')
parser.add_argument('--l2_regularization_strength', type=float,
default=L2_REGULARIZATION_STRENGTH,
help='Coefficient in the L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--lc_channels', type=int, default=None,
help='Number of local condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum amount of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args()
def save(saver, sess, logdir, step):
    """Persist a training checkpoint for ``step`` under ``logdir``.

    Creates ``logdir`` on demand, then delegates the actual write to
    ``saver.save`` using the step number as the global-step suffix.
    """
    ckpt_path = os.path.join(logdir, 'model.ckpt')
    print('Storing checkpoint to {} ...'.format(logdir), end="")
    sys.stdout.flush()
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    saver.save(sess, ckpt_path, global_step=step)
    print(' Done.')
def load(saver, sess, logdir):
    """Restore the most recent checkpoint found in ``logdir``.

    Returns the global step encoded in the checkpoint filename, or
    ``None`` when no checkpoint is available.
    """
    print("Trying to restore saved checkpoints from {} ...".format(logdir),
          end="")
    ckpt = tf.train.get_checkpoint_state(logdir)
    if not ckpt:
        print(" No checkpoint found.")
        return None
    print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
    # Checkpoint files look like .../model.ckpt-<step>; recover <step>.
    step_token = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    global_step = int(step_token)
    print(" Global step was: {}".format(global_step))
    print(" Restoring...", end="")
    saver.restore(sess, ckpt.model_checkpoint_path)
    print(" Done.")
    return global_step
def get_default_logdir(logdir_root):
    """Build the default log directory: <root>/train/<start timestamp>."""
    return os.path.join(logdir_root, 'train', STARTED_DATESTRING)
def validate_directories(args):
    """Validate and resolve the logging/restore directory arguments.

    Raises:
        ValueError: when mutually exclusive directory options are combined.

    Returns:
        dict with resolved 'logdir', 'logdir_root' and 'restore_from'.
    """
    if args.logdir and args.logdir_root:
        raise ValueError("--logdir and --logdir_root cannot be "
                         "specified at the same time.")
    if args.logdir and args.restore_from:
        raise ValueError(
            "--logdir and --restore_from cannot be specified at the same "
            "time. This is to keep your previous model from unexpected "
            "overwrites.\n"
            "Use --logdir_root to specify the root of the directory which "
            "will be automatically created with current date and time, or use "
            "only --logdir to just continue the training from the last "
            "checkpoint.")
    logdir_root = args.logdir_root
    if logdir_root is None:
        logdir_root = LOGDIR_ROOT
    logdir = args.logdir
    if logdir is None:
        logdir = get_default_logdir(logdir_root)
        print('Using default logdir: {}'.format(logdir))
    # By default resume from the directory we are writing to, so training
    # continues in place.
    restore_from = args.restore_from
    if restore_from is None:
        restore_from = logdir
    return {
        'logdir': logdir,
        # BUG FIX: previously returned args.logdir_root verbatim, dropping
        # the LOGDIR_ROOT fallback applied above.
        'logdir_root': logdir_root,
        'restore_from': restore_from
    }
def main():
    """Train a WaveNet: build the input pipeline, the network and the
    optimizer, then run the training loop with periodic checkpointing.
    Uses TF1-style graph/session execution.
    """
    args = get_arguments()
    try:
        directories = validate_directories(args)
    except ValueError as e:
        print("Some arguments are wrong:")
        print(str(e))
        return
    logdir = directories['logdir']
    restore_from = directories['restore_from']
    # Restoring into a different directory counts as fresh (overwritten)
    # training: the saved step counter is discarded below.
    is_overwritten_training = logdir != restore_from
    with open(args.wavenet_params, 'r') as f:
        wavenet_params = json.load(f)
    coord = tf.train.Coordinator()
    with tf.name_scope('create_inputs'):
        # A threshold at or below EPSILON disables silence trimming.
        silence_threshold = args.silence_threshold if args.silence_threshold > \
            EPSILON else None
        gc_enabled = args.gc_channels is not None
        reader = AudioReader(
            args.data_dir,
            coord,
            sample_rate=wavenet_params['sample_rate'],
            gc_enabled=gc_enabled,
            receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params["filter_width"],
                                                                   wavenet_params["dilations"],
                                                                   wavenet_params["scalar_input"],
                                                                   wavenet_params["initial_filter_width"]),
            sample_size=args.sample_size,
            silence_threshold=silence_threshold)
        audio_batch = reader.dequeue(args.batch_size)
        if gc_enabled:
            gc_id_batch = reader.dequeue_gc(args.batch_size)
        else:
            gc_id_batch = None
    net = WaveNetModel(
        batch_size=args.batch_size,
        dilations=wavenet_params["dilations"],
        filter_width=wavenet_params["filter_width"],
        residual_channels=wavenet_params["residual_channels"],
        dilation_channels=wavenet_params["dilation_channels"],
        skip_channels=wavenet_params["skip_channels"],
        quantization_channels=wavenet_params["quantization_channels"],
        output_channels = wavenet_params["output_channels"],
        log_scale_min = wavenet_params["log_scale_min"],
        use_biases=wavenet_params["use_biases"],
        scalar_input=wavenet_params["scalar_input"],
        initial_filter_width=wavenet_params["initial_filter_width"],
        histograms=args.histograms,
        local_condition_channels = args.lc_channels,
        global_condition_channels=args.gc_channels,
        global_condition_cardinality=reader.gc_category_cardinality)
    # A strength of 0 means "no L2 regularization".
    if args.l2_regularization_strength == 0:
        args.l2_regularization_strength = None
    loss = net.loss(input_batch=audio_batch,
                    global_condition_batch=gc_id_batch,
                    l2_regularization_strength=args.l2_regularization_strength)
    optimizer = optimizer_factory[args.optimizer](
                    learning_rate=args.learning_rate,
                    momentum=args.momentum)
    trainable = tf.trainable_variables()
    optim = optimizer.minimize(loss, var_list=trainable)
    # Set up logging for TensorBoard.
    writer = tf.summary.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    run_metadata = tf.RunMetadata()
    summaries = tf.summary.merge_all()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
    try:
        saved_global_step = load(saver, sess, restore_from)
        if is_overwritten_training or saved_global_step is None:
            # First training step will be saved_global_step + 1, so -1
            # starts a new/overwritten run at step 0.
            saved_global_step = -1
    except:
        # NOTE(review): bare except — also catches KeyboardInterrupt etc.
        print("Something went wrong while restoring checkpoint. "
              "We will terminate training to avoid accidentally overwriting "
              "the previous model.")
        raise
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    reader.start_threads(sess)
    step = None
    last_saved_step = saved_global_step
    try:
        for step in range(saved_global_step + 1, args.num_steps):
            start_time = time.time()
            if args.store_metadata and step % 50 == 0:
                # Slow profiled run: store run metadata for TensorBoard and
                # dump a Chrome trace timeline alongside the logs.
                print('Storing metadata')
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                summary, loss_value, _ = sess.run(
                    [summaries, loss, optim],
                    options=run_options,
                    run_metadata=run_metadata)
                writer.add_summary(summary, step)
                writer.add_run_metadata(run_metadata,
                                        'step_{:04d}'.format(step))
                tl = timeline.Timeline(run_metadata.step_stats)
                timeline_path = os.path.join(logdir, 'timeline.trace')
                with open(timeline_path, 'w') as f:
                    f.write(tl.generate_chrome_trace_format(show_memory=True))
            else:
                summary, loss_value, _ = sess.run([summaries, loss, optim])
                writer.add_summary(summary, step)
            duration = time.time() - start_time
            print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'
                  .format(step, loss_value, duration))
            if step % args.checkpoint_every == 0:
                save(saver, sess, logdir, step)
                last_saved_step = step
    except KeyboardInterrupt:
        # Line break after ^C so the save message starts on its own line.
        print()
    finally:
        # NOTE(review): if the loop never ran, step is still None and this
        # comparison raises TypeError on Python 3 — confirm num_steps > 0.
        if step > last_saved_step:
            save(saver, sess, logdir, step)
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and launch training.
    main()
| true
| true
|
1c4773afb9dfe031efe91c301916c555e9dcc6a3
| 9,570
|
py
|
Python
|
src/HYPERPLUME/hyperplume.py
|
Pabsm94/Easyplume
|
ee54194c1c0930b2a0ef442c47f80bd4570913d2
|
[
"MIT"
] | null | null | null |
src/HYPERPLUME/hyperplume.py
|
Pabsm94/Easyplume
|
ee54194c1c0930b2a0ef442c47f80bd4570913d2
|
[
"MIT"
] | null | null | null |
src/HYPERPLUME/hyperplume.py
|
Pabsm94/Easyplume
|
ee54194c1c0930b2a0ef442c47f80bd4570913d2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 14:07:39 2016
@author: pablo
"""
import numpy as np
import abc
import matplotlib.pyplot as plt
class Hyperplume():
    """Parent class of the plume expansion models (AEM and SSM).

    Loads the target plasma description and provides the attributes and
    thermodynamic helper methods shared by the concrete plume solvers.
    """

    # NOTE(review): Python-2 metaclass idiom; inert under Python 3, so
    # abstractness is not actually enforced here.
    __metaclass__ = abc.ABCMeta

    @abc.abstractclassmethod  # NOTE(review): deprecated alias, kept for interface stability
    def solver(self):
        """Abstract solver hook; particularised by each plume code."""
        return

    @abc.abstractclassmethod
    def query(self, z, r):
        """Abstract query hook returning plasma profiles at grid points (z, r)."""
        return

    def __init__(self, plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}}, z_span=np.linspace(0,100,500), r_span=np.linspace(0,40,500), n_init=0.0472*np.linspace(1,0,500)**2):
        """Load the common plume attributes.

        Args:
            plasma (dict): simple_plasma-style dictionary with 'Electrons'
                and 'Ions' species parameters.
            z_span (numpy.ndarray): axial region where the problem is integrated.
            r_span (numpy.ndarray): initial far-field radial profile.
            n_init (numpy.ndarray): initial dimensional density front.

        NOTE(review): the mutable/array defaults are shared across calls;
        callers should pass their own arrays.
        """
        self.plasma = plasma
        self.Gamma = plasma['Electrons']['Gamma']
        self.T_0 = plasma['Electrons']['T_0_electron']
        self.m_ion = plasma['Ions']['mass_ion']
        self.q_ion = plasma['Ions']['q_ion']
        self.z_span = z_span
        self.eta = r_span   # ion stream-line coordinate (initial radial profile)
        self.n0 = n_init

    def simple_plasma(self, charge=1.6e-19, ion_mass=2.1801714e-25, init_plasma_temp=2.1801714e-19, Gamma=1):
        """Build a two-species (ions + electrons) plasma dictionary.

        Args:
            charge (float): elementary charge [C].
            ion_mass (float): ion mass [kg].
            init_plasma_temp (float): initial plasma temperature [J].
            Gamma (int or float): thermal expansion exponent; this code
                accepts [1, 2].

        Returns:
            dict or None: plasma dictionary, or None (with a printed
            warning) when Gamma is out of range.
        """
        # NOTE(review): docs elsewhere cite [1, 5/3] as the valid range while
        # the check below accepts up to 2 — confirm the intended bound.
        if Gamma < 1 or Gamma > 2:
            print('Gamma is outside isothermal or polytropic boundaries')
            # BUG FIX: previously fell through to `return plasma` with the
            # name unbound, raising UnboundLocalError; return None instead.
            plasma = None
        else:
            plasma = {'Ions': {'mass_ion': ion_mass, 'q_ion': charge}, 'Electrons': {'q_electron': -charge, 'T_0_electron': init_plasma_temp, 'Gamma': Gamma}}
        return plasma

    def temp(self, n, n_0, T_0, Gamma):
        """Plasma temperature as a function of density.

        Isothermal (Gamma == 1): constant T_0 (broadcast to n's shape).
        Polytropic: T = T_0 * (n/n_0)**(Gamma - 1).
        """
        if Gamma == 1:
            T = T_0*(n*0 + 1)
        else:
            T = T_0*((n/n_0)**(Gamma-1))
        return T

    def phi(self, n, n_0, T_0, Gamma, e_charge):
        """Ambipolar electric potential as a function of density.

        Isothermal: phi = (T_0/e) * ln(n/n_0).
        Polytropic: phi = (T_0/e) * Gamma/(Gamma-1) * ((n/n_0)**(Gamma-1) - 1).
        """
        if Gamma == 1:
            phi = (T_0/e_charge)*np.log(n/n_0)
        else:
            phi = (T_0/e_charge)*(Gamma / ((Gamma - 1)) * ((n/n_0)**(Gamma-1)-1))
        return phi

    def n(self, n_0, T_0, phi, Gamma, e_charge):
        """Plasma density as a function of the electric potential (inverse of phi()).

        Isothermal: n = n_0 * exp(e*phi/T_0).
        Polytropic: n = n_0 * (1 + (Gamma-1)/Gamma * e*phi/T_0)**(1/(Gamma-1)).
        """
        if Gamma == 1:
            n = n_0*np.exp(phi*e_charge/T_0)
        else:
            # BUG FIX: the exponent was written `**1/(Gamma-1)`, which Python
            # parses as (x**1)/(Gamma-1); it must be **(1/(Gamma-1)) so that
            # n() actually inverts phi().
            n = n_0*(((Gamma-1)/Gamma*phi*e_charge/T_0 + 1)**(1/(Gamma-1)))
        return n

    def eta_deriver(self, x, y):
        """Numerical derivative of y with respect to x (along eta).

        NOTE(review): passes np.gradient(x) (spacings) as the second
        argument of np.gradient, which modern numpy interprets as coordinate
        values — verify the intent for non-uniform grids.
        """
        dx = np.gradient(x)
        y_prime = np.gradient(y, dx)
        return y_prime

    def plot(self, z=np.array([15,20,25,30]), r=np.array([20,25,30,35]), var_name='n', contour_levels=[0,1,2,3,4,5,6,7,8]):
        """Contour-plot one plasma variable over the (z, r) grid and save it
        as '<var_name>.pdf'.

        var_name options: 'lnn', 'u_z', 'u_r', 'T', 'phi', 'eta'.
        """
        # Retrieve the plasma variables at the requested z, r grid points.
        lnn, u_z, u_r, T, phi, error, eta = self.query(z, r)
        fig = plt.figure()
        # eval() resolves var_name against the locals unpacked above; only
        # the documented names are meaningful.
        CE = plt.contour(z, r, eval(var_name), contour_levels)
        plt.title(var_name)
        plt.xlabel(r'$\ z/R_0 $')
        plt.ylabel(r'$\ r/R_0 $')
        plt.ylim(0, 10)
        plt.clabel(CE, CE.levels, fontsize=6)
        plt.savefig(var_name + '.pdf', bbox_inches='tight')
        fig.show()
| 34.301075
| 262
| 0.546604
|
import numpy as np
import abc
import matplotlib.pyplot as plt
class Hyperplume():
    """Parent class of the plume expansion models (AEM and SSM); holds the
    plasma description plus the shared thermodynamic helpers."""

    __metaclass__ = abc.ABCMeta  # NOTE(review): Python-2 idiom, inert on Python 3

    @abc.abstractclassmethod
    def solver(self):
        """Abstract solver hook; particularised by each plume code."""
        return

    @abc.abstractclassmethod
    def query(self, z, r):
        """Abstract query hook returning plasma profiles at (z, r)."""
        return

    def __init__(self, plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}}, z_span=np.linspace(0,100,500), r_span=np.linspace(0,40,500), n_init=0.0472*np.linspace(1,0,500)**2):
        """Load the common plume attributes (plasma dict, axial span,
        initial radial profile and density front)."""
        self.plasma = plasma
        self.Gamma = plasma['Electrons']['Gamma']
        self.T_0 = plasma['Electrons']['T_0_electron']
        self.m_ion = plasma['Ions']['mass_ion']
        self.q_ion = plasma['Ions']['q_ion']
        self.z_span = z_span
        self.eta = r_span
        self.n0 = n_init

    def simple_plasma(self, charge=1.6e-19, ion_mass=2.1801714e-25, init_plasma_temp=2.1801714e-19, Gamma=1):
        """Build a two-species plasma dictionary; returns None (with a
        printed warning) when Gamma is outside [1, 2]."""
        if Gamma < 1 or Gamma > 2:
            print('Gamma is outside isothermal or polytropic boundaries')
            # BUG FIX: previously fell through and raised UnboundLocalError.
            plasma = None
        else:
            plasma = {'Ions': {'mass_ion': ion_mass, 'q_ion': charge}, 'Electrons': {'q_electron': -charge, 'T_0_electron': init_plasma_temp, 'Gamma': Gamma}}
        return plasma

    def temp(self, n, n_0, T_0, Gamma):
        """T = T_0 * (n/n_0)**(Gamma-1); constant T_0 when isothermal."""
        if Gamma == 1:
            T = T_0*(n*0 + 1)
        else:
            T = T_0*((n/n_0)**(Gamma-1))
        return T

    def phi(self, n, n_0, T_0, Gamma, e_charge):
        """Ambipolar potential as a function of density."""
        if Gamma == 1:
            phi = (T_0/e_charge)*np.log(n/n_0)
        else:
            phi = (T_0/e_charge)*(Gamma / ((Gamma - 1)) * ((n/n_0)**(Gamma-1)-1))
        return phi

    def n(self, n_0, T_0, phi, Gamma, e_charge):
        """Density as a function of potential (inverse of phi())."""
        if Gamma == 1:
            n = n_0*np.exp(phi*e_charge/T_0)
        else:
            # BUG FIX: exponent was `**1/(Gamma-1)` == (x**1)/(Gamma-1);
            # must be **(1/(Gamma-1)) to invert phi().
            n = n_0*(((Gamma-1)/Gamma*phi*e_charge/T_0 + 1)**(1/(Gamma-1)))
        return n

    def eta_deriver(self, x, y):
        """Numerical derivative dy/dx along eta via np.gradient."""
        dx = np.gradient(x)
        y_prime = np.gradient(y, dx)
        return y_prime

    def plot(self, z=np.array([15,20,25,30]), r=np.array([20,25,30,35]), var_name='n', contour_levels=[0,1,2,3,4,5,6,7,8]):
        """Contour-plot one plasma variable over (z, r) and save as
        '<var_name>.pdf'."""
        lnn, u_z, u_r, T, phi, error, eta = self.query(z, r)
        fig = plt.figure()
        CE = plt.contour(z, r, eval(var_name), contour_levels)
        plt.title(var_name)
        plt.xlabel(r'$\ z/R_0 $')
        plt.ylabel(r'$\ r/R_0 $')
        plt.ylim(0, 10)
        plt.clabel(CE, CE.levels, fontsize=6)
        plt.savefig(var_name + '.pdf', bbox_inches='tight')
        fig.show()
| true
| true
|
1c47745f1c0e2c39646a97885253608082c44006
| 46
|
py
|
Python
|
__init__.py
|
lucaskjaero/WiktionaryParser
|
c60a7cb7e50ca929e02c8e6e258c23f4d4114c21
|
[
"MIT"
] | 1
|
2021-08-24T17:51:41.000Z
|
2021-08-24T17:51:41.000Z
|
__init__.py
|
lucaskjaero/WiktionaryParser
|
c60a7cb7e50ca929e02c8e6e258c23f4d4114c21
|
[
"MIT"
] | null | null | null |
__init__.py
|
lucaskjaero/WiktionaryParser
|
c60a7cb7e50ca929e02c8e6e258c23f4d4114c21
|
[
"MIT"
] | 1
|
2020-12-14T16:22:31.000Z
|
2020-12-14T16:22:31.000Z
|
from .wiktionaryparser import WiktionaryParser
| 46
| 46
| 0.913043
|
from .wiktionaryparser import WiktionaryParser
| true
| true
|
1c477468c75e4642c2f29e87bfdbf22ef08e11fd
| 4,043
|
py
|
Python
|
models/definitions/flownet/inference.py
|
HaydenFaulkner/VidDet
|
2dbc104a41bf1192a00ffde07695180eab18cea8
|
[
"MIT"
] | 19
|
2019-08-05T12:20:17.000Z
|
2020-10-29T11:33:50.000Z
|
models/definitions/flownet/inference.py
|
HaydenFaulkner/VideoYOLO
|
2dbc104a41bf1192a00ffde07695180eab18cea8
|
[
"MIT"
] | 2
|
2021-08-25T14:47:55.000Z
|
2022-02-09T23:30:49.000Z
|
models/definitions/flownet/inference.py
|
HaydenFaulkner/VideoYOLO
|
2dbc104a41bf1192a00ffde07695180eab18cea8
|
[
"MIT"
] | 3
|
2020-03-02T14:52:18.000Z
|
2020-06-05T07:51:18.000Z
|
import glob
import os

import cv2
import mxnet as mx
import numpy as np
from scipy.misc import imresize
from tqdm import tqdm

from flownet import get_flownet
from utils import flow_to_image, crop, normalise
def process_two_images(model, imgs, ctx=None):
    """Run the flow network on a pair of images.

    Args:
        model: callable flow network.
        imgs: list of exactly two images (HxWx3 RGB arrays) or file paths.
        ctx: the model ctx.

    Returns:
        (img, flow): the rendered flow visualisation (bilinearly upscaled
        4x) and the raw HxWx2 flow field, or None on invalid input.
    """
    if len(imgs) != 2:
        return None
    # Load any path arguments from disk as RGB.
    # BUG FIX: previously read `files[i]` — both names undefined in this
    # scope — instead of the path that was actually passed in.
    for k in range(2):
        if isinstance(imgs[k], str):
            if not os.path.exists(imgs[k]):
                return None
            imgs[k] = cv2.cvtColor(cv2.imread(imgs[k]), cv2.COLOR_BGR2RGB)
    imgs = crop(imgs)
    imgs = np.array(imgs)
    imgs = np.moveaxis(imgs, -1, 1)  # HWC -> CHW
    imgs = normalise(imgs)
    imgs = mx.nd.array(imgs, ctx=ctx)
    imgs = mx.nd.expand_dims(imgs, 0)  # add batch axis
    flow = model(imgs)  # run the model
    flow = flow.asnumpy()
    flow = flow.squeeze()
    flow = flow.transpose(1, 2, 0)  # CHW -> HWC
    img = flow_to_image(flow)
    img = imresize(img, 4.0)  # doing the bilinear interpolation on the img, NOT flow cause was too hard :'(
    return img, flow
def process_imagedir(model, input_dir, output_dir=None, ctx=None):
    """Compute flow renderings for consecutive frames found in a directory.

    Tries common image extensions until one matches, then writes one flow
    image per consecutive frame pair into ``output_dir`` (default: a
    'flow' subdirectory next to the inputs).

    Returns:
        The output directory, or None when no images are found.
    """
    files = []
    for ext in [".jpg", ".png", ".jpeg", ".JPG", ".PNG", ".JPEG"]:
        files = glob.glob(input_dir + "/**/*" + ext, recursive=True)
        if len(files) > 0:
            break
    if not len(files) > 0:
        print("Couldn't find any files in {}".format(input_dir))
        return None
    files.sort()
    for i in tqdm(range(len(files) - 1), desc='Calculating Flow'):
        img, flow = process_two_images(model, files[i:i+2], ctx)
        dir, file = os.path.split(files[i])
        if output_dir is None:
            output_dir = os.path.join(dir, 'flow')
        # BUG FIX: the keyword is `exist_ok`; `exists_ok` raised TypeError.
        os.makedirs(output_dir, exist_ok=True)
        cv2.imwrite(os.path.join(output_dir, file), cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    return output_dir
def process_video(model, input_path, output_path=None, ctx=None):
    """
    Process a video into a flow video
    Args:
        model: the flow network
        input_path: path of the source video
        output_path: path for the flow video (default: <input>_flow.mp4)
        ctx: the model ctx
    Returns:
        The output path, or None when fewer than 2 frames could be read.
    """
    capture = cv2.VideoCapture(input_path)
    frames = []
    while_safety = 0
    # NOTE(review): hard 200-frame cap — the commented-out expression shows
    # the full-length intent; looks like a debugging limit.
    while len(frames) < 200:# int(capture.get(cv2.CAP_PROP_FRAME_COUNT))-1:
        _, image = capture.read()  # read an image from the capture
        if while_safety > 500:  # break the while if our safety maxs out at 500
            break
        if image is None:  # failed read: count it and retry
            while_safety += 1
            continue
        while_safety = 0  # reset the safety count
        frames.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    capture.release()
    if len(frames) < 2:
        return None
    if output_path is None:
        output_path = input_path[:-4] + '_flow.mp4'
    # Output frame size comes from the cropped frames (model input size).
    cropped_frames = crop(frames)
    h, w, _= cropped_frames[0].shape
    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25, (w, h))
    for i in tqdm(range(len(frames)-1), desc='Calculating Flow'):
        mx.nd.waitall()  # ensure previous async mxnet work is finished
        img, flow = process_two_images(model, frames[i:i+2], ctx)
        video.write(img)
    video.release()  # release the video
    return output_path
if __name__ == '__main__':
    # just for debugging
    # save_path = "models/definitions/flownet/weights/FlowNet2-S_checkpoint.params"
    # NOTE(review): save_path is assigned but never used — the weights load
    # via pretrained=True in get_flownet.
    save_path = "models/definitions/flownet/weights/FlowNet2-C_checkpoint.params"
    ctx = mx.gpu(0)
    # net = get_flownet('S', pretrained=True, ctx=ctx)
    net = get_flownet('C', pretrained=True, ctx=ctx)
    net.hybridize()
    input_path = "/path/to/test.mp4"
    process_video(net, input_path, ctx=ctx)
    print("DONE")
| 26.083871
| 108
| 0.606728
|
import cv2
import mxnet as mx
import numpy as np
from scipy.misc import imresize
from tqdm import tqdm
from flownet import get_flownet
from utils import flow_to_image, crop, normalise
def process_two_images(model, imgs, ctx=None):
    # Run the flow model on a pair of images; returns (flow image, raw flow)
    # or None on invalid input.
    # NOTE(review): `os` is used below but not imported at the top of this file.
    if len(imgs) != 2:
        return None
    if isinstance(imgs[0], str):
        if os.path.exists(imgs[0]):
            # NOTE(review): `files` and `i` are undefined here and would raise
            # NameError; presumably imgs[0] was intended.
            imgs[0] = cv2.cvtColor(cv2.imread(files[i]), cv2.COLOR_BGR2RGB)
        else:
            return None
    if isinstance(imgs[1], str):
        if os.path.exists(imgs[1]):
            # NOTE(review): same undefined-name issue (imgs[1]?).
            imgs[1] = cv2.cvtColor(cv2.imread(files[i]), cv2.COLOR_BGR2RGB)
        else:
            return None
    imgs = crop(imgs)
    imgs = np.array(imgs)
    imgs = np.moveaxis(imgs, -1, 1)  # HWC -> CHW
    imgs = normalise(imgs)
    imgs = mx.nd.array(imgs, ctx=ctx)
    imgs = mx.nd.expand_dims(imgs, 0)  # add batch axis
    flow = model(imgs)
    flow = flow.asnumpy()
    flow = flow.squeeze()
    flow = flow.transpose(1, 2, 0)  # CHW -> HWC
    img = flow_to_image(flow)
    img = imresize(img, 4.0)  # upscale the rendered image 4x
    return img, flow
def process_imagedir(model, input_dir, output_dir=None, ctx=None):
    # Compute flow renderings for consecutive frames found in input_dir.
    # Returns the output directory, or None when no images are found.
    files = []
    for ext in [".jpg", ".png", ".jpeg", ".JPG", ".PNG", ".JPEG"]:
        files = glob.glob(input_dir + "/**/*" + ext, recursive=True)
        if len(files) > 0:
            break
    if not len(files) > 0:
        print("Couldn't find any files in {}".format(input_dir))
        return None
    files.sort()
    for i in tqdm(range(len(files) - 1), desc='Calculating Flow'):
        img, flow = process_two_images(model, files[i:i+2], ctx)
        dir, file = os.path.split(files[i])
        if output_dir is None:
            output_dir = os.path.join(dir, 'flow')
        # NOTE(review): keyword should be `exist_ok`; `exists_ok` raises
        # TypeError when this line executes.
        os.makedirs(output_dir, exists_ok=True)
        cv2.imwrite(os.path.join(output_dir, file), cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    return output_dir
def process_video(model, input_path, output_path=None, ctx=None):
    # Read frames from a video, compute frame-to-frame flow and write a
    # flow-visualisation video (default path: <input>_flow.mp4).
    capture = cv2.VideoCapture(input_path)
    frames = []
    while_safety = 0
    # NOTE(review): hard 200-frame cap — looks like a debugging limit.
    while len(frames) < 200:
        _, image = capture.read()
        if while_safety > 500:  # bail out after too many consecutive failed reads
            break
        if image is None:
            while_safety += 1
            continue
        while_safety = 0
        frames.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    capture.release()
    if len(frames) < 2:
        return None
    if output_path is None:
        output_path = input_path[:-4] + '_flow.mp4'
    # Output frame size comes from the cropped frames (model input size).
    cropped_frames = crop(frames)
    h, w, _= cropped_frames[0].shape
    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25, (w, h))
    for i in tqdm(range(len(frames)-1), desc='Calculating Flow'):
        mx.nd.waitall()  # let pending async mxnet work finish
        img, flow = process_two_images(model, frames[i:i+2], ctx)
        video.write(img)
    video.release()
    return output_path
if __name__ == '__main__':
    # Debug entry point: run FlowNet2-C on a hard-coded video path.
    # NOTE(review): save_path is assigned but never used.
    save_path = "models/definitions/flownet/weights/FlowNet2-C_checkpoint.params"
    ctx = mx.gpu(0)
    net = get_flownet('C', pretrained=True, ctx=ctx)
    net.hybridize()
    input_path = "/path/to/test.mp4"
    process_video(net, input_path, ctx=ctx)
    print("DONE")
| true
| true
|
1c47763f1386690bf0efd66398f708660e2f5d45
| 5,537
|
py
|
Python
|
scripts/automation/trex_control_plane/astf/trex_astf_lib/trex_astf_global_info.py
|
alialnu/trex-core
|
ae4ab05a6215fd0a859adde40dac6afa8bf0f950
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation/trex_control_plane/astf/trex_astf_lib/trex_astf_global_info.py
|
alialnu/trex-core
|
ae4ab05a6215fd0a859adde40dac6afa8bf0f950
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation/trex_control_plane/astf/trex_astf_lib/trex_astf_global_info.py
|
alialnu/trex-core
|
ae4ab05a6215fd0a859adde40dac6afa8bf0f950
|
[
"Apache-2.0"
] | null | null | null |
import socket
class ASTFGlobalInfoBase(object):
    """Attribute tree of ASTF tunables validated against a params schema.

    The first level ('tcp', 'ip', ...) is a lazily-created group; the
    leaves are typed values set via plain attribute assignment, e.g.
    ``info.tcp.mss = 1460``. ``to_json()`` serialises the values set so far.
    """

    _g_params = {}

    class inner(object):
        """Leaf group: validates and stores the individual typed fields."""

        def __init__(self, params, name):
            self._fields = {}
            self._params = params
            self._name = name

        def __setattr__(self, name, val):
            # Private attributes bypass schema validation.
            if name.startswith("_"):
                return super(ASTFGlobalInfoBase.inner, self).__setattr__(name, val)
            for p in self._params:
                if name == p["name"]:
                    if "sub_type" in p:
                        if p["sub_type"]=="ipv6_addr":
                            if (type(val)!=str):
                                raise AttributeError("{0} in {1} should have one of the following types: {2}"
                                                     .format(name, self._name, str))
                            # Store the IPv6 address as a list of 16 byte values.
                            b=socket.inet_pton(socket.AF_INET6, val)
                            l = list(b);
                            # in case of Python 2
                            if not(type(l[0]) is int):
                                l=[ord(i) for i in l]
                            self._fields[name] = l;
                            return;
                    if "type" in p and type(val) not in p["type"]:
                        raise AttributeError("{0} in {1} should have one of the following types: {2}"
                                             .format(name, self._name, p["type"]))
                    self._fields[name] = val
                    return
            raise AttributeError("%r has no attribute %s" % (self._name, name))

        def __getattr__(self, name):
            # Only called when normal lookup fails; private names are never
            # synthesized, so fail fast (avoids recursing through self._params).
            if name.startswith("_"):
                raise AttributeError(name)
            for p in self._params:
                if name == p["name"]:
                    return self._fields[name]
            raise AttributeError("%r has no attribute %s" % (self._name, name))

        def to_json(self):
            return self._fields

    def __init__(self, params=_g_params, name="globalp"):
        # NOTE(review): the default binds the shared class-level dict; all
        # default-constructed instances validate against the same schema.
        self._fields = {}
        self._params = params
        self._name = name

    def __setattr__(self, name, val):
        if name.startswith("_"):
            return super(ASTFGlobalInfoBase, self).__setattr__(name, val)
        # Direct assignment to a group is an error — tell the caller which
        # leaf names are valid underneath it.
        if name in self._params:
            if type(self._params[name]) is dict:
                next_level_params = self._params[name].keys()
            else:
                next_level_params = []
                for n in self._params[name]:
                    next_level_params.append(n["name"])
            raise AttributeError("{0} in {1} should be followed by one of {2}".format(name, self._name, next_level_params))
        else:
            raise AttributeError("{0} is not part of valid params".format(name))

    def __getattr__(self, name):
        # BUG FIX: this previously called super(ASTFGlobalInfoBase.in_tcp,
        # self), referencing a non-existent class attribute 'in_tcp', so any
        # underscore lookup crashed on that access instead of failing cleanly.
        if name.startswith("_"):
            raise AttributeError(name)
        if name in self._params:
            long_name = self._name + "." + name
            # Lazily create the nested group / leaf holder on first access.
            if type(self._params[name]) is dict:
                return self._fields.setdefault(name, ASTFGlobalInfoBase(params=self._params[name], name=long_name))
            elif type(self._params[name]) is list:
                return self._fields.setdefault(name, ASTFGlobalInfoBase.inner(params=self._params[name], name=long_name))
        raise AttributeError("{0} has no attribute {1} it has {2}".format(self._name, name, self._params.keys()))

    def to_json(self):
        ret = {}
        for field in self._fields.keys():
            ret[field] = self._fields[field].to_json()
        return ret
class ASTFGlobalInfo(ASTFGlobalInfoBase):
    """Profile-wide tunables: scheduler, ipv6, tcp and ip knobs addressed
    as attributes, e.g. ``info.tcp.mss = 1460``."""

    _g_params = {
        "scheduler": [{"name": fld, "type": [int]}
                      for fld in ("rampup_sec", "accurate")],
        "ipv6": [
            {"name": "src_msb", "sub_type": "ipv6_addr"},
            {"name": "dst_msb", "sub_type": "ipv6_addr"},
            {"name": "enable", "type": [int]},
        ],
        "tcp": [{"name": fld, "type": [int]}
                for fld in ("mss", "initwnd", "rxbufsize", "txbufsize",
                            "rexmtthresh", "do_rfc1323", "keepinit",
                            "keepidle", "keepintvl", "delay_ack_msec",
                            "no_delay")],
        "ip": [{"name": fld, "type": [int]} for fld in ("tos", "ttl")],
    }

    def __init__(self, params=_g_params, name="GlobalInfo"):
        return super(ASTFGlobalInfo, self).__init__(params, name)
class ASTFGlobalInfoPerTemplate(ASTFGlobalInfoBase):
    """Per-template overrides of the tcp/ip tuning knobs."""

    _g_params = {
        "tcp": [{"name": fld, "type": [int]}
                for fld in ("initwnd", "mss", "no_delay", "rxbufsize",
                            "txbufsize")],
        "ip": [{"name": fld, "type": [int]} for fld in ("tos", "ttl")],
    }

    def __init__(self, params=_g_params, name="GlobalInfoPerTemplate"):
        return super(ASTFGlobalInfoPerTemplate, self).__init__(params, name)
| 37.161074
| 123
| 0.483475
|
import socket
class ASTFGlobalInfoBase(object):
    # Attribute tree of ASTF tunables validated against a params schema;
    # groups are created lazily on first attribute access.
    _g_params = {}
    class inner(object):
        # Leaf group holding the individual typed fields.
        def __init__(self, params, name):
            self._fields = {}
            self._params = params
            self._name = name
        def __setattr__(self, name, val):
            # Private attributes bypass schema validation.
            if name.startswith("_"):
                return super(ASTFGlobalInfoBase.inner, self).__setattr__(name, val)
            for p in self._params:
                if name == p["name"]:
                    if "sub_type" in p:
                        if p["sub_type"]=="ipv6_addr":
                            if (type(val)!=str):
                                raise AttributeError("{0} in {1} should have one of the following types: {2}"
                                                     .format(name, self._name, str))
                            # IPv6 address stored as a list of 16 byte values.
                            b=socket.inet_pton(socket.AF_INET6, val)
                            l = list(b);
                            if not(type(l[0]) is int):
                                l=[ord(i) for i in l]
                            self._fields[name] = l;
                            return;
                    if "type" in p and type(val) not in p["type"]:
                        raise AttributeError("{0} in {1} should have one of the following types: {2}"
                                             .format(name, self._name, p["type"]))
                    self._fields[name] = val
                    return
            raise AttributeError("%r has no attribute %s" % (self._name, name))
        def __getattr__(self, name):
            if name.startswith("_"):
                return super(ASTFGlobalInfoBase.inner, self).__getattr__(name)
            for p in self._params:
                if name == p["name"]:
                    return self._fields[name]
            raise AttributeError("%r has no attribute %s" % (self._name, name))
        def to_json(self):
            return self._fields
    def __init__(self, params=_g_params, name="globalp"):
        self._fields = {}
        self._params = params
        self._name = name
    def __setattr__(self, name, val):
        if name.startswith("_"):
            return super(ASTFGlobalInfoBase, self).__setattr__(name, val)
        # Assigning directly to a group is an error; report the valid leaves.
        if name in self._params:
            if type(self._params[name]) is dict:
                next_level_params = self._params[name].keys()
            else:
                next_level_params = []
                for n in self._params[name]:
                    next_level_params.append(n["name"])
            raise AttributeError("{0} in {1} should be followed by one of {2}".format(name, self._name, next_level_params))
        else:
            raise AttributeError("{0} is not part of valid params".format(name))
    def __getattr__(self, name):
        if name.startswith("_"):
            # NOTE(review): 'ASTFGlobalInfoBase.in_tcp' does not exist; this
            # super() call raises AttributeError on that lookup itself —
            # presumably ASTFGlobalInfoBase was intended.
            return super(ASTFGlobalInfoBase.in_tcp, self).__getattr__(name)
        if name in self._params:
            long_name = self._name + "." + name
            # Lazily create the nested group / leaf holder on first access.
            if type(self._params[name]) is dict:
                return self._fields.setdefault(name, ASTFGlobalInfoBase(params=self._params[name], name=long_name))
            elif type(self._params[name]) is list:
                return self._fields.setdefault(name, ASTFGlobalInfoBase.inner(params=self._params[name], name=long_name))
        raise AttributeError("{0} has no attribute {1} it has {2}".format(self._name, name, self._params.keys()))
    def to_json(self):
        ret = {}
        for field in self._fields.keys():
            ret[field] = self._fields[field].to_json()
        return ret
class ASTFGlobalInfo(ASTFGlobalInfoBase):
    # Profile-wide tunables: scheduler, ipv6, tcp and ip knobs addressed as
    # attributes, e.g. info.tcp.mss = 1460.
    _g_params = {
        "scheduler" : [
            {"name": "rampup_sec", "type": [int]},
            {"name": "accurate", "type": [int]}
        ],
        "ipv6": [
            {"name": "src_msb", "sub_type" : "ipv6_addr" },
            {"name": "dst_msb", "sub_type" : "ipv6_addr" },
            {"name": "enable", "type": [int]}
        ],
        "tcp": [
            {"name": "mss", "type": [int]},
            {"name": "initwnd", "type": [int]},
            {"name": "rxbufsize", "type": [int]},
            {"name": "txbufsize", "type": [int]},
            {"name": "rexmtthresh", "type": [int]},
            {"name": "do_rfc1323", "type": [int]},
            {"name": "keepinit", "type": [int]},
            {"name": "keepidle", "type": [int]},
            {"name": "keepintvl", "type": [int]},
            {"name": "delay_ack_msec", "type": [int]},
            {"name": "no_delay", "type": [int]},
        ],
        "ip": [
            {"name": "tos", "type": [int]},
            {"name": "ttl", "type": [int]}
        ],
    }
    def __init__(self, params=_g_params, name="GlobalInfo"):
        return super(ASTFGlobalInfo, self).__init__(params, name)
class ASTFGlobalInfoPerTemplate(ASTFGlobalInfoBase):
    # Per-template overrides of the tcp/ip tuning knobs.
    _g_params = {
        "tcp": [
            {"name": "initwnd", "type": [int]},
            {"name": "mss", "type": [int]},
            {"name": "no_delay", "type": [int]},
            {"name": "rxbufsize", "type": [int]},
            {"name": "txbufsize", "type": [int]},
        ],
        "ip": [
            {"name": "tos", "type": [int]},
            {"name": "ttl", "type": [int]}
        ],
    }
    def __init__(self, params=_g_params, name="GlobalInfoPerTemplate"):
        return super(ASTFGlobalInfoPerTemplate, self).__init__(params, name)
| true
| true
|
1c4777590dcdd7cd0868594deb226eb09b523f7d
| 16,358
|
py
|
Python
|
senlin/objects/fields.py
|
openstack/senlin
|
390779ca1e08f819683e79993696f945f1c0393e
|
[
"Apache-2.0"
] | 45
|
2015-10-18T02:56:50.000Z
|
2022-03-01T15:28:02.000Z
|
senlin/objects/fields.py
|
openstack/senlin
|
390779ca1e08f819683e79993696f945f1c0393e
|
[
"Apache-2.0"
] | 2
|
2019-04-26T10:44:47.000Z
|
2020-12-16T19:45:34.000Z
|
senlin/objects/fields.py
|
openstack/senlin
|
390779ca1e08f819683e79993696f945f1c0393e
|
[
"Apache-2.0"
] | 45
|
2015-10-19T02:35:57.000Z
|
2021-09-28T09:01:42.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields
import re
from senlin.common import consts
from senlin.common.i18n import _
CONF = cfg.CONF
# Field alias for code readability
# BooleanField = fields.BooleanField
# Re-export commonly used oslo.versionedobjects field types so the rest of
# the code base can import them from this module.  Note that UUIDField is
# redefined by a local class further down in this module.
FlexibleBooleanField = fields.FlexibleBooleanField
StringField = fields.StringField
IntegerField = fields.IntegerField
FloatField = fields.FloatField
UUIDField = fields.UUIDField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
ListOfStringsField = fields.ListOfStringsField
ListOfEnumField = fields.ListOfEnumField
class Boolean(fields.FieldType):
    """Strict boolean field type.

    Stricter than the oslo.versionedobjects version: coercion uses
    ``bool_from_string`` in strict mode, with the user-specified default
    forwarded to that helper.
    """

    def __init__(self, default=False):
        # Keep the default around so coerce() can hand it to the parser.
        self._default = default
        super(Boolean, self).__init__()

    def coerce(self, obj, attr, value):
        """Strictly convert *value* to a boolean."""
        return strutils.bool_from_string(
            value, strict=True, default=self._default)

    def get_schema(self):
        """JSON-schema fragment for this field."""
        return {'type': ['boolean']}
class NonNegativeInteger(fields.FieldType):
    """Integer field type that rejects values below zero.

    Kept (instead of the oslo variant) so the conversion error message
    stays user friendly.
    """

    @staticmethod
    def coerce(obj, attr, value):
        """Convert *value* to int and enforce the >= 0 constraint."""
        try:
            result = int(value)
        except (TypeError, ValueError):
            raise ValueError(_("The value for %(attr)s must be an integer: "
                               "'%(value)s'.") %
                             {'attr': attr, 'value': value})
        if result < 0:
            raise ValueError(_("Value must be >= 0 for field '%s'.") % attr)
        return result

    def get_schema(self):
        """JSON-schema fragment accepting ints or numeric strings."""
        return {'type': ['integer', 'string'], 'minimum': 0}
# Senlin has a stricter field checking for object fields.
class Object(fields.Object):
    """Object field with stricter schema checking than the oslo version."""

    def get_schema(self):
        """Return the parent schema with undeclared data keys forbidden."""
        base = super(Object, self).get_schema()
        # We are not checking whether self._obj_name is registered; an
        # exception will be raised anyway if it is not registered.
        base['properties']['senlin_object.data']['additionalProperties'] = \
            False
        return base
class UUID(fields.FieldType):
    """Field type holding a UUID-like string."""

    # Pattern advertised in the JSON schema; hyphens are optional.
    _PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
                r'{4}-?[a-fA-F0-9]{12}$')

    @staticmethod
    def coerce(obj, attr, value):
        """Validate *value* looks like a UUID and return it as a string."""
        if uuidutils.is_uuid_like(value):
            return str(value)
        msg = _("The value for %(attr)s is not a valid UUID: '%(value)s'."
                ) % {'attr': attr, 'value': value}
        raise ValueError(msg)

    def get_schema(self):
        """JSON-schema fragment enforcing the UUID pattern."""
        return {'type': ['string'], 'pattern': self._PATTERN}
class Json(fields.FieldType):
    """Field type storing an arbitrary JSON-serialisable structure."""
    def coerce(self, obj, attr, value):
        # Strings are assumed to carry serialised JSON and are parsed;
        # any other value is passed through untouched.
        if isinstance(value, str):
            try:
                return jsonutils.loads(value)
            except ValueError:
                msg = _("The value (%s) is not a valid JSON.") % value
                raise ValueError(msg)
        return value
    def from_primitive(self, obj, attr, value):
        # Deserialisation is the same parse-or-passthrough as coerce().
        return self.coerce(obj, attr, value)
    def to_primitive(self, obj, attr, value):
        # Serialise to a JSON string for transport/storage.
        return jsonutils.dumps(value)
    def stringify(self, value):
        # NOTE(review): for str input this returns the *parsed* object, not
        # a string, which looks inconsistent with the method name -- confirm
        # intent before changing.
        if isinstance(value, str):
            try:
                return jsonutils.loads(value)
            except ValueError:
                raise
        return str(value)
    def get_schema(self):
        return {'type': ['object']}
class NotificationPriority(fields.Enum):
    """Enum of valid notification priorities."""
    # The priorities here are derived from oslo_messaging.notify.notifier
    ALL = consts.NOTIFICATION_PRIORITIES
    def __init__(self):
        super(NotificationPriority, self).__init__(self.ALL)
class NotificationPhase(fields.Enum):
    """Enum of valid notification phases."""
    ALL = consts.NOTIFICATION_PHASES
    def __init__(self):
        super(NotificationPhase, self).__init__(self.ALL)
class Name(fields.String):
    """String field for resource names with length and charset limits."""

    def __init__(self, min_len=1, max_len=255):
        super(Name, self).__init__()
        self.min_len = min_len
        self.max_len = max_len

    def coerce(self, obj, attr, value):
        """Validate length and charset before the normal string coercion.

        :raises ValueError: if the value is too short, too long, or
            contains characters outside the allowed set.
        """
        err = None
        if len(value) < self.min_len:
            err = _("The value for the %(attr)s field must be at least "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.min_len}
        elif len(value) > self.max_len:
            err = _("The value for the %(attr)s field must be less than "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.max_len}
        else:
            # NOTE: This is pretty restrictive. We can relax it later when
            # there are requests to do so
            # Use a raw string: '\d', '\.' etc. in a plain literal are
            # invalid string escape sequences (DeprecationWarning on modern
            # Python); the re module resolves '\u4e00' itself, so the
            # matched pattern is unchanged.
            regex = re.compile(r'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$',
                               re.IGNORECASE)
            if not regex.search(value):
                err = _("The value for the '%(attr)s' (%(value)s) contains "
                        "illegal characters. It must contain only "
                        "alphanumeric or \"_-.~\" characters and must start "
                        "with letter."
                        ) % {'attr': attr, 'value': value}
        if err:
            raise ValueError(err)
        return super(Name, self).coerce(obj, attr, value)

    def get_schema(self):
        """JSON-schema fragment with the configured length bounds."""
        return {
            'type': ['string'],
            'minLength': self.min_len,
            'maxLength': self.max_len
        }
class Capacity(fields.Integer):
    """Integer field for cluster capacity, bounded by a global constraint."""
    def __init__(self, minimum=0, maximum=None):
        """Initialise bounds, validated against CONF.max_nodes_per_cluster.

        :param minimum: lower bound; must not exceed the global maximum.
        :param maximum: optional upper bound; defaults to the global
            maximum and must lie between ``minimum`` and that global cap.
        :raises ValueError: if the bounds are inconsistent.
        """
        super(Capacity, self).__init__()
        CONF.import_opt("max_nodes_per_cluster", "senlin.conf")
        if minimum > CONF.max_nodes_per_cluster:
            err = _("The value of 'minimum' cannot be greater than the global "
                    "constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster}
            raise ValueError(err)
        self.minimum = minimum
        if maximum is not None:
            if maximum < minimum:
                err = _("The value of 'maximum' must be greater than or equal "
                        "to that of the 'minimum' specified.")
                raise ValueError(err)
            if maximum > CONF.max_nodes_per_cluster:
                err = _("The value of 'maximum' cannot be greater than the "
                        "global constraint (%(m)d)."
                        ) % {'m': CONF.max_nodes_per_cluster}
                raise ValueError(err)
            self.maximum = maximum
        else:
            # No explicit maximum: fall back to the global cap.
            self.maximum = CONF.max_nodes_per_cluster
    def coerce(self, obj, attr, value):
        """Convert *value* to int and enforce the configured bounds."""
        try:
            v = int(value)
        except Exception:
            raise ValueError(_("The value for %(attr)s must be an integer: "
                               "'%(value)s'.") %
                             {'attr': attr, 'value': value})
        if v < self.minimum:
            raise ValueError(_("The value for the %(a)s field must be greater "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.minimum})
        elif v > self.maximum:
            raise ValueError(_("The value for the %(a)s field must be less "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.maximum})
        return super(Capacity, self).coerce(obj, attr, v)
    def get_schema(self):
        """JSON-schema fragment: int or digit-string within the bounds."""
        return {
            'type': ['integer', 'string'],
            'minimum': self.minimum,
            'maximum': self.maximum,
            'pattern': '^[0-9]*$',
        }
class Sort(fields.String):
    """String field describing comma-separated ``key[:dir]`` sort criteria."""

    def __init__(self, valid_keys):
        # Keys the caller is allowed to sort on.
        self.valid_keys = valid_keys
        super(Sort, self).__init__()

    def coerce(self, obj, attr, value):
        """Validate every ``key[:dir]`` criterion in *value*.

        :raises ValueError: on a missing key, unknown key, or a direction
            other than ``asc``/``desc``.
        """
        for criterion in value.split(','):
            key, _sep, direction = criterion.partition(':')
            if not key:
                raise ValueError(_("Missing sort key for '%s'.") % attr)
            err = None
            if key not in self.valid_keys:
                err = _("Unsupported sort key '%(value)s' for '%(attr)s'."
                        ) % {'attr': attr, 'value': key}
            if direction and direction not in ('asc', 'desc'):
                err = _("Unsupported sort dir '%(value)s' for '%(attr)s'."
                        ) % {'attr': attr, 'value': direction}
            if err:
                raise ValueError(err)
        return super(Sort, self).coerce(obj, attr, value)

    def get_schema(self):
        """JSON-schema fragment for this field."""
        return {
            'type': ['string'],
        }
class IdentityList(fields.List):
    """List field with min-length, uniqueness and nullability constraints."""

    def __init__(self, element_type, min_items=0, unique=True, nullable=False,
                 **kwargs):
        super(IdentityList, self).__init__(element_type, **kwargs)
        self.min_items = min_items
        self.unique_items = unique
        self.nullable = nullable

    def coerce(self, obj, attr, value):
        """Coerce the list, then enforce the length/uniqueness constraints."""
        items = super(IdentityList, self).coerce(obj, attr, value)
        if len(items) < self.min_items:
            raise ValueError(_("Value for '%(attr)s' must have at least "
                               "%(num)s item(s).") %
                             {'attr': attr, 'num': self.min_items})
        if len(set(items)) != len(items) and self.unique_items:
            raise ValueError(_("Items for '%(attr)s' must be unique") %
                             {'attr': attr})
        return items

    def get_schema(self):
        """Parent schema plus null/minItems/uniqueItems adjustments."""
        schema = super(IdentityList, self).get_schema()
        if self.nullable:
            schema['type'].append('null')
        schema.update(minItems=self.min_items, uniqueItems=self.unique_items)
        return schema
class BaseEnum(fields.FieldType):
    """Enum-like field type with strict string membership checking.

    Subclasses supply the valid values via an ``ALL`` class attribute.
    """
    # NOTE: We are not basing Enum on String because String is not working
    # correctly when handling None value.
    def __init__(self, nullable=False):
        valid_values = list(self.__class__.ALL)
        if not valid_values:
            raise ValueError(_("No list of valid values provided for enum."))
        for value in valid_values:
            if not isinstance(value, str):
                raise ValueError(_("Enum field only support string values."))
        self._valid_values = list(valid_values)
        # NOTE(review): `_nullable` is stored but never consulted in
        # coerce(), where a None value would be stringified -- confirm.
        self._nullable = nullable
        super(BaseEnum, self).__init__()
    def coerce(self, obj, attr, value):
        # Membership test against the subclass's ALL constant.
        value = str(value)
        if value not in self._valid_values:
            raise ValueError(_("Value '%(value)s' is not acceptable for "
                               "field '%(attr)s'.") %
                             {'value': value, 'attr': attr})
        return value
    def stringify(self, value):
        # None passes through so nullable fields print correctly.
        if value is None:
            return None
        return '\'%s\'' % value
class AdjustmentType(BaseEnum):
    """Enum of valid cluster-size adjustment types."""
    ALL = consts.ADJUSTMENT_TYPES
    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}
class ClusterActionName(BaseEnum):
    """Enum of valid cluster action names."""
    ALL = consts.CLUSTER_ACTION_NAMES
    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}
class ClusterStatus(BaseEnum):
    """Enum of valid cluster statuses."""
    ALL = consts.CLUSTER_STATUSES
class NodeStatus(BaseEnum):
    """Enum of valid node statuses."""
    ALL = consts.NODE_STATUSES
class ActionStatus(BaseEnum):
    """Enum of valid action statuses."""
    ALL = consts.ACTION_STATUSES
class ReceiverType(BaseEnum):
    """Enum of valid receiver types."""
    ALL = consts.RECEIVER_TYPES
    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}
class UniqueDict(fields.Dict):
    """Dict field whose values must be pairwise distinct."""

    def coerce(self, obj, attr, value):
        """Coerce the mapping, then reject duplicated values."""
        coerced = super(UniqueDict, self).coerce(obj, attr, value)
        values = coerced.values()
        if len(set(values)) != len(values):
            raise ValueError(_("Map contains duplicated values"))
        return coerced
# TODO(Qiming): remove this when oslo patch is released
# https://review.openstack.org/#/c/360095
class NonNegativeIntegerField(fields.AutoTypedField):
    """Auto-typed field wrapping NonNegativeInteger."""
    AUTO_TYPE = NonNegativeInteger()
class BooleanField(fields.AutoTypedField):
    """Auto-typed field wrapping the strict Boolean type."""
    AUTO_TYPE = Boolean()
# An override to the oslo.versionedobjects version so that we are using
# our own Object definition.
class ObjectField(fields.AutoTypedField):
    """Auto-typed field that wraps the local, stricter Object type."""

    def __init__(self, objtype, subclasses=False, **kwargs):
        self.objname = objtype
        self.AUTO_TYPE = Object(objtype, subclasses)
        super(ObjectField, self).__init__(**kwargs)
class JsonField(fields.AutoTypedField):
    """Auto-typed field wrapping the Json type."""
    AUTO_TYPE = Json()
class ListField(fields.AutoTypedField):
    """Auto-typed field holding a list of arbitrary field values."""
    AUTO_TYPE = fields.List(fields.FieldType())
class NotificationPriorityField(fields.BaseEnumField):
    """Enum field for notification priorities."""
    AUTO_TYPE = NotificationPriority()
class NotificationPhaseField(fields.BaseEnumField):
    """Enum field for notification phases."""
    AUTO_TYPE = NotificationPhase()
class NameField(fields.AutoTypedField):
    """Auto-typed field wrapping Name with its default length limits."""
    AUTO_TYPE = Name()
class UUIDField(fields.AutoTypedField):
    """Auto-typed field wrapping the local UUID type.

    NOTE(review): this intentionally shadows the ``fields.UUIDField``
    alias defined near the top of this module.
    """
    AUTO_TYPE = UUID()
class CapacityField(fields.AutoTypedField):
    """Field for cluster capacity values with per-instance min/max bounds."""
    AUTO_TYPE = None
    def __init__(self, nullable=False, default=None, minimum=0, maximum=None):
        # The bounded Capacity type must be built per instance.
        self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum)
        super(CapacityField, self).__init__(nullable=nullable, default=default)
class SortField(fields.AutoTypedField):
    """Field for sort criteria restricted to a set of valid keys."""
    AUTO_TYPE = None
    def __init__(self, valid_keys, nullable=False, default=None):
        # The Sort type must be built per instance with its key whitelist.
        self.AUTO_TYPE = Sort(valid_keys)
        super(SortField, self).__init__(nullable=nullable, default=default)
class IdentityListField(fields.AutoTypedField):
    """Field holding a list of string identities."""
    AUTO_TYPE = None
    def __init__(self, min_items=0, unique=True, nullable=False, default=None):
        if default is None:
            default = []
        # NOTE(review): `nullable` is passed to the parent field but not to
        # IdentityList, so the list type's own null-schema handling is never
        # enabled -- confirm whether that is intended.
        self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items,
                                      unique=unique)
        super(IdentityListField, self).__init__(nullable=nullable,
                                                default=default)
class AdjustmentTypeField(fields.AutoTypedField):
    """Auto-typed field wrapping the AdjustmentType enum."""

    AUTO_TYPE = None

    def __init__(self, **kwargs):
        # Honour a caller-supplied `nullable` when building the enum type.
        self.AUTO_TYPE = AdjustmentType(
            nullable=kwargs.get('nullable', False))
        super(AdjustmentTypeField, self).__init__(**kwargs)
class ClusterActionNameField(fields.AutoTypedField):
    """Auto-typed field wrapping the ClusterActionName enum."""
    AUTO_TYPE = None
    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ClusterActionName(nullable=nullable)
        super(ClusterActionNameField, self).__init__(**kwargs)
class ClusterStatusField(fields.AutoTypedField):
    """Auto-typed field wrapping the ClusterStatus enum."""
    AUTO_TYPE = ClusterStatus
class NodeStatusField(fields.AutoTypedField):
    """Auto-typed field wrapping the NodeStatus enum."""
    AUTO_TYPE = NodeStatus
class ActionStatusField(fields.AutoTypedField):
    """Auto-typed field wrapping the ActionStatus enum."""
    AUTO_TYPE = ActionStatus
class ReceiverTypeField(fields.AutoTypedField):
    """Auto-typed field wrapping the ReceiverType enum."""
    AUTO_TYPE = None
    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ReceiverType(nullable=nullable)
        super(ReceiverTypeField, self).__init__(**kwargs)
class NodeReplaceMapField(fields.AutoTypedField):
    """Mapping of node replacements; values must be unique."""
    AUTO_TYPE = UniqueDict(fields.String())
class CustomListField(ListField):
    """List field that projects each element to one of its attributes."""

    def __init__(self, attr_name, **kwargs):
        # Attribute to extract from every coerced element.
        self.attr_name = attr_name
        super(CustomListField, self).__init__(**kwargs)

    def coerce(self, obj, attr, value):
        """Coerce the list, then map each item to ``item.<attr_name>``."""
        coerced = super(CustomListField, self).coerce(obj, attr, value)
        return [getattr(item, self.attr_name) for item in coerced]
| 30.575701
| 79
| 0.602152
|
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields
import re
from senlin.common import consts
from senlin.common.i18n import _
CONF = cfg.CONF
FlexibleBooleanField = fields.FlexibleBooleanField
StringField = fields.StringField
IntegerField = fields.IntegerField
FloatField = fields.FloatField
UUIDField = fields.UUIDField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
ListOfStringsField = fields.ListOfStringsField
ListOfEnumField = fields.ListOfEnumField
class Boolean(fields.FieldType):
def __init__(self, default=False):
super(Boolean, self).__init__()
self._default = default
def coerce(self, obj, attr, value):
return strutils.bool_from_string(value, strict=True,
default=self._default)
def get_schema(self):
return {'type': ['boolean']}
class NonNegativeInteger(fields.FieldType):
@staticmethod
def coerce(obj, attr, value):
try:
v = int(value)
except (TypeError, ValueError):
raise ValueError(_("The value for %(attr)s must be an integer: "
"'%(value)s'.") %
{'attr': attr, 'value': value})
if v < 0:
err = _("Value must be >= 0 for field '%s'.") % attr
raise ValueError(err)
return v
def get_schema(self):
return {
'type': ['integer', 'string'],
'minimum': 0
}
class Object(fields.Object):
def get_schema(self):
schema = super(Object, self).get_schema()
data_key = 'senlin_object.data'
schema['properties'][data_key]['additionalProperties'] = False
return schema
class UUID(fields.FieldType):
_PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
r'{4}-?[a-fA-F0-9]{12}$')
@staticmethod
def coerce(obj, attr, value):
if not uuidutils.is_uuid_like(value):
msg = _("The value for %(attr)s is not a valid UUID: '%(value)s'."
) % {'attr': attr, 'value': value}
raise ValueError(msg)
return str(value)
def get_schema(self):
return {'type': ['string'], 'pattern': self._PATTERN}
class Json(fields.FieldType):
def coerce(self, obj, attr, value):
if isinstance(value, str):
try:
return jsonutils.loads(value)
except ValueError:
msg = _("The value (%s) is not a valid JSON.") % value
raise ValueError(msg)
return value
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
def to_primitive(self, obj, attr, value):
return jsonutils.dumps(value)
def stringify(self, value):
if isinstance(value, str):
try:
return jsonutils.loads(value)
except ValueError:
raise
return str(value)
def get_schema(self):
return {'type': ['object']}
class NotificationPriority(fields.Enum):
ALL = consts.NOTIFICATION_PRIORITIES
def __init__(self):
super(NotificationPriority, self).__init__(self.ALL)
class NotificationPhase(fields.Enum):
ALL = consts.NOTIFICATION_PHASES
def __init__(self):
super(NotificationPhase, self).__init__(self.ALL)
class Name(fields.String):
def __init__(self, min_len=1, max_len=255):
super(Name, self).__init__()
self.min_len = min_len
self.max_len = max_len
def coerce(self, obj, attr, value):
err = None
if len(value) < self.min_len:
err = _("The value for the %(attr)s field must be at least "
"%(count)d characters long."
) % {'attr': attr, 'count': self.min_len}
elif len(value) > self.max_len:
err = _("The value for the %(attr)s field must be less than "
"%(count)d characters long."
) % {'attr': attr, 'count': self.max_len}
else:
regex = re.compile(u'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$',
re.IGNORECASE)
if not regex.search(value):
err = _("The value for the '%(attr)s' (%(value)s) contains "
"illegal characters. It must contain only "
"alphanumeric or \"_-.~\" characters and must start "
"with letter."
) % {'attr': attr, 'value': value}
if err:
raise ValueError(err)
return super(Name, self).coerce(obj, attr, value)
def get_schema(self):
return {
'type': ['string'],
'minLength': self.min_len,
'maxLength': self.max_len
}
class Capacity(fields.Integer):
def __init__(self, minimum=0, maximum=None):
super(Capacity, self).__init__()
CONF.import_opt("max_nodes_per_cluster", "senlin.conf")
if minimum > CONF.max_nodes_per_cluster:
err = _("The value of 'minimum' cannot be greater than the global "
"constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster}
raise ValueError(err)
self.minimum = minimum
if maximum is not None:
if maximum < minimum:
err = _("The value of 'maximum' must be greater than or equal "
"to that of the 'minimum' specified.")
raise ValueError(err)
if maximum > CONF.max_nodes_per_cluster:
err = _("The value of 'maximum' cannot be greater than the "
"global constraint (%(m)d)."
) % {'m': CONF.max_nodes_per_cluster}
raise ValueError(err)
self.maximum = maximum
else:
self.maximum = CONF.max_nodes_per_cluster
def coerce(self, obj, attr, value):
try:
v = int(value)
except Exception:
raise ValueError(_("The value for %(attr)s must be an integer: "
"'%(value)s'.") %
{'attr': attr, 'value': value})
if v < self.minimum:
raise ValueError(_("The value for the %(a)s field must be greater "
"than or equal to %(n)d.") %
{'a': attr, 'n': self.minimum})
elif v > self.maximum:
raise ValueError(_("The value for the %(a)s field must be less "
"than or equal to %(n)d.") %
{'a': attr, 'n': self.maximum})
return super(Capacity, self).coerce(obj, attr, v)
def get_schema(self):
return {
'type': ['integer', 'string'],
'minimum': self.minimum,
'maximum': self.maximum,
'pattern': '^[0-9]*$',
}
class Sort(fields.String):
def __init__(self, valid_keys):
super(Sort, self).__init__()
self.valid_keys = valid_keys
def coerce(self, obj, attr, value):
for s in value.split(','):
s_key, _sep, s_dir = s.partition(':')
err = None
if not s_key:
err = _("Missing sort key for '%s'.") % attr
raise ValueError(err)
if s_key not in self.valid_keys:
err = _("Unsupported sort key '%(value)s' for '%(attr)s'."
) % {'attr': attr, 'value': s_key}
if s_dir and s_dir not in ('asc', 'desc'):
err = _("Unsupported sort dir '%(value)s' for '%(attr)s'."
) % {'attr': attr, 'value': s_dir}
if err:
raise ValueError(err)
return super(Sort, self).coerce(obj, attr, value)
def get_schema(self):
return {
'type': ['string'],
}
class IdentityList(fields.List):
def __init__(self, element_type, min_items=0, unique=True, nullable=False,
**kwargs):
super(IdentityList, self).__init__(element_type, **kwargs)
self.min_items = min_items
self.unique_items = unique
self.nullable = nullable
def coerce(self, obj, attr, value):
res = super(IdentityList, self).coerce(obj, attr, value)
if len(res) < self.min_items:
raise ValueError(_("Value for '%(attr)s' must have at least "
"%(num)s item(s).") %
{'attr': attr, 'num': self.min_items})
if len(set(res)) != len(res) and self.unique_items:
raise ValueError(_("Items for '%(attr)s' must be unique") %
{'attr': attr})
return res
def get_schema(self):
schema = super(IdentityList, self).get_schema()
if self.nullable:
schema['type'].append('null')
schema['minItems'] = self.min_items
schema['uniqueItems'] = self.unique_items
return schema
class BaseEnum(fields.FieldType):
def __init__(self, nullable=False):
valid_values = list(self.__class__.ALL)
if not valid_values:
raise ValueError(_("No list of valid values provided for enum."))
for value in valid_values:
if not isinstance(value, str):
raise ValueError(_("Enum field only support string values."))
self._valid_values = list(valid_values)
self._nullable = nullable
super(BaseEnum, self).__init__()
def coerce(self, obj, attr, value):
value = str(value)
if value not in self._valid_values:
raise ValueError(_("Value '%(value)s' is not acceptable for "
"field '%(attr)s'.") %
{'value': value, 'attr': attr})
return value
def stringify(self, value):
if value is None:
return None
return '\'%s\'' % value
class AdjustmentType(BaseEnum):
ALL = consts.ADJUSTMENT_TYPES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class ClusterActionName(BaseEnum):
ALL = consts.CLUSTER_ACTION_NAMES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class ClusterStatus(BaseEnum):
ALL = consts.CLUSTER_STATUSES
class NodeStatus(BaseEnum):
ALL = consts.NODE_STATUSES
class ActionStatus(BaseEnum):
ALL = consts.ACTION_STATUSES
class ReceiverType(BaseEnum):
ALL = consts.RECEIVER_TYPES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class UniqueDict(fields.Dict):
def coerce(self, obj, attr, value):
res = super(UniqueDict, self).coerce(obj, attr, value)
new_nodes = res.values()
if len(new_nodes) != len(set(new_nodes)):
raise ValueError(_("Map contains duplicated values"))
return res
class NonNegativeIntegerField(fields.AutoTypedField):
AUTO_TYPE = NonNegativeInteger()
class BooleanField(fields.AutoTypedField):
AUTO_TYPE = Boolean()
class ObjectField(fields.AutoTypedField):
def __init__(self, objtype, subclasses=False, **kwargs):
self.AUTO_TYPE = Object(objtype, subclasses)
self.objname = objtype
super(ObjectField, self).__init__(**kwargs)
class JsonField(fields.AutoTypedField):
AUTO_TYPE = Json()
class ListField(fields.AutoTypedField):
AUTO_TYPE = fields.List(fields.FieldType())
class NotificationPriorityField(fields.BaseEnumField):
AUTO_TYPE = NotificationPriority()
class NotificationPhaseField(fields.BaseEnumField):
AUTO_TYPE = NotificationPhase()
class NameField(fields.AutoTypedField):
AUTO_TYPE = Name()
class UUIDField(fields.AutoTypedField):
AUTO_TYPE = UUID()
class CapacityField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, nullable=False, default=None, minimum=0, maximum=None):
self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum)
super(CapacityField, self).__init__(nullable=nullable, default=default)
class SortField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, valid_keys, nullable=False, default=None):
self.AUTO_TYPE = Sort(valid_keys)
super(SortField, self).__init__(nullable=nullable, default=default)
class IdentityListField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, min_items=0, unique=True, nullable=False, default=None):
if default is None:
default = []
self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items,
unique=unique)
super(IdentityListField, self).__init__(nullable=nullable,
default=default)
class AdjustmentTypeField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = AdjustmentType(nullable=nullable)
super(AdjustmentTypeField, self).__init__(**kwargs)
class ClusterActionNameField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = ClusterActionName(nullable=nullable)
super(ClusterActionNameField, self).__init__(**kwargs)
class ClusterStatusField(fields.AutoTypedField):
AUTO_TYPE = ClusterStatus
class NodeStatusField(fields.AutoTypedField):
AUTO_TYPE = NodeStatus
class ActionStatusField(fields.AutoTypedField):
AUTO_TYPE = ActionStatus
class ReceiverTypeField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = ReceiverType(nullable=nullable)
super(ReceiverTypeField, self).__init__(**kwargs)
class NodeReplaceMapField(fields.AutoTypedField):
AUTO_TYPE = UniqueDict(fields.String())
class CustomListField(ListField):
def __init__(self, attr_name, **kwargs):
self.attr_name = attr_name
super(CustomListField, self).__init__(**kwargs)
def coerce(self, obj, attr, value):
objs = super(CustomListField, self).coerce(obj, attr, value)
custom_list = []
for i in objs:
custom_list.append(getattr(i, self.attr_name))
return custom_list
| true
| true
|
1c477804be4c4bf6d36610dc17cf96819da6d6fc
| 45,319
|
py
|
Python
|
nessai/nestedsampler.py
|
Rodrigo-Tenorio/nessai
|
2b4175da61b3a7250d1154a126ad93481836df0d
|
[
"MIT"
] | null | null | null |
nessai/nestedsampler.py
|
Rodrigo-Tenorio/nessai
|
2b4175da61b3a7250d1154a126ad93481836df0d
|
[
"MIT"
] | null | null | null |
nessai/nestedsampler.py
|
Rodrigo-Tenorio/nessai
|
2b4175da61b3a7250d1154a126ad93481836df0d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Functions and objects related to the main nested sampling algorithm.
"""
from collections import deque
import datetime
import logging
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import seaborn as sns
import torch
from tqdm import tqdm
from .livepoint import get_dtype, DEFAULT_FLOAT_DTYPE
from .plot import plot_indices, plot_trace
from .evidence import _NSIntegralState
from .proposal import FlowProposal
from .utils import (
safe_file_dump,
compute_indices_ks_test,
rolling_mean,
)
# Apply seaborn styling for the diagnostic plots produced by the sampler.
sns.set()
sns.set_style('ticks')
# Module-level logger.
logger = logging.getLogger(__name__)
class NestedSampler:
"""
Nested Sampler class.
Initialisation arguments:
Parameters
----------
model: :obj:`nessai.model.Model`
User defined model
nlive : int, optional
Number of live points.
output : str
Path for output
stopping : float, optional
Stop when remaining samples wouldn't change logZ estimate by this much.
max_iteration : int, optional
Maximum number of iterations to run before force sampler to stop.
If stopping criteria is met before max. is reached sampler will stop.
checkpointing : bool, optional
Boolean to toggle checkpointing, must be enabled to resume the sampler.
If false the sampler is still saved at the end of sampling.
resume_file : str, optional
If specified sampler will be resumed from this file. Still requires
correct model.
seed : int, optional
seed for the initialisation of the pseudorandom chain
n_pool : int, optional
Number of threads to when for creating the multiprocessing pool.
pool : object
User defined multiprocessing pool that will be used when evaluating
the likelihood.
close_pool : bool
Boolean to indicated if the pool should be closed at the end of the
nested sampling loop. If False, the user must manually close the pool.
plot : bool (True)
Boolean to toggle plotting
proposal_plots : bool (True)
Boolean to enable additional plots for the population stage of the
sampler. Overwritten by plot.
prior_sampling : bool (False)
produce nlive samples from the prior.
analytic_priors : bool (False)
Boolean that indicates that the `new_point` method in the model
draws directly from the priors meaning rejection sampling is not
needed.
maximum_uninformed : int (1000)
Maximum number of iterations before forcing the sampler to switch to
using the proposal method with the flow.
uninformed_proposal : :obj:`nessai.proposal.Proposal`: (None)
Class to use for initial sampling before training the flow. If
None RejectionProposal or AnalyticProposal are used depending if
`analytic_priors` is False or True.
uninformed_acceptance_threshold : float (None)
Acceptance threshold for initialising sampling, if acceptance falls
below this value sampler switches to flow-based proposal. If None
then value is set to 10 times `acceptance_threshold`
uninformed_proposal_kwargs : dict, ({})
Dictionary of keyword argument to pass to the class use for
the initial sampling when it is initialised.
flow_class : :obj:`nessai.proposal.FlowProposal`
Class to use for flow-based proposal method
flow_config : dict ({})
Dictionary used to configure instance of `nessai.flowmodel.FlowModel`,
this includes configuring the normalising flow and the training.
training_frequency : int (None)
Number of iterations between re-training the flow. If None flow
is only re-trained based on other criteria.
train_on_empty : bool (True)
If true the flow is retrained every time the proposal pool is
empty. If false it is only training according to the other criteria.
cooldown : int (100)
Minimum number of iterations between training. Can be overridden if
`train_on_empty=True` and the pool is empty.
memory : int, False (False)
Number of old live points to use in training. If False only the current
live points are used.
reset_weights : bool, int, (False)
Boolean to toggle resetting the flow weights whenever re-training.
If an integer is specified the flow is reset every nth time it is
trained.
reset_permutations: bool, int, (False)
Boolean to toggle resetting the permutation layers in the flow whenever
re-training. If an integer is specified the flow is reset every nth
time it is trained.
reset_acceptance : bool, (True)
If true use mean acceptance of samples produced with current flow
as a criteria for retraining
retrain_acceptance : bool (False)
Force the flow to be reset if the acceptance falls below the acceptance
threshold. Requires `reset_acceptance=True`
acceptance_threshold : float (0.01)
Threshold to determine if the flow should be retrained, will not
retrain if cooldown is not satisfied.
kwargs :
Keyword arguments passed to the flow proposal class
"""
    def __init__(
        self,
        model,
        nlive=2000,
        output=None,
        stopping=0.1,
        max_iteration=None,
        checkpointing=True,
        checkpoint_on_training=False,
        resume_file=None,
        seed=None,
        pool=None,
        close_pool=True,
        n_pool=None,
        plot=True,
        proposal_plots=False,
        prior_sampling=False,
        analytic_priors=False,
        maximum_uninformed=None,
        uninformed_proposal=None,
        uninformed_acceptance_threshold=None,
        uninformed_proposal_kwargs=None,
        flow_class=None,
        flow_config=None,
        training_frequency=None,
        train_on_empty=True,
        cooldown=200,
        memory=False,
        reset_weights=False,
        reset_permutations=False,
        retrain_acceptance=True,
        reset_acceptance=False,
        acceptance_threshold=0.01,
        **kwargs
    ):
        """Initialise the sampler; see the class docstring for arguments."""
        logger.info('Initialising nested sampler')
        self.info_enabled = logger.isEnabledFor(logging.INFO)
        # Model and likelihood-evaluation pool.
        model.verify_model()
        self.model = model
        self.model.configure_pool(pool=pool, n_pool=n_pool)
        self.close_pool = close_pool
        # Core sampler state and counters.
        self.nlive = nlive
        self.live_points = None
        self.prior_sampling = prior_sampling
        self.setup_random_seed(seed)
        self.accepted = 0
        self.rejected = 1
        self.initialised = False
        self.checkpointing = checkpointing
        self.checkpoint_on_training = checkpoint_on_training
        self.iteration = 0
        # Rolling acceptance over the last nlive // 10 samples.
        self.acceptance_history = deque(maxlen=(nlive // 10))
        self.mean_acceptance_history = []
        self.block_acceptance = 1.
        self.mean_block_acceptance = 1.
        self.block_iteration = 0
        self.retrain_acceptance = retrain_acceptance
        self.reset_acceptance = reset_acceptance
        self.insertion_indices = []
        self.rolling_p = []
        self.resumed = False
        # Stopping criterion and evidence bookkeeping.
        self.tolerance = stopping
        self.condition = np.inf
        self.logLmin = -np.inf
        self.logLmax = -np.inf
        self.nested_samples = []
        self.logZ = None
        self.state = _NSIntegralState(self.nlive, track_gradients=plot)
        # Output
        self.plot = plot
        self.resume_file = self.setup_output(output, resume_file)
        self.output = output
        # Timing
        self.training_time = datetime.timedelta()
        self.sampling_time = datetime.timedelta()
        self.sampling_start_time = datetime.datetime.now()
        # Resume flags
        self.completed_training = True
        self.finalised = False
        # History
        self.likelihood_evaluations = []
        self.training_iterations = []
        self.min_likelihood = []
        self.max_likelihood = []
        self.logZ_history = []
        self.dZ_history = []
        self.population_acceptance = []
        self.population_radii = []
        self.population_iterations = []
        self.checkpoint_iterations = []
        # Flow-training configuration.
        self.acceptance_threshold = acceptance_threshold
        self.train_on_empty = train_on_empty
        self.cooldown = cooldown
        self.memory = memory
        self.configure_max_iteration(max_iteration)
        self.configure_flow_reset(reset_weights, reset_permutations)
        self.configure_training_frequency(training_frequency)
        if uninformed_proposal_kwargs is None:
            uninformed_proposal_kwargs = {}
        self.configure_uninformed_proposal(uninformed_proposal,
                                           analytic_priors,
                                           maximum_uninformed,
                                           uninformed_acceptance_threshold,
                                           **uninformed_proposal_kwargs)
        self.configure_flow_proposal(flow_class, flow_config, proposal_plots,
                                     **kwargs)
        # Uninformed proposal is used for prior sampling
        # If maximum uninformed is greater than 0, the it will be used for
        # another n iterations or until it becomes inefficient
        # NOTE(review): hard-coded debug switch; live points are only dumped
        # to disk if this is flipped to True manually.
        self.store_live_points = False
        if self.store_live_points:
            self.live_points_dir = f'{self.output}/live_points/'
            os.makedirs(self.live_points_dir, exist_ok=True)
            self.replacement_points = []
@property
def log_evidence(self):
return self.state.logZ
@property
def information(self):
return self.state.info[-1]
@property
def likelihood_calls(self):
return self.model.likelihood_evaluations
@property
def likelihood_evaluation_time(self):
return self.model.likelihood_evaluation_time
@property
def proposal_population_time(self):
t = self._uninformed_proposal.population_time
t += self._flow_proposal.population_time
return t
@property
def acceptance(self):
return self.iteration / self.likelihood_calls
@property
def current_sampling_time(self):
if self.finalised:
return self.sampling_time
else:
return self.sampling_time \
+ (datetime.datetime.now() - self.sampling_start_time)
@property
def last_updated(self):
"""Last time the normalising flow was retrained"""
if self.training_iterations:
return self.training_iterations[-1]
else:
return 0
@property
def mean_acceptance(self):
"""
Mean acceptance of the last nlive // 10 points
"""
if self.acceptance_history:
return np.mean(self.acceptance_history)
else:
return np.nan
def configure_max_iteration(self, max_iteration):
"""Configure the maximum iteration.
If None then no maximum is set.
Parameter
---------
max_iteration : int, None
Maximum iteration.
"""
if max_iteration is None:
self.max_iteration = np.inf
else:
self.max_iteration = max_iteration
def configure_training_frequency(self, training_frequency):
"""Configure the training frequency.
If None, 'inf' or 'None' flow will only train when empty.
"""
if training_frequency in [None, 'inf', 'None']:
logger.warning('Proposal will only train when empty')
self.training_frequency = np.inf
else:
self.training_frequency = training_frequency
def configure_uninformed_proposal(self,
                                  uninformed_proposal,
                                  analytic_priors,
                                  maximum_uninformed,
                                  uninformed_acceptance_threshold,
                                  **kwargs):
    """
    Setup the uninformed proposal method (is NOT trained)

    Parameters
    ----------
    uninformed_proposal : None or obj
        Class to use for uninformed proposal
    analytic_priors : bool
        If True `AnalyticProposal` is used to directly sample from the
        priors rather than using rejection sampling.
    maximum_uninformed : {False, None, int, float}
        Maximum number of iterations before switching to FlowProposal.
        If None, two times nlive is used. If False uninformed sampling is
        not used.
    uninformed_acceptance_threshold : float or None:
        Threshold to use for uninformed proposal, once reached proposal
        method will switch. If None acceptance_threshold is used if
        greater than 0.1 else 10 x acceptance_threshold is used.
    kwargs
        Kwargs are passed to init method for uninformed proposal class
    """
    if maximum_uninformed is None:
        self.uninformed_sampling = True
        self.maximum_uninformed = 2 * self.nlive
    elif not maximum_uninformed:
        # False (or 0): disable uninformed sampling entirely
        self.uninformed_sampling = False
        self.maximum_uninformed = 0
    else:
        self.uninformed_sampling = True
        self.maximum_uninformed = float(maximum_uninformed)

    if uninformed_acceptance_threshold is None:
        # Derive the threshold from the main acceptance threshold
        if self.acceptance_threshold < 0.1:
            self.uninformed_acceptance_threshold = \
                10 * self.acceptance_threshold
        else:
            self.uninformed_acceptance_threshold = \
                self.acceptance_threshold
    else:
        self.uninformed_acceptance_threshold = \
            uninformed_acceptance_threshold

    if uninformed_proposal is None:
        # Imports deferred so default proposal classes are only loaded
        # when actually used.
        if analytic_priors:
            from .proposal import AnalyticProposal as uninformed_proposal
        else:
            from .proposal import RejectionProposal as uninformed_proposal
            kwargs['poolsize'] = self.nlive

    logger.debug(f'Using uninformed proposal: {uninformed_proposal}')
    logger.debug(f'Parsing kwargs to uninformed proposal: {kwargs}')
    self._uninformed_proposal = uninformed_proposal(
        self.model, **kwargs
    )
def configure_flow_proposal(self, flow_class, flow_config, proposal_plots,
                            **kwargs):
    """
    Set up the flow-based proposal method

    Parameters
    ----------
    flow_class : None or obj or str
        Class to use for proposal. If None FlowProposal is used.
    flow_config : dict
        Configuration dictionary passed to the class.
    proposal_plots : bool or str
        Configuration of plotting in proposal class.
    **kwargs :
        Kwargs passed to init function.
    """
    proposal_output = self.output + '/proposal/'

    # Never produce proposal plots if plotting is globally disabled
    if not self.plot:
        proposal_plots = False

    if flow_class is not None:
        if isinstance(flow_class, str):
            # Resolve a (case-insensitive) class name to the class;
            # imports are deferred to avoid loading unused modules.
            flow_class = flow_class.lower()
            if flow_class == 'gwflowproposal':
                from .gw.proposal import GWFlowProposal as flow_class
            elif flow_class == 'augmentedgwflowproposal':
                from .gw.proposal import (
                    AugmentedGWFlowProposal as flow_class)
            elif flow_class == 'legacygwflowproposal':
                from .gw.legacy import LegacyGWFlowProposal as flow_class
            elif flow_class == 'flowproposal':
                flow_class = FlowProposal
            elif flow_class == 'augmentedflowproposal':
                from .proposal import AugmentedFlowProposal
                flow_class = AugmentedFlowProposal
            else:
                raise ValueError(f'Unknown flow class: {flow_class}')
        elif not issubclass(flow_class, FlowProposal):
            raise RuntimeError('Flow class must be string or class that '
                               'inherits from FlowProposal')
    else:
        flow_class = FlowProposal

    # Default pool size is the number of live points
    if kwargs.get('poolsize', None) is None:
        kwargs['poolsize'] = self.nlive

    logger.debug(f'Using flow class: {flow_class}')
    logger.info(f'Parsing kwargs to FlowProposal: {kwargs}')
    self._flow_proposal = flow_class(
        self.model,
        flow_config=flow_config,
        output=proposal_output,
        plot=proposal_plots,
        **kwargs
    )
def setup_output(self, output, resume_file=None):
"""
Set up the output folder
Parameters
----------
output : str
Directory where the results will be stored
resume_file : optional
Specific file to use for checkpointing. If not specified the
default is used (nested_sampler_resume.pkl)
Returns
-------
resume_file : str
File used for checkpointing
"""
if not os.path.exists(output):
os.makedirs(output, exist_ok=True)
if resume_file is None:
resume_file = os.path.join(output, "nested_sampler_resume.pkl")
else:
resume_file = os.path.join(output, resume_file)
if self.plot:
os.makedirs(output + '/diagnostics/', exist_ok=True)
return resume_file
def setup_random_seed(self, seed):
"""
initialise the random seed
"""
self.seed = seed
if self.seed is not None:
logger.debug(f'Setting random seed to {seed}')
np.random.seed(seed=self.seed)
torch.manual_seed(self.seed)
def configure_flow_reset(self, reset_weights, reset_permutations):
"""Configure how often the flow parameters are reset.
Values are converted to floats.
Parameters
----------
reset_weights : int, float or bool
Frequency with which the weights will be reset.
reset_permutations : int, float or bool
Frequency with which the permutations will be reset.
"""
if isinstance(reset_weights, (int, float)):
self.reset_weights = float(reset_weights)
else:
raise TypeError(
'`reset_weights` must be a bool, int or float')
if isinstance(reset_permutations, (int, float)):
self.reset_permutations = float(reset_permutations)
else:
raise TypeError(
'`reset_permutations` must be a bool, int or float')
def check_insertion_indices(self, rolling=True, filename=None):
    """
    Checking the distribution of the insertion indices either during
    the nested sampling run (rolling=True) or for the whole run
    (rolling=False).
    """
    if rolling:
        # Only test the most recent nlive indices
        indices = self.insertion_indices[-self.nlive:]
    else:
        indices = self.insertion_indices

    D, p = compute_indices_ks_test(indices, self.nlive)

    # p is None when the helper could not run the test (e.g. no indices)
    if p is not None:
        if rolling:
            logger.warning(f'Rolling KS test: D={D:.4}, p-value={p:.4}')
            self.rolling_p.append(p)
        else:
            logger.warning(f'Final KS test: D={D:.4}, p-value={p:.4}')

    if filename is not None:
        # NOTE(review): the full index history is saved even in rolling
        # mode — confirm this is intentional.
        np.savetxt(os.path.join(self.output, filename),
                   self.insertion_indices, newline='\n', delimiter=' ')
def log_likelihood(self, x):
"""
Wrapper for the model likelihood so evaluations are counted
"""
return self.model.log_likelihood(x)
def yield_sample(self, oldparam):
    """
    Draw points and applying rejection sampling

    Generator: yields (n_draws, point). The yielded point is the new
    point when one above ``logLmin`` was found, otherwise the input
    point (when the proposal pool ran empty), which lets the caller
    trigger a training check.
    """
    while True:
        counter = 0
        while True:
            counter += 1
            newparam = self.proposal.draw(oldparam.copy())

            # Prior is computed in the proposal
            if newparam['logP'] != -np.inf:
                # A falsy logL is treated as "not yet evaluated".
                # NOTE(review): this also re-evaluates points whose
                # true logL is exactly 0 — confirm acceptable.
                if not newparam['logL']:
                    newparam['logL'] = \
                        self.model.evaluate_log_likelihood(newparam)
                if newparam['logL'] > self.logLmin:
                    self.logLmax = max(self.logLmax, newparam['logL'])
                    oldparam = newparam.copy()
                    break

            # Only here if proposed and then empty
            # This returns the old point and allows for a training check
            if not self.proposal.populated:
                break

        yield counter, oldparam
def insert_live_point(self, live_point):
"""
Insert a live point
"""
# This is the index including the current worst point, so final index
# is one less, otherwise index=0 would never be possible
index = np.searchsorted(self.live_points['logL'], live_point['logL'])
self.live_points[:index - 1] = self.live_points[1:index]
self.live_points[index - 1] = live_point
return index - 1
def consume_sample(self):
    """
    Replace a sample for single thread

    Removes the worst live point, adds it to the nested samples,
    updates the evidence state and stopping condition, then draws a
    replacement above the new logL threshold.
    """
    # Worst point becomes the new likelihood threshold
    worst = self.live_points[0].copy()
    self.logLmin = worst['logL']
    self.state.increment(worst['logL'])
    self.nested_samples.append(worst)

    # Remaining-evidence stopping condition
    self.condition = np.logaddexp(self.state.logZ,
                                  self.logLmax
                                  - self.iteration / float(self.nlive)) \
        - self.state.logZ

    # Replace the points we just consumed with the next acceptable ones
    # Make sure we are mixing the chains
    self.iteration += 1
    self.block_iteration += 1
    count = 0

    while(True):
        c, proposed = next(self.yield_sample(worst))
        count += c

        if proposed['logL'] > self.logLmin:
            # Assuming point was proposed
            # replace worst point with new one
            index = self.insert_live_point(proposed)
            self.insertion_indices.append(index)
            self.accepted += 1
            self.block_acceptance += 1 / count
            self.acceptance_history.append(1 / count)
            break
        else:
            # Only get here if the yield sample returns worse point
            # which can only happen if the pool is empty
            self.rejected += 1
            self.check_state()
            # if retrained whilst proposing a sample then update the
            # iteration count since will be zero otherwise
            if not self.block_iteration:
                self.block_iteration += 1

    self.mean_block_acceptance = self.block_acceptance \
        / self.block_iteration

    if self.info_enabled:
        logger.info(f"{self.iteration:5d}: n: {count:3d} "
                    f"b_acc: {self.mean_block_acceptance:.3f} "
                    f"H: {self.state.info[-1]:.2f} "
                    f"logL: {self.logLmin:.5f} --> {proposed['logL']:.5f} "
                    f"dZ: {self.condition:.3f} "
                    f"logZ: {self.state.logZ:.3f} "
                    f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
                    f"logLmax: {self.logLmax:.2f}")
def populate_live_points(self):
    """
    Initialise the pool of live points.

    Draws nlive points with finite logP and logL, skipping points whose
    likelihood is NaN, and stores them sorted by logL.
    """
    i = 0
    live_points = np.empty(self.nlive,
                           dtype=get_dtype(self.model.names,
                                           DEFAULT_FLOAT_DTYPE))

    with tqdm(total=self.nlive, desc='Drawing live points') as pbar:
        while i < self.nlive:
            while i < self.nlive:
                count, live_point = next(
                    self.yield_sample(self.model.new_point()))
                if np.isnan(live_point['logL']):
                    logger.warning(
                        'Likelihood function returned NaN for '
                        f'live_point {live_point}'
                    )
                    logger.warning(
                        'You may want to check your likelihood function'
                    )
                    # Restart the inner loop with a fresh point
                    break
                if (
                    np.isfinite(live_point['logP'])
                    and np.isfinite(live_point['logL'])
                ):
                    live_points[i] = live_point
                    i += 1
                    pbar.update()
                    break

    # Keep the pool sorted by logL (worst point at index 0)
    self.live_points = np.sort(live_points, order='logL')
    if self.store_live_points:
        np.savetxt(self.live_points_dir + '/initial_live_points.dat',
                   self.live_points,
                   header='\t'.join(self.live_points.dtype.names))
def initialise(self, live_points=True):
    """
    Initialise the nested sampler

    Parameters
    ----------
    live_points : bool, optional (True)
        If true and there are no live points, new live points are
        drawn using `populate_live_points` else all other initialisation
        steps are complete but live points remain empty.
    """
    # Track which of the three initialisation steps ran
    flags = [False] * 3

    if not self._flow_proposal.initialised:
        self._flow_proposal.initialise()
        flags[0] = True

    if not self._uninformed_proposal.initialised:
        self._uninformed_proposal.initialise()
        flags[1] = True

    # Choose the active proposal based on the current iteration
    if (
        self.iteration < self.maximum_uninformed
        and self.uninformed_sampling
    ):
        self.proposal = self._uninformed_proposal
    else:
        self.proposal = self._flow_proposal

    if live_points and self.live_points is None:
        self.populate_live_points()
        flags[2] = True

    # A run that has not converged cannot be finalised
    if self.condition > self.tolerance:
        self.finalised = False

    if all(flags):
        self.initialised = True
def check_proposal_switch(self, force=False):
    """
    Check if the proposal should be switch from uninformed to
    flowproposal given the current state.

    If the flow proposal is already in use, no changes are made.

    Parameters
    ----------
    force : bool, optional
        If True proposal is forced to switch.

    Returns
    -------
    bool
        Flag to indicated if proposal was switched
    """
    if (
        (self.mean_acceptance < self.uninformed_acceptance_threshold)
        or (self.iteration >= self.maximum_uninformed)
        or force
    ):
        if self.proposal is self._flow_proposal:
            logger.warning('Already using flowproposal')
            return True
        logger.warning('Switching to FlowProposal')
        self.proposal = self._flow_proposal
        # Seed the flow proposal's acceptance with the current value
        self.proposal.ns_acceptance = self.mean_block_acceptance
        self.uninformed_sampling = False
        return True
    # If using uninformed sampling, don't check training
    else:
        return False
def check_training(self):
    """
    Check if the normalising flow should be trained

    Checks that can force training:
        - Training was previously stopped before completion
        - The pool is empty and the proposal was not in the process
          of populating when stopped.
    Checks that cannot force training is still on cooldown:
        - Acceptance falls below threshold and `retrain_acceptance` is
          true
        - The number of iterations since last training is equal to the
          training frequency

    Returns
    -------
    train : bool
        Try to train if true
    force : bool
        Force the training irrespective of cooldown
    """
    if not self.completed_training:
        logger.debug('Training flow (resume)')
        return True, True
    elif (not self.proposal.populated and
            self.train_on_empty and
            not self.proposal.populating):
        logger.debug('Training flow (proposal empty)')
        return True, True
    elif (self.mean_block_acceptance < self.acceptance_threshold and
            self.retrain_acceptance):
        logger.debug('Training flow (acceptance)')
        return True, False
    elif (self.iteration - self.last_updated) == self.training_frequency:
        logger.debug('Training flow (iteration)')
        return True, False
    else:
        return False, False
def check_flow_model_reset(self):
    """
    Check if the normalising flow model should be reset.

    Checks acceptance if `reset_acceptance` is True and always checks
    how many times the flow has been trained.

    Flow will not be reset if it has not been trained. To force a reset
    manually call `proposal.reset_model_weights`.
    """
    # Never reset an untrained flow
    if not self.proposal.training_count:
        return

    if (self.reset_acceptance
            and self.mean_block_acceptance < self.acceptance_threshold):
        self.proposal.reset_model_weights(weights=True, permutations=True)
        return

    # Periodic resets: a frequency of 0.0 (False) disables the reset;
    # otherwise reset every `reset_*` trainings.
    self.proposal.reset_model_weights(
        weights=(
            self.reset_weights and
            not (self.proposal.training_count % self.reset_weights)
        ),
        permutations=(
            self.reset_permutations and
            not (self.proposal.training_count % self.reset_permutations)
        ),
    )
def train_proposal(self, force=False):
    """
    Try to train the proposal. Proposal will not train if cooldown is not
    exceeded unless force is True.

    Parameters
    ----------
    force : bool
        Override training checks
    """
    if (self.iteration - self.last_updated < self.cooldown and not force):
        logger.debug('Not training, still cooling down!')
    else:
        self.completed_training = False
        self.check_flow_model_reset()

        training_data = self.live_points.copy()
        # Optionally include the most recent nested samples as
        # additional training data
        if self.memory and (len(self.nested_samples) >= self.memory):
            training_data = np.concatenate([
                training_data, self.nested_samples[-self.memory:].copy()])

        st = datetime.datetime.now()
        self.proposal.train(training_data)
        self.training_time += (datetime.datetime.now() - st)
        self.training_iterations.append(self.iteration)

        # Reset the per-block statistics after training
        self.block_iteration = 0
        self.block_acceptance = 0.
        self.completed_training = True
        if self.checkpoint_on_training:
            self.checkpoint(periodic=True)
def check_state(self, force=False):
    """
    Check if state should be updated prior to drawing a new sample.

    `force` overrides the cooldown mechanism.
    """
    if self.uninformed_sampling:
        # Still on the uninformed proposal: nothing to train unless we
        # are switching to the flow proposal right now.
        if not self.check_proposal_switch():
            return
        force = True

    if force:
        logger.debug('Training flow (force)')
        train = True
    else:
        train, force = self.check_training()

    if train or force:
        self.train_proposal(force=force)
def plot_state(self, filename=None):
    """
    Produce plots with the current state of the nested sampling run.
    Plots are saved to the output directory specified at initialisation.

    Parameters
    ----------
    filename : str, optional
        If specified the figure will be saved, otherwise the figure is
        returned.
    """
    fig, ax = plt.subplots(6, 1, sharex=True, figsize=(12, 12))
    ax = ax.ravel()
    # Statistics are recorded every nlive // 10 iterations; the last
    # entry corresponds to the current iteration
    it = (np.arange(len(self.min_likelihood))) * (self.nlive // 10)
    it[-1] = self.iteration

    colours = ['#4575b4', '#d73027', '#fad117']
    ls = ['-', '--', ':']

    # Vertical markers: training, population and checkpoint iterations
    for t in self.training_iterations:
        for a in ax:
            a.axvline(t, ls='-', color='lightgrey')

    if not self.train_on_empty:
        for p in self.population_iterations:
            for a in ax:
                a.axvline(p, ls='-', color='tab:orange')

    for i in self.checkpoint_iterations:
        for a in ax:
            a.axvline(i, ls=':', color='#66ccff')

    for a in ax:
        a.axvline(self.iteration, c='#ff9900', ls='-.')

    # Panel 0: likelihood bounds
    ax[0].plot(it, self.min_likelihood, label='Min logL',
               c=colours[0], ls=ls[0])
    ax[0].plot(it, self.max_likelihood, label='Max logL',
               c=colours[1], ls=ls[1])
    ax[0].set_ylabel('logL')
    ax[0].legend(frameon=False)

    # Panel 1: log prior-volume (and optionally its gradient)
    logX_its = np.arange(len(self.state.log_vols))
    ax[1].plot(
        logX_its, self.state.log_vols, ls=ls[0], c=colours[0],
        label='log X'
    )
    ax[1].set_ylabel('Log X')
    ax[1].legend(frameon=False)

    if self.state.track_gradients:
        ax_logX_grad = plt.twinx(ax[1])
        # Use dotted linestyle (ls[2]) because dashed isn't clear
        ax_logX_grad.plot(
            logX_its,
            rolling_mean(np.abs(self.state.gradients), self.nlive // 10),
            c=colours[1],
            ls=ls[2],
            label='Gradient'
        )
        ax_logX_grad.set_ylabel(r'$|d\log L/d \log X|$')
        ax_logX_grad.set_yscale('log')
        handles, labels = ax[1].get_legend_handles_labels()
        handles_tw, labels_tw = ax_logX_grad.get_legend_handles_labels()
        ax[1].legend(
            handles + handles_tw, labels + labels_tw, frameon=False
        )

    # Panel 2: cumulative likelihood evaluations
    ax[2].plot(it, self.likelihood_evaluations, c=colours[0], ls=ls[0],
               label='Evaluations')
    ax[2].set_ylabel('logL evaluations')

    # Panel 3: evidence and remaining evidence (dZ) on a twin axis
    ax[3].plot(it, self.logZ_history, label='logZ', c=colours[0], ls=ls[0])
    ax[3].set_ylabel('logZ')
    ax[3].legend(frameon=False)
    ax_dz = plt.twinx(ax[3])
    ax_dz.plot(it, self.dZ_history, label='dZ', c=colours[1], ls=ls[1])
    ax_dz.set_ylabel('dZ')
    handles, labels = ax[3].get_legend_handles_labels()
    handles_dz, labels_dz = ax_dz.get_legend_handles_labels()
    ax[3].legend(handles + handles_dz, labels + labels_dz, frameon=False)

    # Panel 4: acceptances plus population radius on a twin axis
    ax[4].plot(it, self.mean_acceptance_history, c=colours[0],
               label='Proposal')
    ax[4].plot(self.population_iterations, self.population_acceptance,
               c=colours[1], ls=ls[1], label='Population')
    ax[4].set_ylabel('Acceptance')
    ax[4].set_ylim((-0.1, 1.1))
    handles, labels = ax[4].get_legend_handles_labels()
    ax_r = plt.twinx(ax[4])
    ax_r.plot(self.population_iterations, self.population_radii,
              label='Radius', color=colours[2], ls=ls[2])
    ax_r.set_ylabel('Population radius')
    handles_r, labels_r = ax_r.get_legend_handles_labels()
    ax[4].legend(handles + handles_r, labels + labels_r, frameon=False)

    # Panel 5: rolling KS-test p-values (one per nlive iterations)
    if len(self.rolling_p):
        it = (np.arange(len(self.rolling_p)) + 1) * self.nlive
        ax[5].plot(it, self.rolling_p, 'o', c=colours[0], label='p-value')
    ax[5].set_ylabel('p-value')
    ax[5].set_ylim([-0.1, 1.1])

    ax[-1].set_xlabel('Iteration')
    fig.suptitle(f'Sampling time: {self.current_sampling_time}',
                 fontsize=16)
    handles = [
        Line2D([0], [0], color='#ff9900', linestyle='-.',
               label='Current iteration'),
        Line2D([0], [0], color='lightgrey', linestyle='-',
               markersize=10, markeredgewidth=1.5, label='Training'),
        Line2D([0], [0], color='#66ccff', linestyle=':',
               label='Checkpoint'),
    ]
    fig.legend(
        handles=handles, frameon=False, ncol=3, loc=(0.6, 0.0)
    )
    fig.tight_layout()
    fig.subplots_adjust(top=0.95)
    if filename is not None:
        fig.savefig(filename)
        plt.close(fig)
    else:
        return fig
def plot_trace(self, filename=None):
    """
    Make trace plots for the nested samples.

    Parameters
    ----------
    filename : str, optional
        If filename is None, the figure is returned. Else the figure
        is saved with that file name.
    """
    if self.nested_samples:
        # First log-volume entry is the initial value, skip it so the
        # arrays line up with the nested samples
        fig = plot_trace(self.state.log_vols[1:], self.nested_samples,
                         filename=filename)
        return fig
    else:
        logger.warning('Could not produce trace plot. No nested samples!')
def plot_insertion_indices(self, filename=None, **kwargs):
    """
    Make a plot of all the insertion indices.

    Parameters
    ----------
    filename : str, optional
        If filename is None, the figure is returned. Else the figure
        is saved with that file name.
    kwargs :
        Keyword arguments passed to `nessai.plot.plot_indices`.
    """
    return plot_indices(self.insertion_indices, self.nlive,
                        filename=filename, **kwargs)
def update_state(self, force=False):
    """
    Update state after replacing a live point

    Records population/acceptance statistics, periodically logs and
    checkpoints, runs the rolling insertion-index test and refreshes
    the diagnostic plots. `force` runs the periodic work immediately.
    """
    # Check if acceptance is not None, this indicates the proposal
    # was populated
    if not self.proposal._checked_population:
        self.population_acceptance.append(
            self.proposal.population_acceptance)
        self.population_radii.append(self.proposal.r)
        self.population_iterations.append(self.iteration)
        self.proposal._checked_population = True

    # Record summary statistics every nlive // 10 iterations
    if not (self.iteration % (self.nlive // 10)) or force:
        self.likelihood_evaluations.append(
            self.model.likelihood_evaluations)
        self.min_likelihood.append(self.logLmin)
        self.max_likelihood.append(self.logLmax)
        self.logZ_history.append(self.state.logZ)
        self.dZ_history.append(self.condition)
        self.mean_acceptance_history.append(self.mean_acceptance)

    # Log, checkpoint and plot every nlive iterations
    if not (self.iteration % self.nlive) or force:
        logger.warning(
            f"it: {self.iteration:5d}: "
            f"n eval: {self.likelihood_calls} "
            f"H: {self.state.info[-1]:.2f} "
            f"dZ: {self.condition:.3f} logZ: {self.state.logZ:.3f} "
            f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
            f"logLmax: {self.logLmax:.2f}")
        if self.checkpointing:
            self.checkpoint(periodic=True)
        if not force:
            self.check_insertion_indices()
            if self.plot:
                plot_indices(self.insertion_indices[-self.nlive:],
                             self.nlive,
                             plot_breakdown=False,
                             filename=(f'{self.output}/diagnostics/'
                                       'insertion_indices_'
                                       f'{self.iteration}.png'))
        if self.plot:
            self.plot_state(filename=f'{self.output}/state.png')
            self.plot_trace(filename=f'{self.output}/trace.png')

    # Block statistics are not meaningful during uninformed sampling
    if self.uninformed_sampling:
        self.block_acceptance = 0.
        self.block_iteration = 0

    self.proposal.ns_acceptance = self.mean_block_acceptance
def checkpoint(self, periodic=False):
    """
    Checkpoint the classes internal state

    Parameters
    ----------
    periodic : bool
        Indicates if the checkpoint is regular periodic checkpointing
        or forced by a signal. If forced by a signal, it will show up on
        the state plot.
    """
    if not periodic:
        self.checkpoint_iterations += [self.iteration]
    # Fold the elapsed time of the current block into the total before
    # pickling, then restart the clock
    self.sampling_time += \
        (datetime.datetime.now() - self.sampling_start_time)
    logger.critical('Checkpointing nested sampling')
    safe_file_dump(self, self.resume_file, pickle, save_existing=True)
    self.sampling_start_time = datetime.datetime.now()
def check_resume(self):
    """
    Check the normalising flow is correctly configured is the sampler
    was resumed.
    """
    if self.resumed:
        # A resumed run that had already switched to the flow proposal
        # must switch again (the active proposal is not pickled state)
        if self.uninformed_sampling is False:
            self.check_proposal_switch(force=True)
        # If pool is populated reset the flag since it is set to
        # false during initialisation
        if hasattr(self._flow_proposal, 'resume_populated'):
            if (self._flow_proposal.resume_populated and
                    self._flow_proposal.indices):
                self._flow_proposal.populated = True
                logger.info('Resumed with populated pool')
        self.resumed = False
def finalise(self):
    """
    Finalise things after sampling

    Consumes the remaining live points (with a shrinking effective
    nlive) into the nested samples and finalises the evidence.
    """
    logger.info('Finalising')
    for i, p in enumerate(self.live_points):
        self.state.increment(p['logL'], nlive=self.nlive-i)
        self.nested_samples.append(p)

    # Refine evidence estimate
    self.update_state(force=True)
    self.state.finalise()
    # output the chain and evidence
    self.finalised = True
def nested_sampling_loop(self):
    """
    Main nested sampling loop

    Returns
    -------
    The live points when `prior_sampling` is enabled, otherwise the
    tuple (logZ, nested_samples array).
    """
    self.sampling_start_time = datetime.datetime.now()
    if not self.initialised:
        self.initialise(live_points=True)

    # Prior sampling mode: the initial live points ARE the result
    if self.prior_sampling:
        self.nested_samples = self.live_points.copy()
        if self.close_pool:
            self.model.close_pool()
        return self.nested_samples

    self.check_resume()

    if self.iteration:
        self.update_state()

    logger.critical('Starting nested sampling loop')

    while self.condition > self.tolerance:
        self.check_state()
        self.consume_sample()
        self.update_state()
        if self.iteration >= self.max_iteration:
            break

    # final adjustments
    # avoid repeating final adjustments if resuming a completed run.
    if not self.finalised and (self.condition <= self.tolerance):
        self.finalise()

    logger.critical(f'Final evidence: {self.state.logZ:.3f} +/- '
                    f'{np.sqrt(self.state.info[-1] / self.nlive):.3f}')
    logger.critical('Information: {0:.2f}'.format(self.state.info[-1]))

    self.check_insertion_indices(rolling=False)

    # This includes updating the total sampling time
    self.checkpoint(periodic=True)

    if self.close_pool:
        self.model.close_pool()

    logger.info(f'Total sampling time: {self.sampling_time}')
    logger.info(f'Total training time: {self.training_time}')
    logger.info(f'Total population time: {self.proposal_population_time}')
    logger.info(
        f'Total likelihood evaluations: {self.likelihood_calls:3d}')
    logger.info(
        'Time spent evaluating likelihood: '
        f'{self.likelihood_evaluation_time}'
    )

    return self.state.logZ, np.array(self.nested_samples)
@classmethod
def resume(cls, filename, model, flow_config=None, weights_file=None):
    """
    Resumes the interrupted state from a checkpoint pickle file.

    Parameters
    ----------
    filename : str
        Pickle file to resume from
    model : :obj:`nessai.model.Model`
        User-defined model
    flow_config : dict, optional
        Dictionary for configuring the flow
    weights_file : str, optional
        Weights file to use in place of the weights file stored in the
        pickle file.

    Returns
    -------
    obj
        Instance of NestedSampler
    """
    # Fix: avoid a mutable default argument (`flow_config={}`) which is
    # shared between calls; None is substituted here instead.
    if flow_config is None:
        flow_config = {}
    logger.critical('Resuming NestedSampler from ' + filename)
    with open(filename, 'rb') as f:
        obj = pickle.load(f)
    # Carry over the evaluations recorded before the checkpoint so the
    # running total stays correct.
    model.likelihood_evaluations += obj.likelihood_evaluations[-1]
    obj.model = model
    obj._uninformed_proposal.resume(model)
    obj._flow_proposal.resume(model, flow_config, weights_file)
    obj.resumed = True
    return obj
def __getstate__(self):
state = self.__dict__.copy()
del state['model']
return state
def __setstate__(self, state):
    """Restore pickled state; the model must be re-attached by the
    caller (see `resume`)."""
    self.__dict__ = state
| 36.313301
| 79
| 0.583045
|
from collections import deque
import datetime
import logging
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import seaborn as sns
import torch
from tqdm import tqdm
from .livepoint import get_dtype, DEFAULT_FLOAT_DTYPE
from .plot import plot_indices, plot_trace
from .evidence import _NSIntegralState
from .proposal import FlowProposal
from .utils import (
safe_file_dump,
compute_indices_ks_test,
rolling_mean,
)
# Global seaborn styling for all figures produced by this module
sns.set()
sns.set_style('ticks')

# Module-level logger
logger = logging.getLogger(__name__)
class NestedSampler:
def __init__(
    self,
    model,
    nlive=2000,
    output=None,
    stopping=0.1,
    max_iteration=None,
    checkpointing=True,
    checkpoint_on_training=False,
    resume_file=None,
    seed=None,
    pool=None,
    close_pool=True,
    n_pool=None,
    plot=True,
    proposal_plots=False,
    prior_sampling=False,
    analytic_priors=False,
    maximum_uninformed=None,
    uninformed_proposal=None,
    uninformed_acceptance_threshold=None,
    uninformed_proposal_kwargs=None,
    flow_class=None,
    flow_config=None,
    training_frequency=None,
    train_on_empty=True,
    cooldown=200,
    memory=False,
    reset_weights=False,
    reset_permutations=False,
    retrain_acceptance=True,
    reset_acceptance=False,
    acceptance_threshold=0.01,
    **kwargs
):
    """Initialise the nested sampler.

    Sets up the model, counters, history lists, evidence integrator,
    output directory, random seed and both proposal methods. Extra
    keyword arguments are forwarded to the flow proposal class.
    """
    logger.info('Initialising nested sampler')
    # Cache whether INFO logging is on so per-iteration logging is cheap
    self.info_enabled = logger.isEnabledFor(logging.INFO)
    model.verify_model()
    self.model = model
    self.model.configure_pool(pool=pool, n_pool=n_pool)
    self.close_pool = close_pool
    self.nlive = nlive
    self.live_points = None
    self.prior_sampling = prior_sampling
    self.setup_random_seed(seed)
    self.accepted = 0
    self.rejected = 1
    self.initialised = False
    self.checkpointing = checkpointing
    self.checkpoint_on_training = checkpoint_on_training
    self.iteration = 0
    # Rolling acceptance over the last nlive // 10 replacements
    self.acceptance_history = deque(maxlen=(nlive // 10))
    self.mean_acceptance_history = []
    self.block_acceptance = 1.
    self.mean_block_acceptance = 1.
    self.block_iteration = 0
    self.retrain_acceptance = retrain_acceptance
    self.reset_acceptance = reset_acceptance
    self.insertion_indices = []
    self.rolling_p = []
    self.resumed = False
    self.tolerance = stopping
    self.condition = np.inf
    self.logLmin = -np.inf
    self.logLmax = -np.inf
    self.nested_samples = []
    self.logZ = None
    self.state = _NSIntegralState(self.nlive, track_gradients=plot)
    self.plot = plot
    self.resume_file = self.setup_output(output, resume_file)
    self.output = output
    # Timing
    self.training_time = datetime.timedelta()
    self.sampling_time = datetime.timedelta()
    self.sampling_start_time = datetime.datetime.now()
    # Resume flags
    self.completed_training = True
    self.finalised = False
    # History
    self.likelihood_evaluations = []
    self.training_iterations = []
    self.min_likelihood = []
    self.max_likelihood = []
    self.logZ_history = []
    self.dZ_history = []
    self.population_acceptance = []
    self.population_radii = []
    self.population_iterations = []
    self.checkpoint_iterations = []
    self.acceptance_threshold = acceptance_threshold
    self.train_on_empty = train_on_empty
    self.cooldown = cooldown
    self.memory = memory
    self.configure_max_iteration(max_iteration)
    self.configure_flow_reset(reset_weights, reset_permutations)
    self.configure_training_frequency(training_frequency)
    if uninformed_proposal_kwargs is None:
        uninformed_proposal_kwargs = {}
    self.configure_uninformed_proposal(uninformed_proposal,
                                       analytic_priors,
                                       maximum_uninformed,
                                       uninformed_acceptance_threshold,
                                       **uninformed_proposal_kwargs)
    self.configure_flow_proposal(flow_class, flow_config, proposal_plots,
                                 **kwargs)
    # Debug option: store every live point drawn/replaced to disk
    self.store_live_points = False
    if self.store_live_points:
        self.live_points_dir = f'{self.output}/live_points/'
        os.makedirs(self.live_points_dir, exist_ok=True)
        self.replacement_points = []
@property
def log_evidence(self):
    """Current log-evidence estimate from the integrator state."""
    return self.state.logZ
@property
def information(self):
    """Most recent information (H) estimate."""
    return self.state.info[-1]
@property
def likelihood_calls(self):
    """Total number of likelihood evaluations recorded by the model."""
    return self.model.likelihood_evaluations
@property
def likelihood_evaluation_time(self):
    """Cumulative wall time spent evaluating the likelihood."""
    return self.model.likelihood_evaluation_time
@property
def proposal_population_time(self):
    """Combined population time of both proposal methods."""
    t = self._uninformed_proposal.population_time
    t += self._flow_proposal.population_time
    return t
@property
def acceptance(self):
    """Overall acceptance: iterations per likelihood evaluation."""
    return self.iteration / self.likelihood_calls
@property
def current_sampling_time(self):
    """Total sampling time, including the in-progress block when the
    run has not been finalised."""
    if self.finalised:
        return self.sampling_time
    else:
        return self.sampling_time \
            + (datetime.datetime.now() - self.sampling_start_time)
@property
def last_updated(self):
    """Iteration at which the flow was last retrained (0 if never)."""
    if self.training_iterations:
        return self.training_iterations[-1]
    else:
        return 0
@property
def mean_acceptance(self):
    """Mean acceptance over the recent history; NaN when empty."""
    if self.acceptance_history:
        return np.mean(self.acceptance_history)
    else:
        return np.nan
def configure_max_iteration(self, max_iteration):
    """Configure the maximum iteration; None means no maximum."""
    if max_iteration is None:
        self.max_iteration = np.inf
    else:
        self.max_iteration = max_iteration
def configure_training_frequency(self, training_frequency):
    """Configure training frequency; None/'inf'/'None' means the flow
    only trains when the proposal pool is empty."""
    if training_frequency in [None, 'inf', 'None']:
        logger.warning('Proposal will only train when empty')
        self.training_frequency = np.inf
    else:
        self.training_frequency = training_frequency
def configure_uninformed_proposal(self,
                                  uninformed_proposal,
                                  analytic_priors,
                                  maximum_uninformed,
                                  uninformed_acceptance_threshold,
                                  **kwargs):
    """Set up the uninformed (untrained) proposal method used for the
    initial prior-sampling phase; kwargs are passed to its __init__."""
    if maximum_uninformed is None:
        self.uninformed_sampling = True
        self.maximum_uninformed = 2 * self.nlive
    elif not maximum_uninformed:
        self.uninformed_sampling = False
        self.maximum_uninformed = 0
    else:
        self.uninformed_sampling = True
        self.maximum_uninformed = float(maximum_uninformed)
    if uninformed_acceptance_threshold is None:
        if self.acceptance_threshold < 0.1:
            self.uninformed_acceptance_threshold = \
                10 * self.acceptance_threshold
        else:
            self.uninformed_acceptance_threshold = \
                self.acceptance_threshold
    else:
        self.uninformed_acceptance_threshold = \
            uninformed_acceptance_threshold
    if uninformed_proposal is None:
        if analytic_priors:
            from .proposal import AnalyticProposal as uninformed_proposal
        else:
            from .proposal import RejectionProposal as uninformed_proposal
            kwargs['poolsize'] = self.nlive
    logger.debug(f'Using uninformed proposal: {uninformed_proposal}')
    logger.debug(f'Parsing kwargs to uninformed proposal: {kwargs}')
    self._uninformed_proposal = uninformed_proposal(
        self.model, **kwargs
    )
    def configure_flow_proposal(self, flow_class, flow_config, proposal_plots,
                                **kwargs):
        """Configure the flow-based proposal.

        Parameters
        ----------
        flow_class : str, class or None
            Name of a known proposal class, a class inheriting from
            ``FlowProposal``, or None for the default ``FlowProposal``.
        flow_config : dict
            Configuration passed through to the proposal's flow.
        proposal_plots : bool
            Enable proposal-level plots (forced off when ``self.plot`` is off).
        **kwargs :
            Additional keyword arguments for the proposal class.

        Raises
        ------
        ValueError
            If ``flow_class`` is an unrecognised string.
        RuntimeError
            If ``flow_class`` is neither a string nor a FlowProposal subclass.
        """
        proposal_output = self.output + '/proposal/'
        if not self.plot:
            proposal_plots = False
        if flow_class is not None:
            if isinstance(flow_class, str):
                # String names are matched case-insensitively
                flow_class = flow_class.lower()
                if flow_class == 'gwflowproposal':
                    from .gw.proposal import GWFlowProposal as flow_class
                elif flow_class == 'augmentedgwflowproposal':
                    from .gw.proposal import (
                        AugmentedGWFlowProposal as flow_class)
                elif flow_class == 'legacygwflowproposal':
                    from .gw.legacy import LegacyGWFlowProposal as flow_class
                elif flow_class == 'flowproposal':
                    flow_class = FlowProposal
                elif flow_class == 'augmentedflowproposal':
                    from .proposal import AugmentedFlowProposal
                    flow_class = AugmentedFlowProposal
                else:
                    raise ValueError(f'Unknown flow class: {flow_class}')
            elif not issubclass(flow_class, FlowProposal):
                raise RuntimeError('Flow class must be string or class that '
                                   'inherits from FlowProposal')
        else:
            flow_class = FlowProposal
        if kwargs.get('poolsize', None) is None:
            kwargs['poolsize'] = self.nlive
        logger.debug(f'Using flow class: {flow_class}')
        logger.info(f'Parsing kwargs to FlowProposal: {kwargs}')
        self._flow_proposal = flow_class(
            self.model,
            flow_config=flow_config,
            output=proposal_output,
            plot=proposal_plots,
            **kwargs
        )
def setup_output(self, output, resume_file=None):
if not os.path.exists(output):
os.makedirs(output, exist_ok=True)
if resume_file is None:
resume_file = os.path.join(output, "nested_sampler_resume.pkl")
else:
resume_file = os.path.join(output, resume_file)
if self.plot:
os.makedirs(output + '/diagnostics/', exist_ok=True)
return resume_file
def setup_random_seed(self, seed):
self.seed = seed
if self.seed is not None:
logger.debug(f'Setting random seed to {seed}')
np.random.seed(seed=self.seed)
torch.manual_seed(self.seed)
def configure_flow_reset(self, reset_weights, reset_permutations):
if isinstance(reset_weights, (int, float)):
self.reset_weights = float(reset_weights)
else:
raise TypeError(
'`reset_weights` must be a bool, int or float')
if isinstance(reset_permutations, (int, float)):
self.reset_permutations = float(reset_permutations)
else:
raise TypeError(
'`reset_permutations` must be a bool, int or float')
    def check_insertion_indices(self, rolling=True, filename=None):
        """Run a KS test on the live-point insertion indices.

        Parameters
        ----------
        rolling : bool
            If True, only test the most recent ``nlive`` indices and
            append the p-value to ``rolling_p``; otherwise test them all.
        filename : str, optional
            If given, save all insertion indices to this file (relative
            to the output directory).
        """
        if rolling:
            indices = self.insertion_indices[-self.nlive:]
        else:
            indices = self.insertion_indices
        D, p = compute_indices_ks_test(indices, self.nlive)
        if p is not None:
            if rolling:
                logger.warning(f'Rolling KS test: D={D:.4}, p-value={p:.4}')
                self.rolling_p.append(p)
            else:
                logger.warning(f'Final KS test: D={D:.4}, p-value={p:.4}')
        if filename is not None:
            np.savetxt(os.path.join(self.output, filename),
                       self.insertion_indices, newline='\n', delimiter=' ')
def log_likelihood(self, x):
return self.model.log_likelihood(x)
    def yield_sample(self, oldparam):
        """Generator yielding samples above the current likelihood bound.

        Yields
        ------
        (count, param) : tuple
            Number of proposal draws consumed, and the accepted point —
            or the previous point again if the proposal pool was
            exhausted before an acceptable draw was found.
        """
        while True:
            counter = 0
            while True:
                counter += 1
                newparam = self.proposal.draw(oldparam.copy())
                # Only evaluate the likelihood for points with a valid prior
                if newparam['logP'] != -np.inf:
                    if not newparam['logL']:
                        newparam['logL'] = \
                            self.model.evaluate_log_likelihood(newparam)
                    if newparam['logL'] > self.logLmin:
                        self.logLmax = max(self.logLmax, newparam['logL'])
                        oldparam = newparam.copy()
                        break
                # Bail out if the proposal pool ran dry before acceptance
                if not self.proposal.populated:
                    break
            yield counter, oldparam
def insert_live_point(self, live_point):
index = np.searchsorted(self.live_points['logL'], live_point['logL'])
self.live_points[:index - 1] = self.live_points[1:index]
self.live_points[index - 1] = live_point
return index - 1
    def consume_sample(self):
        """Replace the worst live point with a new point above ``logLmin``.

        Updates the evidence state, the stopping condition, the insertion
        indices and the block acceptance statistics.
        """
        worst = self.live_points[0].copy()
        self.logLmin = worst['logL']
        self.state.increment(worst['logL'])
        self.nested_samples.append(worst)
        # Stopping condition: estimated fractional remaining evidence
        self.condition = np.logaddexp(self.state.logZ,
                                      self.logLmax
                                      - self.iteration / float(self.nlive)) \
            - self.state.logZ
        self.iteration += 1
        self.block_iteration += 1
        count = 0
        while(True):
            c, proposed = next(self.yield_sample(worst))
            count += c
            if proposed['logL'] > self.logLmin:
                # Accept: insert and record the insertion index
                index = self.insert_live_point(proposed)
                self.insertion_indices.append(index)
                self.accepted += 1
                self.block_acceptance += 1 / count
                self.acceptance_history.append(1 / count)
                break
            else:
                self.rejected += 1
                self.check_state()
                # Avoid division by zero if training just reset the block
                if not self.block_iteration:
                    self.block_iteration += 1
        self.mean_block_acceptance = self.block_acceptance \
            / self.block_iteration
        if self.info_enabled:
            logger.info(f"{self.iteration:5d}: n: {count:3d} "
                        f"b_acc: {self.mean_block_acceptance:.3f} "
                        f"H: {self.state.info[-1]:.2f} "
                        f"logL: {self.logLmin:.5f} --> {proposed['logL']:.5f} "
                        f"dZ: {self.condition:.3f} "
                        f"logZ: {self.state.logZ:.3f} "
                        f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
                        f"logLmax: {self.logLmax:.2f}")
    def populate_live_points(self):
        """Draw the initial set of ``nlive`` live points from the model.

        Points must have finite logP and logL; draws with a NaN
        likelihood are discarded with a warning.
        """
        i = 0
        live_points = np.empty(self.nlive,
                               dtype=get_dtype(self.model.names,
                                               DEFAULT_FLOAT_DTYPE))
        with tqdm(total=self.nlive, desc='Drawing live points') as pbar:
            while i < self.nlive:
                while i < self.nlive:
                    count, live_point = next(
                        self.yield_sample(self.model.new_point()))
                    if np.isnan(live_point['logL']):
                        logger.warning(
                            'Likelihood function returned NaN for '
                            f'live_point {live_point}'
                        )
                        logger.warning(
                            'You may want to check your likelihood function'
                        )
                        # NOTE(review): a NaN draw only restarts the outer
                        # loop; a model that always returns NaN would spin
                        # here forever.
                        break
                    if (
                        np.isfinite(live_point['logP'])
                        and np.isfinite(live_point['logL'])
                    ):
                        live_points[i] = live_point
                        i += 1
                        pbar.update()
                        break
        # Live points are kept sorted by logL (worst first)
        self.live_points = np.sort(live_points, order='logL')
        if self.store_live_points:
            np.savetxt(self.live_points_dir + '/initial_live_points.dat',
                       self.live_points,
                       header='\t'.join(self.live_points.dtype.names))
def initialise(self, live_points=True):
flags = [False] * 3
if not self._flow_proposal.initialised:
self._flow_proposal.initialise()
flags[0] = True
if not self._uninformed_proposal.initialised:
self._uninformed_proposal.initialise()
flags[1] = True
if (
self.iteration < self.maximum_uninformed
and self.uninformed_sampling
):
self.proposal = self._uninformed_proposal
else:
self.proposal = self._flow_proposal
if live_points and self.live_points is None:
self.populate_live_points()
flags[2] = True
if self.condition > self.tolerance:
self.finalised = False
if all(flags):
self.initialised = True
def check_proposal_switch(self, force=False):
if (
(self.mean_acceptance < self.uninformed_acceptance_threshold)
or (self.iteration >= self.maximum_uninformed)
or force
):
if self.proposal is self._flow_proposal:
logger.warning('Already using flowproposal')
return True
logger.warning('Switching to FlowProposal')
self.proposal = self._flow_proposal
self.proposal.ns_acceptance = self.mean_block_acceptance
self.uninformed_sampling = False
return True
else:
return False
def check_training(self):
if not self.completed_training:
logger.debug('Training flow (resume)')
return True, True
elif (not self.proposal.populated and
self.train_on_empty and
not self.proposal.populating):
logger.debug('Training flow (proposal empty)')
return True, True
elif (self.mean_block_acceptance < self.acceptance_threshold and
self.retrain_acceptance):
logger.debug('Training flow (acceptance)')
return True, False
elif (self.iteration - self.last_updated) == self.training_frequency:
logger.debug('Training flow (iteration)')
return True, False
else:
return False, False
def check_flow_model_reset(self):
if not self.proposal.training_count:
return
if (self.reset_acceptance
and self.mean_block_acceptance < self.acceptance_threshold):
self.proposal.reset_model_weights(weights=True, permutations=True)
return
self.proposal.reset_model_weights(
weights=(
self.reset_weights and
not (self.proposal.training_count % self.reset_weights)
),
permutations=(
self.reset_permutations and
not (self.proposal.training_count % self.reset_permutations)
),
)
    def train_proposal(self, force=False):
        """Train the flow proposal unless still within the cooldown window.

        Parameters
        ----------
        force : bool
            Train even if fewer than ``cooldown`` iterations have passed
            since the last update.
        """
        if (self.iteration - self.last_updated < self.cooldown and not force):
            logger.debug('Not training, still cooling down!')
        else:
            self.completed_training = False
            self.check_flow_model_reset()
            training_data = self.live_points.copy()
            if self.memory and (len(self.nested_samples) >= self.memory):
                # Also train on the most recent nested samples
                training_data = np.concatenate([
                    training_data, self.nested_samples[-self.memory:].copy()])
            st = datetime.datetime.now()
            self.proposal.train(training_data)
            self.training_time += (datetime.datetime.now() - st)
            self.training_iterations.append(self.iteration)
            # Training resets the block acceptance statistics
            self.block_iteration = 0
            self.block_acceptance = 0.
            self.completed_training = True
            if self.checkpoint_on_training:
                self.checkpoint(periodic=True)
def check_state(self, force=False):
if self.uninformed_sampling:
if self.check_proposal_switch():
force = True
else:
return
# General override
train = False
if force:
train = True
logger.debug('Training flow (force)')
elif not train:
train, force = self.check_training()
if train or force:
self.train_proposal(force=force)
def plot_state(self, filename=None):
fig, ax = plt.subplots(6, 1, sharex=True, figsize=(12, 12))
ax = ax.ravel()
it = (np.arange(len(self.min_likelihood))) * (self.nlive // 10)
it[-1] = self.iteration
colours = ['']
for t in self.training_iterations:
for a in ax:
a.axvline(t, ls='-', color='lightgrey')
if not self.train_on_empty:
for p in self.population_iterations:
for a in ax:
a.axvline(p, ls='-', color='tab:orange')
for i in self.checkpoint_iterations:
for a in ax:
a.axvline(i, ls=':', color='
for a in ax:
a.axvline(self.iteration, c='
ax[0].plot(it, self.min_likelihood, label='Min logL',
c=colours[0], ls=ls[0])
ax[0].plot(it, self.max_likelihood, label='Max logL',
c=colours[1], ls=ls[1])
ax[0].set_ylabel('logL')
ax[0].legend(frameon=False)
logX_its = np.arange(len(self.state.log_vols))
ax[1].plot(
logX_its, self.state.log_vols, ls=ls[0], c=colours[0],
label='log X'
)
ax[1].set_ylabel('Log X')
ax[1].legend(frameon=False)
if self.state.track_gradients:
ax_logX_grad = plt.twinx(ax[1])
# Use dotted linestyle (ls[2]) because dashed isn't clear
ax_logX_grad.plot(
logX_its,
rolling_mean(np.abs(self.state.gradients), self.nlive // 10),
c=colours[1],
ls=ls[2],
label='Gradient'
)
ax_logX_grad.set_ylabel(r'$|d\log L/d \log X|$')
ax_logX_grad.set_yscale('log')
handles, labels = ax[1].get_legend_handles_labels()
handles_tw, labels_tw = ax_logX_grad.get_legend_handles_labels()
ax[1].legend(
handles + handles_tw, labels + labels_tw, frameon=False
)
ax[2].plot(it, self.likelihood_evaluations, c=colours[0], ls=ls[0],
label='Evaluations')
ax[2].set_ylabel('logL evaluations')
ax[3].plot(it, self.logZ_history, label='logZ', c=colours[0], ls=ls[0])
ax[3].set_ylabel('logZ')
ax[3].legend(frameon=False)
ax_dz = plt.twinx(ax[3])
ax_dz.plot(it, self.dZ_history, label='dZ', c=colours[1], ls=ls[1])
ax_dz.set_ylabel('dZ')
handles, labels = ax[3].get_legend_handles_labels()
handles_dz, labels_dz = ax_dz.get_legend_handles_labels()
ax[3].legend(handles + handles_dz, labels + labels_dz, frameon=False)
ax[4].plot(it, self.mean_acceptance_history, c=colours[0],
label='Proposal')
ax[4].plot(self.population_iterations, self.population_acceptance,
c=colours[1], ls=ls[1], label='Population')
ax[4].set_ylabel('Acceptance')
ax[4].set_ylim((-0.1, 1.1))
handles, labels = ax[4].get_legend_handles_labels()
ax_r = plt.twinx(ax[4])
ax_r.plot(self.population_iterations, self.population_radii,
label='Radius', color=colours[2], ls=ls[2])
ax_r.set_ylabel('Population radius')
handles_r, labels_r = ax_r.get_legend_handles_labels()
ax[4].legend(handles + handles_r, labels + labels_r, frameon=False)
if len(self.rolling_p):
it = (np.arange(len(self.rolling_p)) + 1) * self.nlive
ax[5].plot(it, self.rolling_p, 'o', c=colours[0], label='p-value')
ax[5].set_ylabel('p-value')
ax[5].set_ylim([-0.1, 1.1])
ax[-1].set_xlabel('Iteration')
fig.suptitle(f'Sampling time: {self.current_sampling_time}',
fontsize=16)
handles = [
Line2D([0], [0], color='#ff9900', linestyle='-.',
label='Current iteration'),
Line2D([0], [0], color='lightgrey', linestyle='-',
markersize=10, markeredgewidth=1.5, label='Training'),
Line2D([0], [0], color='#66ccff', linestyle=':',
label='Checkpoint'),
]
fig.legend(
handles=handles, frameon=False, ncol=3, loc=(0.6, 0.0)
)
fig.tight_layout()
fig.subplots_adjust(top=0.95)
if filename is not None:
fig.savefig(filename)
plt.close(fig)
else:
return fig
def plot_trace(self, filename=None):
if self.nested_samples:
fig = plot_trace(self.state.log_vols[1:], self.nested_samples,
filename=filename)
return fig
else:
logger.warning('Could not produce trace plot. No nested samples!')
def plot_insertion_indices(self, filename=None, **kwargs):
return plot_indices(
self.insertion_indices,
self.nlive,
filename=filename,
**kwargs
)
    def update_state(self, force=False):
        """Update diagnostics, history arrays, plots and checkpoints.

        Called every iteration; the cheap updates run every
        ``nlive // 10`` iterations and the expensive ones every ``nlive``
        iterations, unless ``force`` is True.
        """
        # Record population statistics once per proposal population
        if not self.proposal._checked_population:
            self.population_acceptance.append(
                self.proposal.population_acceptance)
            self.population_radii.append(self.proposal.r)
            self.population_iterations.append(self.iteration)
            self.proposal._checked_population = True
        if not (self.iteration % (self.nlive // 10)) or force:
            self.likelihood_evaluations.append(
                self.model.likelihood_evaluations)
            self.min_likelihood.append(self.logLmin)
            self.max_likelihood.append(self.logLmax)
            self.logZ_history.append(self.state.logZ)
            self.dZ_history.append(self.condition)
            self.mean_acceptance_history.append(self.mean_acceptance)
        if not (self.iteration % self.nlive) or force:
            logger.warning(
                f"it: {self.iteration:5d}: "
                f"n eval: {self.likelihood_calls} "
                f"H: {self.state.info[-1]:.2f} "
                f"dZ: {self.condition:.3f} logZ: {self.state.logZ:.3f} "
                f"+/- {np.sqrt(self.state.info[-1] / self.nlive):.3f} "
                f"logLmax: {self.logLmax:.2f}")
            if self.checkpointing:
                self.checkpoint(periodic=True)
            if not force:
                # Rolling diagnostics are skipped on forced updates
                self.check_insertion_indices()
                if self.plot:
                    plot_indices(self.insertion_indices[-self.nlive:],
                                 self.nlive,
                                 plot_breakdown=False,
                                 filename=(f'{self.output}/diagnostics/'
                                           'insertion_indices_'
                                           f'{self.iteration}.png'))
            if self.plot:
                self.plot_state(filename=f'{self.output}/state.png')
                self.plot_trace(filename=f'{self.output}/trace.png')
            if self.uninformed_sampling:
                self.block_acceptance = 0.
                self.block_iteration = 0
        self.proposal.ns_acceptance = self.mean_block_acceptance
def checkpoint(self, periodic=False):
if not periodic:
self.checkpoint_iterations += [self.iteration]
self.sampling_time += \
(datetime.datetime.now() - self.sampling_start_time)
logger.critical('Checkpointing nested sampling')
safe_file_dump(self, self.resume_file, pickle, save_existing=True)
self.sampling_start_time = datetime.datetime.now()
def check_resume(self):
if self.resumed:
if self.uninformed_sampling is False:
self.check_proposal_switch(force=True)
if hasattr(self._flow_proposal, 'resume_populated'):
if (self._flow_proposal.resume_populated and
self._flow_proposal.indices):
self._flow_proposal.populated = True
logger.info('Resumed with populated pool')
self.resumed = False
def finalise(self):
logger.info('Finalising')
for i, p in enumerate(self.live_points):
self.state.increment(p['logL'], nlive=self.nlive-i)
self.nested_samples.append(p)
self.update_state(force=True)
self.state.finalise()
self.finalised = True
    def nested_sampling_loop(self):
        """Run the main nested sampling loop until convergence.

        Returns
        -------
        tuple
            ``(log_evidence, nested_samples)`` — except in prior-sampling
            mode, where only the initial live points are returned.
        """
        self.sampling_start_time = datetime.datetime.now()
        if not self.initialised:
            self.initialise(live_points=True)
        if self.prior_sampling:
            # Prior sampling mode: return the initial live points directly
            self.nested_samples = self.live_points.copy()
            if self.close_pool:
                self.model.close_pool()
            return self.nested_samples
        self.check_resume()
        if self.iteration:
            self.update_state()
        logger.critical('Starting nested sampling loop')
        while self.condition > self.tolerance:
            self.check_state()
            self.consume_sample()
            self.update_state()
            if self.iteration >= self.max_iteration:
                break
        # Only finalise when the loop ended because the tolerance was met
        if not self.finalised and (self.condition <= self.tolerance):
            self.finalise()
        logger.critical(f'Final evidence: {self.state.logZ:.3f} +/- '
                        f'{np.sqrt(self.state.info[-1] / self.nlive):.3f}')
        logger.critical('Information: {0:.2f}'.format(self.state.info[-1]))
        self.check_insertion_indices(rolling=False)
        self.checkpoint(periodic=True)
        if self.close_pool:
            self.model.close_pool()
        logger.info(f'Total sampling time: {self.sampling_time}')
        logger.info(f'Total training time: {self.training_time}')
        logger.info(f'Total population time: {self.proposal_population_time}')
        logger.info(
            f'Total likelihood evaluations: {self.likelihood_calls:3d}')
        logger.info(
            'Time spent evaluating likelihood: '
            f'{self.likelihood_evaluation_time}'
        )
        return self.state.logZ, np.array(self.nested_samples)
@classmethod
def resume(cls, filename, model, flow_config={}, weights_file=None):
logger.critical('Resuming NestedSampler from ' + filename)
with open(filename, 'rb') as f:
obj = pickle.load(f)
model.likelihood_evaluations += obj.likelihood_evaluations[-1]
obj.model = model
obj._uninformed_proposal.resume(model)
obj._flow_proposal.resume(model, flow_config, weights_file)
obj.resumed = True
return obj
def __getstate__(self):
state = self.__dict__.copy()
del state['model']
return state
    def __setstate__(self, state):
        # Restore attributes as-is; the model is re-attached in `resume`.
        self.__dict__ = state
| true
| true
|
1c47785da9d34f0b1c8a9845b5a3002f171b51df
| 8,815
|
py
|
Python
|
src/sensor_placement.py
|
tolgadur/Sensor-Placement
|
ad33477d1fb14052e1a9e58d149d0b8e767ea318
|
[
"MIT"
] | 3
|
2020-05-10T20:37:50.000Z
|
2022-03-31T08:25:23.000Z
|
src/sensor_placement.py
|
tolgadur/Sensor-Placement
|
ad33477d1fb14052e1a9e58d149d0b8e767ea318
|
[
"MIT"
] | null | null | null |
src/sensor_placement.py
|
tolgadur/Sensor-Placement
|
ad33477d1fb14052e1a9e58d149d0b8e767ea318
|
[
"MIT"
] | 2
|
2021-02-26T10:15:24.000Z
|
2021-06-07T11:11:08.000Z
|
#!/usr/bin/python
import numpy as np
import heapq
import pandas as pd
""" FILE NAME: 'sensor_placement.py'
DESCRIPTION: This file is implementing the class that will be used for sensor
positioning according to solution proposed by Krause, Singh and Guestrin (2008).
"""
class SensorPlacement:
    """Greedy sensor-placement algorithms from Krause, Singh & Guestrin (2008).

    All positions are identified by integer indices into the covariance
    matrix ``cov``. The public methods return the selected indices scaled
    by two (the callers' grid convention) and, when ``subdomain`` is given,
    also push the result onto the ``output`` queue.
    """

    @staticmethod
    def isMonotonic(cov, k, V, S, U):
        """ Check whether the mutual-information criterion is monotonic.

        For datasets > 2000 observations, non-monotonicity might lead to
        suboptimal results.

        Input:
        - cov: covariance matrix
        - k: number of Sensors to be placed
        - V: indices of all position
        - S: indices of all possible sensor positions
        - U: indices of all impossible sensor positions
        """
        # Fix: use integer dtype throughout -- float index arrays raise
        # IndexError inside np.ix_ in __conditionalVariance.
        A = np.array([], dtype=int)
        for j in range(k):
            S_A = np.setdiff1d(S, A).astype(int)
            for y in S_A:
                AHat = np.setdiff1d(V, np.append(A, [y])).astype(int)
                condition = SensorPlacement.__conditionalEntropy(cov, y, A) \
                    - SensorPlacement.__conditionalEntropy(cov, y, AHat)
                if condition < 0:
                    print(condition)
                    return False
        return True

    @staticmethod
    def __conditionalVariance(cov, y, A):
        """ Conditional variance of position y given observations at A. """
        var = cov[y, y] - (cov[np.ix_([y], A)]
                           @ np.linalg.inv(cov[np.ix_(A, A)])
                           @ cov[np.ix_(A, [y])])
        return var[0][0]

    @staticmethod
    def __conditionalEntropy(cov, y, A):
        """ Conditional (differential) entropy of y given A. """
        conditionalVariance = SensorPlacement.__conditionalVariance(cov, y, A)
        return 0.5 * np.log(2 * np.pi * conditionalVariance)

    @staticmethod
    def __localConditionalEntropy(cov, y, A, epsilon):
        """ Conditional entropy of y given the members of A whose
        covariance with y exceeds epsilon. """
        A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
        return SensorPlacement.__conditionalEntropy(cov, y, A_)

    @staticmethod
    def __localConditionalVariance(cov, y, A, epsilon):
        """ Conditional variance of y given the members of A whose
        covariance with y exceeds epsilon. """
        A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
        return SensorPlacement.__conditionalVariance(cov, y, A_)

    @staticmethod
    def __localSet(cov, y, A, epsilon):
        """ Return the set of points x in A for which K(y, x) > epsilon.

        Input:
        - cov: covariance matrix
        - epsilon: locality hyperparameter
        """
        return [x for x in A if cov[y, x] > epsilon]

    @staticmethod
    def naiveSensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        """ First approximation method from the 'Near-Optimal Sensor
        Placement' paper: plain greedy selection, O(k*n^4).

        Input:
        - cov: covariance matrix
        - k: number of Sensors to be placed
        - V: indices of all position
        - S: indices of all possible sensor positions
        - U: indices of all impossible sensor positions
        - A: integer array of already-placed sensor indices
        - subdomain, output: when set, the result is pushed onto the
          `output` queue tagged with `subdomain`
        """
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        for j in range(k):
            S_A = np.setdiff1d(S, A).astype(int)
            delta = np.array([])
            for y in S_A:
                AHat = np.setdiff1d(V, np.append(A, [y]))
                delta = np.append(delta,
                                  SensorPlacement.__conditionalVariance(cov, y, A)
                                  / SensorPlacement.__conditionalVariance(cov, y, AHat))
            # Greedily pick the position with the largest variance ratio
            y_star = S_A[np.argmax(delta)]
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', A, flush=True)
        if subdomain is not None:
            output.put((subdomain, 2 * A))
        return 2 * A

    @staticmethod
    def lazySensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        """ Second approximation method: lazy greedy selection using a
        priority queue, reducing complexity from O(k*n^4) to O(k*n^3).

        Inputs as in `naiveSensorPlacement`.
        """
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        delta = -1 * np.inf * np.ones((len(S), 1))
        heap = [(delta[i], S[i], -1) for i in range(len(delta))]
        heapq.heapify(heap)
        for j in range(k):
            while True:
                delta_star, y_star, current = heapq.heappop(heap)
                if current == j:
                    # Priority was already recomputed this round: accept it
                    break
                AHat = np.setdiff1d(V, np.append(A, [y_star]))
                criterion = SensorPlacement.__conditionalVariance(cov, y_star, A) / \
                    SensorPlacement.__conditionalVariance(cov, y_star, AHat)
                heapq.heappush(heap, (-1 * criterion, y_star, j))
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', 2 * A, flush=True)
        if subdomain is not None:
            output.put((subdomain, 2 * A))
        return 2 * A

    @staticmethod
    def localKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        """ Third approximation method: only local kernels (covariances
        above epsilon) are considered, reducing complexity to O(k*n).

        Inputs as in `naiveSensorPlacement`.
        """
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        epsilon = 1e-10
        delta = np.array([])
        N = S
        for y in S:
            V_y = np.setdiff1d(V, y).astype(int)
            delta = np.append(delta, cov[y, y] / SensorPlacement.__localConditionalVariance(cov, y, V_y, epsilon))
        for j in range(k):
            y_star = N[np.argmax(delta)]
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', A, flush=True)
            # (A dead reassignment of N via __localSet was removed here;
            # it was immediately overwritten by the setdiff1d below.)
            N = np.setdiff1d(S, A).astype(int)
            delta = np.array([])
            for y in N:
                AHat = np.setdiff1d(V, np.append(A, [y]))
                delta = np.append(delta,
                                  SensorPlacement.__localConditionalVariance(cov, y, A, epsilon)
                                  / SensorPlacement.__localConditionalVariance(cov, y, AHat, epsilon))
        if subdomain is not None:
            output.put((subdomain, 2 * A))
        return 2 * A

    @staticmethod
    def lazyLocalKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
        """ Mix of `lazySensorPlacement` and `localKernelPlacement`:
        lazy greedy selection restricted to local kernels.

        Inputs as in `naiveSensorPlacement`.
        """
        print('Algorithm is starting for subdomain', subdomain, flush=True)
        epsilon = 1e-10
        delta = -1 * np.inf * np.ones((len(S), 1))
        heap = [(delta[i], S[i], -1) for i in range(len(delta))]
        heapq.heapify(heap)
        for j in range(k):
            while True:
                delta_star, y_star, current = heapq.heappop(heap)
                if current == j:
                    break
                AHat = np.setdiff1d(V, np.append(A, [y_star]))
                criterion = SensorPlacement.__localConditionalVariance(cov, y_star, A, epsilon) / \
                    SensorPlacement.__localConditionalVariance(cov, y_star, AHat, epsilon)
                heapq.heappush(heap, (-1 * criterion, y_star, j))
            A = np.append(A, y_star).astype(int)
            print('subdomain ', subdomain, ': ', A, flush=True)
        if subdomain is not None:
            output.put((subdomain, 2 * A))
        return 2 * A
| 42.584541
| 128
| 0.569484
|
import numpy as np
import heapq
import pandas as pd
class SensorPlacement:
@staticmethod
def isMonotonic(cov, k, V, S, U):
A = np.array([])
for j in range(k):
S_A = np.setdiff1d(S, A).astype(int)
for y in S_A:
AHat = np.setdiff1d(V, np.append(A, [y]))
condition = SensorPlacement.__conditionalEntropy(cov, y, A) - SensorPlacement.__conditionalEntropy(cov, y, AHat)
if condition < 0:
print(condition)
return False
return True
@staticmethod
def __conditionalVariance(cov, y, A):
var = cov[y, y] - (cov[np.ix_([y], A)] @ np.linalg.inv(cov[np.ix_(A, A)]) @ cov[np.ix_(A, [y])])
return var[0][0]
@staticmethod
def __conditionalEntropy(cov, y, A):
conditionalVariance = SensorPlacement.__conditionalVariance(cov, y, A)
return 0.5 * np.log(2*np.pi*conditionalVariance)
@staticmethod
def __localConditionalEntropy(cov, y, A, epsilon):
A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
return SensorPlacement.__conditionalEntropy(cov, y, A_)
@staticmethod
def __localConditionalVariance(cov, y, A, epsilon):
A_ = SensorPlacement.__localSet(cov, y, A, epsilon)
return SensorPlacement.__conditionalVariance(cov, y, A_)
@staticmethod
def __localSet(cov, y, A, epsilon):
return [x for x in A if cov[y, x] > epsilon]
@staticmethod
def naiveSensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
for j in range(k):
S_A = np.setdiff1d(S, A).astype(int)
delta = np.array([])
for y in S_A:
AHat = np.setdiff1d(V, np.append(A, [y]))
delta = np.append(delta, SensorPlacement.__conditionalVariance(cov, y, A) / \
SensorPlacement.__conditionalVariance(cov, y, AHat))
y_star = S_A[np.argmax(delta)]
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', A, flush=True)
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
@staticmethod
def lazySensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
delta = -1 * np.inf * np.ones((len(S), 1))
heap = [(delta[i], S[i], -1) for i in range(len(delta))]
heapq.heapify(heap)
for j in range(k):
while True:
delta_star, y_star, current = heapq.heappop(heap)
if current == j:
break
AHat = np.setdiff1d(V, np.append(A, [y_star]))
criterion = SensorPlacement.__conditionalVariance(cov, y_star, A) / \
SensorPlacement.__conditionalVariance(cov, y_star, AHat)
heapq.heappush(heap, (-1 * criterion, y_star, j))
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', 2*A, flush=True)
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
@staticmethod
def localKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
epsilon = 1e-10
delta = np.array([]); N = S
for y in S:
V_y = np.setdiff1d(V, y).astype(int)
delta = np.append(delta, cov[y, y] / SensorPlacement.__localConditionalVariance(cov, y, V_y, epsilon))
for j in range(k):
y_star = N[np.argmax(delta)]
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', A, flush=True)
N = SensorPlacement.__localSet(cov, y_star, S, epsilon)
N = np.setdiff1d(S, A).astype(int)
delta = np.array([])
for y in N:
AHat = np.setdiff1d(V, np.append(A, [y]))
delta = np.append(delta, SensorPlacement.__localConditionalVariance(cov, y, A, epsilon) / \
SensorPlacement.__localConditionalVariance(cov, y, AHat, epsilon))
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
@staticmethod
def lazyLocalKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):
print('Algorithm is starting for subdomain', subdomain, flush=True)
A = A
epsilon = 1e-10
delta = -1 * np.inf * np.ones((len(S), 1))
heap = [(delta[i], S[i], -1) for i in range(len(delta))]
heapq.heapify(heap)
for j in range(k):
while True:
delta_star, y_star, current = heapq.heappop(heap)
if current == j:
break
AHat = np.setdiff1d(V, np.append(A, [y_star]))
criterion = SensorPlacement.__localConditionalVariance(cov, y_star, A, epsilon) / \
SensorPlacement.__localConditionalVariance(cov, y_star, AHat, epsilon)
heapq.heappush(heap, (-1 * criterion, y_star, j))
A = np.append(A, y_star).astype(int)
print('subdomain ', subdomain, ': ', A, flush=True)
if subdomain != None:
output.put((subdomain, 2*A))
return 2*A
| true
| true
|
1c4778cd6ee4e3e7a884ff4789b58f8fe5d8053a
| 1,178
|
py
|
Python
|
prime_numbers_test.py
|
mkiterian/prime-numbers
|
be8b3b1250ec8351964c2ef93f8d5e6463efcc7b
|
[
"MIT"
] | null | null | null |
prime_numbers_test.py
|
mkiterian/prime-numbers
|
be8b3b1250ec8351964c2ef93f8d5e6463efcc7b
|
[
"MIT"
] | null | null | null |
prime_numbers_test.py
|
mkiterian/prime-numbers
|
be8b3b1250ec8351964c2ef93f8d5e6463efcc7b
|
[
"MIT"
] | null | null | null |
import unittest
from prime_numbers import generate_prime_numbers
class PrimeNumberTest(unittest.TestCase):
    """Unit tests for prime_numbers.generate_prime_numbers(n)."""
    def test_n_is_an_integer(self):
        """A non-integer argument must raise TypeError."""
        with self.assertRaises(TypeError, msg='n is not an integer'):
            generate_prime_numbers('number')
    def test_if_number_is_a_positive_integer(self):
        """A negative n should return the explanatory error string."""
        self.assertEqual(generate_prime_numbers(-10), 'N should be a positive integer', msg='Number Should be a positive integer')
    def test_if_returned_value_is_a_list(self):
        """A valid n should yield a list."""
        self.assertIsInstance(generate_prime_numbers(10), list)
    def test_if_number_of_returned_numbers_is_correct(self):
        """There are exactly 5 primes below 11 (2, 3, 5, 7 -- and 11 itself,
        so the generator is apparently inclusive of n; confirm against the
        implementation)."""
        actual = len(generate_prime_numbers(11))
        expected = 5
        self.assertEqual(actual, expected, msg='Number of returned items is not as expected')
    def test_generates_correct_prime_numbers(self):
        """Primes strictly below 10 are [2, 3, 5, 7]."""
        actual = generate_prime_numbers(10)
        expected = [2,3,5,7]
        self.assertEqual(actual, expected, msg='Expected [2,3,5,7] when n is 10')
# Allow running the tests directly: `python prime_numbers_test.py`.
if __name__ == '__main__':
    unittest.main()
| 33.657143
| 124
| 0.77674
|
import unittest
from prime_numbers import generate_prime_numbers
class PrimeNumberTest(unittest.TestCase):
def test_n_is_an_integer(self):
with self.assertRaises(TypeError, msg='n is not an integer'):
generate_prime_numbers('number')
def test_if_number_is_a_positive_integer(self):
self.assertEqual(generate_prime_numbers(-10), 'N should be a positive integer', msg='Number Should be a positive integer')
def test_if_returned_value_is_a_list(self):
self.assertIsInstance(generate_prime_numbers(10), list)
def test_if_number_of_returned_numbers_is_correct(self):
actual = len(generate_prime_numbers(11))
expected = 5
self.assertEqual(actual, expected, msg='Number of returned items is not as expected')
def test_generates_correct_prime_numbers(self):
actual = generate_prime_numbers(10)
expected = [2,3,5,7]
self.assertEqual(actual, expected, msg='Expected [2,3,5,7] when n is 10')
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c4779a4e3f7663805d73bbf5c2232d96cc76f28
| 1,621
|
py
|
Python
|
axley/cogs/misc.py
|
1olipop/Axley
|
9ace6706be58c2a8e066a0dbcdcc337b34cc5da7
|
[
"Apache-2.0"
] | 18
|
2021-05-08T10:28:34.000Z
|
2021-12-30T16:44:19.000Z
|
axley/cogs/misc.py
|
vedrecide/Axley
|
9ace6706be58c2a8e066a0dbcdcc337b34cc5da7
|
[
"Apache-2.0"
] | 1
|
2021-07-05T13:07:20.000Z
|
2021-07-05T13:07:20.000Z
|
axley/cogs/misc.py
|
1olipop/Axley
|
9ace6706be58c2a8e066a0dbcdcc337b34cc5da7
|
[
"Apache-2.0"
] | 6
|
2021-06-01T15:31:10.000Z
|
2021-07-21T17:17:36.000Z
|
import discord
import psutil
import os
from discord.ext import commands
class Misc(commands.Cog):
    """Miscellaneous utility commands: ping, source link and bot stats."""

    def __init__(self, bot):
        self.bot = bot
        # Handle to this process, used to report RAM usage in `stats`.
        self.process = psutil.Process(os.getpid())

    @commands.command(name="Ping", description="Ping of the bot")
    @commands.guild_only()
    async def ping(self, ctx: commands.Context):
        """Reply with the current websocket latency in milliseconds."""
        await ctx.message.reply(
            "**Pong!** `{}ms`".format(round(self.bot.latency * 1000)),
            mention_author=False,
        )

    @commands.command(name="Source", description="Source code of Axley <3")
    @commands.guild_only()
    async def source(self, ctx: commands.Context):
        """Reply with an embed linking to the bot's GitHub repository."""
        embed = discord.Embed(
            color=0xD9E6D1, description=f"[Click Me!]({self.bot.github_repo})"
        )
        # Fix: user-facing typo "blindy" -> "blindly".
        embed.set_footer(text="Kindly go through the LICENSE file in the repository before blindly checking and copying the codes")
        await ctx.message.reply(embed=embed, mention_author=False)

    @commands.command(
        name="Stats",
        aliases=["Botstats", "Botinfo"],
        description="You can check bot statistics using this command",
    )
    @commands.guild_only()
    async def stats(self, ctx: commands.Context):
        """Reply with the bot's RAM usage and registered command count."""
        ram_usage = self.process.memory_full_info().rss / 1024 ** 2
        embed = discord.Embed(
            color=0xD9E6D1,
            description="> **RAM:** {:.2f} MB\n> **Commands:** {}\n".format(
                # len() of the command set directly; building a list of
                # names first was redundant.
                ram_usage, len(self.bot.commands)
            ),
        )
        await ctx.message.reply(embed=embed, mention_author=False)
def setup(bot):
    """Extension entry point: register the Misc cog on *bot*."""
    bot.add_cog(Misc(bot))
| 31.173077
| 130
| 0.623689
|
import discord
import psutil
import os
from discord.ext import commands
class Misc(commands.Cog):
    """Miscellaneous utility commands: ping, source link and bot stats."""
    def __init__(self, bot):
        self.bot = bot
        # Handle on our own process so `stats` can report memory usage.
        self.process = psutil.Process(os.getpid())
    @commands.command(name="Ping", description="Ping of the bot")
    @commands.guild_only()
    async def ping(self, ctx: commands.Context):
        """Reply with the current websocket latency in milliseconds."""
        await ctx.message.reply(
            "**Pong!** `{}ms`".format(round(self.bot.latency * 1000)),
            mention_author=False,
        )
    @commands.command(name="Source", description="Source code of Axley <3")
    @commands.guild_only()
    async def source(self, ctx: commands.Context):
        """Reply with an embed linking to the bot's GitHub repository."""
        embed = discord.Embed(
            color=0xD9E6D1, description=f"[Click Me!]({self.bot.github_repo})"
        )
        embed.set_footer(text="Kindly go through the LICENSE file in the repository before blindy checking and copying the codes")
        await ctx.message.reply(embed=embed, mention_author=False)
    @commands.command(
        name="Stats",
        aliases=["Botstats", "Botinfo"],
        description="You can check bot statistics using this command",
    )
    @commands.guild_only()
    async def stats(self, ctx: commands.Context):
        """Reply with the bot's RAM usage (MB) and registered command count."""
        # rss is in bytes; convert to mebibytes for display.
        ram_usage = self.process.memory_full_info().rss / 1024 ** 2
        embed = discord.Embed(
            color=0xD9E6D1,
            description="> **RAM:** {:.2f} MB\n> **Commands:** {}\n".format(
                ram_usage, len([a.name for a in self.bot.commands])
            ),
        )
        await ctx.message.reply(embed=embed, mention_author=False)
def setup(bot):
    """Extension entry point: register the Misc cog on *bot*."""
    bot.add_cog(Misc(bot))
| true
| true
|
1c477bb7d2693680a90d4f6220d45872d11fc4b0
| 1,663
|
py
|
Python
|
vm.py
|
Ccode-lang/CHex
|
f8138da241a8b96fae5691de7a9d789a9dbcbeb2
|
[
"MIT"
] | 1
|
2022-01-31T18:36:36.000Z
|
2022-01-31T18:36:36.000Z
|
vm.py
|
Ccode-lang/CHex
|
f8138da241a8b96fae5691de7a9d789a9dbcbeb2
|
[
"MIT"
] | null | null | null |
vm.py
|
Ccode-lang/CHex
|
f8138da241a8b96fae5691de7a9d789a9dbcbeb2
|
[
"MIT"
] | null | null | null |
import os
import sys
import codecs

# CHex virtual machine: reads a CHex binary named on the command line and
# interprets it one opcode at a time.
try:
    file = open(sys.argv[1], "rb")
except (IndexError, OSError):
    # IndexError: no argument given; OSError: path cannot be opened.
    print("File does not exist or is not given.")
    sys.exit()
with file:
    bytecode = file.read()

# Convert every byte to its hex-string form ("0x..") for dispatching.
hexcode = [hex(dec) for dec in bytecode]

# Magic-number check: a valid CHex binary begins with bytes 0x68 0x69 ("hi").
# BUG FIX: the original only printed the error and kept executing the file;
# abort instead.  Also guard against files shorter than the magic number.
if len(hexcode) < 2 or hexcode[0] != "0x68" or hexcode[1] != "0x69":
    print("Not a CHex binary file.")
    sys.exit()

# Instruction pointer; starts at 2 to skip the magic number.
offset = 2
# Sparse memory: integer address -> hex-string value.
memory = {}

# Fetch/decode/execute loop; terminates when the pointer runs off the end.
while True:
    try:
        opcode = hexcode[offset]  # renamed from `hex` to avoid shadowing the builtin
    except IndexError:
        # Ran past the last instruction: normal termination.
        sys.exit()
    if opcode == "0x0":
        # 0x0: no-op.
        offset += 1
    elif opcode == "0x1":
        # 0x1 <addr>: print the byte stored at <addr> as a character.
        hexval = memory[int(hexcode[offset + 1][2:], 16)][2:]
        if not len(hexval) == 2:
            # codecs hex decoding requires an even number of digits.
            hexval = "0" + hexval
        print(str(codecs.decode(hexval, "hex"), "utf-8"), end="")
        offset += 2
    elif opcode == "0x2":
        # 0x2 <target>: unconditional jump.
        offset = int(hexcode[offset + 1], 16)
    elif opcode == "0x3":
        # 0x3 <addr> <value>: store <value> at <addr>.
        memory[int(hexcode[offset + 1], 16)] = hexcode[offset + 2]
        offset += 3
    elif opcode == "0x4":
        # 0x4 <addr>: jump to the address held in memory at <addr>.
        offset = int(memory[int(hexcode[offset + 1], 16)], 16)
    elif opcode == "0x5":
        # 0x5 <a> <b> <target>: conditional jump when memory[a] == memory[b].
        if int(memory[int(hexcode[offset + 1], 16)], 16) == int(memory[int(hexcode[offset + 2], 16)], 16):
            offset = int(hexcode[offset + 3], 16)
        else:
            offset += 4
    else:
        print("Unknown hex at offset: " + str(offset))
        sys.exit()
| 27.262295
| 106
| 0.556825
|
import os
import sys
import codecs
# Open the CHex binary named on the command line.
try:
    file = open(sys.argv[1], "rb")
except:
    # Either no argument was given or the file could not be opened.
    print("File does not exist or is not given.")
    sys.exit()
bytecode = file.read()
file.close()
bytecode = list(bytecode)
# Convert every byte to its hex-string form ("0x..") for dispatching.
hexcode = []
for dec in bytecode:
    hexcode += [hex(dec)]
# Magic-number check: a valid file begins with bytes 0x68 0x69 ("hi").
# NOTE(review): execution continues even when this check fails —
# a sys.exit() call appears to be missing after the error message.
if hexcode[0] == "0x68" and hexcode[1] == "0x69":
    pass
else:
    print("Not a CHex bianary file.")
# Instruction pointer; starts at 2 to skip the magic number.
offset = 2
# Sparse memory: integer address -> hex-string value.
memory = {}
# Fetch/decode/execute loop; ends when the pointer runs off the program.
while True:
    try:
        hex = hexcode[offset]  # NOTE(review): shadows the builtin hex()
    except:
        sys.exit()
    # 0x0: no-op.
    if hex == "0x0":
        offset += 1
    # 0x1 <addr>: print the byte stored at <addr> as a character.
    elif hex == "0x1":
        hexval = memory[int(hexcode[offset + 1][2:], 16)][2:]
        if not len(hexval) == 2:
            # codecs hex decoding requires an even number of digits.
            hexval = "0" + hexval
        print(str(codecs.decode(hexval, "hex"), "utf-8"), end="")
        offset += 2
    # 0x2 <target>: unconditional jump.
    elif hex == "0x2":
        offset = int(hexcode[offset + 1], 16)
    # 0x3 <addr> <value>: store <value> at <addr>.
    elif hex == "0x3":
        memory[int(hexcode[offset + 1], 16)] = hexcode[offset + 2]
        offset += 3
    # 0x4 <addr>: jump to the address held in memory at <addr>.
    elif hex == "0x4":
        offset = int(memory[int(hexcode[offset + 1], 16)], 16)
    # 0x5 <a> <b> <target>: conditional jump when memory[a] == memory[b].
    elif hex == "0x5":
        if int(memory[int(hexcode[offset + 1], 16)], 16) == int(memory[int(hexcode[offset + 2], 16)], 16):
            offset = int(hexcode[offset + 3], 16)
        else:
            offset += 4
    else:
        print("Unknown hex at offset: " + str(offset))
        sys.exit()
| true
| true
|
1c477bc4296ae17f76dbbd9dad1779671e3a34ae
| 9,426
|
py
|
Python
|
_backend_api/migrations/0022_initial.py
|
Amechi101/indieapp
|
606c1346f65c343eb2cc8f7fba9d555b8c30a7fa
|
[
"MIT"
] | null | null | null |
_backend_api/migrations/0022_initial.py
|
Amechi101/indieapp
|
606c1346f65c343eb2cc8f7fba9d555b8c30a7fa
|
[
"MIT"
] | null | null | null |
_backend_api/migrations/0022_initial.py
|
Amechi101/indieapp
|
606c1346f65c343eb2cc8f7fba9d555b8c30a7fa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the Brand, Product and Location tables."""
    def forwards(self, orm):
        """Create the three ``_backend_api`` tables and emit create signals."""
        # Adding model 'Brand'
        db.create_table(u'_backend_api_brand', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('brand_name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True, blank=True)),
            ('brand_founded', self.gf('django.db.models.fields.IntegerField')(max_length=4, null=True)),
            ('brand_origin_city', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('brand_origin_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
            ('brand_about_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('brand_collection_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, unique=True, null=True, blank=True)),
            ('brand_logo', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_feature_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_about_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_collection_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_connect_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_website_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('brand_email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
            ('brand_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_location_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_email_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_website_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('menswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('womenswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Brand'])
        # Adding model 'Product'
        db.create_table(u'_backend_api_product', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('product_price', self.gf('django.db.models.fields.DecimalField')(default='0.0', max_digits=30, decimal_places=2)),
            ('product_image', self.gf('cloudinary.models.CloudinaryField')(max_length=255, null=True, blank=True)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
            ('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Product'])
        # Adding model 'Location'
        db.create_table(u'_backend_api_location', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('brand_address', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('brand_city', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('brand_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
            ('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Location'])
    def backwards(self, orm):
        """Drop the three tables created by :meth:`forwards`."""
        # Deleting model 'Brand'
        db.delete_table(u'_backend_api_brand')
        # Deleting model 'Product'
        db.delete_table(u'_backend_api_product')
        # Deleting model 'Location'
        db.delete_table(u'_backend_api_location')
    # Frozen ORM description used by South to reconstruct model state.
    models = {
        u'_backend_api.brand': {
            'Meta': {'object_name': 'Brand'},
            'brand_about_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'brand_about_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_collection_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'brand_collection_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_connect_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'brand_email_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_feature_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_founded': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True'}),
            'brand_location_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_logo': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'brand_origin_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'brand_origin_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'brand_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_website_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'menswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'womenswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'_backend_api.location': {
            'Meta': {'object_name': 'Location'},
            'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
            'brand_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'brand_city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'brand_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'_backend_api.product': {
            'Meta': {'object_name': 'Product'},
            'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'product_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'product_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'product_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'})
        }
    }
    complete_apps = ['_backend_api']
| 78.55
| 142
| 0.619032
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the Brand, Product and Location tables."""
    def forwards(self, orm):
        """Create the three ``_backend_api`` tables and emit create signals."""
        # Brand table.
        db.create_table(u'_backend_api_brand', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('brand_name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True, blank=True)),
            ('brand_founded', self.gf('django.db.models.fields.IntegerField')(max_length=4, null=True)),
            ('brand_origin_city', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('brand_origin_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
            ('brand_about_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('brand_collection_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, unique=True, null=True, blank=True)),
            ('brand_logo', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_feature_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_about_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_collection_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_connect_image', self.gf('cloudinary.models.CloudinaryField')(max_length=100, null=True, blank=True)),
            ('brand_website_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('brand_email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
            ('brand_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_location_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_email_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('brand_website_state', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('menswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('womenswear', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Brand'])
        # Product table (FK to Brand).
        db.create_table(u'_backend_api_product', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('product_price', self.gf('django.db.models.fields.DecimalField')(default='0.0', max_digits=30, decimal_places=2)),
            ('product_image', self.gf('cloudinary.models.CloudinaryField')(max_length=255, null=True, blank=True)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
            ('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Product'])
        # Location table (FK to Brand).
        db.create_table(u'_backend_api_location', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('brand_address', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('brand_city', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('brand_state', self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),
            ('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['_backend_api.Brand'], null=True)),
        ))
        db.send_create_signal(u'_backend_api', ['Location'])
    def backwards(self, orm):
        """Drop the three tables created by :meth:`forwards`."""
        db.delete_table(u'_backend_api_brand')
        db.delete_table(u'_backend_api_product')
        db.delete_table(u'_backend_api_location')
    # Frozen ORM description used by South to reconstruct model state.
    models = {
        u'_backend_api.brand': {
            'Meta': {'object_name': 'Brand'},
            'brand_about_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'brand_about_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_collection_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'brand_collection_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_connect_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'brand_email_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_feature_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_founded': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True'}),
            'brand_location_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_logo': ('cloudinary.models.CloudinaryField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'brand_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'brand_origin_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'brand_origin_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'brand_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_website_state': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'brand_website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'menswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'womenswear': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'_backend_api.location': {
            'Meta': {'object_name': 'Location'},
            'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
            'brand_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'brand_city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'brand_state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'_backend_api.product': {
            'Meta': {'object_name': 'Product'},
            'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['_backend_api.Brand']", 'null': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'product_image': ('cloudinary.models.CloudinaryField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'product_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'product_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'})
        }
    }
    complete_apps = ['_backend_api']
| true
| true
|
1c477c54727b29435a21a6019d3960076fc447e1
| 4,706
|
py
|
Python
|
nibabel/minc2.py
|
tobon/nibabel
|
ff2b5457207bb5fd6097b08f7f11123dc660fda7
|
[
"BSD-3-Clause"
] | null | null | null |
nibabel/minc2.py
|
tobon/nibabel
|
ff2b5457207bb5fd6097b08f7f11123dc660fda7
|
[
"BSD-3-Clause"
] | null | null | null |
nibabel/minc2.py
|
tobon/nibabel
|
ff2b5457207bb5fd6097b08f7f11123dc660fda7
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Preliminary MINC2 support
Use with care; I haven't tested this against a wide range of MINC files.
If you have a file that isn't read correctly, please send an example.
Test reading with something like::
import nibabel as nib
img = nib.load('my_funny.mnc')
data = img.get_data()
print(data.mean())
print(data.max())
print(data.min())
and compare against command line output of::
mincstats my_funny.mnc
"""
import numpy as np
from .optpkg import optional_package
h5py, have_h5py, setup_module = optional_package('h5py')
from .minc1 import Minc1File, Minc1Image, MincError
class Hdf5Bunch(object):
    """Expose the HDF5 attributes of *var* as plain Python attributes."""

    def __init__(self, var):
        # Copy every (name, value) pair from the variable's attribute
        # manager onto this instance for convenient dotted access.
        for attr_name, attr_value in var.attrs.items():
            setattr(self, attr_name, attr_value)
class Minc2File(Minc1File):
    ''' Class to wrap MINC2 format file

    Although it has some of the same methods as a ``Header``, we use
    this only when reading a MINC2 file, to pull out useful header
    information, and for the method of reading the data out
    '''
    def __init__(self, mincfile):
        # *mincfile* is an open ``h5py.File`` (or compatible mapping).
        self._mincfile = mincfile
        minc_part = mincfile['minc-2.0']
        # The whole image is the first of the entries in 'image'
        image = minc_part['image']['0']
        self._image = image['image']
        self._dim_names = self._get_dimensions(self._image)
        dimensions = minc_part['dimensions']
        self._dims = [Hdf5Bunch(dimensions[s]) for s in self._dim_names]
        # We don't currently support irregular spacing
        # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Dimension_variable_attributes
        for dim in self._dims:
            if dim.spacing != b'regular__':
                raise ValueError('Irregular spacing not supported')
        self._spatial_dims = [name for name in self._dim_names
                              if name.endswith('space')]
        self._image_max = image['image-max']
        self._image_min = image['image-min']

    def _get_dimensions(self, var):
        # Dimensions for a particular variable
        # Differs for MINC1 and MINC2 - see:
        # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Associating_HDF5_dataspaces_with_MINC_dimensions
        return var.attrs['dimorder'].split(',')

    def get_data_dtype(self):
        """Return numpy dtype of the raw image dataset."""
        return self._image.dtype

    def get_data_shape(self):
        """Return shape tuple of the raw image dataset."""
        return self._image.shape

    def _get_valid_range(self):
        ''' Return valid range for image data

        The valid range can come from the image 'valid_range' or
        failing that, from the data type range
        '''
        ddt = self.get_data_dtype()
        # NOTE(review): assumes an integer dtype — np.iinfo raises for floats.
        info = np.iinfo(ddt.type)
        try:
            valid_range = self._image.attrs['valid_range']
        except (AttributeError, KeyError):
            # BUG FIX: h5py attribute lookup raises KeyError when the
            # attribute is missing; the original only caught AttributeError,
            # so a file without 'valid_range' would crash here.
            valid_range = [info.min, info.max]
        else:
            if valid_range[0] < info.min or valid_range[1] > info.max:
                raise ValueError('Valid range outside input '
                                 'data type range')
        # BUG FIX: ``np.float`` was removed in NumPy 1.24; ``np.float64`` is
        # the type the alias referred to, so behavior is unchanged.
        return np.asarray(valid_range, dtype=np.float64)

    def get_scaled_data(self):
        """Return image data normalized via the image-min/image-max datasets."""
        data = np.asarray(self._image)
        return self._normalize(data)
class Minc2Image(Minc1Image):
    ''' Class for MINC2 images

    The MINC2 image class uses the default header type, rather than a
    specific MINC header type - and reads the relevant information from
    the MINC file on load.
    '''
    # MINC2 does not do compressed whole files
    _compressed_exts = ()

    @classmethod
    def from_file_map(klass, file_map):
        """Build an image instance from the open *file_map*."""
        image_holder = file_map['image']
        # h5py needs a real on-disk filename; in-memory holders won't do.
        if image_holder.filename is None:
            raise MincError('MINC2 needs filename for load')
        minc2 = Minc2File(h5py.File(image_holder.filename, 'r'))
        affine = minc2.get_affine()
        if affine.shape != (4, 4):
            raise MincError('Image does not have 3 spatial dimensions')
        # Assemble the default header from dtype / shape / zooms.
        header = klass.header_class(minc2.get_data_dtype(),
                                    minc2.get_data_shape(),
                                    minc2.get_zooms())
        data = klass.ImageArrayProxy(minc2)
        return klass(data, affine, header, extra=None, file_map=file_map)
# Module-level convenience alias matching the nibabel loader API.
load = Minc2Image.load
| 35.383459
| 132
| 0.632172
| true
| true
|
|
1c477c95b5afb69f25ef37ab384ae3c2d5026cb5
| 4,980
|
py
|
Python
|
tests/integrationtest/api/test_guards.py
|
RasmusGodske/eo-platform-utils
|
4d7c5bdc102d1eb7a5edff096f2783dbdbaa283d
|
[
"Apache-2.0"
] | null | null | null |
tests/integrationtest/api/test_guards.py
|
RasmusGodske/eo-platform-utils
|
4d7c5bdc102d1eb7a5edff096f2783dbdbaa283d
|
[
"Apache-2.0"
] | null | null | null |
tests/integrationtest/api/test_guards.py
|
RasmusGodske/eo-platform-utils
|
4d7c5bdc102d1eb7a5edff096f2783dbdbaa283d
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
from uuid import uuid4
import pytest
from flask.testing import FlaskClient
from origin.api import (
Application,
EndpointGuard,
TokenGuard,
ScopedGuard,
)
from .endpoints import EmptyEndpoint
class TestGuards:
    """
    Integration tests for endpoint guards (TokenGuard and ScopedGuard).
    """
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_no_token__should_return_status_401(
        self,
        guard: EndpointGuard,
        app: Application,
        client: FlaskClient,
    ):
        """
        A request without any Authorization header must be rejected with 401.
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        # -- Act -------------------------------------------------------------
        r = client.post('/something')
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 401
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_invalid_token__should_return_status_401(
        self,
        guard: EndpointGuard,
        app: Application,
        client: FlaskClient,
    ):
        """
        A request with a malformed bearer token must be rejected with 401.
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': 'Bearer: NOT-A-VALID-TOKEN'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 401
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_valid_token__should_return_status_200(
        self,
        guard: EndpointGuard,
        app: Application,
        client: FlaskClient,
        valid_token_encoded: str,
    ):
        """
        A request with a valid token (carrying 'scope1') must pass both guards.
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 200
    def test__token_missing_required_scope__should_return_status_401(
        self,
        app: Application,
        client: FlaskClient,
        valid_token_encoded: str,
    ):
        """
        A valid token lacking the endpoint's required scope is rejected (401).
        """
        # -- Arrange ---------------------------------------------------------
        required_scope = str(uuid4())  # Something random
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[ScopedGuard(required_scope)],
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 401
    @pytest.mark.parametrize('guards', [
        [ScopedGuard('scope1')],
        [ScopedGuard('scope2')],
        [ScopedGuard('scope1', 'scope2')],
        [TokenGuard(), ScopedGuard('scope1')],
        [TokenGuard(), ScopedGuard('scope1', 'scope2')],
        [TokenGuard(), ScopedGuard('scope1'), ScopedGuard('scope2')],
    ])
    def test__token_has_required_scope__should_return_status_200(
        self,
        guards: List[EndpointGuard],
        app: Application,
        client: FlaskClient,
        valid_token_encoded: str,
    ):
        """
        A valid token carrying the required scope(s) passes any guard combo.
        """
        # -- Arrange ---------------------------------------------------------
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=guards,
        )
        # -- Act -------------------------------------------------------------
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        # -- Assert ----------------------------------------------------------
        assert r.status_code == 200
| 25.9375
| 78
| 0.41245
|
from typing import List
from uuid import uuid4
import pytest
from flask.testing import FlaskClient
from origin.api import (
Application,
EndpointGuard,
TokenGuard,
ScopedGuard,
)
from .endpoints import EmptyEndpoint
class TestGuards:
    """Integration tests for endpoint guards (TokenGuard and ScopedGuard)."""
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_no_token__should_return_status_401(
        self,
        guard: EndpointGuard,
        app: Application,
        client: FlaskClient,
    ):
        """A request without any Authorization header must be rejected (401)."""
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        r = client.post('/something')
        assert r.status_code == 401
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_invalid_token__should_return_status_401(
        self,
        guard: EndpointGuard,
        app: Application,
        client: FlaskClient,
    ):
        """A request with a malformed bearer token must be rejected (401)."""
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        r = client.post(
            path='/something',
            headers={'Authorization': 'Bearer: NOT-A-VALID-TOKEN'},
        )
        assert r.status_code == 401
    @pytest.mark.parametrize('guard', [
        TokenGuard(),
        ScopedGuard('scope1'),
    ])
    def test__provide_valid_token__should_return_status_200(
        self,
        guard: EndpointGuard,
        app: Application,
        client: FlaskClient,
        valid_token_encoded: str,
    ):
        """A request with a valid token (carrying 'scope1') passes both guards."""
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[guard],
        )
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        assert r.status_code == 200
    def test__token_missing_required_scope__should_return_status_401(
        self,
        app: Application,
        client: FlaskClient,
        valid_token_encoded: str,
    ):
        """A valid token lacking the endpoint's required scope is rejected (401)."""
        # Random scope guarantees the fixture token cannot carry it.
        required_scope = str(uuid4())
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=[ScopedGuard(required_scope)],
        )
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        assert r.status_code == 401
    @pytest.mark.parametrize('guards', [
        [ScopedGuard('scope1')],
        [ScopedGuard('scope2')],
        [ScopedGuard('scope1', 'scope2')],
        [TokenGuard(), ScopedGuard('scope1')],
        [TokenGuard(), ScopedGuard('scope1', 'scope2')],
        [TokenGuard(), ScopedGuard('scope1'), ScopedGuard('scope2')],
    ])
    def test__token_has_required_scope__should_return_status_200(
        self,
        guards: List[EndpointGuard],
        app: Application,
        client: FlaskClient,
        valid_token_encoded: str,
    ):
        """A valid token carrying the required scope(s) passes any guard combo."""
        app.add_endpoint(
            method='POST',
            path='/something',
            endpoint=EmptyEndpoint(),
            guards=guards,
        )
        r = client.post(
            path='/something',
            headers={'Authorization': f'Bearer: {valid_token_encoded}'},
        )
        assert r.status_code == 200
| true
| true
|
1c477d2e6f7e2a1431cc5681d3d4bbd7036d06ed
| 191
|
py
|
Python
|
alphapept/__init__.py
|
enryH/alphapept
|
a4a1155b820f3567e21a872e0883e653661efe2b
|
[
"Apache-2.0"
] | null | null | null |
alphapept/__init__.py
|
enryH/alphapept
|
a4a1155b820f3567e21a872e0883e653661efe2b
|
[
"Apache-2.0"
] | null | null | null |
alphapept/__init__.py
|
enryH/alphapept
|
a4a1155b820f3567e21a872e0883e653661efe2b
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.4.0"
__requirements__ = {
"": "requirements/requirements.txt",
"develop": "requirements/requirements_develop.txt",
"gui": "requirements/requirements_gui.txt",
}
| 23.875
| 55
| 0.696335
|
__version__ = "0.4.0"
__requirements__ = {
"": "requirements/requirements.txt",
"develop": "requirements/requirements_develop.txt",
"gui": "requirements/requirements_gui.txt",
}
| true
| true
|
1c477dc103178022d9d4cec538afb84e72df6950
| 167
|
py
|
Python
|
django_chatserver/chat/routing.py
|
zhiqiyu/Random-Web
|
10b89776fbcdaa012e1f42a49a050d1b397b73a2
|
[
"MIT"
] | null | null | null |
django_chatserver/chat/routing.py
|
zhiqiyu/Random-Web
|
10b89776fbcdaa012e1f42a49a050d1b397b73a2
|
[
"MIT"
] | null | null | null |
django_chatserver/chat/routing.py
|
zhiqiyu/Random-Web
|
10b89776fbcdaa012e1f42a49a050d1b397b73a2
|
[
"MIT"
] | null | null | null |
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer.as_asgi()),
]
| 18.555556
| 79
| 0.718563
|
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer.as_asgi()),
]
| true
| true
|
1c477e610890926de828f933fc42e26ec8d369e3
| 83
|
py
|
Python
|
MachineLearningToolkitCore/Loss/__init__.py
|
showintime/MachineLearningToolkit
|
cb265f8b0d3ca5aa16ad92cdbe74e138b5f56023
|
[
"Apache-2.0"
] | null | null | null |
MachineLearningToolkitCore/Loss/__init__.py
|
showintime/MachineLearningToolkit
|
cb265f8b0d3ca5aa16ad92cdbe74e138b5f56023
|
[
"Apache-2.0"
] | null | null | null |
MachineLearningToolkitCore/Loss/__init__.py
|
showintime/MachineLearningToolkit
|
cb265f8b0d3ca5aa16ad92cdbe74e138b5f56023
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 22:43:31 2019
@author: ZWH
"""
| 10.375
| 35
| 0.542169
| true
| true
|
|
1c477f84e4323ce0c780a036d579746b5abba31d
| 522
|
py
|
Python
|
dynabuffers-python/tests/usecase/Schema03Test.py
|
leftshiftone/dynabuffers
|
c3e94c56989be3df87b50b8d9e17d1ea86199ede
|
[
"Apache-2.0"
] | 2
|
2019-10-28T12:28:01.000Z
|
2020-07-07T12:25:40.000Z
|
dynabuffers-python/tests/usecase/Schema03Test.py
|
leftshiftone/dynabuffers
|
c3e94c56989be3df87b50b8d9e17d1ea86199ede
|
[
"Apache-2.0"
] | 1
|
2021-12-21T07:35:22.000Z
|
2021-12-21T07:35:22.000Z
|
dynabuffers-python/tests/usecase/Schema03Test.py
|
leftshiftone/dynabuffers
|
c3e94c56989be3df87b50b8d9e17d1ea86199ede
|
[
"Apache-2.0"
] | 1
|
2020-03-19T09:19:43.000Z
|
2020-03-19T09:19:43.000Z
|
import os
import unittest
from antlr4 import FileStream
from dynabuffers.Dynabuffers import Dynabuffers
class Schema03Test(unittest.TestCase):
root_dir = os.path.dirname(os.path.realpath(__file__))
def test_parse(self):
engine = Dynabuffers.parse(FileStream(self.root_dir + "/schema03.dbs"))
map = engine.deserialize(engine.serialize({"results": [{"text":"hello world"}]}))
self.assertEqual(map, {"results": [{"text":"hello world"}]})
if __name__ == "__main__":
unittest.main()
| 24.857143
| 89
| 0.697318
|
import os
import unittest
from antlr4 import FileStream
from dynabuffers.Dynabuffers import Dynabuffers
class Schema03Test(unittest.TestCase):
root_dir = os.path.dirname(os.path.realpath(__file__))
def test_parse(self):
engine = Dynabuffers.parse(FileStream(self.root_dir + "/schema03.dbs"))
map = engine.deserialize(engine.serialize({"results": [{"text":"hello world"}]}))
self.assertEqual(map, {"results": [{"text":"hello world"}]})
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c47802de2045227fcff56755ff71d4d4d7c6eba
| 150
|
py
|
Python
|
hubspot/cms/performance/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 1
|
2020-11-12T08:46:32.000Z
|
2020-11-12T08:46:32.000Z
|
hubspot/cms/performance/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
hubspot/cms/performance/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.cms.performance.api.default_api import DefaultApi
| 21.428571
| 62
| 0.826667
|
from __future__ import absolute_import
from hubspot.cms.performance.api.default_api import DefaultApi
| true
| true
|
1c47816a5c703047ace0c887f2f265050774570e
| 1,174
|
py
|
Python
|
dir_test/test_me.py
|
splbio/pytestdoc
|
08a8ee1a4014bb78169ee4fc41cc6b722032826e
|
[
"BSD-2-Clause"
] | 9
|
2015-07-08T16:25:32.000Z
|
2021-04-15T10:50:12.000Z
|
dir_test/test_me.py
|
splbio/pytestdoc
|
08a8ee1a4014bb78169ee4fc41cc6b722032826e
|
[
"BSD-2-Clause"
] | 1
|
2015-08-18T06:53:50.000Z
|
2015-10-11T04:55:41.000Z
|
dir_test/test_me.py
|
splbio/pytestdoc
|
08a8ee1a4014bb78169ee4fc41cc6b722032826e
|
[
"BSD-2-Clause"
] | 2
|
2019-04-04T08:44:13.000Z
|
2021-02-22T08:12:03.000Z
|
import json
import pytestdoc
WHAT_IS_THIS = True
def times(x, y):
return x * y
TEST_CATEGORY="derp"
@pytestdoc.tattr_redmine_feature(7474)
@pytestdoc.tattr_redmine_bug(7475, 1776)
@pytestdoc.tattr_incomplete
@pytestdoc.tattr_category("herp")
@pytestdoc.tattr_doc("""
This is the *documentation* for my function
It tests the following things:
- if derps are herps
- all fives are half of 10
""")
def test_positive():
assert times(5,5) == 25
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
@pytestdoc.tattr_doc("""Test that this works when second item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_secondnegative():
assert times(3,-12) == -36
@pytestdoc.tattr_doc("""Test that this works when both items are negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_bothnegative():
assert times(-12,-12) == 144
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
| 24.978723
| 77
| 0.736797
|
import json
import pytestdoc
WHAT_IS_THIS = True
def times(x, y):
return x * y
TEST_CATEGORY="derp"
@pytestdoc.tattr_redmine_feature(7474)
@pytestdoc.tattr_redmine_bug(7475, 1776)
@pytestdoc.tattr_incomplete
@pytestdoc.tattr_category("herp")
@pytestdoc.tattr_doc("""
This is the *documentation* for my function
It tests the following things:
- if derps are herps
- all fives are half of 10
""")
def test_positive():
assert times(5,5) == 25
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
@pytestdoc.tattr_doc("""Test that this works when second item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_secondnegative():
assert times(3,-12) == -36
@pytestdoc.tattr_doc("""Test that this works when both items are negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_bothnegative():
assert times(-12,-12) == 144
@pytestdoc.tattr_doc("""Test that this works when first item is negative""")
@pytestdoc.tattr_category("herp:negatives")
def test_firstnegative():
assert times(-2,5) == -10
| true
| true
|
1c4781b885c055266febe549972d98ad995a452c
| 2,740
|
py
|
Python
|
aiida_defect/calculations.py
|
unkcpz/aiida-defect
|
592c1d8dd8130b06d06b543d5e5d35286afa63a3
|
[
"MIT"
] | 1
|
2021-02-18T07:20:02.000Z
|
2021-02-18T07:20:02.000Z
|
aiida_defect/calculations.py
|
unkcpz/aiida-defect
|
592c1d8dd8130b06d06b543d5e5d35286afa63a3
|
[
"MIT"
] | null | null | null |
aiida_defect/calculations.py
|
unkcpz/aiida-defect
|
592c1d8dd8130b06d06b543d5e5d35286afa63a3
|
[
"MIT"
] | null | null | null |
"""
Calculations provided by aiida_defect.
Register calculations via the "aiida.calculations" entry point in setup.json.
"""
from __future__ import absolute_import
import six
from aiida.common import datastructures
from aiida.engine import CalcJob
from aiida.orm import SinglefileData
from aiida.plugins import DataFactory
DiffParameters = DataFactory('defect')
class DiffCalculation(CalcJob):
"""
AiiDA calculation plugin wrapping the diff executable.
Simple AiiDA plugin wrapper for 'diffing' two files.
"""
@classmethod
def define(cls, spec):
"""Define inputs and outputs of the calculation."""
# yapf: disable
super(DiffCalculation, cls).define(spec)
spec.input('metadata.options.resources', valid_type=dict, default={'num_machines': 1, 'num_mpiprocs_per_machine': 1})
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='defect')
spec.input('metadata.options.output_filename', valid_type=six.string_types, default='patch.diff')
spec.input('parameters', valid_type=DiffParameters, help='Command line parameters for diff')
spec.input('file1', valid_type=SinglefileData, help='First file to be compared.')
spec.input('file2', valid_type=SinglefileData, help='Second file to be compared.')
spec.output('defect', valid_type=SinglefileData, help='diff between file1 and file2.')
spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')
def prepare_for_submission(self, folder):
"""
Create input files.
:param folder: an `aiida.common.folders.Folder` where the plugin should temporarily place all files needed by
the calculation.
:return: `aiida.common.datastructures.CalcInfo` instance
"""
codeinfo = datastructures.CodeInfo()
codeinfo.cmdline_params = self.inputs.parameters.cmdline_params(
file1_name=self.inputs.file1.filename,
file2_name=self.inputs.file2.filename)
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self.metadata.options.output_filename
codeinfo.withmpi = self.inputs.metadata.options.withmpi
# Prepare a `CalcInfo` to be returned to the engine
calcinfo = datastructures.CalcInfo()
calcinfo.codes_info = [codeinfo]
calcinfo.local_copy_list = [
(self.inputs.file1.uuid, self.inputs.file1.filename, self.inputs.file1.filename),
(self.inputs.file2.uuid, self.inputs.file2.filename, self.inputs.file2.filename),
]
calcinfo.retrieve_list = [self.metadata.options.output_filename]
return calcinfo
| 40.895522
| 125
| 0.708394
|
from __future__ import absolute_import
import six
from aiida.common import datastructures
from aiida.engine import CalcJob
from aiida.orm import SinglefileData
from aiida.plugins import DataFactory
DiffParameters = DataFactory('defect')
class DiffCalculation(CalcJob):
@classmethod
def define(cls, spec):
super(DiffCalculation, cls).define(spec)
spec.input('metadata.options.resources', valid_type=dict, default={'num_machines': 1, 'num_mpiprocs_per_machine': 1})
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='defect')
spec.input('metadata.options.output_filename', valid_type=six.string_types, default='patch.diff')
spec.input('parameters', valid_type=DiffParameters, help='Command line parameters for diff')
spec.input('file1', valid_type=SinglefileData, help='First file to be compared.')
spec.input('file2', valid_type=SinglefileData, help='Second file to be compared.')
spec.output('defect', valid_type=SinglefileData, help='diff between file1 and file2.')
spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')
def prepare_for_submission(self, folder):
codeinfo = datastructures.CodeInfo()
codeinfo.cmdline_params = self.inputs.parameters.cmdline_params(
file1_name=self.inputs.file1.filename,
file2_name=self.inputs.file2.filename)
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self.metadata.options.output_filename
codeinfo.withmpi = self.inputs.metadata.options.withmpi
calcinfo = datastructures.CalcInfo()
calcinfo.codes_info = [codeinfo]
calcinfo.local_copy_list = [
(self.inputs.file1.uuid, self.inputs.file1.filename, self.inputs.file1.filename),
(self.inputs.file2.uuid, self.inputs.file2.filename, self.inputs.file2.filename),
]
calcinfo.retrieve_list = [self.metadata.options.output_filename]
return calcinfo
| true
| true
|
1c4782033a601ea0f3de81c2b2d2f03f95b1884b
| 2,006
|
py
|
Python
|
examples/gui/__main__.py
|
vcokltfre/aionasa
|
8cd1d496d7373c806e38eb75e0103e4377da0875
|
[
"MIT"
] | 4
|
2020-11-26T10:49:53.000Z
|
2021-05-18T17:56:08.000Z
|
examples/gui/__main__.py
|
vcokltfre/aionasa
|
8cd1d496d7373c806e38eb75e0103e4377da0875
|
[
"MIT"
] | 1
|
2021-01-07T01:41:27.000Z
|
2021-01-07T01:41:27.000Z
|
examples/gui/__main__.py
|
vcokltfre/aionasa
|
8cd1d496d7373c806e38eb75e0103e4377da0875
|
[
"MIT"
] | 1
|
2021-08-19T18:49:53.000Z
|
2021-08-19T18:49:53.000Z
|
import argparse
import asyncio
import os
from aionasa.epic.api import EPIC
from aionasa.utils import date_strptime
from gui import open_gui
__doc__ = "Download some images from NASA's EPIC archive and open them in a gui browser."
usage = "python -m aionasa.epic [-h] [--date DATE] [--collection COLLECTION] img_folder"
def argument_parser():
"""Generates the parser used by the aionasa.epic.__main__ script."""
parser = argparse.ArgumentParser(description=__doc__, usage=usage)
parser.add_argument(
'--date', '-d', type=date_strptime, default=None,
help="Format: YYYY-MM-DD"
)
parser.add_argument(
'--collection', '-c', default='natural',
help="Collection to get images from. Should be 'natural', 'enhanced', or 'natural,enhanced'"
)
parser.add_argument(
'img_folder',
help='Directory to download the images to.'
)
return parser
async def _task(coro, arg):
"""Safely execute an async function"""
try:
await coro(arg)
except:
pass
async def setup(date, path, collection):
"""Downloads all EPIC images in a collection to a directory given by the 'path' parameter."""
# make image directory if necessary
if not os.path.exists(path):
os.mkdir(path)
async with EPIC() as epic:
# API request, gets images (urls etc)
images = []
if 'natural' in collection:
images += await epic.natural_images(date)
if 'enhanced' in collection:
images += await epic.enhanced_images(date)
# download the images asynchronously
print('downloading', len(images), 'images.')
tasks = [_task(image.save, path + '/' + image.filename) for image in images]
await asyncio.gather(*tasks)
async def main():
await setup(args.date, args.img_folder, args.collection.split(','))
open_gui(args.img_folder)
if __name__ == '__main__':
args = argument_parser().parse_args()
asyncio.run(main())
| 28.657143
| 100
| 0.653539
|
import argparse
import asyncio
import os
from aionasa.epic.api import EPIC
from aionasa.utils import date_strptime
from gui import open_gui
__doc__ = "Download some images from NASA's EPIC archive and open them in a gui browser."
usage = "python -m aionasa.epic [-h] [--date DATE] [--collection COLLECTION] img_folder"
def argument_parser():
parser = argparse.ArgumentParser(description=__doc__, usage=usage)
parser.add_argument(
'--date', '-d', type=date_strptime, default=None,
help="Format: YYYY-MM-DD"
)
parser.add_argument(
'--collection', '-c', default='natural',
help="Collection to get images from. Should be 'natural', 'enhanced', or 'natural,enhanced'"
)
parser.add_argument(
'img_folder',
help='Directory to download the images to.'
)
return parser
async def _task(coro, arg):
try:
await coro(arg)
except:
pass
async def setup(date, path, collection):
# make image directory if necessary
if not os.path.exists(path):
os.mkdir(path)
async with EPIC() as epic:
# API request, gets images (urls etc)
images = []
if 'natural' in collection:
images += await epic.natural_images(date)
if 'enhanced' in collection:
images += await epic.enhanced_images(date)
# download the images asynchronously
print('downloading', len(images), 'images.')
tasks = [_task(image.save, path + '/' + image.filename) for image in images]
await asyncio.gather(*tasks)
async def main():
await setup(args.date, args.img_folder, args.collection.split(','))
open_gui(args.img_folder)
if __name__ == '__main__':
args = argument_parser().parse_args()
asyncio.run(main())
| true
| true
|
1c4782238324e2454e74dfd129755995c5656e98
| 11,993
|
py
|
Python
|
aesara/graph/utils.py
|
danhphan/aesara
|
5a0fb0e731358d54648823170acd911cc1534d6a
|
[
"BSD-3-Clause"
] | null | null | null |
aesara/graph/utils.py
|
danhphan/aesara
|
5a0fb0e731358d54648823170acd911cc1534d6a
|
[
"BSD-3-Clause"
] | null | null | null |
aesara/graph/utils.py
|
danhphan/aesara
|
5a0fb0e731358d54648823170acd911cc1534d6a
|
[
"BSD-3-Clause"
] | null | null | null |
import linecache
import sys
import traceback
from abc import ABCMeta
from io import StringIO
from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, TypeVar, Union
if TYPE_CHECKING:
from aesara.graph.basic import Apply, Variable
T = TypeVar("T", bound=Union["Apply", "Variable"])
def simple_extract_stack(
f=None, limit: Optional[int] = None, skips: Optional[Sequence[str]] = None
) -> List[Tuple[Optional[str], int, str, Optional[str]]]:
"""This is traceback.extract_stack from python 2.7 with this change:
- Comment the update of the cache.
- Skip internal stack trace level.
The update of the cache call os.stat to verify is the cache is up
to date. This take too much time on cluster.
limit - The number of stack level we want to return. If None, mean
all what we can.
skips - partial path of stack level we don't want to keep and count.
When we find one level that isn't skipped, we stop skipping.
"""
if skips is None:
skips = []
if f is None:
f = sys._getframe().f_back
if limit is None:
if hasattr(sys, "tracebacklimit"):
limit = sys.tracebacklimit
trace: List[Tuple[Optional[str], int, str, Optional[str]]] = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
# linecache.checkcache(filename)
line: Optional[str] = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
f = f.f_back
# Just skip inner level
if len(trace) == 0:
rm = False
for p in skips:
# Julian: I added the 'tests' exception together with
# Arnaud. Otherwise, we'd lose the stack trace during
# in our test cases (e.g. in test_opt.py). We're not
# sure this is the right way to do it though.
if p in filename and "tests" not in filename:
rm = True
break
if rm:
continue
trace.append((filename, lineno, name, line))
n = n + 1
trace.reverse()
return trace
def add_tag_trace(thing: T, user_line: Optional[int] = None) -> T:
"""Add tag.trace to a node or variable.
The argument is returned after being affected (inplace).
Parameters
----------
thing
The object where we add .tag.trace.
user_line
The max number of user line to keep.
Notes
-----
We also use config.traceback__limit for the maximum number of stack level
we look.
"""
from aesara.configdefaults import config
if user_line is None:
user_line = config.traceback__limit
if user_line == -1:
user_line = None
skips = [
"aesara/tensor/",
"aesara\\tensor\\",
"aesara/compile/",
"aesara\\compile\\",
"aesara/graph/",
"aesara\\graph\\",
"aesara/scalar/basic.py",
"aesara\\scalar\\basic.py",
"aesara/sandbox/",
"aesara\\sandbox\\",
"aesara/scan/",
"aesara\\scan\\",
"aesara/sparse/",
"aesara\\sparse\\",
"aesara/typed_list/",
"aesara\\typed_list\\",
]
if config.traceback__compile_limit > 0:
skips = []
tr = simple_extract_stack(limit=user_line, skips=skips)
# Different python version use different sementic for
# limit. python 2.7 include the call to extrack_stack. The -1 get
# rid of it.
if tr:
thing.tag.trace = [tr]
else:
thing.tag.trace = tr
return thing
def get_variable_trace_string(v):
sio = StringIO()
# For backward compatibility with old trace
tr = getattr(v.tag, "trace", [])
if isinstance(tr, list) and len(tr) > 0:
print(" \nBacktrace when that variable is created:\n", file=sio)
# The isinstance is needed to handle old pickled trace
if isinstance(tr[0], tuple):
traceback.print_list(v.tag.trace, sio)
else:
# Print separate message for each element in the list of
# backtraces
for idx, subtr in enumerate(tr):
if len(tr) > 1:
print(f"trace {int(idx)}", file=sio)
traceback.print_list(subtr, sio)
return sio.getvalue()
class InconsistencyError(Exception):
"""
This exception should be thrown by listeners to FunctionGraph when the
graph's state is invalid.
"""
class MissingInputError(Exception):
"""
A symbolic input needed to compute the outputs is missing.
"""
def __init__(self, *args, **kwargs):
if kwargs:
# The call to list is needed for Python 3
assert list(kwargs.keys()) == ["variable"]
error_msg = get_variable_trace_string(kwargs["variable"])
if error_msg:
args = args + (error_msg,)
s = "\n".join(args) # Needed to have the new line print correctly
super().__init__(s)
class TestValueError(Exception):
"""Base exception class for all test value errors."""
class MethodNotDefined(Exception):
"""
To be raised by functions defined as part of an interface.
When the user sees such an error, it is because an important interface
function has been left out of an implementation class.
"""
class MetaType(ABCMeta):
def __new__(cls, name, bases, dct):
props = dct.get("__props__", None)
if props is not None:
if not isinstance(props, tuple):
raise TypeError("__props__ has to be a tuple")
if not all(isinstance(p, str) for p in props):
raise TypeError("elements of __props__ have to be strings")
def _props(self):
"""
Tuple of properties of all attributes
"""
return tuple(getattr(self, a) for a in props)
dct["_props"] = _props
def _props_dict(self):
"""This return a dict of all ``__props__`` key-> value.
This is useful in optimization to swap op that should have the
same props. This help detect error that the new op have at
least all the original props.
"""
return {a: getattr(self, a) for a in props}
dct["_props_dict"] = _props_dict
if "__hash__" not in dct:
def __hash__(self):
return hash((type(self), tuple(getattr(self, a) for a in props)))
dct["__hash__"] = __hash__
if "__eq__" not in dct:
def __eq__(self, other):
return type(self) == type(other) and tuple(
getattr(self, a) for a in props
) == tuple(getattr(other, a) for a in props)
dct["__eq__"] = __eq__
if "__str__" not in dct:
if len(props) == 0:
def __str__(self):
return f"{self.__class__.__name__}"
else:
def __str__(self):
return "{}{{{}}}".format(
self.__class__.__name__,
", ".join(
"{}={!r}".format(p, getattr(self, p)) for p in props
),
)
dct["__str__"] = __str__
return super().__new__(cls, name, bases, dct)
class MetaObject(metaclass=MetaType):
__slots__: List = []
def __ne__(self, other):
return not self == other
class Scratchpad:
def clear(self):
self.__dict__.clear()
def __update__(self, other):
self.__dict__.update(other.__dict__)
return self
def __str__(self):
return "scratchpad" + str(self.__dict__)
def __repr__(self):
return "scratchpad" + str(self.__dict__)
def info(self):
print(f"<aesara.graph.utils.scratchpad instance at {id(self)}>")
for k, v in self.__dict__.items():
print(f" {k}: {v}")
class ValidatingScratchpad(Scratchpad):
"""This `Scratchpad` validates attribute values."""
def __init__(self, attr, attr_filter):
super().__init__()
object.__setattr__(self, "attr", attr)
object.__setattr__(self, "attr_filter", attr_filter)
def __setattr__(self, attr, obj):
if getattr(self, "attr", None) == attr:
obj = self.attr_filter(obj)
return object.__setattr__(self, attr, obj)
class D:
def __init__(self, **d):
self.__dict__.update(d)
class AssocList:
"""An associative list.
This class is like a `dict` that accepts unhashable keys by using an
assoc list for internal use only
"""
def __init__(self):
self._dict = {}
self._list = []
def __getitem__(self, item):
return self.get(item, None)
def __setitem__(self, item, value):
try:
self._dict[item] = value
except Exception:
for i, (key, val) in enumerate(self._list):
if key == item:
self._list[i] = (item, value)
return
self._list.append((item, value))
def __delitem__(self, item):
try:
if item in self._dict:
del self._dict[item]
return
except TypeError as e:
assert "unhashable type" in str(e)
for i, (key, val) in enumerate(self._list):
if key == item:
del self._list[i]
return
raise KeyError(item)
def discard(self, item):
try:
if item in self._dict:
del self._dict[item]
return
except TypeError as e:
assert "unhashable type" in str(e)
for i, (key, val) in enumerate(self._list):
if key == item:
del self._list[i]
return
def get(self, item, default):
try:
return self._dict[item]
except Exception:
for item2, value in self._list:
try:
if item == item2:
return value
if item.equals(item2):
return value
except Exception:
if item is item2:
return value
return default
def clear(self):
self._dict = {}
self._list = []
def __repr__(self):
return f"AssocList({self._dict}, {self._list})"
def toposort(prereqs_d):
"""
Sorts prereqs_d.keys() topologically.
prereqs_d[x] contains all the elements that must come before x
in the ordering.
"""
# all1 = set(prereqs_d.keys())
# all2 = set()
# for x, y in prereqs_d.items():
# all2.update(y)
# print all1.difference(all2)
seq = []
done = set()
postreqs_d = {}
for x, prereqs in prereqs_d.items():
for prereq in prereqs:
postreqs_d.setdefault(prereq, set()).add(x)
next = {k for k in prereqs_d if not prereqs_d[k]}
while next:
bases = next
next = set()
for x in bases:
done.add(x)
seq.append(x)
for x in bases:
for postreq in postreqs_d.get(x, []):
if not prereqs_d[postreq].difference(done):
next.add(postreq)
if len(prereqs_d) != len(seq):
raise Exception(
"Cannot sort topologically: there might be cycles, "
"prereqs_d does not have a key for each element or "
"some orderings contain invalid elements."
)
return seq
| 28.622912
| 85
| 0.547736
|
import linecache
import sys
import traceback
from abc import ABCMeta
from io import StringIO
from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, TypeVar, Union
if TYPE_CHECKING:
from aesara.graph.basic import Apply, Variable
T = TypeVar("T", bound=Union["Apply", "Variable"])
def simple_extract_stack(
f=None, limit: Optional[int] = None, skips: Optional[Sequence[str]] = None
) -> List[Tuple[Optional[str], int, str, Optional[str]]]:
if skips is None:
skips = []
if f is None:
f = sys._getframe().f_back
if limit is None:
if hasattr(sys, "tracebacklimit"):
limit = sys.tracebacklimit
trace: List[Tuple[Optional[str], int, str, Optional[str]]] = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
line: Optional[str] = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
f = f.f_back
if len(trace) == 0:
rm = False
for p in skips:
# in our test cases (e.g. in test_opt.py). We're not
if p in filename and "tests" not in filename:
rm = True
break
if rm:
continue
trace.append((filename, lineno, name, line))
n = n + 1
trace.reverse()
return trace
def add_tag_trace(thing: T, user_line: Optional[int] = None) -> T:
from aesara.configdefaults import config
if user_line is None:
user_line = config.traceback__limit
if user_line == -1:
user_line = None
skips = [
"aesara/tensor/",
"aesara\\tensor\\",
"aesara/compile/",
"aesara\\compile\\",
"aesara/graph/",
"aesara\\graph\\",
"aesara/scalar/basic.py",
"aesara\\scalar\\basic.py",
"aesara/sandbox/",
"aesara\\sandbox\\",
"aesara/scan/",
"aesara\\scan\\",
"aesara/sparse/",
"aesara\\sparse\\",
"aesara/typed_list/",
"aesara\\typed_list\\",
]
if config.traceback__compile_limit > 0:
skips = []
tr = simple_extract_stack(limit=user_line, skips=skips)
if tr:
thing.tag.trace = [tr]
else:
thing.tag.trace = tr
return thing
def get_variable_trace_string(v):
sio = StringIO()
tr = getattr(v.tag, "trace", [])
if isinstance(tr, list) and len(tr) > 0:
print(" \nBacktrace when that variable is created:\n", file=sio)
if isinstance(tr[0], tuple):
traceback.print_list(v.tag.trace, sio)
else:
for idx, subtr in enumerate(tr):
if len(tr) > 1:
print(f"trace {int(idx)}", file=sio)
traceback.print_list(subtr, sio)
return sio.getvalue()
class InconsistencyError(Exception):
class MissingInputError(Exception):
def __init__(self, *args, **kwargs):
if kwargs:
assert list(kwargs.keys()) == ["variable"]
error_msg = get_variable_trace_string(kwargs["variable"])
if error_msg:
args = args + (error_msg,)
s = "\n".join(args)
super().__init__(s)
class TestValueError(Exception):
class MethodNotDefined(Exception):
class MetaType(ABCMeta):
def __new__(cls, name, bases, dct):
props = dct.get("__props__", None)
if props is not None:
if not isinstance(props, tuple):
raise TypeError("__props__ has to be a tuple")
if not all(isinstance(p, str) for p in props):
raise TypeError("elements of __props__ have to be strings")
def _props(self):
return tuple(getattr(self, a) for a in props)
dct["_props"] = _props
def _props_dict(self):
return {a: getattr(self, a) for a in props}
dct["_props_dict"] = _props_dict
if "__hash__" not in dct:
def __hash__(self):
return hash((type(self), tuple(getattr(self, a) for a in props)))
dct["__hash__"] = __hash__
if "__eq__" not in dct:
def __eq__(self, other):
return type(self) == type(other) and tuple(
getattr(self, a) for a in props
) == tuple(getattr(other, a) for a in props)
dct["__eq__"] = __eq__
if "__str__" not in dct:
if len(props) == 0:
def __str__(self):
return f"{self.__class__.__name__}"
else:
def __str__(self):
return "{}{{{}}}".format(
self.__class__.__name__,
", ".join(
"{}={!r}".format(p, getattr(self, p)) for p in props
),
)
dct["__str__"] = __str__
return super().__new__(cls, name, bases, dct)
class MetaObject(metaclass=MetaType):
__slots__: List = []
def __ne__(self, other):
return not self == other
class Scratchpad:
def clear(self):
self.__dict__.clear()
def __update__(self, other):
self.__dict__.update(other.__dict__)
return self
def __str__(self):
return "scratchpad" + str(self.__dict__)
def __repr__(self):
return "scratchpad" + str(self.__dict__)
def info(self):
print(f"<aesara.graph.utils.scratchpad instance at {id(self)}>")
for k, v in self.__dict__.items():
print(f" {k}: {v}")
class ValidatingScratchpad(Scratchpad):
    """A `Scratchpad` that filters assignments to one designated attribute.

    Writes to the attribute named ``attr`` are passed through ``attr_filter``
    before being stored; every other attribute behaves like a plain
    `Scratchpad` attribute.
    """

    def __init__(self, attr, attr_filter):
        super().__init__()
        # Bypass our own __setattr__ so these two bookkeeping fields are
        # stored unfiltered.
        object.__setattr__(self, "attr", attr)
        object.__setattr__(self, "attr_filter", attr_filter)

    def __setattr__(self, attr, obj):
        filtered = (
            self.attr_filter(obj) if getattr(self, "attr", None) == attr else obj
        )
        return object.__setattr__(self, attr, filtered)
class D:
    """Lightweight attribute container built from keyword arguments."""

    def __init__(self, **d):
        for name, value in d.items():
            setattr(self, name, value)
class AssocList:
    """An associative map that also accepts unhashable keys.

    Hashable keys live in a dict for O(1) access; unhashable keys fall back
    to a linear (key, value) list scanned with equality/identity checks.
    """

    def __init__(self):
        # Fast path for hashable keys.
        self._dict = {}
        # (key, value) pairs for unhashable keys.
        self._list = []

    def __getitem__(self, item):
        # Missing keys yield None rather than raising KeyError.
        return self.get(item, None)

    def __setitem__(self, item, value):
        try:
            self._dict[item] = value
        except Exception:
            # Unhashable key: update an existing list entry or append one.
            for i, (key, val) in enumerate(self._list):
                if key == item:
                    self._list[i] = (item, value)
                    return
            self._list.append((item, value))

    def __delitem__(self, item):
        # Try the dict first; a TypeError means the key is unhashable and
        # must be searched for in the list instead.
        try:
            if item in self._dict:
                del self._dict[item]
                return
        except TypeError as e:
            assert "unhashable type" in str(e)
        for i, (key, val) in enumerate(self._list):
            if key == item:
                del self._list[i]
                return
        raise KeyError(item)

    def discard(self, item):
        # Like __delitem__, but silently ignores missing keys.
        try:
            if item in self._dict:
                del self._dict[item]
                return
        except TypeError as e:
            assert "unhashable type" in str(e)
        for i, (key, val) in enumerate(self._list):
            if key == item:
                del self._list[i]
                return

    def get(self, item, default):
        try:
            return self._dict[item]
        except Exception:
            # Fall back to a linear scan over the unhashable entries.
            for item2, value in self._list:
                try:
                    if item == item2:
                        return value
                    # NOTE(review): `equals` looks like a project-specific
                    # equality hook on keys — confirm against callers.
                    if item.equals(item2):
                        return value
                except Exception:
                    # Keys without usable __eq__/equals: identity match.
                    if item is item2:
                        return value
            return default

    def clear(self):
        self._dict = {}
        self._list = []

    def __repr__(self):
        return f"AssocList({self._dict}, {self._list})"
def toposort(prereqs_d):
    """Topologically sort the keys of ``prereqs_d`` (Kahn's algorithm).

    Parameters
    ----------
    prereqs_d : dict
        Maps each element to the set of elements that must precede it.
        Every element must appear as a key, even with an empty set.

    Returns
    -------
    list
        The keys of ``prereqs_d`` in an order where every element follows
        all of its prerequisites. Order within a "level" is unspecified
        (it comes from set iteration).

    Raises
    ------
    Exception
        If the ordering cannot cover every key (cycles, missing keys, or
        prerequisites that are not keys).
    """
    seq = []
    done = set()

    # Invert the prerequisite relation: element -> elements that depend on it.
    postreqs_d = {}
    for node, prereqs in prereqs_d.items():
        for prereq in prereqs:
            postreqs_d.setdefault(prereq, set()).add(node)

    # Start from the nodes with no prerequisites. (Renamed from `next`,
    # which shadowed the builtin.)
    frontier = {node for node in prereqs_d if not prereqs_d[node]}
    while frontier:
        batch = frontier
        frontier = set()
        # Emit the whole batch before scanning dependents, so the
        # difference() test below sees every member of the batch as done.
        for node in batch:
            done.add(node)
            seq.append(node)
        for node in batch:
            for postreq in postreqs_d.get(node, []):
                if not prereqs_d[postreq].difference(done):
                    frontier.add(postreq)

    if len(prereqs_d) != len(seq):
        raise Exception(
            "Cannot sort topologically: there might be cycles, "
            "prereqs_d does not have a key for each element or "
            "some orderings contain invalid elements."
        )
    return seq
| true
| true
|
1c47827600948d2b87bf218f91b1371ea3cbb3eb
| 3,488
|
py
|
Python
|
fhirclient/models/coding.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/models/coding.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/models/coding.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Coding) on 2019-01-22.
# 2019, SMART Health IT.
from . import element
class Coding(element.Element):
    """ A reference to a code defined by a terminology system.
    """

    resource_type = "Coding"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ Symbol in syntax defined by the system.
        Type `str`. """

        self.display = None
        """ Representation defined by the system.
        Type `str`. """

        self.system = None
        """ Identity of the terminology system.
        Type `str`. """

        self.userSelected = None
        """ If this coding was chosen directly by the user.
        Type `bool`. """

        self.version = None
        """ Version of the system - if relevant.
        Type `str`. """

        super(Coding, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each entry: (name, json name, type, is_list, of_many, not_optional).
        js = super(Coding, self).elementProperties()
        js.extend([
            ("code", "code", str, False, None, False),
            ("display", "display", str, False, None, False),
            ("system", "system", str, False, None, False),
            ("userSelected", "userSelected", bool, False, None, False),
            ("version", "version", str, False, None, False),
        ])
        return js
| 12.966543
| 103
| 0.294725
|
from . import element
class Coding(element.Element):
resource_type = "Coding"
def __init__(self, jsondict=None, strict=True):
self.code = None
self.display = None
self.system = None
self.userSelected = None
self.version = None
super(Coding, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Coding, self).elementProperties()
js.extend([
("code", "code", str, False, None, False),
("display", "display", str, False, None, False),
("system", "system", str, False, None, False),
("userSelected", "userSelected", bool, False, None, False),
("version", "version", str, False, None, False),
])
return js
| true
| true
|
1c4782c8740a735f5aa4dfddb82ffcdda14f7ceb
| 689
|
py
|
Python
|
packages/cuda/cuSolverDn.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | 3
|
2019-08-02T21:02:47.000Z
|
2021-09-08T13:59:43.000Z
|
packages/cuda/cuSolverDn.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
packages/cuda/cuSolverDn.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Lijun Zhu
# california institute of technology
# (c) 2016-2019 all rights reserved
#
# externals
from . import cuda as libcuda # the extension
from .Matrix import Matrix
class cuSolverDn:
    """
    Wrapper for cusolverDn lib utilities.

    Both helpers are namespace-style utilities: they take no instance
    state, so they are declared as ``@staticmethod`` (the original
    definitions lacked ``self`` and would raise TypeError when called on
    an instance).
    """

    @staticmethod
    def create_handle():
        """
        Create and return a new cusolverDn handle.
        """
        handle = libcuda.cusolverDnCreate()
        return handle

    @staticmethod
    def get_current_handle():
        """
        Return the cusolverDn handle of the current device, initializing
        device 0 if no device has been selected yet.
        """
        # default device handle
        from . import manager
        if manager.current_device is None:
            manager.device(0)
        handle = manager.current_device.cusolverdn_handle
        return handle
# end of file
| 20.264706
| 57
| 0.628447
|
from . import cuda as libcuda
from .Matrix import Matrix
class cuSolverDn:
def create_handle():
handle = libcuda.cusolverDnCreate()
return handle
def get_current_handle():
from . import manager
if manager.current_device is None:
manager.device(0)
handle = manager.current_device.cusolverdn_handle
return handle
| true
| true
|
1c478487162412bd45e541a0e720bee7c90272d6
| 42,379
|
py
|
Python
|
tensorflow/python/framework/func_graph.py
|
fraudies/tensorflow
|
a42423e302b71893bbd24aa896869941013c07fb
|
[
"Apache-2.0"
] | 3
|
2016-08-20T04:02:24.000Z
|
2019-04-21T06:18:41.000Z
|
tensorflow/python/framework/func_graph.py
|
fraudies/tensorflow
|
a42423e302b71893bbd24aa896869941013c07fb
|
[
"Apache-2.0"
] | 59
|
2019-06-17T09:37:49.000Z
|
2022-01-19T01:21:34.000Z
|
tensorflow/python/framework/func_graph.py
|
fraudies/tensorflow
|
a42423e302b71893bbd24aa896869941013c07fb
|
[
"Apache-2.0"
] | 1
|
2019-10-31T09:22:30.000Z
|
2019-10-31T09:22:30.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# This is to avoid a circular dependency:
# function -> func_graph
function = LazyLoader("function", globals(),
                      "tensorflow.python.eager.function")
def_function = LazyLoader(
    "def_function", globals(),
    "tensorflow.python.eager.def_function")

# Collections a FuncGraph both reads from and writes to in the outer graph;
# all other collections are only read (see FuncGraph.__init__).
WHITELIST_COLLECTIONS = [
    ops.GraphKeys.GLOBAL_VARIABLES,
    ops.GraphKeys.LOCAL_VARIABLES,
    ops.GraphKeys.TRAINABLE_VARIABLES,
    variable_scope._VARSTORE_KEY,  # pylint: disable=protected-access
    variable_scope._VARSCOPESTORE_KEY  # pylint: disable=protected-access
]
class UnknownArgument(object):
  """Signifies an argument which is not currently handled."""
def convert_structure_to_signature(structure, arg_names=None):
  """Convert a potentially nested structure to a signature.

  Args:
    structure: Structure to convert, where top level collection is a list or a
      tuple.
    arg_names: Optional list of arguments that has equal number of elements as
      `structure` and is used for naming corresponding TensorSpecs.

  Returns:
    Identical structure that has TensorSpec objects instead of Tensors and
    UnknownArgument instead of any unsupported types.
  """
  # Composite tensors are flattened into their component tensors first.
  structure = composite_tensor.replace_composites_with_components(structure)

  def encode_arg(arg, path):
    """A representation for this argument, for converting into signatures."""
    if isinstance(arg, ops.Tensor):
      user_specified_name = None
      try:
        user_specified_name = compat.as_str(
            arg.op.get_attr("_user_specified_name"))
      except ValueError:
        # Op carries no "_user_specified_name" attr; fall back to path name.
        pass

      if path and user_specified_name and user_specified_name != path[0]:
        # The user has explicitly named the argument differently than the name
        # of the function argument.
        name = user_specified_name
      else:
        name = "/".join([str(p) for p in path])
      return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
    if isinstance(arg, (
        int,
        float,
        bool,
        type(None),
        dtypes.DType,
        tensor_spec.TensorSpec,
    )):
      # Plain serializable leaf values pass through unchanged.
      return arg
    return UnknownArgument()

  # We are using the flattened paths to name the TensorSpecs. We need an
  # explicit name for them downstream.
  flattened = nest.flatten_with_tuple_paths(structure, expand_composites=True)
  if arg_names:
    if len(arg_names) != len(structure):
      raise ValueError(
          "Passed in arg_names don't match actual signature (%s)." % arg_names)
    # Replace all top-level names with their actual arg_names. If a path before
    # was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
    flattened = [
        ((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
    ]

  mapped = [encode_arg(arg, path) for path, arg in flattened]
  return nest.pack_sequence_as(structure, mapped, expand_composites=True)
class FuncGraph(ops.Graph):
  """Graph representing a function body.

  Attributes:
    name: The name of the function.
    inputs: Placeholder tensors representing the inputs to this function. The
      tensors are in this FuncGraph. This represents "regular" inputs as well as
      captured inputs (i.e. the values of self.captures), with the regular
      inputs coming first.
    outputs: Tensors that will be returned by this function. The tensors are in
      this FuncGraph.
    control_outputs: Operations that must be executed before the function
      represented by this graph can be said to have been executed.
    structured_input_signature: A tuple of (args, kwargs), which are both
      possibly-nested python objects that were received by this function. Note
      that these structures might contain Python `None`s.
    structured_outputs: A possibly-nested python object which will be returned
      by this function. The Tensors in this structure are the same as those of
      self.outputs. Note that this structure might contain Python `None`s.
    variables: Variables that should be watched during function execution.
    outer_graph: The graph this function is defined in. May be another FuncGraph
      or the global default Graph.
    captures: Maps external tensor -> internal tensor (i.e. input placeholder).
      The entries are in the order they were captured.
    control_captures: Set of external ops on which this graph has a control
      dependency.
    seed: The graph-level random seed.
    capture_by_value: If True, the func graph will capture Variables by value
      instead of reference.
  """

  def __init__(self, name, collections=None, capture_by_value=None):
    """Construct a new FuncGraph.

    The graph will inherit its graph key, collections, seed, and distribution
    strategy stack from the current context or graph.

    Args:
      name: the name of the function.
      collections: a dictionary of collections this FuncGraph should start
        with. If not specified (None), the FuncGraph will read (but not write
        to) the outer graph's collections that are not whitelisted, and both
        read and write to the outer graph's collections that are whitelisted.
        The current whitelisted collections are the global variables, the
        local variables, and the trainable variables.
        Defaults to None.
      capture_by_value: An optional boolean. If True, the func graph will
        capture Variables by value instead of reference. By default inherit
        from outer graphs, and failing that will default to False.
    """
    super(FuncGraph, self).__init__()

    self.name = name
    self.inputs = []
    self.outputs = []
    self.control_outputs = []
    self.control_captures = set()
    self.structured_input_signature = None
    self.structured_outputs = None
    # Variables are held weakly; see the `variables` property below.
    self._weak_variables = []
    self._watched_variables = weakref.WeakSet()
    self.outer_graph = ops.get_default_graph()
    # Ordered so that inputs line up with the order tensors were captured.
    self.captures = py_collections.OrderedDict()

    # Inherit capture-by-value from outer graph.
    if capture_by_value is not None:
      self.capture_by_value = capture_by_value
    elif self.outer_graph is not None and isinstance(
        self.outer_graph, FuncGraph):
      self.capture_by_value = self.outer_graph.capture_by_value
    else:
      self.capture_by_value = False

    self._building_function = True
    # Map from resource tensor name to last op (in program order) which uses
    # this tensor. Used to enforce that execution order matches program order
    # for resource tensors.
    self._last_op_using_resource_tensor = {}

    graph = self.outer_graph

    if context.executing_eagerly():
      self.seed = context.global_seed()
      # [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of
      # any None op_seed for random_op in the function, in which case we end up
      # using function seed, which could be unintended behavior for the op.
      self._seed_used = False
    else:
      self.seed = graph.seed
      self._seed_used = False
      # TODO(allenl): Figure out if we can remove colocation stack
      # specialization (currently used in cond_v2), here and in the cache key.
      self._colocation_stack = graph._colocation_stack.copy()  # pylint: disable=protected-access

    if collections is None:
      # Non-whitelisted collections are copied (read-only view of the outer
      # graph); whitelisted ones are shared by reference so writes propagate.
      for collection_name in graph.get_all_collection_keys():
        if collection_name not in WHITELIST_COLLECTIONS:
          self._collections[collection_name] = graph.get_collection(
              collection_name)
      for collection_name in WHITELIST_COLLECTIONS:
        self._collections[collection_name] = graph.get_collection_ref(
            collection_name)
    else:
      self._collections = collections

  def __str__(self):
    return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))

  def watch_variable(self, v):
    """Marks the variable v as accessed while building this graph."""
    # Walks the chain of enclosing FuncGraphs (note: rebinds `self`), so the
    # access is recorded on every graph up to the outermost non-FuncGraph.
    while self is not None and isinstance(self, FuncGraph):
      self._watched_variables.add(v)
      self = self.outer_graph

  def control_dependencies(self, control_inputs):
    """Handles control dependencies.

    FuncGraph wraps Graph's control_dependencies logic by first filtering out
    any external tensors / operations and storing them in the graph's
    control_captures member. Any consumers of this function graph must then
    decide how to handle the control captures.

    Args:
      control_inputs: A list of `Operation` or `Tensor` objects which
        must be executed or computed before running the operations
        defined in the context. Can also be `None` to clear the control
        dependencies.

    Returns:
      A context manager that specifies control dependencies for all
      operations constructed within the context.

    Raises:
      TypeError: If `control_inputs` is not a list of `Operation` or
        `Tensor` objects.
    """
    if control_inputs is None:
      return super(FuncGraph, self).control_dependencies(control_inputs)

    filtered_control_inputs = []
    for c in control_inputs:
      # Check for _UnreadVariable
      if (isinstance(c, ops.IndexedSlices) or
          (hasattr(c, "_handle") and hasattr(c, "op"))):
        c = c.op
      graph_element = ops._as_graph_element(c)  # pylint: disable=protected-access
      if graph_element is None:
        graph_element = c
      if graph_element is not None and getattr(
          graph_element, "graph", None) is not self:
        # External dependency: record it instead of passing it through.
        self.control_captures.add(graph_element)
      else:
        filtered_control_inputs.append(graph_element)
    return super(FuncGraph, self).control_dependencies(filtered_control_inputs)

  def as_default(self):
    outer_cm = super(FuncGraph, self).as_default()

    @tf_contextlib.contextmanager
    def inner_cm():
      """Context manager for copying distribute.Strategy scope information."""
      graph = ops.get_default_graph()

      # pylint: disable=protected-access
      # TODO(b/112906995, nareshmodi): distribution strategy depends on
      # inheriting this stack from the default graph even in eager mode. Maybe
      # it should be part of the eager context? This would also allow us to
      # remove a get_default_graph() call from the function cache lookup.
      old_strategy_stack = self._distribution_strategy_stack
      self._distribution_strategy_stack = list(
          graph._distribution_strategy_stack)

      # We ignore device placements from any outer scopes while tracing the
      # function when possible, to avoid hard-coding them in the function
      # graph. "Default" placements come from the PartitionedCallOp's placement,
      # so that the same trace of the Python function may be placed on several
      # different devices and saved functions may be placed on new devices when
      # restored.
      old_device_stack = self._device_function_stack
      if context.executing_eagerly():
        if self._distribution_strategy_stack:
          self._add_device_to_stack(context.context().device_name)
      else:
        if (self._distribution_strategy_stack
            or device_stack_has_callable(graph._device_function_stack)):
          # Hard-code devices from device functions in the function body
          self._device_function_stack = graph._device_function_stack.copy()

      old_creator_stack = self._variable_creator_stack
      self._variable_creator_stack = graph._variable_creator_stack
      # Inherit the graph key, since this is used for matching variables in
      # optimizers.
      old_graph_key = self._graph_key
      self._graph_key = graph._graph_key
      # pylint: enable=protected-access

      with outer_cm as g:
        try:
          yield g
        finally:
          # Restore every overridden stack, even if the body raised.
          self._distribution_strategy_stack = old_strategy_stack
          self._device_function_stack = old_device_stack
          self._variable_creator_stack = old_creator_stack
          self._graph_key = old_graph_key
    return inner_cm()

  @property
  def output_types(self):
    # Dtypes of the function's return tensors, in output order.
    return [t.dtype for t in self.outputs]

  @property
  def output_shapes(self):
    # Shapes of the function's return tensors, in output order.
    return [t.shape for t in self.outputs]

  @property
  def variables(self):
    """A list of variables accessed by this FuncGraph.

    Note that functions keep only weak references to variables. Calling the
    function after a variable it accesses has been deleted is an error.

    Yields:
      Strong references to variables accessed by this FuncGraph.
    """
    for weak_v in self._weak_variables:
      v = weak_v()
      if v is None:
        raise AssertionError(
            "Called a function referencing variables which have been deleted. "
            "This likely means that function-local variables were created and "
            "not referenced elsewhere in the program. This is generally a "
            "mistake; consider storing variables in an object attribute on "
            "first call.")
      yield v

  @variables.setter
  def variables(self, var_list):
    # Store only weak refs so the FuncGraph does not keep variables alive.
    self._weak_variables = [weakref.ref(v) for v in var_list]

  def _capture_by_value(
      self,
      op_type,
      inputs,
      dtypes,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    # When capturing by value, do the read outside
    reverse_captures = dict((v, k) for k, v in self.captures.items())
    uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
    with ops.init_scope():
      if context.executing_eagerly():
        attr_list = ("dtype", int(attrs["dtype"].type))
        value, = execute.execute(
            compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
            context.context())
      else:
        op = ops.get_default_graph().create_op(
            op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
            op_def, compute_shapes, compute_device)
        value = op.outputs[0]
    captured_value = self.capture(value)
    return captured_value.op

  def create_op(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Like Graph.create_op, except handles external input tensors.

    This overload adds functionality to create_op to "capture" any external
    input tensors, i.e. tensors from the eager context or outer function graphs
    if this is a nested function. See `capture` for more information.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of
        the tensors that the operation consumes. By default, uses the base
        `DType` of each input in `inputs`. Operations that expect
        reference-typed inputs must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
        computed).
      compute_device: (Optional.) If True, device functions will be executed
        to compute the device property of the Operation.

    Returns:
      An `Operation` object.
    """
    if self.capture_by_value and op_type in ["ReadVariableOp",
                                             "ResourceGather"]:
      return self._capture_by_value(
          op_type, inputs, dtypes, input_types, name, attrs, op_def,
          compute_shapes, compute_device)

    # This capturing logic interacts poorly with control flow contexts which
    # want to replace inputs of ops far too late in the process. This can lead
    # the context to get confused and try to create an Enter for an Enter. We
    # can detect this here and skip the additional Enter which can confuse loop
    # validation logic.
    if op_type == "Enter" and inputs[0].op.type == "Enter":
      if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
        return inputs[0].op
    # Calling AddValue on the control flow contexts to force creation of the
    # backward accumulators in the original graph before we create placeholders
    # to capture the inputs.
    ctxt = ops.get_default_graph()._control_flow_context  # pylint: disable=protected-access
    for i, inp in enumerate(inputs):
      # TPU Estimator defines a control flow context with no AddValue method.
      if ctxt is not None and hasattr(ctxt, "AddValue"):
        inp = ctxt.AddValue(inp)
      inp = self.capture(inp)
      inputs[i] = inp
    return super(FuncGraph, self).create_op(
        op_type, inputs, dtypes, input_types, name, attrs, op_def,
        compute_device=compute_device)

  def capture(self, tensor, name=None):
    """Captures `tensor` if it's external to this graph.

    If `tensor` is from a different graph, returns a placeholder for it.
    `tensor` and the placeholder will appear in self.captures, and the
    placeholder will appear in self.inputs. Multiple calls to this method with
    the same `tensor` argument will return the same placeholder. If `tensor` is
    from this graph, returns `tensor`.

    Args:
      tensor: Tensor. May be from this FuncGraph or a different graph.
      name: Optional name if a placeholder is created.

    Returns:
      Tensor from this FuncGraph.
    """
    # Note: _forward_func_graph is currently only set when building the gradient
    # graph graph of a defun call. If the backwards graph tries to capture
    # tensors those will be captured first in the forward graph. This
    # makes sure that any tensor needed by a custom_gradient is correctly
    # captured.
    if (getattr(tensor, "graph", None) is not self and
        hasattr(self, "_forward_func_graph") and
        isinstance(self._forward_func_graph, FuncGraph)):
      tensor = self._forward_func_graph.capture(tensor)
    if isinstance(tensor, ops.EagerTensor):
      if name is None:
        name = str(ops.uid())
      return self._capture_helper(tensor, name)
    if tensor.graph is not self:
      if name is None:
        name = tensor.op.name
      # Walk the enclosing-graph chain to reject captures from *inner*
      # graphs, which would otherwise silently produce invalid graphs.
      inner_graph = tensor.graph
      while inner_graph is not None and isinstance(inner_graph, FuncGraph):
        if inner_graph is self:
          raise ValueError(
              "Trying to capture a tensor from an inner function. This can be "
              "caused by accessing a tensor defined inside a loop or "
              "conditional body, or a subfunction, from a calling function, "
              "without going through the proper return value mechanism. "
              "Consider using TensorFlow mechanisms such as TensorArrays "
              "to return tensors from inner functions or loop / conditional "
              "bodies. Tensor: %s; tensor graph: %s; this graph: %s"
              % (tensor, tensor.graph, self))
        inner_graph = inner_graph.outer_graph
      return self._capture_helper(tensor, name)
    return tensor

  def _capture_helper(self, tensor, name):
    # Reuse the existing placeholder when the tensor was captured before.
    captured_tensor = self.captures.get(tensor, None)
    if captured_tensor is None:
      captured_tensor = _create_substitute_placeholder(tensor, name=name,
                                                       dtype=tensor.dtype)
      self.captures[tensor] = captured_tensor
      self.inputs.append(captured_tensor)
    # Record on the tape so gradients flow through the capture boundary.
    tape.record_operation("captured_value", [captured_tensor], [tensor],
                          lambda x: [x])
    return captured_tensor

  @property
  def external_captures(self):
    """External tensors captured by this function."""
    return list(self.captures.keys())

  @property
  def internal_captures(self):
    """Placeholders in this function corresponding captured tensors."""
    return list(self.captures.values())
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
this graph else a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
autograph_options: additional knobs to control when `autograph=True`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write to)
the outer graph's collections that are not whitelisted, and both
read and write to the outer graph's collections that are whitelisted.
The current whitelisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will capture
Variables by value instead of reference. By default inherit from outer
graphs, and failing that will default to False.
override_flat_arg_shapes: An optional list of instances that are either
`None` or `TensorShape`. The length must match that of
`nest.flatten((args, kwargs), expand_composites=True)`. The entries
containing value `None` must match entries in flattened arguments
containing non-tensors, while entries containing a `TensorShape` must
match entries in the flattened arguments containing tensors.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
ValueError: If both `signature` and `override_flat_arg_shapes` are
passed in.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies()
else:
control_manager = ops.NullContextmanager()
with func_graph.as_default(), control_manager as a:
current_scope = variable_scope.get_variable_scope()
default_use_recource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# Convert all Tensors into TensorSpecs before saving the structured inputs.
# If storing pure concrete functions that are not called through polymorphic
# functions, we don't have access to FunctionSpec, so we need to call the
# TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
# Note: functions annotated with @tf.function should always be
# converted even though they would meet autograph's whitelisting
# criteria.
# If this assumption is ever broken, converted_call will need to
# handle the possibility of original_func still being a shim, e.g.
# bound to WeakrefSelf.
return autograph.converted_call(
original_func, None,
autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
force_conversion=True,
), args, kwargs)
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, CompositeTensors,
# TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
current_scope.set_use_resource(default_use_recource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access
arg_variables = set()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.ResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.captures.pop(arg.handle, None)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = inputs + list(func_graph.captures.values())
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(control_manager.ops_which_must_run)
# Register any other functions defined in the graph.
with ops.init_scope():
if context.executing_eagerly():
for f in func_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
context.add_function(f._c_func.func) # pylint: disable=protected-access
return func_graph
def maybe_captured(tensor):
  """Resolves a capture placeholder back to the value it stands in for.

  Walks the chain of capture placeholders across nested function graphs
  until a tensor that is not a capture placeholder is reached.

  Args:
    tensor: Tensor.

  Returns:
    A tensor, potentially from a different Graph/FuncGraph.
  """
  while (not isinstance(tensor, ops.EagerTensor) and
         tensor.op.graph.building_function and
         tensor.op.type == "Placeholder"):
    # captures maps external value -> internal placeholder; find the value
    # whose placeholder is `tensor` and continue resolving from there.
    for external_value, placeholder in tensor.op.graph.captures.items():  # pylint: disable=protected-access
      if tensor == placeholder:
        tensor = external_value
        break
    else:
      break
  return tensor
def device_stack_has_callable(device_stack):
  """Returns True if any device spec on `device_stack` is a callable."""
  for spec in device_stack.peek_objs():
    if callable(spec._device_name_or_function):  # pylint: disable=protected-access
      return True
  return False
def check_mutation(n1, n2):
  """Raises ValueError unless the two argument snapshots are identical.

  `n1` is a structural snapshot of the inputs taken before tracing and `n2`
  is the same structure afterwards; any structural difference or replaced
  leaf object means the traced function mutated its inputs.
  """
  errmsg = ("Function to be traced should not modify structure of input "
            "arguments. Check if your function has list and dictionary "
            "operations that alter input arguments, "
            "such as `list.pop`, `list.append`")
  try:
    nest.assert_same_structure(n1, n2, expand_composites=True)
  except ValueError:
    raise ValueError(errmsg)
  before = nest.flatten(n1, expand_composites=True)
  after = nest.flatten(n2, expand_composites=True)
  for original, current in zip(before, after):
    # Identity check on purpose: a leaf replaced by an equal object still
    # signals mutation of the containing structure.
    if original is not current:
      raise ValueError(errmsg)
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
  """Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.

  Args:
    sequence: A nested structure of Tensors, CompositeTensors, and
      TensorArrays.

  Returns:
    A list of tensors.
  """
  results = []
  for item in nest.flatten(sequence, expand_composites=True):
    if isinstance(item, tensor_array_ops.TensorArray):
      # Represent each TensorArray by its flow tensor.
      results.append(item.flow)
    else:
      results.append(item)
  return results
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
  """Like `nest.pack_sequence_as` but also builds TensorArrays from flows.

  Args:
    structure: The structure to pack into. May contain Tensors,
      CompositeTensors, or TensorArrays.
    flat_sequence: An iterable containing tensors.

  Returns:
    A nested structure.

  Raises:
    ValueError: if `structure` and `flat_sequence` do not have the same
      number of elements.
  """
  flat_sequence = list(flat_sequence)
  flattened_structure = nest.flatten(structure, expand_composites=True)
  if len(flattened_structure) != len(flat_sequence):
    # Include both counts so the mismatch is diagnosable from the message
    # alone (the original message carried no detail).
    raise ValueError(
        "Mismatch in element count: structure has %d elements but "
        "flat_sequence has %d." %
        (len(flattened_structure), len(flat_sequence)))
  for i in range(len(flat_sequence)):
    if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
      # Rebuild each TensorArray from its flow tensor so the packed result
      # contains real TensorArray objects rather than bare flow tensors.
      flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
          old_ta=flattened_structure[i], flow=flat_sequence[i])
  return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None):
  """Creates a placeholder for `value` and propagates shape info to it."""
  placeholder_dtype = dtype or value.dtype
  # Entering ops.control_dependencies(None) keeps the capturing placeholder
  # outside of any control flow context.
  with ops.control_dependencies(None):
    substitute = graph_placeholder(
        dtype=placeholder_dtype, shape=value.shape, name=name)
  custom_gradient.copy_handle_data(value, substitute)
  return substitute
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
  """Maps positional Python-function arguments to graph-construction inputs."""
  return _get_defun_inputs(args, names, structure=args, flat_shapes=flat_shapes)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
  """Maps python function args to graph-construction inputs.

  Args:
    args: A flat list of user-specified arguments.
    names: A list of strings with user-specified argument names, same length as
      `args`. May be `None`, in which case a generic name is used.
    structure: The original argument list or dictionary.
    flat_shapes: A flat list of values that are either `None` or
      instances of `TensorShape`. If provided, then length must match
      that of `nest.flatten(args, expand_composites=True)`; and locations where
      `args` are instances of `Tensor` must have a corresponding `TensorShape`
      in `flat_shapes`. May be `None`, in which case exact shapes are read
      directly from the args.

  Returns:
    Placeholders with the same structure as `structure`.

  Raises:
    RuntimeError: if `flat_shapes` is provided, but
      `len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
    RuntimeError: if a shape from `flat_shapes` is not None
      for an argument that is not a `Tensor`, `TensorSpec`,
      or `ResourceVariable`.
  """
  func_graph = ops.get_default_graph()
  function_inputs = []
  if names is None:
    names = [None] * len(args)
  if flat_shapes is None:
    # No overrides: every `next(shapes_iter)` yields None, so shapes are read
    # directly from the args.
    shapes_iter = itertools.repeat(None)
  else:
    len_flat_args = len(nest.flatten(args, expand_composites=True))
    if len_flat_args != len(flat_shapes):
      raise RuntimeError(
          "Length of fully flat shapes (%d) must match that of "
          "flatten(args) (%d). args: %s, flat_shapes: %s"
          % (len(flat_shapes),
             len_flat_args,
             args,
             flat_shapes))
    shapes_iter = iter(flat_shapes)
  for arg_value, name in zip(args, names):
    flattened = nest.flatten(arg_value, expand_composites=True)
    tensor_specs = [
        arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)
    ]
    specified_names = [arg.name for arg in tensor_specs if arg.name]
    if specified_names and len(specified_names) < len(tensor_specs):
      raise ValueError("If specifying TensorSpec names for nested structures, "
                       "either zero or all names have to be specified.")
    for arg in flattened:
      # We have a shape entry for each arg, regardless of whether it's a real
      # Tensor or not. For non-tensor entries it should be None.
      shape = next(shapes_iter)
      if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
        # An explicit TensorSpec name wins over the argument name.
        if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
          requested_name = arg.name
        else:
          requested_name = name
        placeholder_shape = shape if shape is not None else arg.shape
        try:
          placeholder = graph_placeholder(
              arg.dtype, placeholder_shape,
              name=requested_name)
        except ValueError:
          # Sometimes parameter names are not valid op names, so fall back to
          # unnamed placeholders.
          placeholder = graph_placeholder(arg.dtype, placeholder_shape)
        if name is not None:
          # Record the requested/user-specified name in case it's different than
          # the uniquified name, for validation when exporting signatures.
          placeholder.op._set_attr(  # pylint: disable=protected-access
              "_user_specified_name",
              attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
        function_inputs.append(placeholder)
      elif isinstance(arg, resource_variable_ops.ResourceVariable):
        # Capture arg variables to create placeholders for them. These will be
        # removed as captures after the function is traced (since otherwise we'd
        # just add it back with a new placeholder when the variable was
        # referenced).
        placeholder = func_graph.capture(arg.handle, name=name)
        # NOTE(review): unlike the Tensor branch above, there is no
        # `name is not None` guard here, and compat.as_bytes(None) would raise
        # -- presumably callers always supply names for variable args; confirm.
        placeholder.op._set_attr(  # pylint: disable=protected-access
            "_user_specified_name",
            attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
        function_inputs.append(arg)
      else:
        if shape is not None:
          raise RuntimeError(
              "Expected provided shape override to be None for arg that isn't "
              "a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
              % (arg, shape, args))
        function_inputs.append(arg)
  return nest.pack_sequence_as(structure, function_inputs,
                               expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
  """Maps keyword Python-function arguments to graph-construction inputs."""
  if not kwargs:
    names = []
    args = []
  else:
    # Sort by key so the traced input order is deterministic regardless of the
    # call-site keyword order.
    names, args = zip(*sorted(kwargs.items()))
  return _get_defun_inputs(
      args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
  """Removes reference cycles in `func_graph` FuncGraph.

  Helpful for making sure the garbage collector doesn't need to run when
  the FuncGraph goes out of scope, e.g. in tests using defun with
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).

  Args:
    func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
      after this function.
  """
  # TODO(b/115366440): Delete this method when a custom OrderedDict is added.
  # Drain the captures one entry at a time; clear() leaves some cycles around.
  captures = func_graph.captures
  while captures:
    captures.popitem()
  memory.dismantle_ordered_dict(captures)
  ops.dismantle_graph(func_graph)
| 42.720766
| 97
| 0.697704
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# NOTE(review): these modules are loaded lazily, presumably to break an import
# cycle with this module -- confirm against the package layout.
function = LazyLoader("function", globals(),
                      "tensorflow.python.eager.function")
def_function = LazyLoader(
    "def_function", globals(),
    "tensorflow.python.eager.def_function")

# Collections that FuncGraph.__init__ shares with the outer graph by reference
# (via get_collection_ref); all other collections are copied by value.
WHITELIST_COLLECTIONS = [
    ops.GraphKeys.GLOBAL_VARIABLES,
    ops.GraphKeys.LOCAL_VARIABLES,
    ops.GraphKeys.TRAINABLE_VARIABLES,
    variable_scope._VARSTORE_KEY,
    variable_scope._VARSCOPESTORE_KEY
]
class UnknownArgument(object):
  """Sentinel for signature entries that cannot be encoded as TensorSpecs."""
def convert_structure_to_signature(structure, arg_names=None):
  """Converts a possibly-nested structure into a matching signature.

  Tensors become `TensorSpec`s named after either their recorded
  `_user_specified_name` attr or their path inside the structure; plain
  Python scalars, `None`, DTypes and TensorSpecs pass through unchanged;
  everything else is replaced by an `UnknownArgument` sentinel.

  Args:
    structure: the structure to convert; composite tensors are first replaced
      by their components.
    arg_names: optional list of names for the top-level entries of
      `structure`; when given, its length must match `len(structure)`.

  Returns:
    The same structure with each leaf encoded as described above.

  Raises:
    ValueError: if `arg_names` is given but does not match `len(structure)`.
  """
  structure = composite_tensor.replace_composites_with_components(structure)

  def encode_arg(arg, path):
    """Encodes one leaf of the structure (see the module-level rules above)."""
    if isinstance(arg, ops.Tensor):
      user_specified_name = None
      try:
        user_specified_name = compat.as_str(
            arg.op.get_attr("_user_specified_name"))
      except ValueError:
        # The attr is absent; fall back to the structure path as the name.
        pass
      if path and user_specified_name and user_specified_name != path[0]:
        # The user explicitly named this tensor; prefer that name over the
        # structure path.
        name = user_specified_name
      else:
        name = "/".join([str(p) for p in path])
      return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
    if isinstance(arg, (
        int,
        float,
        bool,
        type(None),
        dtypes.DType,
        tensor_spec.TensorSpec,
    )):
      return arg
    return UnknownArgument()

  flattened = nest.flatten_with_tuple_paths(structure, expand_composites=True)
  if arg_names:
    if len(arg_names) != len(structure):
      raise ValueError(
          "Passed in arg_names don't match actual signature (%s)." % arg_names)
    # Replace all top-level names with their actual arg_names. If a path before
    # was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
    flattened = [
        ((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
    ]
  mapped = [encode_arg(arg, path) for path, arg in flattened]
  return nest.pack_sequence_as(structure, mapped, expand_composites=True)
class FuncGraph(ops.Graph):
  """Graph representing a function body.

  Attributes:
    name: function name.
    inputs: placeholder tensors representing the function's inputs; regular
      inputs come first, followed by captured-input placeholders (see
      func_graph_from_py_func, which reassigns `inputs`).
    outputs: tensors that will be returned by this function.
    control_outputs: ops that must run before the function is considered
      executed (populated when automatic control dependencies are enabled).
    structured_input_signature: possibly-nested (args, kwargs) signature of
      the inputs; may contain Python `None`s.
    structured_outputs: possibly-nested structure that the function returns.
    captures: ordered map from external tensor to the internal placeholder
      that stands in for it inside this graph.
    outer_graph: the graph that was the default graph when this FuncGraph
      was created; may itself be a FuncGraph.
    seed: graph-level random seed, inherited from the eager context or the
      outer graph.
    capture_by_value: if True, ReadVariableOp/ResourceGather results are
      captured by value rather than by reference (see create_op).
  """

  def __init__(self, name, collections=None, capture_by_value=None):
    """Constructs a FuncGraph.

    Args:
      name: the function name.
      collections: optional dict of collections to start with. When None,
        non-whitelisted collections are copied from the outer graph and
        WHITELIST_COLLECTIONS are shared by reference.
      capture_by_value: optional bool; when None, inherited from the outer
        FuncGraph (defaulting to False).
    """
    super(FuncGraph, self).__init__()
    self.name = name
    self.inputs = []
    self.outputs = []
    self.control_outputs = []
    self.control_captures = set()
    self.structured_input_signature = None
    self.structured_outputs = None
    # Weak references so the graph does not keep its variables alive.
    self._weak_variables = []
    self._watched_variables = weakref.WeakSet()
    self.outer_graph = ops.get_default_graph()
    self.captures = py_collections.OrderedDict()
    # Inherit capture-by-value from outer graph.
    if capture_by_value is not None:
      self.capture_by_value = capture_by_value
    elif self.outer_graph is not None and isinstance(
        self.outer_graph, FuncGraph):
      self.capture_by_value = self.outer_graph.capture_by_value
    else:
      self.capture_by_value = False
    self._building_function = True
    # Map from resource tensor name to last op (in program order) which uses
    # this tensor. Used to enforce that execution order matches program order
    # for resource tensors.
    self._last_op_using_resource_tensor = {}
    graph = self.outer_graph
    if context.executing_eagerly():
      self.seed = context.global_seed()
      # [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of
      # any None op_seed for random_op in the function, in which case we end up
      # using function seed, which could be unintended behavior for the op.
      self._seed_used = False
    else:
      self.seed = graph.seed
      self._seed_used = False
      # TODO(allenl): Figure out if we can remove colocation stack
      # specialization (currently used in cond_v2), here and in the cache key.
      self._colocation_stack = graph._colocation_stack.copy()  # pylint: disable=protected-access
    if collections is None:
      for collection_name in graph.get_all_collection_keys():
        if collection_name not in WHITELIST_COLLECTIONS:
          self._collections[collection_name] = graph.get_collection(
              collection_name)
      for collection_name in WHITELIST_COLLECTIONS:
        self._collections[collection_name] = graph.get_collection_ref(
            collection_name)
    else:
      self._collections = collections

  def __str__(self):
    return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))

  def watch_variable(self, v):
    """Marks `v` as accessed by this graph and every enclosing FuncGraph."""
    while self is not None and isinstance(self, FuncGraph):
      self._watched_variables.add(v)
      self = self.outer_graph

  def control_dependencies(self, control_inputs):
    """Like Graph.control_dependencies, but diverts external control inputs.

    Control inputs that live in another graph are recorded in
    `control_captures` instead of being passed to the base implementation.
    """
    if control_inputs is None:
      return super(FuncGraph, self).control_dependencies(control_inputs)

    filtered_control_inputs = []
    for c in control_inputs:
      # Check for _UnreadVariable
      if (isinstance(c, ops.IndexedSlices) or
          (hasattr(c, "_handle") and hasattr(c, "op"))):
        c = c.op
      graph_element = ops._as_graph_element(c)  # pylint: disable=protected-access
      if graph_element is None:
        graph_element = c
      if graph_element is not None and getattr(
          graph_element, "graph", None) is not self:
        self.control_captures.add(graph_element)
      else:
        filtered_control_inputs.append(graph_element)
    return super(FuncGraph, self).control_dependencies(filtered_control_inputs)

  def as_default(self):
    """Context manager making this graph default, swapping in outer stacks."""
    outer_cm = super(FuncGraph, self).as_default()

    @tf_contextlib.contextmanager
    def inner_cm():
      """Saves, swaps, and on exit restores the graph-scoped stacks."""
      graph = ops.get_default_graph()
      # pylint: disable=protected-access
      # TODO(b/112906995, nareshmodi): distribution strategy depends on
      # inheriting this stack from the default graph even in eager mode. Maybe
      # it should be part of the eager context? This would also allow us to
      # remove a get_default_graph() call from the function cache lookup.
      old_strategy_stack = self._distribution_strategy_stack
      self._distribution_strategy_stack = list(
          graph._distribution_strategy_stack)
      # We ignore device placements from any outer scopes while tracing the
      # function when possible, to avoid hard-coding them in the function
      # graph. "Default" placements come from the PartitionedCallOp's placement,
      old_device_stack = self._device_function_stack
      if context.executing_eagerly():
        if self._distribution_strategy_stack:
          self._add_device_to_stack(context.context().device_name)
      else:
        if (self._distribution_strategy_stack
            or device_stack_has_callable(graph._device_function_stack)):
          # Hard-code devices from device functions in the function body.
          self._device_function_stack = graph._device_function_stack.copy()
      old_creator_stack = self._variable_creator_stack
      self._variable_creator_stack = graph._variable_creator_stack
      old_graph_key = self._graph_key
      self._graph_key = graph._graph_key
      with outer_cm as g:
        try:
          yield g
        finally:
          # Restore every stack swapped above.
          self._distribution_strategy_stack = old_strategy_stack
          self._device_function_stack = old_device_stack
          self._variable_creator_stack = old_creator_stack
          self._graph_key = old_graph_key

    return inner_cm()

  @property
  def output_types(self):
    return [t.dtype for t in self.outputs]

  @property
  def output_shapes(self):
    return [t.shape for t in self.outputs]

  @property
  def variables(self):
    """Yields the variables accessed by this graph, resolving weak refs."""
    for weak_v in self._weak_variables:
      v = weak_v()
      if v is None:
        raise AssertionError(
            "Called a function referencing variables which have been deleted. "
            "This likely means that function-local variables were created and "
            "not referenced elsewhere in the program. This is generally a "
            "mistake; consider storing variables in an object attribute on "
            "first call.")
      yield v

  @variables.setter
  def variables(self, var_list):
    self._weak_variables = [weakref.ref(v) for v in var_list]

  def _capture_by_value(
      self,
      op_type,
      inputs,
      dtypes,
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Runs the op in the outer context and captures its result by value."""
    # Unwrap any inputs that were previously captured so the op runs on the
    # original external tensors.
    reverse_captures = dict((v, k) for k, v in self.captures.items())
    uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
    with ops.init_scope():
      if context.executing_eagerly():
        attr_list = ("dtype", int(attrs["dtype"].type))
        value, = execute.execute(
            compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
            context.context())
      else:
        op = ops.get_default_graph().create_op(
            op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
            op_def, compute_shapes, compute_device)
        value = op.outputs[0]
    captured_value = self.capture(value)
    return captured_value.op

  def create_op(
      self,
      op_type,
      inputs,
      dtypes=None,
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Like Graph.create_op, also capturing external inputs.

    Variable reads are diverted to `_capture_by_value` when
    `capture_by_value` is set.
    """
    if self.capture_by_value and op_type in ["ReadVariableOp",
                                             "ResourceGather"]:
      return self._capture_by_value(
          op_type, inputs, dtypes, input_types, name, attrs, op_def,
          compute_shapes, compute_device)

    # Reuse an existing nested Enter op rather than stacking a second one.
    if op_type == "Enter" and inputs[0].op.type == "Enter":
      if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
        return inputs[0].op
    ctxt = ops.get_default_graph()._control_flow_context  # pylint: disable=protected-access
    for i, inp in enumerate(inputs):
      if ctxt is not None and hasattr(ctxt, "AddValue"):
        inp = ctxt.AddValue(inp)
      inp = self.capture(inp)
      inputs[i] = inp
    return super(FuncGraph, self).create_op(
        op_type, inputs, dtypes, input_types, name, attrs, op_def,
        compute_device=compute_device)

  def capture(self, tensor, name=None):
    """Captures `tensor` if external to this graph; returns the placeholder."""
    # During backprop tracing, route the capture through the forward graph.
    if (getattr(tensor, "graph", None) is not self and
        hasattr(self, "_forward_func_graph") and
        isinstance(self._forward_func_graph, FuncGraph)):
      tensor = self._forward_func_graph.capture(tensor)
    if isinstance(tensor, ops.EagerTensor):
      if name is None:
        name = str(ops.uid())
      return self._capture_helper(tensor, name)
    if tensor.graph is not self:
      if name is None:
        name = tensor.op.name
      # Refuse to capture a tensor defined in a graph nested inside this one.
      inner_graph = tensor.graph
      while inner_graph is not None and isinstance(inner_graph, FuncGraph):
        if inner_graph is self:
          raise ValueError(
              "Trying to capture a tensor from an inner function. This can be "
              "caused by accessing a tensor defined inside a loop or "
              "conditional body, or a subfunction, from a calling function, "
              "without going through the proper return value mechanism. "
              "Consider using TensorFlow mechanisms such as TensorArrays "
              "to return tensors from inner functions or loop / conditional "
              "bodies. Tensor: %s; tensor graph: %s; this graph: %s"
              % (tensor, tensor.graph, self))
        inner_graph = inner_graph.outer_graph
      return self._capture_helper(tensor, name)
    return tensor

  def _capture_helper(self, tensor, name):
    """Creates (or reuses) the internal placeholder for a captured tensor."""
    captured_tensor = self.captures.get(tensor, None)
    if captured_tensor is None:
      captured_tensor = _create_substitute_placeholder(tensor, name=name,
                                                       dtype=tensor.dtype)
      self.captures[tensor] = captured_tensor
      self.inputs.append(captured_tensor)
    # Record the capture on the tape so gradients flow through it.
    tape.record_operation("captured_value", [captured_tensor], [tensor],
                          lambda x: [x])
    return captured_tensor

  @property
  def external_captures(self):
    """External tensors captured by this function."""
    return list(self.captures.keys())

  @property
  def internal_captures(self):
    """Placeholders in this function corresponding to captured tensors."""
    return list(self.captures.values())
def func_graph_from_py_func(name,
                            python_func,
                            args,
                            kwargs,
                            signature=None,
                            func_graph=None,
                            autograph=False,
                            autograph_options=None,
                            add_control_dependencies=True,
                            arg_names=None,
                            op_return_value=None,
                            collections=None,
                            capture_by_value=None,
                            override_flat_arg_shapes=None):
  """Traces `python_func` and returns the populated `FuncGraph`.

  Args:
    name: an identifier for the function, used to name the FuncGraph.
    python_func: the Python function to trace.
    args: positional arguments to trace `python_func` with; replaced by
      `signature` when that is given.
    kwargs: keyword arguments to trace `python_func` with; cleared when
      `signature` is given.
    signature: optional structure of `TensorSpec`s from which argument
      placeholders are built instead of from `args`/`kwargs`.
    func_graph: optional `FuncGraph` to trace into; a fresh one is created
      when None.
    autograph: whether to force AutoGraph conversion of `python_func`.
    autograph_options: optional feature flags forwarded to AutoGraph.
    add_control_dependencies: whether to trace under an
      `AutomaticControlDependencies` manager, which sequences stateful ops.
    arg_names: optional names for the positional arguments.
    op_return_value: optional Tensor; any bare `Operation` returned by
      `python_func` is replaced by an identity of this value that is
      control-dependent on the op.
    collections: forwarded to the `FuncGraph` constructor.
    capture_by_value: forwarded to the `FuncGraph` constructor.
    override_flat_arg_shapes: optional flat list of shape overrides (or None
      entries) for the placeholders of args then kwargs; mutually exclusive
      with `signature`.

  Returns:
    The `FuncGraph`, with inputs, outputs, captures and variables filled in.

  Raises:
    ValueError: if both `signature` and `override_flat_arg_shapes` are
      passed, or if `python_func` mutates the structure of its arguments.
    TypeError: if `python_func` returns a value that cannot be converted to
      a Tensor.
  """
  if op_return_value is not None:
    assert isinstance(op_return_value, ops.Tensor), op_return_value
  if func_graph is None:
    func_graph = FuncGraph(name, collections=collections,
                           capture_by_value=capture_by_value)
  assert isinstance(func_graph, FuncGraph)
  if add_control_dependencies:
    control_manager = AutomaticControlDependencies()
  else:
    control_manager = ops.NullContextmanager()
  with func_graph.as_default(), control_manager as a:
    current_scope = variable_scope.get_variable_scope()
    # NOTE: "recource" is a pre-existing typo in the local variable name.
    default_use_recource = current_scope.use_resource
    current_scope.set_use_resource(True)
    if signature is not None and override_flat_arg_shapes is not None:
      raise ValueError(
          "Passed both signature and override_flat_arg_shapes: %s and %s."
          % (signature, override_flat_arg_shapes))
    if signature is not None:
      args = signature
      kwargs = {}
    # Creates and names placeholders for all arguments.
    if override_flat_arg_shapes is not None:
      flat_args = nest.flatten(args, expand_composites=True)
      arg_shapes = override_flat_arg_shapes[:len(flat_args)]
      kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
    else:
      arg_shapes = None
      kwarg_shapes = None
    func_args = _get_defun_inputs_from_args(
        args, arg_names, flat_shapes=arg_shapes)
    func_kwargs = _get_defun_inputs_from_kwargs(
        kwargs, flat_shapes=kwarg_shapes)
    # Convert all Tensors into TensorSpecs before saving the structured inputs.
    # If storing pure concrete functions that are not called through polymorphic
    # functions, we don't have access to FunctionSpec, so we need to call the
    # TensorSpecs by their `arg_names` for later binding.
    func_graph.structured_input_signature = (
        convert_structure_to_signature(func_args, arg_names),
        convert_structure_to_signature(func_kwargs))
    flat_func_args = nest.flatten(func_args, expand_composites=True)
    flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
    # Temporarily set inputs to allow graph building code to inspect
    # them. Reassigned below.
    func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
                         if isinstance(arg, ops.Tensor)]
    # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
    # Variables to help check whether mutation happens in calling the function
    # Copy the recursive list, tuple and map structure, but not base objects
    func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
                                             expand_composites=True)
    func_kwargs_before = nest.pack_sequence_as(
        func_kwargs, flat_func_kwargs, expand_composites=True)

    def convert(x):
      """Converts a function output to a Tensor."""
      if x is None:
        return None
      if op_return_value is not None and isinstance(x, ops.Operation):
        # TODO(b/79881896): we currently can't capture external control deps, so
        # this won't work if x needs to be captured (i.e. if python_func returns
        # captured Operations).
        with ops.control_dependencies([x]):
          x = array_ops.identity(op_return_value)
      elif not isinstance(x, tensor_array_ops.TensorArray):
        try:
          x = ops.convert_to_tensor_or_composite(x)
        except (ValueError, TypeError):
          raise TypeError(
              "To be compatible with tf.contrib.eager.defun, Python functions "
              "must return zero or more Tensors; in compilation of %s, found "
              "return value of type %s, which is not a Tensor." %
              (str(python_func), type(x)))
      if add_control_dependencies:
        x = a.mark_as_return(x)
      return x

    try:
      if autograph:
        from tensorflow.python import autograph  # pylint: disable=g-import-not-at-top
        _, original_func = tf_decorator.unwrap(python_func)

        def wrapper(*args, **kwargs):
          # Note: functions annotated with @tf.function should always be
          # converted even though they would meet autograph's whitelisting
          # criteria.
          return autograph.converted_call(
              original_func, None,
              autograph.ConversionOptions(
                  recursive=True,
                  optional_features=autograph_options,
                  force_conversion=True,
              ), args, kwargs)

        # Wrapping around a decorator allows checks like tf_inspect.getargspec
        # to be accurate.
        converted_func = tf_decorator.make_decorator(original_func, wrapper)
        python_func = tf_decorator.rewrap(python_func, original_func,
                                          converted_func)

      func_outputs = python_func(*func_args, **func_kwargs)
      # invariant: `func_outputs` contains only Tensors, CompositeTensors,
      # TensorArrays and `None`s.
      func_outputs = nest.map_structure(convert, func_outputs,
                                        expand_composites=True)
      check_mutation(func_args_before, func_args)
      check_mutation(func_kwargs_before, func_kwargs)
    finally:
      current_scope.set_use_resource(default_use_recource)

    # Variables in `func_args`, `func_kwargs` should be explicit inputs
    # to the function, not captured inputs.
    graph_variables = list(func_graph._watched_variables)  # pylint: disable=protected-access
    arg_variables = set()
    inputs = []
    for arg in (nest.flatten(func_args, expand_composites=True) +
                nest.flatten(func_kwargs, expand_composites=True)):
      if isinstance(arg, resource_variable_ops.ResourceVariable):
        # Even if an argument variable was not used in the function, we've
        # already manually captured the resource Tensor when creating argument
        # placeholders.
        resource_placeholder = func_graph.captures.pop(arg.handle, None)
        if resource_placeholder is None:
          continue
        arg_variables.add(arg)
        inputs.append(resource_placeholder)
      elif isinstance(arg, ops.Tensor):
        inputs.append(arg)
    variables = [v for v in graph_variables if v not in arg_variables]
    func_graph.inputs = inputs + list(func_graph.captures.values())

    func_graph.structured_outputs = func_outputs
    # Returning a closed-over tensor does not trigger convert_to_tensor.
    func_graph.outputs.extend(
        func_graph.capture(x)
        for x in flatten(func_graph.structured_outputs)
        if x is not None)

    func_graph.variables = variables

  if add_control_dependencies:
    func_graph.control_outputs.extend(control_manager.ops_which_must_run)

  # Register any other functions defined in the graph.
  with ops.init_scope():
    if context.executing_eagerly():
      for f in func_graph._functions.values():  # pylint: disable=protected-access
        # TODO(ashankar): What about the gradient registry?
        context.add_function(f._c_func.func)  # pylint: disable=protected-access

  return func_graph
def maybe_captured(tensor):
  """If `tensor` is a capture placeholder, returns the original captured value.

  Recurses, so a placeholder-of-a-placeholder resolves to the outermost
  captured value.
  """
  if isinstance(tensor, ops.EagerTensor):
    return tensor
  op = tensor.op
  if op.graph.building_function and op.type == "Placeholder":
    for captured_value, placeholder in op.graph.captures.items():  # pylint: disable=protected-access
      if tensor == placeholder:
        return maybe_captured(captured_value)
  return tensor
def device_stack_has_callable(device_stack):
  """Returns True if any spec on `device_stack` uses a callable device fn."""
  callables = (
      spec for spec in device_stack.peek_objs()
      if callable(spec._device_name_or_function))  # pylint: disable=protected-access
  return next(callables, None) is not None
def check_mutation(n1, n2):
  """Raises ValueError if the argument structures n1 and n2 differ."""
  errmsg = ("Function to be traced should not modify structure of input "
            "arguments. Check if your function has list and dictionary "
            "operations that alter input arguments, "
            "such as `list.pop`, `list.append`")
  try:
    nest.assert_same_structure(n1, n2, expand_composites=True)
  except ValueError:
    raise ValueError(errmsg)
  # Identity comparison on leaves: a replaced (even if equal) object counts
  # as a mutation of the containing structure.
  pairs = zip(nest.flatten(n1, expand_composites=True),
              nest.flatten(n2, expand_composites=True))
  if any(left is not right for left, right in pairs):
    raise ValueError(errmsg)
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
  """Like nest.flatten with expand_composites, but TensorArrays yield flows."""

  def _tensorize(item):
    # Represent a TensorArray by its flow tensor; pass everything else through.
    if isinstance(item, tensor_array_ops.TensorArray):
      return item.flow
    return item

  return [_tensorize(item)
          for item in nest.flatten(sequence, expand_composites=True)]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
  """Like `nest.pack_sequence_as` but rebuilds TensorArrays from flow tensors."""
  leaves = list(flat_sequence)
  template = nest.flatten(structure, expand_composites=True)
  if len(template) != len(leaves):
    raise ValueError("Mismatch in element count")
  for idx, prototype in enumerate(template):
    if isinstance(prototype, tensor_array_ops.TensorArray):
      # Rebuild the TensorArray from its flow so the packed structure holds
      # real TensorArray objects.
      leaves[idx] = tensor_array_ops.build_ta_with_new_flow(
          old_ta=prototype, flow=leaves[idx])
  return nest.pack_sequence_as(structure, leaves, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None):
  """Creates a placeholder for `value` and copies its handle data onto it."""
  # control_dependencies(None) keeps the capturing placeholder out of any
  # control flow context.
  with ops.control_dependencies(None):
    stand_in = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
  custom_gradient.copy_handle_data(value, stand_in)
  return stand_in
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
  """Maps Python function positional args to graph-construction inputs."""
  return _get_defun_inputs(args, names, structure=args, flat_shapes=flat_shapes)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
  """Creates graph placeholders for `args`, repacked into `structure`.

  Args:
    args: Iterable of argument values. Leaves that are Tensors or TensorSpecs
      become placeholders; ResourceVariables are captured; everything else is
      passed through unchanged.
    names: Optional list of placeholder names parallel to `args`; `None`
      entries (or `names=None`) let the graph pick names.
    structure: Structure to pack the resulting inputs back into (typically
      `args` itself, or the original kwargs dict).
    flat_shapes: Optional list of shape overrides, one per flattened leaf of
      `args`; a `None` entry falls back to that leaf's own shape.

  Returns:
    `structure` repacked with placeholders/captures substituted for
    tensor-like leaves.

  Raises:
    RuntimeError: If `flat_shapes` has the wrong length, or a non-None shape
      override is supplied for a leaf that is not tensor-like.
    ValueError: If TensorSpec names inside one nested argument are only
      partially specified.
  """
  func_graph = ops.get_default_graph()
  function_inputs = []
  if names is None:
    names = [None] * len(args)
  if flat_shapes is None:
    shapes_iter = itertools.repeat(None)
  else:
    len_flat_args = len(nest.flatten(args, expand_composites=True))
    if len_flat_args != len(flat_shapes):
      raise RuntimeError(
          "Length of fully flat shapes (%d) must match that of "
          "flatten(args) (%d). args: %s, flat_shapes: %s"
          % (len(flat_shapes),
             len_flat_args,
             args,
             flat_shapes))
    shapes_iter = iter(flat_shapes)
  for arg_value, name in zip(args, names):
    flattened = nest.flatten(arg_value, expand_composites=True)
    tensor_specs = [
        arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)
    ]
    specified_names = [arg.name for arg in tensor_specs if arg.name]
    # Names must be all-or-nothing within one nested argument; a partial set
    # would make placeholder naming ambiguous.
    if specified_names and len(specified_names) < len(tensor_specs):
      raise ValueError("If specifying TensorSpec names for nested structures, "
                       "either zero or all names have to be specified.")
    for arg in flattened:
      # We consume a shape entry for each leaf, regardless of whether it is a
      # tensor-like value; non-tensor leaves must have a `None` override (the
      # `else` branch below enforces this).
      shape = next(shapes_iter)
      if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
        # A TensorSpec's own name wins over the argument-level name.
        if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
          requested_name = arg.name
        else:
          requested_name = name
        placeholder_shape = shape if shape is not None else arg.shape
        try:
          placeholder = graph_placeholder(
              arg.dtype, placeholder_shape,
              name=requested_name)
        except ValueError:
          # Sometimes requested names are not valid op names; fall back to an
          # auto-generated placeholder name.
          placeholder = graph_placeholder(arg.dtype, placeholder_shape)
        if name is not None:
          # Record the requested name (which may differ from
          # the uniquified name), for validation when exporting signatures.
          placeholder.op._set_attr(  # pylint: disable=protected-access
              "_user_specified_name",
              attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
        function_inputs.append(placeholder)
      elif isinstance(arg, resource_variable_ops.ResourceVariable):
        # Capture arg variables to create placeholders for them. These will be
        # removed as captures after the function is traced (since otherwise we'd
        # end up treating the variable handle as a capture — TODO confirm
        # against the caller that strips these captures).
        placeholder = func_graph.capture(arg.handle, name=name)
        placeholder.op._set_attr(  # pylint: disable=protected-access
            "_user_specified_name",
            attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
        function_inputs.append(arg)
      else:
        if shape is not None:
          raise RuntimeError(
              "Expected provided shape override to be None for arg that isn't "
              "a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
              % (arg, shape, args))
        function_inputs.append(arg)
  return nest.pack_sequence_as(structure, function_inputs,
                               expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
  """Maps keyword call arguments (in sorted key order) to placeholders."""
  if kwargs:
    sorted_items = sorted(kwargs.items())
    names = [key for key, _ in sorted_items]
    args = [value for _, value in sorted_items]
  else:
    names = []
    args = []
  return _get_defun_inputs(args, names, structure=kwargs,
                           flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
  """Breaks reference cycles in `func_graph` so it can be garbage-collected.

  The graph is unusable after this call.

  Args:
    func_graph: The graph object to dismantle.
  """
  # TODO(b/115366440): Delete this method when a custom OrderedDict is added.
  # Clearing captures using clear() leaves some cycles around, so pop the
  # entries one by one instead.
  while func_graph.captures:
    func_graph.captures.popitem()
  memory.dismantle_ordered_dict(func_graph.captures)
  ops.dismantle_graph(func_graph)
| true
| true
|
1c478522810cfe82e7a178b902b41a16a8504685
| 14,006
|
py
|
Python
|
sdk/python/pulumi_azure_native/azurestackhci/v20210101preview/get_cluster.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/azurestackhci/v20210101preview/get_cluster.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/azurestackhci/v20210101preview/get_cluster.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
]
@pulumi.output_type
class GetClusterResult:
    """
    Cluster details.
    """
    def __init__(__self__, aad_client_id=None, aad_tenant_id=None, billing_model=None, cloud_id=None, cloud_management_endpoint=None, created_at=None, created_by=None, created_by_type=None, id=None, last_billing_timestamp=None, last_modified_at=None, last_modified_by=None, last_modified_by_type=None, last_sync_timestamp=None, location=None, name=None, provisioning_state=None, registration_timestamp=None, reported_properties=None, status=None, tags=None, trial_days_remaining=None, type=None):
        # (key, value, expected type) triples, in the original validation order.
        _fields = (
            ('aad_client_id', aad_client_id, str),
            ('aad_tenant_id', aad_tenant_id, str),
            ('billing_model', billing_model, str),
            ('cloud_id', cloud_id, str),
            ('cloud_management_endpoint', cloud_management_endpoint, str),
            ('created_at', created_at, str),
            ('created_by', created_by, str),
            ('created_by_type', created_by_type, str),
            ('id', id, str),
            ('last_billing_timestamp', last_billing_timestamp, str),
            ('last_modified_at', last_modified_at, str),
            ('last_modified_by', last_modified_by, str),
            ('last_modified_by_type', last_modified_by_type, str),
            ('last_sync_timestamp', last_sync_timestamp, str),
            ('location', location, str),
            ('name', name, str),
            ('provisioning_state', provisioning_state, str),
            ('registration_timestamp', registration_timestamp, str),
            ('reported_properties', reported_properties, dict),
            ('status', status, str),
            ('tags', tags, dict),
            ('trial_days_remaining', trial_days_remaining, float),
            ('type', type, str),
        )
        for _key, _value, _expected in _fields:
            # Falsy values (None, '', {}) are accepted untyped, matching the
            # generated SDK's truthiness-gated checks.
            if _value and not isinstance(_value, _expected):
                raise TypeError("Expected argument '%s' to be a %s" % (_key, _expected.__name__))
            pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="aadClientId")
    def aad_client_id(self) -> str:
        """App id of cluster AAD identity."""
        return pulumi.get(self, "aad_client_id")

    @property
    @pulumi.getter(name="aadTenantId")
    def aad_tenant_id(self) -> str:
        """Tenant id of cluster AAD identity."""
        return pulumi.get(self, "aad_tenant_id")

    @property
    @pulumi.getter(name="billingModel")
    def billing_model(self) -> str:
        """Type of billing applied to the resource."""
        return pulumi.get(self, "billing_model")

    @property
    @pulumi.getter(name="cloudId")
    def cloud_id(self) -> str:
        """Unique, immutable resource id."""
        return pulumi.get(self, "cloud_id")

    @property
    @pulumi.getter(name="cloudManagementEndpoint")
    def cloud_management_endpoint(self) -> Optional[str]:
        """Endpoint configured for management from the Azure portal"""
        return pulumi.get(self, "cloud_management_endpoint")

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """The timestamp of resource creation (UTC)."""
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        """The identity that created the resource."""
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        """The type of identity that created the resource."""
        return pulumi.get(self, "created_by_type")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastBillingTimestamp")
    def last_billing_timestamp(self) -> str:
        """Most recent billing meter timestamp."""
        return pulumi.get(self, "last_billing_timestamp")

    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        """The timestamp of resource last modification (UTC)"""
        return pulumi.get(self, "last_modified_at")

    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[str]:
        """The identity that last modified the resource."""
        return pulumi.get(self, "last_modified_by")

    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        """The type of identity that last modified the resource."""
        return pulumi.get(self, "last_modified_by_type")

    @property
    @pulumi.getter(name="lastSyncTimestamp")
    def last_sync_timestamp(self) -> str:
        """Most recent cluster sync timestamp."""
        return pulumi.get(self, "last_sync_timestamp")

    @property
    @pulumi.getter
    def location(self) -> str:
        """The geo-location where the resource lives"""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the resource"""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="registrationTimestamp")
    def registration_timestamp(self) -> str:
        """First cluster sync timestamp."""
        return pulumi.get(self, "registration_timestamp")

    @property
    @pulumi.getter(name="reportedProperties")
    def reported_properties(self) -> 'outputs.ClusterReportedPropertiesResponse':
        """Properties reported by cluster agent."""
        return pulumi.get(self, "reported_properties")

    @property
    @pulumi.getter
    def status(self) -> str:
        """Status of the cluster agent."""
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="trialDaysRemaining")
    def trial_days_remaining(self) -> float:
        """Number of days remaining in the trial period."""
        return pulumi.get(self, "trial_days_remaining")

    @property
    @pulumi.getter
    def type(self) -> str:
        """The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts\""""
        return pulumi.get(self, "type")
class AwaitableGetClusterResult(GetClusterResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable; never actually suspends (the dead
        # `yield` only marks this as a generator function).
        if False:
            yield self
        _fields = (
            'aad_client_id', 'aad_tenant_id', 'billing_model', 'cloud_id',
            'cloud_management_endpoint', 'created_at', 'created_by',
            'created_by_type', 'id', 'last_billing_timestamp',
            'last_modified_at', 'last_modified_by', 'last_modified_by_type',
            'last_sync_timestamp', 'location', 'name', 'provisioning_state',
            'registration_timestamp', 'reported_properties', 'status', 'tags',
            'trial_days_remaining', 'type')
        return GetClusterResult(**{f: getattr(self, f) for f in _fields})
def get_cluster(cluster_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
    """
    Cluster details.


    :param str cluster_name: The name of the cluster.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = {
        'clusterName': cluster_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:azurestackhci/v20210101preview:getCluster', __args__, opts=opts, typ=GetClusterResult).value
    _fields = (
        'aad_client_id', 'aad_tenant_id', 'billing_model', 'cloud_id',
        'cloud_management_endpoint', 'created_at', 'created_by',
        'created_by_type', 'id', 'last_billing_timestamp', 'last_modified_at',
        'last_modified_by', 'last_modified_by_type', 'last_sync_timestamp',
        'location', 'name', 'provisioning_state', 'registration_timestamp',
        'reported_properties', 'status', 'tags', 'trial_days_remaining',
        'type')
    return AwaitableGetClusterResult(**{f: getattr(__ret__, f) for f in _fields})
| 39.677054
| 496
| 0.667214
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
]
@pulumi.output_type
class GetClusterResult:
def __init__(__self__, aad_client_id=None, aad_tenant_id=None, billing_model=None, cloud_id=None, cloud_management_endpoint=None, created_at=None, created_by=None, created_by_type=None, id=None, last_billing_timestamp=None, last_modified_at=None, last_modified_by=None, last_modified_by_type=None, last_sync_timestamp=None, location=None, name=None, provisioning_state=None, registration_timestamp=None, reported_properties=None, status=None, tags=None, trial_days_remaining=None, type=None):
if aad_client_id and not isinstance(aad_client_id, str):
raise TypeError("Expected argument 'aad_client_id' to be a str")
pulumi.set(__self__, "aad_client_id", aad_client_id)
if aad_tenant_id and not isinstance(aad_tenant_id, str):
raise TypeError("Expected argument 'aad_tenant_id' to be a str")
pulumi.set(__self__, "aad_tenant_id", aad_tenant_id)
if billing_model and not isinstance(billing_model, str):
raise TypeError("Expected argument 'billing_model' to be a str")
pulumi.set(__self__, "billing_model", billing_model)
if cloud_id and not isinstance(cloud_id, str):
raise TypeError("Expected argument 'cloud_id' to be a str")
pulumi.set(__self__, "cloud_id", cloud_id)
if cloud_management_endpoint and not isinstance(cloud_management_endpoint, str):
raise TypeError("Expected argument 'cloud_management_endpoint' to be a str")
pulumi.set(__self__, "cloud_management_endpoint", cloud_management_endpoint)
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if created_by and not isinstance(created_by, str):
raise TypeError("Expected argument 'created_by' to be a str")
pulumi.set(__self__, "created_by", created_by)
if created_by_type and not isinstance(created_by_type, str):
raise TypeError("Expected argument 'created_by_type' to be a str")
pulumi.set(__self__, "created_by_type", created_by_type)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_billing_timestamp and not isinstance(last_billing_timestamp, str):
raise TypeError("Expected argument 'last_billing_timestamp' to be a str")
pulumi.set(__self__, "last_billing_timestamp", last_billing_timestamp)
if last_modified_at and not isinstance(last_modified_at, str):
raise TypeError("Expected argument 'last_modified_at' to be a str")
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by and not isinstance(last_modified_by, str):
raise TypeError("Expected argument 'last_modified_by' to be a str")
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type and not isinstance(last_modified_by_type, str):
raise TypeError("Expected argument 'last_modified_by_type' to be a str")
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
if last_sync_timestamp and not isinstance(last_sync_timestamp, str):
raise TypeError("Expected argument 'last_sync_timestamp' to be a str")
pulumi.set(__self__, "last_sync_timestamp", last_sync_timestamp)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if registration_timestamp and not isinstance(registration_timestamp, str):
raise TypeError("Expected argument 'registration_timestamp' to be a str")
pulumi.set(__self__, "registration_timestamp", registration_timestamp)
if reported_properties and not isinstance(reported_properties, dict):
raise TypeError("Expected argument 'reported_properties' to be a dict")
pulumi.set(__self__, "reported_properties", reported_properties)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if trial_days_remaining and not isinstance(trial_days_remaining, float):
raise TypeError("Expected argument 'trial_days_remaining' to be a float")
pulumi.set(__self__, "trial_days_remaining", trial_days_remaining)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="aadClientId")
def aad_client_id(self) -> str:
return pulumi.get(self, "aad_client_id")
@property
@pulumi.getter(name="aadTenantId")
def aad_tenant_id(self) -> str:
return pulumi.get(self, "aad_tenant_id")
@property
@pulumi.getter(name="billingModel")
def billing_model(self) -> str:
return pulumi.get(self, "billing_model")
@property
@pulumi.getter(name="cloudId")
def cloud_id(self) -> str:
return pulumi.get(self, "cloud_id")
@property
@pulumi.getter(name="cloudManagementEndpoint")
def cloud_management_endpoint(self) -> Optional[str]:
return pulumi.get(self, "cloud_management_endpoint")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastBillingTimestamp")
def last_billing_timestamp(self) -> str:
return pulumi.get(self, "last_billing_timestamp")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
return pulumi.get(self, "last_modified_by_type")
@property
@pulumi.getter(name="lastSyncTimestamp")
def last_sync_timestamp(self) -> str:
return pulumi.get(self, "last_sync_timestamp")
@property
@pulumi.getter
def location(self) -> str:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="registrationTimestamp")
def registration_timestamp(self) -> str:
return pulumi.get(self, "registration_timestamp")
@property
@pulumi.getter(name="reportedProperties")
def reported_properties(self) -> 'outputs.ClusterReportedPropertiesResponse':
return pulumi.get(self, "reported_properties")
@property
@pulumi.getter
def status(self) -> str:
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trialDaysRemaining")
def trial_days_remaining(self) -> float:
return pulumi.get(self, "trial_days_remaining")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetClusterResult(GetClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetClusterResult(
aad_client_id=self.aad_client_id,
aad_tenant_id=self.aad_tenant_id,
billing_model=self.billing_model,
cloud_id=self.cloud_id,
cloud_management_endpoint=self.cloud_management_endpoint,
created_at=self.created_at,
created_by=self.created_by,
created_by_type=self.created_by_type,
id=self.id,
last_billing_timestamp=self.last_billing_timestamp,
last_modified_at=self.last_modified_at,
last_modified_by=self.last_modified_by,
last_modified_by_type=self.last_modified_by_type,
last_sync_timestamp=self.last_sync_timestamp,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
registration_timestamp=self.registration_timestamp,
reported_properties=self.reported_properties,
status=self.status,
tags=self.tags,
trial_days_remaining=self.trial_days_remaining,
type=self.type)
def get_cluster(cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:azurestackhci/v20210101preview:getCluster', __args__, opts=opts, typ=GetClusterResult).value
return AwaitableGetClusterResult(
aad_client_id=__ret__.aad_client_id,
aad_tenant_id=__ret__.aad_tenant_id,
billing_model=__ret__.billing_model,
cloud_id=__ret__.cloud_id,
cloud_management_endpoint=__ret__.cloud_management_endpoint,
created_at=__ret__.created_at,
created_by=__ret__.created_by,
created_by_type=__ret__.created_by_type,
id=__ret__.id,
last_billing_timestamp=__ret__.last_billing_timestamp,
last_modified_at=__ret__.last_modified_at,
last_modified_by=__ret__.last_modified_by,
last_modified_by_type=__ret__.last_modified_by_type,
last_sync_timestamp=__ret__.last_sync_timestamp,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
registration_timestamp=__ret__.registration_timestamp,
reported_properties=__ret__.reported_properties,
status=__ret__.status,
tags=__ret__.tags,
trial_days_remaining=__ret__.trial_days_remaining,
type=__ret__.type)
| true
| true
|
1c478544308d1c24ccd1470dc7b2c5e5197b8d45
| 1,181
|
py
|
Python
|
src/main.py
|
ITAnalyst-JU/process-logger
|
a51d4604b2dc3047dec9adfec96334ff20a3782f
|
[
"MIT"
] | null | null | null |
src/main.py
|
ITAnalyst-JU/process-logger
|
a51d4604b2dc3047dec9adfec96334ff20a3782f
|
[
"MIT"
] | null | null | null |
src/main.py
|
ITAnalyst-JU/process-logger
|
a51d4604b2dc3047dec9adfec96334ff20a3782f
|
[
"MIT"
] | null | null | null |
#!/bin/python
import time
import argparse
from Invoker import Invoker
def parse_cmd_name(parts):
    """Return the command's basename: the text after the final '/' in parts[0]."""
    # TODO: maybe for e.g. 'make testall' this should return 'make testall' and not 'make'?
    assert parts and parts[0]
    _, _, basename = parts[0].rpartition('/')
    assert ' ' not in basename
    return basename
def get_time_str():
    """Return the current local time as 'YYYY.MM.DD HH:MM:SS'.

    Uses zero-padded strftime fields so the string has a fixed width, is
    unambiguous, and sorts chronologically — the previous hand-built format
    could emit e.g. '2024.3.5 9:4:2'.
    """
    return time.strftime('%Y.%m.%d %H:%M:%S')
# TODO: better process for deciding filename and title
def main():
    """CLI entry point: parse arguments and hand the command off to Invoker."""
    parser = argparse.ArgumentParser(
        description="Monitor command's output in real time.")
    parser.add_argument('cmd', type=str, nargs='+',
                        help='command invocation to be monitored')
    parser.add_argument('-o', '--output', metavar='filename',
                        dest='log_file_location', default=None, type=str,
                        help='write the output to a given filename')
    args = parser.parse_args()

    title = parse_cmd_name(args.cmd) + ' ' + get_time_str()
    log_file = args.log_file_location
    if log_file is None:
        # Default the log file name to the derived title.
        log_file = title + '.html'
    Invoker(args.cmd, log_file, title)


if __name__ == '__main__':
    main()
| 28.804878
| 111
| 0.647756
|
import time
import argparse
from Invoker import Invoker
def parse_cmd_name(parts):
assert len(parts) >= 1
assert len(parts[0]) >= 1
x = parts[0].split('/')[-1]
assert ' ' not in x
return x
def get_time_str():
t = time.localtime()
return f'{t.tm_year}.{t.tm_mon}.{t.tm_mday} {t.tm_hour}:{t.tm_min}:{t.tm_sec}'
def main():
parser = argparse.ArgumentParser(description="Monitor command's output in real time.")
parser.add_argument('cmd', type=str, nargs='+',
help='command invocation to be monitored')
parser.add_argument('-o', '--output', metavar='filename', dest='log_file_location', default=None, type=str,
help='write the output to a given filename')
args = parser.parse_args()
title = parse_cmd_name(args.cmd) + ' ' + get_time_str()
if args.log_file_location is None:
args.log_file_location = title + '.html'
Invoker(args.cmd, args.log_file_location, title)
if __name__ == '__main__':
main()
| true
| true
|
1c478548a8539ffc957d7a9e7b5a3ba080deb1de
| 1,052
|
py
|
Python
|
manabe/public/management/commands/fake_server.py
|
luoyedao/manabe
|
90c158bd23e956308263b542634adc97f6526276
|
[
"Apache-2.0"
] | 16
|
2018-08-12T08:28:00.000Z
|
2022-03-15T02:13:42.000Z
|
manabe/public/management/commands/fake_server.py
|
luoyedao/manabe
|
90c158bd23e956308263b542634adc97f6526276
|
[
"Apache-2.0"
] | 14
|
2020-02-11T23:27:29.000Z
|
2022-02-11T03:43:26.000Z
|
manabe/public/management/commands/fake_server.py
|
luoyedao/manabe
|
90c158bd23e956308263b542634adc97f6526276
|
[
"Apache-2.0"
] | 25
|
2018-08-26T07:38:46.000Z
|
2022-03-15T02:13:45.000Z
|
from random import choice
from django.contrib.auth.models import User
from appinput.models import App
from envx.models import Env
from serverinput.models import Server
def fake_server_data():
    """Wipe the Server table and repopulate it with fabricated records.

    Creates one Server per (host 192.168.0.0-99) x (port 80/443/8080/8888),
    each with a randomly chosen operator, app, and environment drawn from
    the existing User/App/Env tables.
    """
    Server.objects.all().delete()
    print('delete all server data')
    user_set = User.objects.all()
    app_set = App.objects.all()
    env_set = Env.objects.all()
    for host in range(100):
        ip_address = salt_name = "192.168.0.{}".format(host)
        for port in (80, 443, 8080, 8888):
            server_name = "{}_{}".format(ip_address, port)
            # Keep the choice() call order identical to the original so
            # seeded RNG runs reproduce the same data.
            app_user = choice(['root', 'tomcat', 'javauser'])
            op_user = choice(user_set)
            app_item = choice(app_set)
            env_item = choice(env_set)
            Server.objects.create(
                name=server_name,
                ip_address=ip_address,
                port=port,
                salt_name=salt_name,
                env_name=env_item,
                app_name=app_item,
                op_user=op_user,
                app_user=app_user,
            )
    print('create all server data')
| 35.066667
| 78
| 0.586502
|
from random import choice
from django.contrib.auth.models import User
from appinput.models import App
from envx.models import Env
from serverinput.models import Server
def fake_server_data():
Server.objects.all().delete()
print('delete all server data')
user_set = User.objects.all()
app_set = App.objects.all()
env_set = Env.objects.all()
for i in range(100):
ip_address = salt_name = "192.168.0.{}".format(i)
for j in [80, 443, 8080, 8888]:
port = j
name = "192.168.0.{}_{}".format(i, port)
app_user = choice(['root', 'tomcat', 'javauser'])
op_user = choice(user_set)
app_item = choice(app_set)
env_item = choice(env_set)
Server.objects.create(name=name, ip_address=ip_address, port=port,
salt_name=salt_name, env_name=env_item,
app_name=app_item, op_user=op_user,
app_user=app_user)
print('create all server data')
| true
| true
|
1c47869bfa0f88eba2e94f57df3c36bcb2331ede
| 404
|
py
|
Python
|
server/src/prefect_server/utilities/__init__.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-10T14:32:32.000Z
|
2020-05-10T14:32:32.000Z
|
server/src/prefect_server/utilities/__init__.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2022-02-14T11:25:57.000Z
|
2022-02-27T16:25:14.000Z
|
server/src/prefect_server/utilities/__init__.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-31T04:42:56.000Z
|
2020-05-31T04:42:56.000Z
|
# Licensed under the Prefect Community License, available at
# https://www.prefect.io/legal/prefect-community-license
import prefect_server.utilities.context
import prefect_server.utilities.exceptions
import prefect_server.utilities.graphql
import prefect_server.utilities.logging
import prefect_server.utilities.names
import prefect_server.utilities.tests
import prefect_server.utilities.asynchronous
| 33.666667
| 60
| 0.868812
|
import prefect_server.utilities.context
import prefect_server.utilities.exceptions
import prefect_server.utilities.graphql
import prefect_server.utilities.logging
import prefect_server.utilities.names
import prefect_server.utilities.tests
import prefect_server.utilities.asynchronous
| true
| true
|
1c47883aeba99de2cb069da42b1663aff45d1bfb
| 11,011
|
py
|
Python
|
data_kits/nf_kits.py
|
Jarvis73/DINs
|
fe967115182a47b9ad1018658cd1be745831e7aa
|
[
"MIT"
] | null | null | null |
data_kits/nf_kits.py
|
Jarvis73/DINs
|
fe967115182a47b9ad1018658cd1be745831e7aa
|
[
"MIT"
] | null | null | null |
data_kits/nf_kits.py
|
Jarvis73/DINs
|
fe967115182a47b9ad1018658cd1be745831e7aa
|
[
"MIT"
] | null | null | null |
# Copyright 2019-2020 Jianwei Zhang All Right Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =================================================================================
import pickle
import zlib
from pathlib import Path
import nibabel as nib
import numpy as np
import pandas as pd
import scipy.ndimage as ndi
import tqdm
ROOT = Path(__file__).parent.parent.parent
DATA_ROOT = ROOT / "data/NF"
def read_nii(file_name, out_dtype=np.int16, special=False, only_header=False):
nib_vol = nib.load(str(file_name))
vh = nib_vol.header
if only_header:
return vh
affine = vh.get_best_affine()
# assert len(np.where(affine[:3, :3].reshape(-1) != 0)[0]) == 3, affine
trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
data = nib_vol.get_fdata().astype(out_dtype).transpose(*trans[::-1])
if special:
data = np.flip(data, axis=2)
if affine[0, trans[0]] > 0: # Increase x from Right to Left
data = np.flip(data, axis=2)
if affine[1, trans[1]] > 0: # Increase y from Anterior to Posterior
data = np.flip(data, axis=1)
if affine[2, trans[2]] < 0: # Increase z from Interior to Superior
data = np.flip(data, axis=0)
return vh, data
def write_nii(data, header, out_path, out_dtype=np.int16, special=False, affine=None):
if header is not None:
affine = header.get_best_affine()
# assert len(np.where(affine[:3, :3].reshape(-1) != 0)[0]) == 3, affine
trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
trans_bk = [np.argwhere(np.array(trans[::-1]) == i)[0][0] for i in range(3)]
if special:
data = np.flip(data, axis=2)
if affine[0, trans[0]] > 0: # Increase x from Right to Left
data = np.flip(data, axis=2)
if affine[1, trans[1]] > 0: # Increase y from Anterior to Posterior
data = np.flip(data, axis=1)
if affine[2, trans[2]] < 0: # Increase z from Interior to Superior
data = np.flip(data, axis=0)
out_image = np.transpose(data, trans_bk).astype(out_dtype)
if header is None and affine is not None:
out = nib.Nifti1Image(out_image, affine=affine)
else:
out = nib.Nifti1Image(out_image, affine=None, header=header)
nib.save(out, str(out_path))
def load_data(logger):
data_dir = DATA_ROOT / "nii_NF"
path_list = list(data_dir.glob("volume*"))
logger.info(f"Loading data ({len(path_list)} examples) ...")
cache_path = DATA_ROOT / "cache.pkl.gz"
if cache_path.exists():
logger.info(f"Loading data cache from {cache_path}")
with cache_path.open("rb") as f:
data = zlib.decompress(f.read())
_data_cache = pickle.loads(data)
logger.info("Finished!")
return _data_cache
_data_cache = {}
for path in tqdm.tqdm(path_list):
pid = path.name.split(".")[0].split("-")[-1]
header, volume = read_nii(path)
la_path = path.parent / path.name.replace("volume", "segmentation")
_, label = read_nii(la_path)
assert volume.shape == label.shape, f"{volume.shape} vs {label.shape}"
_data_cache[int(pid)] = {"im_path": path.absolute(),
"la_path": la_path.absolute(),
"img": volume,
"lab": label.astype(np.uint8),
"pos": np.stack(np.where(label > 0), axis=1),
"meta": header,
"lab_rng": np.unique(label)}
with cache_path.open("wb") as f:
logger.info(f"Saving data cache to {cache_path}")
cache_s = pickle.dumps(_data_cache, pickle.HIGHEST_PROTOCOL)
f.write(zlib.compress(cache_s))
logger.info("Finished!")
return _data_cache
def pre_filter_data(data, filter_thresh, connectivity=3, down_sampling=False):
""" For object-based segmentation tasks.
Pre-compute connected components and remove small objects
"""
_pre_filter_cache = None
cache_path = DATA_ROOT / ("pre-filter.pkl.gz" if not down_sampling else "pre-filter_ds.pkl.gz")
if cache_path.exists():
logger.info(f"Loading pre-filter cache from {cache_path}")
with cache_path.open("rb") as f:
data = zlib.decompress(f.read())
_pre_filter_cache = pickle.loads(data)
logger.info("Finished!")
return _pre_filter_cache
_pre_filter_cache = {}
for pid in data:
mask = data[pid]["lab"]
struct = ndi.generate_binary_structure(3, connectivity)
labeled, n_obj = ndi.label(mask, struct)
slices = ndi.find_objects(labeled)
obj_list = []
for i, sli in enumerate(slices):
patch = labeled[sli]
z, y, x = np.where(patch == i + 1)
if z.shape[0] < filter_thresh:
patch[z, y, x] = 0
else:
obj_list.append(np.stack((z, y, x), axis=1))
better_label = np.clip(labeled, 0, 1)
_pre_filter_cache[pid] = {"lab": better_label,
"obj_list": obj_list}
with cache_path.open("wb") as f:
logger.info(f"Saving pre-filter cache to {cache_path}")
cache_s = pickle.dumps(_pre_filter_cache, pickle.HIGHEST_PROTOCOL)
f.write(zlib.compress(cache_s))
logger.info("Finished!")
return _pre_filter_cache
def load_split(set_key, test_fold):
if set_key in ["train", "val", "eval"]:
fold_path = DATA_ROOT / "split.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
val_split = folds.loc[folds.split == test_fold]
if set_key != "train":
return val_split
train_folds = list(range(5))
train_folds.remove(test_fold)
train_split = folds.loc[folds.split.isin(train_folds)]
return train_split
elif set_key == "test":
fold_path = DATA_ROOT / "split_test.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
test_split = folds.loc[folds.split == 0]
return test_split
elif set_key == "extra": # The dataset with 45 cases of 15 patients
fold_path = DATA_ROOT / "split_extra.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
test_split = folds.loc[folds.split == 0]
return test_split
else:
raise ValueError(f"`set_key` supports [train|val|test|extra], got {set_key}")
def filter_tiny_nf(mask):
struct2 = ndi.generate_binary_structure(2, 1)
for i in range(mask.shape[0]):
res, n_obj = ndi.label(mask[i], struct2)
size = np.bincount(res.flat)
for j in np.where(size <= 2)[0]:
mask[i][res == j] = 0
struct3 = ndi.generate_binary_structure(3, 2)
res, n_obj = ndi.label(mask, struct3)
size = np.bincount(res.flat)
for i in np.where(size <= 5)[0]:
mask[res == i] = 0
return mask
def slim_labels(data, logger):
slim_labels_path = DATA_ROOT / "slim_labels.pkl.gz"
if slim_labels_path.exists():
logger.info(f"Loading slimmed label cache from {slim_labels_path}")
with slim_labels_path.open("rb") as f:
new_labels = pickle.loads(zlib.decompress(f.read()))
for i in data:
data[i]['slim'] = new_labels[i]
logger.info("Finished!")
else:
new_labels = {}
logger.info(f"Saving slimmed label cache to {slim_labels_path}")
for i, item in data.items():
new_labels[i] = filter_tiny_nf(np.clip(item['lab'], 0, 1).copy())
data[i]['slim'] = new_labels[i]
with slim_labels_path.open("wb") as f:
f.write(zlib.compress(pickle.dumps(new_labels, pickle.HIGHEST_PROTOCOL)))
logger.info("Finished!")
return data
def load_test_data_paths():
data_dir = DATA_ROOT / "test_NF"
path_list = list(data_dir.glob("*img.nii.gz"))
dataset = {}
for path in path_list:
pid = int(path.name.split("-")[0])
dataset[pid] = {"img_path": path, "lab_path": path.parent / path.name.replace("img", "mask")}
return dataset
extra_name_mapping = {
"---Abdomen1__20080620-img.nii.gz": 0,
"---Abdomen1__20101129-img.nii.gz": 1,
"---Abdomen1__20130625-img.nii.gz": 2,
"---Airway1__20031216-img.nii.gz": 3,
"---Airway1__20041020-img.nii.gz": 4,
"---Airway1__20060907-img.nii.gz": 5,
"---Airway2__20080707-img.nii.gz": 6,
"---Airway2__20110124-img.nii.gz": 7,
"---Airway2__20130204-img.nii.gz": 8,
"---Back1__20070330-img.nii.gz": 9,
"---Back1__20081117-img.nii.gz": 10,
"---Back1__20100323-img.nii.gz": 11,
"---Brachial-plexus1__20130205-img.nii.gz": 12,
"---Br-plexus1__20120223-img.nii.gz": 13,
"---Br-plexus1__20120625-img.nii.gz": 14,
"---Chest2__20011227-img.nii.gz": 15,
"---Chest2__20050914-img.nii.gz": 16,
"---Chest2__20080918-img.nii.gz": 17,
"---Chest3__20081222-img.nii.gz": 18,
"---Chest3__20110602-img.nii.gz": 19,
"---Chest3__20131122-img.nii.gz": 20,
"---Face1__20100719-img.nii.gz": 21,
"---Face1__20110418-img.nii.gz": 22,
"---Face1__20120924-img.nii.gz": 23,
"---Leg1__20080714-img.nii.gz": 24,
"---Leg1__20100726-img.nii.gz": 25,
"---Leg1__20110228-img.nii.gz": 26,
"---Neck1__20020726-img.nii.gz": 27,
"---Neck1__20040315-img.nii.gz": 28,
"---Neck1__20050527-img.nii.gz": 29,
"---Orbit1__20030225-img.nii.gz": 30,
"---Orbit1__20050217-img.nii.gz": 31,
"---Orbit1__20061016-img.nii.gz": 32,
"---Orbit2__20090403-img.nii.gz": 33,
"---Orbit2__20121018-img.nii.gz": 34,
"---Orbit2__20140520-img.nii.gz": 35,
"---Pelvis1__20030916-img.nii.gz": 36,
"---Pelvis1__20060109-img.nii.gz": 37,
"---Pelvis1__20100726-img.nii.gz": 38,
"---Pelvis2__20090114-img.nii.gz": 39,
"---Pelvis2__20100112-img.nii.gz": 40,
"---Pelvis2__20120423-img.nii.gz": 41,
"---Thigh1__20071019-img.nii.gz": 42,
"---Thigh1__20100712-img.nii.gz": 43,
"---Thigh1__20120106-img.nii.gz": 44,
}
def load_extra_data_paths():
data_dir = DATA_ROOT / "NCI_NF1_InaLabeled"
path_list = list(data_dir.glob("*img.nii.gz"))
dataset = {}
for path in path_list:
pid = extra_name_mapping[path.name]
dataset[pid] = {"img_path": path, "lab_path": path.parent / path.name.replace("img", "mask")}
return dataset
def load_box_csv():
box_file = DATA_ROOT / "nf_box.csv"
box_df = pd.read_csv(box_file)
return box_df
| 38.365854
| 101
| 0.612297
|
import pickle
import zlib
from pathlib import Path
import nibabel as nib
import numpy as np
import pandas as pd
import scipy.ndimage as ndi
import tqdm
ROOT = Path(__file__).parent.parent.parent
DATA_ROOT = ROOT / "data/NF"
def read_nii(file_name, out_dtype=np.int16, special=False, only_header=False):
nib_vol = nib.load(str(file_name))
vh = nib_vol.header
if only_header:
return vh
affine = vh.get_best_affine()
trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
data = nib_vol.get_fdata().astype(out_dtype).transpose(*trans[::-1])
if special:
data = np.flip(data, axis=2)
if affine[0, trans[0]] > 0:
data = np.flip(data, axis=2)
if affine[1, trans[1]] > 0:
data = np.flip(data, axis=1)
if affine[2, trans[2]] < 0:
data = np.flip(data, axis=0)
return vh, data
def write_nii(data, header, out_path, out_dtype=np.int16, special=False, affine=None):
if header is not None:
affine = header.get_best_affine()
trans = np.argmax(np.abs(affine[:3, :3]), axis=1)
trans_bk = [np.argwhere(np.array(trans[::-1]) == i)[0][0] for i in range(3)]
if special:
data = np.flip(data, axis=2)
if affine[0, trans[0]] > 0:
data = np.flip(data, axis=2)
if affine[1, trans[1]] > 0:
data = np.flip(data, axis=1)
if affine[2, trans[2]] < 0:
data = np.flip(data, axis=0)
out_image = np.transpose(data, trans_bk).astype(out_dtype)
if header is None and affine is not None:
out = nib.Nifti1Image(out_image, affine=affine)
else:
out = nib.Nifti1Image(out_image, affine=None, header=header)
nib.save(out, str(out_path))
def load_data(logger):
data_dir = DATA_ROOT / "nii_NF"
path_list = list(data_dir.glob("volume*"))
logger.info(f"Loading data ({len(path_list)} examples) ...")
cache_path = DATA_ROOT / "cache.pkl.gz"
if cache_path.exists():
logger.info(f"Loading data cache from {cache_path}")
with cache_path.open("rb") as f:
data = zlib.decompress(f.read())
_data_cache = pickle.loads(data)
logger.info("Finished!")
return _data_cache
_data_cache = {}
for path in tqdm.tqdm(path_list):
pid = path.name.split(".")[0].split("-")[-1]
header, volume = read_nii(path)
la_path = path.parent / path.name.replace("volume", "segmentation")
_, label = read_nii(la_path)
assert volume.shape == label.shape, f"{volume.shape} vs {label.shape}"
_data_cache[int(pid)] = {"im_path": path.absolute(),
"la_path": la_path.absolute(),
"img": volume,
"lab": label.astype(np.uint8),
"pos": np.stack(np.where(label > 0), axis=1),
"meta": header,
"lab_rng": np.unique(label)}
with cache_path.open("wb") as f:
logger.info(f"Saving data cache to {cache_path}")
cache_s = pickle.dumps(_data_cache, pickle.HIGHEST_PROTOCOL)
f.write(zlib.compress(cache_s))
logger.info("Finished!")
return _data_cache
def pre_filter_data(data, filter_thresh, connectivity=3, down_sampling=False):
_pre_filter_cache = None
cache_path = DATA_ROOT / ("pre-filter.pkl.gz" if not down_sampling else "pre-filter_ds.pkl.gz")
if cache_path.exists():
logger.info(f"Loading pre-filter cache from {cache_path}")
with cache_path.open("rb") as f:
data = zlib.decompress(f.read())
_pre_filter_cache = pickle.loads(data)
logger.info("Finished!")
return _pre_filter_cache
_pre_filter_cache = {}
for pid in data:
mask = data[pid]["lab"]
struct = ndi.generate_binary_structure(3, connectivity)
labeled, n_obj = ndi.label(mask, struct)
slices = ndi.find_objects(labeled)
obj_list = []
for i, sli in enumerate(slices):
patch = labeled[sli]
z, y, x = np.where(patch == i + 1)
if z.shape[0] < filter_thresh:
patch[z, y, x] = 0
else:
obj_list.append(np.stack((z, y, x), axis=1))
better_label = np.clip(labeled, 0, 1)
_pre_filter_cache[pid] = {"lab": better_label,
"obj_list": obj_list}
with cache_path.open("wb") as f:
logger.info(f"Saving pre-filter cache to {cache_path}")
cache_s = pickle.dumps(_pre_filter_cache, pickle.HIGHEST_PROTOCOL)
f.write(zlib.compress(cache_s))
logger.info("Finished!")
return _pre_filter_cache
def load_split(set_key, test_fold):
if set_key in ["train", "val", "eval"]:
fold_path = DATA_ROOT / "split.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
val_split = folds.loc[folds.split == test_fold]
if set_key != "train":
return val_split
train_folds = list(range(5))
train_folds.remove(test_fold)
train_split = folds.loc[folds.split.isin(train_folds)]
return train_split
elif set_key == "test":
fold_path = DATA_ROOT / "split_test.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
test_split = folds.loc[folds.split == 0]
return test_split
elif set_key == "extra":
fold_path = DATA_ROOT / "split_extra.csv"
folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)
test_split = folds.loc[folds.split == 0]
return test_split
else:
raise ValueError(f"`set_key` supports [train|val|test|extra], got {set_key}")
def filter_tiny_nf(mask):
struct2 = ndi.generate_binary_structure(2, 1)
for i in range(mask.shape[0]):
res, n_obj = ndi.label(mask[i], struct2)
size = np.bincount(res.flat)
for j in np.where(size <= 2)[0]:
mask[i][res == j] = 0
struct3 = ndi.generate_binary_structure(3, 2)
res, n_obj = ndi.label(mask, struct3)
size = np.bincount(res.flat)
for i in np.where(size <= 5)[0]:
mask[res == i] = 0
return mask
def slim_labels(data, logger):
slim_labels_path = DATA_ROOT / "slim_labels.pkl.gz"
if slim_labels_path.exists():
logger.info(f"Loading slimmed label cache from {slim_labels_path}")
with slim_labels_path.open("rb") as f:
new_labels = pickle.loads(zlib.decompress(f.read()))
for i in data:
data[i]['slim'] = new_labels[i]
logger.info("Finished!")
else:
new_labels = {}
logger.info(f"Saving slimmed label cache to {slim_labels_path}")
for i, item in data.items():
new_labels[i] = filter_tiny_nf(np.clip(item['lab'], 0, 1).copy())
data[i]['slim'] = new_labels[i]
with slim_labels_path.open("wb") as f:
f.write(zlib.compress(pickle.dumps(new_labels, pickle.HIGHEST_PROTOCOL)))
logger.info("Finished!")
return data
def load_test_data_paths():
data_dir = DATA_ROOT / "test_NF"
path_list = list(data_dir.glob("*img.nii.gz"))
dataset = {}
for path in path_list:
pid = int(path.name.split("-")[0])
dataset[pid] = {"img_path": path, "lab_path": path.parent / path.name.replace("img", "mask")}
return dataset
extra_name_mapping = {
"---Abdomen1__20080620-img.nii.gz": 0,
"---Abdomen1__20101129-img.nii.gz": 1,
"---Abdomen1__20130625-img.nii.gz": 2,
"---Airway1__20031216-img.nii.gz": 3,
"---Airway1__20041020-img.nii.gz": 4,
"---Airway1__20060907-img.nii.gz": 5,
"---Airway2__20080707-img.nii.gz": 6,
"---Airway2__20110124-img.nii.gz": 7,
"---Airway2__20130204-img.nii.gz": 8,
"---Back1__20070330-img.nii.gz": 9,
"---Back1__20081117-img.nii.gz": 10,
"---Back1__20100323-img.nii.gz": 11,
"---Brachial-plexus1__20130205-img.nii.gz": 12,
"---Br-plexus1__20120223-img.nii.gz": 13,
"---Br-plexus1__20120625-img.nii.gz": 14,
"---Chest2__20011227-img.nii.gz": 15,
"---Chest2__20050914-img.nii.gz": 16,
"---Chest2__20080918-img.nii.gz": 17,
"---Chest3__20081222-img.nii.gz": 18,
"---Chest3__20110602-img.nii.gz": 19,
"---Chest3__20131122-img.nii.gz": 20,
"---Face1__20100719-img.nii.gz": 21,
"---Face1__20110418-img.nii.gz": 22,
"---Face1__20120924-img.nii.gz": 23,
"---Leg1__20080714-img.nii.gz": 24,
"---Leg1__20100726-img.nii.gz": 25,
"---Leg1__20110228-img.nii.gz": 26,
"---Neck1__20020726-img.nii.gz": 27,
"---Neck1__20040315-img.nii.gz": 28,
"---Neck1__20050527-img.nii.gz": 29,
"---Orbit1__20030225-img.nii.gz": 30,
"---Orbit1__20050217-img.nii.gz": 31,
"---Orbit1__20061016-img.nii.gz": 32,
"---Orbit2__20090403-img.nii.gz": 33,
"---Orbit2__20121018-img.nii.gz": 34,
"---Orbit2__20140520-img.nii.gz": 35,
"---Pelvis1__20030916-img.nii.gz": 36,
"---Pelvis1__20060109-img.nii.gz": 37,
"---Pelvis1__20100726-img.nii.gz": 38,
"---Pelvis2__20090114-img.nii.gz": 39,
"---Pelvis2__20100112-img.nii.gz": 40,
"---Pelvis2__20120423-img.nii.gz": 41,
"---Thigh1__20071019-img.nii.gz": 42,
"---Thigh1__20100712-img.nii.gz": 43,
"---Thigh1__20120106-img.nii.gz": 44,
}
def load_extra_data_paths():
data_dir = DATA_ROOT / "NCI_NF1_InaLabeled"
path_list = list(data_dir.glob("*img.nii.gz"))
dataset = {}
for path in path_list:
pid = extra_name_mapping[path.name]
dataset[pid] = {"img_path": path, "lab_path": path.parent / path.name.replace("img", "mask")}
return dataset
def load_box_csv():
box_file = DATA_ROOT / "nf_box.csv"
box_df = pd.read_csv(box_file)
return box_df
| true
| true
|
1c4788a7fec1e92cf4988f8cd63897bc0a883269
| 1,288
|
py
|
Python
|
setup.py
|
AbhiProjects/TagLib
|
214139259157a7b3ec3f2fb7b342411a33b85839
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
AbhiProjects/TagLib
|
214139259157a7b3ec3f2fb7b342411a33b85839
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
AbhiProjects/TagLib
|
214139259157a7b3ec3f2fb7b342411a33b85839
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Setup script for taglib"""
import sys
if sys.hexversion < 0x02060000:
print >> sys.stderr, 'Sorry, Python 2.6 is required.'
sys.exit(1)
from distutils.core import setup
sys.dont_write_bytecode = True # don't leave turds
from taglib import __version__
def main():
setup(name='taglib',
author='Chris Jones',
author_email='cjones@gruntle.org',
url='http://code.google.com/p/python-taglib/',
description='Library to manipulate audio file metadata',
license='BSD',
version=__version__,
py_modules=['taglib'],
scripts=['scripts/tagdump'],
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Software Development :: Libraries :: Python Modules'])
return 0
if __name__ == '__main__':
sys.exit(main())
| 29.953488
| 78
| 0.585404
|
import sys
if sys.hexversion < 0x02060000:
print >> sys.stderr, 'Sorry, Python 2.6 is required.'
sys.exit(1)
from distutils.core import setup
sys.dont_write_bytecode = True
from taglib import __version__
def main():
setup(name='taglib',
author='Chris Jones',
author_email='cjones@gruntle.org',
url='http://code.google.com/p/python-taglib/',
description='Library to manipulate audio file metadata',
license='BSD',
version=__version__,
py_modules=['taglib'],
scripts=['scripts/tagdump'],
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Software Development :: Libraries :: Python Modules'])
return 0
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
1c478921c64292aa5b2d3adeb81064377fca26e0
| 1,101
|
py
|
Python
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/schedule_policy.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/schedule_policy.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/schedule_policy.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SchedulePolicy(Model):
"""Base class for backup schedule.
:param schedule_policy_type: Polymorphic Discriminator
:type schedule_policy_type: str
"""
_validation = {
'schedule_policy_type': {'required': True},
}
_attribute_map = {
'schedule_policy_type': {'key': 'schedulePolicyType', 'type': 'str'},
}
_subtype_map = {
'schedule_policy_type': {'LongTermSchedulePolicy': 'LongTermSchedulePolicy', 'SimpleSchedulePolicy': 'SimpleSchedulePolicy'}
}
def __init__(self):
self.schedule_policy_type = None
| 30.583333
| 132
| 0.608538
|
from msrest.serialization import Model
class SchedulePolicy(Model):
_validation = {
'schedule_policy_type': {'required': True},
}
_attribute_map = {
'schedule_policy_type': {'key': 'schedulePolicyType', 'type': 'str'},
}
_subtype_map = {
'schedule_policy_type': {'LongTermSchedulePolicy': 'LongTermSchedulePolicy', 'SimpleSchedulePolicy': 'SimpleSchedulePolicy'}
}
def __init__(self):
self.schedule_policy_type = None
| true
| true
|
1c478b7837a4774911d31634003c88b12e9c37bc
| 3,439
|
py
|
Python
|
assets/winc_firmware_upgrade/firmware/handler_search.py
|
rashedtalukder/cryptoauth_trustplatform_designsuite
|
6b42c64071a9fb5dc9894bfedbbfabbcfb7961c1
|
[
"MIT"
] | 11
|
2019-12-03T14:18:38.000Z
|
2021-08-25T16:41:27.000Z
|
assets/winc_firmware_upgrade/firmware/handler_search.py
|
rashedtalukder/cryptoauth_trustplatform_designsuite
|
6b42c64071a9fb5dc9894bfedbbfabbcfb7961c1
|
[
"MIT"
] | 9
|
2020-02-13T09:07:42.000Z
|
2022-03-18T18:29:24.000Z
|
assets/winc_firmware_upgrade/firmware/handler_search.py
|
rashedtalukder/cryptoauth_trustplatform_designsuite
|
6b42c64071a9fb5dc9894bfedbbfabbcfb7961c1
|
[
"MIT"
] | 10
|
2020-04-28T10:35:48.000Z
|
2021-11-03T23:03:30.000Z
|
'''
Simple program to get a hint where simple programs might be installed by chasing thru registry,
does not deal with things like word which are beyonf complicated.
Pass in extention to check and a hint at what program you want.
Returns 0 if found.
2 for parm error
1 for not found
Eg
C:\work_repos\>python handler_search.py cpP studio
""C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\IDE\devenv.exe"
C:\work_repos\>python handler_search.py cpP atmelstudio
"C:\Program Files (x86)\Atmel\Studio\7.0\atmelstudio.exe"
'''
import sys
import os
import winreg
roots_hives = {
"HKEY_CLASSES_ROOT": winreg.HKEY_CLASSES_ROOT,
"HKEY_CURRENT_USER": winreg.HKEY_CURRENT_USER,
"HKEY_LOCAL_MACHINE": winreg.HKEY_LOCAL_MACHINE,
"HKEY_USERS": winreg.HKEY_USERS,
"HKEY_PERFORMANCE_DATA": winreg.HKEY_PERFORMANCE_DATA,
"HKEY_CURRENT_CONFIG": winreg.HKEY_CURRENT_CONFIG,
"HKEY_DYN_DATA": winreg.HKEY_DYN_DATA
}
def join(path, *paths):
path = path.strip('/\\')
paths = map(lambda x: x.strip('/\\'), paths)
paths = list(paths)
result = os.path.join(path, *paths)
result = result.replace('/', '\\')
return result
def parse_key(key):
key = key.upper()
aparts = key.split('\\')
parts = list(filter(None, aparts))
root_hive_name = parts[0]
root_hive = roots_hives.get(root_hive_name)
partial_key = '\\'.join(parts[1:])
if not root_hive:
raise Exception('root hive "{}" was not found'.format(root_hive_name))
return partial_key, root_hive
def get_all_values(key):
data = {}
data[0] = [[''],['']]
try:
partial_key, root_hive = parse_key(key)
with winreg.ConnectRegistry(None, root_hive) as reg:
with winreg.OpenKey(reg, partial_key) as key_object:
i = 0
while True:
try:
ret = winreg.EnumValue(key_object, i)
if ret[2] == winreg.REG_EXPAND_SZ:
if ret[0] == '':
data[i] = ["(Default)", expandvars(ret[1])]
else:
data[i] = [ret[0], expandvars(ret[1])]
else:
if ret[0] == '':
data[i] = ["(Default)", ret[1]]
else:
data[i] = [ret[0], ret[1]]
except WindowsError:
break
i += 1
key_object.Close()
except:
pass
return data
def main(argv=None):
argv = sys.argv
args = argv[1:]
key = r'HKEY_CLASSES_ROOT\.' + args[0] + '\\OpenWithProgids'
pkey = r''
data = get_all_values(key)
for x in range(0, len(data)):
strdatax = str(data[x][0])
if args[1].upper() in strdatax.upper():
pkey = r'HKEY_CLASSES_ROOT\\' + strdatax + '\\shell\\open\\command'
break
if str(data[0][1]) == '[\'\']':
print ("Assoc not found")
sys.exit(1)
data = get_all_values(pkey)
for x in range(0, len(data)):
if ".EXE" in str(data[x][1]).upper():
exeind = str(data[x][1]).upper().find('.EXE')
print ('"' + str(data[x][1])[:exeind+4] + '"')
sys.exit(0)
print ("Handler not found")
sys.exit(1)
if __name__ == "__main__":
main()
| 27.95935
| 95
| 0.549578
|
import sys
import os
import winreg
roots_hives = {
"HKEY_CLASSES_ROOT": winreg.HKEY_CLASSES_ROOT,
"HKEY_CURRENT_USER": winreg.HKEY_CURRENT_USER,
"HKEY_LOCAL_MACHINE": winreg.HKEY_LOCAL_MACHINE,
"HKEY_USERS": winreg.HKEY_USERS,
"HKEY_PERFORMANCE_DATA": winreg.HKEY_PERFORMANCE_DATA,
"HKEY_CURRENT_CONFIG": winreg.HKEY_CURRENT_CONFIG,
"HKEY_DYN_DATA": winreg.HKEY_DYN_DATA
}
def join(path, *paths):
path = path.strip('/\\')
paths = map(lambda x: x.strip('/\\'), paths)
paths = list(paths)
result = os.path.join(path, *paths)
result = result.replace('/', '\\')
return result
def parse_key(key):
key = key.upper()
aparts = key.split('\\')
parts = list(filter(None, aparts))
root_hive_name = parts[0]
root_hive = roots_hives.get(root_hive_name)
partial_key = '\\'.join(parts[1:])
if not root_hive:
raise Exception('root hive "{}" was not found'.format(root_hive_name))
return partial_key, root_hive
def get_all_values(key):
data = {}
data[0] = [[''],['']]
try:
partial_key, root_hive = parse_key(key)
with winreg.ConnectRegistry(None, root_hive) as reg:
with winreg.OpenKey(reg, partial_key) as key_object:
i = 0
while True:
try:
ret = winreg.EnumValue(key_object, i)
if ret[2] == winreg.REG_EXPAND_SZ:
if ret[0] == '':
data[i] = ["(Default)", expandvars(ret[1])]
else:
data[i] = [ret[0], expandvars(ret[1])]
else:
if ret[0] == '':
data[i] = ["(Default)", ret[1]]
else:
data[i] = [ret[0], ret[1]]
except WindowsError:
break
i += 1
key_object.Close()
except:
pass
return data
def main(argv=None):
argv = sys.argv
args = argv[1:]
key = r'HKEY_CLASSES_ROOT\.' + args[0] + '\\OpenWithProgids'
pkey = r''
data = get_all_values(key)
for x in range(0, len(data)):
strdatax = str(data[x][0])
if args[1].upper() in strdatax.upper():
pkey = r'HKEY_CLASSES_ROOT\\' + strdatax + '\\shell\\open\\command'
break
if str(data[0][1]) == '[\'\']':
print ("Assoc not found")
sys.exit(1)
data = get_all_values(pkey)
for x in range(0, len(data)):
if ".EXE" in str(data[x][1]).upper():
exeind = str(data[x][1]).upper().find('.EXE')
print ('"' + str(data[x][1])[:exeind+4] + '"')
sys.exit(0)
print ("Handler not found")
sys.exit(1)
if __name__ == "__main__":
main()
| true
| true
|
1c478b7de55a29c23c21c47bbecf9e11a14c3e20
| 7,684
|
py
|
Python
|
test/test_grapher.py
|
leehyoeun96/rosprofiler
|
c7bee4e98d8417cd3e2a8ef246b7930c97c74dc5
|
[
"Apache-2.0"
] | 6
|
2017-11-18T05:59:22.000Z
|
2022-01-01T11:56:00.000Z
|
test/test_grapher.py
|
leehyoeun96/rosprofiler
|
c7bee4e98d8417cd3e2a8ef246b7930c97c74dc5
|
[
"Apache-2.0"
] | 3
|
2015-04-11T20:04:24.000Z
|
2018-06-19T21:55:39.000Z
|
test/test_grapher.py
|
leehyoeun96/rosprofiler
|
c7bee4e98d8417cd3e2a8ef246b7930c97c74dc5
|
[
"Apache-2.0"
] | 15
|
2017-11-19T05:03:29.000Z
|
2021-03-15T15:26:37.000Z
|
#!/usr/bin/env python
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import time
import rospy
import rostest
from ros_topology_msgs.msg import *
PKG = 'rosprofiler'
NAME = 'test_grapher'
# TODO: Check services
EXPECTED_NODES = dict()
talker1 = Node(name="/talker1")
talker1.publishes.append("/chatter")
talker1.publishes.append("/rosout")
talker1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
talker2 = Node(name="/talker2")
talker2.publishes.append("/chatter")
talker2.publishes.append("/rosout")
talker2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
listener1 = Node(name="/listener1")
listener1.publishes.append("/rosout")
listener1.subscribes.append("/chatter")
listener1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
listener2 = Node(name="/listener2")
listener2.publishes.append("/rosout")
listener2.subscribes.append("/chatter")
listener2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
rosout = Node(name="/rosout")
rosout.publishes.append("/rosout_agg")
rosout.subscribes.append("/rosout")
rosout.connections.append(Connection(destination='/talker1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/talker2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/test_grapher',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/rosgrapher',topic='/rosout',direction=1,transport="TCPROS"))
grapher = Node(name="/rosgrapher")
grapher.publishes.append("/rosout")
grapher.publishes.append("/topology")
grapher.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
grapher.connections.append(Connection(destination='/'+NAME,topic='/topology',direction=2,transport="TCPROS"))
tester = Node(name="/test_grapher")
tester.publishes.append("/rosout")
tester.subscribes.append("/topology")
tester.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
tester.connections.append(Connection(destination='/rosgrapher',topic='/topology',direction=1,transport="TCPROS"))
EXPECTED_NODES['/talker1'] = talker1
EXPECTED_NODES['/talker2'] = talker2
EXPECTED_NODES['/listener1'] = listener1
EXPECTED_NODES['/listener2'] = listener2
EXPECTED_NODES['/rosout'] = rosout
EXPECTED_NODES['/rosgrapher'] = grapher
EXPECTED_NODES['/'+NAME] = tester
t_chatter = Topic(name="/chatter", type="std_msgs/String")
t_rosout = Topic(name="/rosout", type="rosgraph_msgs/Log")
t_rosout_agg = Topic(name="/rosout_agg", type="rosgraph_msgs/Log")
t_topology = Topic(name="/topology", type="ros_topology_msgs/Graph")
EXPECTED_TOPICS = [t_chatter, t_rosout, t_rosout_agg, t_topology]
class TestGrapher(unittest.TestCase):
    """Integration test comparing the graph published on /topology by
    rosgrapher against the expected topology declared at module level."""

    def __init__(self, *args):
        super(TestGrapher, self).__init__(*args)
        self.start_time = None  # reserved for timeout bookkeeping (unused)
        self.graph = Graph()

    def setUp(self):
        rospy.init_node(NAME)
        rospy.Subscriber('/topology', Graph, self.callback)
        self.wait_for_data(10.0)

    def callback(self, data):
        # Latest snapshot wins; the test methods read self.graph afterwards.
        self.graph = data

    def wait_for_data(self, duration):
        """Block until the received graph is at least as large as expected,
        or until *duration* seconds of ROS time have elapsed."""
        deadline = rospy.get_rostime() + rospy.Duration(duration)
        while not rospy.is_shutdown() and not rospy.get_rostime() > deadline:
            have_nodes = len(self.graph.nodes) >= len(EXPECTED_NODES)
            have_topics = len(self.graph.topics) >= len(EXPECTED_TOPICS)
            if have_nodes and have_topics:
                return
            rospy.sleep(1.0)

    def test_nodes_publishers(self):
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            expected = EXPECTED_NODES[node.name]
            assert set(node.publishes) == set(expected.publishes), "%s.publishes=%s, but should be %s"%(node.name,node.publishes,expected.publishes)

    def test_nodes_subscribers(self):
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            expected = EXPECTED_NODES[node.name]
            assert set(node.subscribes) == set(expected.subscribes), "%s.subscribes=%s, but should be %s"%(node.name,node.subscribes,expected.subscribes)

    def test_nodes_connections_present(self):
        # Every reported connection must appear in the expected set.
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            expected = EXPECTED_NODES[node.name]
            for connection in node.connections:
                assert connection in expected.connections, "Node %s has extra connection %s"%(node.name, connection)

    def test_nodes_connections_missing(self):
        # Every expected connection must actually be reported.
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES, "%s not found!"%node.name
            expected = EXPECTED_NODES[node.name]
            for connection in expected.connections:
                assert connection in node.connections, "Node %s expected to find missing connection %s"%(node.name, connection)

    def test_nodes_present(self):
        for node in self.graph.nodes:
            assert node.name in EXPECTED_NODES.keys(), "Found extra node '%s'"%node.name

    def test_nodes_missing(self):
        reported = [n.name for n in self.graph.nodes]
        for node_name in EXPECTED_NODES.keys():
            assert node_name in reported, "Expected to find missing node '%s'"%node_name

    def test_topics_present(self):
        for topic in self.graph.topics:
            assert topic in EXPECTED_TOPICS, "Found extra topic '%s'"%topic

    def test_topics_missing(self):
        for topic in EXPECTED_TOPICS:
            assert topic in self.graph.topics, "Expected to find missing topic '%s'"%topic
if __name__ == '__main__':
    # Launch under rostest so results are collected by the ROS test runner.
    rostest.rosrun(PKG, NAME, TestGrapher, sys.argv)
| 49.574194
| 153
| 0.730739
|
import sys
import unittest
import time
import rospy
import rostest
from ros_topology_msgs.msg import *
PKG = 'rosprofiler'
NAME = 'test_grapher'
EXPECTED_NODES = dict()
talker1 = Node(name="/talker1")
talker1.publishes.append("/chatter")
talker1.publishes.append("/rosout")
talker1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker1.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
talker2 = Node(name="/talker2")
talker2.publishes.append("/chatter")
talker2.publishes.append("/rosout")
talker2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener1',topic='/chatter',direction=2,transport="TCPROS"))
talker2.connections.append(Connection(destination='/listener2',topic='/chatter',direction=2,transport="TCPROS"))
listener1 = Node(name="/listener1")
listener1.publishes.append("/rosout")
listener1.subscribes.append("/chatter")
listener1.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener1.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
listener2 = Node(name="/listener2")
listener2.publishes.append("/rosout")
listener2.subscribes.append("/chatter")
listener2.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker1',topic='/chatter',direction=1,transport="TCPROS"))
listener2.connections.append(Connection(destination='/talker2',topic='/chatter',direction=1,transport="TCPROS"))
rosout = Node(name="/rosout")
rosout.publishes.append("/rosout_agg")
rosout.subscribes.append("/rosout")
rosout.connections.append(Connection(destination='/talker1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/talker2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener1',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/listener2',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/test_grapher',topic='/rosout',direction=1,transport="TCPROS"))
rosout.connections.append(Connection(destination='/rosgrapher',topic='/rosout',direction=1,transport="TCPROS"))
grapher = Node(name="/rosgrapher")
grapher.publishes.append("/rosout")
grapher.publishes.append("/topology")
grapher.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
grapher.connections.append(Connection(destination='/'+NAME,topic='/topology',direction=2,transport="TCPROS"))
tester = Node(name="/test_grapher")
tester.publishes.append("/rosout")
tester.subscribes.append("/topology")
tester.connections.append(Connection(destination='/rosout',topic='/rosout',direction=2,transport="TCPROS"))
tester.connections.append(Connection(destination='/rosgrapher',topic='/topology',direction=1,transport="TCPROS"))
EXPECTED_NODES['/talker1'] = talker1
EXPECTED_NODES['/talker2'] = talker2
EXPECTED_NODES['/listener1'] = listener1
EXPECTED_NODES['/listener2'] = listener2
EXPECTED_NODES['/rosout'] = rosout
EXPECTED_NODES['/rosgrapher'] = grapher
EXPECTED_NODES['/'+NAME] = tester
t_chatter = Topic(name="/chatter", type="std_msgs/String")
t_rosout = Topic(name="/rosout", type="rosgraph_msgs/Log")
t_rosout_agg = Topic(name="/rosout_agg", type="rosgraph_msgs/Log")
t_topology = Topic(name="/topology", type="ros_topology_msgs/Graph")
EXPECTED_TOPICS = [t_chatter, t_rosout, t_rosout_agg, t_topology]
class TestGrapher(unittest.TestCase):
def __init__(self, *args):
super(TestGrapher, self).__init__(*args)
self.start_time = None
self.graph = Graph()
def setUp(self):
rospy.init_node(NAME)
rospy.Subscriber('/topology', Graph, self.callback)
self.wait_for_data(10.0)
def callback(self, data):
self.graph = data
def wait_for_data(self, duration):
start_time = rospy.get_rostime()
while not rospy.is_shutdown() and not (rospy.get_rostime() > (start_time + rospy.Duration(duration))):
if len(self.graph.nodes) >= len(EXPECTED_NODES) and len(self.graph.topics) >= len(EXPECTED_TOPICS):
return
rospy.sleep(1.0)
def test_nodes_publishers(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
assert set(node.publishes) == set(testnode.publishes), "%s.publishes=%s, but should be %s"%(node.name,node.publishes,testnode.publishes)
def test_nodes_subscribers(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
assert set(node.subscribes) == set(testnode.subscribes), "%s.subscribes=%s, but should be %s"%(node.name,node.subscribes,testnode.subscribes)
def test_nodes_connections_present(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
for connection in node.connections:
assert connection in testnode.connections, "Node %s has extra connection %s"%(node.name, connection)
def test_nodes_connections_missing(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES, "%s not found!"%node.name
testnode = EXPECTED_NODES[node.name]
for connection in testnode.connections:
assert connection in node.connections, "Node %s expected to find missing connection %s"%(node.name, connection)
def test_nodes_present(self):
for node in self.graph.nodes:
assert node.name in EXPECTED_NODES.keys(), "Found extra node '%s'"%node.name
def test_nodes_missing(self):
for node_name in EXPECTED_NODES.keys():
assert node_name in [n.name for n in self.graph.nodes], "Expected to find missing node '%s'"%node_name
def test_topics_present(self):
for topic in self.graph.topics:
assert topic in EXPECTED_TOPICS, "Found extra topic '%s'"%topic
def test_topics_missing(self):
for topic in EXPECTED_TOPICS:
assert topic in self.graph.topics, "Expected to find missing topic '%s'"%topic
if __name__ == '__main__':
rostest.rosrun(PKG, NAME, TestGrapher, sys.argv)
| true
| true
|
1c478bdc9499e3ce8182dc63c0c4b8edbd2abeb0
| 12,032
|
py
|
Python
|
stellapy/stellapy_old/stella_read.py
|
SStroteich/stella-1
|
104556a07b9736e7c28e6f1bf2f799384732f38b
|
[
"MIT"
] | 4
|
2021-12-15T08:23:45.000Z
|
2022-02-18T15:14:42.000Z
|
stellapy/stellapy_old/stella_read.py
|
SStroteich/stella-1
|
104556a07b9736e7c28e6f1bf2f799384732f38b
|
[
"MIT"
] | 37
|
2021-07-05T16:41:33.000Z
|
2022-03-21T15:58:05.000Z
|
stellapy/stellapy_old/stella_read.py
|
SStroteich/stella-1
|
104556a07b9736e7c28e6f1bf2f799384732f38b
|
[
"MIT"
] | 7
|
2021-07-05T15:35:55.000Z
|
2022-03-09T09:23:42.000Z
|
import numpy as np
from stella_dirs import *
from scipy.io import netcdf
#plt.rcParams.update({'font.size': 28})
#plt.rcParams['lines.linewidth'] = 2
import tabCompleter
from tabCompleter import *
from plotbox import *
from aux_functions import *
from os import listdir
from netCDF4 import *
import glob
import os.path
# ==============================================================
# Some utils
def _fmt(spec, value):
    # Shared %-style formatting helper for the numeric shortcuts below.
    # (%-formatting is kept so float-to-%d coercion behaves as before.)
    return spec % value

def format1(value):
    """Scientific notation, 3 decimals, e.g. '1.235e+04'."""
    return _fmt("%.3e", value)

def format2(value):
    """Scientific notation, 6 decimals, right-aligned in 14 columns."""
    return _fmt("%14.6e", value)

def format3(value):
    """Fixed point, 2 decimals."""
    return _fmt("%4.2f", value)

def format4(value):
    """Fixed point, 2 decimals, right-aligned in 6 columns."""
    return _fmt("%6.2f", value)

def format6(value):
    """Fixed point, 3 decimals, right-aligned in 7 columns."""
    return _fmt("%7.3f", value)

def format5(value):
    """Scientific notation, 5 decimals."""
    return _fmt("%.5e", value)

def format7(value):
    """Fixed point, 3 decimals, right-aligned in 22 columns."""
    return _fmt("%22.3f", value)

def format8(value):
    """Integer zero-padded to 4 digits."""
    return _fmt("%04d", value)

def format9(value):
    """Fixed point, 5 decimals, right-aligned in 7 columns."""
    return _fmt("%7.5f", value)
# Some utils ended
#===============================================================
def casestr(case=None):
    """Return the basename (without '.in') of the input file for *case*.

    If *case* is a path ending in '.in' the basename is taken directly;
    otherwise the run directory is scanned and exactly one input file is
    expected.  Returns None (with a warning) when the input is ambiguous
    or missing.
    """
    if case.endswith(".in"):
        buff = case.split("/")
        return buff[size(buff)-1].split(".in")[0]
    inputs = inputlist(case)
    if size(inputs) > 1:
        # Ambiguous: the caller must name the input file explicitly.
        # (The original referenced the bare name `exit`, a no-op statement;
        # we warn and return None instead of pretending to abort.)
        print("\nSpecify the input in the case field, more than one input file found:\n")
        print(inputs)
        return None
    if size(inputs) == 1:
        # BUG FIX: was `size(inputlist(case) == 1)` — size() of a bool,
        # which is always truthy and crashed with IndexError on zero inputs.
        return inputs[0].split(".in")[0]
    return None
def inputlist_r(case):
    """Collect '*.in' files in the run directory and one level below it."""
    top = glob.glob(outdir(case) + '/*.in', recursive=True)
    nested = glob.glob(outdir(case) + '/*/*.in', recursive=True)
    return top + nested

def inputlist(case, recursive=False):
    """Return the names of the '.in' input files for *case*.

    With recursive=True, full paths from the run directory and its
    immediate subdirectories; otherwise only bare filenames found
    directly in the run directory (hidden files excluded).
    """
    if recursive:
        return inputlist_r(case=case)
    return [f for f in listdir(outdir(case))
            if f.endswith('.in') and not f.startswith('.')]
def outdir(case=None):
    """Absolute run directory for *case* (strips a trailing '<name>.in')."""
    if not case.endswith(".in"):
        return runsdir() + '/' + case
    parts = case.split("/")
    # Drop the trailing '/<input>.in' component from the given path.
    return runsdir() + '/' + case.replace("/" + parts[size(parts)-1], '')
def geotxtfile(case=None):
    """Full path of the '.geometry' text file that matches *case*."""
    if os.path.isfile(case):
        # *case* is already a path to the '.in' file: swap the extension.
        return case.split('.in')[0] + '.geometry'
    return outdir(case) + '/' + casestr(case) + '.geometry'
def outfile(case=None, quant=None):
    """Full path of the output file for *case* ending in '.<quant>'."""
    if os.path.isfile(case):
        # *case* is already a path to the '.in' file: swap the extension.
        return case.split('.in')[0] + '.' + quant
    return outdir(case) + '/' + casestr(case) + '.' + quant
def infile(case=None):
    """Path to the stella netCDF output ('<case>.out.nc')."""
    return outfile(case, quant='out.nc')

def fluxes_txt(case=None):
    """Path to the ASCII fluxes file ('<case>.fluxes')."""
    return outfile(case, quant='fluxes')
# ==================================================================
# Reading variables in the input *.in file
def torflux(case):
    """Read the 'torflux' value from the stella input file of *case*.

    Assumes a line of the form 'torflux = <value>' in the '.in' file.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(outfile(case, quant='in')) as myfile:
        text = myfile.read()
    return float(text.split('torflux')[1].split('\n')[0].split('=')[1])
# ==================================================================
# Translation of quantities in stella_data module by Michael into
# functions with the run directory ("case") as single argument.
def read_stella_float(case, var):
    """Return a copy of netCDF variable *var* for *case* as a numpy array.

    A missing variable is reported on stdout and a dummy 1-element float
    array is returned instead.  (The original computed a presence flag
    but never returned it, so callers cannot distinguish the dummy from
    real data.)
    """
    # The redundant function-local `import numpy as np` was removed;
    # np is imported at module level.
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    try:
        arr = np.copy(ncfile.variables[var][:])
    except KeyError:
        print('INFO: '+var+' not found in netcdf file')
        arr = np.arange(1, dtype=float)
    finally:
        ncfile.close()  # the original leaked the file handle
    return arr
def read_stella_value(case, var):
    """Return variable *var* from the netCDF output via the netCDF4 reader."""
    # Context manager closes the dataset (the original leaked the handle).
    with Dataset(infile(case), mode='r') as d:
        return d.variables[var][:]
def kx(case):
    """Return (kx, nakx, nakx_mid): the kx grid reordered monotonically.

    stella stores kx as (0, dkx, ..., kx_max, -kx_max, ..., -dkx); this
    moves the negative half in front of the non-negative half.
    """
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    kx_raw = np.copy(ncfile.variables['kx'][:])
    nakx = ncfile.dimensions['kx']
    ncfile.close()  # every netCDF handle in this group was leaked before
    nakx_mid = nakx//2+1  # index of the first negative kx value
    kx = np.concatenate((kx_raw[nakx_mid:], kx_raw[:nakx_mid]))
    return kx, nakx, nakx_mid

def kx_stella(case):
    """kx grid exactly as stored by stella (halves not reordered)."""
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    kx_raw = np.copy(ncfile.variables['kx'][:])
    ncfile.close()
    return kx_raw

def ky(case):
    """Return (ky, naky): the ky grid and its length."""
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    ky = np.copy(ncfile.variables['ky'][:])
    naky = ncfile.dimensions['ky']
    ncfile.close()
    return ky, naky

def zed(case):
    """Return (zed, nzed, iz0): parallel grid, its size, midpoint index."""
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    zed = np.copy(ncfile.variables['zed'][:])
    ncfile.close()
    nzed = zed.size
    iz0 = nzed//2+1
    return zed, nzed, iz0

def time(case):
    """Return (time, ntime).  NOTE: shadows the stdlib `time` module name."""
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    time = np.copy(ncfile.variables['t'][:])
    ncfile.close()
    ntime = time.size
    return time, ntime

def nspec(case):
    """Number of kinetic species in the simulation."""
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    nspec = ncfile.dimensions['species']
    ncfile.close()
    return nspec

def geo(case):
    """Return the geometric coefficient arrays plus the shear `shat`."""
    ncfile = netcdf.netcdf_file(infile(case), 'r')
    bmag = np.copy(ncfile.variables['bmag'][:])
    gradpar = np.copy(ncfile.variables['gradpar'][:])
    gbdrift = np.copy(ncfile.variables['gbdrift'][:])
    gbdrift0 = np.copy(ncfile.variables['gbdrift0'][:])
    cvdrift = np.copy(ncfile.variables['cvdrift'][:])
    cvdrift0 = np.copy(ncfile.variables['cvdrift0'][:])
    gds2 = np.copy(ncfile.variables['gds2'][:])
    gds21 = np.copy(ncfile.variables['gds21'][:])
    gds22 = np.copy(ncfile.variables['gds22'][:])
    ncfile.close()
    # shat is read through the netCDF4 reader, as in the original.
    with Dataset(infile(case), mode='r') as d:
        shat = float(d.variables['shat'][:])
    return bmag, gradpar, gbdrift, gbdrift0, cvdrift, cvdrift0, gds2, gds21, gds22, shat
def phi2_vs_kxky(case):
    """z-averaged |phi|^2 spectrum, in stella's native kx ordering."""
    return read_stella_float(case, 'phi2_vs_kxky')

def pflux_vs_kxky(case):
    """Particle-flux spectrum (netcdf variable 'pflx_kxky')."""
    return read_stella_float(case, 'pflx_kxky')

def vflux_vs_kxky(case):
    """Momentum-flux spectrum (netcdf variable 'vflx_kxky')."""
    return read_stella_float(case, 'vflx_kxky')

def qflux_vs_kxky(case):
    """Heat-flux spectrum (netcdf variable 'qflx_kxky')."""
    return read_stella_float(case, 'qflx_kxky')

def density_vs_kxky(case):
    """Density-fluctuation data (netcdf variable 'density')."""
    return read_stella_float(case, 'density')

def upar_vs_kxky(case):
    """Parallel-flow-fluctuation data (netcdf variable 'upar')."""
    return read_stella_float(case, 'upar')

def temperature_vs_kxky(case):
    """Temperature-fluctuation data (netcdf variable 'temperature')."""
    return read_stella_float(case, 'temperature')

def phi_vs_t(case):
    """Electrostatic potential as a function of (z, kx, ky, t)."""
    return read_stella_float(case, 'phi_vs_t')

def gvmus(case):
    """|g|^2 averaged over kx, ky and z."""
    return read_stella_float(case, 'gvmus')

def gzvs(case):
    """|g|^2 averaged over kx, ky and mu."""
    return read_stella_float(case, 'gzvs')
def jacob(case):
    """Jacobian of the transformation to (rho, alpha, z) coordinates."""
    return read_stella_float(case, 'jacob')

def jtwist(case):
    """jtwist factor of the twist-and-shift boundary condition."""
    return read_stella_value(case, 'jtwist')

def grho(case):
    """Gradient of the normalized radial coordinate rho."""
    return read_stella_float(case, 'grho')

def phi2_stella(case):
    """Space-averaged modulus squared of the electrostatic potential."""
    return read_stella_float(case, 'phi2')

def es_part_flux(case):
    """Time-dependent electrostatic particle flux for each species."""
    return read_stella_float(case, 'es_part_flux')

def es_heat_flux(case):
    """Electrostatic heat flux."""
    return read_stella_float(case, 'es_heat_flux')

def es_mom_flux(case):
    """Electrostatic momentum flux."""
    return read_stella_float(case, 'es_mom_flux')

def es_energy_exchange(case):
    """Electrostatic energy-exchange diagnostic."""
    return read_stella_float(case, 'es_energy_exchange')
def es_part_by_k(case):
    # time-dependent particle flux for each species as a function of (kx,ky)
    # NOTE(review): read_stella_float() returns a single ndarray, so this
    # 2-tuple unpack yields array rows (or raises) rather than a
    # (data, present) pair — looks like a leftover from an older API that
    # returned a presence flag; confirm before relying on this function.
    es_part_by_k_stella, es_part_by_k_present = \
        read_stella_float(case, 'es_part_by_k')
    if es_part_by_k_present is not True:
        # fall back to the alternate netcdf variable name
        es_part_by_k_stella, es_part_by_k_present = \
            read_stella_float(case, 'es_part_flux_by_mode')
    return es_part_by_k_stella, es_part_by_k_present
def es_mom_by_k(case):
    # time-dependent momentum flux for each species as a function of (kx,ky)
    # NOTE(review): same stale (data, present) unpack as es_part_by_k.
    es_mom_by_k_stella, es_mom_by_k_present = \
        read_stella_float(case, 'es_mom_by_k')
    if es_mom_by_k_present is not True:
        es_mom_by_k_stella, es_mom_by_k_present = \
            read_stella_float(case, 'es_mom_flux_by_mode')
    return es_mom_by_k_stella, es_mom_by_k_present
def es_energy_exchange_by_k(case):
    # NOTE(review): same stale (data, present) unpack as es_part_by_k.
    es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \
        read_stella_float(case, 'es_energy_exchange_by_k')
    if es_energy_exchange_by_k_present is not True:
        es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \
            read_stella_float(case, 'es_energy_exchange_by_mode')
    return es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present
def es_energy_exchange_by_ky(case):
    # reads netcdf variable 'es_energy_exchange_by_ky' (single array)
    return read_stella_float(case, 'es_energy_exchange_by_ky')
def vpa(case):
    # parallel velocity grid
    return read_stella_float(case, 'vpa')
def mu(case):
    # mu grid
    return read_stella_float(case, 'mu')
def es_part_sym(case):
    # electrostatic particle flux as function of (vpa,z)
    return read_stella_float(case, 'es_part_sym')
def es_heat_sym(case):
    # electrostatic heat flux as function of (vpa,z)
    return read_stella_float(case, 'es_heat_sym')
def es_mom_sym(case):
    # electrostatic momentum flux as function of (vpa,z)
    # NOTE(review): read_stella_float() returns one array, so this unpack
    # and the vpa(case)[1] check below (indexing into the vpa array, not a
    # presence flag) look like leftovers from an older (data, present) API
    # — confirm before relying on es_mom_sym_present.
    es_mom_sym_stella, es_mom_sym_present = read_stella_float(case, 'es_mom_sym')
    if vpa(case)[1] == False:
        es_mom_sym_present = False
    return es_mom_sym_stella, es_mom_sym_present
def xgrid(case):
    # NOTE(review): same stale 2-tuple unpack as es_mom_sym above.
    xgrid_stella, xgrid_present = \
        read_stella_float(case, 'xgrid')
    # reorders the two halves of the grid — assumed to follow the same
    # layout as kx() above; confirm against stella's output ordering.
    xgrid = np.concatenate((xgrid_stella[kx_stella(case).shape[0]//2+1:],\
                            xgrid_stella[:kx_stella(case).shape[0]//2+1]))
    return xgrid, xgrid_present
def dens(case):
    """Return (densities scaled by 1e19, number of raw entries)."""
    raw = read_stella_float(case, 'dens')
    return factormult(raw, 1e19), size(raw)

def upar(case):
    """Parallel flow fluctuation (netcdf variable 'upar')."""
    return read_stella_float(case, 'upar')

def temp(case):
    """Return (temperatures scaled by 1000, number of raw entries)."""
    raw = read_stella_float(case, 'temp')
    return factormult(raw, 1000), size(raw)

def species(case):
    """Return (species-type array, number of entries)."""
    kinds = read_stella_float(case, 'type_of_species')
    return kinds, size(kinds)

def nprim(case):
    """Read netcdf variable 'fprim'."""
    return read_stella_float(case, 'fprim')

def tprim(case):
    """Read netcdf variable 'tprim'."""
    return read_stella_float(case, 'tprim')

def charge(case):
    """Return (charge array, number of entries)."""
    q = read_stella_float(case, 'charge')
    return q, size(q)
def mass(case):
    """Return (mass array, number of entries) read from the output."""
    m = read_stella_float(case, 'mass')
    # BUG FIX: the original stored the array in a variable named `charge`
    # and called size(mass) — i.e. size() of this function object, not of
    # the data just read.
    return m, size(m)
# ==================================================================
| 31.915119
| 93
| 0.640376
|
import numpy as np
from stella_dirs import *
from scipy.io import netcdf
import tabCompleter
from tabCompleter import *
from plotbox import *
from aux_functions import *
from os import listdir
from netCDF4 import *
import glob
import os.path
def format1(value):
return "%.3e" % value
def format2(value):
return "%14.6e" % value
def format3(value):
return "%4.2f" % value
def format4(value):
return "%6.2f" % value
def format6(value):
return "%7.3f" % value
def format5(value):
return "%.5e" % value
def format7(value):
return "%22.3f" % value
def format8(value):
return "%04d" % value
def format9(value):
return "%7.5f" % value
def casestr(case=None):
    """Return the basename (without '.in') of the input file for *case*.

    Returns None (with a warning) when the input is ambiguous or missing.
    """
    if case.endswith(".in"):
        buff = case.split("/")
        return buff[size(buff)-1].split(".in")[0]
    inputs = inputlist(case)
    if size(inputs) > 1:
        # Ambiguous: the caller must name the input file explicitly.
        print("\nSpecify the input in the case field, more than one input file found:\n")
        print(inputs)
        return None
    if size(inputs) == 1:
        # BUG FIX: was `size(inputlist(case) == 1)` — size() of a bool,
        # which is always truthy and crashed with IndexError on zero inputs.
        return inputs[0].split(".in")[0]
    return None
def inputlist_r(case):
inputs_level_0 = glob.glob(outdir(case)+'/*.in', recursive = True)
inputs_level_1 = glob.glob(outdir(case)+'/*/*.in', recursive = True)
return (inputs_level_0+inputs_level_1)
def inputlist(case, recursive=False):
inlist = []
if recursive:
inlist = inputlist_r(case=case)
else:
for f in listdir(outdir(case)):
if f.endswith('.in'):
if not f.startswith('.'):
inputname=f
inlist.append(f)
return inlist
def outdir(case=None):
if case.endswith(".in"):
vcase=case.split("/")
return runsdir()+'/'+ case.replace("/"+vcase[size(vcase)-1], '')
else:
return runsdir()+'/'+ case
def geotxtfile(case=None):
if os.path.isfile(case):
return case.split('.in')[0] + '.geometry'
else:
return outdir(case) + '/' + casestr(case) + '.geometry'
def outfile(case=None, quant=None):
if os.path.isfile(case):
return case.split('.in')[0] + '.' + quant
else:
return outdir(case) + '/' + casestr(case) + '.' + quant
def infile(case=None):
return outfile(case, quant='out.nc')
def fluxes_txt(case=None):
return outfile(case, quant='fluxes')
def torflux(case):
    """Read the 'torflux' value from the stella input file of *case*."""
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(outfile(case, quant='in')) as myfile:
        text = myfile.read()
    return float(text.split('torflux')[1].split('\n')[0].split('=')[1])
def read_stella_float(case, var):
import numpy as np
ncfile = netcdf.netcdf_file(infile(case),'r')
try:
arr = np.copy(ncfile.variables[var][:])
flag = True
except KeyError:
print('INFO: '+var+' not found in netcdf file')
arr = np.arange(1,dtype=float)
flag = False
return arr
def read_stella_value(case, var):
woutfile = infile(case)
d = Dataset(woutfile, mode='r')
return d.variables[var][:]
def kx(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
kx_stella = np.copy(ncfile.variables['kx'][:])
nakx = ncfile.dimensions['kx']
nakx_mid = nakx//2+1
kx = np.concatenate((kx_stella[nakx_mid:],kx_stella[:nakx_mid]))
return kx, nakx, nakx_mid
def kx_stella(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
kx_stella = np.copy(ncfile.variables['kx'][:])
return kx_stella
def ky(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
ky = np.copy(ncfile.variables['ky'][:])
naky = ncfile.dimensions['ky']
return ky, naky
def zed(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
zed = np.copy(ncfile.variables['zed'][:])
nzed = zed.size
iz0 = nzed//2+1
return zed, nzed, iz0
def time(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
time = np.copy(ncfile.variables['t'][:])
ntime = time.size
return time, ntime
def nspec(case):
ncfile = netcdf.netcdf_file(infile(case),'r')
nspec = ncfile.dimensions['species']
return nspec
def geo(case):
d = Dataset(infile(case), mode='r')
ncfile = netcdf.netcdf_file(infile(case),'r')
bmag = np.copy(ncfile.variables['bmag'][:])
gradpar = np.copy(ncfile.variables['gradpar'][:])
gbdrift = np.copy(ncfile.variables['gbdrift'][:])
gbdrift0 = np.copy(ncfile.variables['gbdrift0'][:])
cvdrift = np.copy(ncfile.variables['cvdrift'][:])
cvdrift0 = np.copy(ncfile.variables['cvdrift0'][:])
gds2 = np.copy(ncfile.variables['gds2'][:])
gds21 = np.copy(ncfile.variables['gds21'][:])
gds22 = np.copy(ncfile.variables['gds22'][:])
shat = float(d.variables['shat'][:])
return bmag, gradpar, gbdrift, gbdrift0, cvdrift, cvdrift0, gds2, gds21, gds22, shat
def phi2_vs_kxky(case):
phi2_vs_kxky_stella = read_stella_float(case, 'phi2_vs_kxky')
return phi2_vs_kxky_stella
def pflux_vs_kxky(case):
pflux_vs_kxky_stella = read_stella_float(case, 'pflx_kxky')
return pflux_vs_kxky_stella
def vflux_vs_kxky(case):
vflux_vs_kxky_stella = read_stella_float(case, 'vflx_kxky')
return vflux_vs_kxky_stella
def qflux_vs_kxky(case):
qflux_vs_kxky_stella = read_stella_float(case, 'qflx_kxky')
return qflux_vs_kxky_stella
def density_vs_kxky(case):
density_vs_kxky_stella = read_stella_float(case, 'density')
return density_vs_kxky_stella
def upar_vs_kxky(case):
upar_vs_kxky_stella = read_stella_float(case, 'upar')
return upar_vs_kxky_stella
def temperature_vs_kxky(case):
temperature_vs_kxky_stella = read_stella_float(case, 'temperature')
return temperature_vs_kxky_stella
def phi_vs_t(case):
phi_vs_t_stella = read_stella_float(case, 'phi_vs_t')
return phi_vs_t_stella
def gvmus(case):
return read_stella_float(case, 'gvmus')
def gzvs(case):
return read_stella_float(case, 'gzvs')
def jacob(case):
return read_stella_float(case, 'jacob')
def jtwist(case):
return read_stella_value(case, 'jtwist')
def grho(case):
return read_stella_float(case, 'grho')
def phi2_stella(case):
return read_stella_float(case, 'phi2')
def es_part_flux(case):
return read_stella_float(case, 'es_part_flux')
def es_heat_flux(case):
return read_stella_float(case, 'es_heat_flux')
def es_mom_flux(case):
return read_stella_float(case, 'es_mom_flux')
def es_energy_exchange(case):
return read_stella_float(case, 'es_energy_exchange')
def es_part_by_k(case):
es_part_by_k_stella, es_part_by_k_present = \
read_stella_float(case, 'es_part_by_k')
if es_part_by_k_present is not True:
es_part_by_k_stella, es_part_by_k_present = \
read_stella_float(case, 'es_part_flux_by_mode')
return es_part_by_k_stella, es_part_by_k_present
def es_mom_by_k(case):
es_mom_by_k_stella, es_mom_by_k_present = \
read_stella_float(case, 'es_mom_by_k')
if es_mom_by_k_present is not True:
es_mom_by_k_stella, es_mom_by_k_present = \
read_stella_float(case, 'es_mom_flux_by_mode')
return es_mom_by_k_stella, es_mom_by_k_present
def es_energy_exchange_by_k(case):
es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \
read_stella_float(case, 'es_energy_exchange_by_k')
if es_energy_exchange_by_k_present is not True:
es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \
read_stella_float(case, 'es_energy_exchange_by_mode')
return es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present
def es_energy_exchange_by_ky(case):
return read_stella_float(case, 'es_energy_exchange_by_ky')
def vpa(case):
return read_stella_float(case, 'vpa')
def mu(case):
return read_stella_float(case, 'mu')
def es_part_sym(case):
return read_stella_float(case, 'es_part_sym')
def es_heat_sym(case):
return read_stella_float(case, 'es_heat_sym')
def es_mom_sym(case):
es_mom_sym_stella, es_mom_sym_present = read_stella_float(case, 'es_mom_sym')
if vpa(case)[1] == False:
es_mom_sym_present = False
return es_mom_sym_stella, es_mom_sym_present
def xgrid(case):
xgrid_stella, xgrid_present = \
read_stella_float(case, 'xgrid')
xgrid = np.concatenate((xgrid_stella[kx_stella(case).shape[0]//2+1:],\
xgrid_stella[:kx_stella(case).shape[0]//2+1]))
return xgrid, xgrid_present
def dens(case):
dens=read_stella_float(case, 'dens')
dens_exp=factormult(dens,1e19)
return dens_exp, size(dens)
def upar(case):
return read_stella_float(case,'upar')
def temp(case):
temp=read_stella_float(case,'temp')
temp_exp=factormult(temp,1000)
return temp_exp, size(temp)
def species(case):
species=read_stella_float(case,'type_of_species')
return species, size(species)
def nprim(case):
return read_stella_float(case,'fprim')
def tprim(case):
return read_stella_float(case,'tprim')
def charge(case):
charge=read_stella_float(case,'charge')
return charge, size(charge)
def mass(case):
    """Return (mass array, number of entries) read from the output."""
    m = read_stella_float(case, 'mass')
    # BUG FIX: the original stored the array in a variable named `charge`
    # and called size(mass) — size() of this function object, not the data.
    return m, size(m)
| true
| true
|
1c478c2f72be04820d92305cfffce27aa98c7fa4
| 907
|
py
|
Python
|
electrum/tests/__init__.py
|
checho1989/electrum-civx
|
4853bf42f0aa96bb894992c1abf7b8bdda587543
|
[
"MIT"
] | null | null | null |
electrum/tests/__init__.py
|
checho1989/electrum-civx
|
4853bf42f0aa96bb894992c1abf7b8bdda587543
|
[
"MIT"
] | null | null | null |
electrum/tests/__init__.py
|
checho1989/electrum-civx
|
4853bf42f0aa96bb894992c1abf7b8bdda587543
|
[
"MIT"
] | null | null | null |
import unittest
import threading
from electrum_civx import constants
# Set this locally to make the test suite run faster.
# If set, unit tests that would normally test functions with multiple implementations,
# will only be run once, using the fastest implementation.
# e.g. libsecp256k1 vs python-ecdsa. pycryptodomex vs pyaes.
FAST_TESTS = False
# some unit tests are modifying globals; sorry.
class SequentialTestCase(unittest.TestCase):
    """Serializes test execution: some unit tests mutate module globals,
    so a class-wide lock is held from setUp until tearDown."""

    test_lock = threading.Lock()

    def setUp(self):
        super().setUp()
        type(self).test_lock.acquire()

    def tearDown(self):
        super().tearDown()
        type(self).test_lock.release()
class TestCaseForTestnet(SequentialTestCase):
    """Runs the whole test class against testnet parameters."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        constants.set_testnet()
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Restore mainnet so later test classes see the default constants.
        constants.set_mainnet()
| 23.25641
| 86
| 0.705623
|
import unittest
import threading
from electrum_civx import constants
FAST_TESTS = False
class SequentialTestCase(unittest.TestCase):
test_lock = threading.Lock()
def setUp(self):
super().setUp()
self.test_lock.acquire()
def tearDown(self):
super().tearDown()
self.test_lock.release()
class TestCaseForTestnet(SequentialTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_testnet()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
| true
| true
|
1c478c32bd4fd3adda92f37777aa80cd495fcafb
| 926
|
py
|
Python
|
common/models/notice/UserNews.py
|
apanly/python_learn_master
|
93a214241812f77a006cc8350a7bad6c4eec6c89
|
[
"BSD-3-Clause"
] | 5
|
2020-11-29T14:21:18.000Z
|
2021-10-07T04:11:29.000Z
|
common/models/notice/UserNews.py
|
linkgeek/python_flask_cms
|
ff5e794b5b11075670e5d11a8cbda0a137319876
|
[
"BSD-3-Clause"
] | null | null | null |
common/models/notice/UserNews.py
|
linkgeek/python_flask_cms
|
ff5e794b5b11075670e5d11a8cbda0a137319876
|
[
"BSD-3-Clause"
] | 2
|
2020-11-30T09:55:53.000Z
|
2022-03-19T12:49:40.000Z
|
# coding: utf-8
from application import db
class UserNews(db.Model):
    """ORM model for a per-user notification message (table `user_news`)."""
    __tablename__ = 'user_news'
    id = db.Column(db.Integer, primary_key=True, info='消息id')  # message id
    uid = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='用户id')  # owning user id
    title = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue(), info='标题')  # title
    content = db.Column(db.String(1500), nullable=False, server_default=db.FetchedValue(), info='内容')  # body text
    status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='状态 0:未读 1:已读')  # 0 = unread, 1 = read
    updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='更新时间')  # last-update timestamp
    created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='创建时间')  # creation timestamp
    def __init__(self, **items):
        # Assign only keys that match existing model attributes; unknown
        # keyword arguments are silently ignored.
        for key in items:
            if hasattr(self, key):
                setattr(self, key, items[key])
| 51.444444
| 105
| 0.686825
|
from application import db
class UserNews(db.Model):
__tablename__ = 'user_news'
id = db.Column(db.Integer, primary_key=True, info='消息id')
uid = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='用户id')
title = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue(), info='标题')
content = db.Column(db.String(1500), nullable=False, server_default=db.FetchedValue(), info='内容')
status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='状态 0:未读 1:已读')
updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='更新时间')
created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='创建时间')
def __init__(self, **items):
for key in items:
if hasattr(self, key):
setattr(self, key, items[key])
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.