# (dataset-export residue preserved as comments so the Python module below parses)
# input stringlengths 2.65k 237k | output stringclasses 1
# value |
# |---|---|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_template.blocks.ipynb (unless otherwise specified).
__all__ = ['Block', 'MolBlock', 'ConstantBlock', 'ConstantMolBlock', 'BlockTemplate', 'RGroupBlockTemplate',
'DoubleRGroupBlockTemplate', 'LinkerBlockTemplate', 'ScaffoldBlockTemplate']
# Cell
from ..imports import *
from ..core import *
from ..chem import *
from .filters import *
from .template import *
# Cell
class Block():
    '''
    Block - base class for Blocks

    Inputs:
    - `template Template`: `Template` subclass
    - `links list[str]`: list, defines links between this block and other blocks
    - `name str`: block name
    - `subblocks list[Block]`: list of `Block` classes nested within this block
    '''
    def __init__(self, template, links, name, subblocks=None):
        # avoid a shared mutable default for subblocks
        if subblocks is None:
            subblocks = []
        self.template = template
        self.links = links
        self.name = name
        self.subblocks = subblocks
        self.sublinks = []
        self.update_links()

    def update_links(self):
        # gather the link patterns of every subblock (and, transitively, of
        # their subblocks) so this block knows all nested link specifications
        for b in self.subblocks:
            self.sublinks.append(b.links)
            for sl in b.sublinks:
                self.sublinks.append(sl)

    def eval_mol(self, mol, previous_pass=True):
        '''
        eval_mol - evaluates `mol`.

        If `mol` passes the hard filters in `self.template`, it is scored by
        `self.template.soft_filters`. If not, `self.template.fail_score` is
        given instead.

        Returns `hardpass` (result of the hard filters), `score`, and logging
        information.

        Context: this will be executed in parallel processing if available,
        meaning the automated logging implemented in `Template.__call__` won't
        work. For this reason, the log information is captured as an output and
        added to the template log later (see `BlockTree`)
        '''
        mol = self.template.to_mol(mol)
        smile = self.template.to_string(mol)
        if type(smile) == str:
            match = self.match_fragment(smile)
        else:
            # string conversion failed; treat as a non-match
            match = False
        if previous_pass and match:
            hardpass, hardlog = self.template.hf(mol)
        else:
            # skip hard filters entirely if upstream already failed
            hardpass = False
            hardlog = []
        if hardpass:
            score, softlog = self.template.sf(mol)
        else:
            score = self.template.fail_score
            softlog = []
        return [hardpass, score, hardlog, softlog]

    def match_fragment(self, fragment):
        # determine if fragment matches block link pattern; subclasses implement
        raise NotImplementedError

    def match_fragment_recursive(self, fragment):
        # recursively match fragment against this block and all subblocks;
        # True if any block in the subtree matches
        if self.match_fragment(fragment):
            output = True
        else:
            output = False
        for block in self.subblocks:
            if block.match_fragment_recursive(fragment):
                output = True
        return output

    def sample(self, n, log='hard'):
        # wrapper for template log sampling
        return self.template.sample(n, log=log)

    def load_data(self, fragments, recurse=False):
        # checks fragment attachments, then sends to template `load_data`
        # optionally recursive into subblocks first
        if recurse:
            for b in self.subblocks:
                b.load_data(fragments, recurse=True)
        matches = maybe_parallel(self.match_fragment, fragments)
        # keep only the fragments whose link pattern matches this block
        fragments = [f for f, m in zip(fragments, matches) if m]
        self.template.screen_mols(fragments)

    def decompose_fragments(self, fragment_string):
        # decomposes a string of multiple fragments into a list of single fragments
        raise NotImplementedError

    def join_fragments(self, fragment_list):
        # joins list of fragments into single string
        raise NotImplementedError

    def fuse_fragments(self, fragment_string):
        # fuses fragment string into single output
        raise NotImplementedError

    def join_and_fuse(self, fragment_list):
        # convenience: join a fragment list, then fuse the result
        return self.fuse_fragments(self.join_fragments(fragment_list))

    def recurse_fragments(self, fragment, add_constant=True):
        # recursively break down fragments, route to subblocks, fuse and evaluate
        raise NotImplementedError

    def __repr__(self):
        rep_str = f'Block {self.name}: {self.links}\n\t' + '\n\t'.join(self.template.__repr__().split('\n'))
        if self.subblocks:
            rep_str += '\n'
            for b in self.subblocks:
                rep_str += '\n\t' + '\n\t'.join(b.__repr__().split('\n'))
        return rep_str
# Cell
class MolBlock(Block):
    '''
    MolBlock - `Block` subclass specific to working with SMILES strings. This class expects
    links between SMILES fragments to be wildcard atoms of the form `{isotope}*:{map_number}`.

    Note that '0' should not be used as an isotope number because RDKit removes '0' isotopes from
    SMILES strings automatically.
    '''
    def __init__(self, template, links, name, subblocks=None):
        super().__init__(template, links, name, subblocks=subblocks)
        # example link format: self.links = ['1*:2', '1*:3']
        # raw string avoids invalid-escape-sequence warnings; the compiled
        # pattern is byte-identical to the original ('\[.\*:.]') and matches
        # bracketed mapped wildcards such as '[1*:2]' (note: one-character
        # isotope and one-character map number only)
        self.pattern = re.compile(r'\[.\*:.]')
        for link in self.links:
            assert not '0*' in link, "Do not use 0 as an isotope, RDKit automatically removes it"

    def pattern_match(self, fragment):
        "extracts `{isotope}*:{map_number}` substrings from `fragment`"
        matches = self.pattern.findall(fragment)
        # strip the surrounding brackets from each match
        return [i[1:-1] for i in matches]

    def is_mapped(self, fragment):
        'Validates all wildcards are mapped with the form `{isotope}*:{map_number}`'
        if fragment.count('*') == len(self.pattern_match(fragment)):
            mapped = True
        else:
            mapped = False
        return mapped

    def add_mapping(self, fragment, links=None):
        "Maps wildcards in `fragment` (ie changes `*` to `{isotope}*:{map_number}`)"
        if self.is_mapped(fragment):
            # already mapped
            mapped = fragment
        else:
            if len(self.pattern_match(fragment)) > 0:
                # partially mapped, something went wrong; strip and remap
                fragment = self.remove_mapping(fragment)
            if links is None:
                # assign this block's links to wildcards in random order
                links = list(self.links)
                random.shuffle(links)
            mapped = ''
            link_count = 0
            for s in fragment:
                if s == '*':
                    s = f'[{links[link_count]}]'
                    link_count += 1
                mapped += s
        return mapped

    def remove_mapping(self, fragment):
        "Converts mappings from `{isotope}*:{map_number}` to `*`"
        matches = self.pattern_match(fragment)
        for match in matches:
            fragment = fragment.replace(f'[{match}]', '*')
        return fragment

    def match_fragment(self, fragment):
        "Determines if `fragment` matches the specification in `self.links`"
        match = False
        if fragment.count('*') == len(self.links):
            if not self.is_mapped(fragment):
                fragment = self.add_mapping(fragment)
            matches = self.pattern_match(fragment)
            # require unique mappings that exactly cover this block's links
            if len(matches) == len(set(matches)) and set(matches) == set(self.links):
                match = True
        return match

    def _load_fragment(self, fragment):
        # map a fragment if its wildcard count matches; flag whether it did
        if fragment.count('*') == len(self.links):
            fragment = self.add_mapping(fragment)
            fragpass = True
        else:
            fragpass = False
        return [fragment, fragpass]

    def load_data(self, fragments, recurse=False):
        '''
        load_data - checks if elements in `fragments` match `self.links`, then
        passes matching fragments to `self.template` for screening and scoring
        '''
        if recurse:
            for b in self.subblocks:
                b.load_data(fragments, recurse=True)
        fragments = maybe_parallel(self._load_fragment, fragments)
        # keep only the (now mapped) fragments that passed the wildcard check
        fragments = [i[0] for i in fragments if i[1]]
        self.template.screen_mols(fragments)

    def sample_smiles(self, n, log='hard'):
        # wrapper for template SMILES sampling
        return self.template.sample_smiles(n, log=log)

    def shuffle_mapping(self, fragment):
        'Shuffles map numbers on `fragment`'
        current_mapping = self.pattern_match(fragment)
        new_mapping = list(current_mapping)
        random.shuffle(new_mapping)
        fragment = self.remove_mapping(fragment)
        fragment = self.add_mapping(fragment, links=new_mapping)
        return fragment

    def decompose_fragments(self, fragment_string):
        # SMILES fragments are separated by '.'
        return fragment_string.split('.')

    def join_fragments(self, fragment_list):
        return '.'.join(fragment_list)

    def fuse_fragments(self, fragment_string):
        # fuse mapped fragments into a single SMILES; any failure yields ''
        try:
            new_smile = fuse_on_atom_mapping(fragment_string)
        except Exception:
            # narrow from bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; fusion failures are signalled with ''
            new_smile = ''
        return new_smile

    def recurse_fragments(self, fragments, add_constant=True):
        '''
        recurse_fragments - recursively evaluates `fragments` against `self.template`
        and all blocks in `self.subblocks`

        Inputs:
        - `fragments [str, list[str]]`: fragments to process.
        Can either be a single string of the form `'f1.f2.f3'`
        or a list of the form `['f1','f2','f3']`. All items in
        `fragments` should correspond to the same final molecule.
        - `add_constant bool`: If True, constant sequences in any `ConstantBlock` subclasses
        are added to `fragments` during evaluation. Should be `True` if constant sequences are
        missing from `fragments` or False if they are present

        Returns:
        - `fused str`: fragments fused at this stage
        - `total_pass bool`: True if `fragments` passed all subblock
        templates and `fused` passed `self.template`
        - `total_score float`: sum of scores from `self.template.soft_filters` and subblock template soft filters
        - `output_dicts list[dict]`: list of dictionaries holding information from this block and subblocks

        Recurse fragments works in the following way:
        1. Fragments are decomposed based on `self.decompose_fragments`
        2. Fragments are routed to subblocks if present using `self.match_fragment_recursive`
        3. Any fragments matching a subblock are first evaluated by that subblock's template
        4. If `add_constant=True`, constant sequences from any `ConstantBlock` subblocks are added
        5. Fragments are joined and fused using `self.join_fragments` and `self.fuse_fragments`
        6. The fused fragments are processed by `self.eval_mol`
        '''
        output_dicts = []
        total_pass = []
        total_score = 0.
        if not is_container(fragments):
            fragments = [fragments]
        valids = self.template.validate(fragments, cpus=0)
        if all(valids):
            # decompose every input, then flatten into one fragment list
            fragments = [self.decompose_fragments(i) for i in fragments]
            fragments = [item for sublist in fragments for item in sublist]
            if self.subblocks:
                new_fragments = []
                unrouted = list(fragments)  # copy list
                for sb in self.subblocks:
                    # route fragments matching this subblock's subtree
                    routed = [i for i in unrouted if sb.match_fragment_recursive(i)]
                    unrouted = [i for i in unrouted if not i in routed]
                    if routed:
                        r_fused, r_pass, r_score, subdicts = sb.recurse_fragments(routed)
                        new_fragments.append(r_fused)
                        total_pass.append(r_pass)
                        total_score += r_score
                        output_dicts += subdicts
                    if isinstance(sb, ConstantBlock) and add_constant:
                        # constant sequences are injected here rather than
                        # expected in the input
                        new_fragments.append(sb.smile)
                fragments = new_fragments + unrouted
            joined_fragments = self.join_fragments(fragments)
            fused = self.fuse_fragments(joined_fragments)
            frag_pass, frag_score, hardlog, softlog = self.eval_mol(fused, all(total_pass))
            total_pass.append(frag_pass)
            total_score += frag_score
            total_pass = all(total_pass)
            output_dict = {
                'block': self.name,
                'fused': fused,
                'fragments': fragments,
                'block_pass': frag_pass,
                'block_score': frag_score,
                'all_pass': total_pass,
                'all_score': total_score,
                'hardlog': hardlog,
                'softlog': softlog
            }
            output_dicts.append(output_dict)
        else:
            fused = ''
            total_pass = False
            total_score = self.template.fail_score
            # was `{}`: the documented return type is a list, and the success
            # path returns a list — keep the type consistent on failure
            output_dicts = []
        return fused, total_pass, total_score, output_dicts
# Cell
class ConstantBlock():
    '''
    ConstantBlock - base block class for a constant (unchanging) sequence.

    Inputs:
    - `constant str`: the constant sequence held by this block
    - `name str`: block name

    Exposes the same `links`/`subblocks`/`sublinks` attributes as `Block` so it
    can sit inside a block tree, but it never matches or loads fragments.
    '''
    def __init__(self, constant, name):
        self.constant = constant
        self.name = name
        # no links or nested blocks: a constant contributes a fixed sequence
        self.links = []
        self.subblocks = []
        self.sublinks = []

    def match_fragment(self, fragment):
        # a constant block never matches incoming fragments
        return False

    def match_fragment_recursive(self, fragment):
        # no subblocks, so the recursive check is always False as well
        return False

    def load_data(self, fragments, recurse=False):
        # nothing to load for a constant sequence
        pass

    def sample(self, n, log='hard'):
        # returns `n` copies of the constant with a 0. score.
        # `log` is accepted (and ignored) for interface consistency with
        # `Block.sample(n, log=...)`, so blocks can be sampled polymorphically
        return pd.DataFrame([[self.constant, 0.] for i in range(n)], columns=['smiles', 'final'])

    def __repr__(self):
        rep_str = f'Constant Block: {self.constant}'
        return rep_str
class ConstantMolBlock(ConstantBlock):
'''
ConstantMolBlock - constant block for SMILES sequence
'''
def __init__(self, smile, name, links=None):
super().__init__(smile, name)
self.smile = canon_smile(smile)
if | |
postCellId="../AIZL/0/"/>
</projection>
<projection id="NC_HSNR_AIZR_Serotonin_Acetylcholine" postsynapticPopulation="AIZR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../AIZR/0/"/>
</projection>
<projection id="NC_HSNR_AS5_Serotonin_Acetylcholine" postsynapticPopulation="AS5" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../AS5/0/"/>
</projection>
<projection id="NC_HSNR_ASHL_Serotonin_Acetylcholine" postsynapticPopulation="ASHL" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../ASHL/0/"/>
</projection>
<projection id="NC_HSNR_AVDR_Serotonin_Acetylcholine" postsynapticPopulation="AVDR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../AVDR/0/"/>
</projection>
<projection id="NC_HSNR_AVFL_Serotonin_Acetylcholine" postsynapticPopulation="AVFL" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../AVFL/0/"/>
</projection>
<projection id="NC_HSNR_AVJL_Serotonin_Acetylcholine" postsynapticPopulation="AVJL" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../AVJL/0/"/>
</projection>
<projection id="NC_HSNR_AVL_Serotonin_Acetylcholine" postsynapticPopulation="AVL" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../AVL/0/"/>
</projection>
<projection id="NC_HSNR_AWBL_Serotonin_Acetylcholine" postsynapticPopulation="AWBL" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../AWBL/0/"/>
</projection>
<projection id="NC_HSNR_BDUR_Serotonin_Acetylcholine" postsynapticPopulation="BDUR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../BDUR/0/"/>
</projection>
<projection id="NC_HSNR_DA5_Serotonin_Acetylcholine" postsynapticPopulation="DA5" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../DA5/0/"/>
</projection>
<projection id="NC_HSNR_DA6_Serotonin_Acetylcholine" postsynapticPopulation="DA6" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../DA6/0/"/>
</projection>
<projection id="NC_HSNR_HSNL_Generic_GJ" postsynapticPopulation="HSNL" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../HSNL/0/"/>
</projection>
<projection id="NC_HSNR_HSNL_Serotonin_Acetylcholine" postsynapticPopulation="HSNL" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../HSNL/0/"/>
</projection>
<projection id="NC_HSNR_PVNR_Serotonin_Acetylcholine" postsynapticPopulation="PVNR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../PVNR/0/"/>
</projection>
<projection id="NC_HSNR_PVNR_Generic_GJ" postsynapticPopulation="PVNR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../PVNR/0/"/>
</projection>
<projection id="NC_HSNR_PVQR_Serotonin_Acetylcholine" postsynapticPopulation="PVQR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../PVQR/0/"/>
</projection>
<projection id="NC_HSNR_RIFR_Serotonin_Acetylcholine" postsynapticPopulation="RIFR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../RIFR/0/"/>
</projection>
<projection id="NC_HSNR_RMGR_Serotonin_Acetylcholine" postsynapticPopulation="RMGR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../RMGR/0/"/>
</projection>
<projection id="NC_HSNR_SABD_Serotonin_Acetylcholine" postsynapticPopulation="SABD" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../SABD/0/"/>
</projection>
<projection id="NC_HSNR_SABVR_Serotonin_Acetylcholine" postsynapticPopulation="SABVR" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../SABVR/0/"/>
</projection>
<projection id="NC_HSNR_VA6_Serotonin_Acetylcholine" postsynapticPopulation="VA6" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../VA6/0/"/>
</projection>
<projection id="NC_HSNR_VC2_Serotonin_Acetylcholine" postsynapticPopulation="VC2" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../VC2/0/"/>
</projection>
<projection id="NC_HSNR_VC3_Serotonin_Acetylcholine" postsynapticPopulation="VC3" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../VC3/0/"/>
</projection>
<projection id="NC_HSNR_VD4_Serotonin_Acetylcholine" postsynapticPopulation="VD4" presynapticPopulation="HSNR" synapse="">
<connection id="0" preCellId="../HSNR/0/" postCellId="../VD4/0/"/>
</projection>
<projection id="NC_I1L_I1R_Generic_GJ" postsynapticPopulation="I1R" presynapticPopulation="I1L" synapse="">
<connection id="0" preCellId="../I1L/0/" postCellId="../I1R/0/"/>
</projection>
<projection id="NC_I1L_I3_Acetylcholine" postsynapticPopulation="I3" presynapticPopulation="I1L" synapse="">
<connection id="0" preCellId="../I1L/0/" postCellId="../I3/0/"/>
</projection>
<projection id="NC_I1L_I5_Generic_GJ" postsynapticPopulation="I5" presynapticPopulation="I1L" synapse="">
<connection id="0" preCellId="../I1L/0/" postCellId="../I5/0/"/>
</projection>
<projection id="NC_I1L_RIPL_Generic_GJ" postsynapticPopulation="RIPL" presynapticPopulation="I1L" synapse="">
<connection id="0" preCellId="../I1L/0/" postCellId="../RIPL/0/"/>
</projection>
<projection id="NC_I1L_RIPR_Generic_GJ" postsynapticPopulation="RIPR" presynapticPopulation="I1L" synapse="">
<connection id="0" preCellId="../I1L/0/" postCellId="../RIPR/0/"/>
</projection>
<projection id="NC_I1R_I1L_Generic_GJ" postsynapticPopulation="I1L" presynapticPopulation="I1R" synapse="">
<connection id="0" preCellId="../I1R/0/" postCellId="../I1L/0/"/>
</projection>
<projection id="NC_I1R_I3_Acetylcholine" postsynapticPopulation="I3" presynapticPopulation="I1R" synapse="">
<connection id="0" preCellId="../I1R/0/" postCellId="../I3/0/"/>
</projection>
<projection id="NC_I1R_I5_Generic_GJ" postsynapticPopulation="I5" presynapticPopulation="I1R" synapse="">
<connection id="0" preCellId="../I1R/0/" postCellId="../I5/0/"/>
</projection>
<projection id="NC_I1R_RIPL_Generic_GJ" postsynapticPopulation="RIPL" presynapticPopulation="I1R" synapse="">
<connection id="0" preCellId="../I1R/0/" postCellId="../RIPL/0/"/>
</projection>
<projection id="NC_I1R_RIPR_Generic_GJ" postsynapticPopulation="RIPR" presynapticPopulation="I1R" synapse="">
<connection id="0" preCellId="../I1R/0/" postCellId="../RIPR/0/"/>
</projection>
<projection id="NC_I2L_I1L_Generic_GJ" postsynapticPopulation="I1L" presynapticPopulation="I2L" synapse="">
<connection id="0" preCellId="../I2L/0/" postCellId="../I1L/0/"/>
</projection>
<projection id="NC_I2L_I1R_Generic_GJ" postsynapticPopulation="I1R" presynapticPopulation="I2L" synapse="">
<connection id="0" preCellId="../I2L/0/" postCellId="../I1R/0/"/>
</projection>
<projection id="NC_I2L_M1_Generic_GJ" postsynapticPopulation="M1" presynapticPopulation="I2L" synapse="">
<connection id="0" preCellId="../I2L/0/" postCellId="../M1/0/"/>
</projection>
<projection id="NC_I2L_M1_Glutamate" postsynapticPopulation="M1" presynapticPopulation="I2L" synapse="">
<connection id="0" preCellId="../I2L/0/" postCellId="../M1/0/"/>
</projection>
<projection id="NC_I2R_I1L_Generic_GJ" postsynapticPopulation="I1L" presynapticPopulation="I2R" synapse="">
<connection id="0" preCellId="../I2R/0/" postCellId="../I1L/0/"/>
</projection>
<projection id="NC_I2R_I1R_Generic_GJ" postsynapticPopulation="I1R" presynapticPopulation="I2R" synapse="">
<connection id="0" preCellId="../I2R/0/" postCellId="../I1R/0/"/>
</projection>
<projection id="NC_I2R_M1_Generic_GJ" postsynapticPopulation="M1" presynapticPopulation="I2R" synapse="">
<connection id="0" preCellId="../I2R/0/" postCellId="../M1/0/"/>
</projection>
<projection id="NC_I2R_M1_Glutamate" postsynapticPopulation="M1" presynapticPopulation="I2R" synapse="">
<connection id="0" preCellId="../I2R/0/" postCellId="../M1/0/"/>
</projection>
<projection id="NC_I3_M1_Glutamate" postsynapticPopulation="M1" presynapticPopulation="I3" synapse="">
<connection id="0" preCellId="../I3/0/" postCellId="../M1/0/"/>
</projection>
<projection id="NC_I3_M2L_Glutamate" postsynapticPopulation="M2L" presynapticPopulation="I3" synapse="">
<connection id="0" preCellId="../I3/0/" postCellId="../M2L/0/"/>
</projection>
<projection id="NC_I3_M2L_Generic_GJ" postsynapticPopulation="M2L" presynapticPopulation="I3" synapse="">
<connection id="0" preCellId="../I3/0/" postCellId="../M2L/0/"/>
</projection>
<projection id="NC_I3_M2R_Glutamate" postsynapticPopulation="M2R" presynapticPopulation="I3" synapse="">
<connection id="0" preCellId="../I3/0/" postCellId="../M2R/0/"/>
</projection>
<projection id="NC_I3_M2R_Generic_GJ" postsynapticPopulation="M2R" presynapticPopulation="I3" synapse="">
<connection id="0" preCellId="../I3/0/" postCellId="../M2R/0/"/>
</projection>
<projection id="NC_I4_I2L_Glutamate" postsynapticPopulation="I2L" presynapticPopulation="I4" synapse="">
<connection id="0" preCellId="../I4/0/" postCellId="../I2L/0/"/>
</projection>
<projection id="NC_I4_I2R_Glutamate" postsynapticPopulation="I2R" presynapticPopulation="I4" synapse="">
<connection id="0" preCellId="../I4/0/" postCellId="../I2R/0/"/>
</projection>
<projection id="NC_I4_I5_Glutamate" postsynapticPopulation="I5" presynapticPopulation="I4" synapse="">
<connection id="0" preCellId="../I4/0/" postCellId="../I5/0/"/>
</projection>
<projection id="NC_I4_M1_Glutamate" postsynapticPopulation="M1" presynapticPopulation="I4" synapse="">
<connection id="0" preCellId="../I4/0/" postCellId="../M1/0/"/>
</projection>
<projection id="NC_I5_I1L_Generic_GJ" postsynapticPopulation="I1L" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../I1L/0/"/>
</projection>
<projection id="NC_I5_I1L_Serotonin_Glutamate" postsynapticPopulation="I1L" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../I1L/0/"/>
</projection>
<projection id="NC_I5_I1R_Generic_GJ" postsynapticPopulation="I1R" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../I1R/0/"/>
</projection>
<projection id="NC_I5_I1R_Serotonin_Glutamate" postsynapticPopulation="I1R" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../I1R/0/"/>
</projection>
<projection id="NC_I5_M1_Serotonin_Glutamate" postsynapticPopulation="M1" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../M1/0/"/>
</projection>
<projection id="NC_I5_M5_Serotonin_Glutamate" postsynapticPopulation="M5" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../M5/0/"/>
</projection>
<projection id="NC_I5_M5_Generic_GJ" postsynapticPopulation="M5" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../M5/0/"/>
</projection>
<projection id="NC_I5_MI_Serotonin_Glutamate" postsynapticPopulation="MI" presynapticPopulation="I5" synapse="">
<connection id="0" preCellId="../I5/0/" postCellId="../MI/0/"/>
</projection>
<projection id="NC_I6_I2L_Acetylcholine" postsynapticPopulation="I2L" presynapticPopulation="I6" synapse="">
<connection id="0" preCellId="../I6/0/" postCellId="../I2L/0/"/>
</projection>
<projection id="NC_I6_I2R_Acetylcholine" postsynapticPopulation="I2R" presynapticPopulation="I6" synapse="">
<connection id="0" preCellId="../I6/0/" postCellId="../I2R/0/"/>
</projection>
<projection id="NC_I6_I3_Acetylcholine" postsynapticPopulation="I3" presynapticPopulation="I6" synapse="">
<connection id="0" preCellId="../I6/0/" postCellId="../I3/0/"/>
</projection>
<projection id="NC_I6_M4_Generic_GJ" postsynapticPopulation="M4" presynapticPopulation="I6" synapse="">
<connection id="0" preCellId="../I6/0/" postCellId="../M4/0/"/>
</projection>
<projection id="NC_I6_M5_Generic_GJ" postsynapticPopulation="M5" presynapticPopulation="I6" synapse="">
<connection id="0" preCellId="../I6/0/" postCellId="../M5/0/"/>
</projection>
<projection id="NC_I6_NSML_Acetylcholine" postsynapticPopulation="NSML" presynapticPopulation="I6" synapse="">
<connection id="0" preCellId="../I6/0/" postCellId="../NSML/0/"/>
</projection>
<projection id="NC_I6_NSMR_Acetylcholine" postsynapticPopulation="NSMR" presynapticPopulation="I6" synapse="">
<connection id="0" preCellId="../I6/0/" postCellId="../NSMR/0/"/>
</projection>
<projection id="NC_IL1DL_IL1DR_Generic_GJ" postsynapticPopulation="IL1DR" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../IL1DR/0/"/>
</projection>
<projection id="NC_IL1DL_IL1L_Generic_GJ" postsynapticPopulation="IL1L" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../IL1L/0/"/>
</projection>
<projection id="NC_IL1DL_OLLL_Generic_GJ" postsynapticPopulation="OLLL" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../OLLL/0/"/>
</projection>
<projection id="NC_IL1DL_PVR_Glutamate" postsynapticPopulation="PVR" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../PVR/0/"/>
</projection>
<projection id="NC_IL1DL_RIH_Glutamate" postsynapticPopulation="RIH" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../RIH/0/"/>
</projection>
<projection id="NC_IL1DL_RIPL_Glutamate" postsynapticPopulation="RIPL" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../RIPL/0/"/>
</projection>
<projection id="NC_IL1DL_RMDDR_Glutamate" postsynapticPopulation="RMDDR" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../RMDDR/0/"/>
</projection>
<projection id="NC_IL1DL_RMDVL_Glutamate" postsynapticPopulation="RMDVL" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../RMDVL/0/"/>
</projection>
<projection id="NC_IL1DL_RMEV_Generic_GJ" postsynapticPopulation="RMEV" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../RMEV/0/"/>
</projection>
<projection id="NC_IL1DL_URYDL_Glutamate" postsynapticPopulation="URYDL" presynapticPopulation="IL1DL" synapse="">
<connection id="0" preCellId="../IL1DL/0/" postCellId="../URYDL/0/"/>
</projection>
<projection id="NC_IL1DR_IL1DL_Generic_GJ" postsynapticPopulation="IL1DL" presynapticPopulation="IL1DR" synapse="">
<connection id="0" preCellId="../IL1DR/0/" postCellId="../IL1DL/0/"/>
</projection>
<projection id="NC_IL1DR_IL1R_Generic_GJ" postsynapticPopulation="IL1R" presynapticPopulation="IL1DR" synapse="">
<connection id="0" preCellId="../IL1DR/0/" postCellId="../IL1R/0/"/>
</projection>
<projection id="NC_IL1DR_OLLR_Generic_GJ" postsynapticPopulation="OLLR" presynapticPopulation="IL1DR" synapse="">
<connection id="0" preCellId="../IL1DR/0/" postCellId="../OLLR/0/"/>
</projection>
<projection id="NC_IL1DR_RIPR_Glutamate" postsynapticPopulation="RIPR" presynapticPopulation="IL1DR" synapse="">
<connection id="0" preCellId="../IL1DR/0/" postCellId="../RIPR/0/"/>
</projection>
<projection id="NC_IL1DR_RMDVR_Glutamate" postsynapticPopulation="RMDVR" presynapticPopulation="IL1DR" synapse="">
<connection id="0" preCellId="../IL1DR/0/" postCellId="../RMDVR/0/"/>
</projection>
<projection id="NC_IL1DR_RMEV_Generic_GJ" postsynapticPopulation="RMEV" presynapticPopulation="IL1DR" synapse="">
<connection id="0" preCellId="../IL1DR/0/" postCellId="../RMEV/0/"/>
</projection>
<projection id="NC_IL1L_AVER_Glutamate" postsynapticPopulation="AVER" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../AVER/0/"/>
</projection>
<projection id="NC_IL1L_IL1DL_Generic_GJ" postsynapticPopulation="IL1DL" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../IL1DL/0/"/>
</projection>
<projection id="NC_IL1L_IL1DL_Glutamate" postsynapticPopulation="IL1DL" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../IL1DL/0/"/>
</projection>
<projection id="NC_IL1L_IL1VL_Generic_GJ" postsynapticPopulation="IL1VL" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../IL1VL/0/"/>
</projection>
<projection id="NC_IL1L_RMDDL_Glutamate" postsynapticPopulation="RMDDL" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../RMDDL/0/"/>
</projection>
<projection id="NC_IL1L_RMDL_Glutamate" postsynapticPopulation="RMDL" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../RMDL/0/"/>
</projection>
<projection id="NC_IL1L_RMDR_Glutamate" postsynapticPopulation="RMDR" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../RMDR/0/"/>
</projection>
<projection id="NC_IL1L_RMDVL_Glutamate" postsynapticPopulation="RMDVL" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../RMDVL/0/"/>
</projection>
<projection id="NC_IL1L_RMDVR_Glutamate" postsynapticPopulation="RMDVR" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../RMDVR/0/"/>
</projection>
<projection id="NC_IL1L_RMER_Glutamate" postsynapticPopulation="RMER" presynapticPopulation="IL1L" synapse="">
<connection id="0" preCellId="../IL1L/0/" postCellId="../RMER/0/"/>
</projection>
<projection id="NC_IL1R_AVEL_Glutamate" postsynapticPopulation="AVEL" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../AVEL/0/"/>
</projection>
<projection id="NC_IL1R_AVER_Glutamate" postsynapticPopulation="AVER" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../AVER/0/"/>
</projection>
<projection id="NC_IL1R_IL1DR_Generic_GJ" postsynapticPopulation="IL1DR" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../IL1DR/0/"/>
</projection>
<projection id="NC_IL1R_IL1DR_Glutamate" postsynapticPopulation="IL1DR" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../IL1DR/0/"/>
</projection>
<projection id="NC_IL1R_IL1VR_Generic_GJ" postsynapticPopulation="IL1VR" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../IL1VR/0/"/>
</projection>
<projection id="NC_IL1R_RMDDL_Glutamate" postsynapticPopulation="RMDDL" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMDDL/0/"/>
</projection>
<projection id="NC_IL1R_RMDDR_Glutamate" postsynapticPopulation="RMDDR" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMDDR/0/"/>
</projection>
<projection id="NC_IL1R_RMDL_Glutamate" postsynapticPopulation="RMDL" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMDL/0/"/>
</projection>
<projection id="NC_IL1R_RMDR_Glutamate" postsynapticPopulation="RMDR" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMDR/0/"/>
</projection>
<projection id="NC_IL1R_RMDVL_Glutamate" postsynapticPopulation="RMDVL" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMDVL/0/"/>
</projection>
<projection id="NC_IL1R_RMDVR_Glutamate" postsynapticPopulation="RMDVR" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMDVR/0/"/>
</projection>
<projection id="NC_IL1R_RMEL_Glutamate" postsynapticPopulation="RMEL" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMEL/0/"/>
</projection>
<projection id="NC_IL1R_RMHL_Glutamate" postsynapticPopulation="RMHL" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../RMHL/0/"/>
</projection>
<projection id="NC_IL1R_URXR_Glutamate" postsynapticPopulation="URXR" presynapticPopulation="IL1R" synapse="">
<connection id="0" preCellId="../IL1R/0/" postCellId="../URXR/0/"/>
</projection>
<projection id="NC_IL1VL_IL1L_Generic_GJ" postsynapticPopulation="IL1L" presynapticPopulation="IL1VL" synapse="">
<connection id="0" preCellId="../IL1VL/0/" postCellId="../IL1L/0/"/>
</projection>
<projection id="NC_IL1VL_IL1L_Glutamate" postsynapticPopulation="IL1L" presynapticPopulation="IL1VL" synapse="">
<connection id="0" preCellId="../IL1VL/0/" postCellId="../IL1L/0/"/>
</projection>
<projection id="NC_IL1VL_IL1VR_Generic_GJ" postsynapticPopulation="IL1VR" presynapticPopulation="IL1VL" synapse="">
<connection id="0" preCellId="../IL1VL/0/" postCellId="../IL1VR/0/"/>
</projection>
<projection id="NC_IL1VL_RIPL_Glutamate" postsynapticPopulation="RIPL" presynapticPopulation="IL1VL" synapse="">
<connection id="0" preCellId="../IL1VL/0/" postCellId="../RIPL/0/"/>
</projection>
<projection id="NC_IL1VL_RMDDL_Glutamate" postsynapticPopulation="RMDDL" presynapticPopulation="IL1VL" synapse="">
<connection id="0" preCellId="../IL1VL/0/" postCellId="../RMDDL/0/"/>
</projection>
<projection id="NC_IL1VL_RMED_Generic_GJ" postsynapticPopulation="RMED" presynapticPopulation="IL1VL" synapse="">
<connection id="0" preCellId="../IL1VL/0/" postCellId="../RMED/0/"/>
</projection>
<projection id="NC_IL1VL_URYVL_Glutamate" postsynapticPopulation="URYVL" presynapticPopulation="IL1VL" synapse="">
<connection id="0" preCellId="../IL1VL/0/" postCellId="../URYVL/0/"/>
</projection>
<projection id="NC_IL1VR_IL1R_Generic_GJ" postsynapticPopulation="IL1R" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../IL1R/0/"/>
</projection>
<projection id="NC_IL1VR_IL1R_Glutamate" postsynapticPopulation="IL1R" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../IL1R/0/"/>
</projection>
<projection id="NC_IL1VR_IL1VL_Generic_GJ" postsynapticPopulation="IL1VL" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../IL1VL/0/"/>
</projection>
<projection id="NC_IL1VR_IL2R_Generic_GJ" postsynapticPopulation="IL2R" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../IL2R/0/"/>
</projection>
<projection id="NC_IL1VR_IL2VR_Glutamate" postsynapticPopulation="IL2VR" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../IL2VR/0/"/>
</projection>
<projection id="NC_IL1VR_RIPR_Glutamate" postsynapticPopulation="RIPR" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../RIPR/0/"/>
</projection>
<projection id="NC_IL1VR_RMDDR_Glutamate" postsynapticPopulation="RMDDR" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../RMDDR/0/"/>
</projection>
<projection id="NC_IL1VR_RMER_Glutamate" postsynapticPopulation="RMER" presynapticPopulation="IL1VR" synapse="">
<connection id="0" preCellId="../IL1VR/0/" postCellId="../RMER/0/"/>
</projection>
<projection id="NC_IL2DL_AUAL_Serotonin" postsynapticPopulation="AUAL" presynapticPopulation="IL2DL" synapse="">
<connection id="0" preCellId="../IL2DL/0/" postCellId="../AUAL/0/"/>
</projection>
<projection id="NC_IL2DL_IL1DL_Serotonin" postsynapticPopulation="IL1DL" presynapticPopulation="IL2DL" synapse="">
<connection id="0" preCellId="../IL2DL/0/" postCellId="../IL1DL/0/"/>
</projection>
<projection id="NC_IL2DL_OLQDL_Serotonin" postsynapticPopulation="OLQDL" presynapticPopulation="IL2DL" synapse="">
<connection id="0" preCellId="../IL2DL/0/" postCellId="../OLQDL/0/"/>
</projection>
<projection id="NC_IL2DL_RIBL_Serotonin" postsynapticPopulation="RIBL" presynapticPopulation="IL2DL" synapse="">
<connection id="0" preCellId="../IL2DL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_IL2DL_RIPL_Serotonin" postsynapticPopulation="RIPL" presynapticPopulation="IL2DL" synapse="">
<connection id="0" preCellId="../IL2DL/0/" postCellId="../RIPL/0/"/>
</projection>
<projection id="NC_IL2DL_RMEL_Serotonin" postsynapticPopulation="RMEL" presynapticPopulation="IL2DL" synapse="">
<connection id="0" preCellId="../IL2DL/0/" postCellId="../RMEL/0/"/>
</projection>
<projection id="NC_IL2DL_RMER_Serotonin" postsynapticPopulation="RMER" presynapticPopulation="IL2DL" synapse="">
<connection id="0" preCellId="../IL2DL/0/" | |
from functools import reduce
import time
import math
import copy
import types
import cv2
import numpy as np
import json
from typing import List, Union, Tuple, Any
from ..thresholds.color import built_in_colors
from ..exceptions.exceptions import InvalidCustomFunctionError, CameraError
from ..camera.camera import Camera
from ..partials.filter_applier import filter_applier
from ..thresholds.threshold import Threshold
from ..thresholds.color.color import Color
from ..connections.connection import Connection
from ..directions.director import Director
from ..camera.camera_settings import CameraSettings
from ..connections.network_location import NetworkLocation
from ..directions.directing_functions import center_directions
class Vision:
"""
The Vision object represents the specifications to detect an object in an image.
It performs the detection using a Threshold and filter functions.
Additional capabilities and tuning options are:
Image filters (Blurs, rotations, cropping),
Morphological functions,
Ovl color HSVCalibration,
"""
def __init__(self, threshold: Threshold = None, contour_filters: List[types.FunctionType] = None,
director: Director = None, width=320, height=240, connection: Connection = None,
camera: Union[int, str, Camera, cv2.VideoCapture, Any] = None,
camera_settings: CameraSettings = None, morphological_functions: List[types.FunctionType] = None,
image_filters: List[types.FunctionType] = None, ovl_camera: bool = False, calibration: str = None):
"""
The object that represents the pipeline of processing, detection, direction of values
A connection object can be passed
:param threshold: threshold object, creates the binary mask from a given image
:param contour_filters: the list of contour_filter functions that
remove contours that aren't the target(s)
:param director: a functions that receives a list or a single contour and returns director
:param width: the width (in pixels) of images taken with the camera
:param height: the height (in pixels)
:param connection: a connection object that passes the result to the connection target
:param camera: a Camera object (cv2.VideoCapture, ovl.Camera) or source from which to open a camera
:param camera_settings: Special camera settings like calibration or offset used for
image correction and various direction calculations.
:param morphological_functions: morphological functions used to apply on the binary
mask generated by the Threshold.
:param image_filters: a list of image altering functions that are applied on the image.
:param ovl_camera: a boolean that makes the camera opened to be ovl.Camera instead of cv2.VideoCapture
:param calibration: a dictionary containing color calibration (HSVCalibration) coefficients and intercepts,
used for applying the calibration on the vision object
"""
self.width = width
self.height = height
self.threshold = threshold
self.contour_filters = contour_filters or []
self.director = director or Director(center_directions, failed_detection=9999, target_amount=1)
self.connection = connection
self.image_filters = image_filters or []
self.morphological_functions = morphological_functions or []
self.camera = None
self.camera_port = None
self.camera_settings = camera_settings
if isinstance(camera, (cv2.VideoCapture, Camera)):
self.camera = camera
elif camera is None:
pass
else:
self.camera_setup(camera, width, height, ovl_camera=ovl_camera)
self.calibration_path = None
if calibration:
self.calibration_path = calibration
calibration = json.load(calibration)
self.saturation_weight = calibration['saturation'] if 'saturation' in calibration else None
self.brightness_weight = calibration['brightness'] if 'brightness' in calibration else None
def __repr__(self):
return str(self)
def __str__(self):
filters = [filter_function.__name__ for filter_function in self.contour_filters]
threshold = self.threshold
return "Vision: \n Threshold: {} \n Filters: {}".format(threshold, filters)
@property
def target_amount(self):
"""
The wanted amount of targets
Determined by self.director
(0 None or math.inf if there is no limit, 1 if 1 target is wanted etc.)
"""
if self.director is None:
return math.inf
return self.director.target_amount
def apply_morphological_functions(self, mask, morphological_functions=None):
"""
Applies all morphological functions on the mask (binary images) created using the threshold,
Morphological functions are functions that are applied
to binary images to alter the shape of "detected" regions
NOTE: Vision.detect is mainly used for full object detection and filtering,
refer to it for common use of Vision
:param mask: the mask on which the functions should be applied
:param morphological_functions: list of morphological_functions to be
applied instead of self.morphological_functions
:return: the applied mask
"""
if type(self.morphological_functions) not in (tuple, list, set):
return mask
morphological_functions = morphological_functions or self.morphological_functions
return reduce(filter_applier, morphological_functions, mask)
def send(self, data, *args, **kwargs) -> Any:
"""
Sends data to the destination using self.connection
:param data: The data to send to the Connection
:param args: any other arguments for the send function in your connection
:param kwargs: any other named arguments for the connection object
:return: Depends on the connection object used, returns its result
"""
return self.connection.send(*args, **kwargs, data=data)
def send_to_location(self, data, network_location: NetworkLocation, *args, **kwargs):
"""
:param data: the data to be sent
:param network_location: information used to send the data to a specific 'location'
in the network
:return: Depends on the connection object
"""
return self.connection.send_to_location(data, network_location, *args, **kwargs)
def get_image(self) -> np.ndarray:
"""
Gets an image from self.camera and applies image filters
:return: the image, false if failed to get it
"""
if self.camera is None:
raise CameraError("No camera given, (Camera is None)")
if not self.camera.isOpened():
raise CameraError("Camera given is not open (Has it been closed or disconnected?)")
output = self.camera.read()
if len(output) == 2:
ret, image = output
return image if ret else False
else:
return output
def get_filtered_image(self):
"""
Gets an image from self.camera and applies all image filters
:return: the image filter applied image
"""
output = self.get_image()
return self.apply_image_filters(output)
def apply_filter(self, filter_function, contours, verbose=False):
"""
Applies a filter function on the contour list, this is used to remove contours
that do not match desired features
NOTE: Vision.detect is mainly used for full object detection and filtering,
refer to it for common use of Vision
:param filter_function: Filter functions are function with a contour list variable that apply some
sort of filter on the contours, thus removing ones that don't fit the limit given by the filter.
for example: straight_rectangle_filter removes contours that are not rectangles that are parallel
to the frame of the picture
:param contours: the contours on which the filter should be applied (list of numpy.ndarrays)
:param verbose: if true_shape does not print anything
:return: returns the output of the filter function.
"""
if verbose:
print('Before "{}": {}'.format(filter_function.__name__, len(contours)))
filter_function_output = filter_function(contours)
if isinstance(filter_function_output, tuple):
if len(filter_function_output) == 2:
filtered_contours, ratio = filter_function_output[0], filter_function_output[1]
else:
raise InvalidCustomFunctionError('Filter function must return between 1 and 2 lists.'
'Please refer to the Documentation: '
'https://github.com/1937Elysium/Ovl-Python')
elif isinstance(filter_function_output, list):
filtered_contours, ratio = filter_function_output, []
else:
raise TypeError('The contour list must be a list or tuple of 2 lists (contours and ratios)')
return filtered_contours, ratio
def apply_all_filters(self, contours: List[np.ndarray], verbose=False
) -> Tuple[List[np.ndarray], List[float]]:
"""
Applies all of the filters on a list of contours
:param contours: List of contours (numpy arrays) to
:param verbose: prints out information about filtering process if true (useful for debugging)
:return: a list of all of the ratios given by the filter function in order.
"""
ratios = []
for filter_func in self.contour_filters:
contours, ratio = self.apply_filter(filter_func, contours, verbose=verbose)
ratios.append(ratio)
if verbose:
print("After all filters: {}".format(len(contours)))
return contours, ratios
def apply_image_filters(self, image: np.ndarray) -> np.ndarray:
"""
Applies all given image filters to the given image
This is used to apply various image filters on your image in a pipeline,
like blurs, image cropping, contrasting, sharpening, rotations, translations etc.
:param image: the image that the image filters should be applied on (numpy array)
:return: the image with the filters applied
"""
return reduce(filter_applier, self.image_filters, image)
def apply_threshold(self, image: np.ndarray, threshold=None) -> np.ndarray:
"""
Gets a mask (binary image) for a given image and Threshold object
(uses self.Threshold if given threshold was none)
:param image: the numpy array of the image
:param threshold: the Threshold object used to create the binary mask
:return: the binary mask
"""
threshold = threshold or self.threshold
return threshold.convert(image)
def find_contours_in_mask(self, mask: np.ndarray, return_hierarchy=False, apply_morphs=True
) -> List[np.ndarray]:
"""
Gets contours from the given mask and apply
:param mask: binary image (mask), a numpy array
:param return_hierarchy: if the hierarchy should be returned
:param apply_morphs: if the morphological functions should be applied.
:return: the list of contours
"""
mask = self.apply_morphological_functions(mask) if apply_morphs else mask
result = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(result) == 3:
_, contours, hierarchy = result
elif len(result) == 2:
contours, hierarchy = result
else:
raise ValueError("Invalid output from cv2.findContours, check that your cv2 (OpenCV) version is supported")
return (contours, hierarchy) if return_hierarchy else contours
def find_contours(self, image: np.ndarray, threshold=None, return_hierarchy=False) | |
import gevent
import pytest
from raiden.api.python import RaidenAPI
from raiden.app import App
from raiden.storage.sqlite import RANGE_ALL_STATE_CHANGES
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.events import (
raiden_events_search_for_item,
search_for_item,
wait_for_raiden_event,
wait_for_state_change,
)
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.protocol import (
dont_handle_lock_expired_mock,
dont_handle_node_change_network_state,
)
from raiden.tests.utils.transfer import (
assert_succeeding_transfer_invariants,
assert_synced_channel_state,
calculate_fee_for_amount,
get_channelstate,
transfer,
wait_assert,
)
from raiden.transfer import channel, views
from raiden.transfer.events import ContractSendChannelUpdateTransfer, EventPaymentSentFailed
from raiden.transfer.mediated_transfer.events import (
SendLockedTransfer,
SendLockExpired,
SendRefundTransfer,
)
from raiden.transfer.mediated_transfer.initiator import calculate_fee_margin
from raiden.transfer.mediated_transfer.state_change import ReceiveLockExpired
from raiden.transfer.state_change import ContractReceiveChannelBatchUnlock, ReceiveProcessed
from raiden.transfer.views import state_from_raiden
from raiden.utils.typing import BlockNumber, List, PaymentAmount, PaymentID, TargetAddress
from raiden.waiting import wait_for_block, wait_for_settle
@raise_on_failure
@pytest.mark.parametrize("channels_per_node", [CHAIN])
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("settle_timeout", [50])
def test_refund_messages(raiden_chain, token_addresses, deposit, network_wait):
    """Draining the app1<->app2 channel forces a transfer app0->app2 to fail:
    app1 answers app0's locked transfer with a refund transfer, and the
    refunded amount stays locked on both ends of app0<->app1 (issue #1091).
    """
    # The network has the following topology:
    #
    # App0 <---> App1 <---> App2
    app0, app1, app2 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    token_address = token_addresses[0]
    token_network_registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(app0), token_network_registry_address, token_address
    )
    # Exhaust the channel App1 <-> App2 (to force the refund transfer)
    # Here we make a single-hop transfer, no fees are charged so we should
    # send the whole deposit amount to drain the channel.
    transfer(
        initiator_app=app1,
        target_app=app2,
        token_address=token_address,
        amount=deposit,
        identifier=PaymentID(1),
    )
    # The lock app0 creates covers the refund amount plus fees and margin.
    refund_amount = deposit // 2
    refund_fees = calculate_fee_for_amount(refund_amount)
    fee_margin = calculate_fee_margin(refund_amount, refund_fees)
    refund_amount_with_fees = refund_amount + refund_fees + fee_margin
    identifier = 1
    payment_status = app0.raiden.mediated_transfer_async(
        token_network_address, refund_amount, app2.raiden.address, identifier
    )
    msg = "Must fail, there are no routes available"
    assert isinstance(payment_status.payment_done.wait(), EventPaymentSentFailed), msg
    # The transfer from app0 to app2 failed, so the balances did not change.
    # Since the refund is not unlocked both channels have the corresponding
    # amount locked (issue #1091)
    send_lockedtransfer = raiden_events_search_for_item(
        app0.raiden,
        SendLockedTransfer,
        {"transfer": {"lock": {"amount": refund_amount_with_fees}}},
    )
    assert send_lockedtransfer
    send_refundtransfer = raiden_events_search_for_item(app1.raiden, SendRefundTransfer, {})
    assert send_refundtransfer
    # Both sides still show the original deposit as balance, with the refunded
    # amount held as a pending lock on each side of app0<->app1.
    with gevent.Timeout(network_wait):
        wait_assert(
            func=assert_synced_channel_state,
            token_network_address=token_network_address,
            app0=app0,
            balance0=deposit,
            pending_locks0=[send_lockedtransfer.transfer.lock],
            app1=app1,
            balance1=deposit,
            pending_locks1=[send_refundtransfer.transfer.lock],
        )
    # This channel was exhausted to force the refund transfer except for the fees
    with gevent.Timeout(network_wait):
        wait_assert(
            func=assert_succeeding_transfer_invariants,
            token_network_address=token_network_address,
            app0=app1,
            balance0=0,
            pending_locks0=[],
            app1=app2,
            balance1=deposit * 2,
            pending_locks1=[],
        )
@raise_on_failure
@pytest.mark.parametrize("privatekey_seed", ["test_refund_transfer:{}"])
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("channels_per_node", [CHAIN])
def test_refund_transfer(
    raiden_chain, number_of_nodes, token_addresses, deposit, network_wait, retry_timeout
):
    """A failed transfer must send a refund back, and the subsequent
    LockExpired must be processed even when delivered late.
    TODO:
    - Unlock the token on refund #1091
    - Clear the pending locks and update the locked amount #193
    - Remove the refund message type #490"""
    # Topology:
    #
    # 0 -> 1 -> 2
    #
    app0, app1, app2 = raiden_chain
    token_address = token_addresses[0]
    token_network_registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(app0), token_network_registry_address, token_address
    )
    # make a transfer to test the path app0 -> app1 -> app2
    identifier_path = PaymentID(1)
    amount_path = PaymentAmount(1)
    transfer(
        initiator_app=app0,
        target_app=app2,
        token_address=token_address,
        amount=amount_path,
        identifier=identifier_path,
        timeout=network_wait * number_of_nodes,
    )
    # drain the channel app1 -> app2
    identifier_drain = PaymentID(2)
    amount_drain = PaymentAmount(deposit * 8 // 10)
    transfer(
        initiator_app=app1,
        target_app=app2,
        token_address=token_address,
        amount=amount_drain,
        identifier=identifier_drain,
        timeout=network_wait,
    )
    # Sanity-check both channels after the two transfers above.
    with gevent.Timeout(network_wait):
        wait_assert(
            assert_synced_channel_state,
            token_network_address,
            app0,
            deposit - amount_path,
            [],
            app1,
            deposit + amount_path,
            [],
        )
    with gevent.Timeout(network_wait):
        wait_assert(
            assert_synced_channel_state,
            token_network_address,
            app1,
            deposit - amount_path - amount_drain,
            [],
            app2,
            deposit + amount_path + amount_drain,
            [],
        )
    # app0 -> app1 -> app2 is the only available path, but the channel app1 ->
    # app2 doesn't have capacity, so a refund will be sent on app1 -> app0
    identifier_refund = PaymentID(3)
    amount_refund = PaymentAmount(50)
    fee = calculate_fee_for_amount(amount_refund)
    fee_margin = calculate_fee_margin(amount_refund, fee)
    amount_refund_with_fees = amount_refund + fee + fee_margin
    payment_status = app0.raiden.mediated_transfer_async(
        token_network_address, amount_refund, app2.raiden.address, identifier_refund
    )
    msg = "there is no path with capacity, the transfer must fail"
    assert isinstance(payment_status.payment_done.wait(), EventPaymentSentFailed), msg
    # A lock structure with the correct amount
    send_locked = raiden_events_search_for_item(
        app0.raiden,
        SendLockedTransfer,
        {"transfer": {"lock": {"amount": amount_refund_with_fees}}},
    )
    assert send_locked
    secrethash = send_locked.transfer.lock.secrethash
    send_refund = raiden_events_search_for_item(app1.raiden, SendRefundTransfer, {})
    assert send_refund
    # The refund must mirror the original lock (same amount and secrethash).
    lock = send_locked.transfer.lock
    refund_lock = send_refund.transfer.lock
    assert lock.amount == refund_lock.amount
    assert lock.secrethash
    assert lock.expiration
    assert lock.secrethash == refund_lock.secrethash
    # Both channels have the amount locked because of the refund message
    with gevent.Timeout(network_wait):
        wait_assert(
            assert_synced_channel_state,
            token_network_address,
            app0,
            deposit - amount_path,
            [lock],
            app1,
            deposit + amount_path,
            [refund_lock],
        )
    with gevent.Timeout(network_wait):
        wait_assert(
            assert_synced_channel_state,
            token_network_address,
            app1,
            deposit - amount_path - amount_drain,
            [],
            app2,
            deposit + amount_path + amount_drain,
            [],
        )
    # Additional checks for LockExpired causing nonce mismatch after refund transfer:
    # https://github.com/raiden-network/raiden/issues/3146#issuecomment-447378046
    # At this point make sure that the initiator has not deleted the payment task
    assert secrethash in state_from_raiden(app0.raiden).payment_mapping.secrethashes_to_task
    # Wait for lock expiration but make sure app0 never processes LockExpired
    with dont_handle_lock_expired_mock(app0):
        wait_for_block(
            raiden=app0.raiden,
            block_number=BlockNumber(channel.get_sender_expiration_threshold(lock.expiration) + 1),
            retry_timeout=retry_timeout,
        )
        # make sure that app0 still has the payment task for the secrethash
        # https://github.com/raiden-network/raiden/issues/3183
        assert secrethash in state_from_raiden(app0.raiden).payment_mapping.secrethashes_to_task
        # make sure that app1 sent a lock expired message for the secrethash
        send_lock_expired = raiden_events_search_for_item(
            app1.raiden, SendLockExpired, {"secrethash": secrethash}
        )
        assert send_lock_expired
        # make sure that app0 never got it
        state_changes = app0.raiden.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
        assert not search_for_item(state_changes, ReceiveLockExpired, {"secrethash": secrethash})
    # Out of the mocked transport: app0 can now receive LockExpired again.
    # Now wait till app0 receives and processes LockExpired
    receive_lock_expired = wait_for_state_change(
        app0.raiden, ReceiveLockExpired, {"secrethash": secrethash}, retry_timeout
    )
    # And also till app1 received the processed
    wait_for_state_change(
        app1.raiden,
        ReceiveProcessed,
        {"message_identifier": receive_lock_expired.message_identifier},
        retry_timeout,
    )
    # make sure app1 queue has cleared the SendLockExpired
    chain_state1 = views.state_from_app(app1)
    queues1 = views.get_all_messagequeues(chain_state=chain_state1)
    result = [
        (queue_id, queue)
        for queue_id, queue in queues1.items()
        if queue_id.recipient == app0.raiden.address and queue
    ]
    assert not result
    # and now wait for 1 more block so that the payment task can be deleted
    wait_for_block(
        raiden=app0.raiden,
        block_number=app0.raiden.get_block_number() + 1,
        retry_timeout=retry_timeout,
    )
    # and since the lock expired message has been sent and processed then the
    # payment task should have been deleted from both nodes
    # https://github.com/raiden-network/raiden/issues/3183
    assert secrethash not in state_from_raiden(app0.raiden).payment_mapping.secrethashes_to_task
    assert secrethash not in state_from_raiden(app1.raiden).payment_mapping.secrethashes_to_task
@raise_on_failure
@pytest.mark.parametrize("privatekey_seed", ["test_different_view_of_last_bp_during_unlock:{}"])
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("channels_per_node", [CHAIN])
def test_different_view_of_last_bp_during_unlock(
raiden_chain: List[App],
restart_node,
number_of_nodes,
token_addresses,
deposit,
network_wait,
retry_timeout,
blockchain_type,
):
"""Test for https://github.com/raiden-network/raiden/issues/3196#issuecomment-449163888"""
# Topology:
#
# 0 -> 1 -> 2
#
app0, app1, app2 = raiden_chain
token_address = token_addresses[0]
token_network_registry_address = app0.raiden.default_registry.address
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(app0), token_network_registry_address, token_address
)
assert token_network_address
token_proxy = app0.raiden.proxy_manager.token(token_address, "latest")
initial_balance0 = token_proxy.balance_of(app0.raiden.address)
initial_balance1 = token_proxy.balance_of(app1.raiden.address)
# make a transfer to test the path app0 -> app1 -> app2
identifier_path = PaymentID(1)
amount_path = PaymentAmount(1)
transfer(
initiator_app=app0,
target_app=app2,
token_address=token_address,
amount=amount_path,
identifier=identifier_path,
timeout=network_wait * number_of_nodes,
)
# drain the channel app1 -> app2
identifier_drain = PaymentID(2)
amount_drain = PaymentAmount(deposit * 8 // 10)
transfer(
initiator_app=app1,
target_app=app2,
token_address=token_address,
amount=amount_drain,
identifier=identifier_drain,
timeout=network_wait,
)
with gevent.Timeout(network_wait):
wait_assert(
assert_synced_channel_state,
token_network_address,
app0,
deposit - amount_path,
[],
app1,
deposit + amount_path,
[],
)
with gevent.Timeout(network_wait):
wait_assert(
assert_synced_channel_state,
token_network_address,
app1,
deposit - amount_path - amount_drain,
[],
app2,
deposit + amount_path + amount_drain,
[],
)
# app0 -> app1 -> app2 is the only available path, but the channel app1 ->
# app2 doesn't have capacity, so a refund will be sent on app1 -> app0
identifier_refund = PaymentID(3)
amount_refund = PaymentAmount(50)
fee = calculate_fee_for_amount(amount_refund)
fee_margin = calculate_fee_margin(amount_refund, fee)
amount_refund_with_fees = amount_refund + fee + fee_margin
payment_status = app0.raiden.mediated_transfer_async(
token_network_address, amount_refund, TargetAddress(app2.raiden.address), identifier_refund
)
msg = "there is no path with capacity, the transfer must fail"
assert isinstance(payment_status.payment_done.wait(), EventPaymentSentFailed), msg
# A lock structure with the correct amount
send_locked = raiden_events_search_for_item(
app0.raiden,
SendLockedTransfer,
{"transfer": {"lock": {"amount": amount_refund_with_fees}}},
)
assert send_locked
secrethash = send_locked.transfer.lock.secrethash
send_refund = raiden_events_search_for_item(app1.raiden, SendRefundTransfer, {})
assert send_refund
lock = send_locked.transfer.lock
refund_lock = send_refund.transfer.lock
assert lock.amount == refund_lock.amount
assert lock.secrethash
assert lock.expiration
assert lock.secrethash == refund_lock.secrethash
# Both channels have the amount locked because of the refund message
with gevent.Timeout(network_wait):
wait_assert(
assert_synced_channel_state,
token_network_address,
app0,
deposit - amount_path,
[lock],
app1,
deposit + amount_path,
[refund_lock],
)
with gevent.Timeout(network_wait):
wait_assert(
assert_synced_channel_state,
token_network_address,
app1,
deposit - amount_path - amount_drain,
[],
app2,
deposit + amount_path + amount_drain,
[],
)
# Additional checks for LockExpired causing nonce mismatch after refund transfer:
# https://github.com/raiden-network/raiden/issues/3146#issuecomment-447378046
# At this point make sure that the initiator has not deleted the payment task
assert secrethash in state_from_raiden(app0.raiden).payment_mapping.secrethashes_to_task
with dont_handle_node_change_network_state():
# now app1 goes offline
app1.raiden.stop()
app1.raiden.greenlet.get()
assert not app1.raiden
# Wait for lock expiration so that app0 sends a LockExpired
wait_for_block(
raiden=app0.raiden,
block_number=BlockNumber(channel.get_sender_expiration_threshold(lock.expiration) + 1),
retry_timeout=retry_timeout,
)
# make sure that app0 sent a lock expired message for the secrethash
wait_for_raiden_event(
app0.raiden, SendLockExpired, {"secrethash": secrethash}, retry_timeout
)
# now app0 closes the channel
RaidenAPI(app0.raiden).channel_close(
| |
<reponame>discoverygarden/dgi_repo<gh_stars>0
"""
Functions to help with FOXML.
"""
import logging
import base64
from io import BytesIO
import os
try:
from os import scandir as scandir
except ImportError:
from scandir import scandir
import requests
from lxml import etree
from psycopg2 import IntegrityError
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED
import click
import dgi_repo.database.delete.datastream_relations as ds_relations_purger
import dgi_repo.database.write.datastream_relations as ds_relations_writer
import dgi_repo.database.delete.object_relations as object_relations_purger
import dgi_repo.database.write.object_relations as object_relations_writer
import dgi_repo.database.read.datastreams as datastream_reader
import dgi_repo.database.write.repo_objects as object_writer
import dgi_repo.database.read.repo_objects as object_reader
import dgi_repo.database.filestore as filestore
from dgi_repo.database import cache
from dgi_repo.database.read.repo_objects import (object_info_from_raw,
object_id_from_raw)
from dgi_repo.exceptions import (ObjectExistsError,
ExternalDatastreamsNotSupported,
ObjectDoesNotExistError)
from dgi_repo.fcrepo3.utilities import (write_ds, format_date, RDF_NAMESPACE,
dsid_from_fedora_uri)
from dgi_repo.database.write.sources import upsert_user, upsert_source
from dgi_repo.database.utilities import (check_cursor, LITERAL_RDF_OBJECT,
get_connection)
from dgi_repo.database.write.log import upsert_log
from dgi_repo.database.read.sources import user
from dgi_repo import utilities as utils
from dgi_repo.fcrepo3 import relations
from dgi_repo.database.relationships import (
repo_object_rdf_object_from_element,
datastream_rdf_object_from_element
)
from dgi_repo.configuration import configuration as _config
from dgi_repo.database.delete.repo_objects import delete_object
# Module-level logger for this FOXML helper module.
logger = logging.getLogger(__name__)
# XML namespace / schema constants used when serializing FOXML 1.1.
FOXML_NAMESPACE = 'info:fedora/fedora-system:def/foxml#'
SCHEMA_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
SCHEMA_LOCATION = ('info:fedora/fedora-system:def/foxml# '
                   'http://www.fedora.info/definitions/1/0/foxml1-1.xsd')
# Two-way mapping between single-letter object state codes and their labels.
OBJECT_STATE_MAP = {'A': 'Active', 'I': 'Inactive', 'D': 'Deleted'}
OBJECT_STATE_LABEL_MAP = {'Active': 'A', 'Inactive': 'I', 'Deleted': 'D'}
def import_foxml(xml, source, pid=None, cursor=None):
    """
    Create a repo object out of a FOXML file.

    Parsing is streamed through a FoxmlTarget parser target, which performs
    the import as elements are encountered.
    """
    import_target = FoxmlTarget(source, pid=pid, cursor=cursor)
    import_parser = etree.XMLParser(target=import_target, huge_tree=True)
    return etree.parse(xml, import_parser)
def create_default_dc_ds(object_id, pid, cursor=None):
    """
    Populate a minimal DC DS as Fedora does.

    :param object_id: the database ID of the repo object to attach the DS to.
    :param pid: the object PID, written into dc:identifier.
    :param cursor: optional database cursor to reuse.
    """
    cursor = check_cursor(cursor, ISOLATION_LEVEL_READ_COMMITTED)
    dc_tree = etree.fromstring('''
        <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
        xmlns:dc="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
        http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
        <dc:identifier></dc:identifier>
        </oai_dc:dc>
    ''')
    # The first (only) child is dc:identifier; fill it with the PID.
    dc_tree[0].text = pid
    # Record a log entry for the auto-generated datastream.
    log = upsert_log('Automatically generated DC.').fetchone()[0]
    # Store as an inline ('X' control group) XML datastream named DC.
    filestore.create_datastream_from_data(
        {
            'object': object_id,
            'dsid': 'DC',
            'label': 'DC Record',
            'log': log,
            'control_group': 'X'
        },
        etree.tostring(dc_tree),
        'application/xml',
        cursor=cursor
    )
def generate_foxml(pid, base_url='http://localhost:8080/fedora',
                   archival=False, inline_to_managed=False, cursor=None):
    """
    Generate FOXML from a PID as a SpooledTemporaryFile.

    base_url: repository base used for INTERNAL_ID content references.
    archival: embed managed/inline content as base64 binaryContent.
    inline_to_managed: emit inline-XML datastreams as content references
        instead of embedded xmlContent.

    Returns the spooled file, rewound to the start.
    """
    foxml_file = utils.SpooledTemporaryFile()
    # Using a spooled temp file, double buffering will just eat memory.
    with etree.xmlfile(foxml_file, buffered=False, encoding="utf-8") as foxml:
        foxml.write_declaration(version='1.0')
        populate_foxml_etree(foxml, pid, base_url=base_url, archival=archival,
                             inline_to_managed=inline_to_managed,
                             cursor=cursor)
        foxml_file.seek(0)
        return foxml_file
    # Bug fix: an unreachable "return None" that followed the return above
    # has been removed.
def populate_foxml_etree(foxml, pid, base_url='http://localhost:8080/fedora',
                         archival=False, inline_to_managed=False, cursor=None):
    """
    Add FOXML from a PID into an lxml etree.

    Raises:
        ObjectDoesNotExistError: The object doesn't exist.
    """
    root_attributes = {
        'VERSION': '1.1',
        'PID': pid,
        '{{{}}}schemaLocation'.format(SCHEMA_NAMESPACE): SCHEMA_LOCATION,
    }
    with foxml.element('{{{0}}}digitalObject'.format(FOXML_NAMESPACE),
                       **root_attributes):
        cursor = object_info_from_raw(pid, cursor=cursor)
        object_info = cursor.fetchone()
        if object_info is None:
            # Unknown PID: abort before writing any child elements.
            raise ObjectDoesNotExistError(pid)
        populate_foxml_properties(foxml, object_info, cursor=cursor)
        populate_foxml_datastreams(foxml, pid, object_info, base_url,
                                   archival, inline_to_managed, cursor)
def populate_foxml_properties(foxml, object_info, cursor=None):
    """
    Add FOXML properties into an lxml etree.

    Writes the object's state, label, owner name, created and modified
    dates as <foxml:property> elements, in that order.
    """
    with foxml.element('{{{0}}}objectProperties'.format(FOXML_NAMESPACE)):
        prop_tag = '{{{0}}}property'.format(FOXML_NAMESPACE)

        def write_property(namespace, predicate, value):
            # VALUE is listed first to keep the serialized attribute
            # order identical to the previous implementation.
            foxml.write(etree.Element(prop_tag, {
                'VALUE': value,
                'NAME': '{}{}'.format(namespace, predicate),
            }))

        write_property(relations.FEDORA_MODEL_NAMESPACE,
                       relations.STATE_PREDICATE,
                       OBJECT_STATE_MAP[object_info['state']])
        write_property(relations.FEDORA_MODEL_NAMESPACE,
                       relations.LABEL_PREDICATE,
                       object_info['label'] if object_info['label'] else '')
        # Resolve the owner's user id to a name for the property value.
        user(object_info['owner'], cursor=cursor)
        owner_information = cursor.fetchone()
        write_property(relations.FEDORA_MODEL_NAMESPACE,
                       relations.OWNER_PREDICATE,
                       owner_information['name'])
        write_property(relations.FEDORA_MODEL_NAMESPACE,
                       relations.CREATED_DATE_PREDICATE,
                       format_date(object_info['created']))
        write_property(relations.FEDORA_VIEW_NAMESPACE,
                       relations.LAST_MODIFIED_DATE_PREDICATE,
                       format_date(object_info['modified']))
def populate_foxml_datastreams(foxml, pid, object_info,
                               base_url='http://localhost:8080/fedora',
                               archival=False, inline_to_managed=False,
                               cursor=None):
    """
    Add FOXML datastreams into an lxml etree.

    Fetches every datastream of the object and delegates each to
    populate_foxml_datastream.
    """
    cursor = datastream_reader.datastreams(object_info['id'])
    for datastream in cursor.fetchall():
        populate_foxml_datastream(foxml, pid, datastream, base_url=base_url,
                                  archival=archival,
                                  inline_to_managed=inline_to_managed,
                                  cursor=cursor)
def populate_foxml_datastream(foxml, pid, datastream,
                              base_url='http://localhost:8080/fedora',
                              archival=False, inline_to_managed=False,
                              cursor=None):
    """
    Add a FOXML datastream into an lxml etree.

    Writes one <foxml:datastream> element containing a
    <foxml:datastreamVersion> per stored version (old versions first,
    the current one last).  Content emission depends on control group:
      - 'X' (inline XML), unless inline_to_managed: embedded xmlContent.
      - 'M'/'X' with archival=True: base64 binaryContent.
      - 'R': contentLocation TYPE="URL" pointing at the external URI.
      - otherwise: contentLocation TYPE="INTERNAL_ID" pointing back at
        this repository via base_url.
    """
    datastream_attributes = {
        'ID': datastream['dsid'],
        'STATE': datastream['state'],
        'CONTROL_GROUP': datastream['control_group'],
        'VERSIONABLE': str(datastream['versioned']).lower(),
    }
    with foxml.element('{{{0}}}datastream'.format(FOXML_NAMESPACE),
                       datastream_attributes):
        # Old versions first; the current row is appended as the last one.
        versions = list(datastream_reader.old_datastreams(datastream['id']))
        versions.append(datastream)
        for index, version in enumerate(versions):
            datastream_reader.resource(version['resource'], cursor=cursor)
            resource_info = cursor.fetchone()
            datastream_reader.mime(resource_info['mime'], cursor=cursor)
            mime_info = cursor.fetchone()
            try:
                # Old versions carry the time they were superseded...
                created = format_date(version['committed'])
            except KeyError:
                # ...the current version uses the datastream's creation time.
                created = format_date(datastream['created'])
            version_attributes = {
                'ID': '{}.{}'.format(datastream['dsid'], index),
                'LABEL': version['label'] if version['label'] else '',
                'CREATED': created,
                'MIMETYPE': mime_info['mime'],
            }
            # Redirect ('R') datastreams have no local file to size.
            if datastream['control_group'] != 'R':
                size = filestore.uri_size(resource_info['uri'])
                version_attributes['SIZE'] = str(size)
            with foxml.element('{{{0}}}datastreamVersion'.format(
                    FOXML_NAMESPACE), version_attributes):
                # Emit a datastreamDigest element per stored checksum.
                datastream_reader.checksums(version['resource'],
                                            cursor=cursor)
                checksums = cursor.fetchall()
                for checksum in checksums:
                    foxml.write(etree.Element(
                        '{{{0}}}datastreamDigest'.format(FOXML_NAMESPACE),
                        {
                            'TYPE': checksum['type'],
                            'DIGEST': checksum['checksum']
                        }
                    ))
                if datastream['control_group'] == 'X' and (not
                        inline_to_managed):
                    # Inline XML: parse the stored file and embed its root.
                    content_element = etree.Element(
                        '{{{0}}}xmlContent'.format(FOXML_NAMESPACE)
                    )
                    uri = filestore.resolve_uri(resource_info['uri'])
                    xml_etree = etree.parse(uri)
                    content_element.append(xml_etree.getroot())
                    foxml.write(content_element)
                elif datastream['control_group'] in ['M', 'X'] and archival:
                    # Archival export: stream the raw file as base64.
                    # NOTE(review): base64.encode writes to a file-like
                    # object; assumes the xmlfile object accepts raw
                    # writes here -- confirm.
                    uri = filestore.resolve_uri(resource_info['uri'])
                    with open(uri, 'rb') as ds_file:
                        with foxml.element('{{{0}}}binaryContent'.format(
                                FOXML_NAMESPACE)):
                            base64.encode(ds_file, foxml)
                else:
                    if datastream['control_group'] == 'R':
                        # Redirect: point straight at the external URI.
                        content_attributes = {
                            'TYPE': 'URL',
                            'REF': resource_info['uri'],
                        }
                    else:
                        # Managed content: reference this version through
                        # the repository's own datastream content URL.
                        content_attributes = {
                            'TYPE': 'INTERNAL_ID',
                            'REF': ('{}/objects/{}/datastreams/{}/'
                                    'content?asOfDateTime={}').format(
                                base_url,
                                pid,
                                datastream['dsid'],
                                created
                            ),
                        }
                    foxml.write(etree.Element(
                        '{{{0}}}contentLocation'.format(FOXML_NAMESPACE),
                        content_attributes
                    ))
def internalize_rels(pid, dsid, source, cursor=None):
    """
    Internalize rels given a ds_db_id.

    Only the 'DC', 'RELS-EXT' and 'RELS-INT' datastreams carry relation
    information; any other dsid is a no-op.  When the datastream or its
    resource is missing, the relevant internalizer is invoked with no
    content so previously stored relations are purged.

    Returns the cursor.
    """
    cursor = check_cursor(cursor)
    if dsid not in ['DC', 'RELS-EXT', 'RELS-INT']:
        return cursor
    object_reader.object_id_from_raw(pid, cursor=cursor)
    object_id = cursor.fetchone()['id']
    datastream_reader.datastream({'object': object_id, 'dsid': dsid},
                                 cursor=cursor)
    ds_info = cursor.fetchone()
    if ds_info is None or ds_info['resource'] is None:
        if dsid == 'DC':
            internalize_rels_dc(None, object_id, cursor=cursor)
        elif dsid == 'RELS-INT':
            # Bug fix: this previously called etree.parse(None), which
            # raises instead of signalling "no content";
            # internalize_rels_int handles a None tree by purging only.
            internalize_rels_int(None, object_id, source, cursor=cursor)
        elif dsid == 'RELS-EXT':
            internalize_rels_ext(None, object_id, source,
                                 cursor=cursor)
        return cursor
    else:
        datastream_reader.resource(ds_info['resource'], cursor=cursor)
        resource_info = cursor.fetchone()
        resource_path = filestore.resolve_uri(resource_info['uri'])
        with open(resource_path, 'rb') as relations_file:
            if dsid == 'DC':
                internalize_rels_dc(relations_file, object_id, cursor=cursor)
            elif dsid == 'RELS-INT':
                internalize_rels_int(etree.parse(relations_file), object_id,
                                     source, cursor=cursor)
            elif dsid == 'RELS-EXT':
                internalize_rels_ext(relations_file, object_id, source,
                                     cursor=cursor)
    return cursor
def internalize_rels_int(relation_tree, object_id, source, purge=True,
                         cursor=None):
    """
    Update the RELS_INT information in the DB.

    relation_tree: parsed RDF tree, or None to only purge.  Each
    rdf:Description is resolved to one of the object's datastreams via
    its rdf:about URI; every child element becomes a stored datastream
    relationship.
    """
    cursor = check_cursor(cursor, ISOLATION_LEVEL_READ_COMMITTED)
    datastream_reader.datastreams(object_id, cursor=cursor)
    # Map each of the object's DSIDs to its datastream DB id.
    ds_db_ids = {row['dsid']: row['id'] for row in cursor}
    if purge:
        for ds_db_id in ds_db_ids.values():
            # Purge existing relations.
            ds_relations_purger.delete_datastream_relations(
                ds_db_id,
                cursor=cursor
            )
    if relation_tree is None:
        # No content: the purge above is the whole effect.
        return cursor
    # Ingest new relations.
    for description in relation_tree.getroot():
        # rdf:about holds a Fedora datastream URI; reduce it to a DSID.
        dsid = dsid_from_fedora_uri(description.attrib['{{{}}}about'.format(
            RDF_NAMESPACE
        )])
        for relation in description:
            rdf_object, rdf_type = datastream_rdf_object_from_element(relation,
                                                                      source,
                                                                      cursor)
            relation_qname = etree.QName(relation)
            ds_relations_writer.write_relationship(
                relation_qname.namespace,
                relation_qname.localname,
                ds_db_ids[dsid],
                rdf_object,
                rdf_type,
                cursor=cursor
            )
            # Drain the write's result row (presumably a RETURNING id)
            # before the cursor is reused.
            cursor.fetchone()
    return cursor
def internalize_rels_dc(relations_file, object_id, purge=True, cursor=None):
    """
    Update the DC relation information in the DB.

    relations_file: file-like object containing the DC XML, or None to
    only purge existing DC relations.
    """
    cursor = check_cursor(cursor, ISOLATION_LEVEL_READ_COMMITTED)
    if purge:
        # Drop whatever DC relations are currently stored.
        object_relations_purger.delete_dc_relations(object_id, cursor=cursor)
    if relations_file is None:
        return cursor
    # Every child of the DC root becomes a literal relationship.
    for relation in etree.parse(relations_file).getroot():
        rdf_object = relation.text if relation.text is not None else ''
        object_relations_writer.write_relationship(
            relations.DC_NAMESPACE,
            etree.QName(relation).localname,
            object_id,
            rdf_object,
            LITERAL_RDF_OBJECT,
            cursor=cursor
        )
        # Drain the statement's result rows before issuing the next one.
        cursor.fetchall()
    return cursor
def internalize_rels_ext(relations_file, object_id, source, purge=True,
                         cursor=None):
    """
    Update the RELS_EXT information in the DB.

    relations_file: file-like object containing the RELS-EXT RDF, or
    None to only purge existing object relations.
    """
    cursor = check_cursor(cursor, ISOLATION_LEVEL_READ_COMMITTED)
    if purge:
        # Drop whatever object relations are currently stored.
        object_relations_purger.delete_object_relations(
            object_id,
            cursor=cursor
        )
    if relations_file is None:
        return cursor
    # The first child of the RDF root is the object's rdf:Description.
    for relation in etree.parse(relations_file).getroot()[0]:
        rdf_object, rdf_type = repo_object_rdf_object_from_element(
            relation,
            source,
            cursor
        )
        qname = etree.QName(relation)
        object_relations_writer.write_relationship(
            qname.namespace,
            qname.localname,
            object_id,
            rdf_object,
            rdf_type=rdf_type,
            cursor=cursor
        )
        # Drain the statement's result row before issuing the next one.
        cursor.fetchone()
    return cursor
class FoxmlTarget(object):
"""
Parser target for incremental reading/ingest of FOXML.
"""
    def __init__(self, source, pid=None, cursor=None):
        """
        Prep for use.

        source: identifier of the importing source (passed through to
            the user/log upserts) -- presumably a source DB id; confirm
            against callers.
        pid: optional PID to use instead of the one found in the FOXML.
        """
        self.cursor = check_cursor(cursor, ISOLATION_LEVEL_READ_COMMITTED)
        self.source = source
        # Object-level properties harvested from <foxml:property> tags.
        self.object_info = {'PID': pid}
        # Per-DSID datastream info; each entry carries a 'versions' list.
        self.ds_info = {}
        # DB id of the created object (set when objectProperties closes).
        self.object_id = None
        self.rels_int = None
        # Spooled temp file collecting binaryContent, when present.
        self.ds_file = None
        # TreeBuilder collecting inline xmlContent, when present.
        self.tree_builder = None
        # DSID of the datastream currently being parsed.
        self.dsid = None
    def start(self, tag, attributes, nsmap):
        """
        Grab data from the start of tags.

        Dispatches on the FOXML element being opened and accumulates
        state on self; content of the AUDIT datastream is skipped.
        """
        # Start up a file for content.
        if (tag == '{{{0}}}xmlContent'.format(FOXML_NAMESPACE) and
                self.dsid != 'AUDIT'):
            # Inline XML is rebuilt incrementally with a TreeBuilder.
            self.tree_builder = etree.TreeBuilder()
        elif self.tree_builder is not None:
            # We are inside xmlContent: forward the event to the builder.
            self.tree_builder.start(tag, attributes, nsmap)
        if tag == '{{{0}}}binaryContent'.format(FOXML_NAMESPACE):
            # Base64 payload: spool it to a temp file as it streams in.
            self.ds_file = utils.SpooledTemporaryFile()
        if tag == '{{{0}}}contentLocation'.format(FOXML_NAMESPACE):
            # External/managed content: keep the reference attributes.
            self.ds_info[self.dsid]['versions'][-1]['data_ref'] = attributes
        # Record current DSID.
        if tag == '{{{0}}}datastream'.format(FOXML_NAMESPACE):
            self.dsid = attributes['ID']
            self.ds_info[attributes['ID']] = {'versions': []}
        # Store DS info.
        if (tag == '{{{0}}}datastream'.format(FOXML_NAMESPACE) and
                self.dsid != 'AUDIT'):
            self.ds_info[self.dsid].update(attributes)
        if (tag == '{{{0}}}datastreamVersion'.format(FOXML_NAMESPACE) and
                self.dsid != 'AUDIT'):
            # A fresh version starts with empty content/reference slots.
            attributes['data'] = None
            attributes['data_ref'] = None
            attributes['checksums'] = []
            self.ds_info[self.dsid]['versions'].append(attributes)
        # Store checksum info.
        if tag == '{{{0}}}contentDigest'.format(FOXML_NAMESPACE):
            checksum = {
                'type': attributes['TYPE'],
                'checksum': attributes['DIGEST'],
            }
            self.ds_info[self.dsid]['versions'][-1]['checksums'].append(
                checksum
            )
        # Store object info.
        if tag == '{{{0}}}property'.format(FOXML_NAMESPACE):
            self.object_info[attributes['NAME']] = attributes['VALUE']
        if tag == '{{{0}}}digitalObject'.format(FOXML_NAMESPACE):
            if self.object_info['PID'] is None:
                # No explicit PID requested; take the one in the FOXML.
                self.object_info['PID'] = attributes['PID']
            logger.info('Attempting import of %s.', self.object_info['PID'])
def end(self, tag):
"""
Internalize data at the end of tags.
Raises:
ObjectExistsError: The object already exists.
"""
# Create the object.
if tag == '{{{0}}}objectProperties'.format(FOXML_NAMESPACE):
object_db_info = {}
raw_namespace, object_db_info['pid_id'] = utils.break_pid(
self.object_info['PID']
)
object_db_info['namespace'] = cache.repo_object_namespace_id(
raw_namespace,
cursor=self.cursor
)
raw_log = 'Object created through FOXML import.'
upsert_log(raw_log, cursor=self.cursor)
object_db_info['log'] = self.cursor.fetchone()[0]
try:
raw_owner = self.object_info['{}{}'.format(
relations.FEDORA_MODEL_NAMESPACE,
relations.OWNER_PREDICATE
)]
except KeyError:
pass
else:
upsert_user({'name': raw_owner, 'source': self.source},
cursor=self.cursor)
object_db_info['owner'] = self.cursor.fetchone()[0]
try:
object_db_info['created'] = self.object_info['{}{}'.format(
relations.FEDORA_MODEL_NAMESPACE,
relations.CREATED_DATE_PREDICATE
)]
except KeyError:
pass
try:
object_db_info['modified'] = self.object_info['{}{}'.format(
relations.FEDORA_VIEW_NAMESPACE,
relations.LAST_MODIFIED_DATE_PREDICATE
)]
except KeyError:
pass
try:
object_db_info['state'] = OBJECT_STATE_LABEL_MAP[
self.object_info['{}{}'.format(
relations.FEDORA_MODEL_NAMESPACE,
relations.STATE_PREDICATE
)]]
except KeyError:
try:
object_db_info['state'] = self.object_info['{}{}'.format(
relations.FEDORA_MODEL_NAMESPACE,
relations.STATE_PREDICATE
)]
except KeyError:
pass
object_db_info['label'] = self.object_info['{}{}'.format(
relations.FEDORA_MODEL_NAMESPACE,
relations.LABEL_PREDICATE
)]
object_writer.jump_pids(object_db_info['namespace'],
object_db_info['pid_id'],
cursor=self.cursor)
try:
object_writer.write_object(object_db_info, cursor=self.cursor)
except IntegrityError:
raise ObjectExistsError(self.object_info['PID'])
self.object_id = self.cursor.fetchone()[0]
# Stash content.
if (tag == '{{{0}}}xmlContent'.format(FOXML_NAMESPACE) and
self.dsid != 'AUDIT'):
base_element = self.tree_builder.close()
xml_ds = BytesIO(etree.tostring(base_element))
| |
import math
import re
from collections import defaultdict
from GlyphsApp import Glyphs, OFFCURVE, GSLayer
from Foundation import NSPoint
class layerPositions:
    def __init__(self, l, all_indic_headlines=None):
        """
        Measurement helper for a single layer.

        l: the layer (GSLayer) to measure.
        all_indic_headlines: optional precomputed headline dict, to avoid
            recomputing it per glyph (see _get_all_indic_headlines).
        """
        self.layer = l
        # Decomposed, overlap-free copy used for node measurements.
        self.layer_flat = l.copyDecomposedLayer()
        self.layer_flat.removeOverlap()
        try:
            self.italic_angle = Glyphs.font.masters[self.layer.associatedMasterId].italicAngle
        except AttributeError:
            # No master resolvable for this layer: treat as upright.
            self.italic_angle = 0
        self.is_smallcaps = l.parent.subCategory == 'Smallcaps'
        # Per-layer cache of computed metric values (see get_coords).
        self.layer_metrics = defaultdict(dict)
        # Strips a leading 'xpos_'/'ypos_' prefix from metric names.
        self.aname_sub = re.compile('^[xy]pos_')
        # Lazily computed caches for outline extremes.
        self._leftmost_node = None
        self._rightmost_node = None
        self._topmost_node = None
        self._bottommost_node = None
        self._all_layer_nodes = None
        self._top_two_nodes = None
        self._bottom_two_nodes = None
        self._bottom_two_nodes_consecutive = None
        self._indic_headlines = all_indic_headlines or self._get_all_indic_headlines()
        self._indic_stem_widths = self._get_indic_stem_widths()
def compensate_italic_angle(self, pos_y):
if not self.italic_angle:
return 0
return round(math.tan(math.radians(self.italic_angle)) * pos_y)
def all_layers_nodes(self):
if self._all_layer_nodes is None:
self._all_layer_nodes = [n for p in self.layer_flat.paths for n in p.nodes if n.type != OFFCURVE]
return self._all_layer_nodes
@staticmethod
def get_next_node(node):
next_node = node.nextNode
while next_node.type == OFFCURVE:
next_node = next_node.nextNode
return next_node
@staticmethod
def get_prev_node(node):
prev_node = node.prevNode
while prev_node.type == OFFCURVE:
prev_node = prev_node.nextNode
return prev_node
def leftmost_node(self):
if self._leftmost_node is None:
nodes = [(n.x - self.compensate_italic_angle(n.y), n.x, n.y) for n in self.all_layers_nodes()]
nodes.sort(key=lambda x: x[0])
self._leftmost_node = NSPoint(nodes[0][1], nodes[0][2])
return self._leftmost_node
def rightmost_node(self):
if self._rightmost_node is None:
nodes = [(n.x - self.compensate_italic_angle(n.y), n.x, n.y) for n in self.all_layers_nodes()]
nodes.sort(key=lambda x: x[0])
self._rightmost_node = NSPoint(nodes[-1][1], nodes[-1][2])
return self._rightmost_node
def topmost_node(self):
if self._topmost_node is None:
self._topmost_node = sorted(self.all_layers_nodes(), key=lambda x: x.y)[-1]
return self._topmost_node
def bottommost_node(self):
if self._bottommost_node is None:
self._bottommost_node = sorted(self.all_layers_nodes(), key=lambda x: x.y)[0]
return self._bottommost_node
def top_two_nodes(self):
if self._top_two_nodes is None:
self._top_two_nodes = sorted(self.all_layers_nodes(), key=lambda x: x.y, reverse=True)[:2]
self._top_two_nodes.sort(key=lambda x: x.x)
return self._top_two_nodes
def bottom_two_nodes(self):
if self._bottom_two_nodes is None:
self._bottom_two_nodes = sorted(self.all_layers_nodes(), key=lambda x: x.y)[:2]
self._bottom_two_nodes.sort(key=lambda x: x.x)
return self._bottom_two_nodes
def bottom_two_nodes_consecutive(self):
if self._bottom_two_nodes_consecutive is None:
other_node = min([self.get_next_node(self.bottommost_node()), self.get_prev_node(self.bottommost_node())], key=lambda n: n.y)
self._bottom_two_nodes_consecutive = sorted([self.bottommost_node(), other_node], key=lambda x: x.y)
self._bottom_two_nodes_consecutive.sort(key=lambda x: x.x)
return self._bottom_two_nodes_consecutive
    def get_coords(self, pos_x_name, pos_y_name):
        """
        Resolve a (pos_x, pos_y) pair from position names.

        Each of pos_x_name / pos_y_name may be:
          - a number: used directly (x numbers go through self.xpos_value),
          - None: that coordinate is returned as None,
          - a string: normalized to an 'xpos_'/'ypos_' method name, then
            served from the per-layer cache or computed by calling that
            method.

        y is resolved first because italic-corrected x positions depend
        on the y value.
        """
        try:
            if not pos_x_name.startswith('xpos'):
                pos_x_name = 'xpos_' + pos_x_name
        except AttributeError:
            # Not a string (number or None): leave it untouched.
            pass
        try:
            if not pos_y_name.startswith('ypos'):
                pos_y_name = 'ypos_' + pos_y_name
        except AttributeError:
            pass
        pos_y = 0
        if type(pos_y_name) in [int, float]:
            pos_y = pos_y_name
        elif pos_y_name is None:
            pos_y = None
        else:
            # Cached value, if this metric was computed for this layer before.
            pos_y = self.layer_metrics.get(self.layer.layerId, {}).get(pos_y_name)
            if pos_y is None:
                try:
                    # e.g. 'ypos_foo' -> self.ypos_foo()
                    pos_y = getattr(self, pos_y_name)()
                except (AttributeError, IndexError):
                    # Unknown metric or empty outline: pos_y stays None.
                    pass
                self.layer_metrics[self.layer.layerId][pos_y_name] = pos_y
        pos_x = 0
        if type(pos_x_name) in [int, float]:
            # NOTE(review): xpos_value is not defined in this chunk;
            # assumed to be provided elsewhere on this class -- confirm.
            pos_x = self.xpos_value(pos_x_name, pos_y)
        elif pos_x_name is None:
            pos_x = None
        else:
            if not self.italic_angle:
                pos_x = self.layer_metrics.get(self.layer.layerId, {}).get(pos_x_name)
            else:
                # Italic x positions depend on pos_y; never use the cache.
                pos_x = None
            if pos_x is None:
                try:
                    # e.g. 'xpos_foo' -> self.xpos_foo(pos_y)
                    pos_x = getattr(self, pos_x_name)(pos_y)
                except (AttributeError, IndexError):
                    pass
                self.layer_metrics[self.layer.layerId][pos_x_name] = pos_x
        return pos_x, pos_y
# def _get_anchor_from_other_glyph(self, layer, aname):
# gname = self.aname_sub.sub('', pos_y_name)
# g = Glyphs.font.glyphs[gname]
# if g is not None:
# layer = g.layers[self.layer.associatedMasterId]
# _, pos_y = self._get_anchor_pos(layer, aname)
# pos_y_name += aname
# else:
# _, pos_y = self._get_anchor_pos(self.layer, pos_y_name)
def _get_anchor_pos(self, layer, aname):
aname = self.aname_sub.sub('', aname)
a = layer.anchors[aname]
if a is None:
layer = layer.copyDecomposedLayer()
a = layer.anchors[aname]
if a is None:
return 0, 0
return a.position
def _get_all_indic_headlines(self):
"""
Builds a dictionary holding the x-coordinates for the top and bottom of the headline for each master.
dict[script][master ID][top or bottom]
"""
headline_dict = {}
for gn in ['ka-beng', 'ka-deva']:
g = Glyphs.font.glyphs[gn]
if g:
headline_dict[g.script] = {}
for l in g.layers:
top, bottom = self._get_indic_headline(l)
headline_dict[g.script][l.associatedMasterId] = {
'top': top,
'bottom': bottom,
}
return headline_dict
def _get_indic_stem_widths(self):
"""
Builds a dictionary holding the stem width for each master.
dict[script][master ID] = stem width
"""
temp_dict = defaultdict(dict)
for gn in ['iMatra-beng', 'iMatra-deva']:
g = Glyphs.font.glyphs[gn]
if g:
for l in g.layers:
stem_coords = l.intersectionsBetweenPoints((0, 300), (l.width, 300), components=True)[1:-1]
try:
stem_width = stem_coords[1].x - stem_coords[0].x
temp_dict[g.script][l.associatedMasterId] = stem_width
except IndexError:
temp_dict[g.script][l.associatedMasterId] = None
return temp_dict
    @staticmethod
    def _get_indic_headline(layer):
        """
        Calculates the top and bottom y-values for the indic headline.

        A headline edge is an on-curve node that shares its y with an
        adjacent on-curve node (i.e. it starts/ends a horizontal
        segment).  The highest such y is the top; the next distinct such
        y is the bottom.  Either may be None if not found.
        """
        layer = layer.copyDecomposedLayer()
        layer.removeOverlap()
        all_nodes = [n for p in layer.paths for n in p.nodes if not n.type == OFFCURVE]
        # Highest nodes first.
        all_nodes.sort(key=lambda n: n.y, reverse=True)
        top = None
        for n in all_nodes:
            if (n.nextNode.y == n.y and not n.nextNode.type == OFFCURVE) or (n.prevNode.y == n.y and not n.prevNode.type == OFFCURVE):
                top = n.y
                break
        bottom = None
        for n in all_nodes:
            # Same criterion, but skip the y already taken as the top.
            if ((n.nextNode.y == n.y and not n.nextNode.type == OFFCURVE) or (n.prevNode.y == n.y and not n.prevNode.type == OFFCURVE)) and n.y != top:
                bottom = n.y
                break
        return top, bottom
@staticmethod
def _check_coord(comp_this, comp_against, fuzziness=2):
"""
Checks whether a value is within the bounds of fuzziness.
"""
return comp_against + fuzziness > comp_this > comp_against - fuzziness
def _get_indic_rightmost_stem(self, l):
stem_center = None
number_of_samples = 12
stem_width = self._indic_stem_widths.get(l.parent.script, {}).get(l.associatedMasterId)
if stem_width is None:
return
fuzziness = stem_width * 0.1
measure_interval = int(l.bounds.size.height / number_of_samples)
measure_heights = range(int(l.bounds.origin.y), int(l.bounds.origin.y + l.bounds.size.height), measure_interval)
potential_stems = defaultdict(list)
measured_points = []
# l.guides = []
for height in measure_heights:
for p in l.paths:
measure_l = GSLayer()
measure_l.width = l.width
measure_l.paths.append(p)
measured_points.append(measure_l.intersectionsBetweenPoints((0, height), (measure_l.width, height), components=True)[1:-1])
for c in l.components:
measure_l = c.componentLayer.copyDecomposedLayer()
measure_l.removeOverlap()
measure_l.applyTransform(c.transform)
measured_points.append(measure_l.intersectionsBetweenPoints((0, height), (measure_l.width + c.transform[4], height), components=True)[1:-1])
# if 1:
# ngl = GSGuideLine()
# ngl.position = NSPoint(0, height)
# ngl.setShowMeasurement_(1)
# l.guides.append(ngl)
# print(l, stem_width)
for measure_coords in measured_points:
for ci, coord in enumerate(measure_coords):
try:
next_coord = measure_coords[ci + 1]
except IndexError:
break
coord_distance = next_coord.x - coord.x
# print(coord_distance, measure_coords)
if self._check_coord(coord_distance, stem_width, fuzziness=fuzziness):
stem_mid_point = round((next_coord.x + coord.x) / 2)
stem_mid_point_max = stem_mid_point + fuzziness
stem_mid_point_min = stem_mid_point - fuzziness
added = False
for min_max in potential_stems.keys():
pmin, pmax = min_max
if pmax > stem_mid_point_max > pmin or pmax > stem_mid_point_min > pmin:
potential_stems[min_max].append(stem_mid_point)
added = True
break
if not added:
potential_stems[(stem_mid_point_min, stem_mid_point_max)].append(stem_mid_point)
vals = potential_stems.values()
vals.sort(reverse=True)
vals.sort(key=len, reverse=True)
stem_center = round(sum(vals[0]) / len(vals[0]))
return stem_center
# X positions
# The below methods calculate x-coordinate positions.
# If the font is italic, the x position needs to be adjusted (the y position is unaffected by italic).
def xpos_stem_top_center(self, pos_y):
"""
Finds the 2 highest nodes and returns the x position between them
"""
x_pos = sum([x.x for x in self.top_two_nodes()]) / 2
if self.italic_angle:
measure_node = sum([x.y for x in self.top_two_nodes()]) / 2
x_pos += self.compensate_italic_angle(pos_y - measure_node)
return int(x_pos)
def xpos_stem_top_left(self, pos_y):
"""
Finds the 2 highest nodes and returns the x position of the leftmost one.
"""
x_pos = int(self.top_two_nodes()[0].x)
if self.italic_angle:
measure_node = self.top_two_nodes()[0]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_stem_top_right(self, pos_y):
"""
Finds the 2 highest nodes and returns the x position of the rightmost one.
"""
x_pos = int(self.top_two_nodes()[-1].x)
if self.italic_angle:
measure_node = self.top_two_nodes()[-1]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_stem_bottom_center(self, pos_y):
"""
Finds the 2 lowest nodes and returns the x position between them
"""
x_pos = sum([x.x for x in self.bottom_two_nodes_consecutive()]) / 2
if self.italic_angle:
measure_node = sum([x.y for x in self.bottom_two_nodes_consecutive()]) / 2
x_pos += self.compensate_italic_angle(pos_y - measure_node)
return int(x_pos)
def xpos_stem_bottom_right(self, pos_y):
"""
Finds the 2 lowest nodes and returns the x position of the leftmost one.
"""
x_pos = int(self.bottom_two_nodes_consecutive()[-1].x)
if self.italic_angle:
measure_node = self.bottom_two_nodes_consecutive()[-1]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_stem_bottom_left(self, pos_y):
"""
Finds the 2 lowest nodes and returns the x position of the rightmost one.
"""
x_pos = int(self.bottom_two_nodes_consecutive()[0].x)
if self.italic_angle:
measure_node = self.bottom_two_nodes_consecutive()[0]
x_pos += self.compensate_italic_angle(pos_y - measure_node.y)
return x_pos
def xpos_outline_center(self, pos_y):
"""
Finds the leftmost node and rightmost node and returns the x position of their centre.
"""
# pos_x = int(sum([self.layer.bounds.origin.x, self.layer.bounds.size.width, self.layer.bounds.origin.x]) / 2)
pos_x = int((self.layer.bounds.size.width / 2) + self.layer.bounds.origin.x)
if self.italic_angle:
measure_node = ((self.leftmost_node().x + self.rightmost_node().x) / 2, (self.leftmost_node().y + self.rightmost_node().y) / 2)
italic_compensation = self.compensate_italic_angle(pos_y - measure_node[1])
pos_x = measure_node[0] + italic_compensation
return int(pos_x)
def xpos_outline_left(self, pos_y):
"""
Returns the x position of the leftmost node.
"""
pos_x = self.layer.bounds.origin.x
if self.italic_angle:
measure_node = self.leftmost_node()
italic_compensation = self.compensate_italic_angle(pos_y - measure_node.y)
pos_x = measure_node.x + italic_compensation
return int(pos_x)
def xpos_outline_right(self, pos_y):
"""
Returns the x position of the rightmost node.
"""
pos_x = self.layer.bounds.size.width + self.layer.bounds.origin.x
| |
(class + entity_name)."""
return mapper_registry[self.class_key]
def is_assigned(self, instance):
"""Return True if this mapper handles the given instance.
This is dependent not only on class assignment but the
optional `entity_name` parameter as well.
"""
return instance.__class__ is self.class_ and getattr(instance, '_entity_name', None) == self.entity_name
def _assign_entity_name(self, instance):
"""Assign this Mapper's entity name to the given instance.
Subsequent Mapper lookups for this instance will return the
primary mapper corresponding to this Mapper's class and entity
name.
"""
instance._entity_name = self.entity_name
def get_session(self):
"""Return the contextual session provided by the mapper
extension chain, if any.
Raise ``InvalidRequestError`` if a session cannot be retrieved
from the extension chain.
"""
self.compile()
s = self.extension.get_session()
if s is EXT_PASS:
raise exceptions.InvalidRequestError("No contextual Session is established. Use a MapperExtension that implements get_session or use 'import sqlalchemy.mods.threadlocal' to establish a default thread-local contextual session.")
return s
def has_eager(self):
"""Return True if one of the properties attached to this
Mapper is eager loading.
"""
return len(self._eager_loaders) > 0
def instances(self, cursor, session, *mappers, **kwargs):
"""Return a list of mapped instances corresponding to the rows
in a given ResultProxy.
"""
import sqlalchemy.orm.query
return sqlalchemy.orm.Query(self, session).instances(cursor, *mappers, **kwargs)
def identity_key_from_row(self, row):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
row
A ``sqlalchemy.engine.base.RowProxy`` instance or a
dictionary corresponding result-set ``ColumnElement``
instances to their values within a row.
"""
return (self.class_, tuple([row[column] for column in self.pks_by_table[self.mapped_table]]), self.entity_name)
def identity_key_from_primary_key(self, primary_key):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
primary_key
A list of values indicating the identifier.
"""
return (self.class_, tuple(util.to_list(primary_key)), self.entity_name)
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
This value is typically also found on the instance itself
under the attribute name `_instance_key`.
"""
return self.identity_key_from_primary_key(self.primary_key_from_instance(instance))
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
"""
return [self.get_attr_by_column(instance, column) for column in self.pks_by_table[self.mapped_table]]
def canload(self, instance):
"""return true if this mapper is capable of loading the given instance"""
if self.polymorphic_on is not None:
return isinstance(instance, self.class_)
else:
return instance.__class__ is self.class_
def instance_key(self, instance):
"""Deprecated. A synonym for `identity_key_from_instance`."""
return self.identity_key_from_instance(instance)
def identity_key(self, primary_key):
"""Deprecated. A synonym for `identity_key_from_primary_key`."""
return self.identity_key_from_primary_key(primary_key)
def identity(self, instance):
"""Deprecated. A synoynm for `primary_key_from_instance`."""
return self.primary_key_from_instance(instance)
    def _getpropbycolumn(self, column, raiseerror=True):
        """Return the first property that handles *column*.

        On a miss, distinguishes between a key conflict (another
        property claimed the column's key) and a completely unmapped
        column; raises InvalidRequestError for either, unless
        raiseerror is False, in which case None is returned.
        """
        try:
            prop = self.columntoproperty[column]
        except KeyError:
            try:
                # A property exists under this key but was not mapped to
                # this column: that's a conflict, not a missing column.
                prop = self.__props[column.key]
                if not raiseerror:
                    return None
                raise exceptions.InvalidRequestError("Column '%s.%s' is not available, due to conflicting property '%s':%s" % (column.table.name, column.name, column.key, repr(prop)))
            except KeyError:
                # Nothing at all is mapped for this column.
                if not raiseerror:
                    return None
                raise exceptions.InvalidRequestError("No column %s.%s is configured on mapper %s..." % (column.table.name, column.name, str(self)))
        return prop[0]
def get_attr_by_column(self, obj, column, raiseerror=True):
"""Return an instance attribute using a Column as the key."""
prop = self._getpropbycolumn(column, raiseerror)
if prop is None:
return NO_ATTRIBUTE
#print "get column attribute '%s' from instance %s" % (column.key, mapperutil.instance_str(obj))
return prop.getattr(obj)
def set_attr_by_column(self, obj, column, value):
"""Set the value of an instance attribute using a Column as the key."""
self.columntoproperty[column][0].setattr(obj, value)
def save_obj(self, objects, uowtransaction, postupdate=False, post_update_cols=None, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
`save_obj` issues SQL statements not just for instances mapped
directly by this mapper, but for instances mapped by all
inheriting mappers as well. This is to maintain proper insert
ordering among a polymorphic chain of instances. Therefore
save_obj is typically called only on a *base mapper*, or a
mapper which does not inherit from any other mapper.
"""
if self.__should_log_debug:
self.__log_debug("save_obj() start, " + (single and "non-batched" or "batched"))
# if batch=false, call save_obj separately for each object
if not single and not self.batch:
for obj in objects:
self.save_obj([obj], uowtransaction, postupdate=postupdate, post_update_cols=post_update_cols, single=True)
return
connection = uowtransaction.transaction.connection(self)
if not postupdate:
for obj in objects:
if not has_identity(obj):
for mapper in object_mapper(obj).iterate_to_root():
mapper.extension.before_insert(mapper, connection, obj)
else:
for mapper in object_mapper(obj).iterate_to_root():
mapper.extension.before_update(mapper, connection, obj)
for obj in objects:
# detect if we have a "pending" instance (i.e. has no instance_key attached to it),
# and another instance with the same identity key already exists as persistent. convert to an
# UPDATE if so.
mapper = object_mapper(obj)
instance_key = mapper.instance_key(obj)
is_row_switch = not postupdate and not has_identity(obj) and instance_key in uowtransaction.uow.identity_map
if is_row_switch:
existing = uowtransaction.uow.identity_map[instance_key]
if not uowtransaction.is_deleted(existing):
raise exceptions.FlushError("New instance %s with identity key %s conflicts with persistent instance %s" % (mapperutil.instance_str(obj), str(instance_key), mapperutil.instance_str(existing)))
if self.__should_log_debug:
self.__log_debug("detected row switch for identity %s. will update %s, remove %s from transaction" % (instance_key, mapperutil.instance_str(obj), mapperutil.instance_str(existing)))
uowtransaction.unregister_object(existing)
if has_identity(obj):
if obj._instance_key != instance_key:
raise exceptions.FlushError("Can't change the identity of instance %s in session (existing identity: %s; new identity: %s)" % (mapperutil.instance_str(obj), obj._instance_key, instance_key))
inserted_objects = util.Set()
updated_objects = util.Set()
table_to_mapper = {}
for mapper in self.base_mapper().polymorphic_iterator():
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
for table in sqlutil.TableCollection(list(table_to_mapper.keys())).sort(reverse=False):
# two lists to store parameters for each table/object pair located
insert = []
update = []
for obj in objects:
mapper = object_mapper(obj)
if table not in mapper.tables or not mapper._has_pks(table):
continue
instance_key = mapper.instance_key(obj)
if self.__should_log_debug:
self.__log_debug("save_obj() table '%s' instance %s identity %s" % (table.name, mapperutil.instance_str(obj), str(instance_key)))
isinsert = not instance_key in uowtransaction.uow.identity_map and not postupdate and not has_identity(obj)
params = {}
hasdata = False
for col in table.columns:
if col is mapper.version_id_col:
if not isinsert:
params[col._label] = mapper.get_attr_by_column(obj, col)
params[col.key] = params[col._label] + 1
else:
params[col.key] = 1
elif col in mapper.pks_by_table[table]:
# column is a primary key ?
if not isinsert:
# doing an UPDATE? put primary key values as "WHERE" parameters
# matching the bindparam we are creating below, i.e. "<tablename>_<colname>"
params[col._label] = mapper.get_attr_by_column(obj, col)
else:
# doing an INSERT, primary key col ?
# if the primary key values are not populated,
# leave them out of the INSERT altogether, since PostGres doesn't want
# them to be present for SERIAL to take effect. A SQLEngine that uses
# explicit sequences will put them back in if they are needed
value = mapper.get_attr_by_column(obj, col)
if value is not None:
params[col.key] = value
elif mapper.polymorphic_on is not None and mapper.polymorphic_on.shares_lineage(col):
if isinsert:
if self.__should_log_debug:
self.__log_debug("Using polymorphic identity '%s' for insert column '%s'" % (mapper.polymorphic_identity, col.key))
value = mapper.polymorphic_identity
if col.default is None or value is not None:
params[col.key] = value
else:
# column is not a primary key ?
if not isinsert:
# doing an UPDATE ? get the history for the attribute, with "passive"
# so as not to trigger any deferred loads. if there is a new
# value, add it to the bind parameters
if post_update_cols is not None and col not in post_update_cols:
continue
elif is_row_switch:
params[col.key] = self.get_attr_by_column(obj, col)
hasdata = True
continue
prop = mapper._getpropbycolumn(col, False)
if prop is None:
continue
history = prop.get_history(obj, passive=True)
if history:
a = history.added_items()
if len(a):
params[col.key] = a[0]
hasdata = True
else:
# doing an INSERT, non primary key col ?
# add the attribute's value to the
# bind parameters, unless its None and the column has a
# default. if its None and theres no default, we still might
# not want to put it in the col list but SQLIte doesnt seem to like that
# if theres no columns at all
value = mapper.get_attr_by_column(obj, col, False)
if value is NO_ATTRIBUTE:
continue
if col.default is None or value is not None:
params[col.key] = value
if not isinsert:
if hasdata:
# if none of the attributes changed, dont even
# add the row to be updated.
update.append((obj, params, mapper))
else:
insert.append((obj, params, mapper))
if len(update):
mapper = table_to_mapper[table]
clause = sql.and_()
for col in mapper.pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label, type=col.type, unique=True))
if mapper.version_id_col is not None:
clause.clauses.append(mapper.version_id_col == sql.bindparam(mapper.version_id_col._label, type=col.type, unique=True))
statement = table.update(clause)
rows = 0
supports_sane_rowcount = True
def comparator(a, b):
for col in mapper.pks_by_table[table]:
x | |
if len(pol['btn_points']):
pol['btn_points'][self.index].state = 'down'
else:
self.remove_gap(pol)
self.pol = None
self.index = None
except (LookupError, TypeError) as er:
print('on remove_point: ', er)
else:
if state:
self.save_state('Removed point {} from polygon {}'
.format(i, p))
self.draw()
def remove_polygon(self, *args):
    """Delete the currently selected polygon: remove all of its point
    widgets and its number label, close the numbering gap, and record
    the deletion in the undo history. No-op while a point transfer
    (multi-select) is pending."""
    if not self.to_transfer:
        try:
            pol = self.rec[self.frame][self.pol]
        except LookupError as er:
            print('on clear_polygon: ', er)
        else:
            pol['btn_points'][self.index].dismiss_popup()
            # Detach every point widget, then the polygon's label.
            while len(pol['btn_points']):
                self.scat.remove_widget(pol['btn_points'].pop())
            self.scat.remove_widget(pol['label'])
            # Renumber/re-key remaining polygons to stay consecutive.
            self.remove_gap(pol)
            pidx = self.pol
            self.pol = None
            self.index = None
            self.save_state('Removed polygon {}'.format(pidx))
            self.draw()
def remove_gap(self, pol):
    # Taking care of indices' consecutiveness in case of removing
    # a middle one: display numbers and string dict keys must both
    # stay dense after `pol` is dropped from the current frame.
    frame = self.rec[self.frame]
    p = len(frame) - 1
    for k, v in iteritems(frame):
        if v['number'] > pol['number']:
            # Shift display numbers down past the removed polygon.
            v['number'] -= 1
        elif v == pol:
            # Remember the dict key of the polygon being removed.
            p = int(k)
    # Park the removed polygon at the end of the numbering.
    pol['number'] = len(frame) - 1
    # Slide every later polygon one key down, updating its stored
    # key and its on-screen label, then drop the now-duplicate tail.
    while p < len(frame) - 1:
        frame[str(p)] = frame[str(p + 1)]
        frame[str(p)]['key'] = p
        frame[str(p)]['label'].text = str(p)
        p += 1
    del frame[str(p)]
def open_polygon(self, *args):
    """Toggle the selected polygon between open and closed, recording
    the change in the undo history."""
    if not self.pol:
        return
    pol = self.rec[self.frame][self.pol]
    pol['btn_points'][self.index].dismiss_popup()
    now_open = not pol['open']
    pol['open'] = now_open
    if now_open:
        self.save_state('Opened polygon {}'.format(self.pol))
    else:
        self.save_state('Closed polygon {}'.format(self.pol))
    self.update()
    self.draw()
def deselect_polygon(self):
    """Release the current selection, remembering it in `self.lasts`."""
    if not self.pol:
        return
    selected = self.rec[self.frame][self.pol]['btn_points'][self.index]
    selected.state = 'normal'
    self.lasts = self.frame, self.pol, self.index
    self.pol = None
    self.index = None
    self.save_state(motion_end=1)
    self.draw()
def pick_point(self, point, *args):
    """Toggle `point`'s membership in the multi-select transfer list."""
    point.dismiss_popup()
    idx = point.pol['btn_points'].index(point)
    if point.multi_selected:
        # Already picked: unpick it.
        self.to_transfer.remove((self.pol, idx))
        point.multi_selected = False
    else:
        self.to_transfer.append((self.pol, idx))
        point.multi_selected = True
    self.draw()
def transfer_points(self, point=None, pp=None):
    """Move the picked points in `self.to_transfer` either into an
    existing polygon (inserted right after `point`) or, when `point`
    is None, into a brand-new polygon. `pp`, if given, is a
    (polygon key, point index) pair used to attribute the
    'Picked Points' history entry to that selection."""
    poli = poii = ''
    if pp:
        # Temporarily switch the selection so the history snapshot
        # is recorded against `pp`, then restore it.
        poli, poii = self.pol, self.index
        self.pol, self.index = pp[0], pp[1]
    self.save_state('Picked Points {}'.format(self.to_transfer))
    if pp:
        self.pol, self.index = poli, poii
    frame = self.rec[self.frame]
    # Group picked point indices by their source polygon key.
    ptis = {}
    # `ordered` keeps the cut points in original pick order.
    ordered = [None] * len(self.to_transfer)
    for e in self.to_transfer:
        try:
            ptis[e[0]].append(e[1])
        except KeyError:
            ptis[e[0]] = [e[1]]
    if point:
        if point.multi_selected:
            # The paste target is itself picked: abort the transfer.
            self.empty_cut()
            self.save_state('Cancelled transfer')
            return
        # Cut points from their source polygons, highest indices first
        # so earlier deletions don't shift later indices.
        for p, plg in sorted(iteritems(ptis), reverse=True):
            for pidx in sorted(plg, reverse=True):
                xpol = frame[p]
                cutpoint = xpol['btn_points'][pidx]
                ordered[self.to_transfer.index((p, pidx))] = cutpoint
                cutpoint.pol = point.pol
                cutpoint.multi_selected = False
                cutpoint.area_color = cutpoint.norm_fill_color
                cutpoint.line_color = cutpoint.norm_line_color
                del xpol['btn_points'][pidx]
                if not xpol['btn_points']:
                    # Source polygon emptied: renumber and drop it.
                    for apol in itervalues(frame):
                        if apol['number'] > xpol['number']:
                            apol['number'] -= 1
                    self.scat.remove_widget(xpol['label'])
                    self.remove_gap(xpol)
        # Insert the cut points immediately after the target point.
        i = point.pol['btn_points'].index(point) + 1
        for cutpoint in ordered:
            cutpoint.pol['btn_points'].insert(i, cutpoint)
            i += 1
        self.pol = str(point.pol['key'])
        curpoint = frame[self.pol]['btn_points'][self.index]
        curpoint.area_color = curpoint.norm_fill_color
        curpoint.line_color = curpoint.norm_line_color
        self.index = i - 1
        frame[self.pol]['btn_points'][self.index].state = 'down'
        msg = 'Pasted {} points to polygon {}'.format(len(ordered), self.pol)
    else:
        # No paste target: create a fresh polygon for the picked points.
        self.deselect_polygon()
        self.pol = str(len(frame))
        pol = frame[self.pol] = {}
        pol['number'] = pol['key'] = int(self.pol)
        pol['open'] = False
        pol['btn_points'] = []
        pol['color'] = self.default_color
        pol['label'] = Label(size=(dp(50), dp(50)), font_size='35sp', bold=True,
                             color=pol['color'][:-1] + (.3,))
        self.scat.add_widget(pol['label'])
        # Same cut loop as above, but reparenting onto the new polygon.
        for p, plg in sorted(iteritems(ptis), reverse=True):
            for pidx in sorted(plg, reverse=True):
                xpol = frame[p]
                cutpoint = xpol['btn_points'][pidx]
                ordered[self.to_transfer.index((p, pidx))] = cutpoint
                cutpoint.pol = pol
                cutpoint.multi_selected = False
                cutpoint.area_color = cutpoint.norm_fill_color
                cutpoint.line_color = cutpoint.norm_line_color
                del xpol['btn_points'][pidx]
                if not xpol['btn_points']:
                    for apol in itervalues(frame):
                        if apol['number'] > xpol['number']:
                            apol['number'] -= 1
                    self.scat.remove_widget(xpol['label'])
                    self.remove_gap(xpol)
        i = 0
        for cutpoint in ordered:
            pol['btn_points'].insert(i, cutpoint)
            i += 1
        self.pol = str(pol['key'])
        self.index = i - 1
        # noinspection PyUnresolvedReferences
        pol['btn_points'][self.index].state = 'down'
        msg = 'New polygon ({}) of {} points'.format(self.pol, len(ordered))
    self.empty_cut()
    self.save_state(msg)
    self.draw()
def empty_cut(self):
    """Deselect every picked point and clear the transfer list."""
    for pol_key, pt_idx in self.to_transfer:
        try:
            point = self.rec[self.frame][pol_key]['btn_points'][pt_idx]
            if pt_idx != self.index:
                # Restore normal colors unless it's the active point.
                point.area_color = point.norm_fill_color
                point.line_color = point.norm_line_color
            point.multi_selected = False
        except (KeyError, IndexError) as er:
            print('on empty_cut: ', er)
    del self.to_transfer[:]
def save_state(self, msg='__state', motion=0, motion_end=0):
    """Record a snapshot of the project in the undo history.
    `motion=1` buffers a point-drag snapshot in `self.moves` instead of
    the history; the next non-motion call collapses the buffered drag
    into a single 'Moved point ...' history entry. `motion_end=1`
    flushes the drag buffer without adding a new snapshot."""
    if msg != '__state' and self.history \
            and self.history[self.state][0] == msg:
        # Same message as the current state: nothing new to record.
        return
    if not motion:
        if self.moves:
            # Collapse the buffered drag into one history entry, using
            # the last motion's "<point>_<polygon>_<pos>" args for text.
            args = self.motion_args[-1].split('_')
            last = self.moves[-1][:]
            last = ('Moved point {} of polygon {} to {}'.format(args[0],
                                                                args[1],
                                                                args[2]),
                    last[1])
            self.history.append(last)
            self.changes += 1
            del self.moves[:]
    if not motion_end:
        if self.state != -1:
            # We are mid-undo: drop the redo tail before appending.
            index = self.state + len(self.history)
            while len(self.history) > index + 1:
                self.history.pop()
            self.state = -1
        project = {'frame': self.frame, 'pol': self.pol, 'index': self.index,
                   'to_transfer': self.to_transfer[:]}
        snapshot = msg, self.store(project)
        if motion:
            # Buffer drag snapshots; they are merged on the next flush.
            self.motion_args.append(snapshot[0])
            self.moves.append(snapshot)
        else:
            self.history.append(snapshot)
            self.changes += 1
        # Bound the history length.
        if len(self.history) > self.history_states:
            self.history.popleft()
    self.update()
def change_state(self, btn):
    """Move the undo/redo cursor. `self.state` counts backwards from
    -1 (newest) to -len(history) (oldest)."""
    if btn == 'redo':
        if not self.state < -1:
            # Already at the newest state.
            return
        self.state += 1
        self.changes += 1
    elif btn == 'undo':
        if not self.state > -len(self.history):
            # Already at the oldest state.
            return
        self.state -= 1
        self.changes -= 1
    if -len(self.history) <= self.state < 0:
        # Rebuilding the scene is slow; show the busy indicator and
        # defer the actual rebuild to the next clock tick.
        self.busy.opacity = 1
        Clock.schedule_once(self.build_state, .1)
def build_state(self, *args):
    """Rebuild the editor from the history snapshot at `self.state`."""
    self.clear_points()
    state = self.history[self.state][1]
    if self.atlas_source:
        # Re-point the sprite at the snapshot's atlas frame and
        # refresh the frame counter display.
        self.frame = state['frame']
        self.sprite.image.source = (
            'atlas://' + self.filename + '/' + self.frame)
        self.board1.text = (self.image + '\n(' + str(
            self.keys.index(self.frame) + 1) + ' of ' + str(
            len(self.keys)) + ' frames)')
    self.pol = state['pol']
    self.index = state['index']
    self.to_transfer = state['to_transfer'][:]
    self.restore(state, __version__)
    try:
        self.rec[self.frame][self.pol]['btn_points'][
            self.index].state = 'down'
    except (KeyError, IndexError):
        # The snapshot had no active point selection.
        pass
    # Re-mark the points that were picked for transfer.
    for entry in self.to_transfer:
        pol = self.rec[self.frame][entry[0]]
        cutpoint = pol['btn_points'][entry[1]]
        cutpoint.multi_selected = True
    self.update()
    self.draw()
    self.busy.opacity = 0
def navigate(self, btn):
    """Switch to the next ('>') or previous frame of the loaded atlas,
    wrapping at either end, and optionally clone the current frame's
    polygons onto the new frame."""
    cf = self.frame
    if self.atlas_source:
        self.empty_cut()
        # Strip all Point/Label widgets of the old frame from the
        # scatter (child 0, the sprite, is kept).
        while len(self.scat.children) > 1:
            for point in self.scat.children:
                if isinstance(point, (Point, Label)):
                    self.scat.remove_widget(point)
        if btn == '>':
            self.frame = \
                self.keys[self.keys.index(self.frame) + 1] \
                if self.keys.index(self.frame) + 1 < len(self.keys) \
                else self.keys[0]
        else:
            self.frame = \
                self.keys[self.keys.index(self.frame) - 1] \
                if self.keys.index(self.frame) > 0 \
                else self.keys[-1]
        try:
            self.sprite.image.source = ('atlas://' + self.filename + '/'
                                        + self.frame)
        except ZeroDivisionError:
            # NOTE(review): unusual exception for an assignment —
            # presumably raised inside the image loader; confirm.
            pass
        self.sprite.image.size = self.sprite.image.texture_size
        self.sprite.size = self.sprite.image.size
        self.sprite.center = self.width * .6, self.height * .5
        if self.clone_points:
            # Deep-copy the previous frame's polygons onto the new one.
            for key in iterkeys(self.rec[cf]):
                self.rec[self.frame][key] = {'btn_points': []}
            for (k, v), (k2, v2) in \
                    zip(iteritems(self.rec[cf]),
                        iteritems(self.rec[self.frame])):
                for i, point in enumerate(v['btn_points']):
                    v2['btn_points'].append(Point(v2, self, self.mag,
                                                  text=str(i),
                                                  pos=point.center))
                v2['key'] = v['key']
                v2['number'] = v['number']
                v2['open'] = v['open']
                v2['color'] = v['color'][:]
                v2['label'] = Label(size=(dp(50), dp(50)), font_size='35sp',
                                    bold=True, color=v2['color'][:-1] + (.3,))
                self.scat.add_widget(v2['label'])
            if self.rec[cf]:
                self.save_state('Cloned points of frame {} to frame {}'
                                .format(cf, self.frame))
        # Attach the new frame's point widgets with normal colors.
        for pol in itervalues(self.rec[self.frame]):
            for point in pol['btn_points']:
                self.scat.add_widget(point)
                point.area_color = point.norm_fill_color
                point.line_color = point.norm_line_color
        self.pol = None
        self.index = None
        self.adjust_points()
        self.update()
        self.draw()
        self.board1.text = (self.image + '\n('
                            + str(self.keys.index(self.frame) + 1)
                            + ' of '
                            + str(len(self.keys)) + ' frames)')
def clear_points(self):
    """Detach every point and label widget and wipe the whole record."""
    remove = self.scat.remove_widget
    for frame in itervalues(self.rec):
        for pol in itervalues(frame):
            points = pol['btn_points']
            while points:
                remove(points.pop())
            remove(pol['label'])
        frame.clear()
    self.rec.clear()
# ------------------------- INPUT -----------------------
def load_dialog(self, *args):
    """ Shows the 'Import/Open' dialog."""
    if self.popup:
        # We were re-invoked from the 'unsaved changes' warning popup:
        # the user confirmed, so discard the pending change count.
        self.changes = 0
        self.dismiss_popup()
    if self.changes > 1:
        # Unsaved work: warn first; the warning's action re-enters here.
        self.warn('Warning!', 'Project is not saved\n'
                  'and will be lost.\nContinue?',
                  action=self.load_dialog)
        return
    content = LoadDialog()
    content.load = self.load_check
    content.cancel = self.dismiss_popup
    content.file_types = ['*.png', '*.atlas', '*.bounds']
    # Start browsing in the last used directory if it still exists.
    if os.path.isdir(self.last_dir):
        content.filechooser.path = self.last_dir
    else:
        content.filechooser.path = './'
    self.popup = AutoPopup()
    self.popup.content = content
    self.popup.size_hint = .6, .9
    self.popup.title = 'Open image, atlas or project file:'
    self.popup.auto_dismiss = False
    self.popup.open()
def load_check(self, path, filename):
    """Reset the entire editor state, then load the chosen file:
    `.bounds` project files go to `load_proj`, anything else to
    `load_img`. `filename` is the filechooser's selection list."""
    if filename:
        filename = filename[0]
        self.last_dir = path
        self.clear_points()
        # Reset record, history and selection state.
        self.rec = {}
        del self.keys[:]
        self.frame = '0'
        self.pol = None
        self.index = None
        self.lasts = ['0', None, None]
        self.history = deque()
        self.state = -1
        self.changes = 0
        self.atlas_source = ''
        self.animation = False
        self.sprite.size = dp(1), dp(1)
        self.sprite.pos = self.width * .6, self.height * .5
        self.scat.scale = 1
        self.ctrl = False
        # for tracing
        self.simple_border = []
        self.simplest_border = []
        self.trace_mode = False
        self.temp_img = ''
        self.orig_source = None
        self.matte = 1, .5, 1, 1
        self.multi_shape = False
        self.multi_chk.active = False
        self.all_frames = False
        self.all_chk.active = False
        self.all_go = False
        self.done = 0
        self.trace_box.thres = dp(1.0)
        self.empty_cut()
        try:
            self.sprite.remove_widget(self.sprite.image)
        except AttributeError:
            # No image was loaded yet.
            pass
        if filename.endswith('.bounds'):
            self.load_proj(filename, path)
        else:
            self.load_img(filename, path)
        self.save_state("Loaded image '{}'".format(filename))
        self.update()
        self.draw()
        Clock.schedule_interval(self.hover, .1)
def load_img(self, filename, path, source=None):
self.dismiss_popup()
filename = os.path.join(path, filename)\
.replace('./', '').replace('_RESCUED_', '')
file_name = filename.replace('.png', '')\
.replace('.atlas', '').replace('.bounds', '')
if source: # If a project is opened
filename = filename.replace(filename.split('\\')[-1], source)
self.image = filename.split('\\')[-1]
self.save_name = file_name + '.bounds'
try:
self.source = os.path.relpath(os.path.join(path, filename))
except ValueError as er:
self.source = os.path.join(path, filename)
print('on load_img(relpath): ', er)
if filename.endswith('.atlas'):
try:
with open(self.source, 'r', encoding="UTF8") as ani:
atlas = json.load(ani)
except (IOError, KeyError) as er:
print('On load_img(atlas reading):', er)
else:
self.keys = sorted([key for key in iterkeys(
atlas[filename.replace('.atlas', '.png').split('\\')[-1]])])
self.filename = filename.replace('.atlas', '')
try:
self.atlas_source = ('atlas://' +
os.path.relpath(filename) +
'/' + self.keys[0])
except ValueError as er:
self.atlas_source = ('atlas://' +
filename + '/' + self.keys[0])
print('on load_img(atlas relpath): ', er)
for key | |
"""
Copyright 2019 by <NAME> (alohawild)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================
This program reads in the Titanic training data and then runs various solutions to classify
survival.
"""
__author__ = 'michaelwild'
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = "Apache License, Version 2.0"
__version__ = "0.0.3"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Initial"
import sys
import numpy as np
import pandas as pd
from time import process_time
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import VotingClassifier
import sklearn as sk
def runtime(start):
    """Return the process time (seconds) elapsed since `start`."""
    return process_time() - start
class GetTitanicData:
    """
    Load Titanic data into data frames, align train/test into one
    frame, engineer features, and fit/validate a baseline
    RandomForest; alternative classifiers can be compared via
    `get_class`.
    """
    def __init__(self, train_file='train.csv', test_file='test.csv',
                 error_file='error file.csv'):
        """
        Read in the data on start-up, fit an initial RandomForest,
        write its validation misses to `error_file` (if non-empty),
        and keep the test-set predictions in `self.df_result`.
        """
        self.df_train = pd.read_csv(train_file)
        self.df_test = pd.read_csv(test_file)
        self.df_Titanic = self.align_data()
        self.df_Titanic = self.prepare_data(self.df_Titanic)
        self.model = RandomForestClassifier(random_state=29,
                                            bootstrap=True, criterion='entropy', max_depth=None,
                                            max_features=20,
                                            min_samples_leaf=3, min_samples_split=2, n_estimators=200
                                            )
        df_errors, self.failure = self.train_data_and_validate(self.df_Titanic, self.model, verbose=False)
        if (error_file != ''):
            df_errors.to_csv(error_file, index=False)
        self.df_result = self.train_run(self.df_Titanic, self.model, verbose=False)
    def align_data(self):
        """
        Combine all the data into one data frame with 'Test' set to 1 for test data
        :return:
        """
        # Make the data sets match in structure
        self.df_test['Survived'] = 0
        # move Survived to second place
        cols = self.df_test.columns.tolist()
        cols = [cols[len(cols)-1]] + cols[:(len(cols)-1)]
        self.df_test = self.df_test[cols]
        self.df_test['Test'] = 1
        # move Survived to front
        cols = self.df_test.columns.tolist()
        cols = [cols[1]] + cols[0:1] + cols[2:]
        self.df_test = self.df_test[cols]
        self.df_train['Test'] = 0
        frames = [self.df_train, self.df_test]
        df_merge = pd.concat(frames)
        return df_merge
    def align_title(self,title):
        """
        This is a 1912-based title alignment: accepts a title and
        returns a generalized title (Mr/Mrs/Miss/Master).
        Doctors are folded into 'Mr'; there is one female doctor this
        is wrong for, but it has no impact.
        :param title: honorific extracted from the Name column
        :return: generalized title string
        """
        if title in ['Mlle', 'Ms', 'Mme']:
            return 'Miss'
        elif title in ['Mr', 'Miss', 'Mrs', 'Master']:
            return title
        elif title in ['Capt', 'Col', 'Don', 'Jonkheer', 'Major', 'Rev', 'Sir']:
            return 'Mr'
        # NOTE(review): 'Mme' here is unreachable — it is already
        # matched by the first branch above.
        elif title in ['the Countess', 'Mme', 'Lady', 'Dona', 'Countess']:
            return 'Mrs'
        else:
            return 'Mr' # We get one doctor wrong...it still works
    def age_force(self, title):
        """
        Hard-coded mean age per generalized title (a pivot table used
        to compute these, but it broke after a pandas update and the
        values never change anyway).
        :param title: generalized title string
        :return: mean age for that title
        """
        if title in ['Miss']:
            return 21.83
        elif title in ['Mr']:
            return 32.81
        elif title in ['Mrs']:
            return 37.05
        elif title in ['Master']:
            return 5.48
        else:
            return 29.29
    def prepare_data(self, df):
        """
        prepare the data to be used for training and testing
        :param df: Titanic Data Frame
        :return: frame with engineered, one-hot and scaled features
        """
        # Class needs to be broken out
        df = pd.concat([df, pd.get_dummies(df['Pclass'], prefix='Class')], axis=1)
        # Sex is now matched as a value and not a number
        df = pd.concat([df, pd.get_dummies(df['Sex'], prefix='Sex')], axis=1)
        # align ticket to be more normalized
        # NOTE(review): Series.str.replace historically defaults to
        # regex=True, so '.' is a regex wildcard here — presumably a
        # literal dot was intended (regex=False); confirm against the
        # pandas version in use.
        df['Ticket'] = df['Ticket'].str.replace('.','')
        df['Ticket'] = df['Ticket'].str.replace('/', '')
        df['Ticket'] = df['Ticket'].str.replace(' ', '')
        # Tickets are now replaced with a single value
        df['Ticket'] = df['Ticket'].str[0:1]
        df = pd.concat([df, pd.get_dummies(df['Ticket'], prefix='Ticket')], axis=1)
        # Cabin also broken out
        df['Cabin'] = df['Cabin'].fillna('Unknown')
        df['Cabin'] = df['Cabin'].str[0:1]
        df = pd.concat([df, pd.get_dummies(df['Cabin'], prefix='Cabin')], axis=1)
        # Title is the word before the '.' in the Name column.
        df['Title'] = df['Name'].str.split(".").str[0]
        df['Title'] = df['Title'].str.split(" ").str[-1]
        df['Title'] = df['Title'].apply(lambda x: self.align_title(x))
        df = pd.concat([df, pd.get_dummies(df['Title'], prefix='Title')], axis=1)
        # pivot table broke in version of Pandas
        # age_mean = pd.DataFrame()
        #age_mean = df.pivot_table('Ags', index=['Title'], aggfunc=np.mean)
        #print(age_mean)
        # NOTE(review): 'Ags'/'Parxg' are unusual column names — the
        # standard Titanic columns are 'Age'/'Parch'; verify the input
        # CSVs actually use these names.
        # NOTE(review): age_force is called with a one-element LIST
        # ([x['Title']]), so its `title in [...]` checks never match
        # and every missing age gets the overall mean 29.29 —
        # presumably x['Title'] was intended.
        df['Ags'] = df[['Ags', 'Title']].apply(lambda x:
                                               self.age_force([x['Title']]) if pd.isnull(x['Ags'])
                                               else x['Ags'], axis=1)
        # free fare is set to median
        df['Fare'] = df[['Fare']].apply(lambda x:
                                        df['Fare'].median() if pd.isnull(x['Fare']) or x['Fare'] <= 0.0
                                        else x['Fare'], axis=1)
        # This seems to be the same thing
        df['FamilySize'] = df['SibSp'] + df['Parxg']
        # This is what the cool kids do, scale a value
        sc = StandardScaler()
        scale_columns = ['Ags', 'Fare', 'FamilySize']
        df_s = sc.fit_transform(df[scale_columns])
        # NOTE(review): Index.get_values() is deprecated in newer
        # pandas (use .to_numpy()); left as-is.
        df_s = pd.DataFrame((df_s), columns=scale_columns, index=df.index.get_values())
        # add the scaled columns back into the dataframe
        df[scale_columns] = df_s
        df = df.drop(['Name', 'Ticket', 'Cabin', 'Sex', 'Embarked', 'SibSp', 'Parxg', 'Pclass', 'Title'], axis=1)
        return df
    def train_data_and_validate(self, df, model, verbose=True):
        """
        Fit `model` on the first ~66% of the training rows and measure
        its miss rate on the remaining ~34%.
        :param df: Titanic combined prepared data
        :param model: random forest
        :param verbose: True to trace
        :return: (data frame of all failed values in original format
                  with two extra columns, failure percentage)
        """
        # NOTE(review): these two values are recomputed below after the
        # Test filter; this first computation is dead code.
        number_of_rows = len(df.index)
        cut_off = int(number_of_rows * 0.66)
        df_train = df.loc[df['Test'] == 0]
        df_train = df_train.drop(['Test'], axis=1)
        number_of_rows = len(df_train.index)
        cut_off = int(number_of_rows * 0.66)
        df_train_data = df_train[:cut_off - 1]
        df_validate_data = df_train[cut_off:]
        # Data frames are not used by the trees routines
        train_data = df_train_data.values
        # Column 0 is PassengerId, column 1 is Survived, 2+ features.
        try:
            model = model.fit(train_data[0:, 2:], train_data[0:, 1])
            feature_imp = pd.DataFrame(data=model.feature_importances_)
            feature_imp.columns = ['Value']
            feature_imp = feature_imp.assign(Feature=df_train_data.columns[2:])
            feature_imp.sort_values(['Value'], ascending=False, inplace=True)
            feature_imp.reset_index(level=0, inplace=True)
        # NOTE(review): bare except — models without
        # feature_importances_ land here, but so would any other error.
        except:
            feature_imp = ["N/A"]
        if verbose:
            print("Features...")
            print(feature_imp)
            print("...")
        # Data frames are not used by the trees routines
        validate_data = df_validate_data.values
        prediction = model.predict(validate_data[:, 2:])
        result = np.c_[validate_data[:, 0].astype(int), prediction.astype(int)]
        df_result = pd.DataFrame(result[:, 0:2], columns=['PassengerId', 'Calc Survived'])
        df_merge = pd.merge(df_validate_data, df_result, on='PassengerId', how='inner')
        # 'The Depths' flags rows where the prediction missed.
        df_merge['The Depths'] = 0
        df_merge['The Depths'] = df_merge[['The Depths', 'Calc Survived', 'Survived']].apply(lambda x:
                                                                                             0 if x['Calc Survived'] ==
                                                                                             x['Survived'] else 1,
                                                                                             axis=1)
        df_merge = df_merge.loc[df_merge['The Depths'] == 1]
        failure = len(df_merge.index) / len(df_validate_data.index) * 100
        if verbose:
            print("Our level if despair is :", failure)
            print("Count of miss:", len(df_merge.index))
            print("...")
        cols = ['PassengerId', 'Calc Survived']
        df_final = df_merge[cols]
        # Rejoin the original (unprepared) rows for readable output.
        df_final = pd.merge(df_final, self.df_train, on='PassengerId', how='inner')
        df_final = df_final.drop(['Test'], axis=1)
        return df_final, failure
    def train_run(self, df, model, verbose=False):
        """
        Fit `model` on all training rows and predict the test rows.
        :param df: Titanic prepared data
        :param model: random tree
        :param verbose: True for trace
        :return: prediction in data frame
        """
        # Train data extracted
        df_train_data = df.loc[df['Test'] == 0]
        df_train_data = df_train_data.drop(['Test'], axis=1)
        # Data frames are not used by the trees routines
        train_data = df_train_data.values
        model = model.fit(train_data[0:, 2:], train_data[0:, 1])
        feature_imp = pd.DataFrame(data=model.feature_importances_)
        feature_imp.columns = ['Value']
        feature_imp = feature_imp.assign(Feature=df_train_data.columns[2:])
        feature_imp.sort_values(['Value'], ascending=False, inplace=True)
        feature_imp.reset_index(level=0, inplace=True)
        if verbose:
            print("Features...")
            print(feature_imp)
            print("...")
        # Test data in DF is extracted
        df_test_data = df.loc[df['Test'] == 1]
        df_test_data = df_test_data.drop(['Test'], axis=1)
        # Data frames are not used by the trees routines
        test_data = df_test_data.values
        prediction = model.predict(test_data[:, 2:])
        result = np.c_[test_data[:, 0].astype(int), prediction.astype(int)]
        df_result = pd.DataFrame(result[:, 0:2], columns=['PassengerId', 'Survived'])
        return df_result
    def get_results(self):
        # Predictions for the test set (PassengerId, Survived).
        return self.df_result
    def get_prepared_data(self):
        # Combined, feature-engineered frame (train + test).
        return self.df_Titanic
    def get_class(self):
        """
        Compare a battery of scikit-learn classifiers (plus a soft
        voting ensemble) on the prepared data, printing each one's
        validation miss rate.
        """
        classifiers = [
            ("Nearest Neighbors", KNeighborsClassifier(n_neighbors=5,
                                                       weights="uniform", algorithm="auto",
                                                       leaf_size=30, p=2, metric="minkowski",
                                                       metric_params=None, n_jobs=None
                                                       )),
            ("Linear SVM", SVC(kernel="linear", C=0.025)),
            ("RBF SVM", SVC(gamma=2, C=1)),
            ("Gaussian Process",GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True)),
            ("Decision Tree", DecisionTreeClassifier(criterion="entropy", splitter="best",
                                                     max_depth=5, min_samples_split=2, min_samples_leaf=3,
                                                     min_weight_fraction_leaf=0.0, max_features=None,
                                                     random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0,
                                                     min_impurity_split=None, class_weight=None, presort=False)),
            ("Random Forest", RandomForestClassifier(random_state=29,
                                                     bootstrap=True, criterion='entropy', max_depth=None,
                                                     max_features=20,
                                                     min_samples_leaf=3, min_samples_split=2, n_estimators=200
                                                     )),
            ("Neural Net", MLPClassifier(solver="lbfgs", alpha=1)),
            ("AdaBoost",AdaBoostClassifier(n_estimators=200, learning_rate=1.0, algorithm="SAMME.R")),
            ("Naive Bayes", GaussianNB()),
            ("QDA", QuadraticDiscriminantAnalysis()),
            ("ExtraTrees", ExtraTreeClassifier(criterion="entropy", splitter="best", max_depth=5,
                                               min_samples_split=2, min_samples_leaf=3, min_weight_fraction_leaf=0.0,
                                               max_features=None, random_state=None, max_leaf_nodes=None,
                                               min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None
                                               )),
            ("GradientBoost", GradientBoostingClassifier(n_estimators=200)),
            ("Bagging", BaggingClassifier(n_estimators=50, bootstrap=True))
        ]
        for name, model in classifiers:
            print("Classifier:", name)
            df_errors, failure = self.train_data_and_validate(self.df_Titanic, model, verbose=False)
            print("Dispair:", failure)
        print("Voting!")
        # Soft-voting ensemble of the three best tree-based models.
        cf1 = AdaBoostClassifier(n_estimators=200, learning_rate=1.0, algorithm="SAMME.R")
        cf2 = GradientBoostingClassifier(n_estimators=200)
        cf3 = RandomForestClassifier(random_state=29, bootstrap=True, criterion='entropy', max_depth=None,
                                     max_features=2, min_samples_leaf=3, min_samples_split=2, n_estimators=200
                                     )
        cf = VotingClassifier(estimators=[('Ada', cf1), ('GB', cf2), ('RF', cf3)], voting='soft')
        df_errors, failure = self.train_data_and_validate(self.df_Titanic, cf, verbose=False)
        print("Voting Dispair:", failure)
        return
# =============================================================
program = "Final Titanic"
begin_time = process_time()
# =============================================================
# Main program begins here
print(program)
print("Version ", __version__, " ", __copyright__, " ", __license__)
print("Running on ", sys.version)
print("Pandas | |
ImportError: no flate decompressor in Python.
zd = zlib.decompressobj()
elif compress_algo == 3:
import bz2 # ImportError: no bzip2 decompressor in Python.
zd = bz2.BZ2Decompressor()
else:
raise ValueError('Unknown compress_algo: %d' % compress_algo)
zdml = None
if zd:
zd_decompress = zd.decompress
try:
zd_decompress(b'', 42)
if zd.unconsumed_tail != b'':
raise ValueError
# Limit memory usage.
zdml = get_yield_decompress_chunks(zd) # zlib has it.
except (TypeError, ValueError, AttributeError):
zdml = None # lambda data1, _zdd: (_zdd(data1,)) # bz2 doesn't have it.
  def yield_data_chunks(data1, data, encrypt_func, is_py_cipher, mdc_obj):
    """Yield plaintext chunks decrypted from the remaining ciphertext.

    Generator closure: reads ciphertext with fread, undoes the OpenPGP
    CFB-style XOR (strxor of each ciphertext block with encrypt_func of the
    previous block), optionally decompresses via zd/zdml from the enclosing
    scope, and -- when mdc_obj is set -- withholds the trailing 22-byte MDC
    packet from the output and verifies it after the last data byte.

    Args (everything else comes from the enclosing scope):
      data1: first, already-decrypted plaintext chunk.
      data: last ciphertext block (CFB feedback input for the next block).
      encrypt_func: block-cipher encryption callable used for CFB.
      is_py_cipher: True for the pure-Python cipher; disables fast_strxor.
      mdc_obj: running hash object for the MDC integrity check, or None.
    """
    if mdc_obj:
      # We don't process data1 yet, because we want to remove the MDC packet
      # (22 bytes, packet_type == 19, starts with '\xd3\x14') from the end first.
      mdc_queue = data1
    else:
      mdc_queue = b''
      # No MDC: data1 can be emitted (decompressed if configured) right away.
      if zdml:
        for chunk in zdml(data1):
          yield chunk
      elif zd_decompress:
        yield zd_decompress(data1)  # Can raise zlib.error.
      else:
        yield data1
    _fast_strxor = not is_py_cipher and fast_strxor
    do_binary = is_buffer_slice
    if len(data) == bs:
      # A full feedback block is available: bulk-decrypt until EOF.
      bs2bs = (bs * (2 + mdc_min_queue_size), bs)
      bsmdc = bs * mdc_min_queue_size
      while 1:
        data2 = fread(bs)
        if len(data2) < bs:
          # Short (final) block: decrypt the padded tail, keep only the
          # real bytes, and leave the loop.
          fre = encrypt_func(data)
          mdc_queue += strxor_bs(data2 + b'\0' * (bs - len(data2)), fre)[:len(data2)]
          break
        data3 = _fast_strxor and fread(bs2bs)
        if data3:  # <FAST-DECRYPTION>: large prebuffered chunks.
          lbs = bs + len(data3)
          # Most of the decryption time is spent in this block below.
          #
          # hellow5long.bin.gpg is based on 33636451 bytes of uncompressible plaintext.
          # time gpg -d --pinentry-mode loopback <hellow5long.bin.gpg >hellow5long.out
          # 3.168s user
          # $ time ./tinygpgs -d abc <hellow5long.bin.gpg >hellow5long.out
          # 4.512s user
          #
          # Typical lbs is 8192 bytes (for 8192-byte packets).
          # For hellow3.bin.gpg, lbs is 384 bytes.
          if mdc_queue:  # Flush mdc_queue before critical path.
            mdc_obj.update(mdc_queue)
            if zdml:
              for chunk in zdml(mdc_queue):
                yield chunk
            elif zd_decompress:
              yield zd_decompress(mdc_queue)
            else:
              yield mdc_queue
          # A single call to _fast_strxor is faster than
          # Crypto.Cipher._AES.MODE_CFB with segment_size=(bs << 3).
          #
          # Test vectors:
          # https://github.com/ircmaxell/PHP-PasswordLib/blob/master/test/Data/Vectors/aes-cfb.test-vectors
          # , so we use manual strxor instead.
          #
          # Slow copy in: data3[:], (data2 + data3), +=.
          datad, data3, data = _fast_strxor(encrypt_func(data), data2), data3[:], data3[lbs - bs - bs:]
          if do_binary:
            data3 = binary_type(data3)
          datad += _fast_strxor(encrypt_func(_buffer((data2 + data3), 0, lbs - bs)), data3)
          # Hold back the last bsmdc bytes (potential MDC tail) in the queue.
          data2 = _buffer(datad, 0, lbs - bsmdc)
          if mdc_obj:
            mdc_obj.update(data2)
          if zdml:
            for chunk in zdml(data2):
              yield chunk
          elif zd_decompress:
            yield zd_decompress(data2)
          else:
            yield data2
          if mdc_obj:  # Fill mdc_queue again.
            mdc_queue = datad[lbs - bsmdc : lbs]
        else:  # This branch happens only with empty*.bin.gpg, not even hellow*.bin.gpg.
          # Slow path: decrypt one block at a time.
          fre, data = encrypt_func(data), data2
          data1 = strxor_bs(data, fre)
          if mdc_obj:
            if len(mdc_queue) >= bsmdc:
              # Queue is long enough: the oldest block cannot be part of
              # the MDC packet, so it is safe to emit.
              data2 = _buffer(mdc_queue, 0, bs)
              mdc_obj.update(data2)
              if zdml:
                for chunk in zdml(data2):
                  yield chunk
              elif zd_decompress:
                yield zd_decompress(data2)
              else:
                yield data2
              mdc_queue = mdc_queue[bs:] + data1  # Short copy.
            else:
              mdc_queue += data1  # Short copy.
              # Now len(mdc_queue) >= bsmdc, and it contains enough
              # bytes (>= 22) for an MDC packet: mdc_min_queue_size * bs -
              # len(exp2) - len(b'\xd3\x14') >= 22.
          else:
            if zdml:
              for chunk in zdml(data1):
                yield chunk
            elif zd_decompress:
              yield zd_decompress(data1)
            else:
              yield data1
    if mdc_obj:
      # Strip the 22-byte MDC packet off the end of the queue and check it.
      if len(mdc_queue) < 22:
        raise ValueError('EOF in encrypted data before MDC packet.')
      if mdc_queue[-22 : -20] != b'\xd3\x14':  # packet_type == 19.
        raise ValueError('Bad MDC packet header.')
      mdc_queue, mdc = _buffer(mdc_queue, 0, len(mdc_queue) - 22), _buffer(mdc_queue, len(mdc_queue) - 20)
      mdc_obj.update(mdc_queue)
      mdc_obj.update(b'\xd3\x14')
      if _buffer(mdc_obj.digest()) == mdc:
        mdc_obj = None  # Marks the MDC check as passed.
    # Emit whatever plaintext is still queued (final short block and/or
    # queued bytes remaining after the MDC packet was stripped).
    if mdc_queue:
      if zdml:
        for chunk in zdml(mdc_queue):
          yield chunk
      elif zd_decompress:
        yield zd_decompress(mdc_queue)
      else:
        yield mdc_queue
    if zd and getattr(zd, 'flush', None):
      yield zd.flush()
    # Do this after the very last yield.
    if mdc_obj:
      raise ValueError('MDC mismatch, message may have been tampered with.')
dparams = {
'cipher_algo': cipher_algo,
'cipher_algo_str': CIPHER_ALGOS_ALL[cipher_algo],
'has_mdc': has_mdc,
}
return iter_to_fread(yield_data_chunks(data1, data, encrypt_func, is_py_cipher, mdc_obj)), dparams
LITERAL_TYPES = b'btul1'  # Accepted literal-packet data-type bytes.
def skip_gpg_literal_packet_header(data):
  """Return a buffer of the literal packet's payload, past its header.

  The header laid out in data is: 1 data-type byte (one of LITERAL_TYPES),
  1 filename-length byte, the filename itself, then a 4-byte timestamp.
  """
  if len(data) < 6:
    raise ValueError('First literal packet too short.')
  literal_type, filename_size = struct.unpack('>cB', data[:2])
  if literal_type not in LITERAL_TYPES:
    raise ValueError('Bad literal type: %r' % literal_type)
  header_size = 6 + filename_size
  if len(data) < header_size:
    raise ValueError('First literal packet too short for filename.')
  # First we get filename, then the date (4 bytes), 4 byte Unix timestamp.
  return buffer(data, header_size)
def copy_gpg_literal_data(fread, of):
  """Write the payload of GPG literal packets read via fread to file of.

  Tolerates a leading one-pass packet (type 4) and a trailing public-key
  signature packet (type 2); anything else raises ValueError.
  """
  # state: 0 = before first literal packet, 1 = inside partial literal
  # packets, 2 = final (non-partial) literal packet already written.
  state = 0
  write = of.write
  for packet_type, is_partial, data in yield_gpg_binary_packets(fread):
    if state == 2:
      if packet_type == 2 and not is_partial:
        continue  # Ignore public-key signature.
      raise ValueError('Unexpected packet after literal data: %d' %
                       packet_type)
    if packet_type != 11:
      if packet_type == 4 and not is_partial and state == 0:
        continue  # Ignore one-pass packet for public-key signature.
      raise ValueError('Literal packet expected, got: %d' % packet_type)
    if state == 0:
      # Strip the literal-packet header (type, filename, timestamp).
      data = skip_gpg_literal_packet_header(data)
    write(data)
    state = 2 if not is_partial else 1
  if state == 1:
    raise ValueError('Missing last literal packet.')
def yield_gpg_literal_data_chunks(fread):
  """Yield the payload chunks of GPG literal packets read via fread.

  The first yielded value is always b'' -- a sentinel telling the caller
  that initialization (finding the first literal packet) succeeded; real
  data chunks follow. Raises ValueError on unexpected packet types or a
  missing final (non-partial) literal packet.
  """
  it = yield_gpg_binary_packets(fread)
  data, is_done = b'', True
  # Find the first literal packet (type 11), skipping a leading one-pass
  # signature packet (type 4) if present.
  for packet_type, is_partial, data in it:
    if packet_type != 11:
      if packet_type == 4 and not is_partial:
        continue  # Ignore one-pass packet for public-key signature.
      raise ValueError('Literal packet expected, got: %d' % packet_type)
    # Strip the literal-packet header; [:] materializes the buffer slice.
    data = skip_gpg_literal_packet_header(data)[:]
    yield b''  # Indicate successful init.
    yield data
    is_done = not is_partial
    break
  else:
    # Empty input (no packets at all) still gets the init sentinel.
    yield b''  # Indicate successful init.
  # Remaining packets: continuation literal packets until a non-partial one
  # ends the stream; afterwards only a signature packet (type 2) is allowed.
  for packet_type, is_partial, data in it:
    if is_done:
      if packet_type == 2 and not is_partial:
        continue  # Ignore public-key signature.
      raise ValueError('Unexpected packet after literal data: %d' %
                       packet_type)
    if packet_type != 11:
      raise ValueError('Literal packet expected, got: %d' % packet_type)
    yield data
    is_done = not is_partial
  if not is_done:
    raise ValueError('Missing last literal packet.')
def decrypt_symmetric_gpg(fread, of, *args, **kwargs):
  """Decrypt a symmetrically-encrypted GPG stream from fread into file of.

  Extra arguments are forwarded to
  get_decrypt_symmetric_gpg_literal_packet_reader. Returns the dparams
  dict describing the decrypted stream.
  """
  packet_fread, dparams = get_decrypt_symmetric_gpg_literal_packet_reader(
      fread, *args, **kwargs)
  try:
    copy_gpg_literal_data(packet_fread, of)
  finally:
    # Flush before raising MDC mismatch or something else.
    of.flush()
  return dparams
# --- GPG encryption.
def get_random_bytes_python(size, _pack=struct.pack):
  """Return size pseudorandom bytes using the random module.

  Fallback for platforms without os.urandom; NOT cryptographically secure.
  """
  import random
  # BUGFIX: randrange's upper bound is exclusive, so the bound must be 256
  # (not 255) to make the byte value 0xff reachable. range works on both
  # Python 2 and 3.
  return b''.join(_pack('>B', random.randrange(0, 256)) for _ in range(size))
def get_random_bytes_default(size, _functions=[]):
  """Return size random bytes, preferring os.urandom when it works.

  The chosen generator is memoized in the mutable default _functions, so
  the os.urandom probe runs only once per process.
  """
  if size == 0:
    return b''
  if not _functions:  # First call: probe for a byte source and memoize it.
    import os
    try:
      probe = os.urandom(1)  # More secure than get_random_bytes_python.
      if len(probe) != 1:
        raise ValueError
      _functions.append(os.urandom)
    except (ImportError, AttributeError, TypeError, ValueError, OSError):
      _functions.append(get_random_bytes_python)
  return _functions[0](size)
def get_gpg_packet_header(packet_type, size, _pack=struct.pack):
  """Serialize a GPG packet header for the given packet type and body size.

  Old-format headers (only possible for packet types 1..15) are preferred
  when the size fits; otherwise new-format one-, two- or five-byte length
  encodings are used. Raises ValueError for bad types or sizes.
  """
  if not 1 <= packet_type <= 63:
    raise ValueError('Invalid GPG packet type: %d' % packet_type)
  if size < 0:
    raise ValueError('To-be-created GPG packet has negative size.')
  is_old_capable = packet_type < 16  # Old format encodes only types 1..15.
  if size < 256 and is_old_capable:
    return _pack('>BB', 128 | (packet_type << 2), size)  # Old, 1-byte len.
  if size < 192:
    return _pack('>BB', 192 | packet_type, size)  # New, 1-byte len.
  if size < 65536 and is_old_capable:
    return _pack('>BH', 129 | (packet_type << 2), size)  # Old, 2-byte len.
  if size < 8192 + 192:
    delta = size - 192
    # New format, 2-byte length encoding (192..8383).
    return _pack('>BBB', 192 | packet_type, 192 | delta >> 8, delta & 255)
  if size >> 32:
    raise ValueError('To-be-created GPG packet too large.')
  if is_old_capable:
    return _pack('>BL', 130 | (packet_type << 2), size)  # Old, 4-byte len.
  return _pack('>BBL', 192 | packet_type, 255, size)  # New, 5-byte len.
def yield_partial_gpg_packet_chunks(packet_type, data, fread, buflog2cap, _pack=struct.pack):
  """Yield the encoded bytes of one GPG packet, using partial lengths.

  data is the initial payload; more payload is pulled from fread. If the
  total payload is shorter than 2**buflog2cap bytes, a single
  definite-length packet is emitted; otherwise new-format partial-length
  chunks of 2**buflog2cap bytes each are emitted, terminated by a final
  definite-length chunk. Raises ValueError for a bad packet_type,
  buflog2cap outside 9..30, or oversized initial data.
  """
  if not 1 <= packet_type <= 63:
    raise ValueError('Invalid GPG partial packet type: %d' % packet_type)
  if buflog2cap > 30:  # The GPG file format doesn't support more.
    raise ValueError('buflog2cap must be at most 30, got: %d' % buflog2cap)
  if buflog2cap < 9:  # The GPG file format doesn't support less for the first partial packet.
    raise ValueError('buflog2cap must be at least 9, got: %d' % buflog2cap)
  bufsize = 1 << buflog2cap
  if len(data) > bufsize:
    raise ValueError('Initial partial data too long.')
  data += fread(bufsize - len(data))
  if len(data) < bufsize:
    # Everything fits into one buffer: emit one definite-length packet.
    yield get_gpg_packet_header(packet_type, len(data))
    yield data
  else:
    # Partial-length encoding: (224 | buflog2cap) declares a chunk of
    # exactly 2**buflog2cap bytes (new-format header).
    cont_size_spec = _pack('>B', 224 | buflog2cap)
    yield _pack('>Bc', 192 | packet_type, cont_size_spec)
    while 1:
      yield data
      data = fread(bufsize)
      if len(data) < bufsize:
        break
      yield cont_size_spec
    # Final chunk uses a definite length, which terminates the sequence.
    if len(data) < 192:
      yield _pack('>B', len(data))
    elif len(data) < 8192 + 192:
      b = len(data) - 192
      yield _pack('>BB', 192 | b >> 8, b & 255)
    else:
      yield _pack('>BL', 255, len(data))
    yield data
def write_partial_gpg_packet_chunks(fwrite, packet_type, data, fread, buflog2cap, _pack=struct.pack):
# Similar to yield_partial_gpg_packet_chunks, but calls fwrite instead of
# yield. Added for performance reasons.
if not 1 <= packet_type <= 63:
| |
# # if query failed, refresh school info
# if not user_conf_dict:
# # school info not exist, refresh this school; # {user_name:'',schl_abbr:'', 'open_time':'', school_name:'', classroom:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''},{},{}...]}
# user_conf_dict = crawldata.refresh_school_info(homepage_url='', homepage_response=homepage_response,
# sess=CF.sess, m_headers=CF.M_HEADERS, m_cookies=CF.M_COOKIES,
# verify_key='',
# schl_abbr=schl_abbr,
# sql_conn=sqlact.conn
# )
# debug_p('func_name=', func_name, 'refresh_school_info()', user_conf_dict)
# action query and refresh both failed
if not user_conf_dict:
reply_text = info['verify_failed_get_school_info']
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
# get school info succ and then construct [re_reserve_cmd] data: task_id;userid; 323;21,31; 324;41,51; wechat_sess_id; serverid; comment_info
user_conf_dict['user_name'] = user_name
# get seat coordinate and classroom_name
# all_lib_clssrm dict{libid: clssrm}
all_lib_clssrm = dict([(classroom['libid'], classroom['classroom_name']) for classroom in user_conf_dict['classroom']])
lib_seat_ls = [(libid1, seat_num1), (libid2, seat_num2)]
clssrm_crdnt = CmdFunction.verify_seat(lib_seat_ls, user_conf_dict)
# if coordinate not match, exception
if not clssrm_crdnt:
reply_text = info['verify_failed_seatnum_not_found'].replace('{classrm_libid}', '\n'.join([e['classroom_name'] + '-id=' + str(e['libid']) for e in user_conf_dict['classroom']]))
debug_p('func_name=', func_name, 'reply_text=', reply_text)
return reply_text
classroom_name1, coordinate1 = clssrm_crdnt[0]
classroom_name2, coordinate2 = clssrm_crdnt[1]
debug_p('func_name=', func_name, 'get coordinate1 and coordinate2', 'classroom_name1=', classroom_name1,
'coordinate1=',
coordinate1, 'classroom_name2=', classroom_name2, 'coordinate2=', coordinate2)
# construct[re_reserve_cmd] task_id; userid; user_name; school_name; classroom_name1;323;seat_num; 21,31; classroom_name2; 324; seat_num2; 41,51; wechat_sess_id; serverid; comment_info
open_time = user_conf_dict.get('open_time', '00:00-00:00') if task_kind == CF.TASK_KIND['reserve'] else utils.get_date(format="%H:%M:%S")
submit_time = utils.get_date(format='%Y-%m-%d %H:%M:%S')
open_time = exe_time if exe_time else open_time
wechat_sess_id = wechat_sess_id
succ_failed, detail_info, others_result_info = '', '', ''
task_id = CF.TASK_ID
# others_info is json format
others_info = {}
others_info['all_lib_clssrm'] = all_lib_clssrm
comment_info = ''
serverid = CF.SERVERID if a_task.platform == CF.PLATFORM['IGTL'] else ''
# print('serverid', serverid)
param = (
userid, task_kind, wechat_sess_id, succ_failed, detail_info, others_result_info, task_id,
user_name, school_name, schl_abbr, open_time, classroom_name1, libid1, seat_num1, coordinate1,
classroom_name2, libid2, seat_num2, coordinate2, serverid, comment_info, submit_time,
a_task.pattern, a_task.platform, json.dumps(others_info)
)
#
tb_today_task = 'today_task'
# replace will delete the exist trace and insert a new trace, then the id will change
# insert into tb_today_task
# REPLACE into today_task (userid, task_kind, wechat_sess_id, succ_failed, detail_info, others_result_info , task_id, user_name, school_name, schl_abbr, open_time, classroom_name1, libid1, seat_num1, coordinate1, classroom_name2, libid2, seat_num2, coordinate2, serverid, comment_info, submit_time, pattern, platform, others_info )
sql_today_task = 'REPLACE INTO ' + tb_today_task + \
'(userid, task_kind, wechat_sess_id, succ_failed, detail_info, others_result_info, task_id,' \
'user_name, school_name, schl_abbr, open_time, classroom_name1, libid1, seat_num1, coordinate1,' \
'classroom_name2, libid2, seat_num2, coordinate2, serverid, comment_info, submit_time,' \
'pattern, platform, others_info) ' + \
' VALUES(' + '?,' * (len(param) - 1) + '?)'
sqlact.cur.execute(sql_today_task, param)
sqlact.conn.commit()
debug_p('func_name=', func_name, 'REPLACE and INSERT action; param=', param)
reply_text = info['verify_succ'].replace('{task_id}', str(CF.TASK_ID)).replace('{task_info}', '\n[' + school_name + '-' + schl_abbr + ']' +
'的\n[' + classroom_name1 + '-id=' + libid1 + ']的[' + str(seat_num1) + ']号座位\n' +
'[' + classroom_name2 + '-id=' + libid2 + ']的[' + str(seat_num2) + ']号座位\n执行时间:' + open_time + '') + \
'\n模式:' + ('预定当日💺' if a_task.pattern == CF.PATTERN['TODAY'] else '预约明天💺') + '\n平台:' + ('<我去图书馆>' if a_task.platform == CF.PLATFORM['IGTL'] else '<来选座>')
CF.TASK_ID += 1
debug_p('func_name=', func_name, 'TASK_ID=', CF.TASK_ID, 'grab_seat action over, reply_text=', reply_text)
return reply_text
'''
query_realtime_result
'''
def query_realtime_result(userid, content):
func_name = '[query_realtime_result]'
debug_p(func_name, 'userid, content', userid, content)
return CmdFunction.query_result(userid, content, task_kind=CF.TASK_KIND['realtime'])
'''
parse the dict from memcache
return reply str
'''
def parse_dct_from_mc(result_dct={}, char_limit=CF.CHAR_LIMIT):
# exe trace format
# TRACE_FORMAT = {
# 'head': '状态:{status}\n[{school_name}-{schl_abbr}_{task_id}]\n{submit_time} 提交\n',
# 'exe_trace': '{emoji}{try_cnt}. {exe_time} [{classroom_name}]-[{seat_num}]号座位:{feedback}\n',
# }
default_value = ''
flag = {
'SUCC': '✅',
'FAILED': '❌',
# 'Ongoing': '🔄',
'Ongoing': '🌀',
# 'exe_trace_failed': '⏬'
'exe_trace_failed': '🔸'
}
status = 'Ongoing'
reply_str = '...\n'
reply_str += CF.TRACE_FORMAT['head'].format(status=flag[status] + status, school_name=result_dct.get('school_name', default_value),
schl_abbr=result_dct.get('schl_abbr', default_value), task_id=result_dct.get('task_id', default_value),
submit_time=result_dct.get('submit_time', default_value))
if len(result_dct['exe_trace']) < 1:
return reply_str
code = result_dct['exe_trace'][-1].get('code', default_value)
completed_flag = result_dct['exe_trace'][-1].get('completed_flag', default_value)
if completed_flag == 'completed':
status = 'SUCC' if str(code) == '0' else 'FAILED'
for i, trace in enumerate(result_dct['exe_trace']):
reply_str += CF.TRACE_FORMAT['exe_trace'].format(
emoji=flag['exe_trace_failed'] if str(trace.get('code', default_value)) != '0' else flag['SUCC'],
try_cnt=i, exe_time=trace.get('exe_time', default_value),
classroom_name=trace.get('clssrm', default_value),
seat_num=trace.get('seat_num', default_value), feedback=trace.get('msg', default_value))
return reply_str[-1*char_limit:]
'''
query task result
'''
def query_result(userid, content, task_kind=CF.TASK_KIND['reserve']):
func_name = '[query_result]'
debug_p('func_name=', func_name, 'userid, content', userid, content)
info = {
'default': '没有查询到最近这段时间抢座任务执行状态信息',
}
reply_str = info['default']
result = mc.get_value(key=task_kind + '_' + userid, default='')
if result:
reply_str = CmdFunction.parse_dct_from_mc(result)
# parse the dict from memcache
debug_p(func_name, 'task result reply_str=', reply_str)
# return {'kind': 'no_prefix', 'reply_str': reply_str}
return reply_str
'''
FUNCTION_MAP
'''
FUNCTION_MAP = {
'#check_schl': check_school,
'#add_school_info': add_school_info,
'#force_add_school_info': force_add_school_info,
'#parse_trace': parse_trace,
'#grab_seat': grab_seat,
'#modify_opentime': modify_opentime,
# '#needhelp': needhelp,
'#query_result': query_result,
'#realtime': realtime,
'#query_realtime_result': query_realtime_result,
}
# verify_seat, return clssrm_crdnt=[(classroom_name, coordinate), () ... ]
def verify_seat(lib_seat_ls, user_conf_dict, num_0_value='任意'):
clssrm_crdnt = []
for libid, seatnum in lib_seat_ls:
if int(libid) <= 0:
seatnum = '0'
# user_conf_dict['classroom']:[{'classroom_name':classroom_name,'libid':libid, 'path':classroom_path,'seat_map':''}
# if libid == 0:
classroom_name, coordinate = num_0_value, '0'
for classroom in user_conf_dict['classroom']:
# if int(libid) == 0: classroom_name = "任意"; coordinate = '0'; break
if int(libid) != 0 and coordinate == '0' and classroom['libid'] == libid.replace('-', ''):
classroom_name = classroom['classroom_name']
if seatnum == '0':
coordinate = '0'
break
for pre_0 in ['', '0', '00', '000']:
coordinate = classroom['seat_map'].get(pre_0 + seatnum, coordinate)
if libid != '0' and classroom_name == num_0_value:
# error: libid not found
return []
clssrm_crdnt.append((classroom_name, coordinate))
return clssrm_crdnt
'''
extra help info
'''
class ExtraInfo(object):
    """Random extra help/notice snippets appended to bot replies."""
    prefix = '\n\nℹ️随机帮助信息ℹ️\n'
    # Keyed help entries (currently all disabled; kept for reference).
    I = {
        # 'help': '强调:wechat_sess_id和serverid是需要自己抓包获取的,不是示例里面的qwertyxxx,请仔细阅读说明\n为了避免id失效,抢座任务请尽量在开抢前的5-30分钟时间段内提交\ngithub:https://github.com/qmppz/igotolibrary',
        # 'administrator_info': '如果出现指令无响应无反馈、添加学校失败、多次任务失败...等等摸不着头脑的问题请联系管理员处理。\nwx: turing_01110101',
    }
    # Pool of tips; get_random_info() appends one at random to replies.
    others = ['查看<为了学习>抢座工程的更新进度和即时通知,请看管理员朋友圈。wx: turing_01110101',
              '<为了学习>已经向<我去图书馆>官方反馈了抢座漏洞,官方答复:正在修复中。',
              'wechat_sess_id、serverid是需要自己去抓包获取的,不是示例里面的qwertyxxxx,具体获取方法请看指令帮助文档',
              '指令分隔符可以是逗号或句号或分号或空格或回车,。;,.; 且支持中文符号和英文符号。',
              '<为了学习>工程抢座原理已经开源,且无收费的服务、不买卖程序!只为非计算机的同学提供近似公平的抢座。',
              '服务器已经升级,抢座task实际测试速度提升明显。',
              '服务器指令解析需要时间,请等待几秒钟。',
              '有什么意见或者建议请向管理员反馈。',
              # BUGFIX: the next two entries were accidentally a single string
              # (a missing comma caused implicit literal concatenation).
              '指令中的[学校简称]是英文简称,而不是学校名字的首拼。',
              '为避免抓包获取的serverid失效以及抢座任务遗漏,请在开抢前5-30分钟时间段提交抢座任务。',
              '如果出现指令无响应无反馈、添加学校失败、多次任务失败...等等摸不着头脑的问题请联系管理员。',
              '注意不要把抓包获取到的trace发到<我去图书馆>...请认准<为了学习>',
              '后台消息过多,反馈问题或者建议意见请发送到管理员的微信 turing_01110101',
              '抓包的意思就是进行网络监听并将请求的数据记录显示出来,所以开启抓包软件的时候手机会有风险提示',
              '使用[添加指令]需要满足:1, 在自身没有预定座位的状态下; 2, 自习室都开放的状态下',
              '自习室数量、开抢时间等不正确请反馈管理员wx:turing_01110101',
              '抢座任务在开抢前5-30分钟时间段内提交才能有效',
              # '接下来尝试更新'
              ]
    # cmd_help = '\n指令帮助文档:https://mp.weixin.qq.com/s/1FVTjlDunfngwMip3TFakA'
    cmd_help = '\n<a href="https://mp.weixin.qq.com/s/8HmS4Ct02ZQIcBYRnhTl9Q"> ☞☞指令帮助文档 </a>'
    # get_random_info
    def get_random_info(whichone=-1):
        """Return prefix + one randomly chosen tip + the command-help link.

        The whichone parameter is kept for interface compatibility but is
        not used; the tip is always chosen at random.
        """
        info = list(ExtraInfo.I.values()) + ExtraInfo.others
        return ExtraInfo.prefix + random.choice(info) + ExtraInfo.cmd_help
'''
parse msg from wechat handle; verify if is cmd and execute the cmd`s function
return response
'''
@utils.catch_exception
def handle_msg(userid, content, my_id, LOCAL=False):
# transfer content from byte to str
m_content = content
if isinstance(content, bytes):
m_content = content.decode(encoding='utf-8')
func_name = '#handle_msg'
debug_p('func_name=', func_name, 'userid=', userid, 'content=', content)
'''
check if is test, discard test flag
'''
if str(m_content[:4].split()[0]).lower() in {'test', '内测', '测试'}:
m_content = m_content[:4].replace('test', '').replace('内测', '').replace('测试', '') +\
m_content[4:]
# else:
# # old version entrance function
# return old_version_entrance(userid, content, my_id)
# content is none
content = m_content
if not content:
# return get_reply_msg(str_info=content)
reply_text = CmdFunction.getico(1) + '\n'
return reply_text + ExtraInfo.get_random_info()
# parse, if command
cmd_pre_flag = {
# 'igotolibrary': {'我去图书馆', '来选座'},
# qiangzuo task
'#grab_seat': {'抢座', '明日预约', '预约座位', '抢座位', '抢坐', '#抢坐', '抢位置', 'grab_seat', '#抢座', 'qz', '#qz'},
# realtime greb seat
'#realtime': {'捡漏', '实时预定', '即时预订', '实时预订', '即时预定', 'jl', 'ssyd', 'jsyd', 'realtime'},
'#check_schl': {'查询', '#查询', 'cx', '#cx', 'chaxun', '#查询学校', '查询学校'},
# parse trace
'#parse_trace': {'jx', '#jx', '解析', '#解析', 'wechatsess_id=', 'get'},
# status query
'#add_school_info': {'#添加学校', '添加学校', 'tj', '#tj', '#添加', '添加'},
# force add school
'#force_add_school_info': {'强制添加', '强制添加学校', '强制添加学校信息', 'qztj', 'qztjxxxx'},
# '#needhelp':{'帮助', 'help', 'bz', '帮助信息', '提示'},
# admin cmd
'#gengxin': {},
# modify opentime
'#modify_opentime': {'修改抢座时间', 'xgqzsj', '修改开抢时间', 'xgkqsj'},
# query reserve result
'#query_result': {'查询结果', '结果', 'jg', 'cxjg', '抢座结果', 'qzjg', '查询抢座结果', '查询抢座'},
# query realtime result
'#query_realtime_result': {'查询捡漏结果', '捡漏结果', 'jljg', 'cxjljg', 'jlqzjg', 'jl结果', '实时预定结果', '实时预订结果'}
}
# formatting split_ch to blank
frmt_content = re.sub(r'[(()),;。;,\.]', ' ', content.replace(u'#', '')
.replace(u'#', '')
.replace(u'-', '-').replace(u'➖', '-').replace('- -', '--')
.replace('=', '=')
.replace('\n', CF.USER_CMD_SPLTCH)
)
# del all \n \r and blank
frmt_content = re.sub(r'\s+', CF.USER_CMD_SPLTCH, frmt_content.strip())
content = frmt_content
# judge which kind cmd from index 0
cmd_ls = content.split(CF.USER_CMD_SPLTCH)
cmd_kind = ''
for pre_flag in cmd_pre_flag.keys():
if cmd_ls[0].lower().replace('#', '').strip() in cmd_pre_flag[pre_flag]:
cmd_kind = pre_flag
break
if not cmd_kind:
# specify parse trace
if len(content) > 100 and content.find('wechatSESS_ID') >= 0: # and content.find('SERVERID') >= 0:
# parse trace
cmd_kind = '#parse_trace'
else:
# content is not cmd
no_match_cmd_reply = ['没有匹配到指令...不知道该回应什么',
'没有匹配到指令...反馈问题请联系管理员']
reply_text = CmdFunction.getico(1) * 3 + random.choice(no_match_cmd_reply) + '\n'
return reply_text + ExtraInfo.get_random_info()
# swap wechatSESS_ID and SERVERID to ...;wechatSESS_ID; SERVERID
# if len(cmd_ls) > 2 and cmd_ls[-1].find('wechatSESS_ID') >= 0 and cmd_ls[-2].find('SERVERID') >= 0:
# cmd_ls[-1], cmd_ls[-2] = cmd_ls[-2], cmd_ls[-1]
# content = CF.USER_CMD_SPLTCH.join(cmd_ls)
# print('cmd_ls=', cmd_ls)
# content is cmd then save cmd log
a_cmd_log = utils.get_date() + '|from_user=' + userid + '|cmd_kind=' + cmd_kind + '|content=' + content + '\n'
debug_p('func_name=', func_name, 'cmd_kind=', | |
setting up the rfmstruct is collecting
# all the information needed to automatically generate it.
# Step 5 and onwards implements this algorithm, using the
# *generic functional form* of the hatted quantities (as
# opposed to the exact closed-form expressions of the
# hatted quantities) computed above.
# Step 5: Now that all hatted quantities are written in terms of generic SymPy functions,
# we will now replace SymPy functions with simple variables using rigid NRPy+ syntax,
# and store these variables to globals defined above.
    def make_replacements(expr):
        """Replace SymPy *_funcform functions and derivatives with symbols.

        A Derivative of a *_funcform function, e.g.
        Derivative(f0_of_xx0_funcform(...), (xx0, 2)), becomes a plain
        symbol named like f0_of_xx0__DD00; a bare *_funcform call becomes
        its base symbol (f0_of_xx0). Returns the rewritten expression.
        """
        # First pass: rewrite Derivative nodes into "<func>__D..<dirs>" symbols.
        for item in sp.preorder_traversal(expr):
            if item.func == sp.Derivative:
                stringfunc = str(item.args[0]).split("_funcform(", 1)[0]  # store everything before _funcform(...
                stringderv = str(item.args[1]).replace(" ", "")  # Ignore whitespace
                deriv_wrt = stringderv.split(",")[0].replace("(xx", "")
                derivorder = int(stringderv.split(",")[1].replace(")", ""))
                # Build the derivative suffix: one "D" per order, then the
                # direction digit repeated once per order (e.g. "__DD00").
                derivop = "__D"
                for i in range(derivorder - 1):
                    derivop += "D"
                derivop += deriv_wrt
                for i in range(derivorder - 1):
                    derivop += deriv_wrt
                expr = expr.xreplace(
                    {item: sp.sympify(stringfunc + derivop)})
        # Second pass: replace remaining *_funcform calls by the base symbol.
        for item in sp.preorder_traversal(expr):
            if "_funcform" in str(item.func):
                stringfunc = str(item.func).split("_funcform", 1)[0]  # store everything before _funcform(...
                expr = expr.xreplace({item: sp.sympify(stringfunc)})
        return expr
detgammahat = make_replacements(detgammahat)
for i in range(DIM):
ReU[i] = make_replacements(ReU[i])
detgammahatdD[i] = make_replacements(detgammahatdD[i])
for j in range(DIM):
ReDD[i][j] = make_replacements(ReDD[i][j])
ReUdD[i][j] = make_replacements(ReUdD[i][j])
ghatDD[i][j] = make_replacements(ghatDD[i][j])
ghatUU[i][j] = make_replacements(ghatUU[i][j])
detgammahatdDD[i][j] = make_replacements(detgammahatdDD[i][j])
for k in range(DIM):
ReDDdD[i][j][k] = make_replacements(ReDDdD[i][j][k])
ReUdDD[i][j][k] = make_replacements(ReUdDD[i][j][k])
ghatDDdD[i][j][k] = make_replacements(ghatDDdD[i][j][k])
GammahatUDD[i][j][k] = make_replacements(GammahatUDD[i][j][k])
for l in range(DIM):
ReDDdDD[i][j][k][l] = make_replacements(ReDDdDD[i][j][k][l])
ghatDDdDD[i][j][k][l] = make_replacements(ghatDDdDD[i][j][k][l])
GammahatUDDdD[i][j][k][l] = make_replacements(GammahatUDDdD[i][j][k][l])
# Step 6: At this point, each expression is written in terms of the generic functions
# of xx0, xx1, and/or xx2 and their derivatives. Depending on the functions, some
# of these derivatives may be zero. In Step 5 we'll evaluate the function
# derivatives exactly and set the expressions to zero. Otherwise in the C code
# we'd be storing performing arithmetic with zeros -- wasteful!
# Step 6.a: Construct the full list of *unique* NRPy+ variables representing the
# SymPy functions and derivatives, so that all zero derivatives can be
# computed.
freevars = []
freevars.extend(detgammahat.free_symbols)
for i in range(DIM):
freevars.extend(ReU[i].free_symbols)
freevars.extend(detgammahatdD[i].free_symbols)
for j in range(DIM):
freevars.extend(ReDD[i][j].free_symbols)
freevars.extend(ReUdD[i][j].free_symbols)
freevars.extend(ghatDD[i][j].free_symbols)
freevars.extend(ghatUU[i][j].free_symbols)
freevars.extend(detgammahatdDD[i][j].free_symbols)
for k in range(DIM):
freevars.extend(ReDDdD[i][j][k].free_symbols)
freevars.extend(ReUdDD[i][j][k].free_symbols)
freevars.extend(ghatDDdD[i][j][k].free_symbols)
freevars.extend(GammahatUDD[i][j][k].free_symbols)
for l in range(DIM):
freevars.extend(ReDDdDD[i][j][k][l].free_symbols)
freevars.extend(ghatDDdDD[i][j][k][l].free_symbols)
freevars.extend(GammahatUDDdD[i][j][k][l].free_symbols)
freevars_uniq = superfast_uniq(freevars)
freevars_uniq_xx_indep = []
for i in range(len(freevars_uniq)):
freevars_uniq_xx_indep.append(freevars_uniq[i])
# Step 6.b: Using the expressions f?_of_xx? set in reference_metric(),
# evaluate each needed derivative and, in the case it is zero,
# set the corresponding "freevar" variable to zero.
freevars_uniq_vals = []
for i in range(len(freevars_uniq)):
var = freevars_uniq[i]
basename = str(var).split("__")[0].replace("_funcform", "")
derivatv = ""
if "__" in str(var):
derivatv = str(var).split("__")[1].replace("_funcform", "")
if basename == "f0_of_xx0":
basefunc = f0_of_xx0
elif basename == "f1_of_xx1":
basefunc = f1_of_xx1
elif basename == "f2_of_xx0_xx1":
basefunc = f2_of_xx0_xx1
elif basename == "f3_of_xx0":
basefunc = f3_of_xx0
elif basename == "f4_of_xx2":
basefunc = f4_of_xx2
else:
print("Error: function inside " + str(var) + " undefined.")
sys.exit(1)
diff_result = basefunc
if derivatv == "":
pass
else:
derivorder = derivatv.replace("d", "").replace("D", "").replace("0", "0 ").replace("1", "1 ").replace(
"2", "2 ").split(" ")
for derivdirn in derivorder:
if derivdirn != "":
derivwrt = xx[int(derivdirn)]
diff_result = sp.diff(diff_result, derivwrt)
freevars_uniq_vals.append(diff_result)
frees_uniq = superfast_uniq(diff_result.free_symbols)
xx_dep = False
for dirn in range(3):
if gri.xx[dirn] in frees_uniq:
xx_dep = True
if xx_dep == False:
freevars_uniq_xx_indep[i] = diff_result
# Step 6.c: Finally, substitute integers for all functions & derivatives that evaluate to integers
for varidx in range(len(freevars_uniq)):
detgammahat = detgammahat.subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
for i in range(DIM):
ReU[i] = ReU[i].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
detgammahatdD[i] = detgammahatdD[i].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
for j in range(DIM):
ReDD[i][j] = ReDD[i][j].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
ReUdD[i][j] = ReUdD[i][j].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
ghatDD[i][j] = ghatDD[i][j].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
ghatUU[i][j] = ghatUU[i][j].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
detgammahatdDD[i][j] = detgammahatdDD[i][j].subs(freevars_uniq[varidx],
freevars_uniq_xx_indep[varidx])
for k in range(DIM):
ReDDdD[i][j][k] = ReDDdD[i][j][k].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
ReUdDD[i][j][k] = ReUdDD[i][j][k].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
ghatDDdD[i][j][k] = ghatDDdD[i][j][k].subs(freevars_uniq[varidx], freevars_uniq_xx_indep[varidx])
GammahatUDD[i][j][k] = GammahatUDD[i][j][k].subs(freevars_uniq[varidx],
freevars_uniq_xx_indep[varidx])
for l in range(DIM):
ReDDdDD[i][j][k][l] = ReDDdDD[i][j][k][l].subs(freevars_uniq[varidx],
freevars_uniq_xx_indep[varidx])
ghatDDdDD[i][j][k][l] = ghatDDdDD[i][j][k][l].subs(freevars_uniq[varidx],
freevars_uniq_xx_indep[varidx])
GammahatUDDdD[i][j][k][l] = GammahatUDDdD[i][j][k][l].subs(freevars_uniq[varidx],
freevars_uniq_xx_indep[varidx])
# Step 7: Construct needed C code for declaring rfmstruct, allocating storage for
# rfmstruct arrays, defining each element in each array, reading the
# rfmstruct data from memory (both with and without SIMD enabled), and
# freeing allocated memory for the rfmstrcut arrays.
# struct_str: String that declares the rfmstruct struct.
struct_str = "typedef struct __rfmstruct__ {\n"
define_str = ""
# rfmstruct stores pointers to (so far) 1D arrays. The malloc_str string allocates space for the arrays.
malloc_str = "rfm_struct rfmstruct;\n"
freemm_str = ""
# readvr_str reads the arrays from memory as needed
readvr_str = ["", "", ""]
readvr_SIMD_outer_str = ["", "", ""]
readvr_SIMD_inner_str = ["", "", ""]
# Tease out how many variables each function in freevars_uniq_vals
which_freevar = 0
for expr in freevars_uniq_vals:
if "_of_xx" in str(freevars_uniq_xx_indep[which_freevar]):
frees = expr.free_symbols
frees_uniq = superfast_uniq(frees)
xx_list = []
malloc_size = 1
for i in range(3):
if gri.xx[i] in frees_uniq:
xx_list.append(gri.xx[i])
malloc_size *= gri.Nxx_plus_2NGHOSTS[i]
struct_str += "\tREAL *restrict " + str(freevars_uniq_xx_indep[which_freevar]) + ";\n"
malloc_str += "rfmstruct." + str(
freevars_uniq_xx_indep[which_freevar]) + " = (REAL *)malloc(sizeof(REAL)*" + str(malloc_size) + ");\n"
freemm_str += "free(rfmstruct." + str(freevars_uniq_xx_indep[which_freevar]) + ");\n"
output_define_and_readvr = False
for dirn in range(3):
if (gri.xx[dirn] in frees_uniq) and not (gri.xx[(dirn+1)%3] in frees_uniq) and not (gri.xx[(dirn+2)%3] in frees_uniq):
define_str += "for(int i"+str(dirn)+"=0;i"+str(dirn)+"<Nxx_plus_2NGHOSTS"+str(dirn)+";i"+str(dirn)+"++) {\n"
define_str += " const REAL xx"+str(dirn)+" = xx["+str(dirn)+"][i"+str(dirn)+"];\n"
define_str += " rfmstruct." + str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"] = " + str(sp.ccode(freevars_uniq_vals[which_freevar])) + ";\n"
define_str += "}\n\n"
readvr_str[dirn] += "const REAL " + str(freevars_uniq_xx_indep[which_freevar]) + " = rfmstruct->" + \
str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"];\n"
readvr_SIMD_outer_str[dirn] += "const double NOSIMD" + str(
freevars_uniq_xx_indep[which_freevar]) + " = rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"]; "
readvr_SIMD_outer_str[dirn] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ConstSIMD(NOSIMD" + str(freevars_uniq_xx_indep[which_freevar]) + ");\n"
readvr_SIMD_inner_str[dirn] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ReadSIMD(&rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"]);\n"
output_define_and_readvr = True
if (output_define_and_readvr == False) and (gri.xx[0] in frees_uniq) and (gri.xx[1] in frees_uniq):
define_str += """
for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {
const REAL xx0 = xx[0][i0];
const REAL xx1 = xx[1][i1];
rfmstruct.""" + str(freevars_uniq_xx_indep[which_freevar]) + """[i0 + Nxx_plus_2NGHOSTS0*i1] = """ + str(sp.ccode(freevars_uniq_vals[which_freevar])) + """;
}\n\n"""
readvr_str[0] += "const REAL " + str(freevars_uniq_xx_indep[which_freevar]) + " = rfmstruct->" + \
str(freevars_uniq_xx_indep[which_freevar]) + "[i0 + Nxx_plus_2NGHOSTS0*i1];\n"
readvr_SIMD_outer_str[0] += "const double NOSIMD" + str(freevars_uniq_xx_indep[which_freevar]) + \
" = rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i0 + Nxx_plus_2NGHOSTS0*i1]; "
readvr_SIMD_outer_str[0] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ConstSIMD(NOSIMD" + str(freevars_uniq_xx_indep[which_freevar]) + ");\n"
readvr_SIMD_inner_str[0] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ReadSIMD(&rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i0 + Nxx_plus_2NGHOSTS0*i1]);\n"
output_define_and_readvr = True
if output_define_and_readvr == False:
print("ERROR: Could not figure out the (xx0,xx1,xx2) dependency within the expression for "+str(freevars_uniq_xx_indep[which_freevar])+":")
print(str(freevars_uniq_vals[which_freevar]))
sys.exit(1)
which_freevar += 1
struct_str += "} rfm_struct;\n\n"
# Step 8: Output needed C code to files
outdir = par.parval_from_str(thismodule+"::rfm_precompute_Ccode_outdir")
with open(outdir + "/rfm_struct__declare.h", "w") as file:
file.write(struct_str)
with open(outdir + "/rfm_struct__malloc.h", "w") as file:
file.write(malloc_str)
with open(outdir + "/rfm_struct__define.h", "w") as file:
file.write(define_str)
for i in range(3):
with open(outdir + "/rfm_struct__read" + str(i) + ".h", "w") as file:
file.write(readvr_str[i])
with open(outdir + "/rfm_struct__SIMD_outer_read" + str(i) + ".h", "w") as file:
file.write(readvr_SIMD_outer_str[i])
with open(outdir + "/rfm_struct__SIMD_inner_read" + str(i) + ".h", "w") as file:
file.write(readvr_SIMD_inner_str[i])
with open(outdir + "/rfm_struct__freemem.h", "w") as file:
file.write(freemm_str)
def get_EigenCoord():
    """
    Return the eigencoordinate family of the active coordinate system.

    Scans the configured reference_metric::CoordSystem name for one of the
    known base families and returns the first that occurs as a substring;
    prints an error and exits if none matches.
    """
    coord_system = par.parval_from_str("reference_metric::CoordSystem")
    for eigen_coord in ("Spherical", "Cylindrical", "SymTP", "Cartesian"):
        if eigen_coord in coord_system:
            return eigen_coord
    print("Error: Could not find EigenCoord for reference_metric::CoordSystem == " + coord_system)
    sys.exit(1)
def set_Nxx_dxx_invdx_params__and__xx_h(outdir="."):
import os
with open(os.path.join(outdir,"set_Nxx_dxx_invdx_params__and__xx.h"),"w") as file:
file.write("""
void set_Nxx_dxx_invdx_params__and__xx(const int EigenCoord, const int Nxx[3],
paramstruct *restrict params, REAL *restrict xx[3]) {
// Override parameter defaults with values based on command line arguments and NGHOSTS.
params->Nxx0 = Nxx[0];
params->Nxx1 = Nxx[1];
params->Nxx2 = Nxx[2];
params->Nxx_plus_2NGHOSTS0 = Nxx[0] + 2*NGHOSTS;
params->Nxx_plus_2NGHOSTS1 = Nxx[1] + 2*NGHOSTS;
params->Nxx_plus_2NGHOSTS2 = Nxx[2] + 2*NGHOSTS;
// Step 0d: Set up space and time coordinates
// Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:
#include "set_Cparameters.h"
REAL xxmin[3],xxmax[3];
if(EigenCoord == 0) {
""")
for i | |
# Day 6 Python for chemists
import os
import sys
import csv
import numpy as np
import matplotlib.pyplot as plt
def read_file(path):
    """
    Read a whitespace-separated text file of x,y coordinates.

    Args:
        path (str): path to the data file; every line must contain the same
            number of whitespace-separated numeric fields.

    Returns:
        numpy.ndarray: float matrix of shape (n_lines, n_columns), one row
        per line of the file.
    """
    # `np.float` was an alias for the builtin `float`; it was deprecated in
    # NumPy 1.20 and removed in 1.24, so use `float` directly.
    with open(path, 'r') as read_handle:
        matrix = np.array(
            [line.split() for line in read_handle.readlines()]
        ).astype(float)
    return matrix
def show_graph_from_matrix(matrix):
    """
    Plot the x column against the y column of a coordinate matrix.

    The matrix has shape (n, 2):
    [[x1,y1]
     [x2,y2]
     ...]
    The curve is drawn on the current matplotlib.pyplot figure; the caller
    is responsible for showing or saving it.
    """
    x_values = matrix.T[0]
    y_values = matrix.T[1]
    plt.plot(x_values, y_values)
def read_data_files(path):
    """
    List the data files in a directory.

    Args:
        path (str): path of the directory containing the data files.

    Returns:
        list[str]: one joined path (directory + entry name) per directory
        entry, in `os.listdir` order.
    """
    return [os.path.join(path, entry) for entry in os.listdir(path)]
def find_maxima(matrix):
    """
    Return the rows of a coordinate matrix that are strict local y-maxima.

    Args:
        matrix (numpy.ndarray): (n, 2) array of [[x1,y1],[x2,y2],...].

    Returns:
        numpy.ndarray: the [x, y] rows whose y value is strictly greater
        than both neighbours (endpoints are never maxima); empty array when
        no interior point qualifies.
    """
    y = matrix.T[1]
    # Interior indices only: a peak needs a neighbour on each side.
    peak_rows = [
        matrix[i]
        for i in range(1, len(y) - 1)
        if y[i - 1] < y[i] > y[i + 1]
    ]
    return np.array(peak_rows)
def definite_integral(matrix):
    """
    Trapezoidal integral of y with respect to x over the whole matrix.

    Args:
        matrix (numpy.ndarray): (n, 2) array of [[x1,y1],[x2,y2],...].

    Returns:
        float: np.trapz of the y column sampled at the x column.
    """
    x_values = matrix.T[0]
    y_values = matrix.T[1]
    return np.trapz(y_values, x_values)
def compare_files(og_maxima, new_maxima, compare_file, until=100, divisor=1000):
    """
    Match the peaks of a data file against the reference peaks of data100.txt.

    For each reference peak (row of `og_maxima`), the candidate peaks in
    `new_maxima` are scanned with an x-tolerance dx that grows from 0 to
    until/divisor in steps of 1/divisor; the first candidate whose x value
    falls inside og_x +/- dx is accepted and the search moves on to the next
    reference peak, so each reference peak matches at most one candidate.
    Widening `until`/shrinking `divisor` loosens the matching and can change
    the downstream comparison results significantly.

    Args:
        og_maxima (numpy.ndarray): (n, 2) peak coordinates from data100.txt.
        new_maxima (numpy.ndarray): (m, 2) peak coordinates of the file
            being compared.
        compare_file (str): name of the compared file; 'data100.txt' itself
            is skipped.
        until (int): the dx scan runs up to until/divisor.
        divisor (int): the dx scan step is 1/divisor.

    Returns:
        list | None: [dx_values, dy_values, number_of_matches,
        difference_in_peak_count], or None when compare_file is
        'data100.txt' (no self-comparison).
    """
    # Never compare the reference file against itself.
    if compare_file == 'data100.txt':
        return None
    number_of_matches = 0
    # dx tolerance and |dy| gap for every matched peak pair, in match order.
    dx_values = []
    dy_values = []
    for og_idx, og_val in enumerate(og_maxima.T[0]):
        for idx, val in enumerate(new_maxima.T[0]):
            # Widen the tolerance until this candidate matches, or give up.
            for x in range(until + 1):
                dx = x / divisor
                if og_val - dx <= val <= og_val + dx:
                    print(f"Peak Match : index {og_idx} from data100.txt and {idx} from {compare_file}")
                    print(f"values are {og_val} and {val} respectively")
                    number_of_matches += 1
                    # Record the tolerance that produced the match and the
                    # absolute y-difference of the matched pair.
                    dx_values.append(dx)
                    dy = abs(og_maxima.T[1][og_idx] - new_maxima.T[1][idx])
                    dy_values.append(dy)
                    break  # stop widening: match found
            else:
                # No dx up to until/divisor matched this candidate: move on
                # to the next candidate peak. (The original code held these
                # notes in bare string literals, which are no-op expression
                # statements; they are plain comments now.)
                continue
            # The tolerance loop broke, i.e. this reference peak is matched;
            # don't let later candidates match it again.
            break
    # Difference in peak counts between the two files.
    different_no_peaks = abs(len(new_maxima) - len(og_maxima))
    return [dx_values, dy_values, number_of_matches, different_no_peaks]
# If this file is imported by another file, then this code won't be run.
if __name__ == "__main__":
# Specify the path to the directory containing the data files
data = 'C:/Users/Ale/Data/'
# Get the paths for all the files contained within the data directory.
files = read_data_files(data)
# Initialise list which will contain files that have one maximum with
# x value in the range 0 to 0.05
maxima_in_range = []
# Calculate the matrix for data100.txt
data100_matrix = read_file(files[0])
data100_maxima = find_maxima(data100_matrix)
comparisons = []
for path in files:
file_name = os.path.basename(path)
matrix = read_file(path)
#show_graph_from_matrix(matrix)
#plt.savefig(os.path.join('plots',f"{file_name.split('.')[0]}.png"))
#plt.clf()
# Log which file is being processed
print(file_name)
# Calculate the peaks of the data.
maxima = find_maxima(matrix)
print(f"maxima for {file_name} : \n{maxima}")
# Compute the integral for the file
integral = definite_integral(matrix)
print(integral)
normalisation = 1/integral
# Convert the matrix to a numpy array of itself where all the y values are
# multiplied by the normalisation constant
# basically matrix = np.array(xvalues,yvalues * norm const)
# where the x values are the first element of the transpose of the matrix
# and the y values are the second element.
# The numpy array is then transposed at the end to restore it to its original
# orientation.
matrix = np.array([matrix.T[0],matrix.T[1]*normalisation]).T
# Integral of the normalised matrix (should be equal to 1)
normal_integral = definite_integral(matrix)
print(f"normalised integral : {normal_integral}")
#show_graph_from_matrix(matrix)
#plt.show()
#plt.clf()
# creates a list of maximums where the x value is between 0 and 0.05
# if this list length is exactly 1 then it appends the file name to the
# maxima_in_range list.
if 2 > len(
[maximum for maximum in maxima if 0 < maximum[0] < 0.05]
) > 0 :
maxima_in_range.append(file_name)
# Calls the compare files function on the current working file and data100.txt
# returns the dx values, dy values, number of peak matches, difference in number
# of peaks
comparison_data = compare_files(data100_maxima, maxima, file_name)
# comparison data returns None if it is comparing data100.txt and data100.txt
if comparison_data is not None:
# Calculate the comparison value | |
Iii1 . action == LISP_SEND_MAP_REQUEST_ACTION ) :
if 75 - 75: II111iiii . O0 . I1Ii111 * O0 / OoooooooOO
if 60 - 60: OOooOOo - Oo0Ooo * OOooOOo / OoO0O00
if 55 - 55: I1ii11iIi11i * II111iiii * iIii1I11I1II1
if 38 - 38: iIii1I11I1II1 % I1ii11iIi11i . Ii1I + I1IiiI % i11iIiiIii - i11iIiiIii
if 62 - 62: I1Ii111 + I1IiiI
if ( lisp_rate_limit_map_request ( Oooo0oo000O0 , iII1I1iiII11I ) ) : return
lisp_send_map_request ( lisp_send_sockets , lisp_ephem_port ,
Oooo0oo000O0 , iII1I1iiII11I , None )
else :
ooo0OO = green ( iII1I1iiII11I . print_address ( ) , False )
lprint ( "Map-cache entry for {} already exists" . format ( ooo0OO ) )
if 9 - 9: iIii1I11I1II1 / iIii1I11I1II1
if 24 - 24: OOooOOo . I1IiiI % i11iIiiIii
return
if 43 - 43: OoooooooOO . o0oOOo0O0Ooo - I1ii11iIi11i + OoO0O00 . I1Ii111 . iII111i
if 1 - 1: iII111i / OoO0O00 / OoOoOO00 * Oo0Ooo * OoooooooOO
if 59 - 59: iII111i
if 14 - 14: oO0o . IiII + iIii1I11I1II1 - i1IIi
if 46 - 46: i11iIiiIii * II111iiii / i11iIiiIii % i11iIiiIii * II111iiii + i11iIiiIii
if 87 - 87: Oo0Ooo + OoO0O00 / II111iiii * OoooooooOO
if 95 - 95: I1Ii111 * o0oOOo0O0Ooo + OoO0O00 % OoOoOO00 - ooOoO0o / OoOoOO00
def lisp_ipc_map_cache_entry(mc, jdata):
    """
    Walk-cache callback: serialize one map-cache entry onto the accumulator.

    Serializes *mc* (without sending it over IPC) and appends the result to
    the *jdata* list; returns [True, jdata] so the cache walk continues.
    """
    serialized = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(serialized)
    return [True, jdata]
if 45 - 45: OoooooooOO / oO0o / o0oOOo0O0Ooo + Ii1I + O0 . iII111i
if 34 - 34: iIii1I11I1II1 . o0oOOo0O0Ooo + ooOoO0o
if 96 - 96: O0 / ooOoO0o
if 82 - 82: OoO0O00 * OOooOOo * I11i * I1Ii111 % iIii1I11I1II1
if 50 - 50: Ii1I * Ii1I % I11i / iIii1I11I1II1 / ooOoO0o / iII111i
if 91 - 91: Ii1I - O0 . I11i - OoooooooOO * IiII . II111iiii
if 38 - 38: I1IiiI + OoO0O00
if 11 - 11: iIii1I11I1II1 + i1IIi * IiII - Oo0Ooo
def lisp_ipc_walk_map_cache(mc, jdata):
    """
    Walk-cache callback that also descends into multicast source caches.

    For a unicast entry (null group) the entry itself is serialized onto
    *jdata*. For a multicast (S,G) entry, its nested source cache — if any —
    is walked with the same per-entry serializer. Returns [True, jdata] so
    the outer walk continues.
    """
    if mc.group.is_null():
        return lisp_ipc_map_cache_entry(mc, jdata)
    # Multicast entry without sources: nothing to serialize here.
    if mc.source_cache == None:
        return [True, jdata]
    # (S,G) entries keep their sources in a nested cache; walk it.
    jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
    return [True, jdata]
if 80 - 80: oO0o / i1IIi * iIii1I11I1II1
if 38 - 38: Ii1I
if 20 - 20: iIii1I11I1II1 + Oo0Ooo - Ii1I / i11iIiiIii . OoO0O00
if 66 - 66: OoooooooOO - Ii1I / iII111i . I1IiiI + I1ii11iIi11i - I1Ii111
if 36 - 36: I1Ii111 - OoO0O00 . I1ii11iIi11i * I1ii11iIi11i
if 9 - 9: OOooOOo - oO0o - iIii1I11I1II1 * i11iIiiIii / I11i
if 2 - 2: i1IIi % iII111i * ooOoO0o / OoOoOO00 + Oo0Ooo
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
                          lisp_ipc_listen_socket):
    """
    Record a dynamically discovered EID and notify the lisp-etr process.

    If *eid* is already known in the database's dynamic-EID table, only its
    last-packet timestamp is refreshed. Otherwise a new dynamic-EID entry is
    created for *routed_interface*, logged, and a "learn" command is sent to
    the lisp-etr process over IPC.

    Args:
        db: database entry holding the `dynamic_eids` dict.
        eid: address object of the discovered EID.
        input_interface (str): interface the packet arrived on.
        routed_interface (str): interface the EID is routed on.
        lisp_ipc_listen_socket: socket used for the IPC notification.
    """
    eid_str = eid.print_address()
    # Already discovered: refresh the activity timestamp and return.
    # (`dict.has_key` is Python-2-only; the `in` operator works on both.)
    if eid_str in db.dynamic_eids:
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    # First sighting: build and store a new dynamic-EID record.
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    # Mention the routed interface only when it differs from the input one.
    routed_suffix = ""
    if input_interface != routed_interface:
        routed_suffix = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(
        eid_string, input_interface, routed_suffix, dyn_eid.timeout))

    # Tell the lisp-etr process to learn this EID on the routed interface.
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
if 1 - 1: I1IiiI / OoOoOO00 - oO0o + OoooooooOO
if 51 - 51: ooOoO0o + Ii1I * o0oOOo0O0Ooo * I1IiiI / oO0o + OoO0O00
if 92 - 92: oO0o * o0oOOo0O0Ooo % ooOoO0o + OoOoOO00 * OoooooooOO * Oo0Ooo
if 86 - 86: iII111i / OoooooooOO * I1Ii111 % I1IiiI + Ii1I
if 16 - 16: OoO0O00
if 41 - 41: i1IIi
if 72 - 72: OoooooooOO / i11iIiiIii - O0 . OoOoOO00
if 41 - 41: IiII + oO0o * iIii1I11I1II1 % oO0o + IiII
if 64 - 64: I1ii11iIi11i % OoO0O00 + oO0o
if 47 - 47: I1ii11iIi11i + Ii1I % I1Ii111 % OoO0O00 . IiII % i1IIi
if 14 - 14: O0 / I1IiiI . I1ii11iIi11i
if 47 - 47: I1Ii111 * ooOoO0o / iII111i . O0
if 61 - 61: II111iiii . OoO0O00 * OoO0O00 % II111iiii % OOooOOo * OoOoOO00
def lisp_retry_decap_keys ( addr_str , packet , iv , packet_icv ) :
if ( lisp_search_decap_keys == False ) : return
if 82 - 82: Ii1I
if 83 - 83: I1IiiI
if 22 - 22: IiII / Ii1I + I1Ii111 % iIii1I11I1II1
if 75 - 75: OoOoOO00 % OoOoOO00 % o0oOOo0O0Ooo % I1ii11iIi11i + IiII
if ( addr_str . find ( ":" ) != - 1 ) : return
if 45 - 45: I11i - iIii1I11I1II1
iiiIIIII1iIi = lisp_crypto_keys_by_rloc_decap [ addr_str ]
if 20 - 20: OoOoOO00
for o0OoOo0o0OOoO0 in lisp_crypto_keys_by_rloc_decap :
if 84 - 84: OoOoOO00
if 59 - 59: Ii1I / I1Ii111 + i11iIiiIii
if 20 - 20: O0 / I1Ii111 - OOooOOo % iIii1I11I1II1
if 89 - 89: O0 * OoOoOO00 . ooOoO0o
if ( o0OoOo0o0OOoO0 . find ( addr_str ) == - 1 ) : continue
if 11 - 11: iIii1I11I1II1 * OoO0O00 . I1IiiI * OoOoOO00 / II111iiii
if 72 - 72: I11i
if 7 - 7: i1IIi - o0oOOo0O0Ooo - I1IiiI
if 62 - 62: OoOoOO00 * oO0o - I1IiiI / Ii1I
if ( o0OoOo0o0OOoO0 == addr_str ) : continue
if 48 - 48: o0oOOo0O0Ooo % o0oOOo0O0Ooo - OoOoOO00
if 13 - 13: OoO0O00 - Ii1I . ooOoO0o / O0 * OoOoOO00
if 57 - 57: O0 + OoooooooOO % o0oOOo0O0Ooo / I1Ii111 / OOooOOo - OoOoOO00
if 48 - | |
# repo: rpatil524/t2wml — file: backend/causx/country_entities.py
country_entities={
"Q1000": {
"description": "equatorial country in West Africa",
"label": "Gabon"
},
"Q1005": {
"description": "sovereign state in West Africa",
"label": "The Gambia"
},
"Q1006": {
"description": "sovereign state in West Africa",
"label": "Guinea"
},
"Q1007": {
"description": "sovereign state in Western Africa",
"label": "Guinea-Bissau"
},
"Q1008": {
"description": "sovereign state in West Africa",
"label": "Ivory Coast"
},
"Q1009": {
"description": "sovereign state in West Africa",
"label": "Cameroon"
},
"Q1011": {
"description": "sovereign state comprising ten islands off the Western coast of Africa",
"label": "Cape Verde"
},
"Q1013": {
"description": "sovereign state in southern Africa",
"label": "Lesotho"
},
"Q1014": {
"description": "sovereign state in West Africa",
"label": "Liberia"
},
"Q1016": {
"description": "sovereign state in north Africa",
"label": "Libya"
},
"Q1019": {
"description": "island sovereign state off the coast of Southeast Africa, in the Indian Ocean",
"label": "Madagascar"
},
"Q1020": {
"description": "sovereign state in Africa",
"label": "Malawi"
},
"Q1025": {
"description": "sovereign state in Africa",
"label": "Mauritania"
},
"Q1027": {
"description": "island sovereign state in the Indian Ocean",
"label": "Mauritius"
},
"Q1028": {
"description": "sovereign state in North Africa",
"label": "Morocco"
},
"Q1029": {
"description": "sovereign state in Africa",
"label": "Mozambique"
},
"Q1030": {
"description": "sovereign state in southern Africa",
"label": "Namibia"
},
"Q1032": {
"description": "sovereign state in Western Africa",
"label": "Niger"
},
"Q1033": {
"description": "sovereign state in West Africa",
"label": "Nigeria"
},
"Q1034173": {
"description": "Peasant rebellion then the decentralized Imperial dynasty in Vietnam, 1778 to 1802.",
"label": "Tây Sơn Dynasty"
},
"Q1036": {
"description": "sovereign state in East Africa",
"label": "Uganda"
},
"Q1037": {
"description": "sovereign state in Africa",
"label": "Rwanda"
},
"Q1039": {
"description": "island sovereign state in Africa",
"label": "São Tomé and Príncipe"
},
"Q1041": {
"description": "sovereign state in Western Africa",
"label": "Senegal"
},
"Q1042": {
"description": "island sovereign state off the eastern coast of Africa",
"label": "Seychelles"
},
"Q1044": {
"description": "sovereign state in West Africa",
"label": "Sierra Leone"
},
"Q1045": {
"description": "sovereign state in Africa",
"label": "Somalia"
},
"Q1049": {
"description": "sovereign state in Northeast Africa",
"label": "Sudan"
},
"Q1050": {
"description": "Kingdom in southern Africa",
"label": "Eswatini"
},
"Q114": {
"description": "sovereign state in East Africa",
"label": "Kenya"
},
"Q115": {
"description": "country in East Africa",
"label": "Ethiopia"
},
"Q117": {
"description": "sovereign state in West Africa",
"label": "Ghana"
},
"Q11703": {
"description": "group of islands in the Caribbean",
"label": "United States Virgin Islands"
},
"Q1183": {
"description": "unincorporated territory of the United States",
"label": "Puerto Rico"
},
"Q1246": {
"description": "partially recognized country in southeastern Europe",
"label": "Kosovo"
},
"Q12585": {
"description": "region of the Americas where Romance languages are primarily spoken",
"label": "Latin America"
},
"Q126125": {
"description": "French overseas collectivity, part of the island of Saint Martin in the Lesser Antilles",
"label": "Saint Martin (French part)"
},
"Q132959": {
"description": "area of the continent of Africa that lies south of the Sahara Desert",
"label": "Sub-Saharan Africa"
},
"Q1410": {
"description": "British Overseas Territory",
"label": "Gibraltar"
},
"Q142": {
"description": "country in Western Europe",
"label": "France"
},
"Q1433120": {
"description": "Developing countries with high levels of poverty and debt",
"label": "Heavily Indebted Poor Countries"
},
"Q143487": {
"description": "geographic and cultural region; collective term for the Arabic-speaking countries of Asia and Africa, or of Asia only",
"label": "Arab world"
},
"Q145": {
"description": "country in Western Europe",
"label": "United Kingdom"
},
"Q14773": {
"description": "Special Administrative Region of China",
"label": "Macau"
},
"Q148": {
"description": "socialist state in East Asia",
"label": "People's Republic of China"
},
"Q15180": {
"description": "federal socialist country in Eastern Europe and Northern Asia (1922–1991)",
"label": "Soviet Union"
},
"Q155": {
"description": "country in South America",
"label": "Brazil"
},
"Q159": {
"description": "sovereign state in Eastern Europe and Northern Asia",
"label": "Russia"
},
"Q16": {
"description": "sovereign state in North America",
"label": "Canada"
},
"Q16502": {
"description": "planet Earth and all life upon it, including human civilization",
"label": "world"
},
"Q16635": {
"description": "island territory of the United States of America",
"label": "Guam"
},
"Q16641": {
"description": "US territory in the Pacific",
"label": "American Samoa"
},
"Q16644": {
"description": "American-dependent insular area in the western Pacific",
"label": "Northern Mariana Islands"
},
"Q17": {
"description": "sovereign state in East Asia",
"label": "Japan"
},
"Q17050654": {
"description": "old name for Poland",
"label": "Lechia"
},
"Q17054": {
"description": "island in the Lesser Antilles, overseas region and department of France",
"label": "Martinique"
},
"Q17070": {
"description": "island in the Indian Ocean, overseas region of France",
"label": "Réunion"
},
"Q18221": {
"description": "British Overseas Territory in the Caribbean",
"label": "Turks and Caicos Islands"
},
"Q183": {
"description": "country in Central Europe",
"label": "Germany"
},
"Q184": {
"description": "country in eastern Europe",
"label": "Belarus"
},
"Q189": {
"description": "sovereign state in Northern Europe, situated on an island in the far North Atlantic Ocean",
"label": "Iceland"
},
"Q191": {
"description": "sovereign state in northeastern Europe",
"label": "Estonia"
},
"Q20": {
"description": "country in northern Europe",
"label": "Norway"
},
"Q211": {
"description": "sovereign state in northeastern Europe",
"label": "Latvia"
},
"Q212": {
"description": "sovereign state in eastern Europe",
"label": "Ukraine"
},
"Q21203": {
"description": "island country in the Caribbean, part of the Kingdom of the Netherlands",
"label": "Aruba"
},
"Q213": {
"description": "country in Central Europe",
"label": "Czech Republic"
},
"Q214": {
"description": "country in Central Europe",
"label": "Slovakia"
},
"Q215": {
"description": "country in Central Europe",
"label": "Slovenia"
},
"Q217": {
"description": "sovereign state in southeastern Europe",
"label": "Moldova"
},
"Q218": {
"description": "country in Central and Eastern Europe",
"label": "Romania"
},
"Q219": {
"description": "sovereign state in southeastern Europe",
"label": "Bulgaria"
},
"Q219060": {
"description": "de jure sovereign state in the Middle East, recognized as such by several countries and international organizations, and as an observer member in the United Nations and for most other countries",
"label": "State of Palestine"
},
"Q221": {
"description": "country in southeastern Europe",
"label": "North Macedonia"
},
"Q222": {
"description": "country in southeastern Europe",
"label": "Albania"
},
"Q223": {
"description": "autonomous country within the Kingdom of Denmark",
"label": "Greenland"
},
"Q224": {
"description": "country in southeastern Europe",
"label": "Croatia"
},
"Q225": {
"description": "country in southeastern Europe",
"label": "Bosnia and Herzegovina"
},
"Q227": {
"description": "sovereign state in Western Asia and Eastern Europe",
"label": "Azerbaijan"
},
"Q228": {
"description": "sovereign microstate between France and Spain, in Western Europe",
"label": "Andorra"
},
"Q229": {
"description": "sovereign state situated on an island in the Eastern Mediterranean Sea",
"label": "Cyprus"
},
"Q230": {
"description": "country in the Caucasus between Europe and Asia",
"label": "Georgia"
},
"Q232": {
"description": "sovereign state in Eastern Europe and Central Asia",
"label": "Kazakhstan"
},
"Q233": {
"description": "sovereign state in Southern Europe situated on an archipelago in the Mediterranean Sea",
"label": "Malta"
},
"Q235": {
"description": "sovereign city-state on the French Riviera",
"label": "Monaco"
},
"Q236": {
"description": "country in southeastern Europe",
"label": "Montenegro"
},
"Q23635": {
"description": "British overseas territory in the North Atlantic Ocean",
"label": "Bermuda"
},
"Q23681": {
"description": "limited-recognition state on the Island of Cyprus",
"label": "Turkish Republic of Northern Cyprus"
},
"Q237": {
"description": "independent city-state enclaved within Rome, Italy under the sovereignty of the Holy See; world's smallest sovereign state",
"label": "Vatican City"
},
"Q238": {
"description": "sovereign state in Southern Europe, enclaved within Italy",
"label": "San Marino"
},
"Q241": {
"description": "sovereign state situated on an island in the Caribbean Sea",
"label": "Cuba"
},
"Q242": {
"description": "sovereign state in Central America",
"label": "Belize"
},
"Q244": {
"description": "sovereign state situated on an island in the Western Atlantic Ocean",
"label": "Barbados"
},
"Q252": {
"description": "sovereign state in Southeast Asia situated on more than 17,000 islands",
"label": "Indonesia"
},
"Q25227": {
"description": "former country within the Kingdom of the Netherlands",
"label": "Netherlands Antilles"
},
| |
# repo: samwaseda/clartbeat
import numpy as np
from scipy import ndimage
from scipy.spatial import cKDTree
from sklearn.cluster import DBSCAN
from clartbeat.area import Area
import matplotlib.pylab as plt
from scipy.spatial import ConvexHull
from skimage import feature
from skimage import filters
from sklearn.cluster import AgglomerativeClustering
from clartbeat.tools import *
from clartbeat.surface import Surface
class ProcessImage:
    def __init__(
        self,
        ref_job,
        file_name,
        parameters,
    ):
        """
        Hold one image and lazily compute its segmentation products.

        Args:
            ref_job: owning analysis job; used for callbacks such as
                re-initialization (see `unstich`) — TODO confirm contract.
            file_name (str): path of the image file to process.
            parameters (dict): nested configuration; keys read in this class
                include 'target_size', 'erase_edge', 'white_color',
                'clear_dirt', 'clean_noise', 'canny_edge', 'total_area',
                'stich_high_angles', 'elastic_net' and 'resolution'.
        """
        self.ref_job = ref_job
        # Lazily-filled caches: each None below is computed on first access
        # of the corresponding property.
        self._contact_peri = None
        self._reduction = None
        self._canny_edge_all = None
        self._canny_edge_perimeter = None
        self._elastic_net_perimeter = None
        self._white_color_threshold = None
        self._total_area = None
        self._white_area = None
        self._base_color = None
        # True once stich_high_angles has modified the perimeter in place.
        self._stiched = False
        self.file_name = file_name
        self._img = None
        self.parameters = parameters
    @property
    def img(self):
        """
        The cleaned image array, loaded and preprocessed on first access.

        Pipeline (order matters): load + border cleansing first, then the
        white-color threshold is derived from that intermediate image, and
        both dirt and noise removal reuse that cached threshold.
        """
        if self._img is None:
            self._img = cleanse_edge(
                img=self.load_image(target_size=self.parameters['target_size']),
                erase_edge=self.parameters['erase_edge']
            )
            # Computed on the edge-cleansed image and cached for the
            # `white_color_threshold` property.
            self._white_color_threshold = get_white_color_threshold(
                self._img, **self.parameters['white_color']
            )
            self._img = clear_dirt(
                self._img,
                self.white_color_threshold,
                **self.parameters['clear_dirt'],
            )
            self._img = _clean_noise(
                self._img,
                self.white_color_threshold,
                eps=self.parameters['clean_noise']['eps']
            )
        return self._img
@property
def white_color_threshold(self):
if self._white_color_threshold is None:
_ = self.img
return self._white_color_threshold
    def load_image(self, file_name=None, reduction=None, target_size=None):
        """
        Read an image from disk and return a block-averaged (reduced) copy.

        Args:
            file_name (str, optional): image path; defaults to
                `self.file_name`.
            reduction (int, optional): downsampling factor; overridden when
                `target_size` is given, and defaults to the instance-wide
                factor fixed by the first call.
            target_size (int, optional): approximate desired pixel count
                from which the reduction factor is derived.

        Returns:
            The reduced image via the project helper `get_reduced_mean` —
            presumably a numpy array; confirm against that helper.

        Raises:
            ValueError: if no file name is given and none is stored.
        """
        if file_name is None and not hasattr(self, 'file_name'):
            raise ValueError('file_name not specified')
        if file_name is None:
            file_name = self.file_name
        img = plt.imread(file_name)
        if target_size is not None:
            # Integer factor bringing the pixel count closest to
            # target_size, clamped to at least 1.
            reduction = np.rint(np.sqrt(np.prod(img.shape[:2])/target_size)).astype(int)
            reduction = np.max([1, reduction])
        if self._reduction is None:
            # First call fixes the instance-wide reduction factor and the
            # effective resolution (area scale per reduced pixel).
            self._reduction = reduction
            self.resolution = (self.parameters['resolution']*self._reduction)**2
        if reduction is None:
            reduction = self._reduction
        return get_reduced_mean(img, reduction)
@property
def canny_edge_all(self):
if self._canny_edge_all is None:
self._canny_edge_all = get_edge(
self.get_image(mean=True), self.get_base_color()/255, **self.parameters['canny_edge']
)
return np.stack(np.where(self._canny_edge_all), axis=-1)
    def _get_main_edges(self, eps_areas=5, min_fraction=0.2):
        """
        Reduce the edge-point cloud to the clusters forming the outer boundary.

        Clusters all Canny edge pixels with DBSCAN, takes the convex hull of
        the points in sufficiently large clusters, and keeps every point whose
        cluster contributes at least one hull vertex.

        Args:
            eps_areas (float): DBSCAN neighborhood radius (pixels).
            min_fraction (float): minimum cluster size threshold passed to
                the project helper `large_chunk` — presumably a fraction of
                all edge points; confirm against that helper.

        Returns:
            numpy.ndarray: (n, 2) coordinates of the retained edge points.
        """
        labels = DBSCAN(eps=eps_areas).fit(self.canny_edge_all).labels_
        # NOTE(review): unique_labels/counts are computed but never used.
        unique_labels, counts = np.unique(labels, return_counts=True)
        large_enough = large_chunk(labels, min_fraction=min_fraction)
        hull = ConvexHull(self.canny_edge_all[large_enough])
        return self.canny_edge_all[
            find_common_labels(labels[large_enough][hull.vertices], labels)
        ]
    def get_total_area(
        self,
        number_of_points=360,
        sigma=0.05,
        height_unit=40,
        eps_areas=5,
        min_fraction=0.04
    ):
        """
        Fit a smooth closed outline around the main edge clusters.

        The main edge points are expressed in polar coordinates about their
        centroid, and a locally weighted linear fit of radius versus angle is
        evaluated at `number_of_points` equally spaced angles. Weights decay
        as a Gaussian in angular distance (width `sigma`) and grow
        exponentially with radius (scale `height_unit`), biasing the outline
        toward outer points.

        Returns:
            numpy.ndarray: (number_of_points, 2) outline coordinates,
            clipped to the image bounds.
        """
        p = self._get_main_edges(eps_areas=eps_areas, min_fraction=min_fraction).astype(float)
        mean = np.mean(p, axis=0)
        p -= mean
        # Polar coordinates about the centroid: angle x_i, radius y_i.
        x_i = np.arctan2(*p.T[::-1])
        y_i = np.linalg.norm(p, axis=-1)
        # Triplicate the samples over [-2pi, 4pi) so the fit is periodic.
        x_i = np.concatenate((x_i-2*np.pi, x_i, x_i+2*np.pi))
        y_i = np.concatenate((y_i, y_i, y_i))
        x_range = np.linspace(0, 2*np.pi, number_of_points, endpoint=False)
        # Angular distances wrapped into [-pi, pi).
        dist = x_range[:,None]-x_i[None,:]
        dist -= np.rint(dist/np.pi/2)*2*np.pi
        # Gaussian-in-angle weight with an exponential radial bias.
        w = np.exp((y_i[None,:]-y_i.mean())/height_unit-dist**2/(2*sigma**2))
        slope, intersection = get_local_linear_fit(y_i, x_i, w)
        # Back to Cartesian coordinates, restoring the centroid offset.
        xx = (slope*x_range+intersection)*np.cos(x_range)+mean[0]
        yy = (slope*x_range+intersection)*np.sin(x_range)+mean[1]
        # Clip the outline to the image bounds.
        xx[xx<0] = 0
        yy[yy<0] = 0
        shape = self.get_image().shape[:-1]
        xx[xx>=shape[0]] = shape[0]-1
        yy[yy>=shape[1]] = shape[1]-1
        return np.stack([xx, yy], axis=-1)
    @property
    def canny_edge_perimeter(self):
        """
        Surface built from the fitted total-area outline (cached).

        Side effect: on first access this also initializes
        `_elastic_net_perimeter` with a copy, which `total_perimeter`
        subsequently refines in place.
        """
        if self._canny_edge_perimeter is None:
            self._canny_edge_perimeter = Surface(
                self.get_total_area(**self.parameters['total_area'])
            )
            self._elastic_net_perimeter = self._canny_edge_perimeter.copy()
        return self._canny_edge_perimeter
@property
def total_area(self):
if self._total_area is None:
self._total_area = self.determine_total_area()
return self._total_area
    def determine_total_area(self):
        """
        Rasterize the region enclosed by `total_perimeter` into a boolean mask.

        Each pixel is expressed in polar coordinates (radius, angle) about
        the perimeter centroid and is marked as inside when its radius is
        smaller than the perimeter radius at the nearest sampled angle.

        Returns:
            numpy.ndarray: boolean mask with the 2-D shape of the image.
        """
        canvas = np.ones_like(self.get_image(mean=True))
        mean = np.mean(self.total_perimeter.x, axis=0)
        # Pixel coordinate grids, shifted so the centroid is the origin.
        x = canvas*np.arange(canvas.shape[0])[:,None]
        y = canvas*np.arange(canvas.shape[1])
        x -= mean[0]
        y -= mean[1]
        canvas_r = np.sqrt(x**2+y**2)
        canvas_angle = np.arctan2(y, x)
        # Perimeter samples in the same centroid-centered polar frame.
        x = self.total_perimeter.x-mean
        r = np.linalg.norm(x, axis=-1)
        angle = np.arctan2(x[:,1], x[:,0])
        # Index of the angularly nearest perimeter sample for every pixel.
        argmin = np.argmin(np.absolute(canvas_angle[:,:,None]-angle[None,None,:]), axis=-1)
        return canvas_r<r[argmin]
    def stich_high_angles(
        self,
        sigma=5,
        max_angle=16.2,
        max_angle_diff=100,
    ):
        """Bridge exactly two high-curvature kinks of the perimeter with a straight,
        radially re-scaled segment (in-place on `_elastic_net_perimeter`).

        Two clusters of strongly negative curvature are located; if they are close
        enough along the curve, the perimeter is rolled so the first one is at
        index 0 and the points up to the second are replaced by an interpolated
        arc. Sets `_stiched = True` on success; returns early (no-op) otherwise.

        Args:
            sigma (float): smoothing width for the curvature estimate.
            max_angle (float): curvature threshold defining a "high angle" point.
            max_angle_diff (float): maximum allowed separation (in samples, modulo
                the curve length) between the two kink clusters.
        """
        total_number = len(self.canny_edge_perimeter.x)
        # Points whose (negated) smoothed curvature exceeds the threshold.
        high_angles = -self.canny_edge_perimeter.get_curvature(sigma=sigma)>max_angle
        high_angles = np.arange(len(high_angles))[high_angles]
        if len(high_angles)<2:
            return
        # Group adjacent indices into clusters; one representative index per cluster.
        labels = AgglomerativeClustering(
            n_clusters=None, distance_threshold=1.1, linkage='single'
        ).fit_predict(high_angles.reshape(-1, 1))
        indices = np.sort([
            np.rint(high_angles[labels==l].mean()).astype(int)
            for l in np.unique(labels)
        ])
        if len(indices)!=2:
            return
        d = np.diff(indices)[0]
        # Separation measured modulo the closed curve length.
        if np.absolute(d-np.rint(d/total_number)*total_number) > max_angle_diff:
            return
        # Take the shorter way around the closed curve.
        if np.diff(indices)[0]>0.5*total_number:
            indices = np.roll(indices, 1)
        # Roll so the first kink sits at index 0.
        self._elastic_net_perimeter.x = np.roll(
            self._elastic_net_perimeter.x, -indices[0], axis=0
        )
        indices = (np.diff(indices)+total_number)[0]%total_number
        i_range = np.arange(indices)/indices
        # Linear interpolation between the two kink points.
        dr = i_range[:,None]*(
            self._elastic_net_perimeter.x[indices]-self._elastic_net_perimeter.x[0]
        )
        self._elastic_net_perimeter.x[:indices] = dr+self._elastic_net_perimeter.x[0]
        center = np.mean(self._elastic_net_perimeter.x, axis=0)
        r_a = np.linalg.norm(self._elastic_net_perimeter.x[0]-center)
        r_b = np.linalg.norm(self._elastic_net_perimeter.x[indices]-center)
        inner_prod = np.dot(
            self._elastic_net_perimeter.x[0]-center,
            self._elastic_net_perimeter.x[indices]-center
        )
        # Radial correction so interpolated points follow an arc about the center
        # rather than the straight chord.
        magnifier = i_range*r_a+(1-i_range)*r_b
        magnifier /= np.sqrt(
            i_range**2*r_a**2+(1-i_range)**2*r_b**2+2*i_range*(1-i_range)*inner_prod
        )
        self._elastic_net_perimeter.x[:indices] = magnifier[:,None]*(
            self._elastic_net_perimeter.x[:indices]-center
        )
        self._elastic_net_perimeter.x[:indices] += center
        self._stiched = True
    @property
    def total_perimeter(self):
        """Final perimeter (`Surface`): Canny-edge perimeter after kink stitching
        and elastic-net relaxation; computed on first access."""
        if self._elastic_net_perimeter is None:
            # stich_high_angles accesses canny_edge_perimeter, which initializes
            # _elastic_net_perimeter as a copy before it is refined here.
            self.stich_high_angles(**self.parameters['stich_high_angles'])
            self.run_elastic_net(**self.parameters['elastic_net'])
        return self._elastic_net_perimeter
    def unstich(self):
        """Undo `stich_high_angles`: reinitialize the reference job and recompute
        the elastic-net perimeter from the raw Canny-edge perimeter.
        No-op when nothing was stitched."""
        if not self._stiched:
            return
        self.ref_job.initialize()
        self._elastic_net_perimeter = self.canny_edge_perimeter.copy()
        self.run_elastic_net(**self.parameters['elastic_net'])
def run_elastic_net(
self,
sigma_sobel=15,
sigma_gauss=5,
line_tension=0.2,
dt=0.1,
max_iter=1000,
max_gradient=0.1,
repel_strength=0.01,
):
if max_iter < 1:
return
sobel = filters.sobel(
ndimage.gaussian_filter(self.get_image(mean=True), sigma=sigma_sobel)
)
gauss = repel_strength*ndimage.gaussian_filter(
self.get_image(mean=True), sigma=sigma_gauss
)
self._elastic_net_perimeter.set_energy_field(sobel)
self._elastic_net_perimeter.set_energy_field(gauss)
for i in range(1000):
f_spring = line_tension*self._elastic_net_perimeter.dhook
f_total = self._elastic_net_perimeter.force_field+f_spring
self._elastic_net_perimeter.x -= f_total*dt
if np.linalg.norm(f_total, axis=-1).max()<max_gradient:
break
def get_image(self, mean=False):
if mean:
return np.mean(self.img, axis=-1)
return self.img.copy()
@property
def non_white_area(self):
return self.get_image(mean=True) < self.white_color_threshold
@staticmethod
def _find_maximum(indices, sigma=8, n_items=256, min_fraction=0.5):
count = np.zeros(n_items)
np.add.at(count, indices, 1)
count = ndimage.gaussian_filter(count, sigma)
cond = np.where((count[1:-1]>count[:-2])*(count[1:-1]>count[2:]))[0]
if np.sum(cond)==0:
return count.argmax()
cond = cond[count[cond]/count[cond].max()>min_fraction]
return cond[0]
    def get_base_color(self, mean=True, sigma=6, min_fraction=0.5):
        """Estimate the dominant (base) color of the non-white area; cached.

        A smoothed 256^3 RGB histogram of non-white pixels is searched for
        prominent local maxima; among those, the color with the largest spread
        across its channels (largest std) is chosen.

        Args:
            mean (bool): if True, return the scalar channel mean of the base color.
            sigma (float): Gaussian smoothing width applied to the color histogram.
            min_fraction (float): minimum peak height relative to the tallest peak.

        Returns:
            float or numpy.ndarray: channel mean, or the (3,) base color vector.
        """
        if self._base_color is None:
            all_colors = self.get_image()[self.non_white_area]
            unique_colors, counts = np.unique(all_colors, return_counts=True, axis=0)
            field = np.zeros((256, 256, 256))
            field[tuple(unique_colors.T)] = counts
            field = ndimage.gaussian_filter(field, sigma=sigma)
            # Local maxima of the smoothed histogram (maximum_filter equality test).
            cond = (field==ndimage.maximum_filter(field, size=sigma))*(field!=0)
            colors = np.stack(np.where(cond)).T
            colors = colors[field[cond]>min_fraction*field[cond].max()]
            # Pick the candidate with the largest channel spread (most saturated).
            self._base_color = colors[np.std(colors, axis=-1).argmax()]
        if mean:
            return np.mean(self._base_color)
        return self._base_color
@property
def relative_distance_from_base_color(self):
img = self.get_image()-self.get_base_color(mean=False)
img = np.linalg.norm(img, axis=-1)
return img/img.max()
def _get_max_angle(self, x):
center = np.stack(np.where(self.total_area), axis=-1).mean(axis=0)
x = x.copy()-center
return np.min([np.arctan2(x[:,1], x[:,0]).ptp(), np.arctan2(x[:,1], -x[:,0]).ptp()])
def _get_biased_coordinates(self, x, bias):
center = np.stack(np.where(self.total_area), axis=-1).mean(axis=0)
x = x.copy()-center
phi = np.arctan2(x[:,1], x[:,0])
r = np.linalg.norm(x, axis=-1)
r *= bias[0]
phi *= bias[1]
return np.stack((r*np.cos(phi), r*np.sin(phi)), axis=-1)
def _get_relative_coordinates(self, x, theta_0=0):
xx = self.ref_job.heart.pca.get_relative_points(x)
theta = np.arctan2(*xx.T)
theta -= theta_0
theta -= np.rint(theta*0.5/np.pi)*2*np.pi
r = np.linalg.norm(xx, axis=-1)
return np.stack((r, theta), axis=-1)
def _polar_to_cartesian(self, rt):
return self.ref_job.heart.pca.get_absolute_points(
np.stack(rt[:,0]*np.array([np.cos(rt[:,1]), np.sin(rt[:,1])]), axis=-1)
)
def _find_neighbors(self, key, bias=None, max_dist=20, min_counts=1):
x_current = self.white_area.get_all_positions(key)
if bias is not None:
theta_0 = np.mean(self._get_relative_coordinates(x_current)[:,1])
rt_l = self._get_relative_coordinates(x_current, theta_0)
x_current = self._polar_to_cartesian(rt_l*bias)
tree = cKDTree(x_current)
for ii,x in zip(
self.white_area.get_indices('unknown', unique=True), self.white_area.get_positions()
):
if bias is not None:
x = self._polar_to_cartesian(self._get_relative_coordinates(x, theta_0)*bias)
counts = tree.count_neighbors(cKDTree(x), r=max_dist)/max_dist**3
if counts > min_counts:
self.white_area.tags[ii] = key
self._find_neighbors(key, bias=bias, max_dist=max_dist, min_counts=min_counts)
@property
def total_mean_radius(self):
return np.sqrt(np.sum(self.total_area)/np.pi)
def _left_lumen_exists(self, size, dist, dist_interval=None, fraction_interval=None):
if dist_interval is None or fraction_interval is None:
return True
fraction_criterion = get_slope(size/np.sum(self.total_area), fraction_interval)
dist_criterion = get_slope(dist/self.total_mean_radius, dist_interval)
return np.any(fraction_criterion*dist_criterion > 0.5)
    def _remove_excess(
        self,
        points,
        eps=1.5,
        size=0.05,
        min_samples=5,
        min_fraction=0.2,
    ):
        """Drop small satellite pixel groups from `points`, keeping only pixels
        near sufficiently large clusters of the eroded point canvas.

        Args:
            points (numpy.ndarray): (N, 2) pixel positions.
            eps (float): DBSCAN neighborhood radius; 0 disables filtering.
            size (float): erosion window as a fraction of sqrt(len(points));
                0 disables filtering.
            min_samples (int): DBSCAN core-point threshold.
            min_fraction (float): minimum cluster fraction kept by `large_chunk`.

        Returns:
            numpy.ndarray: the surviving subset of `points`.
        """
        if size*eps==0:
            return points
        size = np.rint(np.sqrt(len(points))*size).astype(int)
        area = self.get_canvas(points, fill_value=0)
        # Erode the rasterized points, then cluster what survives.
        x = np.stack(np.where(ndimage.minimum_filter(area, size=size)>0), axis=-1)
        labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(x)
        tree = cKDTree(data=x)
        # Keep original points that lie within a Chebyshev distance `size` of the
        # eroded core, inheriting the core point's cluster label.
        dist, indices = tree.query(points, p=np.inf, distance_upper_bound=size)
        x, indices = abridge(dist<size, points, indices)
        labels = labels[indices]
        return x[large_chunk(labels, min_fraction=min_fraction)]
    def get_left_lumen(
        self,
        max_dist=20,
        dist_interval=None,
        fraction_interval=[0.001, 0.006],
        recursion=0,
        min_counts=1,
        eps_excess=1.5,
        size_excess=0.05,
        min_samples=5,
        min_fraction=0.2
    ):
        """Detect (and tag) the left-lumen pixel positions among the 'unknown'
        white areas; returns None when no plausible candidate exists.

        The seed area is the one minimizing squared distance to the heart center
        scaled by the area size; neighbors are then absorbed via `_find_neighbors`
        and small satellites stripped via `_remove_excess`.

        NOTE(review): `recursion` is accepted but never used here — confirm it is
        kept only for signature compatibility. `fraction_interval` is a mutable
        default argument; it is not mutated below, but callers should not rely on
        mutating it either.
        """
        if 'left' in self.white_area.tags:
            return self.white_area.get_all_positions('left')
        heart_center = self.heart_area.mean(axis=0)
        # Distance of each unknown area's centroid from the heart center.
        distances = np.array([
            np.linalg.norm(heart_center-np.mean(xx, axis=0), axis=-1)
            for xx in self.white_area.get_positions(tag='unknown')
        ])
        size = self.white_area.get_counts(tag='unknown')
        if not self._left_lumen_exists(size, distances, dist_interval, fraction_interval):
            return None
        x = self._get_radial_mean_value()
        # Seed: area closest to the heart center, favoring larger areas.
        indices = np.argmin(np.linalg.norm(x-heart_center, axis=-1)**2/size)
        indices = np.unique(indices)
        self.white_area[indices] = 'left'
        if max_dist > 0:
            self._find_neighbors('left', max_dist=max_dist, min_counts=min_counts)
        x = self.white_area.get_all_positions('left')
        return self._remove_excess(
            x,
            eps=eps_excess,
            size=size_excess,
            min_samples=min_samples,
            min_fraction=min_fraction
        )
def _get_rl_contact_counts(self, tree, r_max, contact_interval, tag='unknown'):
if tree is None:
return 0
indices, values = self._get_contact_counts(tree=tree, r_max=r_max, tag=tag)
return self.white_area.fill(
get_slope(values, contact_interval), indices, filler=1.0, tag=tag
)
def _get_rl_perimeter(self, r_max=3, contact_interval=[0.3, 0], tag='unknown'):
return self._get_rl_contact_counts(
self.ref_job.heart.perimeter.tree,
r_max=r_max,
contact_interval=contact_interval,
tag=tag
)
def _get_rl_left(self, r_max=5, contact_interval=[0.3, 0], tag='unknown'):
return self._get_rl_contact_counts(
self.ref_job.left.tree, r_max=r_max, contact_interval=contact_interval, tag=tag
)
def _get_rl_size(self, tag='unknown'):
return self.white_area.fill(
self.white_area.get_counts(tag=tag)/len(self.heart_area), tag=tag
)
def _get_rl_distance(self, tag='unknown'):
distance = np.log(
self.ref_job.left.pca.get_scaled_distance(self._get_radial_mean_value(tag=tag))
)
distance += np.log(self.ref_job.left.get_length().mean())
distance -= np.log(self.ref_job.heart.get_length().mean())
return self.white_area.fill(get_softplus(distance), tag=tag)
    def _get_rl_curvature(
        self,
        sigmas=[20, 35],
        sigma_interval=[0.08, 0.15],
        curvature_interval=[0.002, -0.002],
        tag='unknown'
    ):
        """Per-pixel weight from the perimeter curvature where the line from the
        left-lumen center through each area's centroid crosses the perimeter.

        The curvature smoothing width is itself interpolated between `sigmas`
        according to the (square-rooted) relative size of the largest area.
        """
        # Size-dependent smoothing: interpolate between sigmas[0] and sigmas[1].
        sigma = sigmas[0]+get_slope(
            np.sqrt(self.white_area.get_counts(tag=tag).max()/len(self.heart_area)),
            sigma_interval
        )*np.diff(sigmas)[0]
        return self.white_area.fill(get_slope([
            self.ref_job.heart.perimeter.get_crossing_curvature(
                self.ref_job.left.get_center(),
                np.mean(x, axis=0),
                sigma=sigma,
                laplacian=True
            )
            for x in self.white_area.get_positions(tag=tag)
        ], curvature_interval), tag=tag)
def get_rl_weights(
self,
r_perimeter=3,
r_left=5,
contact_interval=[0.3, 0],
curvature_sigmas=[20, 30],
curvature_sigma_interval=[0.08, 0.12],
curvature_interval=[0.002, -0.002],
tag='unknown',
):
w = self._get_rl_perimeter(r_max=r_perimeter, contact_interval=contact_interval, tag=tag)
w *= self._get_rl_left(r_max=r_left, contact_interval=contact_interval, tag=tag)
w *= self._get_rl_size(tag=tag)
w *= self._get_rl_distance(tag=tag)
w *= self._get_rl_curvature(
sigmas=curvature_sigmas,
sigma_interval=curvature_sigma_interval,
curvature_interval=curvature_interval,
tag=tag
)
return w
    def get_right_lumen(
        self,
        max_dist=20,
        bias=[1.0, 0.2],
        min_counts=1,
        dist_interval=None,
        recursion=0,
        r_perimeter=3,
        r_left=5,
        contact_interval=[0.3, 0],
        curvature_sigmas=[20, 30],
        curvature_sigma_interval=[0.08, 0.12],
        curvature_interval=[0.002, -0.002],
        min_weight=0.0017,
        eps_excess=1.5,
        size_excess=0.05,
        min_samples=5,
        min_fraction=0.2
    ):
        """Detect (and tag) the right-lumen pixel positions; requires the left
        lumen to have been found first. Returns None when no candidate reaches
        `min_weight`.

        The seed pixel is the argmax of `get_rl_weights`; neighboring areas are
        absorbed with an angularly compressed metric (`bias`), and small
        satellites are stripped via `_remove_excess`.

        NOTE(review): `dist_interval` and `recursion` are accepted but unused
        here — confirm they are kept only for signature compatibility.
        """
        if 'right' in self.white_area.tags:
            return self.white_area.get_all_positions('right')
        if not self.ref_job.left.exists():
            return None
        weights = self.get_rl_weights(
            r_perimeter=r_perimeter,
            r_left=r_left,
            contact_interval=contact_interval,
            curvature_sigmas=curvature_sigmas,
            curvature_sigma_interval=curvature_sigma_interval,
            curvature_interval=curvature_interval
        )
        if weights.max() < min_weight:
            return None
        indices = np.argmax(weights)
        self.white_area.tags[indices] = 'right'
        if max_dist > 0:
            self._find_neighbors(
                'right',
                bias=bias,
                max_dist=max_dist,
                min_counts=min_counts,
            )
        x = self.white_area.get_all_positions('right')
        return self._remove_excess(
            x,
            eps=eps_excess,
            size=size_excess,
            min_samples=min_samples,
            min_fraction=min_fraction
        )
def _get_radial_mean_value(self, center=None, tag='unknown'):
if center is None:
center = self.heart_area.mean(axis=0)
x_mean_lst = []
for x in self.white_area.get_positions(tag=tag):
xx = x-center
r_mean = np.linalg.norm(xx, axis=-1).mean()
x_mean_lst.append(xx.mean(axis=0)/np.linalg.norm(xx.mean(axis=0))*r_mean+center)
return np.array(x_mean_lst)
    def _get_white_area(self, eps=1, min_samples=5, size=6, max_regroup_fraction=0.1):
        """Build the `WhiteArea` of clustered white pixels.

        Minimum-filtered (eroded) pixels are clustered first; median-filtered
        pixels within `size` of an eroded pixel inherit its label. If erosion
        collapses essentially everything into one cluster, clustering is redone
        on the median-filtered pixels directly. DBSCAN noise (label -1) is
        dropped.

        Returns:
            WhiteArea: labeled white-pixel positions.
        """
        x_min = self.apply_filter(ndimage.minimum_filter, size=size)
        tree = cKDTree(data=x_min)
        labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(x_min)
        x = self.apply_filter(ndimage.median_filter, size=size)
        # Chebyshev distance from each median-filtered pixel to the eroded core.
        dist = tree.query(x, p=np.inf, distance_upper_bound=size)[0]
        x_core = x[dist<size]
        if len(np.unique(labels[large_chunk(labels, max_regroup_fraction)]))==1:
            # Erosion left a single dominant cluster: re-cluster without erosion.
            tree = cKDTree(data=x)
            labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(x)
        labels = labels[tree.query(x_core)[1]]
        return WhiteArea(*abridge(labels!=-1, x_core, labels))
def apply_filter(self, filter_to_apply, size):
| |
except:
raise Exception('Failed to generate lidar DEM to estimate height range!')
minHeight = lidarMin
maxHeight = lidarMax
# Get the min/max height in the reference DEM region
# - The lidar field is narrower than the image so sometimes it can miss terrain features
# that show up in the lower resolution DEM.
try:
demCropPath = os.path.join(options.outputFolder, 'cropped_ref_dem.tif')
cropGdalImage(projBounds, options.referenceDem, demCropPath, logger)
refDemMin, refDemMax, refDemMean, refDemStd = asp_image_utils.getImageStats(demCropPath)[0]
logger.info('Found ref DEM height min = %f, max = %f' % (refDemMin, refDemMax))
if refDemMin < minHeight:
minHeight = refDemMin
if refDemMax > maxHeight:
maxHeight = refDemMax
except:
logger.warning('Error generating reference DEM height estimate for ')
minHeight = minHeight - HEIGHT_BUFFER
maxHeight = maxHeight + HEIGHT_BUFFER
# Generate the height string
s = '--elevation-limit ' + str(minHeight) +' '+ str(maxHeight)
return (s, lidarDemPath)
def getWidthAndMemUsageFromStereoOutput(outputText, errorText):
    '''Parse the output from running stereo and return the search range width and
       the memory usage in GB.

       Returns (corrSearchWidth, memUsageGb, elapsed, parsedOk); the widths are -1,
       elapsed is "-1", and parsedOk is False when the matching lines are absent.'''
    parsedOk = False
    corrSearchWidth = -1
    memUsage = -1
    elapsed = "-1"
    out = outputText + "\n" + errorText
    for line in out.split('\n'):
        # BUG FIX: the regexes were plain strings; \s and \d are invalid escape
        # sequences there (SyntaxWarning on modern Python) — use raw strings.
        m = re.match(r"^.*?Search\s+Range:.*?Origin:.*?width:\s*(\d+)", line, re.IGNORECASE)
        if m:
            corrSearchWidth = float(m.group(1))
            parsedOk = True
        m = re.match(r"^.*?elapsed=(.*?)\s+mem=(\d.*?)\s+.*?time_stereo_corr", line, re.IGNORECASE)
        if m:
            elapsed = m.group(1)
            memUsage = float(m.group(2))
            memUsage = float(round(memUsage/100000.0))/10.0 # KB -> GB, one decimal
            parsedOk = True
    return (corrSearchWidth, memUsage, elapsed, parsedOk)
def createDem(i, options, inputPairs, prefixes, demFiles, projString,
              heightLimitString, threadText, matchFilePair,
              suppressOutput, redo, logger=None):
    '''Create a DEM from a pair of images.

       Runs ASP `stereo` on frames i and i+options.stereoImageInterval (retrying
       with the bundle_adjust .match file when the first attempt fails), then
       `point2dem` twice: once for the output DEM (demFiles[i]) and once for a
       larger-footprint "-footprint" DEM used later as a blending template.
       Raises Exception when stereo/point2dem fail or the DEM looks invalid.

       Args:
           i: index of the first image of the pair.
           options: parsed command-line options (stereoImageInterval, stereoArgs,
               manyip, demResolution, outputFolder, ...).
           inputPairs: list of (image, camera) pairs.
           prefixes: per-pair ASP output prefixes.
           demFiles: per-pair output DEM paths.
           projString: PROJ string for point2dem --t_srs.
           heightLimitString: '--elevation-limit ...' or empty/whitespace.
           threadText: thread-count arguments for the ASP tools.
           matchFilePair: (source, link) paths of the bundle_adjust .match file;
               source may be "" when unavailable.
           suppressOutput, redo: forwarded to executeCommand.
           logger: optional logger for progress messages.'''
    # Since we use epipolar alignment our images should be aligned at least this well.
    VERTICAL_SEARCH_LIMIT = 10
    TIMEOUT = 40*60 # Do not let any process take more than this time in seconds
    # Get the appropriate image to use as a stereo pair
    pairIndex = i + options.stereoImageInterval
    thisPairPrefix = prefixes[i]
    argString = ('%s %s %s %s ' % (inputPairs[i][0], inputPairs[pairIndex][0],
                 inputPairs[i][1], inputPairs[pairIndex][1]))
    # - This epipolar threshold is post camera model based alignment so it can be quite restrictive.
    # - Note that the base level memory usage ignoring the SGM buffers is about 2 GB so this memory
    # usage is in addition to that.
    minIpString = '--min-num-ip 40'
    stereoCmd = (('stereo %s %s %s %s -t nadirpinhole --alignment-method epipolar ' +
                  '--skip-rough-homography --corr-blob-filter 50 --corr-seed-mode 0 ' +
                  '--remove-outliers-by-disparity-params 90.0 3.0 --epipolar-threshold 10 %s ') %
                 (argString, thisPairPrefix, threadText, heightLimitString, minIpString))
    searchLimitString = (' --corr-search-limit -9999 -' + str(VERTICAL_SEARCH_LIMIT) +
                         ' 9999 ' + str(VERTICAL_SEARCH_LIMIT) )
    # SGM-style correlation options apply unless the caller forces algorithm 0.
    if '--stereo-algorithm 0' not in options.stereoArgs:
        correlationArgString = (' --xcorr-threshold 2 --corr-kernel 7 7 '
                                + ' --corr-tile-size 9000 --cost-mode 4 --sgm-search-buffer 4 2 '
                                + searchLimitString + ' --corr-memory-limit-mb 6000 '
                                + options.stereoArgs
                               )
        #+ ' --corr-blob-filter 100')
        filterArgString = (' --rm-cleanup-passes 0 --median-filter-size 5 ' +
                           ' --texture-smooth-size 17 --texture-smooth-scale 0.14 ')
    else:
        correlationArgString = options.stereoArgs
        filterArgString = ''
    stereoCmd += correlationArgString
    stereoCmd += filterArgString
    stereoCmd += ' --check-mem-usage'
    # Call and check status
    triOutput = thisPairPrefix + '-PC.tif'
    icebridge_common.logger_print(logger, stereoCmd)
    if (not options.manyip) or (matchFilePair[0] == ""):
        (out, err, status) = asp_system_utils.executeCommand(stereoCmd, triOutput,
                                                             suppressOutput, redo, noThrow=True,
                                                             timeout = TIMEOUT)
    else:
        # Jump directly to using the ip from bundle_adjust
        (out, err, status) = ("", "", -1)
    if status != 0:
        # If stereo failed, try it again with the .match file that was created by bundle_adjust.
        if not options.manyip:
            icebridge_common.logger_print(logger, 'First stereo attempt failed, will copy .match file from bundle_adjust and retry.')
        else:
            icebridge_common.logger_print(logger, 'Copy right away .match file from bundle_adjust and retry.')
        # Clear any existing .match file then link in the new one.
        cmd = 'rm -f ' + thisPairPrefix + '*.match'
        icebridge_common.logger_print(logger, cmd)
        os.system(cmd)
        if matchFilePair[0] == "":
            # This can happen if the bundle adjust directory got cleaned up. Nothing we can do.
            raise Exception("No usable match files. Stereo call failed.")
        icebridge_common.makeSymLink(matchFilePair[0], matchFilePair[1])
        if not os.path.exists(matchFilePair[1]):
            raise Exception('Failed to create .match file symlink: ' + matchFilePair[1])
        # With the .match file copied we can retry with the same parameters.
        # - Remove some filtering steps we don't need.
        # - Exception is the height limit string, which we can remove if using existing IP.
        stereoCmd = stereoCmd.replace(minIpString, '--min-num-ip 10')
        m = re.match("^\s*$", heightLimitString)
        if not m: # This is a bugfix, check for empty heightLimitString
            stereoCmd = stereoCmd.replace(heightLimitString, ' ')
        icebridge_common.logger_print(logger, stereoCmd)
        os.system('rm -f ' + triOutput) # In case the output cloud exists but is bad
        (out, err, status) = asp_system_utils.executeCommand(stereoCmd, triOutput, suppressOutput,
                                                             redo, noThrow=True, timeout = TIMEOUT)
        if status != 0:
            # If we fail again give up.
            icebridge_common.logger_print(logger, out + '\n' + err)
            raise Exception('Stereo call failed!')
    # Extract the search range width and memory usage from the output text.
    (corrSearchWidth, memUsage, elapsed, gotMemStats) = \
        getWidthAndMemUsageFromStereoOutput(out, err)
    icebridge_common.logger_print(logger, ("Corr search width: %d mem usage: %f GB elapsed: %s" %
                                           (corrSearchWidth, memUsage, elapsed) ) )
    if i == 0 and gotMemStats:
        # If we could not parse the data, write nothing. Maybe this time
        # we are rerunning things, and did not actually do any work.
        filePath = os.path.join(os.path.dirname(os.path.dirname(thisPairPrefix)),
                                icebridge_common.getRunStatsFile())
        icebridge_common.logger_print(logger, "Writing: " + filePath)
        with open(filePath, 'w') as f:
            f.write( ("%d, %f, %s\n") % (corrSearchWidth, memUsage, elapsed) )
    # point2dem on the result of ASP
    # - The size limit is to prevent bad point clouds from creating giant DEM files which
    # cause the processing node to crash.
    cmd = (('point2dem ' + ' --errorimage ' +
            # '--max-valid-triangulation-error 10 ' + # useful when studying distortion
            '--max-output-size 10000 10000 --dem-spacing %lf --t_srs %s %s %s')
           % (options.demResolution, projString, triOutput, threadText))
    p2dOutput = demFiles[i]
    icebridge_common.logger_print(logger, cmd)
    (out, err, status) = asp_system_utils.executeCommand(cmd, p2dOutput, suppressOutput,
                                                         redo, noThrow=True)
    if status != 0:
        icebridge_common.logger_print(logger, out + '\n' + err)
        raise Exception('point2dem call on stereo pair failed!')
    # Require a certain percentage of valid output pixels to go forwards with this DEM
    # - This calculation currently does not work well but anything under this is probably bad.
    # TODO: This validity fraction is NOT ACCURATE and needs to be improved!
    MIN_FRACTION_VALID_PIXELS = 0.10
    percentageFlagFile = os.path.join(options.outputFolder, 'valid_pixel_fraction.txt')
    fractionValid = 1.0;
    # Try to parse the output text for the percentage or read it from disk if we already logged it.
    m = re.findall(r"Percentage of valid pixels = ([0-9e\-\.\+]+)", out)
    if len(m) == 1:
        fractionValid = float(m[0])
        icebridge_common.logger_print(logger, 'Valid DEM pixel fraction = ' + str(fractionValid))
        with open(percentageFlagFile, 'w') as f: # Log the percentage to disk
            f.write(str(fractionValid))
    else:
        try: # Read the percentage from disk
            with open(percentageFlagFile, 'r') as f:
                fractionValid = float(f.read())
        except:
            icebridge_common.logger_print(logger, 'Unable to read dem percentage fraction from file ' + percentageFlagFile)
    icebridge_common.logger_print(logger, 'Detected valid pixel fraction = ' + str(fractionValid))
    if fractionValid < MIN_FRACTION_VALID_PIXELS:
        raise Exception('Required DEM pixel fraction is ' + str(MIN_FRACTION_VALID_PIXELS) +
                        ', got instead ' + str(fractionValid) + ' aborting processing on this DEM.')
    # If the output DEM is too small then something is probably wrong.
    MIN_DEM_SIZE_PIXELS = 200
    (width, height) = asp_image_utils.getImageSize(p2dOutput)
    if (width < MIN_DEM_SIZE_PIXELS) or (height < MIN_DEM_SIZE_PIXELS):
        raise Exception('DEM size (' + str(width) + ', ' + str(height) +
                        ') is less than minumum size ' + str(MIN_DEM_SIZE_PIXELS))
    # The DEM with larger footprint, not filtered out as agressively. We use
    # the valid pixels in this DEM's footprint as a template where to blend.
    p2dFoot = thisPairPrefix + '-footprint'
    cmd = ( ('point2dem --max-output-size 10000 10000 --tr %lf --t_srs %s %s %s ' +
             ' --remove-outliers-params 75 12 -o %s ')
            % (options.demResolution, projString, triOutput, threadText, p2dFoot))
    p2dFoot = p2dFoot + '-DEM.tif'
    icebridge_common.logger_print(logger, cmd)
    (out, err, status) = asp_system_utils.executeCommand(cmd, p2dFoot, suppressOutput, redo,
                                                         noThrow=True)
    if status != 0:
        icebridge_common.logger_print(logger, out + '\n' + err)
        raise Exception('point2dem call on stereo pair failed!')
def cleanBatch(batchFolder, alignPrefix, stereoPrefixes,
interDiffPaths, fireballDiffPaths):
'''Clean up all non-output files to conserve space.
Setting | |
IDD field `Relative Ratios of Air Flow Rates Passing
through Zone 7`"""
self[
"Relative Ratios of Air Flow Rates Passing through Zone 7"] = value
@property
def cross_sectional_areas_of_air_channel_inlet_7(self):
"""field `Cross Sectional Areas of Air Channel Inlet 7`
| Units: m2
Args:
value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 7`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `cross_sectional_areas_of_air_channel_inlet_7` or None if not set
"""
return self["Cross Sectional Areas of Air Channel Inlet 7"]
@cross_sectional_areas_of_air_channel_inlet_7.setter
def cross_sectional_areas_of_air_channel_inlet_7(self, value=None):
"""Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
7`"""
self["Cross Sectional Areas of Air Channel Inlet 7"] = value
@property
def zone_8_name(self):
"""field `Zone 8 Name`
Args:
value (str): value for IDD Field `Zone 8 Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `zone_8_name` or None if not set
"""
return self["Zone 8 Name"]
@zone_8_name.setter
def zone_8_name(self, value=None):
"""Corresponds to IDD field `Zone 8 Name`"""
self["Zone 8 Name"] = value
@property
def distance_from_top_of_thermal_chimney_to_inlet_8(self):
"""field `Distance from Top of Thermal Chimney to Inlet 8`
| Units: m
Args:
value (float): value for IDD Field `Distance from Top of Thermal Chimney to Inlet 8`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `distance_from_top_of_thermal_chimney_to_inlet_8` or None if not set
"""
return self["Distance from Top of Thermal Chimney to Inlet 8"]
@distance_from_top_of_thermal_chimney_to_inlet_8.setter
def distance_from_top_of_thermal_chimney_to_inlet_8(self, value=None):
"""Corresponds to IDD field `Distance from Top of Thermal Chimney to
Inlet 8`"""
self["Distance from Top of Thermal Chimney to Inlet 8"] = value
@property
def relative_ratios_of_air_flow_rates_passing_through_zone_8(self):
"""field `Relative Ratios of Air Flow Rates Passing through Zone 8`
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Ratios of Air Flow Rates Passing through Zone 8`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_ratios_of_air_flow_rates_passing_through_zone_8` or None if not set
"""
return self["Relative Ratios of Air Flow Rates Passing through Zone 8"]
@relative_ratios_of_air_flow_rates_passing_through_zone_8.setter
def relative_ratios_of_air_flow_rates_passing_through_zone_8(
self,
value=None):
"""Corresponds to IDD field `Relative Ratios of Air Flow Rates Passing
through Zone 8`"""
self[
"Relative Ratios of Air Flow Rates Passing through Zone 8"] = value
@property
def cross_sectional_areas_of_air_channel_inlet_8(self):
"""field `Cross Sectional Areas of Air Channel Inlet 8`
| Units: m2
Args:
value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 8`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `cross_sectional_areas_of_air_channel_inlet_8` or None if not set
"""
return self["Cross Sectional Areas of Air Channel Inlet 8"]
@cross_sectional_areas_of_air_channel_inlet_8.setter
def cross_sectional_areas_of_air_channel_inlet_8(self, value=None):
"""Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
8`"""
self["Cross Sectional Areas of Air Channel Inlet 8"] = value
@property
def zone_9_name(self):
"""field `Zone 9 Name`
Args:
value (str): value for IDD Field `Zone 9 Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `zone_9_name` or None if not set
"""
return self["Zone 9 Name"]
@zone_9_name.setter
def zone_9_name(self, value=None):
"""Corresponds to IDD field `Zone 9 Name`"""
self["Zone 9 Name"] = value
@property
def distance_from_top_of_thermal_chimney_to_inlet_9(self):
"""field `Distance from Top of Thermal Chimney to Inlet 9`
| Units: m
Args:
value (float): value for IDD Field `Distance from Top of Thermal Chimney to Inlet 9`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `distance_from_top_of_thermal_chimney_to_inlet_9` or None if not set
"""
return self["Distance from Top of Thermal Chimney to Inlet 9"]
@distance_from_top_of_thermal_chimney_to_inlet_9.setter
def distance_from_top_of_thermal_chimney_to_inlet_9(self, value=None):
"""Corresponds to IDD field `Distance from Top of Thermal Chimney to
Inlet 9`"""
self["Distance from Top of Thermal Chimney to Inlet 9"] = value
@property
def relative_ratios_of_air_flow_rates_passing_through_zone_9(self):
"""field `Relative Ratios of Air Flow Rates Passing through Zone 9`
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Ratios of Air Flow Rates Passing through Zone 9`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_ratios_of_air_flow_rates_passing_through_zone_9` or None if not set
"""
return self["Relative Ratios of Air Flow Rates Passing through Zone 9"]
@relative_ratios_of_air_flow_rates_passing_through_zone_9.setter
def relative_ratios_of_air_flow_rates_passing_through_zone_9(
self,
value=None):
"""Corresponds to IDD field `Relative Ratios of Air Flow Rates Passing
through Zone 9`"""
self[
"Relative Ratios of Air Flow Rates Passing through Zone 9"] = value
@property
def cross_sectional_areas_of_air_channel_inlet_9(self):
"""field `Cross Sectional Areas of Air Channel Inlet 9`
| Units: m2
Args:
value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 9`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `cross_sectional_areas_of_air_channel_inlet_9` or None if not set
"""
return self["Cross Sectional Areas of Air Channel Inlet 9"]
@cross_sectional_areas_of_air_channel_inlet_9.setter
def cross_sectional_areas_of_air_channel_inlet_9(self, value=None):
"""Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
9`"""
self["Cross Sectional Areas of Air Channel Inlet 9"] = value
@property
def zone_10_name(self):
"""field `Zone 10 Name`
Args:
value (str): value for IDD Field `Zone 10 Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `zone_10_name` or None if not set
"""
return self["Zone 10 Name"]
@zone_10_name.setter
def zone_10_name(self, value=None):
"""Corresponds to IDD field `Zone 10 Name`"""
self["Zone 10 Name"] = value
@property
def distance_from_top_of_thermal_chimney_to_inlet_10(self):
"""field `Distance from Top of Thermal Chimney to Inlet 10`
| Units: m
Args:
value (float): value for IDD Field `Distance from Top of Thermal Chimney to Inlet 10`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `distance_from_top_of_thermal_chimney_to_inlet_10` or None if not set
"""
return self["Distance from Top of Thermal Chimney to Inlet 10"]
@distance_from_top_of_thermal_chimney_to_inlet_10.setter
def distance_from_top_of_thermal_chimney_to_inlet_10(self, value=None):
"""Corresponds to IDD field `Distance from Top of Thermal Chimney to
Inlet 10`"""
self["Distance from Top of Thermal Chimney to Inlet 10"] = value
@property
def relative_ratios_of_air_flow_rates_passing_through_zone_10(self):
"""field `Relative Ratios of Air Flow Rates Passing through Zone 10`
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Ratios of Air Flow Rates Passing through Zone 10`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_ratios_of_air_flow_rates_passing_through_zone_10` or None if not set
"""
return self[
"Relative Ratios of Air Flow Rates Passing through Zone 10"]
@relative_ratios_of_air_flow_rates_passing_through_zone_10.setter
def relative_ratios_of_air_flow_rates_passing_through_zone_10(
self,
value=None):
"""Corresponds to IDD field `Relative Ratios of Air Flow Rates Passing
through Zone 10`"""
self[
"Relative Ratios of Air Flow Rates Passing through Zone 10"] = value
@property
def cross_sectional_areas_of_air_channel_inlet_10(self):
"""field `Cross Sectional Areas of Air Channel Inlet 10`
| Units: m2
Args:
value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 10`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `cross_sectional_areas_of_air_channel_inlet_10` or None if not set
"""
return self["Cross Sectional Areas of Air Channel Inlet 10"]
@cross_sectional_areas_of_air_channel_inlet_10.setter
def cross_sectional_areas_of_air_channel_inlet_10(self, value=None):
"""Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
10`"""
self["Cross Sectional Areas of Air Channel Inlet 10"] = value
@property
def zone_11_name(self):
"""field `Zone 11 Name`
Args:
value (str): value for IDD Field `Zone 11 Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `zone_11_name` or None if not set
"""
return self["Zone 11 Name"]
@zone_11_name.setter
def zone_11_name(self, value=None):
"""Corresponds to IDD field `Zone 11 Name`"""
self["Zone 11 Name"] = value
@property
def distance_from_top_of_thermal_chimney_to_inlet_11(self):
"""field `Distance from Top of Thermal Chimney to Inlet 11`
| Units: m
Args:
value (float): value for IDD Field `Distance from Top of Thermal Chimney to Inlet 11`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `distance_from_top_of_thermal_chimney_to_inlet_11` or None if not set
"""
return self["Distance from Top of Thermal Chimney to Inlet 11"]
@distance_from_top_of_thermal_chimney_to_inlet_11.setter
def distance_from_top_of_thermal_chimney_to_inlet_11(self, value=None):
"""Corresponds to IDD field `Distance from Top of Thermal Chimney to
Inlet 11`"""
self["Distance from Top of Thermal Chimney to Inlet 11"] = value
@property
def relative_ratios_of_air_flow_rates_passing_through_zone_11(self):
"""field `Relative Ratios of Air Flow Rates Passing through Zone 11`
| value <= | |
hasattr( listener, "enterSymbolPlusTail" ):
listener.enterSymbolPlusTail(self)
        def exitRule(self, listener:ParseTreeListener):
            # Invoke the listener's exit hook only if the listener defines it.
            if hasattr( listener, "exitSymbolPlusTail" ):
                listener.exitSymbolPlusTail(self)
        def accept(self, visitor:ParseTreeVisitor):
            # Visitor dispatch; falls back to generic child traversal when the
            # visitor has no rule-specific method.
            if hasattr( visitor, "visitSymbolPlusTail" ):
                return visitor.visitSymbolPlusTail(self)
            else:
                return visitor.visitChildren(self)
    def symbolPlusTail(self):
        # ANTLR-generated rule method: `symbolPlusTail` is either
        # `symbol symbolPlusTail` or empty, chosen on one token of lookahead.
        # `self.state = N` values are fixed ATN state numbers — do not edit.
        localctx = SygusParser.SymbolPlusTailContext(self, self._ctx, self.state)
        self.enterRule(localctx, 34, self.RULE_symbolPlusTail)
        try:
            self.state = 202
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.SYMBOL]:
                # Alt 1: consume another symbol, then recurse on the tail.
                self.enterOuterAlt(localctx, 1)
                self.state = 198
                self.symbol()
                self.state = 199
                self.symbolPlusTail()
                pass
            elif token in [SygusParser.T__2]:
                # Alt 2: empty tail — the closing delimiter token follows.
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SetOptsCmdContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `setOptsCmd` grammar rule.
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def optList(self):
            # Child accessor: the single `optList` subtree.
            return self.getTypedRuleContext(SygusParser.OptListContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_setOptsCmd
        def enterRule(self, listener:ParseTreeListener):
            # Dispatch only if the listener implements the hook.
            if hasattr( listener, "enterSetOptsCmd" ):
                listener.enterSetOptsCmd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetOptsCmd" ):
                listener.exitSetOptsCmd(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetOptsCmd" ):
                return visitor.visitSetOptsCmd(self)
            else:
                # Generic traversal of children when no specific visit method.
                return visitor.visitChildren(self)
    def setOptsCmd(self):
        # Generated rule method: matches the fixed sequence
        # T__0 T__14 optList T__2 (grammar literals; presumably the
        # parenthesized `set-opts` form — verify against the .g4 grammar).
        localctx = SygusParser.SetOptsCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 36, self.RULE_setOptsCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 204
            self.match(SygusParser.T__0)
            self.state = 205
            self.match(SygusParser.T__14)
            self.state = 206
            self.optList()
            self.state = 207
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class OptListContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `optList` grammar rule.
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbolPairPlus(self):
            # Child accessor: the single `symbolPairPlus` subtree.
            return self.getTypedRuleContext(SygusParser.SymbolPairPlusContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_optList
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterOptList" ):
                listener.enterOptList(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitOptList" ):
                listener.exitOptList(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitOptList" ):
                return visitor.visitOptList(self)
            else:
                return visitor.visitChildren(self)
    def optList(self):
        # Generated rule method: matches T__0 symbolPairPlus T__2
        # (a delimited list of symbol pairs).
        localctx = SygusParser.OptListContext(self, self._ctx, self.state)
        self.enterRule(localctx, 38, self.RULE_optList)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 209
            self.match(SygusParser.T__0)
            self.state = 210
            self.symbolPairPlus()
            self.state = 211
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SymbolPairPlusContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `symbolPairPlus` rule
        # (one symbolPair followed by an optional tail).
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbolPair(self):
            return self.getTypedRuleContext(SygusParser.SymbolPairContext,0)
        def symbolPairPlusTail(self):
            return self.getTypedRuleContext(SygusParser.SymbolPairPlusTailContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_symbolPairPlus
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSymbolPairPlus" ):
                listener.enterSymbolPairPlus(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSymbolPairPlus" ):
                listener.exitSymbolPairPlus(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSymbolPairPlus" ):
                return visitor.visitSymbolPairPlus(self)
            else:
                return visitor.visitChildren(self)
    def symbolPairPlus(self):
        # Generated rule method: one `symbolPair` followed by the
        # (possibly empty) `symbolPairPlusTail` — a right-recursive "+" list.
        localctx = SygusParser.SymbolPairPlusContext(self, self._ctx, self.state)
        self.enterRule(localctx, 40, self.RULE_symbolPairPlus)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 213
            self.symbolPair()
            self.state = 214
            self.symbolPairPlusTail()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SymbolPairPlusTailContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `symbolPairPlusTail` rule.
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbolPair(self):
            return self.getTypedRuleContext(SygusParser.SymbolPairContext,0)
        def symbolPairPlusTail(self):
            return self.getTypedRuleContext(SygusParser.SymbolPairPlusTailContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_symbolPairPlusTail
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSymbolPairPlusTail" ):
                listener.enterSymbolPairPlusTail(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSymbolPairPlusTail" ):
                listener.exitSymbolPairPlusTail(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSymbolPairPlusTail" ):
                return visitor.visitSymbolPairPlusTail(self)
            else:
                return visitor.visitChildren(self)
    def symbolPairPlusTail(self):
        # Generated rule method: either `symbolPair symbolPairPlusTail`
        # (lookahead T__0 opens another pair) or empty (lookahead T__2).
        localctx = SygusParser.SymbolPairPlusTailContext(self, self._ctx, self.state)
        self.enterRule(localctx, 42, self.RULE_symbolPairPlusTail)
        try:
            self.state = 220
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0]:
                # Alt 1: another pair follows.
                self.enterOuterAlt(localctx, 1)
                self.state = 216
                self.symbolPair()
                self.state = 217
                self.symbolPairPlusTail()
                pass
            elif token in [SygusParser.T__2]:
                # Alt 2: empty tail.
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SymbolPairContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for `symbolPair`
        # (a symbol paired with a quoted literal).
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def QUOTEDLIT(self):
            # Terminal accessor for the quoted-literal token.
            return self.getToken(SygusParser.QUOTEDLIT, 0)
        def getRuleIndex(self):
            return SygusParser.RULE_symbolPair
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSymbolPair" ):
                listener.enterSymbolPair(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSymbolPair" ):
                listener.exitSymbolPair(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSymbolPair" ):
                return visitor.visitSymbolPair(self)
            else:
                return visitor.visitChildren(self)
    def symbolPair(self):
        # Generated rule method: matches T__0 symbol QUOTEDLIT T__2
        # (a delimited symbol/quoted-literal pair).
        localctx = SygusParser.SymbolPairContext(self, self._ctx, self.state)
        self.enterRule(localctx, 44, self.RULE_symbolPair)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 222
            self.match(SygusParser.T__0)
            self.state = 223
            self.symbol()
            self.state = 224
            self.match(SygusParser.QUOTEDLIT)
            self.state = 225
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class FunDefCmdContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `funDefCmd` rule
        # (name, argument list, return sort, body term).
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def argList(self):
            return self.getTypedRuleContext(SygusParser.ArgListContext,0)
        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)
        def term(self):
            return self.getTypedRuleContext(SygusParser.TermContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_funDefCmd
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterFunDefCmd" ):
                listener.enterFunDefCmd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitFunDefCmd" ):
                listener.exitFunDefCmd(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitFunDefCmd" ):
                return visitor.visitFunDefCmd(self)
            else:
                return visitor.visitChildren(self)
    def funDefCmd(self):
        # Generated rule method: matches
        # T__0 T__15 symbol argList sortExpr term T__2
        # (the function-definition command form).
        localctx = SygusParser.FunDefCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 46, self.RULE_funDefCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 227
            self.match(SygusParser.T__0)
            self.state = 228
            self.match(SygusParser.T__15)
            self.state = 229
            self.symbol()
            self.state = 230
            self.argList()
            self.state = 231
            self.sortExpr()
            self.state = 232
            self.term()
            self.state = 233
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class FunDeclCmdContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `funDeclCmd` rule
        # (name, argument sorts, return sort — no body).
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def sortStar(self):
            return self.getTypedRuleContext(SygusParser.SortStarContext,0)
        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_funDeclCmd
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterFunDeclCmd" ):
                listener.enterFunDeclCmd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitFunDeclCmd" ):
                listener.exitFunDeclCmd(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitFunDeclCmd" ):
                return visitor.visitFunDeclCmd(self)
            else:
                return visitor.visitChildren(self)
    def funDeclCmd(self):
        # Generated rule method: matches
        # T__0 T__16 symbol T__0 sortStar T__2 sortExpr T__2
        # (function declaration: name, delimited sort list, return sort).
        localctx = SygusParser.FunDeclCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 48, self.RULE_funDeclCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 235
            self.match(SygusParser.T__0)
            self.state = 236
            self.match(SygusParser.T__16)
            self.state = 237
            self.symbol()
            self.state = 238
            self.match(SygusParser.T__0)
            self.state = 239
            self.sortStar()
            self.state = 240
            self.match(SygusParser.T__2)
            self.state = 241
            self.sortExpr()
            self.state = 242
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SortStarContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `sortStar` rule
        # (a possibly-empty list of sort expressions).
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)
        def sortStar(self):
            return self.getTypedRuleContext(SygusParser.SortStarContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_sortStar
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSortStar" ):
                listener.enterSortStar(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSortStar" ):
                listener.exitSortStar(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSortStar" ):
                return visitor.visitSortStar(self)
            else:
                return visitor.visitChildren(self)
    def sortStar(self):
        # Generated rule method: `sortExpr sortStar` when the lookahead can
        # start a sort (several token alternatives), or empty when the
        # closing T__2 follows.
        localctx = SygusParser.SortStarContext(self, self._ctx, self.state)
        self.enterRule(localctx, 50, self.RULE_sortStar)
        try:
            self.state = 248
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0, SygusParser.T__6, SygusParser.T__7, SygusParser.T__8, SygusParser.SYMBOL]:
                # Alt 1: another sort expression follows.
                self.enterOuterAlt(localctx, 1)
                self.state = 244
                self.sortExpr()
                self.state = 245
                self.sortStar()
                pass
            elif token in [SygusParser.T__2]:
                # Alt 2: empty list.
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ArgListContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `argList` rule.
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbolSortPairStar(self):
            # Child accessor: the delimited (symbol, sort) pair list.
            return self.getTypedRuleContext(SygusParser.SymbolSortPairStarContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_argList
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterArgList" ):
                listener.enterArgList(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitArgList" ):
                listener.exitArgList(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitArgList" ):
                return visitor.visitArgList(self)
            else:
                return visitor.visitChildren(self)
    def argList(self):
        # Generated rule method: matches T__0 symbolSortPairStar T__2.
        localctx = SygusParser.ArgListContext(self, self._ctx, self.state)
        self.enterRule(localctx, 52, self.RULE_argList)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 250
            self.match(SygusParser.T__0)
            self.state = 251
            self.symbolSortPairStar()
            self.state = 252
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SymbolSortPairStarContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for the `symbolSortPairStar` rule
        # (a possibly-empty list of (symbol, sort) pairs).
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbolSortPair(self):
            return self.getTypedRuleContext(SygusParser.SymbolSortPairContext,0)
        def symbolSortPairStar(self):
            return self.getTypedRuleContext(SygusParser.SymbolSortPairStarContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_symbolSortPairStar
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSymbolSortPairStar" ):
                listener.enterSymbolSortPairStar(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSymbolSortPairStar" ):
                listener.exitSymbolSortPairStar(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSymbolSortPairStar" ):
                return visitor.visitSymbolSortPairStar(self)
            else:
                return visitor.visitChildren(self)
    def symbolSortPairStar(self):
        # Generated rule method: `symbolSortPair symbolSortPairStar` when the
        # lookahead T__0 opens another pair, or empty when T__2 follows.
        localctx = SygusParser.SymbolSortPairStarContext(self, self._ctx, self.state)
        self.enterRule(localctx, 54, self.RULE_symbolSortPairStar)
        try:
            self.state = 258
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0]:
                # Alt 1: another (symbol, sort) pair follows.
                self.enterOuterAlt(localctx, 1)
                self.state = 254
                self.symbolSortPair()
                self.state = 255
                self.symbolSortPairStar()
                pass
            elif token in [SygusParser.T__2]:
                # Alt 2: empty list.
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SymbolSortPairContext(ParserRuleContext):
        # ANTLR-generated parse-tree node for `symbolSortPair`
        # (one symbol with its sort expression).
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_symbolSortPair
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSymbolSortPair" ):
                listener.enterSymbolSortPair(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSymbolSortPair" ):
                listener.exitSymbolSortPair(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSymbolSortPair" ):
                return visitor.visitSymbolSortPair(self)
            else:
                return visitor.visitChildren(self)
    def symbolSortPair(self):
        # Generated rule method: matches T__0 symbol sortExpr T__2.
        localctx = SygusParser.SymbolSortPairContext(self, self._ctx, self.state)
        self.enterRule(localctx, 56, self.RULE_symbolSortPair)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 260
            self.match(SygusParser.T__0)
            self.state = 261
            self.symbol()
            self.state = 262
            self.sortExpr()
            self.state = 263
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class TermContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def symbol(self):
return self.getTypedRuleContext(SygusParser.SymbolContext,0)
def termStar(self):
return self.getTypedRuleContext(SygusParser.TermStarContext,0)
def literal(self):
return self.getTypedRuleContext(SygusParser.LiteralContext,0)
def letTerm(self):
return self.getTypedRuleContext(SygusParser.LetTermContext,0)
def getRuleIndex(self):
return SygusParser.RULE_term
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerm" ):
listener.enterTerm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, | |
<reponame>SwampDolphin97/Social_Networking_Website<gh_stars>0
from flask import Flask, flash, app, request, session, render_template, url_for,\
logging, redirect
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from passlib.hash import sha256_crypt
from functools import wraps
from datetime import datetime, timedelta
import pymysql.cursors
app = Flask(__name__)
# NOTE(review): one module-level PyMySQL connection is shared by every request.
# PyMySQL connections are not thread-safe — confirm the app runs single-threaded,
# or move to per-request connections.
#hoyin
# conn = pymysql.connect(host='localhost',
#                     user='root',
#                     password='<PASSWORD>',
#                     port=8889,
#                     db='social',
#                     charset='utf8mb4',
#                     cursorclass=pymysql.cursors.DictCursor)
# ashley
# Active connection. '<PASSWORD>' is a scrubbed placeholder — credentials
# should come from configuration/environment, not source control.
conn = pymysql.connect(host='localhost',
                    user='root',
                    password='<PASSWORD>',
                    #port=8889,
                    db='social',
                    charset='utf8mb4',
                    cursorclass=pymysql.cursors.DictCursor)
# hui
#conn = pymysql.connect(host='localhost',
#                     user='root',
#                     password='password',
#                     db='social',
#                     charset='utf8mb4',
#                     cursorclass=pymysql.cursors.DictCursor)
# timeout function
@app.before_request
def make_session_permanent():
    """Run before every request: enforce a 30-minute rolling session lifetime."""
    app.permanent_session_lifetime = timedelta(minutes=30)
    session.permanent = True
@app.route('/')
def index():
    """Landing page: authenticated users go straight to their dashboard."""
    if 'logged_in' not in session:
        return render_template('home.html')
    return redirect(url_for('dashboard'))
class RegisterForm(Form):
    """WTForms registration form: names, username, and a confirmed password."""
    first_name = StringField('First Name', [validators.Length(min=1, max=50)])
    last_name = StringField('Last Name', [validators.Length(min=1, max=50)])
    username = StringField('Username', [validators.Length(min=4, max=50)])
    # Password must be 5-100 chars and match the `confirm` field below.
    password = PasswordField('Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords do not match'),
        validators.Length(min=5, max=100)
    ])
    confirm = PasswordField('Confirm Password')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new Person row from the registration form.

    GET renders the empty form. POST validates the form, rejects duplicate
    usernames, stores a salted sha256_crypt password hash, and redirects to
    the login page on success.
    """
    form = RegisterForm(request.form)
    if request.method == 'POST' and form.validate():
        first_name = form.first_name.data
        last_name = form.last_name.data
        username = form.username.data
        # Store only the salted sha256_crypt hash, never the plain password.
        # (The original comment claimed a switch to md5; the code uses sha256_crypt.)
        password = sha256_crypt.encrypt(str(form.password.data))
        cur = conn.cursor()
        try:
            # Username is the natural key — reject duplicates up front.
            cur.execute('SELECT * FROM Person WHERE username = %s', (username,))
            if cur.fetchone():
                flash('This username is already taken')
                return redirect(url_for('register'))
            cur.execute("INSERT INTO Person(first_name, last_name, username, password)\
            VALUES(%s, %s, %s, %s)", (first_name, last_name, username, password))
            conn.commit()
        finally:
            # Close on every path; the original leaked the cursor on DB errors.
            cur.close()
        flash('You are now registered and can log in', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', form=form)
@app.route('/login', methods=['POST','GET'])
def login():
    """Authenticate a user against the stored sha256_crypt password hash.

    GET renders the login form. POST looks up the username, verifies the
    candidate password, and on success marks the session as logged in.
    """
    if request.method == 'POST':
        username = request.form['username']
        password_candidate = request.form['password']
        cur = conn.cursor()
        try:
            # cursor.execute returns the number of matched rows.
            found = cur.execute("SELECT * FROM Person WHERE username = %s", [username])
            data = cur.fetchone() if found > 0 else None
        finally:
            # The original left `cur.close()` unreachable (both verify branches
            # returned first) and leaked the cursor; close on every path.
            cur.close()
        if data is None:
            error = "Username not found"
            return render_template('login.html', error=error)
        if sha256_crypt.verify(password_candidate, data['password']):
            session['logged_in'] = True
            session['username'] = username
            flash("You are now logged in", "success")
            return redirect(url_for("dashboard"))
        error = "Incorrect password"
        return render_template("login.html", error=error)
    return render_template('login.html')
@app.route('/changePassword', methods=['POST','GET'])
def changePassword():
    """Let a logged-in user change their password after re-entering the old one.

    On success the session is cleared and the user must log in again.
    """
    if 'logged_in' not in session:
        # Mirror the is_logged_in decorator; the original fell through and
        # returned None for anonymous users, which Flask turns into an error.
        flash('Not authorized, please login', 'danger')
        return redirect(url_for('login'))
    if request.method == 'POST':
        curPass = request.form["currentPass"]
        new_hash = sha256_crypt.encrypt(str(request.form["newPass"]))
        username = session['username']
        cur = conn.cursor()
        try:
            cur.execute('SELECT * FROM Person WHERE username = %s', [username])
            data = cur.fetchone()
            if not sha256_crypt.verify(curPass, data['password']):
                error = "Incorrect password"
                return render_template('changePassword.html', error=error)
            cur.execute("UPDATE Person SET password=%s WHERE username=%s",
                        (new_hash, username))
            conn.commit()
        finally:
            # The original leaked the cursor on the wrong-password path.
            cur.close()
        session.clear()
        flash("Password changed successfully", "success")
        return redirect(url_for("login"))
    return render_template('changePassword.html')
@app.route('/changeUsername', methods=['POST','GET'])
def changeUsername():
    """Let a logged-in user rename their account after re-entering their password.

    On success the session is cleared and the user must log in again.
    """
    if 'logged_in' not in session:
        # Mirror the is_logged_in decorator; the original returned None here.
        flash('Not authorized, please login', 'danger')
        return redirect(url_for('login'))
    if request.method == 'POST':
        curPass = request.form["currPass"]
        username_cand = request.form["newUsername"]
        username = session['username']
        cur = conn.cursor()
        try:
            cur.execute('SELECT * FROM Person WHERE username = %s', [username])
            data = cur.fetchone()
            if not sha256_crypt.verify(curPass, data['password']):
                error = "Incorrect password"
                return render_template('changeUsername.html', error=error)
            cur.execute("UPDATE Person SET username=%s WHERE username=%s",
                        (username_cand, username))
            conn.commit()
        finally:
            # The original leaked the cursor on the wrong-password path.
            cur.close()
        session.clear()
        flash("Username changed successfully, please login again", "success")
        return redirect(url_for("login"))
    return render_template('changeUsername.html')
# Decorator: gate a view behind an active login session.
def is_logged_in(f):
    """Wrap view *f* so anonymous visitors are redirected to the login page."""
    @wraps(f)
    def wrap(*args, **kwargs):
        if 'logged_in' not in session:
            flash('Not authorized, please login', 'danger')
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrap
# search function
# add timeout session for extra feature
# Logout function
@app.route("/logout")
def logout():
    """Clear the session and show the public home page."""
    session.clear()
    # flash() stores into the (new, empty) session, so it must run AFTER
    # clear() — clearing afterwards would wipe the message.
    flash('You are now logged out', 'success')
    return render_template('home.html')
# Leads to dashboard
@app.route('/dashboard')
@is_logged_in
def dashboard():
    """Render the user's feed.

    Collects, newest first: the user's own/public posts and their comments,
    tag notifications, the user's friend groups, posts the user is tagged in
    (accepted tags only, status = 1), and posts/comments shared to groups the
    user belongs to.
    """
    username = session['username']
    cursor = conn.cursor()
    # Own posts plus anything public.
    query = 'SELECT timest, content_name, id FROM Content WHERE username = %s OR public = 1 ORDER BY id DESC'
    cursor.execute(query, (username))
    data = cursor.fetchall()
    # pull comments from content, share and tag
    query2 = 'SELECT Comment.timest, comment_text, Comment.id, Comment.username FROM Comment JOIN Content on Comment.id = Content.id WHERE public = 1 or Content.username = %s ORDER BY id DESC'
    cursor.execute(query2, (username))
    comments = cursor.fetchall()
    # Tag notifications joined to the taggee's name (no parameters).
    query3 = 'SELECT Tag.timest, Tag.username_taggee, Tag.id, Person.first_name, Person.last_name FROM Tag NATURAL JOIN Person WHERE Tag.username_taggee = Person.username ORDER BY id DESC'
    cursor.execute(query3,)
    tags = cursor.fetchall()
    # cursor.close()
    #only the owner can share
    query4 = 'SELECT group_name FROM FriendGroup WHERE username = %s'
    cursor.execute(query4,(username))
    groups = cursor.fetchall()
    # cursor.close()
    # cursor = conn.cursor()
    # Posts where this user is tagged and the tag is accepted (status = 1).
    tagged_query = 'SELECT DISTINCT Content.timest, content_name, Content.id FROM Content JOIN Tag ON Content.id = Tag.id WHERE (username_taggee = %s) AND (status = 1) ORDER BY timest DESC'
    cursor.execute(tagged_query,(username))
    data2 = cursor.fetchall()
    tagged_comments = 'SELECT DISTINCT Comment.id, Comment.timest, comment_text, Comment.username\
    FROM Comment JOIN Tag on Comment.id = Tag.id WHERE status = 1 and username_taggee = %s ORDER BY id DESC'
    cursor.execute(tagged_comments,(username))
    comments_tagged = cursor.fetchall()
    # shared_posts, comments, tags
    shared_query = "SELECT DISTINCT timest, Share.id, content_name \
    from Share join Content on Share.id = Content.id join Member on Share.group_name = Member.group_name \
    where Member.username = %s order by timest desc"
    cursor.execute(shared_query,(username))
    data3 = cursor.fetchall()
    shared_comments = 'SELECT DISTINCT Comment.id, Comment.timest, comment_text, Comment.username \
    FROM Comment JOIN Share on Comment.id = Share.id JOIN Member on Share.group_name = Member.group_name \
    where Member.username = %s order by timest desc'
    cursor.execute(shared_comments,(username))
    comments_shared = cursor.fetchall()
    cursor.close()
    return render_template('dashboard.html', username=username, posts=data,\
    comments=comments, tags = tags, taggedposts = data2, groups=groups,\
    shared_posts = data3, comments_tagged = comments_tagged, \
    comments_shared = comments_shared)
#user can see all the posts that they made
@app.route('/addfriends', methods=['GET','POST'])
def add_friends():
    """Resolve an ambiguous friend add: the user picked an exact username.

    Uses the group/creator stashed in the session by add_friend() when more
    than one Person matched the given first/last name.
    """
    username = request.form['username']
    # The original also read addfriend_first_name/last_name from the session
    # but never used them — dropped as dead locals.
    group_name = session["addfriend_group_name"]
    username_creator = session["addfriend_creator"]
    cur = conn.cursor()
    try:
        query2 = "SELECT * FROM Member WHERE username = %s && group_name = %s"
        cur.execute(query2, (username, group_name))
        data = cur.fetchone()
        if (data):
            flash('This friend is already in your friend group!', "danger")
            conn.commit()
            return render_template('addfriend.html')
        query1 = "INSERT INTO Member(username, group_name, username_creator) VALUES(%s, %s, %s)"
        cur.execute(query1, (username, group_name, username_creator))
        conn.commit()
    finally:
        # Close on every path, including DB errors.
        cur.close()
    flash('Your friend has been added to your friend group!', "success")
    return render_template('addfriend.html')
@app.route('/addfriend', methods=['GET','POST'])
def add_friend():
    """Add a person (looked up by first/last name) to a friend group.

    0 matches -> error; >1 matches -> stash context in the session and ask
    the user to disambiguate by username (altaddfriend.html); exactly 1
    match -> insert a Member row unless it already exists.
    """
    first_name = request.form['first_name']
    last_name = request.form['last_name']
    group_name = request.form['group_name']
    username_creator = request.form['username_creator']
    cur = conn.cursor()
    try:
        query1 = "SELECT COUNT(*) FROM Person WHERE first_name = %s && last_name = %s"
        cur.execute(query1, (first_name, last_name))
        num = cur.fetchone()
        if (num["COUNT(*)"] == 0):
            # Original leaked the cursor on this branch (never closed it).
            flash("This person does not exist! Tell them to create an account!", "danger")
            return render_template('addfriend.html')
        elif (num["COUNT(*)"] > 1):
            # Ambiguous name: remember the request so add_friends() can finish it.
            session["addfriend_first_name"] = first_name
            session["addfriend_last_name"] = last_name
            session["addfriend_group_name"] = group_name
            session["addfriend_creator"] = username_creator
            return render_template('altaddfriend.html')
        query2 = "SELECT username FROM Person WHERE first_name=%s && last_name=%s"
        cur.execute(query2, (first_name, last_name))
        username2 = cur.fetchone()
        query3 = "SELECT COUNT(*) FROM Member WHERE username = %s && group_name = %s"
        cur.execute(query3, (username2["username"], group_name))
        data = cur.fetchone()
        if (data["COUNT(*)"] != 0):
            flash('This friend is already in your friend group!', "danger")
            conn.commit()
            return render_template('addfriend.html')
        query1 = "INSERT INTO Member(username, group_name, username_creator) VALUES(%s, %s, %s)"
        cur.execute(query1, (username2["username"], group_name, username_creator))
        conn.commit()
    finally:
        # Close on every path; the original only closed on two of four branches.
        cur.close()
    flash('Your friend has been added to your friend group!', "success")
    return render_template('addfriend.html')
@app.route('/deletefriend', methods=['GET','POST'])
def delete_friend():
    """Remove a member from a friend group.

    The original body referenced `data`, `cur`, `username`, `group_name`
    and `username_creator` without ever defining them (NameError at
    runtime); this reconstructs the missing setup following the pattern
    of add_friends().
    """
    username = request.form['username']
    group_name = request.form['group_name']
    username_creator = request.form['username_creator']
    cur = conn.cursor()
    try:
        # Check membership first so we can report a helpful error.
        query1 = "SELECT * FROM Member WHERE username = %s && group_name = %s && username_creator = %s"
        cur.execute(query1, (username, group_name, username_creator))
        data = cur.fetchone()
        if (data):
            query2 = "DELETE FROM Member WHERE username = %s && group_name = %s && username_creator = %s"
            cur.execute(query2, (username, group_name, username_creator))
            conn.commit()
            flash('Your friend has been removed from your friend group!', "success")
            return render_template('addfriend.html')
        flash('This friend does not exist in your friend group!', "danger")
        return render_template('addfriend.html')
    finally:
        cur.close()
@app.route('/managefriend', methods=['GET','POST'])
def manage_friend():
    """Show the friend-management page (the add/remove friend form)."""
    return render_template('addfriend.html')
@app.route('/post', methods=['GET', 'POST'])
def post():
if 'logged_in' in session:
#username
username = session['username']
cursor = conn.cursor()
content_name = request.form['content_name']
p_status = False if request.form.get('p_status') else True
if p_status == True:
query = 'INSERT INTO Content (content_name, username, public) VALUES(%s, %s, %s)'
cursor.execute(query, (content_name, username, p_status))
conn.commit()
cursor.close()
flash('You have successfully posted!', 'success')
return redirect(url_for('dashboard'))
elif p_status == False:
query | |
"清新", "fresh", 1939],
["Mohu", "模胡", "fuzzy", 1935],
["Konghuang", "恐慌", "scared", 1919],
["Kaixin", "开心", "happy", 1917],
["Gaowei", "高位", "high level", 1910],
["Koutou", "口头", "oral", 1902],
["Kaikuo", "开阔", "open", 1889],
["Hemu", "和睦", "harmonious", 1888],
["Youyong", "有用", "useful", 1887],
["Yanming", "严明", "strict", 1887],
["Jixiang", "吉祥", "propitious", 1876],
["Dasheng", "大声", "loud", 1874],
["Jianlou", "简陋", "crude", 1872],
["Feijiu", "废旧", "worn out", 1868],
["Jingye", "敬业", "professional", 1865],
["Canzhong", "惨重", "serious", 1863],
["Yanre", "炎热", "burning hot", 1856],
["Yinan", "疑难", "difficult", 1855],
["Weiji", "危急", "critical", 1850],
["Duoyun", "多云", "cloudy", 1849],
["Zhengjie", "整洁", "neat", 1848],
["Minglang", "明朗", "bright", 1847],
["Huopo", "活泼", "lively", 1841],
["Songxie", "松懈", "lax", 1841],
["Ewai", "额外", "extra", 1840],
["Baoman", "爆满", "full", 1828],
["Anxin", "安心", "relieved", 1817],
["Shiheng", "失衡", "unbalanced", 1816],
["Chaoqian", "超前", "advanced", 1805],
["Wenxin", "温馨", "warm", 1802],
["Youmo", "幽默", "humorous", 1802],
["Tongfeng", "通风", "well ventilated", 1782],
["Liwai", "例外", "exceptional", 1772],
["Qiaomiao", "巧妙", "ingenious", 1770],
["Yanzheng", "严正", "grave", 1766],
["Jidu", "极度", "extreme", 1754],
["Qite", "奇特", "unusual", 1742],
["Maoxian", "冒险", "risky", 1735],
["Gaoya", "高雅", "lofty", 1729],
["Xiongmeng", "凶猛", "fierce", 1709],
["Qingwei", "轻微", "slight", 1707],
["Zaogao", "糟糕", "bad", 1707],
["Changjiu", "长久", "long-time", 1703],
["Yanjin", "严谨", "rigorous", 1700],
["Wuqi", "无期", "indefinite", 1699],
["Kuankuo", "宽阔", "broad", 1693],
["Nanmian", "难免", "unavoidable", 1691],
["Jianmei", "健美", "healthy", 1689],
["Cubao", "粗暴", "crude", 1684],
["Jingya", "惊讶", "surprised", 1679],
["Ganzao", "干燥", "dry", 1676],
["Jiyue", "集约", "intensive", 1665],
["Miren", "迷人", "charming", 1664],
["Guaiguai", "怪怪", "strange", 1664],
["Youqian", "有钱", "rich", 1657],
["Xinxi", "欣喜", "joyful", 1654],
["Tashi", "踏实", "steadfast", 1647],
["Toudeng", "头等", "prime", 1637],
["Bianzheng", "辩证", "dialectical", 1635],
["Tanlv", "坦率", "frank", 1635],
["Jianyao", "简要", "brief", 1632],
["Shichang", "失常", "abnormal", 1625],
["Xiazhai", "狭窄", "narrow", 1609],
["Zouxiao", "奏效", "effective", 1608],
["Chentong", "沉痛", "painful", 1605],
["Shentong", "沈痛", "painful", 1605],
["Anran", "安然", "safe", 1600],
["Shangcan", "伤残", "disabled", 1595],
["Liaokuo", "辽阔", "vast", 1588],
["Bobo", "勃勃", "vigorous", 1581],
["Xuyi", "蓄意", "premeditated", 1579],
["Taiping", "太平", "peaceful", 1578],
["Suiji", "随机", "stochastic", 1576],
["Xunchang", "寻常", "common", 1573],
["Lingxing", "零星", "fragmentary", 1570],
["Baohe", "饱和", "saturated", 1569],
["Didao", "地道", "typical", 1564],
["Shiren", "时任", "then", 1561],
["Qima", "起码", "minimum", 1555],
["Zhongsheng", "终生", "life-long", 1552],
["Mingzhi", "明智", "wise", 1552],
["Chengshi", "诚实", "honest", 1551],
["Zhizhu", "执著", "rigid", 1546],
["Zhizhe", "执着", "rigid", 1546],
["Fanduo", "繁多", "many", 1538],
["Kongju", "恐惧", "frightened", 1537],
["Xini", "细腻", "exquisite", 1536],
["Zhenhan", "震撼", "shocking", 1531],
["Jinggan", "精干", "capable", 1525],
["Langman", "浪漫", "romantic", 1521],
["Wuqing", "无情", "brutal", 1521],
["Zhuozhu", "卓著", "outstanding", 1520],
["Zhuozhe", "卓着", "outstanding", 1520],
["Kuanchang", "宽敞", "spacious", 1519],
["Zhuangli", "壮丽", "grand", 1516],
["Feifan", "非凡", "extraordinary", 1510],
["Guoliang", "过量", "excessive", 1509],
["Reqie", "热切", "earnest", 1488],
["Liuchang", "流畅", "smooth", 1485],
["Heian", "黑暗", "dark", 1479],
["Tezhi", "特制", "specially-made", 1479],
["Canyu", "残馀", "remaining", 1474],
["Mianqiang", "勉强", "reluctant", 1467],
["Jiejing", "洁净", "pure", 1462],
["Pinming", "拼命", "desperately", 1459],
["Wangu", "顽固", "stubborn", 1444],
["Minggui", "名贵", "precious", 1432],
["Yinqie", "殷切", "earnest", 1431],
["Qiangsheng", "强盛", "powerful", 1430],
["Beiguan", "悲观", "pessimistic", 1427],
["Yinxing", "隐形", "invisible", 1425],
["Ningjing", "宁静", "tranquil", 1423],
["Jishou", "棘手", "thorny", 1420],
["Lengqing", "冷清", "lonely", 1380],
["Yongheng", "永恒", "eternal", 1377],
["Xiyou", "稀有", "rare", 1376],
["Kegui", "可贵", "valuable", 1376],
["Mabi", "麻痹", "lacking in vigilance", 1373],
["Shanyi", "善意", "well-meaning", 1373],
["Xianshu", "娴熟", "adept", 1372],
["Shenghang", "盛行", "popular", 1371],
["Jianbian", "简便", "simple", 1370],
["Yaoyan", "耀眼", "dazzling", 1366],
["Xixin", "细心", "careful", 1365],
["Pianmian", "片面", "one-sided", 1362],
["Bingfa", "并发", "concurrent", 1351],
["Pingzheng", "平整", "smooth", 1330],
["Mingliang", "明亮", "bright", 1318],
["Qiangshi", "强势", "strong", 1317],
["Renqi", "人气", "popular", 1317],
["Suibian", "随便", "casual", 1315],
["Xianhuo", "鲜活", "bright", 1313],
["Yougong", "有功", "active", 1308],
["Zhunshi", "准时", "punctual", 1308],
["Meiguan", "美观", "artistic", 1307],
["Chongpei", "充沛", "abundant", 1303],
["Lihai", "厉害", "fierce", 1299],
["Shouruan", "手软", "lenient", 1289],
["Duobian", "多变", "changeable", 1288],
["Zhuguan", "主观", "subjective", 1288],
["Mianyan", "绵延", "continuous", 1287],
["Xinglong", "兴隆", "prosperous", 1287],
["Wuzhu", "无助", "no use", 1284],
["Pingping", "平平", "average", 1284],
["Lizhi", "理智", "sane", 1283],
["Xiongwei", "雄伟", "grand", 1282],
["Chiming", "驰名", "famous", 1281],
["Jimi", "机密", "secret", 1281],
["Wuqiong", "无穷", "infinite", 1280],
["Weixiao", "微小", "small", 1278],
["Linchang", "临场", "on site", 1278],
["Gupu", "古朴", "plain", 1269],
["Zhengda", "正大", "honorable", 1268],
["Shuailao", "衰老", "senile", 1266],
["Daotui", "倒退", "back up", 1260],
["Choue", "丑恶", "ugly", 1260],
["Xiumei", "秀美", "elegant", 1257],
["Xiee", "邪恶", "evil", 1255],
["Chijing", "吃惊", "surprised", 1254],
["Pingtan", "平坦", "smooth", 1246],
["Chenji", "沉寂", "quiet", 1245],
["Shenji", "沈寂", "quiet", 1245],
["Yinbi", "隐蔽", "covert", 1239],
["Xishao", "稀少", "scarce", 1236],
["Qiguai", "奇怪", "strange", 1235],
["Bisai", "闭塞", "unenlightened", 1234],
["Eyi", "恶意", "malicious", 1232],
["Shunchang", "顺畅", "smooth", 1230],
["Jihao", "极好", "excellent", 1226],
["Jingming", "精明", "astute", 1220],
["Jingxian", "惊险", "thrilling", 1197],
["Dandiao", "单调", "monotonous", 1194],
["Pusu", "朴素", "simple", 1193],
["Heying", "合营", "joint", 1191],
["Duanzheng", "端正", "straight", 1189],
["Chenmo", "沉默", "silent", 1188],
["Shenmo", "沈默", "silent", 1188],
["Lengluo", "冷落", "desolate", 1185],
["Jixing", "畸形", "abnormal", 1183],
["Anjing", "安静", "peaceful", 1181],
["Pushi", "朴实", "simple", 1180],
["Dapai", "大牌", "famous", 1175],
["Pojiu", "破旧", "worn-out", 1172],
["Youren", "诱人", "attractive", 1170],
["Chuncui", "纯粹", "pure", 1167],
["Congrong", "从容", "calm", 1167],
["Chaoe", "超额", "extra", 1165],
["Chongyu", "充裕", "abundant", 1161],
["Guangda", "光大", "brilliant", 1160],
["Haokan", "好看", "attractive", 1158],
["Limao", "礼貌", "polite", 1156],
["Dianya", "典雅", "elegant", 1156],
["Kunhuo", "困惑", "puzzled", 1155],
["Xuanli", "绚丽", "gorgeous", 1139],
["Kuanrong", "宽容", "tolerant", 1139],
["Youyang", "悠扬", "melodious", 1137],
["Jingzhi", "精致", "fine", 1136],
["Huihong", "恢弘", "broad", 1135],
["Laolei", "劳累", "tired", 1132],
["Xiaotiao", "萧条", "bleak", 1130],
["Zhixiao", "滞销", "unsalable", 1130],
["Ziruo", "自若", "free", 1125],
["Ziru", "自如", "free", 1125],
["Jiebai", "洁白", "pure white", 1123],
["Kanke", "坎坷", "rough", 1122],
["Gudu", "孤独", "lonely", 1119],
["Chaoda", "超大", "ultra-large", 1118],
["Xionghen", "凶狠", "cut-throat", 1110],
["Lisan", "离散", "separate", 1109],
["Jiaoren", "骄人", "outstanding", 1106],
["Ganhe", "干涸", "dry", 1105],
["Fenming", "分明", "distinct", 1105],
["Rongqia", "融洽", "harmonious", 1098],
["Liangli", "亮丽", "sharp", 1098],
["Pingdan", "平淡", "light", 1096],
["Guiju", "规矩", "well-mannered", 1095],
["Pibei", "疲惫", "exhausted", 1095],
["Xiangliang", "响亮", "resounding", 1095],
["Tongsu", "通俗", "popular", 1094],
["Gongdao", "公道", "just", 1094],
["Xiongzhuang", "雄壮", "majestic", 1090],
["Shanliang", "善良", "kind", 1089],
["Shufu", "舒服", "comfortable", 1088],
["Biezhi", "别致", "unique", 1088],
["Xiaosa", "潇洒", "natural", 1087],
["Yaoxing", "侥幸", "lucky", 1086],
["Minrui", "敏锐", "keen", 1084],
["Nanguo", "难过", "sad", 1083],
["Haoqi", "好奇", "curious", 1082],
["Meimiao", "美妙", "wonderful", 1077],
["Duoyu", "多馀", "unnecessary", 1077],
["Yousheng", "优胜", "superior", 1072],
["Liuli", "流利", "fluent", 1069],
["Fuxiu", "腐朽", "decayed", 1067],
["Xiangshi", "翔实", "full and accurate", 1062],
["Shenmei", "审美", "aesthetic", 1059],
["Chaoshi", "潮湿", "moist", 1058],
["Beican", "悲惨", "pitiful", 1047],
["Cuican", "璀璨", "radiant", 1039],
["Dedang", "得当", "appropriate", 1037],
["Dafang", "大方", "natural", 1035],
["Huore", "火热", "fiery", 1034],
["Jiangu", "坚固", "firm", 1034],
["Kongdong", "空洞", "empty", 1026],
["Yongxin", "用心", "attentive", 1026],
["Danbao", "淡薄", "light", 1023],
["Zhiguan", "直观", "direct-viewing", 1020],
["Kewai", "课外", "extracurricular", 1015],
["Gaoduan", "高端", "high-end", 1013],
["Dahong", "大红", "scarlet", 1013],
["Shechi", "奢侈", "luxurious", 1012],
["Bibei", "必备", "necessary", 1009],
["Shirun", "湿润", "moist", 1008],
["Jiaolv", "焦虑", "anxious", 1004],
["Pinghe", "平和", "gentle", 1002],
["Xinkai", "新开", "newly-opened", 998],
["Jimo", "寂默", "lonely", 997],
["Miaomang", "渺茫", "uncertain", 995],
["Shigan", "实干", "practical", 989],
["Songdong", "松动", "loose", 986],
["Qifen", "气愤", "indignant", 982],
["Chengken", "诚恳", "sincere", 977],
["Chaochang", "超常", "superior", 975],
["Huanteng", "欢腾", "jubilant", 970],
["Jimang", "急忙", "hurried", 970],
["Jianting", "坚挺", "strong", 967],
["Keshi", "可视", "visible", 967],
["Kangkai", "慷慨", "generous", 959],
["Binghang", "并行", "parallel", 958],
["Shangxin", "伤心", "sad", 955],
["Yiren", "宜人", "pleasant", 950],
["Kuanguang", "宽广", "broad", 948],
["Xiongyong", "汹涌", "turbulent", 946],
["Gaoqiang", "高强", "excel", 944],
["Xiangcheng", "相称", "symmetric", 944],
["Quezao", "确凿", "conclusive", 943],
["Fengqu", "风趣", "charming", 941],
["Hongwai", "红外", "infrared", 940],
["Yuanda", "远大", "broad", 937],
["Kuada", "夸大", "exaggerating", 934],
["Xunse", "逊色", "inferior", 933],
["Congmang", "匆忙", "hurried", 930],
["Bizhen", "逼真", "lifelike", 923],
["Meiwei", "美味", "delicious", 921],
["Songsan", "松散", "loose", 921],
["Xiaxiao", "狭小", "narrow and small", 919],
["Tongxin", "痛心", "distressed", 914],
["Xirang", "熙攘", "bustling", 909],
["Chuming", "出名", "famous", 909],
["Nenggan", "能干", "competent", 905],
["Jiaoji", "焦急", "anxious", 905],
["Zhuangzhong", "庄重", "grave", 904],
["Shangjia", "上佳", "good", 904],
["Dae", "大额", "large", 903],
["Chuzhong", "出众", "outstanding", 902],
["Guizhong", "贵重", "precious", 898],
["Mingxi", "明晰", "defined", 889],
["Xuexing", "血腥", "bloody", 886],
["Didiao", "低调", "low-key", 886],
["Jingjiao", "警觉", "vigilant", 884],
["Weizhong", "危重", "seriously injured", 884],
["Chunjing", "纯净", "pure", 883],
["Duideng", "对等", "coordinated", 881],
["Zhenzhi", "真挚", "sincere", 875],
["Lunfan", "轮番", "one after another", 868],
["Jiejian", "节俭", "thrifty", 860],
["Chenzhe", "沉着", "calm", 859],
["Chuiwei", "垂危", "critically-ill", 857],
["Quanzi", "全资", "wholly-owned", 857],
["Dangji", "党际", "inter-party", 856],
["Cucao", "粗糙", "rough", 855],
["Kelian", "可怜", "pitiful", 849],
["Huise", "灰色", "pessimistic", 847],
["Deyi", "得意", "self-satisfied", 846],
["Shenyong", "神勇", "supernaturally brave", 842],
["Huamei", "华美", "gorgeous", | |
consumable supplies & those which will get turned into Assets at their destination.': 'Itens de invenrário incluem ambos suprimentos consumíveis & aqueles que se transformarão em Ativos no seu destino.',
'Inventory Management': 'Gerenciamento de Inventário',
'Inventory Stock Position': 'Inventory Stock Position',
'Inventory functionality is available for:': 'Inventário de funcionalidades esta disponível para:',
'Inventory of Effects': 'Inventário de Efeitos',
'Is editing level L%d locations allowed?': 'É permitido editar o nível dos locais L%d?',
'Is it safe to collect water?': 'É seguro coletar água?',
'Is this a strict hierarchy?': 'Esta é uma hierarquia rigorosa?',
'Issuing Authority': 'Autoridade emissora',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Ele captura não apenas os locais onde elas estão ativas, mas também captura informações sobre o conjunto de projetos que está fornecendo em cada região.',
'Italian': 'Italian',
'Item': 'Item',
'Item Added to Shipment': 'Item Incluído para Embarque',
'Item Catalog Details': 'Detalhes do item do catálogo',
'Item Categories': 'Categorias do Item',
'Item Category': 'Categoria do Item',
'Item Category Details': 'Detalhes da categoria de item',
'Item Category added': 'Categoria de item incluída',
'Item Category deleted': 'Categoria de item excluída',
'Item Category updated': 'Atualização da categoria de item',
'Item Details': 'Detalhes do item',
'Item Pack Details': 'Detalhes do pacote de itens',
'Item Pack added': 'Pacote de itens',
'Item Pack deleted': 'Pacote de itens excluído',
'Item Pack updated': 'Itens de Pacote atualizados',
'Item Packs': 'Item de Pacotes',
'Item added': 'Item incluído',
'Item added to Inventory': 'Itens adicionados ao Inventário',
'Item added to shipment': 'Item incluído para embarque',
'Item already in Bundle!': 'Item já no pacote configurável!',
'Item already in Kit!': 'Item já no Kit!',
'Item already in budget!': 'Item já no Orçamento!',
'Item deleted': 'Item Excluído',
'Item removed from Inventory': 'Item removido do Inventário',
'Item updated': 'Item atualizado',
'Items': 'Itens',
'Items in Category can be Assets': 'itens na categoria podem ser ativos',
'Japanese': 'japonês',
'Jerry can': 'Jerry pode',
'Jew': 'Judeu',
'Job Market': 'Mercado de trabalho',
'Job Role': 'Função de trabalho',
'Job Role Catalog': 'Catalogo de Funçao de trabalho',
'Job Role Details': 'Detalhes da Função',
'Job Role added': 'funçao de trabalho inclusa',
'Job Role deleted': 'Funçao de trabalho excluida',
'Job Role updated': 'Função actualizada',
'Job Roles': 'Funções',
'Job Title': 'Título do Cargo',
'Jobs': 'Tarefas',
'Journal': 'Diário',
'Journal Entry Details': 'Detalhes da Entrada de Diário',
'Journal entry added': 'Entrada de diário incluída',
'Journal entry deleted': 'Entrada de diário removida',
'Journal entry updated': 'Entrada de diário atualizado',
'Key': 'Tecla',
'Key Details': 'Detalhes da Chave',
'Key added': 'Chave adicionada',
'Key deleted': 'Chave removida',
'Key updated': 'Chave actualizada',
'Keys': 'Teclas',
'Kit': 'kit',
'Kit Contents': 'Conteúdo Kit',
'Kit Details': 'Detalhes do Kit',
'Kit Updated': 'Kit de Atualização',
'Kit added': 'Pacote adicionado',
'Kit deleted': 'Kit excluído',
'Kit updated': 'Kit de atualização',
'Kits': 'Kits',
'Known Identities': 'Identidades conhecido',
'Known incidents of violence against women/girls': 'Incidentes de violência conhecidos contra mulheres/garotas',
'Known incidents of violence since disaster': 'Incidentes de violência conhecidos desde o desastre',
'Korean': 'Korean',
'LICENSE': 'LICENÇA',
'Lack of material': 'Falta de material',
'Lack of school uniform': 'Falta de uniforme escolar',
'Lack of supplies at school': 'Falta de suprimentos na escola',
'Lack of transport to school': 'Falta de transporte escolar',
'Lactating women': 'Mulheres lactantes',
'Lahar': 'Lahar',
'Landslide': 'Deslizamento',
'Language': 'Linguagem',
'Last Name': 'sobrenome',
'Last known location': 'Último local conhecido',
'Last name': 'Last name',
'Last synchronization time': 'Horário da última sincronização',
'Last updated': 'Última atualização',
'Last updated ': 'Last updated ',
'Last updated by': 'Última atualização por',
'Last updated on': 'Última Atualização em',
'Latitude': 'Latitude',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude is North-South (Up-Down).': 'Latitude é sentido norte-sul (emcima-embaixo).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude é zero na linha do Equador, positiva no hemisfério norte e negativa no hemisfério sul.',
'Latitude of Map Center': 'Latitude DO MAPA Centro',
'Latitude of far northern end of the region of interest.': 'Latitude do extremo Norte longe do Região de interesse.',
'Latitude of far southern end of the region of interest.': 'Latitude da extremidade sul longe do Região de interesse.',
'Latitude should be between': 'Latitude deve estar entre',
'Latrines': 'Privadas',
'Law enforcement, military, homeland and local/private security': 'Execução da lei militar, interna e segurança local/privada',
'Layer': 'Camada',
'Layer Details': 'Detalhes de Camada',
'Layer ID': 'Layer ID',
'Layer Name': 'Layer Name',
'Layer Type': 'Layer Type',
'Layer added': 'Camada incluída',
'Layer deleted': 'Camada excluída',
'Layer has been Disabled': 'Layer has been Disabled',
'Layer has been Enabled': 'Layer has been Enabled',
'Layer updated': 'Camada atualizada',
'Layers': 'Camadas',
'Layers updated': 'Camadas atualizadas',
'Layout': 'Modelo',
'Leader': 'guia',
'Leave blank to request an unskilled person': 'Leave blank to request an unskilled person',
'Legend Format': 'Formato da Legenda',
'Length (m)': 'Comprimento (m)',
'Level': 'Nível',
'Level 1': 'Nível 1',
'Level 1 Assessment Details': 'Detalhes da Avaliação Nível 1',
'Level 1 Assessment added': 'Avaliação Nível 1 incluído',
'Level 1 Assessment deleted': 'Avaliação Nível 1 excluído',
'Level 1 Assessment updated': 'Avaliação Nível 1 atualizada',
'Level 1 Assessments': 'Avaliações Nível 1',
'Level 2': 'nível 2',
'Level 2 Assessment Details': 'Nível 2 de avaliação Detalhado',
'Level 2 Assessment added': 'Nível 2 avaliação incluído',
'Level 2 Assessment deleted': 'Nível 2 de avaliação excluído',
'Level 2 Assessment updated': 'Nível 2 de avaliação atualizada',
'Level 2 Assessments': 'Nível 2 de Avaliações',
'Level 2 or detailed engineering evaluation recommended': 'Nível 2 ou engenharia detalhada de avaliação recomendado',
"Level is higher than parent's": 'Nível superior ao dos pais',
'Library support not available for OpenID': 'Apoio de biblioteca não está disponível para OpenID',
'License Number': 'License Number',
'License Plate': 'License Plate',
'LineString': 'cadeia-de-linhas',
'List': 'Listar',
'List / Add Baseline Types': 'Lista / Incluir Linha de Tipos',
'List / Add Impact Types': 'Lista / Incluir Tipos de Impacto',
'List / Add Services': 'Lista / Incluir Serviços',
'List / Add Types': 'Lista / Incluir Tipos',
'List Activities': 'listar atividades',
'List All': 'Mostrar Tudo',
'List All Assets': 'Lista todos os ativos',
'List All Catalog Items': 'Lista todos os Itens Do Catálogo',
'List All Commitments': 'Lista todos os compromissos',
'List All Entries': 'Listar todas as entradas',
'List All Item Categories': 'Lista todos os itens Categorias',
'List All Memberships': 'Listar Todas As Associações',
'List All Received Shipments': 'Lista todas as transferências Recebidas',
'List All Records': 'Lista todos os registros',
'List All Reports': 'Listar todos os Relatórios',
'List All Requested Items': 'Lista Todos Os itens solicitados',
'List All Requested Skills': 'List All Requested Skills',
'List All Requests': 'Lista Todos Os Pedidos',
'List All Sent Shipments': 'Listar todos os embarques enviados',
'List All Vehicles': 'List All Vehicles',
'List Alternative Items': 'Listar Itens Alternativos',
'List Assessment Summaries': 'Listar Resumos das Avaliações',
'List Assessments': 'Listar as Avaliações',
'List Asset Assignments': 'Listar Atribuições de Ativos',
'List Assets': 'Listar Ativos',
'List Availability': 'Listar Disponibilidade',
'List Baseline Types': 'Lista de Tipos De Linha',
'List Baselines': 'Lista de Linhas',
'List Brands': 'Lista de Marcas',
'List Budgets': 'Listar Orçamentos',
'List Bundles': 'Listar Pacotes',
'List Camp Services': 'Listar Serviços de Acampamento',
'List Camp Types': 'Listar Tipos de Acampamentos',
'List Camps': 'Listar Acampamentos',
'List Catalog Items': 'Lista de Itens Do Catálogo',
'List Catalogs': 'Listar catálogos',
'List Certificates': 'Listar certificados',
'List Certifications': 'Listar certificações',
'List Checklists': 'Lista Listas de Verificação.',
'List Cluster Subsectors': 'Lista Subsetores de Cluster',
'List Clusters': 'Lista Clusters',
'List Commitment Items': 'Lista Itens de Compromisso',
'List Commitments': 'Lista Compromissos',
'List Committed People': 'List Committed People',
'List Competencies': 'Listar competencias',
'List Competency Ratings': 'Listar classificações de competencias',
'List Conflicts': 'Lista Conflitos',
'List Contact Information': 'Listar informações do contato',
'List Contacts': 'Listar contatos',
'List Course Certificates': 'Listar certificados de cursos',
'List Courses': 'Listar Cursos',
'List Credentials': 'Listar credenciais',
'List Current': 'Lista Atual',
'List Documents': 'Listar documentos',
'List Donors': 'Listar doadores',
'List Events': 'Lista de Eventos',
'List Facilities': 'Lista de Facilidades',
'List Feature Classes': 'Listar Classes De Recursos',
'List Feature Layers': 'LISTAr Camadas DE RECURSOS',
'List Flood Reports': 'Listar Relatórios de Inundações',
'List GPS data': 'List GPS data',
'List Groups': 'Listar grupos',
'List Groups/View Members': 'Listar Grupos/visualizar membros',
'List Homes': 'List Homes',
'List Hospitals': 'Listar de Hospitais',
'List Human Resources': 'Lista de Recursos Humanos',
'List Identities': 'Lista de Identidades',
'List Images': 'Lista de Imagens',
'List Impact Assessments': 'Lista de Avaliações De Impacto',
'List Impact Types': 'Lista de Tipos De Impacto',
'List Impacts': 'Lista de impactos',
'List Import Files': 'List Import Files',
'List Incident Reports': 'Lista de relatórios de incidentes',
'List Inventory Items': 'Listar ítens de inventário',
'List Item Categories': 'Listar categorias de ítens',
'List Item Packs': 'Lista pacotes de itens',
'List Items': 'Listar itens',
'List Items in Inventory': 'Lista de Itens no inventário',
'List Job Roles': 'Listar cargos',
'List Keys': 'Listar Chaves',
'List Kits': 'LISTAR Kits',
'List Layers': 'Listar Camadas',
'List Level 1 Assessments': 'Listar avaliações nível 1',
'List Level 1 assessments': 'Listar avaliação nível 1',
'List Level 2 Assessments': 'Listar avaliações nível 2',
'List Level 2 assessments': 'Listar avaliações nível 2',
'List Locations': 'Listar Localizações',
'List Log Entries': 'Listar as entradas de log',
'List Map Configurations': 'Listar configurações de mapa',
'List Markers': 'Listar marcadores',
'List Members': 'Lista de membros',
'List Memberships': 'Lista de associados',
'List Messages': 'Listar Mensagens',
'List Missing Persons': 'Lista de pessoas desaparecidas',
'List Missions': 'Listar Missões',
'List Need Types': 'Listar tipos de necessidades',
'List Needs': 'Lista | |
# filmatyk/filmweb.py
import binascii
import functools
import html
import json
import pickle
from datetime import date

from bs4 import BeautifulSoup as BS
import requests_html

import containers
ConnectionError = requests_html.requests.ConnectionError
class UnauthenticatedError(ConnectionError):
  """Raised by API functions when they detect that the active session
  was refused by the server (i.e. the user is no longer authenticated)."""
class Constants():
  """URLs and HTML component names used for data acquisition.

  Class attributes hold the site-wide constants; instantiating with a
  username precomputes that user's profile URL for the page generators.
  """
  login_path = 'https://www.filmweb.pl/j_login'
  base_path = 'https://www.filmweb.pl'
  main_class = 'userVotesPage__results'
  item_class = 'userVotesPage__result'
  rating_source = 'userVotes'
  rating_stype = 'application/json'
  no_access_class = 'noResultsPlaceholder'
  # The same span class happens to carry the count for every item type.
  movie_count_span = 'blockHeader__titleInfoCount'
  series_count_span = 'blockHeader__titleInfoCount'
  game_count_span = 'blockHeader__titleInfoCount'

  def __init__(self, username):
    self.username = username
    self.userpage = self.getUserPage()

  def getUserPage(self):
    """URL of the user's profile page."""
    return '{}/user/{}'.format(self.base_path, self.username)

  def getUserMoviePage(self, page=1):
    """URL of the given page of the user's movie ratings."""
    return '{}/films?page={}'.format(self.userpage, page)

  def getUserSeriesPage(self, page=1):
    """URL of the given page of the user's series ratings."""
    return '{}/serials?page={}'.format(self.userpage, page)

  def getUserGamePage(self, page=1):
    """URL of the given page of the user's game ratings."""
    return '{}/games?page={}'.format(self.userpage, page)
class FilmwebAPI():
"""HTML-based API for acquiring data from Filmweb."""
@staticmethod
def login(username, password):
"""Attempt to acquire an authenticated user session."""
session = requests_html.HTMLSession()
auth_package = {
'j_username': username,
'j_password': password,
'_login_redirect_url': '',
'_prm': True,
}
# Catch connection errors
try:
log = session.post(Constants.login_path, data=auth_package)
except ConnectionError:
return (False, True)
# Catch authentication errors
if len(session.cookies) == 0:
print('BŁĄD LOGOWANIA')
return (False, False)
else:
return (True, session)
def enforceSession(fun):
"""Decorator to mark API functions that require an authenticated session.
This safeguards the calls to ensure they do not fail due to a lack of
authentication with Filmweb. To achieve this goal, two checks are made:
* before calling the decorated function, a check whether a live HTMLSession
exists is made; if not, a login is requested,
* the call itself is guarded against UnauthenticatedError, also resulting
in a request for login and re-calling of the function.
Additionally, session cookies are watched for changes, in order to set the
isDirty flag in case that happens.
Because it assumes that the first argument of the wrapped function is
a bound FilmwebAPI instance ("self"), it shall only be used with FilmwebAPI
methods.
Because it is meant to be used as a class-level function decorator, it has
no real "self" argument. It is effectively something like a static method.
See the following links for more info:
https://stackoverflow.com/q/21382801/6919631
https://stackoverflow.com/q/11058686/6919631
The bottom line is that it should NEVER be called directly.
"""
def wrapper(*args, **kwargs):
# Extract the bound FilmwebAPI instance
self = args[0]
# First check: for presence of a live session
if not self.checkSession():
return None
old_cookies = set(self.session.cookies.values())
# Second check: whether the call failed due to lack of authentication
try:
result = fun(*args, **kwargs)
except UnauthenticatedError:
# Request login and call again
print('Session was stale! Requesting login...')
self.requestSession()
if not self.session:
return None
result = fun(*args, **kwargs)
# Session change detection
new_cookies = set(self.session.cookies.values())
if old_cookies != new_cookies:
self.isDirty = True
# Finally the produced data is returned
return result
return wrapper
def __init__(self, login_handler, username:str=''):
self.username = username
self.constants = Constants(username)
self.login_handler = login_handler
self.session = None
self.isDirty = False
self.parsingRules = {}
for container in containers.classByString.keys():
self.__cacheParsingRules(container)
# bind specific methods and constants to their item types
self.urlGenerationMethods = {
'Movie': self.constants.getUserMoviePage,
'Series': self.constants.getUserSeriesPage,
'Game': self.constants.getUserGamePage
}
self.countSpanClasses = {
'Movie': self.constants.movie_count_span,
'Series': self.constants.series_count_span,
'Game': self.constants.game_count_span
}
def __cacheParsingRules(self, itemtype:str):
"""Converts parsing rules for a given type into a neater representation.
The rules for each Blueprint are expressed in a human-readable and human-
writable form that makes them easy to modify if need be, but not very
convenient for the parser. This method groups rules in a parser-friendly
representation that makes its job easier.
"""
# Get all the blueprints of a given class
rawRules = {}
for key, val in containers.classByString[itemtype].blueprints.items():
rawRules[key] = val.getParsing()
# Convert them to a parsing tree
pTree = {}
classes = set(rule['tag'] for rule in rawRules.values() if rule is not None)
for c in classes:
pTree[c] = {}
for key, rule in rawRules.items():
# Ignore any non-parsable fields
if rule is None:
continue
# Process only the rules of class c
if rule['tag'] != c:
continue
pClass = rule['class']
pTree[c][pClass] = {
'name': key,
'text': rule['text'],
'list': rule['list'] if 'list' in rule.keys() else False,
'attr': rule['attr'] if 'attr' in rule.keys() else None,
'type': rule['type'] if 'type' in rule.keys() else None
}
# Bind the result to a type name
self.parsingRules[itemtype] = pTree
def checkSession(self):
"""Check if there exists a session instance and acquire a new one if not."""
session_requested = False
if not self.session:
self.requestSession()
session_requested = True
# Check again - in case the user cancelled a login
if not self.session:
return False
# If a new session was requested in the process - set the dirty flag
if session_requested:
self.isDirty = True
# At this point everything is definitely safe
return True
def requestSession(self):
"""Call the GUI to handle a login and bind a session object to self."""
# This pauses execution until the user logs in or cancels
session, username = self.login_handler(self.username)
if session:
# Set the username in case it's a first run (it will be empty)
if not self.username:
self.username = username
else:
# If it's not the first log in, make sure the user has logged as the
# same user. If the GUI is to be trusted, it shouldn't be possible, but
# we can still check in case of an accident during external usage.
# Returned value isn't important. *NOT* setting self.session is.
if username != self.username:
return None
self.session = session
def storeSession(self):
"""Stores the sessions cookies to a base64-encoded pickle string."""
if self.session:
cookies_bin = pickle.dumps(self.session.cookies)
cookies_str = binascii.b2a_base64(cookies_bin).decode('utf-8').strip()
return cookies_str
else:
return 'null'
def restoreSession(self, pickle_str:str):
"""Restores the session cookies from a base64-encoded pickle string."""
if pickle_str == 'null':
return
self.session = requests_html.HTMLSession()
cookies_bin = binascii.a2b_base64(pickle_str.encode('utf-8'))
cookies_obj = pickle.loads(cookies_bin)
self.session.cookies = cookies_obj
@enforceSession
def getNumOf(self, itemtype:str):
"""Return the number of items of a given type that the user has rated.
Returns a tuple: (number of rated items, number of items per page).
"""
getURL = self.urlGenerationMethods[itemtype]
spanClass = self.countSpanClasses[itemtype]
url = getURL()
page = self.fetchPage(url)
# TODO: in principle, this page could be cached for some small time
#the number of user's movies is inside a span of a specific class
items = 0
for span in page.body.find_all('span'):
if not span.has_attr('class'):
continue
if spanClass not in span.attrs['class']:
continue
items = int(span.text)
#find all voting divs, like during parsing
per_page = 0
for div in page.body.find_all('div'):
if not div.has_attr('data-id') or not div.has_attr('class'):
continue
if not self.constants.item_class in div.attrs['class']:
continue
per_page += 1
return items, per_page
@enforceSession
def getItemsPage(self, itemtype:str, page:int=1):
"""Acquire items of a given type from a given page number.
The user's ratings are displayed on pages. This fetches a page by number,
parses it and returns a list of Item-based objects. URL is delivered by a
cached dict, binding URL-generating functions to respective item types.
"""
getURL = self.urlGenerationMethods[itemtype]
url = getURL(page)
page = self.fetchPage(url)
data = self.parsePage(page, itemtype)
return data
@enforceSession
def fetchPage(self, url):
"""Fetch the page and return its BeautifulSoup representation.
ConnectionError is raised in case of any failure to get HTML data or page
status being not-ok after get.
UnauthenticatedError is raised if the response contains a span indicating
that the session used to obtain it is no longer valid.
"""
try:
page = self.session.get(url)
except:
raise ConnectionError
if not page.ok:
status = page.status_code
print("FETCH ERROR {}".format(status))
raise ConnectionError
else:
bspage = BS(page.html.html, 'lxml')
# If a request required an active session but the one we had happened to be
# stale, this magical span will be found in the page data:
span = bspage.find('span', attrs={'class': self.constants.no_access_class})
if span:
raise UnauthenticatedError
return bspage
def parsePage(self, page, itemtype:str):
"""Parse items and ratings, returning constructed Item objects."""
data_div = self.extractDataSource(page)
sub_divs = self.extractItems(data_div)
parsed_items = [self.parseOne(div, itemtype) for div in sub_divs]
ratings = [self.parseRating(txt) for txt in self.extractRatings(data_div)]
for rating, iid in ratings:
for item in parsed_items:
if item.getRawProperty('id') == iid:
item.addRating(rating)
return | |
self.AddLayer(self.DrawPolygonLayer, draw_data, map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name,
type=self.TypePolygon, update=update)
def AddImageLayer(self, data, map_rel=True, visible=True,
show_levels=None, selectable=False,
name='<image_layer>', **kwargs):
"""Add a layer of images to the map.
data list of (lon, lat, fname[, attributes]) (map_rel)
or list of (x, y, fname, [attributes]) (view relative)
attributes is a dictionary of attribute keys:
placement a placement string
offset_x X offset
offset_y Y offset
data image data object
map_rel points drawn relative to map if True, else view relative
visible True if the layer is to be immediately visible
show_levels list of levels at which layer is auto-shown (or None)
selectable True if select operates on this layer
name name of this layer
kwargs dictionary of extra params:
placement string describing placement wrt hotspot
offset_x hotspot X offset in pixels
offset_y hotspot Y offset in pixels
The hotspot is placed at (lon, lat) or (x, y). 'placement' controls
where the image is displayed relative to the hotspot.
"""
# get global attribute values
default_placement = kwargs.get('placement',
self.DefaultImagePlacement)
default_offset_x = kwargs.get('offset_x', self.DefaultImageOffsetX)
default_offset_y = kwargs.get('offset_y', self.DefaultImageOffsetY)
# define cache variables for the image data
fname_cache = None
bmp_cache = None
w_cache = None
h_cache = None # used to optimize file access
# load all image files, convert to bitmaps, create draw_data iterable
draw_data = []
for d in data:
if len(d) == 4:
(lon, lat, fname, attributes) = d
elif len(d) == 3:
(lon, lat, fname) = d
attributes = {}
else:
msg = ('Points data must be iterable of tuples: '
'(x, y, [dict])\nGot: %s' % str(d))
raise Exception(msg)
placement = attributes.get('placement', default_placement)
offset_x = attributes.get('offset_x', default_offset_x)
offset_y = attributes.get('offset_y', default_offset_y)
data = attributes.get('data', None)
if fname == fname_cache:
bmap = bmp_cache
w = w_cache
h = h_cache
else:
fname_cache = fname
img = wx.Image(fname, wx.BITMAP_TYPE_ANY)
bmp_cache = bmap = img.ConvertToBitmap()
(w, h) = bmap.GetSize()
w_cache = w
h_cache = h
draw_data.append((lon, lat, bmap, w, h, placement.lower(),
offset_x, offset_y, data))
return self.AddLayer(self.DrawImageLayer, draw_data, map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name,
type=self.TypeImage)
def AddTextLayer(self, text, map_rel=True, visible=True, show_levels=None,
selectable=False, name='<text_layer>', update=True, **kwargs):
"""Add a text layer to the map.
text list of sequence of (lon, lat, text, [dict]) coordinates
map_rel points drawn relative to map if True, else view relative
visible True if the layer is to be immediately visible
show_levels list of levels at which layer is auto-shown
selectable True if select operates on this layer
name name of this layer
kwargs a dictionary of changeable text attributes
(placement, radius, fontname, fontsize, colour, data)
these supply any data missing in 'data'
"""
if map_rel:
default_placement = kwargs.get('placement', self.DefaultTextPlacement)
default_radius = kwargs.get('radius', self.DefaultTextRadius)
default_fontname = kwargs.get('fontname', self.DefaultTextFontname)
default_fontsize = kwargs.get('fontsize', self.DefaultTextFontSize)
default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),
self.DefaultTextColour)
default_textcolour = self.get_i18n_kw(kwargs,
('textcolour', 'textcolor'),
self.DefaultTextTextColour)
default_offset_x = kwargs.get('offset_x', self.DefaultTextOffsetX)
default_offset_y = kwargs.get('offset_y', self.DefaultTextOffsetY)
default_data = kwargs.get('data', self.DefaultTextData)
else:
default_placement = kwargs.get('placement', self.DefaultTextViewPlacement)
default_radius = kwargs.get('radius', self.DefaultTextViewRadius)
default_fontname = kwargs.get('fontname', self.DefaultTextViewFontname)
default_fontsize = kwargs.get('fontsize', self.DefaultTextViewFontSize)
default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),
self.DefaultTextViewColour)
default_textcolour = self.get_i18n_kw(kwargs,
('textcolour', 'textcolor'),
self.DefaultTextViewTextColour)
default_offset_x = kwargs.get('offset_x', self.DefaultTextViewOffsetX)
default_offset_y = kwargs.get('offset_y', self.DefaultTextViewOffsetY)
default_data = kwargs.get('data', self.DefaultTextData)
# create data iterable ready for drawing
draw_data = []
for t in text:
if len(t) == 4:
(lon, lat, tdata, attributes) = t
elif len(t) == 3:
(lon, lat, tdata) = t
attributes = {}
else:
msg = ('Text data must be iterable of tuples: '
'(lon, lat, text, [dict])\n'
'Got: %s' % str(t))
raise Exception(msg)
# plug in any required defaults
placement = attributes.get('placement', default_placement)
radius = attributes.get('radius', default_radius)
fontname = attributes.get('fontname', default_fontname)
fontsize = attributes.get('fontsize', default_fontsize)
colour = self.get_i18n_kw(attributes, ('colour', 'color'),
default_colour)
textcolour = self.get_i18n_kw(attributes,
('textcolour', 'textcolor'),
default_textcolour)
offset_x = attributes.get('offset_x', default_offset_x)
offset_y = attributes.get('offset_y', default_offset_y)
data = attributes.get('data', default_data)
draw_data.append((lon, lat, tdata, placement.lower(),
radius, colour, textcolour, fontname, fontsize,
offset_x, offset_y, data))
return self.AddLayer(self.DrawTextLayer, draw_data, map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name,
type=self.TypeText, update=update)
def AddLayer(self, render, data, map_rel, visible, show_levels,
selectable, name, type, update=True):
"""Add a generic layer to the system.
render the function used to render the layer
data actual layer data (depends on layer type)
map_rel True if points are map relative, else view relative
visible True if layer is to be immediately shown, else False
show_levels list of levels at which to auto-show the layer
selectable True if select operates on this layer
name name for this layer
type flag for layer 'type'
Returns unique ID of the new layer.
"""
# get layer ID
id = self.next_layer_id
self.next_layer_id += 1
# prepare the show_level value
if show_levels is None:
show_levels = range(self.min_level, self.max_level + 1)
# create layer, add unique ID to Z order list
l = _Layer(id=id, painter=render, data=data, map_rel=map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name, type=type)
self.layer_mapping[id] = l
self.layer_z_order.append(id)
# force display of new layer if it's visible
if visible and update:
self.Update()
return id
######
# Layer manipulation routines.
######
def ShowLayer(self, id):
"""Show a layer.
id the layer id
"""
self.layer_mapping[id].visible = True
self.Update()
def HideLayer(self, id):
"""Hide a layer.
id the layer id
"""
self.layer_mapping[id].visible = False
self.Update()
def DeleteLayer(self, id, update=True):
"""Delete a layer.
id the layer id
"""
# just in case we got None
if id:
# see if what we are about to remove might be visible
visible = self.layer_mapping[id].visible
del self.layer_mapping[id]
self.layer_z_order.remove(id)
# if layer was visible, refresh display
if visible and update:
self.Update()
def SetLayerShowLevels(self, id, show_levels=None):
"""Update the show_levels list for a layer.
id ID of the layer we are going to update
show_levels new layer show list
"""
# just in case we got None
if id:
layer = self.layer_mapping[id]
# prepare the show_level value
if show_levels is None:
show_levels = range(self.min_level, self.max_level + 1)[:]
layer.show_levels = show_levels
# if layer was visible, refresh display
if visible:
self.Update()
def SetLayerSelectable(self, id, selectable=False):
"""Update the .selectable attribute for a layer.
id ID of the layer we are going to update
selectable new .selectable attribute value (True or False)
"""
# just in case we got None
if id:
layer = self.layer_mapping[id]
layer.selectable = selectable
######
# Play with layers Z order
######
def PushLayerToBack(self, id):
"""Make layer specified be drawn at back of Z order.
id ID of the layer to push to the back
"""
self.layer_z_order.remove(id)
self.layer_z_order.insert(0, id)
self.Update()
def PopLayerToFront(self, id):
"""Make layer specified be drawn at front of Z order.
id ID of the layer to pop to the front
"""
self.layer_z_order.remove(id)
self.layer_z_order.append(id)
self.Update()
def PlaceLayerBelowLayer(self, id, top_id):
"""Place a layer so it will be drawn behind another layer.
id ID of layer to place underneath 'top_id'
top_id ID of layer to be drawn *above* 'id'
"""
self.layer_z_order.remove(id)
i = self.layer_z_order.index(top_id)
self.layer_z_order.insert(i, id)
self.Update()
######
# Layer drawing routines
######
def LightweightDrawPointLayer2(self, dc, data, map_rel):
"""Draw a points layer.
dc the device context to draw on
data an iterable of point tuples:
(x, y, place, radius, colour, x_off, y_off, pdata)
map_rel points relative to map if True, MUST BE TRUE for lightweight
Assumes all points are the same colour, saving 100's of ms.
In contrast to LightweightDrawPointLayer, this function draws
rectangles or points (rather than circles) for performance reasons.
"""
assert map_rel is True
if len(data)==0: return
(lon, lat, place,
radius, colour, x_off, y_off, pdata) = data[0]
# draw points on map/view
if map_rel:
# GCDC device context permits antialiasing and transparent colors.
# But, signficant time savings by not allowing these features
# It's not clear that we actually want or use them anyway
#dc = wx.GCDC(dc) # allow transparent colours
dc.SetPen(wx.Pen(colour))
dc.SetBrush(wx.Brush(colour))
points = []
rectangles = []
if radius:
diameter = 2 * radius
for (lon, lat, place,
radius, colour, x_off, y_off, pdata) in data:
pt = self.ConvertGeo2ViewMasked((lon, lat))
if pt:
(x, y) = pt
if radius:
rectangles.append(
(x + x_off - radius, y + y_off - radius,
diameter, diameter))
else:
points.append((x + x_off, y + y_off))
| |
<reponame>tobspr/Panda3D-Bam-Exporter
# -*- encoding: utf-8 -*-
import os
import bpy
import time
import math
import mathutils
from ExportException import ExportException
from TextureWriter import TextureWriter
from GeometryWriter import GeometryWriter
from MaterialWriter import MaterialWriter
from pybamwriter.panda_types import *
from pybamwriter.bam_writer import BamWriter
class DummyCurve:
    """Stand-in for a Blender FCurve that always evaluates to one constant.

    Used when a pose channel has no animation curve: evaluate() must still
    work for every frame index, so it simply echoes the stored value.
    """

    def __init__(self, value):
        # constant returned for every frame
        self.value = value

    def evaluate(self, i):
        """Return the stored constant, ignoring the frame index *i*."""
        return self.value
class SceneWriter:
""" This class handles the conversion from the blender scene graph to the
virtual scene graph, to be able to export that converted scene graph to a
bam file. """
    def __init__(self):
        """ Creates a new scene writer """
        # Export statistics, updated by the sub-writers during the export
        self._stats_exported_vertices = 0
        self._stats_exported_tris = 0
        self._stats_exported_objs = 0
        self._stats_exported_geoms = 0
        self._stats_duplicated_vertices = 0
        # Helper writers; each receives a back-reference to this scene writer
        self.texture_writer = TextureWriter(self)
        self.geometry_writer = GeometryWriter(self)
        self.material_writer = MaterialWriter(self)
        # Maps a blender armature datablock to its exported Character
        self.characters = {}
def set_log_instance(self, log_instance):
""" Sets the export logger instance, used for reporting warnings and errors
during the export """
self.log_instance = log_instance
def set_filepath(self, filepath):
""" Sets the filepath used to store the bam file. In future, the writer
will be able to write to a stream aswell """
self.filepath = filepath
def set_context(self, context):
""" Sets the blender context """
self.context = context
def set_objects(self, objects):
""" Sets the object to export. Usually this is the list of all selected
objects """
self.objects = objects
def set_settings(self, settings):
""" Sets the handle to the PBEExportSettings structure, stored in the
scene datablock """
self.settings = settings
    def write_bam_file(self):
        """ Writes out the bam file, converting the scene to a virtual scene
        graph first, and then exporting that to a bam file """
        # Make the output easier to read - just for debugging
        # os.system("cls")
        start_time = time.time()
        # Create the root of our model. All objects will be parented to this
        virtual_model_root = ModelRoot("SceneRoot")
        # First import all armatures.
        # NOTE: iterates *all* armatures in the blend file (not only selected
        # objects) so skinned meshes can always resolve their Character.
        for armature in bpy.data.armatures:
            self.characters[armature] = self._handle_armature(armature, virtual_model_root)
        # Handle all selected objects
        for obj in self.objects:
            try:
                # Armature objects were already converted above
                if obj.type != 'ARMATURE':
                    self._handle_object(obj, virtual_model_root)
            except Exception as msg:
                self.log_instance.error("Exception while exporting object '{}': {}".format(obj.name, msg))
                raise
        # Serialize the virtual scene graph; e.g. "6.40" -> (6, 40)
        writer = BamWriter()
        writer.file_version = tuple(int(i) for i in self.settings.bam_version.split("."))
        writer.open_file(self.filepath)
        writer.write_object(virtual_model_root)
        writer.close()
        end_time = time.time()
        duration = round(end_time - start_time, 4)
        # Export summary. NOTE(review): log_instance.info is called with
        # multiple positional args -- assumes a print-style logger; confirm.
        self.log_instance.info("-" * 50)
        self.log_instance.info("Wrote out bam with the version", writer.file_version)
        self.log_instance.info("Export finished in", duration, "seconds.")
        self.log_instance.info("Exported", format(self._stats_exported_vertices, ",d"),
            "Vertices and", format(self._stats_exported_tris, ",d"), "Triangles")
        self.log_instance.info("Exported", self._stats_exported_objs,
            "Objects and", self._stats_exported_geoms, "Geoms")
        if self._stats_duplicated_vertices:
            self.log_instance.info("Had to duplicate", format(self._stats_duplicated_vertices, ",d"),
                "Vertices due to different texture coordinates.")
        self.log_instance.info("Exported", len(self.material_writer.material_state_cache), "materials")
        self.log_instance.info("Exported", len(self.texture_writer.textures_cache),
            "texture slots, using", len(self.texture_writer.images_cache), "images")
        self.log_instance.info("-" * 50)
    def _handle_camera(self, obj, parent):
        """ Internal method to handle a camera.
        Cameras are currently not exported; this is a stub. """
        pass
    def _handle_light(self, obj, parent):
        """ Internal method to convert a blender lamp into a Panda light node.
        Supported lamp types: POINT, SPOT and AREA; anything else is skipped
        with a warning. The created light is attached to *parent*. """
        print("Exporting light", obj.name, "of type", obj.data.type)
        if obj.data.type == "POINT":
            light_node = SphereLight(obj.name)
            light_node.radius = obj.data.pbepbs.sphere_radius
        elif obj.data.type == "SPOT":
            light_node = Spotlight(obj.name)
            # NOTE(review): spot_size is blender's cone angle (radians);
            # storing it as the spotlight exponent looks questionable -- confirm.
            light_node.exponent = obj.data.spot_size
        elif obj.data.type == "AREA":
            light_node = RectangleLight(obj.name)
            size_x = obj.data.size
            size_y = size_x
            if obj.data.shape != "SQUARE":
                size_y = obj.data.size_y
            # Scale/reorient so the rectangle spans the lamp's size; note this
            # mutates the *parent* node's transform matrix in place.
            parent.transform.mat *= mathutils.Matrix(
                ((0, size_x, 0, 0),
                 (0, 0, size_y, 0),
                 (1, 0, 0, 0),
                 (0, 0, 0, 1)))
        else:
            self.log_instance.warning("TODO: Support light type:", obj.data.type)
            return
        # The lamp energy is packed into the alpha component of the colour
        color = obj.data.color
        if obj.data.pbepbs.use_temperature:
            color = obj.data.pbepbs.color_preview
        light_node.color = list(color) + [obj.data.energy]
        light_node.specular_color = light_node.color
        # Shadow settings: square shadow map of the configured resolution
        light_node.shadow_caster = obj.data.use_shadow
        light_node.sb_xsize = int(obj.data.pbepbs.shadow_map_res)
        light_node.sb_ysize = light_node.sb_xsize
        if obj.data.type in ("SPOT", "POINT"):
            light_node.attenuation = (0, 0, 1)
        # Optional IES light profile is carried over as a tag
        profile = obj.data.pbepbs.ies_profile
        if profile != "none":
            light_node.tags["ies_profile"] = profile
        light_node.max_distance = obj.data.distance
        parent.add_child(light_node)
    def _handle_empty(self, obj, parent):
        """ Internal method to handle an empty object.
        Empties carry no datablock; the plain node created by _handle_object
        is enough, so nothing happens here. """
        pass
    def _handle_curve(self, obj, parent):
        """ Internal method to handle a curve.
        Curve export is not implemented yet; only a warning is emitted. """
        self.log_instance.warning("TODO: Handle curve:", obj.name)
    def _handle_font(self, obj, parent):
        """ Internal method to handle a font.
        Font export is not implemented yet; only a warning is emitted. """
        self.log_instance.warning("TODO: Handle font:", obj.name)
    def _handle_lattice(self, obj, parent):
        """ Internal method to handle a lattice.
        Lattice export is not implemented yet; only a warning is emitted. """
        self.log_instance.warning("TODO: Handle lattice:", obj.name)
    def _handle_armature(self, obj, parent):
        """ Internal method to handle an armature datablock.
        Builds a Character holding the joint hierarchy and, when an action is
        active on some instance of this armature, also an AnimBundleNode with
        the baked animation. Returns the created Character. """
        char = Character(obj.name)
        bundle = char.bundles[0]
        skeleton = PartGroup(bundle, '<skeleton>')
        # Convert the rest pose, recursing from the root bones
        for bone in obj.bones:
            if bone.parent is None:
                self._handle_bone(bone, char, bundle, skeleton)
        parent.add_child(char)
        # Is there an active animation? Find any instances of this armature.
        action = None
        pose = None
        for scene_obj in self.objects:
            if scene_obj.data == obj and scene_obj.animation_data and scene_obj.animation_data.action:
                action = scene_obj.animation_data.action
                pose = scene_obj.pose
                break
        if action:
            fps = bpy.context.scene.render.fps
            # NOTE(review): assumes the action starts at frame 1
            # (frame_range[0] is ignored) and drops the last frame -- confirm.
            num_frames = int(action.frame_range[1]) - 1
            bundle = AnimBundle(action.name, fps, num_frames)
            # Create the AnimGroup hierarchy.
            skeleton = AnimGroup(bundle, '<skeleton>')
            for bone in obj.bones:
                if bone.parent is None:
                    self._handle_bone_anim(bone, pose, action.fcurves, skeleton)
            parent.add_child(AnimBundleNode(obj.name, bundle))
        return char
    def _handle_bone(self, obj, char, root, parent):
        """ Internal method to convert one bone (and, recursively, its
        children) into CharacterJoints.
        obj     the blender bone
        char    the Character being built
        root    the part bundle the joints belong to
        parent  the parent PartGroup / CharacterJoint """
        # Joint matrices are parent-relative; matrix_local is armature-space.
        # (Blender <2.8 API: '*' performs matrix multiplication.)
        matrix = obj.matrix_local
        if obj.parent:
            matrix = obj.parent.matrix_local.inverted() * matrix
        joint = CharacterJoint(char, root, parent, obj.name, matrix)
        # Inverse of the joint's armature-space rest pose, needed for skinning
        joint.initial_net_transform_inverse = obj.matrix_local.inverted()
        for bone in obj.children:
            self._handle_bone(bone, char, root, joint)
    def _handle_bone_anim(self, bone, pose, fcurves, parent):
        """ Internal method to bake a bone's animation into an
        AnimChannelMatrixXfmTable, recursing into child bones.
        bone     the blender (rest) bone
        pose     the pose the active action applies to
        fcurves  the action's f-curve collection
        parent   the parent AnimGroup """
        # Find all f-curves belonging to this bone.
        prefix = 'pose.bones["{0}"].'.format(bone.name)
        num_frames = parent.root.num_frames
        # Parent-relative rest matrix of this bone
        bone_matrix = bone.matrix_local
        if bone.parent:
            bone_matrix = bone.parent.matrix_local.inverted() * bone_matrix
        pose_bone = pose.bones[bone.name]
        # Channels without an f-curve fall back to the current pose value
        # via DummyCurve, which returns that constant for every frame.
        lx_curve = fcurves.find(prefix + 'location', 0) or DummyCurve(pose_bone.location.x)
        ly_curve = fcurves.find(prefix + 'location', 1) or DummyCurve(pose_bone.location.y)
        lz_curve = fcurves.find(prefix + 'location', 2) or DummyCurve(pose_bone.location.z)
        qw_curve = fcurves.find(prefix + 'rotation_quaternion', 0) or DummyCurve(pose_bone.rotation_quaternion.w)
        qx_curve = fcurves.find(prefix + 'rotation_quaternion', 1) or DummyCurve(pose_bone.rotation_quaternion.x)
        qy_curve = fcurves.find(prefix + 'rotation_quaternion', 2) or DummyCurve(pose_bone.rotation_quaternion.y)
        qz_curve = fcurves.find(prefix + 'rotation_quaternion', 3) or DummyCurve(pose_bone.rotation_quaternion.z)
        sx_curve = fcurves.find(prefix + 'scale', 0) or DummyCurve(pose_bone.scale.x)
        sy_curve = fcurves.find(prefix + 'scale', 1) or DummyCurve(pose_bone.scale.y)
        sz_curve = fcurves.find(prefix + 'scale', 2) or DummyCurve(pose_bone.scale.z)
        group = AnimChannelMatrixXfmTable(parent, bone.name)
        tables = group.tables
        for i in range(num_frames):
            # Compose scale -> rotation -> translation, then move the result
            # into the bone's parent-relative space
            matrix = mathutils.Matrix(((sx_curve.evaluate(i), 0, 0, 0),
                                       (0, sy_curve.evaluate(i), 0, 0),
                                       (0, 0, sz_curve.evaluate(i), 0),
                                       (0, 0, 0, 1)))
            matrix = mathutils.Quaternion((qw_curve.evaluate(i),
                                           qx_curve.evaluate(i),
                                           qy_curve.evaluate(i),
                                           qz_curve.evaluate(i))).to_matrix().to_4x4() * matrix
            matrix = mathutils.Matrix.Translation((lx_curve.evaluate(i),
                                                   ly_curve.evaluate(i),
                                                   lz_curve.evaluate(i))) * matrix
            matrix = bone_matrix * matrix
            # Re-split into the xfm-table channels: tables 0-2 = scale,
            # 6-8 = h/p/r in degrees, 9-11 = x/y/z (3-5, shear, stay empty)
            loc, rot, scale = matrix.decompose()
            prh = rot.to_euler('YXZ')
            tables[6].append(math.degrees(prh[2]))
            tables[7].append(math.degrees(prh[0]))
            tables[8].append(math.degrees(prh[1]))
            tables[9].append(loc[0])
            tables[10].append(loc[1])
            tables[11].append(loc[2])
            tables[0].append(scale[0])
            tables[1].append(scale[1])
            tables[2].append(scale[2])
        # Reduce tables down to single item if they are all the same.
        for table in tables:
            if len(table) > 1 and table.count(table[0]) == len(table):
                table[1:] = []
        # Clear the tables if they are containing default values.
        # (scale defaults to 1, every other channel defaults to 0)
        for table in tables[:3]:
            if table == [1]:
                table.clear()
        for table in tables[3:12]:
            if table == [0]:
                table.clear()
        for child in bone.children:
            self._handle_bone_anim(child, pose, fcurves, group)
    def _handle_mesh(self, obj, parent):
        """ Internal method to handle a mesh.
        The actual geometry conversion is delegated to the GeometryWriter. """
        self.geometry_writer.write_mesh(obj, parent)
def _handle_lod(self, obj, lod_node):
""" Internal method to handle LOD levels """
distances = [level.distance for level in obj.lod_levels]
distances.append(float('inf'))
for i, level in enumerate(obj.lod_levels):
lod_node.add_switch(distances[i + 1], distances[i])
if level.use_mesh:
self._handle_object_data(level.object, lod_node)
else:
self._handle_object_data(object, lod_node)
    def _handle_object(self, obj, parent):
        """ Internal method to process an object during the export process.
        Creates a PandaNode (or LODNode for objects with LOD levels) carrying
        the object's world transform, fills it from the object's datablock
        and attaches it to *parent*; then applies tags, dupli groups and
        billboard settings. """
        print("Exporting object:", obj.name)
        self._stats_exported_objs += 1
        transform = TransformState()
        transform.mat = obj.matrix_world
        # Create a new panda node with the transform
        if hasattr(obj, 'lod_levels') and len(obj.lod_levels) > 0:
            node = LODNode(obj.name)
            node.transform = transform
            self._handle_lod(obj, node)
        else:
            node = PandaNode(obj.name)
            node.transform = transform
            self._handle_object_data(obj, node)
        # Attach the node to the scene graph
        parent.add_child(node)
        self._set_tags(obj, node)
        self._check_dupli(obj, node)
        self._check_billboard(obj, node)
def _handle_object_data(self, obj, parent):
""" Internal method to process an object datablock """
if obj.type == "CAMERA":
self._handle_camera(obj, parent)
elif obj.type == "LAMP":
self._handle_light(obj, parent)
elif obj.type == "MESH":
self._handle_mesh(obj, parent)
elif obj.type == "EMPTY":
self._handle_empty(obj, parent)
elif obj.type == "CURVE":
self._handle_curve(obj, parent)
elif obj.type == "FONT":
self._handle_font(obj, parent)
elif obj.type == "LATTICE":
self._handle_lattice(obj, parent)
elif obj.type == "ARMATURE":
pass
else:
self.log_instance.warning("Skipping object '" + obj.name + "' with unkown type: '" + str(obj.type) + "'")
def _check_dupli(self, obj, parent):
""" Checks for a dupli group """
if obj.dupli_type != "NONE":
if obj.dupli_type != "GROUP":
self.log_instance.warning("Unsupported dupli type:", obj.dupli_type)
return
for sub_obj in obj.dupli_group.objects:
self.log_instance.info("Exporting duplicated object:", sub_obj.name, "for parent", obj.name)
self._handle_object(sub_obj, parent)
return
def _check_billboard(self, obj, node):
""" Checks for a billboard """
if not obj.active_material or not obj.active_material.game_settings:
return
orient = obj.active_material.game_settings.face_orientation
if orient not in ('HALO', 'BILLBOARD'):
return
# Extract the rotation from the transform. | |
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
ImageRegionUtilities: Helper module to process regions and line segments
'''
# Images
from ImageUtilities import imageReadL, createImageF, createImageL, showImageL, createVectorF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImagePropertiesUtilities import imageMaxMin
# Math and iteration
from math import pi, sqrt, sin, cos, atan, atan2, factorial, exp
from random import shuffle, sample
from timeit import itertools
# Array to store image data
from numpy import amax, amin
import numpy as np
# Combination function
def nCr(n, r):
    """Return the binomial coefficient C(n, r) as an exact integer.

    Returns 0 when n, r or n-r is negative (out-of-range arguments).
    Uses integer floor division: the factorial ratio is always exact, which
    fixes the float rounding errors the old true-division version produced
    for large n.
    """
    if n < 0 or r < 0 or n-r < 0: return 0
    return factorial(n) // (factorial(r) * factorial(n-r))
# Rising factorial function
def risingFactorial(x, n):
    """Return the rising factorial x * (x+1) * ... * (x+n-1) (1 when n == 0)."""
    product = 1
    for offset in range(n):
        product *= x + offset
    return product
# Compute reference point in an edge image
def computeReferencePoint(edgeImage):
    """Return the centroid of the edge pixels and the list of edge points.

    edgeImage is scanned column-major (x outer, y inner); every non-zero
    pixel contributes its (y, x) position. The reference point is the
    integer-truncated mean [y, x] of those positions.
    """
    height, width = len(edgeImage), len(edgeImage[0])
    sumY, sumX = 0, 0
    edgePoints = []
    for x, y in itertools.product(range(0, width), range(0, height)):
        if edgeImage[y, x] != 0:
            sumY += y
            sumX += x
            edgePoints.append((y, x))
    numPts = len(edgePoints)
    refPoint = [int(sumY / numPts), int(sumX / numPts)]
    return refPoint, edgePoints
# Return the longest segment from edges
def findLongestSegment(edges):
    """Trace connected edge segments and return the longest one, smoothed.

    edges is a 2D edge-magnitude array; non-zero pixels are edge points.
    Stage 1 collects 8-connected components via flood fill. Stage 2 orders
    the points of the longest component into a path by repeatedly hopping to
    the nearest remaining point (stopping early when the path closes on its
    start). Stage 3 smooths the path with a small wrap-around moving average
    and returns it as a list of (y, x) float pairs.
    """
    height, width = len(edges), len(edges[0])
    # Stage 1: flood-fill connected components; segmentsImage marks visited
    segmentsList = []
    segmentsImage = createImageF(width, height)
    maxSegmentLenght = 0
    maxSegmentIndex = 0
    for x, y in itertools.product(range(0, width), range(0, height)):
        if edges[y, x] != 0 and segmentsImage[y, x] == 0:
            segment = []
            segmentPoints = [(y, x)]
            segmentsImage[y, x] = 255
            while len(segmentPoints) > 0:
                yc = (segmentPoints[0])[0]
                xc = (segmentPoints[0])[1]
                segment.append((yc, xc))
                segmentPoints = segmentPoints[1:]
                for dx, dy in itertools.product(range(-1, 2), range(-1, 2)):
                    xn, yn = xc + dx, yc + dy
                    # FIX: the original condition lacked parentheses, so for
                    # any dx != 0 the bounds checks were skipped ('and' binds
                    # tighter than 'or'), wrapping to negative indices or
                    # raising IndexError at the image border.
                    # (Bounds deliberately kept as '> 0', excluding row/column
                    # 0, to preserve the original in-range behaviour.)
                    if (dx != 0 or dy != 0) and xn > 0 and yn > 0 \
                            and xn < width and yn < height:
                        if edges[yn, xn] != 0 and segmentsImage[yn, xn] == 0:
                            segmentPoints.append((yn, xn))
                            segmentsImage[yn, xn] = 255
            segmentsList.append(segment)
            if len(segment) > maxSegmentLenght:
                maxSegmentLenght = len(segment)
                maxSegmentIndex = len(segmentsList) - 1
    # Stage 2: order the longest component by greedy nearest-point hopping
    mainSegment = []
    segment = segmentsList[maxSegmentIndex]
    curentElement = segment.pop(0)
    sy, sx = curentElement[0], curentElement[1]
    mainSegment.append(curentElement)
    numPoints = len(segment)
    while numPoints > 0:
        closestElement = [0, float("inf")]
        cy, cx = curentElement[0], curentElement[1]
        for p in range(0, numPoints):
            y, x = (segment[p])[0], (segment[p])[1]
            d = sqrt((cx-x) * (cx-x) + (cy-y) * (cy-y))
            if d < closestElement[1] or (d == closestElement[1] and y > cy):
                closestElement = [p, d]
        # If we are closer to the first point, then end now (contour closed)
        dFirst = sqrt((cx-sx) * (cx-sx) + (cy-sy) * (cy-sy))
        if (cx != sx or cy != sy) and 2*dFirst < closestElement[1]:
            break
        curentElement = segment.pop(closestElement[0])
        numPoints = len(segment)
        mainSegment.append(curentElement)
    numPoints = len(mainSegment)
    # Stage 3: moving average over 2*averageSize+1 neighbours (wrap-around)
    # to get a more accurate local direction along the path
    averageSize = 1
    totalPixels = float(1 + 2*averageSize)
    mainSegmentAverage = []
    for p in range(0, numPoints):
        y, x = 0, 0
        for w in range(-averageSize, averageSize+1):
            p1 = p + w
            if p1 < 0: p1 = p1 + numPoints
            if p1 >= numPoints: p1 = p1 - numPoints
            x += (mainSegment[p1])[1]
            y += (mainSegment[p1])[0]
        mainSegmentAverage.append((y/totalPixels, x/totalPixels))
    return mainSegmentAverage
# Return the longest segment from an image
def findLongestCentredSegmentinImage(imageName, gaussianKernelSize, sobelKernelSize, upperT, lowerT):
    """Load an image, find its longest edge segment and centre it.

    Returns (centre, shape, width, height) where shape is a 2 x N array of
    coordinates relative to the segment centroid (row 0 = y, row 1 = x).
    """
    # Read the image, detect edges, then trace the longest segment
    inputImage, width, height = imageReadL(imageName)
    magnitude, _ = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
    mainSegmentAverage = findLongestSegment(magnitude)
    numPoints = len(mainSegmentAverage)
    # Centroid of the traced points
    centre = [0, 0]
    for y, x in mainSegmentAverage:
        centre[0] += y
        centre[1] += x
    centre[0] /= numPoints
    centre[1] /= numPoints
    # Store centred coordinates as a 2 x N array: row 0 = y, row 1 = x
    shape = createImageF(numPoints, 2)
    for p in range(0, numPoints):
        y, x = mainSegmentAverage[p]
        shape[0, p] = y - centre[0]
        shape[1, p] = x - centre[1]
    return centre, shape, width, height
def findLongesSegmentinImage(imageName, gaussianKernelSize, sobelKernelSize, upperT, lowerT):
    """Load an image and return its longest edge segment (uncentred).

    Returns (shape, width, height) with shape a 2 x N array holding y
    coordinates in row 0 and x coordinates in row 1.
    """
    inputImage, width, height = imageReadL(imageName)
    magnitude, _ = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
    mainSegmentAverage = findLongestSegment(magnitude)
    # Pack the traced points into a 2 x N array
    numPoints = len(mainSegmentAverage)
    shape = createImageF(numPoints, 2)
    for p in range(0, numPoints):
        y, x = mainSegmentAverage[p]
        shape[0, p] = y
        shape[1, p] = x
    return shape, width, height
# Get a list with the pixels outside a backgroundRange
def pixlesList(image, backgroundRange):
    """Collect (y, x, 1) entries for every pixel outside backgroundRange.

    A pixel qualifies when its value is strictly below backgroundRange[0]
    or strictly above backgroundRange[1]. The scan is column-major
    (x outer, y inner), matching the other helpers in this module.
    """
    low, high = backgroundRange[0], backgroundRange[1]
    listPixels = []
    height, width = len(image), len(image[0])
    for x, y in itertools.product(range(0, width), range(0, height)):
        value = image[y, x]
        if value < low or value > high:
            listPixels.append((y, x, 1))
    return listPixels
def edgesList(image, shapeImage, backgroundRange):
    """Return the shape points that touch at least one background pixel.

    For every (y, x) entry of shapeImage the 3x3 neighbourhood (including
    the pixel itself) is inspected; if any in-bounds neighbour's value lies
    inside backgroundRange the point is classified as an edge and reported
    as a (y, x) tuple.
    """
    edgePixels = []
    height, width = len(image), len(image[0])
    for point in shapeImage:
        y, x = point[0], point[1]
        isEdge = False
        for wx, wy in itertools.product(range(-1, 2), range(-1, 2)):
            posX, posY = x + wx, y + wy
            if 0 <= posY < height and 0 <= posX < width:
                if backgroundRange[0] <= image[posY, posX] <= backgroundRange[1]:
                    isEdge = True
        if isEdge:
            edgePixels.append((y, x))
    return edgePixels
def computeAngularFunctions(shape):
    """Compute arc-length and angular descriptors of a closed contour.

    shape is a 2 x N array (row 0 = y, row 1 = x) of ordered boundary
    points. Returns a 5-tuple:
      sumArcLenghts      cumulative arc length at each point
      normArcLenghts     arc length normalized to [0, 2*pi]
      angularFunc        estimated tangent direction at each point
      cumulativeFunc     unwrapped cumulative change of direction
      cumulativeNormFunc cumulativeFunc plus the normalized arc length
    """
    # Compute the accumulative arc lengths
    numPoints = len(shape[0])
    sumArcLenghts = []
    # Start from the last point so the contour is treated as closed
    y0, x0 = shape[0, numPoints-1], shape[1, numPoints-1]
    shapeLenght = 0.0
    for p in range(0, numPoints):
        y,x = shape[0,p], shape[1,p]
        shapeLenght += sqrt((y-y0)*(y-y0) + (x-x0)*(x-x0))
        sumArcLenghts.append(shapeLenght)
        y0,x0 = y,x
    # Normalized lengths
    normArcLenghts = []
    for p in range(0, numPoints):
        normArcLenghts.append((2.0*pi*sumArcLenghts[p])/shapeLenght);
    # Compute angular function by an average window: the tangent at p is
    # estimated from the centroids of two point windows behind and ahead of
    # p (indices wrap around the closed contour)
    windowSize = [5,10]
    d = float(windowSize[1] -windowSize[0])
    angularFunc = [ ]
    for p in range(0, numPoints):
        x1,x2,y1,y2 = 0.0, 0.0, 0.0, 0.0
        # Average change
        for q in range(windowSize[0], windowSize[1]):
            pa,pb = p-q,p+q
            if pa<0: pa += numPoints
            if pb>=numPoints: pb -= numPoints
            ya,xa = shape[0,pa], shape[1,pa]
            yb,xb = shape[0,pb], shape[1,pb]
            x1,y1 = x1+xa, y1+ya
            x2,y2 = x2+xb, y2+yb
        dx, dy = (x2-x1)/d, (y2-y1)/d
        angle = atan2(dy, dx)
        angularFunc.append(angle)
    # Compute cumulative angular function
    cumulativeFunc = [ ]
    angle0 = angularFunc[numPoints-1]
    sumAngle = 0.0
    for p in range(0, numPoints):
        angle = angularFunc[p]
        diff = angle-angle0
        # The two adjustments together wrap diff into the (-pi, pi] range
        # before it is accumulated (angle unwrapping)
        if diff < pi:
            diff += 2.0* pi
        if diff > pi:
            diff -= 2.0 * pi
        sumAngle += diff
        cumulativeFunc.append(sumAngle)
        angle0 = angle
    # Compute cumulative angular accumulated
    cumulativeNormFunc = [ ]
    for p in range(0, numPoints):
        cumulativeNormFunc.append(cumulativeFunc[p]+normArcLenghts[p])
    return sumArcLenghts, normArcLenghts, angularFunc, cumulativeFunc, cumulativeNormFunc
def weightedKrawtchoukPolynomials(p, width):
    """Build weighted Krawtchouk polynomial tables of size *width*.

    p      Krawtchouk parameter in (0, 1) controlling the weight centre
    width  number of samples (polynomial order N = width - 1)
    Returns (K, A, sigma, ro, w):
      K      polynomial values, K[n, x], rows normalized by K[n, 0]
      A      polynomial coefficients, solving A*C = K
      sigma  binomial weight over x
      ro     normalization factor over n
      w      weight matrix sqrt(sigma[x] / ro[n])
    """
    # Data containers
    sigma = createVectorF(width)
    ro = createVectorF(width)
    K = createImageF(width,width)
    # Coefficient size
    N = width-1
    # Weight
    sigma[x] = nCr(N, x) * pow(p,x) * pow(1-p,N-x)
    # Scale factor. Commented direct computation and using for to avoid factorial
    #for n in range(0,width):
    #    ro[n] = pow(-1,n) * pow((1-p)/p,n) * (float(factorial(n)) / risingFactorial(-N, n))
    # Recurrence form of the same ratio, avoiding factorial overflow
    ro[0] = 1
    for n in range(1,N):
        ro[n] = (-1*((1.0-p)/p)*n/(-N+(n-1)))*ro[n-1]
    ro[N]=(((1.0-p)/p)*N)*ro[N-1]
    # Krawtchouk matrix that store result of the polynomial
    # Each row is a polynomial each column is the polynomial value for an x value
    # Alternatively, we could have used the polynomial generating function
    q = 1.0/p
    for n,x in itertools.product(range(0, width), range(0, width)):
        for s in range(0,width):
            K[n,x] += pow(-1,s) * nCr(N-x, n-s) * nCr(x, s) * pow(q-1,n-s)
    # Normalize rows for stability
    for n in range(0,width):
        scale = K[n,0]
        for x in range(0,width):
            K[n,x] /= scale
    # Obtain the coefficients A of the polynomials from K
    # Solve for the coefficients A in A*C = K
    C = createImageF(width,width)
    for n,x in itertools.product(range(0, width), range(0, width)):
        C[n,x] = pow(x,n)
    CT = np.transpose(C)
    KT = np.transpose(K)
    AT = np.linalg.solve(CT, KT)  # solves the equation A*x=b   A*C = k, C'*A' = K'
    A = np.transpose(AT)
    # Product defining the weighted
    w = createImageF(width,width)
    for n,x in itertools.product(range(0, width), range(0, width)):
        w[n,x] = sqrt(sigma[x]/ro[n])
    return K, A, sigma, ro, w
def geometricMoments(pixelList, numMoments):
    """Accumulate the geometric moment matrix of a pixel list.

    M[n, m] = sum over pixels of (x**n) * (y**m) * val, where each entry of
    pixelList provides (y, x, val) in positions 0, 1, 2.

    Args:
        pixelList: sequence of pixel records indexable as [0]=y, [1]=x, [2]=val.
        numMoments: number of moment orders per axis (matrix is square).

    Returns:
        numMoments x numMoments moment matrix (as produced by createImageF).
    """
    M = createImageF(numMoments, numMoments)
    for n in range(numMoments):
        for m in range(numMoments):
            for pixel in pixelList:
                y, x, val = pixel[0], pixel[1], pixel[2]
                M[n, m] += (x ** n) * (y ** m) * val
    return M
def geometricInvariantMoments(pixelList, numMoments):
numPoints = len(pixelList)
# | |
# ---- filename: bot.py ----
""" Copyright (c) 2021 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import os
from webexteamsbot import TeamsBot
from webexteamsbot.models import Response
import sys
import json
import requests
from dotenv import load_dotenv
from adaptive_card import adaptive_card, get_measurement_card, get_confirm_viewing_card, get_refuse_viewing_card, observance_ended_card, get_open_door_alert_card
from send import generate_snapshot, download_file, send_file
# load all environment variables
load_dotenv()
# Webex bot credentials and identity.
bot_email = os.getenv("TEAMS_BOT_EMAIL")
teams_token = os.getenv("TEAMS_BOT_TOKEN")
bot_url = os.getenv("TEAMS_BOT_URL")
bot_app_name = os.getenv("TEAMS_BOT_APP_NAME")
# Target Webex room for proactive notifications (webhook alerts, card replies).
room_id = os.getenv("WEBEX_ROOM_ID")
# BEEP (beehive telemetry) account details.
beehive_id = os.getenv("BEEHIVE_ID")
beep_api_token = os.getenv("BEEP_API_TOKEN")
# Meraki dashboard credentials and camera/AP serial numbers.
meraki_api_key = os.getenv("MERAKI_API_KEY")
# NOTE(review): name is missing an 'i' ("merak_"); used consistently below so
# renaming would touch several call sites -- left as-is here.
merak_serial_front = os.getenv("MERAKI_CAMERA_SERIAL_FRONT")
meraki_serial_side = os.getenv("MERAKI_CAMERA_SERIAL_SIDE")
meraki_serial_distance = os.getenv("MERAKI_CAMERA_SERIAL_DISTANCE")
meraki_serial_ap = os.getenv("MERAKI_CAMERA_SERIAL_AP")
# Known client descriptions; read from the environment as a single string and
# matched with `in` below -- presumably a comma-separated list; verify.
meraki_device_clients = os.getenv("MERAKI_DEVICE_CLIENTS")
WEBEX_BASE_URL = "https://webexapis.com/v1"
# NOTE(review): Meraki API v0 is legacy; confirm endpoints used still exist.
MERAKI_BASE_URL = "https://api.meraki.com/api/v0"
# NOTE(review): trailing lowercase 'l' in the constant name; used consistently.
BEEP_BASE_URl = "https://api.beep.nl/api"
# If any of the bot environment variables are missing, terminate the app
if not bot_email or not teams_token or not bot_url or not bot_app_name:
    print(
        "bot.py - Missing Environment Variable. Please see the 'Usage'"
        " section in the README."
    )
    if not bot_email:
        print("TEAMS_BOT_EMAIL")
    if not teams_token:
        print("TEAMS_BOT_TOKEN")
    if not bot_url:
        print("TEAMS_BOT_URL")
    if not bot_app_name:
        print("TEAMS_BOT_APP_NAME")
    sys.exit()
# Create a Bot Object
# Note: debug mode prints out more details about processing to terminal
# Note: the `approved_users=approved_users` line commented out and shown as reference
bot = TeamsBot(
    bot_app_name,
    teams_bot_token=teams_token,
    teams_bot_url=bot_url,
    teams_bot_email=bot_email,
    debug=False,
    # approved_users=approved_users,
    # Subscribe to both plain messages and adaptive-card submit actions;
    # the attachmentActions webhook is what routes into handle_cards below.
    webhook_resource_event=[
        {"resource": "messages", "event": "created"},
        {"resource": "attachmentActions", "event": "created"},
    ],
)
# Create a custom bot greeting function returned when no command is given.
# The default behavior of the bot is to return the '/help' command response
def greeting(incoming_msg):
    """Default reply when the bot is addressed without a recognized command.

    :param incoming_msg: The incoming message object from Teams
    :return: Response with a Markdown greeting personalized with the
             sender's first name
    """
    # Resolve the sender so the greeting can address them by name.
    sender = bot.teams.people.get(incoming_msg.personId)
    response = Response()
    response.markdown = (
        "Hello {}, I'm the Connected Bees bot. "
        "I am a busy bee that can collect data and info for you. "
        "See what I can do by asking for **/help**."
    ).format(sender.firstName)
    return response
# Function to send a message with a card attachment
def create_message_with_attachment(rid, msgtxt, attachment, toPersonEmail=""):
    """Post a Webex message carrying an adaptive-card attachment.

    :param rid: room ID to post into (ignored when toPersonEmail is given)
    :param msgtxt: Markdown fallback text shown by clients without card support
    :param attachment: attachment dict (contentType + card content)
    :param toPersonEmail: if non-empty, send as a direct message instead
    :return: decoded JSON body of the Webex API response
    """
    request_headers = {
        "content-type": "application/json; charset=utf-8",
        "authorization": "Bearer " + teams_token,
    }
    endpoint = f"{WEBEX_BASE_URL}/messages"
    # Address either a room or a person, never both.
    if toPersonEmail == "":
        body = {"roomId": rid, "attachments": [attachment], "markdown": msgtxt}
    else:
        body = {"toPersonEmail": toPersonEmail, "attachments": [attachment], "markdown": msgtxt}
    api_response = requests.post(endpoint, json=body, headers=request_headers)
    return api_response.json()
def handle_meraki_webhook():
    """Handle a Meraki webhook: detect known clients near the AP and alert.

    Queries the Meraki AP for clients seen in the last 5 minutes, matches
    their descriptions against the configured known-client list, and posts
    an "open door" alert card to the Webex room. Best-effort: always
    returns "" so the webhook endpoint never errors back to Meraki.

    :return: "" (always)
    """
    try:
        # Get the Clients that are connected to the AP
        session = requests.Session()
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "X-Cisco-Meraki-API-Key": meraki_api_key
        }
        # Note: timespan indicates the last x seconds you would like to search for
        clients_response = session.get(
            f'{MERAKI_BASE_URL}/devices/{meraki_serial_ap}/clients?timespan=300',
            headers=headers
        )
        clients_response.raise_for_status()
        clients = clients_response.json()
        # Match the detected clients with the known clients.
        # NOTE(review): meraki_device_clients is an environment string, so this
        # is a substring match -- presumably intentional; confirm format.
        detected_clients = []
        for client in clients:
            if not client["description"]:
                continue
            if client["description"] in meraki_device_clients:
                detected_clients.append(client["description"])
        attachment = {
            "contentType": "application/vnd.microsoft.card.adaptive",
            "content": get_open_door_alert_card(detected_clients)
        }
        print("Successfully created attachment")
        create_message_with_attachment(room_id, "hello, the Meraki webhook has been triggered", attachment)
        return ""
    except Exception as exc:
        # BUG FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt and hid the failure reason entirely.
        print("something went wrong!", exc)
        return ""
def get_weight(incoming_msg):
    """
    A function to retrieve the latest weight of the bee hive
    :param incoming_msg: The incoming message object from Teams
    :return: A text or markdown based reply
    """
    # Resolve the sender so the reply can be personalized.
    sender = bot.teams.people.get(incoming_msg.personId)
    target_room = incoming_msg.roomId
    # Fetch the most recent sensor readings from the BEEP API.
    api_response = requests.get(
        f"{BEEP_BASE_URl}/sensors/lastvalues?id={beehive_id}",
        headers={'Authorization': f'Bearer {beep_api_token}'},
    )
    api_response.raise_for_status()
    print(json.dumps(api_response.json(), indent=2))
    weight = round(api_response.json()['weight_kg'], 3)
    # Renamed local (was `adaptive_card`) so it no longer shadows the import.
    card = get_measurement_card("Weight", "weight", weight, "kilograms")
    attachment = {
        "contentType": "application/vnd.microsoft.card.adaptive",
        "content": card,
    }
    create_message_with_attachment(
        target_room,
        f"Hello {sender.firstName}, the latest weight of the bee hive is {weight} kilograms",
        attachment,
    )
    return ""
def get_temperature_outside(incoming_msg):
    """
    A function to retrieve the latest temperature around the bee hive
    :param incoming_msg: The incoming message object from Teams
    :return: A text or markdown based reply
    """
    # Lookup details about sender
    sender = bot.teams.people.get(incoming_msg.personId)
    room_id = incoming_msg.roomId
    url = f"{BEEP_BASE_URl}/sensors/lastvalues?id={beehive_id}"
    headers = {
        'Authorization': f'Bearer {beep_api_token}'
    }
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    print(json.dumps(response.json(), indent=2))
    # 't' is the outside-temperature field of the BEEP lastvalues payload.
    temperature = response.json()['t']
    # Renamed local (was `adaptive_card`) so it no longer shadows the import.
    card = get_measurement_card("Temperature around beehive", "temperature around the beehive", temperature, "degrees celsius")
    attachment = {
        "contentType": "application/vnd.microsoft.card.adaptive",
        "content": card
    }
    # BUG FIX: message previously said "the latest weight of the bee hive".
    create_message_with_attachment(room_id, f"Hello {sender.firstName}, the latest temperature around the bee hive is {temperature} degrees celsius", attachment)
    return ""
def get_temperature_inside(incoming_msg):
    """
    A function to retrieve the latest temperature inside the bee hive
    :param incoming_msg: The incoming message object from Teams
    :return: A text or markdown based reply
    """
    # Lookup details about sender
    sender = bot.teams.people.get(incoming_msg.personId)
    room_id = incoming_msg.roomId
    url = f"{BEEP_BASE_URl}/sensors/lastvalues?id={beehive_id}"
    headers = {
        'Authorization': f'Bearer {beep_api_token}'
    }
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    # 't_i' is the inside-temperature field of the BEEP lastvalues payload.
    temperature = response.json()['t_i']
    # Renamed local (was `adaptive_card`) so it no longer shadows the import.
    card = get_measurement_card("Temperature inside beehive", "temperature inside the beehive", temperature, "degrees celsius")
    attachment = {
        "contentType": "application/vnd.microsoft.card.adaptive",
        "content": card
    }
    # BUG FIX: message previously said "the latest weight of the bee hive".
    create_message_with_attachment(room_id, f"Hello {sender.firstName}, the latest temperature inside the bee hive is {temperature} degrees celsius", attachment)
    return ""
def get_humidity(incoming_msg):
    """
    A function to retrieve the latest relative humidity around the bee hive
    :param incoming_msg: The incoming message object from Teams
    :return: A text or markdown based reply
    """
    # BUG FIX: docstring previously claimed this retrieved the temperature.
    # Lookup details about sender
    sender = bot.teams.people.get(incoming_msg.personId)
    room_id = incoming_msg.roomId
    url = f"{BEEP_BASE_URl}/sensors/lastvalues?id={beehive_id}"
    headers = {
        'Authorization': f'Bearer {beep_api_token}'
    }
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    # 'h' is the relative-humidity field of the BEEP lastvalues payload.
    relative_humidity = response.json()['h']
    # Renamed local (was `adaptive_card`) so it no longer shadows the import.
    card = get_measurement_card("Humidity", "relative humidity around the beehive", relative_humidity, "%RH")
    attachment = {
        "contentType": "application/vnd.microsoft.card.adaptive",
        "content": card
    }
    create_message_with_attachment(room_id, f"Hello {sender.firstName}, the latest measurement of the relative humidity is {relative_humidity} %RH", attachment)
    return ""
def get_snapshot_base(incoming_msg, serial_number, msg="Snapshot of the beehive"):
    """Generate a snapshot from the given Meraki camera and post it to the room.

    :param incoming_msg: The incoming message object from Teams
    :param serial_number: Serial of the Meraki camera to snapshot
    :param msg: Caption sent with the uploaded image
    :return: "" on success, or an error string if no snapshot URL was produced
    """
    room_id = incoming_msg.roomId
    session = requests.Session()
    # Format message
    headers = {
        'content-type': 'application/json; charset=utf-8',
        'authorization': f'Bearer {teams_token}'
    }
    payload = {
        'roomId': room_id,
    }
    # Generating screenshot for latest time since when I selected a timestamp that was too close
    # to real time the camera had not had a chance to store it and make it available for sending
    print("About to generate snapshot with serial ", serial_number)
    file_url = generate_snapshot(serial_number, None, session)
    print("theScreenShotURL=", file_url)
    if file_url:  # download/GET image from URL
        # BUG FIX: download via the camera that produced the snapshot; this
        # previously passed the hard-coded front-camera serial
        # (merak_serial_front) regardless of the serial_number argument.
        temp_file = download_file(session, serial_number, file_url)
        if temp_file:
            send_file(session, headers, payload, msg, temp_file, file_type='image/jpg')
        return ""
    else:
        return 'snapshot unsuccessfully retrieved'
def get_snapshot_front(incoming_msg):
    """Generate a snapshot from the camera facing the front of the bee hive.

    :param incoming_msg: The incoming message object from Teams
    :return: A text or markdown based reply
    """
    return get_snapshot_base(
        incoming_msg,
        merak_serial_front,
        "Snapshot of the front of the beehive",
    )
def get_snapshot_side(incoming_msg):
    """Generate a snapshot from the camera facing the side of the bee hive.

    :param incoming_msg: The incoming message object from Teams
    :return: A text or markdown based reply
    """
    return get_snapshot_base(
        incoming_msg,
        meraki_serial_side,
        "Snapshot of the side of the beehive",
    )
def get_snapshot_distance(incoming_msg):
    """
    A function to generate a snapshot of the camera viewing the bee hive from a distance
    :param incoming_msg: The incoming message object from Teams
    :return: A text or markdown based reply
    """
    # BUG FIX: docstring previously said this was the side-facing camera.
    return get_snapshot_base(incoming_msg, meraki_serial_distance, "Snapshot of beehive in the distance")
def handle_cards(api, incoming_msg):
    """Dispatch adaptive-card submit actions to the matching follow-up card.

    Deletes the card that was clicked, then posts a confirmation/refusal/ended
    card to the configured room, depending on which input key is present.

    :param api: Webex API client (used to delete the clicked card's message)
    :param incoming_msg: raw attachmentActions webhook payload
    :return: "" on a handled action, otherwise an error string
    """
    m = get_attachment_actions(incoming_msg["data"]["id"])
    print(json.dumps(incoming_msg, indent=2))
    sender = bot.teams.people.get(incoming_msg["data"]["personId"])
    name = sender.firstName
    print(m)
    # (input key, lazily-built card, room message) in priority order.
    dispatch = (
        ("confirm_remote_observation",
         lambda: get_confirm_viewing_card(name),
         "Remote observance has been confirmed"),
        ("refuse_remote_observation",
         lambda: get_refuse_viewing_card(name),
         "Remote observance has been refused"),
        ("end_remote_observation",
         lambda: observance_ended_card,
         "Remote observance has ended"),
    )
    for input_key, build_card, room_message in dispatch:
        if input_key in m["inputs"]:
            # Delete the card the user just clicked.
            api.messages.delete(messageId=m["messageId"])
            attachment = {
                "contentType": "application/vnd.microsoft.card.adaptive",
                "content": build_card(),
            }
            create_message_with_attachment(room_id, room_message, attachment)
            return ""
    return "Could not find the right action"
def get_attachment_actions(attachmentid):
    """Fetch the submitted inputs of an adaptive-card action from Webex.

    :param attachmentid: ID of the attachmentActions resource
    :return: decoded JSON body of the Webex API response
    """
    headers = {
        "content-type": "application/json; charset=utf-8",
        "authorization": "Bearer " + teams_token,
    }
    # BUG FIX: the URL was a plain string ("{WEBEX_BASE_URl}/...") -- the
    # missing f-prefix (and the misspelled placeholder name) meant the literal
    # placeholder text was sent instead of the real base URL.
    url = f"{WEBEX_BASE_URL}/attachment/actions/{attachmentid}"
    response = requests.get(url, headers=headers)
    return response.json()
bot.add_new_url("/meraki_webhook", "meraki_webhook", handle_meraki_webhook)
# Set the bot greeting.
bot.set_greeting(greeting)
# Add new commands to the bot.
bot.add_command("attachmentActions", "*", handle_cards)
bot.add_command("/weight", "Get the latest weight of the bee hive", get_weight)
bot.add_command("/temp_outside", "Get the latest temperature around the bee hive", | |
% (basename,
' '.join(sorted(project_names))))
else:
raise BadArgsError(
'No such Project "%s". There are no Projects in the current Folder.'
% basename)
def _LookupFolder(state, argument):
  """Returns the Folder named by argument (a path or 'uid=N' syntax).

  Args:
    state: State
    argument: basestring
  Returns:
    Folder
  Raises:
    BadArgsError
    NoSuchContainerError
  """
  try:
    uid = lexer.ParseSyntaxForUID(argument)
  except lexer.Error as err:
    raise BadArgsError(err)
  # UID syntax bypasses path resolution entirely.
  if uid is not None:
    found = state.ToDoList().FolderByUID(uid)
    if found is None:
      raise NoSuchContainerError('No Folder exists with UID %s' % uid)
    the_folder, _ = found
    return the_folder
  try:
    parent_path = state.DirName(argument)
    leaf = state.BaseName(argument)
    if not leaf:
      # For Containers, it'd also be fine to ignore a trailing slash.
      raise BadArgsError('Unexpected trailing "%s"' % FLAGS.pyatdl_separator)
  except state_module.InvalidPathError as err:
    raise BadArgsError(err)
  parent = state.GetContainerFromPath(parent_path)
  # Collect sibling Folder names as we scan so the error can list choices.
  folder_names = []
  for child in parent.items:
    if not isinstance(child, folder.Folder):
      continue
    if child.name == leaf:
      return child
    folder_names.append(child.name)
  if folder_names:
    raise NoSuchContainerError(
      'No such Folder "%s". Choices: %s'
      % (leaf,
         ' '.join(sorted(folder_names))))
  raise NoSuchContainerError(
    'No such Folder "%s". There are no Folders within the specified Folder.'
    % leaf)
def _LookupAction(state, argument):
  """Returns the specified Action and its containing Prj.

  Args:
    state: State
    argument: basestring
  Returns:
    (Action, Prj)
  Raises:
    BadArgsError
    NoSuchContainerError  # TODO(chandler37): This is a misnomer; NoSuchItemError?
  """
  try:
    the_uid = lexer.ParseSyntaxForUID(argument)
  except lexer.Error as e:
    raise BadArgsError(e)
  if the_uid is not None:
    x = state.ToDoList().ActionByUID(the_uid)  # None if the_uid is invalid
    if x is None:
      raise NoSuchContainerError('No Action with UID %s exists.' % the_uid)
    return x
  # Path syntax: resolve the containing Project, then match by name.
  try:
    dirname = state.DirName(argument)
    basename = state.BaseName(argument)
    if not basename:
      raise BadArgsError('Unexpected trailing "%s"' % FLAGS.pyatdl_separator)
  except state_module.Error as e:
    raise BadArgsError(e)
  try:
    containr = state.GetContainerFromPath(dirname)
  except state_module.Error as e:
    raise BadArgsError(e)
  if not isinstance(containr, prj.Prj):
    raise BadArgsError(
      'This command only makes sense inside a Project, not inside "%s". See "help pwd".'
      % (containr.name if containr.name else FLAGS.pyatdl_separator,))
  action_names = []
  for item in containr.items:
    assert isinstance(item, action.Action), str(item)
    action_names.append(item.name)
    # BUG FIX (dead code): the condition previously also tested
    # `the_uid == item.uid`, but the_uid is always None here because the
    # UID branch above returns early.
    if item.name == basename:
      return item, containr
  if action_names:
    raise BadArgsError(
      'No such Action "%s". Choices: %s'
      % (basename,
         ' '.join(sorted(action_names))))
  else:
    raise BadArgsError(
      'No such Action "%s". There are no Actions in the current Project.'
      % basename)
def _LookupContext(state, argument):
  """Returns the Ctx named by argument, or None for the no-context sentinel.

  Args:
    state: State
    argument: basestring
  Returns:
    None|Ctx
  Raises:
    BadArgsError
  """
  # The Django UI uses UID 0 to mean <none> a.k.a. "Actions Without Context"
  if argument == 'uid=0' or argument == FLAGS.no_context_display_string:
    return None
  try:
    uid = lexer.ParseSyntaxForUID(argument)
  except lexer.Error as err:
    raise BadArgsError(err)
  todolist = state.ToDoList()
  if uid is None:
    # Plain-name lookup.
    return todolist.ContextByName(argument)
  return todolist.ContextByUID(uid)  # None if uid is invalid
def _ExecuteUICmd(the_state, argv, generate_undo_info=True):
  """Executes a UICmd. Assumes it will not have an error.

  Any failure here is a programming error, so every error path is converted
  into an AssertionError that carries the offending argv.

  Args:
    the_state: State
    argv: [str]
    generate_undo_info: bool
  Returns:
    None
  Raises:
    Error
  """
  try:
    APP_NAMESPACE.FindCmdAndExecute(
      the_state, argv, generate_undo_info=generate_undo_info)
  except AssertionError as err:
    raise AssertionError('argv=%s err=%s' % (argv, str(err)))
  except (appcommandsutil.CmdNotFoundError,
          appcommandsutil.InvalidUsageError,
          appcommandsutil.IncorrectUsageError) as err:
    raise AssertionError('argv=%s error=%s' % (argv, str(err)))
class UICmd(appcommands.Cmd):  # pylint: disable=too-few-public-methods
  """Superclass for all UI commands."""
  @staticmethod
  def RaiseUnlessNArgumentsGiven(n, args):
    """Raises an exception unless the correct number of arguments was given.

    Args:
      n: int
      args: [str]
    Raises:
      BadArgsError  # TODO(chandler): Why not app.UsageError which prints
                    # full help? Should we abandon BadArgsError entirely,
                    # printing full help always?
    """
    assert n >= 1
    if len(args) == n + 1:  # $0 isn't an argument
      return
    # Build the message from its two independent halves.
    tail = ('found none' if len(args) < 2
            else 'found these: %s' % repr(args[1:]))
    if n == 1:
      raise BadArgsError('Needs a single positional argument; ' + tail)
    raise BadArgsError('Needs %d positional arguments; ' % n + tail)

  @staticmethod
  def RaiseIfAnyArgumentsGiven(args):
    """Raises an exception unless there are no positional args.

    Args:
      args: [str]
    Raises:
      BadArgsError
    """
    if len(args) != 1:  # $0 isn't an argument
      raise BadArgsError(
        'Takes no arguments; found these arguments: %s' % repr(args[1:]))

  def IsUndoable(self):  # pylint: disable=no-self-use
    """Returns True iff this command is a mutation.

    Returns:
      bool
    """
    return False
  # def Run(self, args):
  #   """Override."""
class UndoableUICmd(UICmd):  # pylint: disable=too-few-public-methods
  """A command that mutates the to-do list.

  Read-only commands like 'ls' are excluded because undoing one would appear
  to do nothing, which would confuse the User.
  """
  def IsUndoable(self):
    return True
class UICmdEcho(UICmd):
  """Echoes the arguments and prints a newline as the unix command echo(1) does.

  This is helpful for documenting lists of commands.
  """
  def __init__(self, name, flag_values, **kargs):
    super().__init__(name, flag_values, **kargs)
    flags.DEFINE_bool('stdout', False,
                      'For debugging, output directly to stdout.',
                      flag_values=flag_values)

  def Run(self, args):  # pylint: disable=missing-docstring,no-self-use
    # args[0] is the command name; echo only the positional arguments.
    # (Was `' '.join(x for x in args[1:])` -- the generator was a no-op.)
    p = ' '.join(args[1:])
    state = FLAGS.pyatdl_internal_state
    if FLAGS.stdout:
      print(p)
    else:
      state.Print(p)
class UICmdEcholines(UICmd):
  """Echoes the arguments, printing a newline after each.

  This is helpful for testing argument processing.
  """
  def Run(self, args):  # pylint: disable=missing-docstring,no-self-use
    state = FLAGS.pyatdl_internal_state
    # Skip args[0] (the command name); print each positional on its own line.
    for positional in args[1:]:
      state.Print(positional)
class UICmdChclock(UICmd):
  """Sets the system clock. Useful for unittests.

  There are two forms:
    chclock 1409712847.989031  # Absolute. Clock stops incrementing.
    chclock +1  # Relative. Clock does not stop.
  """
  def Run(self, args):  # pylint: disable=missing-docstring,no-self-use
    self.RaiseUnlessNArgumentsGiven(1, args)
    arg = args[-1]
    relative_not_absolute = False
    # A single leading '+' selects relative mode; a second '+' is rejected.
    if arg.startswith('+'):
      relative_not_absolute = True
      arg = arg[1:]
      if arg.startswith('+'):
        raise BadArgsError('A leading \'++\' makes no sense.')
    assert arg, arg
    try:
      a_float = float(arg)
    except ValueError:
      raise BadArgsError(
        'Needs a numeric argument, seconds since the epoch (1970 CE). To move '
        'the clock relative to the old clock, prepend the argument with \'+\'. The argument: %s' % (repr(arg),))
    # Negative offsets are allowed in relative mode; absolute times are not.
    if a_float < 0 and not relative_not_absolute:
      raise BadArgsError('Minimum value is 0, a.k.a. 1970 CE.')
    # NOTE: this monkeypatches time.time process-wide. In relative mode the
    # replacement closes over the *previous* time.time, so the clock keeps
    # ticking with a constant offset (and repeated relative chclocks stack).
    if relative_not_absolute:
      old_time = time.time
      def NewTime():  # pylint: disable=missing-docstring
        return old_time() + a_float
      time.time = NewTime
    else:
      # Absolute mode freezes the clock at the given instant.
      def AbsoluteNewTime():  # pylint: disable=missing-docstring
        return a_float
      time.time = AbsoluteNewTime
class UICmdLs(UICmd):
  """Lists immediate contents of the current working Folder/Project (see "help pwd").

  The 'view' command (see 'help view') controls which items are visible. 'ls -a'
  ignores the view filter and shows all items, including '.', the working
  directory, and '..', its parent.

  The following timestamps are displayed when the '-l' argument is given:
    * ctime: Time of creation
    * mtime: Time of last modification
    * dtime: Time of deletion
  """
  def __init__(self, name, flag_values, **kargs):
    super().__init__(name, flag_values, **kargs)
    # Command-local flags, registered against this command's own FlagValues.
    flags.DEFINE_bool('show_all', False,
                      'Additionally lists everything, even hidden objects, '
                      'overriding the view filter',
                      short_name='a', flag_values=flag_values)
    flags.DEFINE_bool('recursive', False,
                      'Additionally lists subdirectories/subprojects recursively',
                      short_name='R', flag_values=flag_values)
    flags.DEFINE_bool('show_timestamps', False,
                      'Additionally lists timestamps ctime, dtime, mtime',
                      short_name='l', flag_values=flag_values)
    flags.DEFINE_enum('view_filter', None, sorted(view_filter.CLS_BY_UI_NAME),
                      'Instead of using the global view filter (see "help '
                      'view"), override it and use this view filter. Note: '
                      'this is ignored in --show_all mode',
                      short_name='v', flag_values=flag_values)

  def Run(self, args):  # pylint: disable=missing-docstring,no-self-use
    state = FLAGS.pyatdl_internal_state
    override = None
    if FLAGS.view_filter:
      # Per-invocation view filter that overrides the global one.
      override = state.NewViewFilter(
        filter_cls=view_filter.CLS_BY_UI_NAME[FLAGS.view_filter])
    # Closure over `state`, this invocation's flag values, and `override`.
    def DoIt(obj, location):  # pylint: disable=missing-docstring
      _PerformLs(obj, location, state,
                 recursive=FLAGS.recursive, show_uid=FLAGS.pyatdl_show_uid,
                 show_all=FLAGS.show_all, show_timestamps=FLAGS.show_timestamps,
                 view_filter_override=override)
    if len(args) == 1:
      # No positional arguments: list the current working container.
      DoIt(state.CurrentWorkingContainer(), '.')
    else:
      for i, name in enumerate(args[1:]):
        try:
          dirname = state.DirName(name)
          basename = state.BaseName(name)
          if not basename and name != FLAGS.pyatdl_separator:
            raise BadArgsError(
              'Unexpected trailing "%s"; dirname=%s and basename=%s'
              % (FLAGS.pyatdl_separator, dirname, basename))
          obj = state.GetObjectFromPath(name)
        except state_module.InvalidPathError as e:
          raise BadArgsError(e)
        if isinstance(obj, container.Container) and len(args) > 2:
          # Mimic ls(1): print a "name:" header when listing several targets.
          state.Print('%s:' % name)
        # NOTE(review): the location passed is the argument's dirname, not the
        # object's own path -- presumably intentional; verify with _PerformLs.
        DoIt(obj, dirname)
        # Blank separator line between targets (but not after the last one).
        if i < len(args) - 2:
          state.Print('')
def _FindParentOf(state, obj):
  """Returns the Container that contains obj, or
  state.ToDoList().root if obj is '/' or a Context.

  Args:
    state: State
    obj: AuditableObject
  Returns:
    Container
  """
  item = None
  # Contexts are not nested inside Containers; the root is their parent.
  if isinstance(obj, ctx.Ctx):
    return state.ToDoList().root
  for (c, path) in state.ToDoList().ContainersPreorder():
    if c.uid == obj.uid:
      # obj is itself a Container: its parent is the head of the preorder
      # path, or the root when the path is empty (obj is the root itself).
      if path:
        item = path[0]
      else:
        item = state.ToDoList().root
      break
    # Otherwise obj may be a leaf item directly inside this Container.
    for subitem in c.items:
      if subitem.uid == obj.uid:
        item = c
        break
    if item is not None:
      break
  else:
    # for/else: loop ran to completion without `break`, i.e. obj was found
    # nowhere in the tree -- an invariant violation.
    raise AssertionError(
      'Cannot happen. %s %s %s'
      % (state.CurrentWorkingContainer().name, str(state.ToDoList().root),
         obj.uid))
  return item
def _PerformLs(current_obj, location, state, recursive, show_uid, show_all, # pylint: disable=too-many-arguments
show_timestamps, view_filter_override=None):
"""Performs 'ls'.
Args:
current_obj: AuditableObject
location: basestring
state: State
recursive: bool
show_uid: bool
show_all: bool
show_timestamps: bool
view_filter_override: None|ViewFilter
"""
if show_all and isinstance(current_obj, container.Container):
state.Print(_ListingForOneItem(
show_uid,
show_timestamps,
current_obj,
state.ToDoList(),
'.'))
state.Print(_ListingForOneItem(
show_uid,
show_timestamps,
| |
tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/accounts/password/change".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def pathgroups(self, data, tenant_id=None, api_version="v2.1"):
"""
Create a Path Group for a tenant.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.1)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/pathgroups".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def pathgroups_query(self, data, tenant_id=None, api_version="v2.1"):
"""
Queries db for limit number of network contexts that match query params.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.1)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/pathgroups/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def policyrules(self, policyset_id, data, tenant_id=None, api_version="v3.1"):
"""
Create a new Policy
**Parameters:**:
- **policyset_id**: Policy Set ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v3.1)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/policysets/{}/policyrules".format(api_version,
tenant_id,
policyset_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def policyrules_query(self, data, tenant_id=None, api_version="v3.1"):
"""
Queries db for policyrules that match query params.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v3.1)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/policyrules/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def policysets(self, data, tenant_id=None, api_version="v3.0"):
"""
Create a new Policy Set
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v3.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/policysets".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def policysets_bulk_config_state_query(self, data, tenant_id=None, api_version="v2.0"):
"""
Get all config/state info across all policysets from NB
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/policysets/bulk_config_state/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def policysets_query(self, data, tenant_id=None, api_version="v3.0"):
"""
Queries db for policysets that match query params.
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v3.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/policysets/query".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def prefixfilters(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Create an association between site and security prefix filter.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/prefixfilters".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def prefixfilters_query(self, site_id, data, tenant_id=None, api_version="v2.0"):
"""
Query security prefix filter for NB API.
**Parameters:**:
- **site_id**: Site ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/sites/{}/prefixfilters/query".format(api_version,
tenant_id,
site_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data)
def prioritypolicyglobalprefixes(self, data, tenant_id=None, api_version="v2.0"):
    """
    Create a new global prefix.

      **Parameters:**:

      - **data**: Dictionary containing data to POST as JSON
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None:
        # Fall back to the tenant_id cached on the parent namespace, if any.
        tenant_id = self._parent_class.tenant_id or None
    if not tenant_id:
        # Still no usable value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")

    url = "{}/{}/api/tenants/{}/prioritypolicyglobalprefixes".format(
        str(self._parent_class.controller), api_version, tenant_id)
    api_logger.debug("URL = %s", url)

    return self._parent_class.rest_call(url, "post", data=data)
def prioritypolicyglobalprefixes_query(self, data, tenant_id=None, api_version="v2.0"):
    """
    Query Priority Global Prefixes.

      **Parameters:**:

      - **data**: Dictionary containing data to POST as JSON
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None:
        # Fall back to the tenant_id cached on the parent namespace, if any.
        tenant_id = self._parent_class.tenant_id or None
    if not tenant_id:
        # Still no usable value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")

    url = "{}/{}/api/tenants/{}/prioritypolicyglobalprefixes/query".format(
        str(self._parent_class.controller), api_version, tenant_id)
    api_logger.debug("URL = %s", url)

    return self._parent_class.rest_call(url, "post", data=data)
def prioritypolicylocalprefixes_query(self, data, tenant_id=None, api_version="v2.0"):
    """
    Query site priority prefix association.

      **Parameters:**:

      - **data**: Dictionary containing data to POST as JSON
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None:
        # Fall back to the tenant_id cached on the parent namespace, if any.
        tenant_id = self._parent_class.tenant_id or None
    if not tenant_id:
        # Still no usable value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")

    url = "{}/{}/api/tenants/{}/prioritypolicylocalprefixes/query".format(
        str(self._parent_class.controller), api_version, tenant_id)
    api_logger.debug("URL = %s", url)

    return self._parent_class.rest_call(url, "post", data=data)
def prioritypolicyrules(self, prioritypolicyset_id, data, tenant_id=None, api_version="v2.0"):
"""
Create a new PriorityPolicyRule
**Parameters:**:
- **prioritypolicyset_id**: Priority Policy Set ID
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set | |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense,Flatten,GlobalAveragePooling2D,Input,Lambda
from tensorflow.keras.models import Model,load_model
import tensorflow.keras.backend as K
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import preprocess_input
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import accuracy_score,confusion_matrix
from skimage.color import rgb2gray
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# In[110]:
def brute_vgg16():
    """Train a from-scratch VGG16 on CIFAR-10 and report accuracy.

    Loads CIFAR-10, one-hot encodes the labels, carves out a 20%%
    validation split, trains VGG16 (random weights, 32x32x3 input) with
    a replacement 10-way softmax head, saves the model to
    "vgg16_cifar10", prints train/test accuracy and plots the loss and
    accuracy curves.

    Returns:
        The trained keras ``Model``.
    """
    (x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
    y_train = to_categorical(y_train_without_one_hot)
    y_test = to_categorical(y_test_without_one_hot)
    x_train, x_valid, y_train, y_valid = train_test_split(
        x_train, y_train, test_size=0.2, shuffle=True, random_state=42)
    print(x_train.shape)
    print(y_train.shape)
    print(x_test.shape)
    print(y_test.shape)
    print(x_valid.shape)
    print(y_valid.shape)

    # VGG16 with randomly initialised weights; replace the 1000-way
    # ImageNet head with a 10-way softmax for CIFAR-10.
    vgg16 = VGG16(include_top=True, weights=None, input_shape=(32, 32, 3))
    out = Dense(10, activation='softmax', name='fc3')(vgg16.get_layer('fc2').output)
    brute_model = Model(inputs=vgg16.input, outputs=out)

    epochs = 20
    learning_rate = 0.1
    decay_rate = learning_rate / epochs
    sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    # BUG FIX: previously compiled with the string 'sgd', which silently
    # discarded the learning-rate/decay/momentum configured above.
    brute_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    history = brute_model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=epochs, batch_size=16)
    brute_model.save("vgg16_cifar10")

    y_pred_train = brute_model.predict(x_train)
    predictions_train = np.argmax(y_pred_train, axis=1)
    print("training accuracy:", accuracy_score(np.argmax(y_train, axis=1), predictions_train))
    y_pred_test = brute_model.predict(x_test)
    prediction_test = np.argmax(y_pred_test, axis=1)
    print("test accuracy:", accuracy_score(np.argmax(y_test, axis=1), prediction_test))

    # plot loss during training
    plt.subplot(211)
    plt.title('Loss')
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    # plot accuracy during training
    plt.subplot(212)
    plt.title('Accuracy')
    plt.plot(history.history['accuracy'], label='train')
    plt.plot(history.history['val_accuracy'], label='test')
    plt.legend()
    plt.show()
    return brute_model
# In[150]:
def test_brute_model_on_gray_scale_test_images(brute_model):
    """Evaluate `brute_model` on a grayscale version of the CIFAR-10 test set.

    Each RGB test image is converted to grayscale and replicated across
    three channels so the model's (H, W, 3) input shape still matches.
    Prints the accuracy and the confusion matrix.
    """
    (x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
    y_test = to_categorical(y_test_without_one_hot)

    # Grayscale conversion, channel-replicated back to 3 channels.
    gray_x_test = np.array([np.dstack((rgb2gray(img),) * 3) for img in x_test])
    print(gray_x_test.shape)

    prediction_test = np.argmax(brute_model.predict(gray_x_test), axis=1)
    y_true = np.argmax(y_test, axis=1)
    print("test accuracy:", accuracy_score(y_true, prediction_test) * 100, "%")
    print("gray scale confusion matrix:\n", confusion_matrix(y_true, prediction_test))
# In[112]:
def class_wise_accuracy(models):
    """Print overall accuracy plus a per-class metric on the CIFAR-10 test set.

    Parameters:
        models: a trained keras model exposing ``predict``.

    Returns:
        np.ndarray of 10 per-class percentages. NOTE: the denominator is
        the confusion-matrix *column* sum (number of predictions for that
        class), so this is per-class precision, kept for backward
        compatibility with the original output.
    """
    labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    (x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
    y_test = to_categorical(y_test_without_one_hot)
    y_pred_test = models.predict(x_test)
    prediction_test = np.argmax(y_pred_test, axis=1)
    print("test accuracy:", accuracy_score(np.argmax(y_test, axis=1), prediction_test) * 100, "%")
    confus_matrix = confusion_matrix(np.argmax(y_test, axis=1), prediction_test)
    print("confusion_matrix:\n", confus_matrix)
    # IMPROVEMENT: the diagonal (true positives per class) was extracted
    # with an O(n^2) nested loop; np.diag does it in one call.
    class_TP = np.diag(confus_matrix)
    class_accuracy = []
    for k in range(confus_matrix.shape[1]):
        ca = (class_TP[k] / confus_matrix[:, k].sum()) * 100
        class_accuracy.append(ca)
        print("class ", k, " accuracy ", labels[k], " :", ca, "%")
    return np.array(class_accuracy)
# In[113]:
def bias_metrics(class_accuracy, models):
    """Print bias metrics for a model on the CIFAR-10 test set.

    Degree of Bias = standard deviation of the per-class accuracies.
    AFR = mean of the average false-positive rate and the average
    false-negative rate across the 10 classes.

    Parameters:
        class_accuracy: array of per-class percentages (from class_wise_accuracy).
        models: a trained keras model exposing ``predict``.
    """
    dob = np.std(class_accuracy)
    print("Degree of Bias:", dob)
    (x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
    y_pred_test = models.predict(x_test)
    prediction_test = np.argmax(y_pred_test, axis=1)
    # NOTE: the redundant local `from sklearn.metrics import confusion_matrix`
    # was removed; the module already imports it at the top of the file.
    confuse_matrix = confusion_matrix(y_test_without_one_hot, prediction_test)
    print("confusion_matrix:\n", confuse_matrix)
    # Per-class counts derived from the confusion matrix
    # (rows = true labels, columns = predicted labels).
    FP = confuse_matrix.sum(axis=0) - np.diag(confuse_matrix)
    FN = confuse_matrix.sum(axis=1) - np.diag(confuse_matrix)
    TP = np.diag(confuse_matrix)
    TN = confuse_matrix.sum() - (FP + FN + TP)
    FP = FP.astype(float)
    TP = TP.astype(float)
    FN = FN.astype(float)
    TN = TN.astype(float)
    FNR = FN / (TP + FN)
    # BUG FIX: FPR was computed as FP/(TN+FN); the false-positive rate
    # is FP/(FP+TN).
    FPR = FP / (TN + FP)
    print("FPR:", FPR)
    print("FNR:", FNR)
    AFR = ((FPR.sum() / 10) + (FNR.sum() / 10)) / 2
    print("AFR:", AFR)
# In[115]:
# brute model
# BUG FIX: a stray cell-level call to
# test_brute_model_on_gray_scale_test_images(brute_model) ran BEFORE
# brute_model was defined (NameError) and has been removed; "/n" was a
# typo for the "\n" newline escape.
print("\nbrute model\n")
brute_model = brute_vgg16()
test_brute_model_on_gray_scale_test_images(brute_model)
class_accuracy_brute_model = class_wise_accuracy(brute_model)
bias_metrics(class_accuracy_brute_model, brute_model)
# In[40]:
def create_results(brute_model):
    """Dump test-set predictions to CSV and sample images to disk.

    Writes y_test_prediction_test.csv with true/predicted labels, then
    saves the first two correctly and first two incorrectly classified
    test images as JPEG files named after their true label.
    """
    (x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
    y_train = to_categorical(y_train_without_one_hot)
    y_test = to_categorical(y_test_without_one_hot)

    prediction_test = np.argmax(brute_model.predict(x_test), axis=1)
    stacked = np.hstack((y_test_without_one_hot, prediction_test.reshape(-1, 1)))
    df = pd.DataFrame(stacked, columns=['y_test', 'y_test_pred'], index=None)
    print(df.head())
    df.to_csv("y_test_prediction_test.csv", index=False)

    correct_idxes = [i for i in range(len(prediction_test))
                     if y_test_without_one_hot[i] == prediction_test[i]]
    incorrect_idxes = [i for i in range(len(prediction_test))
                       if y_test_without_one_hot[i] != prediction_test[i]]

    # Save the first two hits and the first two misses, named by true label.
    for idx in correct_idxes[:2]:
        cv2.imwrite("correct" + str(int(y_test_without_one_hot[idx][0])) + ".jpg", x_test[idx])
    for idx in incorrect_idxes[:2]:
        cv2.imwrite("incorrect" + str(int(y_test_without_one_hot[idx][0])) + ".jpg", x_test[idx])
# In[68]:
class GradCAM:
    """Grad-CAM saliency-map generator for a Keras classification model.

    Produces a heatmap of the spatial regions most responsible for the
    model's top prediction, from the gradients of the class score with
    respect to a chosen convolutional layer's output.
    """

    def __init__(self, model, classIdx, layerName=None):
        # model: trained keras Model.
        # classIdx: target class index (NOTE(review): stored but
        #   compute_heatmap uses the argmax of the prediction instead —
        #   confirm whether classIdx should be honoured).
        # layerName: conv layer to inspect; auto-detected when None.
        self.model = model
        self.classIdx = classIdx
        self.layerName = layerName
        if self.layerName is None:
            self.layerName = self.find_target_layer()

    def find_target_layer(self):
        """Return the name of the last layer with a 4D output (a feature map)."""
        for layer in reversed(self.model.layers):
            if len(layer.output_shape) == 4:
                return layer.name
        raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")

    def compute_heatmap(self, image, eps=1e-8):
        """Return a uint8 heatmap (H x W of `image`) for the top predicted class.

        `image` is a batched tensor of shape (1, H, W, C); `eps` guards
        against division by zero during min-max normalisation.
        """
        # Auxiliary model exposing both the conv feature maps and the output.
        gradModel = Model(inputs=[self.model.inputs], outputs=[self.model.get_layer(self.layerName).output, self.model.output])
        with tf.GradientTape() as tape:
            inputs = tf.cast(image, tf.float32)
            (convOutputs, predictions) = gradModel(inputs)
            # Score of the highest-probability class.
            loss = predictions[:, tf.argmax(predictions[0])]
        grads = tape.gradient(loss, convOutputs)
        # "Guided" gradients: keep contributions where both the activation
        # and its gradient are positive.
        castConvOutputs = tf.cast(convOutputs > 0, "float32")
        castGrads = tf.cast(grads > 0, "float32")
        guidedGrads = castConvOutputs * castGrads * grads
        convOutputs = convOutputs[0]
        guidedGrads = guidedGrads[0]
        # Channel weights = spatial mean of the guided gradients; CAM is the
        # weighted sum of the feature maps.
        weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
        cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
        # Resize to the input resolution and min-max scale into [0, 255].
        (w, h) = (image.shape[2], image.shape[1])
        heatmap = cv2.resize(cam.numpy(), (w, h))
        numer = heatmap - np.min(heatmap)
        denom = (heatmap.max() - heatmap.min()) + eps
        heatmap = numer / denom
        heatmap = (heatmap * 255).astype("uint8")
        return heatmap

    def overlay_heatmap(self, heatmap, image, alpha=0.5, colormap=cv2.COLORMAP_VIRIDIS):
        """Colour-map `heatmap` and alpha-blend it over `image`; returns (heatmap, blend)."""
        heatmap = cv2.applyColorMap(heatmap, colormap)
        output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
        return (heatmap, output)
def make_gradCAM(img_path, brute_model, classified, layer_name="block5_conv3"):
    """Run GradCAM for one image file and save/show heatmap, image, overlay.

    The figure is saved as "GradCAM_<classified><last filename digit>.jpg".
    """
    img = cv2.resize(cv2.imread(img_path), (32, 32))
    batch = np.expand_dims(img, axis=0)
    predicted_class = np.argmax(brute_model.predict(batch)[0])

    cam = GradCAM(brute_model, predicted_class, layer_name)
    heatmap = cv2.resize(cam.compute_heatmap(batch), (32, 32))
    heatmap, overlay = cam.overlay_heatmap(heatmap, img, alpha=0.5)

    fig, ax = plt.subplots(1, 3)
    ax[0].imshow(heatmap)
    ax[1].imshow(img)
    ax[2].imshow(overlay)
    plt.savefig("GradCAM_" + str(classified) + str(img_path[-5]) + ".jpg")
    plt.show()
    plt.close()
# Visualise GradCAM at two conv depths for two correct and two incorrect
# sample classifications.
layer_names = ["block5_conv3", "block4_conv2"]
for l in layer_names:
    print("layer name:", l)
    for path, verdict in [
        ("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg", "correct"),
        ("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg", "correct"),
        ("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg", "incorrect"),
        ("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg", "incorrect"),
    ]:
        make_gradCAM(path, brute_model, classified=verdict, layer_name=l)
# In[161]:
def grad_cam_pp(model, img, layer_name="block5_conv3", label_name=None, category_id=None):
    """Compute a Grad-CAM++ heatmap for one image.

    Parameters:
        model: trained keras Model.
        img: single image array (H, W, C); batched internally.
        layer_name: convolutional layer whose activations are weighted.
        label_name: unused here (kept for call-site compatibility).
        category_id: class index to explain; defaults to the argmax prediction.

    Returns:
        2D heatmap normalised to [0, 1] at the conv layer's spatial size.
    """
    img_tensor = np.expand_dims(img, axis=0)
    conv_layer = model.get_layer(layer_name)
    heatmap_model = Model([model.inputs], [conv_layer.output, model.output])
    # Three nested tapes give the first, second and third derivatives of the
    # class score w.r.t. the conv activations (Grad-CAM++ needs all three).
    with tf.GradientTape() as gtape1:
        with tf.GradientTape() as gtape2:
            with tf.GradientTape() as gtape3:
                conv_output, predictions = heatmap_model(img_tensor)
                if category_id==None:
                    category_id = np.argmax(predictions[0])
                output = predictions[:, category_id]
                conv_first_grad = gtape3.gradient(output, conv_output)
            conv_second_grad = gtape2.gradient(conv_first_grad, conv_output)
        conv_third_grad = gtape1.gradient(conv_second_grad, conv_output)
    # Grad-CAM++ alpha coefficients (per-location, per-channel weights).
    global_sum = np.sum(conv_output, axis=(0, 1, 2))
    alpha_num = conv_second_grad[0]
    alpha_denom = conv_second_grad[0]*2.0 + conv_third_grad[0]*global_sum
    # Guard against division by zero.
    alpha_denom = np.where(alpha_denom != 0.0, alpha_denom, 1e-10)
    alphas = alpha_num/alpha_denom
    alpha_normalization_constant = np.sum(alphas, axis=(0,1))
    alphas /= alpha_normalization_constant
    # Channel weights: alpha-weighted sum of the positive first gradients.
    weights = np.maximum(conv_first_grad[0], 0.0)
    deep_linearization_weights = np.sum(weights*alphas, axis=(0,1))
    grad_CAM_map = np.sum(deep_linearization_weights*conv_output[0], axis=2)
    # ReLU then scale to [0, 1] by the max activation.
    heatmap = np.maximum(grad_CAM_map, 0)
    max_heat = np.max(heatmap)
    if max_heat == 0:
        max_heat = 1e-10
    heatmap /= max_heat
    return heatmap
def superimpose(img, cam):
    """Blend a CAM heatmap 50/50 onto `img`.

    Returns (original image, RGB colour-mapped heatmap, blended uint8 image).
    """
    resized_cam = cv2.resize(cam, (img.shape[1], img.shape[0]))
    heatmap = cv2.applyColorMap(np.uint8(255 * resized_cam), cv2.COLORMAP_JET)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
    blended = np.minimum(heatmap * .5 + img * .5, 255.0).astype(np.uint8)
    return img, heatmap, blended
def plot(img, cam):
    """Show the original image, its CAM heatmap and the overlay side by side."""
    img = cv2.resize(img, (32, 32))
    img, heatmap, superimposed_img = superimpose(img, cam)
    fig, axs = plt.subplots(ncols=3, figsize=(9, 4))
    panels = [
        (img, 'original image'),
        (heatmap, 'heatmap'),
        (superimposed_img, 'superimposed image'),
    ]
    for ax, (panel, title) in zip(axs, panels):
        ax.imshow(panel)
        ax.set_title(title)
        ax.axis('off')
    plt.show()
    plt.close()
# BUG FIX: `labels` was passed to grad_cam_pp but never defined at module
# level (it only existed inside functions), so this cell raised NameError.
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
layer_names = ["block5_conv3", "block4_conv2"]
for l in layer_names:
    print("layer name:", l)
    img_paths = [
        "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg",
        "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg",
        "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg",
        "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg",
    ]
    for img_path in img_paths:
        img = cv2.imread(img_path)
        # img_path[-5] is the predicted-class digit embedded in the filename.
        cam = grad_cam_pp(brute_model, img, layer_name=l, label_name=labels, category_id=int(img_path[-5]))
        plot(img, cam)
# In[162]:
def preprocessed_data_model():
    """Train VGG16 on CIFAR-10 with standardisation and flip/rotation augmentation.

    The training stream is featurewise-centred/std-normalised and augmented
    with horizontal flips and +/-20 degree rotations; validation and test
    streams are standardised only. Saves the model to
    "vgg16_cifar10_preprocessed_rot_new" and writes loss/accuracy plots.

    Returns:
        The trained keras ``Model``.
    """
    (x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
    y_train = to_categorical(y_train_without_one_hot)
    y_test = to_categorical(y_test_without_one_hot)
    x_train, x_valid, y_train, y_valid = train_test_split(
        x_train, y_train, test_size=0.2, shuffle=True, random_state=42)
    print(x_train.shape)
    print(y_train.shape)
    print(x_test.shape)
    print(y_test.shape)
    print(x_valid.shape)
    print(y_valid.shape)

    # Augment only the training stream; valid/test get standardisation only.
    train_datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True, horizontal_flip=True, rotation_range=20)
    train_datagen.fit(x_train)
    valid_datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
    valid_datagen.fit(x_valid)
    test_datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
    test_datagen.fit(x_test)
    train_generator = train_datagen.flow(x_train, y_train, batch_size=16)
    valid_generator = valid_datagen.flow(x_valid, y_valid, batch_size=16)
    test_generator = test_datagen.flow(x_test, y_test, batch_size=16)

    vgg16 = VGG16(include_top=True, weights=None, input_shape=(32, 32, 3))
    out = Dense(10, activation='softmax', name='fc3')(vgg16.get_layer('fc2').output)
    preprocessed_model = Model(inputs=vgg16.input, outputs=out)
    epochs = 20
    learning_rate = 0.1
    decay_rate = learning_rate / epochs
    sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    # BUG FIX: previously compiled with the string 'sgd', which silently
    # discarded the learning-rate/decay/momentum configured above.
    preprocessed_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    history = preprocessed_model.fit(x=train_generator, steps_per_epoch=len(train_generator), validation_data=valid_generator, validation_steps=len(valid_generator), epochs=epochs)

    # model evaluation
    _, test_accuracy = preprocessed_model.evaluate_generator(test_generator, steps=len(test_generator), verbose=0)
    print("test accuracy:", test_accuracy)
    # Rebuild fresh generators so evaluation restarts from the first batch.
    train_datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True, horizontal_flip=True, rotation_range=20)
    train_datagen.fit(x_train)
    test_datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
    test_datagen.fit(x_test)
    train_generator = train_datagen.flow(x_train, y_train, batch_size=16)
    test_generator = test_datagen.flow(x_test, y_test, batch_size=16)
    _, train_accuracy = preprocessed_model.evaluate_generator(train_generator, steps=len(train_generator), verbose=0)
    print("train accuracy:", train_accuracy)
    y_pred_test = preprocessed_model.predict(x=test_generator, steps=len(test_generator))
    predictions_test = np.argmax(y_pred_test, axis=1)
    preprocessed_model.save("vgg16_cifar10_preprocessed_rot_new")

    plt.subplot(211)
    plt.title('Loss')
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    plt.savefig("loss_preprocess_flip_rot.png")
    plt.close()
    plt.subplot(212)
    plt.title('Accuracy')
    plt.plot(history.history['accuracy'], label='train')
    plt.plot(history.history['val_accuracy'], label='test')
    plt.legend()
    plt.savefig("accuracy_preprocess_flip_rot.png")
    plt.close()
    return preprocessed_model
# In[38]:
def preprocess_helper():
    """Materialise the standardised CIFAR-10 test set from its generator.

    Returns:
        (x_test_preprocessed, y_test_preprocessed) as numpy arrays, in the
        order the generator yields them.
    """
    (x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
    y_test = to_categorical(y_test_without_one_hot)
    test_datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
    test_datagen.fit(x_test)
    test_generator = test_datagen.flow(x_test, y_test, batch_size=16)

    images, targets = [], []
    for batch_idx in range(len(test_generator)):
        batch_x, batch_y = test_generator[batch_idx]
        images.extend(batch_x)
        targets.extend(batch_y)
    return np.array(images), np.array(targets)
def class_wise_accuracy_preprocess(models, x_test, y_test):
    """Print overall accuracy plus a per-class metric on a supplied test set.

    Parameters:
        models: a trained keras model exposing ``predict``.
        x_test: test images.
        y_test: one-hot test labels.

    Returns:
        np.ndarray of 10 per-class percentages. NOTE: the denominator is
        the confusion-matrix *column* sum (number of predictions for that
        class), so this is per-class precision, kept for backward
        compatibility with the original output.
    """
    labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    y_pred_test = models.predict(x_test)
    prediction_test = np.argmax(y_pred_test, axis=1)
    print("test accuracy:", accuracy_score(np.argmax(y_test, axis=1), prediction_test) * 100, "%")
    confus_matrix = confusion_matrix(np.argmax(y_test, axis=1), prediction_test)
    print("confusion_matrix:\n", confus_matrix)
    # IMPROVEMENT: the diagonal was extracted with an O(n^2) nested loop
    # (and the unused y_test_without_one_hot local was removed);
    # np.diag does it in one call.
    class_TP = np.diag(confus_matrix)
    class_accuracy = []
    for k in range(confus_matrix.shape[1]):
        ca = (class_TP[k] / confus_matrix[:, k].sum()) * 100
        class_accuracy.append(ca)
        print("class ", k, " accuracy ", labels[k], " :", ca, "%")
    return np.array(class_accuracy)
def bias_metrics_preprocess(class_accuracy,models,x_test,y_test):
dob = np.std(class_accuracy)
print("Degree of Bias:",dob)
y_test_without_one_hot = np.argmax(y_test,axis=1)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
from sklearn.metrics import confusion_matrix
confuse_matrix = confusion_matrix(y_test_without_one_hot, prediction_test)
print("confusion_matrix:\n",confuse_matrix)
FP = confuse_matrix.sum(axis=0) - np.diag(confuse_matrix)
FN = confuse_matrix.sum(axis=1) - np.diag(confuse_matrix)
TP = np.diag(confuse_matrix)
TN = confuse_matrix.sum() - (FP+FN+TP)
FP=FP.astype(float)
TP=TP.astype(float)
FN=FN.astype(float)
| |
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from train_steering_wheel.train import (
MODEL_HEIGHT as CNN_MODEL_HEIGHT,
MODEL_HEIGHT as CNN_MODEL_WIDTH,
ANGLE_BIN_SIZE as CNN_ANGLE_BIN_SIZE,
extract_steering_wheel_image as cnn_extract_steering_wheel_image,
downscale_image as cnn_downscale_image
)
from train_steering_wheel import models as cnn_models
from lib import util
from config import Config
from scipy import misc, ndimage
import cv2
import imgaug as ia
import numpy as np
from skimage.transform import hough_line, hough_line_peaks
from skimage import filters
from skimage import morphology
import math
import torch
#cv2.namedWindow("thresh", cv2.WINDOW_NORMAL)
STEERING_WHEEL_TRACKER_CNN_FP = os.path.join(Config.MAIN_DIR, "train_steering_wheel/steering_wheel.tar")
class SteeringWheelTrackerCNN(object):
    """Steering-wheel angle tracker backed by a CNN bin classifier.

    The CNN predicts a coarse angle bin for the wheel; this class converts
    the bin centre to degrees, resolves the +/-360 degree ambiguity by
    choosing the candidate closest to the previous frame's angle, and
    debounces sudden jumps with an overflow counter.
    """

    def __init__(self, default_angle=0):
        # Load the pretrained tracker checkpoint and move the model to the
        # configured GPU in eval mode.
        assert os.path.isfile(STEERING_WHEEL_TRACKER_CNN_FP)
        checkpoint = torch.load(STEERING_WHEEL_TRACKER_CNN_FP)
        self.model = cnn_models.SteeringWheelTrackerCNNModel()
        self.model.load_state_dict(checkpoint["tracker_cnn_state_dict"])
        self.model.cuda(Config.GPU)
        self.model.eval()
        self.default_angle = default_angle
        self.reset()

    def reset(self):
        """Discard all tracking state and fall back to the default angle."""
        self.last_match = None
        self.last_angle = self.default_angle
        self.last_angle_raw = self.default_angle
        # A jump of >= overflow_degrees is only accepted after it has been
        # seen overflow_max_count times in a row.
        self.overflow_degrees = 45
        self.overflow_max_count = 3
        self.overflow_counter = 0

    def estimate_angle(self, image):
        """Estimate the steering-wheel angle for one frame.

        Returns:
            (angle, angle_raw): the smoothed/unwrapped angle in degrees and
            the raw angle derived from the CNN's argmax bin (in [-180, 180)).
        """
        #from scipy import misc
        # Crop the wheel region and downscale to the CNN's input size.
        subimg = cnn_extract_steering_wheel_image(image)
        #misc.imshow(subimg)
        subimg = cnn_downscale_image(subimg)
        #misc.imshow(subimg)
        # Forward pass: softmax distribution over angle bins.
        angle_raw_bins = self.model.forward_image(subimg, volatile=True, requires_grad=False, gpu=Config.GPU, softmax=True)
        angle_raw_bins = angle_raw_bins.data[0].cpu().numpy()
        angle_raw_bin = np.argmax(angle_raw_bins)
        #print(angle_raw_bins.data.cpu().numpy())
        """
        angle_raw_center = angle_raw_bin * CNN_ANGLE_BIN_SIZE + CNN_ANGLE_BIN_SIZE * 0.5 - 180
        angle_raw_left = angle_raw_center - CNN_ANGLE_BIN_SIZE
        angle_raw_right = angle_raw_center + CNN_ANGLE_BIN_SIZE
        angle_raw_center_p = angle_raw_bins[angle_raw_bin]
        angle_raw_left_p = angle_raw_bins[angle_raw_bin-1] if angle_raw_bin-1 > 0 else 0
        angle_raw_right_p = angle_raw_bins[angle_raw_bin+1] if angle_raw_bin+1 < angle_raw_bins.shape[0] else 0
        angle_raw = angle_raw_left_p * angle_raw_left + angle_raw_center_p * angle_raw_center + angle_raw_right_p * angle_raw_right
        """
        # Centre of the winning bin, shifted into [-180, 180).
        angle_raw = angle_raw_bin * CNN_ANGLE_BIN_SIZE + CNN_ANGLE_BIN_SIZE * 0.5 - 180
        #print(angle_raw)
        # The wheel turns more than one revolution, so the raw angle is
        # ambiguous up to +/-360 degrees; enumerate the aliases.
        possible_angles = [angle_raw]
        if angle_raw < 0:
            possible_angles.append(180+(180-abs(angle_raw)))
            possible_angles.append(-360-abs(angle_raw))
        if angle_raw > 0:
            possible_angles.append(-180-(180-abs(angle_raw)))
            possible_angles.append(360+abs(angle_raw))
        # Pick the alias closest to the previous frame's angle.
        possible_angles_dist = [(a, abs(self.last_angle - a)) for a in possible_angles]
        possible_angles_dist_sort = sorted(possible_angles_dist, key=lambda t: t[1])
        angle = possible_angles_dist_sort[0][0]
        # Fold back into the configured wheel range by unwinding one turn.
        if angle > Config.STEERING_WHEEL_MAX:
            angle = angle - 360
        elif angle < Config.STEERING_WHEEL_MIN:
            angle = angle + 360
        # Debounce: a large jump is ignored until it persists for
        # overflow_max_count consecutive frames.
        if abs(angle - self.last_angle) >= self.overflow_degrees:
            if self.overflow_counter >= self.overflow_max_count:
                self.last_angle = angle
                self.last_angle_raw = angle_raw
                self.overflow_counter = 0
            else:
                angle = self.last_angle
                angle_raw = self.last_angle_raw
                self.overflow_counter += 1
        else:
            self.last_angle = angle
            self.last_angle_raw = angle_raw
            self.overflow_counter = 0
        return angle, angle_raw
class SteeringWheelTracker(object):
    """Steering-wheel angle tracker based on image-segment matching.

    Finds the wheel marker segment via estimate_by_last_match(), derives
    two candidate raw angles from the segment's end points (the marker
    orientation is ambiguous by 180 degrees), unwraps across +/-360
    degrees using the previous angle, and debounces sudden jumps.
    """

    def __init__(self, default_angle=0):
        self.default_angle = default_angle
        self.max_angle = Config.STEERING_WHEEL_MAX # wheel can be turned by roughly +/- 360+90 degrees, add 40deg tolerance
        self.reset()

    def reset(self):
        """Discard all tracking state and fall back to the default angle."""
        self.last_match = None
        self.last_angle = self.default_angle
        self.last_angle_raw1 = self.default_angle
        self.last_angle_raw2 = self.default_angle
        # A jump of >= overflow_degrees is only accepted after it has been
        # seen overflow_max_count times in a row.
        self.overflow_degrees = 45
        self.overflow_max_count = 3
        self.overflow_counter = 0

    def estimate_angle(self, image, visualize=False):
        """Estimate the steering-wheel angle for one frame.

        Returns:
            (angle, (raw1, raw2)) — the smoothed angle and both raw
            candidate angles; with visualize=True also the debug image.
        """
        if visualize:
            match, image_viz = estimate_by_last_match(image, last_match=self.last_match, visualize=visualize)
            #if match is None and self.last_match is not None:
            #    match, image_viz = estimate_by_last_match(image, last_match=None, visualize=visualize)
        else:
            match = estimate_by_last_match(image, last_match=self.last_match, visualize=visualize)
            #if match is None and self.last_match is not None:
            #    match = estimate_by_last_match(image, last_match=None, visualize=visualize)
        if match is None:
            #print("no match")
            # Lost the marker: drop all state and report the default angle.
            self.reset()
        else:
            # Angle between the x-axis and the marker's left->right vector,
            # and its 180-degree flipped counterpart (orientation is ambiguous).
            v1 = np.float32([1, 0])
            #print(match)
            v2a = np.float32([
                match["right_x"] - match["left_x"],
                match["right_y"] - match["left_y"]
            ])
            v2b = np.float32([
                match["left_x"] - match["right_x"],
                match["left_y"] - match["right_y"]
            ])
            angle_raw1 = get_angle(v1, v2a)
            angle_raw2 = get_angle(v1, v2b)
            #print("early angle_raw1", angle_raw1, "angle_raw2", angle_raw2)
            # Map from [0, 360) to [-180, 180).
            if angle_raw1 > 180:
                angle_raw1 = -(360 - angle_raw1)
            if angle_raw2 > 180:
                angle_raw2 = -(360 - angle_raw2)
            #distance_from_90 = (angle_raw1 % 90)
            #p_flipped = 1 - (min(distance_from_90, 90-distance_from_90) / 45)
            #flip_distance = min(abs(abs(angle_raw1) - 270), abs(abs(angle_raw1) - 90)) / 90
            #maxp = 0.95
            #p_flipped = np.clip(1 - flip_distance, 0, maxp)
            # maxp and p_flipped is legacy stuff, can be removed
            maxp = 0.95
            p_flipped = maxp
            # Candidate unwrapped angles: both raw angles and their +/-360 aliases.
            possible_angles = [
                (-360+angle_raw1, maxp),
                (-360+angle_raw2, p_flipped),
                (angle_raw1, maxp),
                (angle_raw2, p_flipped),
                (360+angle_raw1, maxp),
                (360+angle_raw2, p_flipped),
            ]
            possible_angles = [(r, p) for (r, p) in possible_angles if r < self.max_angle]
            # Rank by distance to the previous angle (the probability factor is
            # constant here since p_flipped == maxp — legacy weighting).
            possible_angles_dist = [(poss, abs(poss - self.last_angle), p) for (poss, p) in possible_angles]
            possible_angles_dist_sort = sorted(possible_angles_dist, key=lambda t: t[1]*(1-t[2]))
            angle = possible_angles_dist_sort[0][0]
            #print("angle_raw1 %.2f | angle_raw2 %.2f | after add %.2f | poss %s | poss sort %s" % (angle_raw1, angle_raw2, angle, str(possible_angles_dist), str(possible_angles_dist_sort)))
            #print("after add", angle)
            # Debounce: a large jump is ignored until it persists for
            # overflow_max_count consecutive frames.
            if abs(angle - self.last_angle) >= self.overflow_degrees:
                if self.overflow_counter >= self.overflow_max_count:
                    self.last_match = match
                    self.last_angle = angle
                    self.last_angle_raw1 = angle_raw1
                    self.last_angle_raw2 = angle_raw2
                    self.overflow_counter = 0
                else:
                    self.last_match = None
                    angle = self.last_angle
                    angle_raw1 = self.last_angle_raw1
                    angle_raw2 = self.last_angle_raw2
                    self.overflow_counter += 1
            else:
                self.last_match = match
                self.last_angle = angle
                self.last_angle_raw1 = angle_raw1
                self.last_angle_raw2 = angle_raw2
                self.overflow_counter = 0
        if visualize:
            return self.last_angle, (self.last_angle_raw1, self.last_angle_raw2), image_viz
        else:
            return self.last_angle, (self.last_angle_raw1, self.last_angle_raw2)
def get_angle(v1, v2):
    """Return the angle from vector `v1` to vector `v2` in degrees, in [0, 360)."""
    theta_delta = math.atan2(v2[1], v2[0]) - math.atan2(v1[1], v1[0])
    degrees = theta_delta * (180.0 / math.pi)
    return degrees + 360.0 if degrees < 0 else degrees
def estimate_by_last_match(image, last_match=None, visualize=False):
    """Locate the wheel marker in `image`, optionally seeded by a previous match.

    NOTE: restricting the search window / expected position to the previous
    match is currently disabled (see the commented-out code kept below for
    reference), so both branches run a full search.
    """
    if last_match is None:
        return estimate(image, visualize=visualize)
    #search_rect = (
    #    last_match["min_x"], last_match["min_y"],
    #    last_match["max_x"], last_match["max_y"]
    #)
    #expected_position = (last_match["center_x"], last_match["center_y"])
    return estimate(image, expected_position=None, search_rect=None, visualize=visualize)
#@profile
def estimate(image, expected_position=None, search_rect=None, search_rect_border=0.05, expected_pixels=(10, 200), optimal_size=90, visualize=False):
downscale_factor = 1 # legacy stuff
h, w = image.shape[0:2]
if search_rect is not None:
x1, y1, x2, y2 = search_rect[0], search_rect[1], search_rect[2], search_rect[3]
if search_rect_border > 0:
clip = np.clip
bx = int(w * search_rect_border)
by = int(h * search_rect_border)
x1 = clip(x1 - bx, 0, w-2)
y1 = clip(y1 - by, 0, h-2)
x2 = clip(x2 + bx, x1, w-1)
y2 = clip(y2 + by, y1, h-1)
else:
# full wheel: x1=440, x2=870, y1=440, y2=720
# wheel w=440, h=270
x1 = int(w * (480/1280))
x2 = int(w * (830/1280))
y1 = int(h * (520/720))
y2 = int(h * (720/720))
rect_h = y2 - y1
rect_w = x2 - x1
if expected_position is None:
expected_position = (
int(w * (646/1280)),
int(h * (684/720))
)
img_wheel = image[y1:y2+1, x1:x2+1, :]
img_wheel_rs = img_wheel
img_wheel_rsy = cv2.cvtColor(img_wheel_rs, cv2.COLOR_RGB2GRAY)
expected_position_rs = (
int((expected_position[0]-x1) * downscale_factor),
int((expected_position[1]-y1) * downscale_factor)
)
#thresh_mask = filters.threshold_li(img_wheel_rsy)
thresh_mask = filters.threshold_isodata(img_wheel_rsy)
thresh = img_wheel_rsy > thresh_mask #40
#cv2.imshow("thresh", thresh.astype(np.uint8)*255)
#cv2.waitKey(10)
thresh = morphology.binary_dilation(thresh, morphology.square(3))
img_labeled, num_labels = morphology.label(
thresh, background=0, connectivity=1, return_num=True
)
segments = []
for label in range(1, num_labels+1):
img_seg = (img_labeled == label)
(yy, xx) = np.nonzero(img_seg)
# size of correct segment is around 60 pixels without dilation and 90 with dilation
# position is at around x=21, y=13
# (both numbers for screenshots after jpg-compression/decompression at 1/4 the original
# size, i.e. 1280/4 x 720/4)
if expected_pixels[0] <= len(yy) <= expected_pixels[1]:
center_x = np.average(xx)
center_y = np.average(yy)
# euclidean distance to expected position
# segments which's center is at the expected position get a 0
# segments which a further away get higher values
dist_pos = 0.1 * math.sqrt((center_x - expected_position_rs[0]) ** 2 + (center_y - expected_position_rs[1])**2)
# distance to optimal size (number of pixels)
# segments that have the same number of pixels as the expected size
# get a 0, segments with 50pecent more/less pixels get a 0
dist_size = np.clip(
1/(optimal_size*0.5) * abs(len(yy) - optimal_size),
0, 1
)
dist = dist_pos + dist_size
segments.append({
"xx": xx,
"yy": yy,
"center_x": center_x,
"center_y": center_y,
"dist_pos": dist_pos,
"dist_size": dist_size,
"dist": dist,
"img_seg": img_seg
})
if len(segments) == 0:
return (None, None) if visualize else None
segments = sorted(segments, key=lambda d: d["dist"])
best_match = segments[0]
xx = x1 + (best_match["xx"].astype(np.float32) * (1/downscale_factor)).astype(np.int32)
yy = y1 + (best_match["yy"].astype(np.float32) * (1/downscale_factor)).astype(np.int32)
image_segment = best_match["img_seg"]
image_segment = morphology.binary_erosion(image_segment, morphology.square(3))
cx, cy = int(best_match["center_x"]), int(best_match["center_y"])
sy, sx = 10, 10
hx1 = np.clip(cx - sx, 0, image_segment.shape[1])
hx2 = np.clip(cx + sx + 1, 0, image_segment.shape[1])
hy1 = np.clip(cy - sy, 0, image_segment.shape[0])
hy2 = np.clip(cy + sy + 1, 0, image_segment.shape[0])
hough_segment = image_segment[hy1:hy2, hx1:hx2]
h, theta, d = hough_line(hough_segment)
if len(h) == 0:
return (None, None) if visualize else None
hspaces, angles, dists = hough_line_peaks(h, theta, d, num_peaks=1)
if len(hspaces) == 0:
return (None, None) if visualize else None
hspace, angle, dist = hspaces[0], angles[0], dists[0]
line_y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
line_y1 = (dist - hough_segment.shape[1] * np.cos(angle)) / np.sin(angle)
slope = (line_y1 - line_y0) / (hx2 - hx1)
left_x = cx - 3
right_x = cx + 3
left_y = cy + (-3) * slope
right_y = cy + 3 * slope
#print("x1 %d x2 %d y1 %d y2 %d | cx %d cy %d | hx1 %d hx2 %d hy1 %d hy2 %d | line_y0 %.2f line_y1 %.2f | left_x %d | |
# Source: slayer96/keepa
"""Interface module to download Amazon product and history data from
keepa.com
"""
from tqdm import tqdm
import aiohttp
import asyncio
import datetime
import json
import logging
import numpy as np
import pandas as pd
import time
from keepa.query_keys import DEAL_REQUEST_KEYS, PRODUCT_REQUEST_KEYS
log = logging.getLogger(__name__)
log.setLevel('DEBUG')
# hardcoded ordinal time from
KEEPA_ST_ORDINAL = np.datetime64('2011-01-01')
# Request limit
REQUEST_LIMIT = 100
# Status code dictionary/key
SCODES = {'400': 'REQUEST_REJECTED',
'402': 'PAYMENT_REQUIRED',
'405': 'METHOD_NOT_ALLOWED',
'429': 'NOT_ENOUGH_TOKEN'}
# domain codes
# Valid values: [ 1: com | 2: co.uk | 3: de | 4: fr | 5:
# co.jp | 6: ca | 7: cn | 8: it | 9: es | 10: in | 11: com.mx ]
DCODES = ['RESERVED', 'US', 'GB', 'DE', 'FR', 'JP', 'CA', 'CN', 'IT', 'ES',
'IN', 'MX']
# csv indices. used when parsing csv and stats fields.
# https://github.com/keepacom/api_backend
# see api_backend/src/main/java/com/keepa/api/backend/structs/Product.java
# [index in csv, key name, isfloat(is price or rating)]
csv_indices = [[0, 'AMAZON', True],
[1, 'NEW', True],
[2, 'USED', True],
[3, 'SALES', False],
[4, 'LISTPRICE', True],
[5, 'COLLECTIBLE', True],
[6, 'REFURBISHED', True],
[7, 'NEW_FBM_SHIPPING', True],
[8, 'LIGHTNING_DEAL', True],
[9, 'WAREHOUSE', True],
[10, 'NEW_FBA', True],
[11, 'COUNT_NEW', False],
[12, 'COUNT_USED', False],
[13, 'COUNT_REFURBISHED', False],
[14, 'CollectableOffers', False],
[15, 'EXTRA_INFO_UPDATES', False],
[16, 'RATING', True],
[17, 'COUNT_REVIEWS', False],
[18, 'BUY_BOX_SHIPPING', True],
[19, 'USED_NEW_SHIPPING', True],
[20, 'USED_VERY_GOOD_SHIPPING', True],
[21, 'USED_GOOD_SHIPPING', True],
[22, 'USED_ACCEPTABLE_SHIPPING', True],
[23, 'COLLECTIBLE_NEW_SHIPPING', True],
[24, 'COLLECTIBLE_VERY_GOOD_SHIPPING', True],
[25, 'COLLECTIBLE_GOOD_SHIPPING', True],
[26, 'COLLECTIBLE_ACCEPTABLE_SHIPPING', True],
[27, 'REFURBISHED_SHIPPING', True],
[28, 'EBAY_NEW_SHIPPING', True],
[29, 'EBAY_USED_SHIPPING', True],
[30, 'TRADE_IN', True],
[31, 'RENT', False]]
def _parse_stats(stats, to_datetime):
    """Parse a keepa ``stats`` response dict into normalized Python values.

    Negative integer sentinels are dropped, prices are converted from
    integer cents to float currency units, ratings are rescaled, and
    keepa-minute timestamps are converted via ``keepa_minutes_to_time``.

    :param stats: raw ``stats`` dict from the keepa API response
    :param to_datetime: when True, keepa minutes become ``datetime`` values
    :return: dict of parsed stats; entries with no usable data are omitted
    """
    stats_parsed = {}
    for stat_key, stat_value in stats.items():
        # -1 or -2 means not exist. 0 doesn't mean not exist.
        if isinstance(stat_value, int) and stat_value < 0:
            stat_value = None
        if stat_value is not None:
            if stat_key == 'lastOffersUpdate':
                # Single keepa-minutes timestamp.
                stats_parsed[stat_key] = keepa_minutes_to_time([stat_value], to_datetime)[0]
            elif isinstance(stat_value, list) and len(stat_value) > 0:
                stat_value_dict = {}
                # Some stats are flat [value, ...] lists indexed by csv_indices;
                # others hold [keepa_minute, value] pairs. If any element is a
                # list, the whole stat uses the (time, value) pair form.
                convert_time_in_value_pair = any(map(lambda v: v is not None and isinstance(v, list), stat_value))
                for ind, key, isfloat in csv_indices:
                    # Guard against short lists: missing indices read as None.
                    stat_value_item = stat_value[ind] if ind < len(stat_value) else None
                    # Closure over the current `isfloat`/`key`; defined and
                    # called within the same iteration, so the late-binding
                    # pitfall does not apply here.
                    def normalize_value(v):
                        if v < 0:
                            return None  # negative sentinel -> no data
                        if isfloat:
                            v = float(v) / 100  # integer cents -> currency units
                            if key == 'RATING':
                                v = v * 10  # stored as 0-5.0 here; API uses 0-50
                        return v
                    if stat_value_item is not None:
                        if convert_time_in_value_pair:
                            stat_value_time, stat_value_item = stat_value_item
                            stat_value_item = normalize_value(stat_value_item)
                            if stat_value_item is not None:
                                stat_value_time = keepa_minutes_to_time([stat_value_time], to_datetime)[0]
                                stat_value_item = (stat_value_time, stat_value_item)
                        else:
                            stat_value_item = normalize_value(stat_value_item)
                        if stat_value_item is not None:
                            stat_value_dict[key] = stat_value_item
                if len(stat_value_dict) > 0:
                    stats_parsed[stat_key] = stat_value_dict
            else:
                # Scalar (non-negative) value: pass through unchanged.
                stats_parsed[stat_key] = stat_value
    return stats_parsed
_seller_time_data_keys = ['trackedSince', 'lastUpdate']
def _parse_seller(seller_raw_response, to_datetime):
    """Normalize a raw seller response into a dict keyed by sellerId.

    Keepa-minute fields listed in ``_seller_time_data_keys`` are converted
    to time values in place on each seller record.
    """
    sellers = list(seller_raw_response.values())
    for seller in sellers:
        converted = {}
        for time_key in _seller_time_data_keys:
            raw_minutes = seller.get(time_key, None)
            if raw_minutes is not None:
                converted[time_key] = keepa_minutes_to_time([raw_minutes], to_datetime)[0]
        seller.update(converted)
    return {seller['sellerId']: seller for seller in sellers}
def parse_csv(csv, to_datetime=True, out_of_stock_as_nan=True):
    """Parses csv list from keepa into a python dictionary.

    Parameters
    ----------
    csv : list
        csv list from keepa

    to_datetime : bool, optional
        Modifies numpy minutes to datetime.datetime values.
        Default True.

    out_of_stock_as_nan : bool, optional
        When True, prices are NAN when price category is out of stock.
        When False, prices are -0.01
        Default True

    Returns
    -------
    product_data : dict
        Dictionary containing the following fields with timestamps:

        AMAZON: Amazon price history

        NEW: Marketplace/3rd party New price history - Amazon is
            considered to be part of the marketplace as well, so if
            Amazon has the overall lowest new (!) price, the
            marketplace new price in the corresponding time interval
            will be identical to the Amazon price (except if there is
            only one marketplace offer). Shipping and Handling costs
            not included!

        USED: Marketplace/3rd party Used price history

        SALES: Sales Rank history. Not every product has a Sales Rank.

        LISTPRICE: List Price history

        COLLECTIBLE: Collectible Price history

        REFURBISHED: Refurbished Price history

        NEW_FBM_SHIPPING: 3rd party (not including Amazon) New price
            history including shipping costs, only fulfilled by
            merchant (FBM).

        LIGHTNING_DEAL: 3rd party (not including Amazon) New price
            history including shipping costs, only fulfilled by
            merchant (FBM).

        WAREHOUSE: Amazon Warehouse Deals price history. Mostly of
            used condition, rarely new.

        NEW_FBA: Price history of the lowest 3rd party (not
            including Amazon/Warehouse) New offer that is fulfilled
            by Amazon

        COUNT_NEW: New offer count history

        COUNT_USED: Used offer count history

        COUNT_REFURBISHED: Refurbished offer count history

        COUNT_COLLECTIBLE: Collectible offer count history

        RATING: The product's rating history. A rating is an
            integer from 0 to 50 (e.g. 45 = 4.5 stars)

        COUNT_REVIEWS: The product's review count history.

        BUY_BOX_SHIPPING: The price history of the buy box. If no
            offer qualified for the buy box the price has the value
            -1. Including shipping costs. The ``buybox`` parameter
            must be True for this field to be in the data.

        USED_NEW_SHIPPING: "Used - Like New" price history
            including shipping costs.

        USED_VERY_GOOD_SHIPPING: "Used - Very Good" price history
            including shipping costs.

        USED_GOOD_SHIPPING: "Used - Good" price history including
            shipping costs.

        USED_ACCEPTABLE_SHIPPING: "Used - Acceptable" price history
            including shipping costs.

        COLLECTIBLE_NEW_SHIPPING: "Collectible - Like New" price
            history including shipping costs.

        COLLECTIBLE_VERY_GOOD_SHIPPING: "Collectible - Very Good"
            price history including shipping costs.

        COLLECTIBLE_GOOD_SHIPPING: "Collectible - Good" price
            history including shipping costs.

        COLLECTIBLE_ACCEPTABLE_SHIPPING: "Collectible - Acceptable"
            price history including shipping costs.

        REFURBISHED_SHIPPING: Refurbished price history including
            shipping costs.

        TRADE_IN: The trade in price history. Amazon trade-in is
            not available for every locale.

        RENT: Rental price history. Requires use of the rental
            and offers parameter. Amazon Rental is only available
            for Amazon US.

    Notes
    -----
    Negative prices are keepa sentinels for "no offer / out of stock";
    they are converted to ``nan`` when ``out_of_stock_as_nan`` is True.
    """
    product_data = {}

    for ind, key, isfloat in csv_indices:
        if csv[ind]:  # skip histories with no data
            if 'SHIPPING' in key:  # shipping price is included
                # Data goes [time0, value0, shipping0, time1, value1,
                # shipping1, ...]
                times = csv[ind][::3]
                values = np.array(csv[ind][1::3])
                values += np.array(csv[ind][2::3])
            else:
                # Data goes [time0, value0, time1, value1, ...]
                times = csv[ind][::2]
                values = np.array(csv[ind][1::2])

            # Convert integer cents to float price if applicable.
            if isfloat:
                nan_mask = values < 0
                # BUGFIX: np.float was removed in NumPy 1.24; the builtin
                # float gives the identical float64 dtype.
                values = values.astype(float) / 100
                if out_of_stock_as_nan:
                    values[nan_mask] = np.nan

                if key == 'RATING':
                    values *= 10

            timeval = keepa_minutes_to_time(times, to_datetime)
            product_data['%s_time' % key] = timeval
            product_data[key] = values

            # combine time and value into a data frame using time as index
            product_data['df_%s' % key] = pd.DataFrame({'value': values}, index=timeval)

    return product_data
def format_items(items):
    """Validate input items and format them as a unique numpy array.

    :param items: a single item (str) or a collection (list, tuple or
        numpy.ndarray) of items
    :return: numpy array of items; collections are deduplicated and sorted
        by ``np.unique``
    :raises TypeError: if ``items`` is of an unsupported type (previously
        this case silently returned ``None``)
    """
    if isinstance(items, (list, tuple, np.ndarray)):
        return np.unique(items)
    elif isinstance(items, str):
        return np.asarray([items])
    raise TypeError('Expected "items" to be a str, list, tuple or '
                    'numpy.ndarray, got %s' % type(items).__name__)
class AsyncKeepa():
"""Class to support a Python interface to keepa server.
Initializes API with access key. Access key can be obtained by
signing up for a reoccurring or one time plan at:
https://keepa.com/#!api
Parameters
----------
accesskey : str
64 character access key string.
Examples
--------
Create the api object
>>> import keepa
>>> mykey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
>>> api = await keepa.AsyncKeepa.create(mykey)
Request data from two ASINs
>>> products = await api.query(['0439064872', '1426208081'])
Print item details
>>> print('Item 1')
>>> print('\t ASIN: {:s}'.format(products[0]['asin']))
>>> print('\t Title: {:s}'.format(products[0]['title']))
Print item price
>>> usedprice = products[0]['data']['MarketplaceUsed']
>>> usedtimes = products[0]['data']['MarketplaceUsed_time']
>>> print('\t Used price: ${:.2f}'.format(usedprice[-1]))
>>> print('\t as of: {:s}'.format(str(usedtimes[-1])))
"""
@classmethod
async def create(cls, accesskey):
self = AsyncKeepa()
self.accesskey = accesskey
self.status = None
self.tokens_left = 0
# Store user's available tokens
log.info('Connecting to keepa using key ending in %s' % accesskey[-6:])
await self.update_status()
log.info('%d tokens remain' % self.tokens_left)
return self
@property
def time_to_refill(self):
""" Returns the time to refill in seconds """
# Get current timestamp in miliseconds from unix epoch
now = int(time.time() * 1000)
timeatrefile = self.status['timestamp'] + self.status['refillIn']
# wait plus one second fudge factor
timetorefil = timeatrefile - now + 1000
if timetorefil < 0:
timetorefil = 0
# Account for negative tokens left
if self.tokens_left < 0:
timetorefil += (abs(self.tokens_left) / self.status['refillRate']) * 60000
# Return value in seconds
return timetorefil / 1000.0
    async def update_status(self):
        """Refresh ``self.status`` from the 'token' endpoint.

        The response also carries 'timestamp', 'refillIn' and 'refillRate'
        consumed by :attr:`time_to_refill`.
        """
        # NOTE(review): wait=False presumably asks _request not to block
        # waiting for tokens -- confirm against _request's implementation.
        self.status = await self._request('token', {'key': self.accesskey}, wait=False)
async def wait_for_tokens(self):
"""Checks any remaining tokens and | |
Node category (ACTIVE|INACTIVE|MIGRATION|TEST|DUPLICATE|INTEGRATED) (optional)
:param configuration: The configuration as a JSON hash of key values pairs (optional)
:param ctc_bucket: Name of the S3 bucket to use for a cloud to cloud gateway (optional)
:param facility_contact: Name of the facility contact (optional)
:param facility_contact_title: Title of the facility contact (optional)
:param facility_email: Email of the facility contact (optional)
:param facility_name: Name of the facility it is installed at (optional)
:param facility_notes: Notes about the facility (optional)
:param facility_zip: Zip code of the facility it is installed at (optional)
:param is_public: Flag if the node is public (optional)
:param monitor_email: Email address(es) to send monitor failure notices (optional)
:param monitor_node_last_send: Check if the node has sent a study recently (optional)
:param monitor_node_last_send_threshold: Threshold in minutes for triggering the monitor_node_last_send notification (optional)
:param monitor_node_ping: Check if the node is pinging (optional)
:param monitor_node_slow_push: Check if the node is pushing slowly (optional)
:param monitor_node_slow_push_threshold: Threshold in minutes for triggering the monitor_node_slow_push notification (optional)
:param monitor_study_create: Check if the node is sending studies normally (optional)
:param monitor_study_create_threshold: Threshold in minutes for triggering the monitor_study_create notification (optional)
:param name: Description of the node (optional)
:param reload_configuration: If this flag is set the node will be instructed to reload it's configuration on the next ping (optional)
:param serial_no: serial_no
:param setting_param: Set an individual setting. This is an alternative to the settings hash for easier use in the API tester (optional)
:param settings: A hash of the account settings that the node can override (optional)
:param storage_namespace: Namespace uuid to attach the node to. This requires a sysadmin sid and must be within the same account (optional)
:param warning_email: Email address(es) to send warning notices (optional)
"""
request_data = {
'category': category,
'configuration': configuration,
'ctc_bucket': ctc_bucket,
'facility_contact': facility_contact,
'facility_contact_title': facility_contact_title,
'facility_email': facility_email,
'facility_name': facility_name,
'facility_notes': facility_notes,
'facility_zip': facility_zip,
'is_public': is_public,
'monitor_email': monitor_email,
'monitor_node_last_send': monitor_node_last_send,
'monitor_node_last_send_threshold': monitor_node_last_send_threshold,
'monitor_node_ping': monitor_node_ping,
'monitor_node_slow_push': monitor_node_slow_push,
'monitor_node_slow_push_threshold': monitor_node_slow_push_threshold,
'monitor_study_create': monitor_study_create,
'monitor_study_create_threshold': monitor_study_create_threshold,
'name': name,
'reload_configuration': reload_configuration,
'serial_no': serial_no,
'settings': settings,
'storage_namespace': storage_namespace,
'uuid': uuid,
'warning_email': warning_email,
}
if setting_param is not None:
setting_param_dict = {'{prefix}{k}'.format(prefix='setting_', k=k): v for k,v in setting_param.items()}
request_data.update(setting_param_dict)
errors_mapping = {}
errors_mapping[('INVALID_CONFIGURATION', None)] = InvalidConfiguration('An invalid combination of configuration options was set. The error_subtype will hold more detail')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to edit this node')
errors_mapping[('NO_NODE_OVERRIDE', None)] = NoNodeOverride('The setting does not allow a node override')
query_data = {
'api': self._api,
'url': '/node/set',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def get(
self,
uuid,
serial_no=None,
):
"""Get.
:param uuid: The node id
:param serial_no: serial_no
"""
request_data = {
'serial_no': serial_no,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node can not be found')
query_data = {
'api': self._api,
'url': '/node/get',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def ping(
self,
ack,
serial_no,
uuid,
):
"""Ping.
:param ack: Flag if the gateway wants to use the acknowledge workflow
:param serial_no: The serial number of the node
:param uuid: The node id
"""
request_data = {
'ack': ack,
'serial_no': serial_no,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node can not be found')
query_data = {
'api': self._api,
'url': '/node/ping',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return AsyncQueryO(**query_data)
def ping_ack(
self,
serial_no,
uuid,
):
"""Ping ack.
:param serial_no: The serial number of the node
:param uuid: The node id
"""
request_data = {
'serial_no': serial_no,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node can not be found')
query_data = {
'api': self._api,
'url': '/node/ping/ack',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return AsyncQueryO(**query_data)
def deliver(
self,
serial_no,
status,
uuid,
ack=None,
destination_id=None,
email_reason=None,
is_local=None,
job_id=None,
status_reason=None,
study_uid=None,
):
"""Deliver.
:param serial_no: The serial number of the node
:param status: Status code of the job (S|F|P|B|U) - Success, failure, partial transfer, blocked or uncached
:param uuid: The node id
:param ack: The HL7 ACK if this was an HL7 job (optional)
:param destination_id: The uuid of the destination, required for local pushes (optional)
:param email_reason: Email the user this reason for the status change (optional)
:param is_local: The flag used to indicate the local push (optional)
:param job_id: The uuid of the push job, not used for local pushes (optional)
:param status_reason: Detail on the status change (optional)
:param study_uid: The study uid of the local push, required for local pushes only (optional)
"""
request_data = {
'ack': ack,
'destination_id': destination_id,
'email_reason': email_reason,
'is_local': is_local,
'job_id': job_id,
'serial_no': serial_no,
'status': status,
'status_reason': status_reason,
'study_uid': study_uid,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('CONFLICTING_STATUS', None)] = ConflictingStatus('This status conflicts with a prior status update')
errors_mapping[('INVALID_STATUS', None)] = InvalidStatus('Invalid status code')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node or job can not be found')
query_data = {
'api': self._api,
'url': '/node/deliver',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return AsyncQueryO(**query_data)
def retrieve(
self,
job_id,
serial_no,
status,
uuid,
):
"""Retrieve.
:param job_id: The uuid of the fetch job
:param serial_no: The serial number of the node
:param status: Status code of the job (S|F|P) - Success, failure, partial transfer
:param uuid: The node id
"""
request_data = {
'job_id': job_id,
'serial_no': serial_no,
'status': status,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('INVALID_STATUS', None)] = InvalidStatus('Invalid status code')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node or job can not be found')
query_data = {
'api': self._api,
'url': '/node/retrieve',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return AsyncQueryO(**query_data)
def webhook(
self,
serial_no,
status,
uuid,
webhook_id,
error_message=None,
):
"""Webhook.
:param serial_no: The serial number of the node
:param status: Status code of the job (S|F) - Success, failure
:param uuid: The node id
:param webhook_id: The uuid of the webhook job
:param error_message: Detailed error message (optional)
"""
request_data = {
'error_message': error_message,
'serial_no': serial_no,
'status': status,
'uuid': uuid,
'webhook_id': webhook_id,
}
errors_mapping = {}
errors_mapping[('INVALID_STATUS', None)] = InvalidStatus('Invalid status code')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node or webhook can not be found')
query_data = {
'api': self._api,
'url': '/node/webhook',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return AsyncQueryO(**query_data)
def configuration(
self,
serial_no,
uuid,
):
"""Configuration.
:param serial_no: The serial number of the node
:param uuid: The node id
"""
request_data = {
'serial_no': serial_no,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The node can not be found')
query_data = {
'api': self._api,
'url': '/node/configuration',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return AsyncQueryO(**query_data)
def delete(
self,
uuid,
):
"""Delete.
:param | |
# File: pyFileFixity/lib/profilers/memory_profiler/memory_profiler.py
"""Profile the memory usage of a Python program"""
# .. we'll use this to pass it to the child script ..
_clean_globals = globals().copy()
__version__ = '0.31'
_CMD_USAGE = "python -m memory_profiler script_file.py"
import time
import sys
import os
import pdb
import warnings
import linecache
import inspect
import subprocess
from copy import copy
# TODO: provide alternative when multprocessing is not available
try:
from multiprocessing import Process, Pipe
except ImportError:
from multiprocessing.dummy import Process, Pipe
_TWO_20 = float(2 ** 20)
has_psutil = False
# .. get available packages ..
try:
import psutil
has_psutil = True
except ImportError:
pass
def _get_memory(pid, timestamps=False, include_children=False):
    """Return the memory usage of process ``pid`` in MiB.

    :param pid: process id; -1 means the current process
    :param timestamps: when True, return ``(mem, time.time())`` instead of mem
    :param include_children: also sum the memory of child processes
        (requires psutil)
    :return: memory in MiB (or -1 when the ``ps`` fallback fails)
    """
    # .. only for current process and only on unix..
    if pid == -1:
        pid = os.getpid()

    # .. cross-platform but requires psutil ..
    if has_psutil:
        process = psutil.Process(pid)
        try:
            # BUGFIX: the old code used process.get_memory_info /
            # process.get_children as *eager* getattr defaults, which raises
            # AttributeError on modern psutil where those aliases were
            # removed.  Look up the new name first and only fall back to the
            # legacy one when it is absent.
            mem_info = getattr(process, 'memory_info', None) or process.get_memory_info
            mem = mem_info()[0] / _TWO_20
            if include_children:
                get_children = getattr(process, 'children', None) or process.get_children
                for p in get_children(recursive=True):
                    mem_info = getattr(p, 'memory_info', None) or p.get_memory_info
                    mem += mem_info()[0] / _TWO_20
            return (mem, time.time()) if timestamps else mem
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            # continue and try to get this from ps
            pass

    if os.name != 'posix':
        raise NotImplementedError('The psutil module is required for non-unix '
                                  'platforms')
    if include_children:
        raise NotImplementedError('The psutil module is required when to'
                                  ' monitor memory usage of children'
                                  ' processes')
    warnings.warn("psutil module not found. memory_profiler will be slow")

    # ..
    # .. memory usage in MiB ..
    # .. this should work on both Mac and Linux ..
    # .. subprocess.check_output appeared in 2.7, using Popen ..
    # .. for backwards compatibility ..
    out = subprocess.Popen(['ps', 'v', '-p', str(pid)],
                           stdout=subprocess.PIPE
                           ).communicate()[0].split(b'\n')
    try:
        vsz_index = out[0].split().index(b'RSS')
        mem = float(out[1].split()[vsz_index]) / 1024
        return (mem, time.time()) if timestamps else mem
    except (ValueError, IndexError):
        # BUGFIX: was a bare `except:`; only missing RSS column/row or an
        # unparsable number should map to the -1 sentinel.
        return (-1, time.time()) if timestamps else -1
class MemTimer(Process):
    """
    Fetch memory consumption from over a time interval

    Runs as a separate process that samples the monitored pid's memory
    every ``interval`` seconds until the parent writes to the pipe, then
    sends the collected samples (and the sample count) back.
    """
    def __init__(self, monitor_pid, interval, pipe, max_usage=False,
                 *args, **kw):
        self.monitor_pid = monitor_pid  # pid to sample
        self.interval = interval        # seconds between samples
        self.pipe = pipe                # duplex Connection to the parent
        self.cont = True
        self.max_usage = max_usage      # keep only the peak instead of a series
        self.n_measurements = 1         # baseline sample counts as the first

        # 'timestamps'/'include_children' are popped so they are not
        # forwarded to Process.__init__ below.
        if "timestamps" in kw:
            self.timestamps = kw["timestamps"]
            del kw["timestamps"]
        else:
            self.timestamps = False
        if "include_children" in kw:
            self.include_children = kw["include_children"]
            del kw["include_children"]
        else:
            self.include_children = False
        # get baseline memory usage (taken in the parent, before start())
        self.mem_usage = [
            _get_memory(self.monitor_pid, timestamps=self.timestamps,
                        include_children=self.include_children)]
        super(MemTimer, self).__init__(*args, **kw)

    def run(self):
        # Sampling loop protocol: send 0 when ready; then poll() both sleeps
        # for `interval` and detects the parent's stop message; one extra
        # sample is taken after stop is seen before results are sent back.
        self.pipe.send(0)  # we're ready
        stop = False
        while True:
            cur_mem = _get_memory(self.monitor_pid, timestamps=self.timestamps,
                                  include_children=self.include_children)
            if not self.max_usage:
                self.mem_usage.append(cur_mem)
            else:
                # peak-only mode: fold each sample into a single slot
                self.mem_usage[0] = max(cur_mem, self.mem_usage[0])
            self.n_measurements += 1
            if stop:
                break
            stop = self.pipe.poll(self.interval)
            # do one more iteration
        self.pipe.send(self.mem_usage)
        self.pipe.send(self.n_measurements)
def memory_usage(proc=-1, interval=.1, timeout=None, timestamps=False,
                 include_children=False, max_usage=False, retval=False,
                 stream=None):
    """
    Return the memory usage of a process or piece of code

    Parameters
    ----------
    proc : {int, string, tuple, subprocess.Popen}, optional
        The process to monitor. Can be given by an integer/string
        representing a PID, by a Popen object or by a tuple
        representing a Python function. The tuple contains three
        values (f, args, kw) and specifies to run the function
        f(*args, **kw).
        Set to -1 (default) for current process.

    interval : float, optional
        Interval at which measurements are collected.

    timeout : float, optional
        Maximum amount of time (in seconds) to wait before returning.

    max_usage : bool, optional
        Only return the maximum memory usage (default False)

    retval : bool, optional
        For profiling python functions. Save the return value of the profiled
        function. Return value of memory_usage becomes a tuple:
        (mem_usage, retval)

    timestamps : bool, optional
        if True, timestamps of memory usage measurement are collected as well.

    stream : File
        if stream is a File opened with write access, then results are written
        to this file instead of stored in memory and returned at the end of
        the subprocess. Useful for long-running processes.
        Implies timestamps=True.

    Returns
    -------
    mem_usage : list of floating-poing values
        memory usage, in MiB. It's length is always < timeout / interval
        if max_usage is given, returns the two elements maximum memory and
        number of measurements effectuated

    ret : return value of the profiled function
        Only returned if retval is set to True
    """
    if stream is not None:
        timestamps = True

    # -1 is the "no sample yet" sentinel in peak-only mode.
    ret = [] if not max_usage else -1

    if timeout is not None:
        max_iter = int(timeout / interval)
    elif isinstance(proc, int):
        # external process and no timeout
        max_iter = 1
    else:
        # for a Python function wait until it finishes
        max_iter = float('inf')

    if hasattr(proc, '__call__'):
        proc = (proc, (), {})
    if isinstance(proc, (list, tuple)):
        # Branch 1: profile a Python callable in this process while a
        # MemTimer child process samples our memory.
        if len(proc) == 1:
            f, args, kw = (proc[0], (), {})
        elif len(proc) == 2:
            f, args, kw = (proc[0], proc[1], {})
        elif len(proc) == 3:
            f, args, kw = (proc[0], proc[1], proc[2])
        else:
            raise ValueError

        while True:
            child_conn, parent_conn = Pipe()  # this will store MemTimer's results
            p = MemTimer(os.getpid(), interval, child_conn, timestamps=timestamps,
                         max_usage=max_usage, include_children=include_children)
            p.start()
            parent_conn.recv()  # wait until we start getting memory
            returned = f(*args, **kw)
            parent_conn.send(0)  # finish timing
            ret = parent_conn.recv()
            n_measurements = parent_conn.recv()
            if retval:
                ret = ret, returned
            p.join(5 * interval)
            # If the function finished before we could take ~5 samples,
            # retry with a 10x finer interval (down to a 1e-6 floor).
            if n_measurements > 4 or interval < 1e-6:
                break
            interval /= 10.
    elif isinstance(proc, subprocess.Popen):
        # Branch 2: sample an external process we launched, until it exits
        # (or `timeout` expires).
        # external process, launched from Python
        line_count = 0
        while True:
            if not max_usage:
                mem_usage = _get_memory(proc.pid, timestamps=timestamps,
                                        include_children=include_children)
                if stream is not None:
                    stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))
                else:
                    ret.append(mem_usage)
            else:
                ret = max([ret,
                           _get_memory(proc.pid,
                                       include_children=include_children)])
            time.sleep(interval)
            line_count += 1
            # flush every 50 lines. Make 'tail -f' usable on profile file
            if line_count > 50:
                line_count = 0
                if stream is not None:
                    stream.flush()
            if timeout is not None:
                max_iter -= 1
                if max_iter == 0:
                    break
            if proc.poll() is not None:
                break
    else:
        # Branch 3: sample an arbitrary pid for at most max_iter intervals
        # (a single sample when no timeout was given).
        # external process
        if max_iter == -1:
            max_iter = 1
        counter = 0
        while counter < max_iter:
            counter += 1
            if not max_usage:
                mem_usage = _get_memory(proc, timestamps=timestamps,
                                        include_children=include_children)
                if stream is not None:
                    stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))
                else:
                    ret.append(mem_usage)
            else:
                ret = max([ret,
                           _get_memory(proc, include_children=include_children)
                           ])

            time.sleep(interval)
            # Flush every 50 lines.
            if counter % 50 == 0 and stream is not None:
                stream.flush()
    if stream:
        # Results already went to the stream; nothing to return.
        return None
    return ret
# ..
# .. utility functions for line-by-line ..
def _find_script(script_name):
    """Locate ``script_name``, searching $PATH when it is not a file.

    Writes a message to stderr and exits with status 1 when the script
    cannot be found anywhere.
    """
    if os.path.isfile(script_name):
        return script_name

    search_dirs = os.getenv('PATH', os.defpath).split(os.pathsep)
    for directory in search_dirs:
        if not directory:
            continue
        candidate = os.path.join(directory, script_name)
        if os.path.isfile(candidate):
            return candidate

    sys.stderr.write('Could not find script {0}\n'.format(script_name))
    raise SystemExit(1)
class _TimeStamperCM(object):
    """Context manager that records (memory, time) on enter and on exit."""

    def __init__(self, timestamps):
        # List shared with the TimeStamper that created this manager.
        self._timestamps = timestamps

    def __enter__(self):
        self._record()

    def __exit__(self, *args):
        self._record()

    def _record(self):
        """Append the current (memory, timestamp) sample."""
        self._timestamps.append(_get_memory(os.getpid(), timestamps=True))
class TimeStamper:
    """ A profiler that just records start and end execution times for
    any decorated function.
    """

    def __init__(self):
        # Maps each tracked function to a list of [start, end] sample pairs.
        self.functions = {}

    def __call__(self, func):
        if not hasattr(func, "__call__"):
            raise ValueError("Value must be callable")

        self.add_function(func)
        wrapped = self.wrap_function(func)
        # Preserve the wrapped function's metadata on the wrapper.
        for attr in ('__module__', '__name__', '__doc__'):
            setattr(wrapped, attr, getattr(func, attr))
        wrapped.__dict__.update(getattr(func, '__dict__', {}))
        return wrapped

    def timestamp(self, name="<block>"):
        """Returns a context manager for timestamping a block of code."""
        # Register a synthetic function standing in for the code block.
        func = lambda x: x
        func.__module__ = ""
        func.__name__ = name
        self.add_function(func)
        timestamps = []
        self.functions[func].append(timestamps)
        # A new object is required each time, since there can be several
        # nested context managers.
        return _TimeStamperCM(timestamps)

    def add_function(self, func):
        """Register ``func`` for tracking (idempotent)."""
        self.functions.setdefault(func, [])

    def wrap_function(self, func):
        """ Wrap a function to timestamp it.
        """
        def f(*args, **kwds):
            # Record the start sample, run the function, and record the end
            # sample even if the function raises.
            timestamps = [_get_memory(os.getpid(), timestamps=True)]
            self.functions[func].append(timestamps)
            try:
                return func(*args, **kwds)
            finally:
                timestamps.append(_get_memory(os.getpid(), timestamps=True))
        return f

    def show_results(self, stream=None):
        """Write one 'FUNC name start_mem start_t end_mem end_t' line per call."""
        if stream is None:
            stream = sys.stdout

        for func, timestamps in self.functions.items():
            function_name = "%s.%s" % (func.__module__, func.__name__)
            for ts in timestamps:
                stream.write("FUNC %s %.4f %.4f %.4f %.4f\n" % (
                    (function_name,) + ts[0] + ts[1]))
class LineProfiler(object):
""" A profiler that records the amount of memory for each line """
    def __init__(self, **kw):
        """Initialize the per-line memory profiler.

        Recognized keyword options: ``max_mem`` and ``include_children``.
        """
        # Keyed by code objects (see add_code); values are per-code dicts.
        self.code_map = {}
        # Nesting depth of enable/disable calls.
        self.enable_count = 0
        self.max_mem = kw.get('max_mem', None)
        self.prevline = None
        self.include_children = kw.get('include_children', False)
    def __call__(self, func):
        """Decorate ``func`` so its memory usage is recorded line by line."""
        self.add_function(func)
        f = self.wrap_function(func)
        # Preserve the wrapped function's metadata on the wrapper.
        f.__module__ = func.__module__
        f.__name__ = func.__name__
        f.__doc__ = func.__doc__
        f.__dict__.update(getattr(func, '__dict__', {}))
        return f
def add_code(self, code, toplevel_code=None):
if code not in self.code_map:
self.code_map[code] = {}
for subcode in filter(inspect.iscode, | |
"""Definitions and implementations for data-path expressions to query and manipulate (insert, update, delete)."""
import copy
import functools
import itertools
import logging
import re
import warnings
from datetime import date

from requests import HTTPError

from . import urlquote
__all__ = ['DataPathException', 'Min', 'Max', 'Sum', 'Avg', 'Cnt', 'CntD', 'Array', 'ArrayD', 'Bin']
logger = logging.getLogger(__name__)
"""Logger for this module"""
_system_defaults = {'RID', 'RCT', 'RCB', 'RMT', 'RMB'}
"""Set of system default column names"""
def deprecated(f):
    """A simple 'deprecated' function decorator.

    Emits a DeprecationWarning naming ``f`` on every call, then delegates
    to ``f`` unchanged.  ``functools.wraps`` preserves the wrapped
    function's ``__name__``/``__doc__``/``__dict__`` (the original wrapper
    discarded them, breaking introspection and documentation tools).
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        warnings.warn("'%s' has been deprecated" % f.__name__, DeprecationWarning, stacklevel=2)
        return f(*args, **kwargs)
    return wrapper
def from_catalog(catalog):
    """Wrap an ErmrestCatalog object for use in datapath expressions.

    :param catalog: an ErmrestCatalog object
    :return: a datapath._CatalogWrapper object
    """
    wrapped = _CatalogWrapper(catalog)
    return wrapped
def _isidentifier(a):
"""Tests if string is a valid python identifier.
This function is intended for internal usage within this module.
:param a: a string
"""
if hasattr(a, 'isidentifier'):
return a.isidentifier()
else:
return re.match("[_A-Za-z][_a-zA-Z0-9]*$", a) is not None
def _identifier_for_name(name, *reserveds):
"""Makes an identifier from a given name and disambiguates if it is reserved.
1. replace invalid identifier characters with '_'
2. prepend with '_' if first character is a digit
3. append a disambiguating positive integer if it is reserved
:param name: a string of any format
:param *reserveds: iterable collections of reserved strings
:return: a valid identifier string for the given name
"""
assert len(name) > 0, 'empty strings are not allowed'
# replace invalid characters with '_'s
identifier = re.sub("[^_a-zA-Z0-9]", "_", name)
# prepend with '_' is it starts with a digit
if identifier[0].isdigit():
identifier = '_' + identifier
# append a disambiguating positive integer if it is reserved
disambiguator = 1
ambiguous = identifier
while any(identifier in reserved for reserved in reserveds):
identifier = ambiguous + str(disambiguator)
disambiguator += 1
return identifier
def _make_identifier_to_name_mapping(names, reserved):
    """Makes a dictionary of (valid) identifiers to (original) names.

    Try to favor the names that require the least modification:
    1. add all names that are valid identifiers and do not conflict with reserved names
    2. add all names that are valid identifiers but do conflict with reserved names by appending a disambiguator
    3. add an unambiguous identifier made from the name, when the name is not already a valid identifier

    :param names: iterable collection of strings
    :param reserved: iterable collection of reserved identifiers
    :return: a dictionary to map from identifier to name
    """
    reserved = set(reserved)
    assert all(_isidentifier(r) for r in reserved), 'all reserved names must be valid identifiers'
    mappings = {  # first, add all non-offending names
        name: name
        for name in names if _isidentifier(name) and name not in reserved
    }
    mappings.update({  # second, add all names that conflict with reserved strings
        name + '1': name
        for name in names if name in reserved and name + '1' not in mappings
    })
    # Fix: subtract the *names* already handled (the mapping values), not the
    # identifier keys. For reserved names handled in step 2 the key is
    # name + '1' while the value is the name itself; subtracting keys left
    # those names in the remainder and produced a second, redundant
    # identifier for each of them in step 3.
    remaining_names = set(names) - set(mappings.values())
    # third, convert and disambiguate remaining names
    for name in remaining_names:
        mappings[_identifier_for_name(name, mappings.keys(), reserved)] = name
    return mappings
def _http_error_message(e):
"""Returns a formatted error message from the raw HTTPError.
"""
return '\n'.join(e.response.text.splitlines()[1:]) + '\n' + str(e)
class DataPathException (Exception):
    """Raised when a datapath expression operation fails.

    Carries a human-readable ``message`` and an optional underlying
    ``reason`` (e.g., an HTTPError).
    """
    def __init__(self, message, reason=None):
        self.message = message
        self.reason = reason
        super(DataPathException, self).__init__(message, reason)

    def __str__(self):
        return self.message
class _CatalogWrapper (object):
    """Wraps a Catalog for datapath expressions.

    Schemas are exposed both via the ``schemas`` dict (keyed by schema name)
    and as attributes named by sanitized identifiers derived from those names.
    """
    def __init__(self, catalog):
        """Creates the _CatalogWrapper.

        :param catalog: ErmrestCatalog object
        """
        super(_CatalogWrapper, self).__init__()
        self._wrapped_catalog = catalog
        self._wrapped_model = catalog.getCatalogModel()
        self.schemas = {
            k: _SchemaWrapper(self, v)
            for k, v in self._wrapped_model.schemas.items()
        }
        # Map attribute-safe identifiers to real schema names, avoiding
        # clashes with this object's ordinary attributes.
        self._identifiers = _make_identifier_to_name_mapping(
            self.schemas.keys(),
            super(_CatalogWrapper, self).__dir__())

    def __dir__(self):
        # Advertise both the normal attributes and the schema identifiers.
        return itertools.chain(
            super(_CatalogWrapper, self).__dir__(),
            self._identifiers.keys()
        )

    def __getattr__(self, a):
        # Only called for attributes not found normally: resolve schema
        # identifiers here, otherwise defer to the parent lookup (which
        # raises AttributeError for unknown names).
        if a in self._identifiers:
            return self.schemas[self._identifiers[a]]
        else:
            return getattr(super(_CatalogWrapper, self), a)

    @classmethod
    def compose(cls, *paths):
        """Compose path fragments into a path.

        The root of any path fragment must be found in the table instances of the currently composed path from left
        to right, _but_ it does not have to be the current context (last table instance) of the last left hand path.
        Paths must not have overlapping table instances with the currently composed path from left to right, except for
        each subsequent path's root table instance which _must_ be defined in one of the left hand paths.
        No input path in 'paths' will be mutated.

        :param paths: instances of `DataPath`
        :return: a new `DataPath` instance composed from the 'paths'
        """
        if not paths:
            raise ValueError("No input path(s) given")
        if not all(isinstance(path, DataPath) for path in paths):
            # Fix: report the intended class name ('DataPath'); the original
            # used type(DataPath).__name__, which names the metaclass ('type').
            raise TypeError("Input 'paths' must be an instance of %s" % DataPath.__name__)
        # Deep-copy the first path, then merge the rest into it left to right.
        base = copy.deepcopy(paths[0])
        for path in paths[1:]:
            base.merge(path)
        return base
class _SchemaWrapper (object):
    """Wraps a Schema for datapath expressions.

    Tables are exposed both via the ``tables`` dict (keyed by table name) and
    as attributes named by sanitized identifiers derived from those names.
    """
    def __init__(self, catalog, schema):
        """Creates the _SchemaWrapper.

        :param catalog: the catalog wrapper to which this schema wrapper belongs
        :param schema: the wrapped schema object
        """
        super(_SchemaWrapper, self).__init__()
        self._catalog = catalog
        self._wrapped_schema = schema
        self._name = schema.name
        self.tables = {
            k: _TableWrapper(self, v)
            for k, v in schema.tables.items()
        }
        # Attribute-safe identifier -> real table name, avoiding clashes with
        # this object's ordinary attributes.
        self._identifiers = _make_identifier_to_name_mapping(
            self.tables.keys(),
            super(_SchemaWrapper, self).__dir__())
    def __dir__(self):
        # Advertise both the normal attributes and the table identifiers.
        return itertools.chain(
            super(_SchemaWrapper, self).__dir__(),
            self._identifiers.keys()
        )
    def __getattr__(self, a):
        # Only called for attributes not found normally: resolve table
        # identifiers here, otherwise defer to the parent lookup.
        if a in self._identifiers:
            return self.tables[self._identifiers[a]]
        else:
            return getattr(super(_SchemaWrapper, self), a)
    @deprecated
    def describe(self):
        """Provides a description of the model element.

        :return: a user-friendly string representation of the model element.
        """
        s = "_SchemaWrapper name: '%s'\nList of tables:\n" % self._name
        if len(self.tables) == 0:
            s += "none"
        else:
            s += "\n".join("  '%s'" % tname for tname in self.tables)
        return s
    @deprecated
    def _repr_html_(self):
        # Notebook display hook; reuses the plain-text description.
        return self.describe()
class DataPath (object):
"""Represents a datapath expression.
"""
    def __init__(self, root):
        """Initialize the path with *root* as its first (and current) table instance.

        :param root: a _TableAlias anchoring the path expression
        """
        assert isinstance(root, _TableAlias)
        self._path_expression = _Root(root)
        self._root = root
        self._base_uri = root._schema._catalog._wrapped_catalog._server_uri
        self._table_instances = dict()  # map of alias_name => _TableAlias object
        self._context = None  # set by _bind_table_instance below
        self._identifiers = {}  # attribute-safe identifier => alias name
        self._bind_table_instance(root)
def __dir__(self):
return itertools.chain(
super(DataPath, self).__dir__(),
self._identifiers.keys()
)
def __getattr__(self, a):
if a in self._identifiers:
return self._table_instances[self._identifiers[a]]
else:
return getattr(super(DataPath, self), a)
def __deepcopy__(self, memodict={}):
cp = DataPath(copy.deepcopy(self._root, memo=memodict))
for alias in copy.deepcopy(self._table_instances, memo=memodict).values():
if alias != cp._root:
cp._bind_table_instance(alias)
cp._context = cp._table_instances[self._context._name]
cp._path_expression = copy.deepcopy(self._path_expression, memo=memodict)
assert not cp._table_instances.keys() - set(cp._identifiers)
assert cp._table_instances.keys() == self._table_instances.keys()
assert cp._identifiers.keys() == self._identifiers.keys()
assert cp._root._name in cp._table_instances
assert cp._root == cp._table_instances[cp._root._name]
assert cp._root != self._root
assert cp._root._name == self._root._name
assert cp._context != self._context
assert cp._context._name == self._context._name
assert str(cp._path_expression) == str(self._path_expression)
assert cp._path_expression != self._path_expression
return cp
    @property
    def table_instances(self):
        """Collection of the table instances in this datapath expression.

        Maps alias name to its bound _TableAlias object.
        """
        return self._table_instances
    @property
    def context(self):
        """Context (i.e., last bound table instance) of this datapath expression."""
        # _context is maintained by _bind_table_instance and the setter below.
        return self._context
@context.setter
def context(self, value):
"""Updates the context of this datapath expression (must be a table instance bound to this expression)."""
if not isinstance(value, _TableAlias):
raise TypeError('context must be a table alias object')
if value._name not in self._table_instances:
raise ValueError('table alias must be bound in this path')
if self._context != value:
self._path_expression = _ResetContext(self._path_expression, value)
self._context = value
    @property
    def uri(self):
        """The current URI serialization of this datapath expression.

        Concatenates the catalog's base URI with the serialized path expression.
        """
        return self._base_uri + str(self._path_expression)
def _contextualized_uri(self, context):
"""Returns a path uri for the specified context.
:param context: a table instance that is bound to this path
:return: string representation of the path uri
"""
assert isinstance(context, _TableAlias)
assert context._name in self._table_instances
if self._context != context:
return self._base_uri + str(_ResetContext(self._path_expression, context))
else:
return self.uri
    def _bind_table_instance(self, alias):
        """Binds a new table instance into this path.

        Registers the alias under its name, makes it the current context, and
        records an attribute-safe identifier for it.
        """
        assert isinstance(alias, _TableAlias)
        alias._bind(self)
        # The newly bound alias always becomes the current context.
        self._table_instances[alias._name] = self._context = alias
        self._identifiers[_identifier_for_name(alias._name, self._identifiers.keys(), super(DataPath, self).__dir__())] = alias._name
def delete(self):
"""Deletes the entity set referenced by the data path.
"""
try:
path = str(self._path_expression)
logger.debug("Deleting: {p}".format(p=path))
self._root._schema._catalog._wrapped_catalog.delete(path)
except HTTPError as e:
logger.debug(e.response.text)
if 400 <= e.response.status_code < 500:
raise DataPathException(_http_error_message(e), e)
else:
raise e
    def filter(self, filter_expression):
        """Filters the path based on the specified formula.

        :param filter_expression: should be a valid _Predicate object
        :return: self (chaining-friendly)
        """
        assert isinstance(filter_expression, _Predicate)
        # Wrap the current expression tree in a _Filter node.
        self._path_expression = _Filter(self._path_expression, filter_expression)
        return self
def link(self, right, on=None, join_type=''):
"""Links this path with another table.
To link a table with an unambigious relationship where table A is related to table B via a single foreign key
reference, the `on` clause is not.
```
# let A and B be variables for tables from the | |
from enum XlChartType
xlSurfaceWireframe = 84 # from enum XlChartType
xlXYScatter = -4169 # from enum XlChartType
xlXYScatterLines = 74 # from enum XlChartType
xlXYScatterLinesNoMarkers = 75 # from enum XlChartType
xlXYScatterSmooth = 72 # from enum XlChartType
xlXYScatterSmoothNoMarkers = 73 # from enum XlChartType
# Generated Excel COM enumeration constants, grouped one class per Xl* enum.
# Values are taken from the Excel type library and must not be changed.
class CheckInVersionType:
    """XlCheckInVersionType enumeration constants."""
    xlCheckInMajorVersion = 1  # from enum XlCheckInVersionType
    xlCheckInMinorVersion = 0  # from enum XlCheckInVersionType
    xlCheckInOverwriteVersion = 2  # from enum XlCheckInVersionType
class ClipboardFormat:
    """XlClipboardFormat enumeration constants."""
    xlClipboardFormatBIFF = 8  # from enum XlClipboardFormat
    xlClipboardFormatBIFF12 = 63  # from enum XlClipboardFormat
    xlClipboardFormatBIFF2 = 18  # from enum XlClipboardFormat
    xlClipboardFormatBIFF3 = 20  # from enum XlClipboardFormat
    xlClipboardFormatBIFF4 = 30  # from enum XlClipboardFormat
    xlClipboardFormatBinary = 15  # from enum XlClipboardFormat
    xlClipboardFormatBitmap = 9  # from enum XlClipboardFormat
    xlClipboardFormatCGM = 13  # from enum XlClipboardFormat
    xlClipboardFormatCSV = 5  # from enum XlClipboardFormat
    xlClipboardFormatDIF = 4  # from enum XlClipboardFormat
    xlClipboardFormatDspText = 12  # from enum XlClipboardFormat
    xlClipboardFormatEmbedSource = 22  # from enum XlClipboardFormat
    xlClipboardFormatEmbeddedObject = 21  # from enum XlClipboardFormat
    xlClipboardFormatLink = 11  # from enum XlClipboardFormat
    xlClipboardFormatLinkSource = 23  # from enum XlClipboardFormat
    xlClipboardFormatLinkSourceDesc = 32  # from enum XlClipboardFormat
    xlClipboardFormatMovie = 24  # from enum XlClipboardFormat
    xlClipboardFormatNative = 14  # from enum XlClipboardFormat
    xlClipboardFormatObjectDesc = 31  # from enum XlClipboardFormat
    xlClipboardFormatObjectLink = 19  # from enum XlClipboardFormat
    xlClipboardFormatOwnerLink = 17  # from enum XlClipboardFormat
    xlClipboardFormatPICT = 2  # from enum XlClipboardFormat
    xlClipboardFormatPrintPICT = 3  # from enum XlClipboardFormat
    xlClipboardFormatRTF = 7  # from enum XlClipboardFormat
    xlClipboardFormatSYLK = 6  # from enum XlClipboardFormat
    xlClipboardFormatScreenPICT = 29  # from enum XlClipboardFormat
    xlClipboardFormatStandardFont = 28  # from enum XlClipboardFormat
    xlClipboardFormatStandardScale = 27  # from enum XlClipboardFormat
    xlClipboardFormatTable = 16  # from enum XlClipboardFormat
    xlClipboardFormatText = 0  # from enum XlClipboardFormat
    xlClipboardFormatToolFace = 25  # from enum XlClipboardFormat
    xlClipboardFormatToolFacePICT = 26  # from enum XlClipboardFormat
    xlClipboardFormatVALU = 1  # from enum XlClipboardFormat
    xlClipboardFormatWK1 = 10  # from enum XlClipboardFormat
class CmdType:
    """XlCmdType enumeration constants."""
    xlCmdCube = 1  # from enum XlCmdType
    xlCmdDefault = 4  # from enum XlCmdType
    xlCmdList = 5  # from enum XlCmdType
    xlCmdSql = 2  # from enum XlCmdType
    xlCmdTable = 3  # from enum XlCmdType
class ColorIndex:
    """XlColorIndex enumeration constants."""
    xlColorIndexAutomatic = -4105  # from enum XlColorIndex
    xlColorIndexNone = -4142  # from enum XlColorIndex
class ColumnDataType:
    """XlColumnDataType enumeration constants."""
    xlDMYFormat = 4  # from enum XlColumnDataType
    xlDYMFormat = 7  # from enum XlColumnDataType
    xlEMDFormat = 10  # from enum XlColumnDataType
    xlGeneralFormat = 1  # from enum XlColumnDataType
    xlMDYFormat = 3  # from enum XlColumnDataType
    xlMYDFormat = 6  # from enum XlColumnDataType
    xlSkipColumn = 9  # from enum XlColumnDataType
    xlTextFormat = 2  # from enum XlColumnDataType
    xlYDMFormat = 8  # from enum XlColumnDataType
    xlYMDFormat = 5  # from enum XlColumnDataType
class CommandUnderlines:
    """XlCommandUnderlines enumeration constants."""
    xlCommandUnderlinesAutomatic = -4105  # from enum XlCommandUnderlines
    xlCommandUnderlinesOff = -4146  # from enum XlCommandUnderlines
    xlCommandUnderlinesOn = 1  # from enum XlCommandUnderlines
class CommentDisplayMode:
    """XlCommentDisplayMode enumeration constants."""
    xlCommentAndIndicator = 1  # from enum XlCommentDisplayMode
    xlCommentIndicatorOnly = -1  # from enum XlCommentDisplayMode
    xlNoIndicator = 0  # from enum XlCommentDisplayMode
class ConditionValueTypes:
    """XlConditionValueTypes enumeration constants."""
    xlConditionValueAutomaticMax = 7  # from enum XlConditionValueTypes
    xlConditionValueAutomaticMin = 6  # from enum XlConditionValueTypes
    xlConditionValueFormula = 4  # from enum XlConditionValueTypes
    xlConditionValueHighestValue = 2  # from enum XlConditionValueTypes
    xlConditionValueLowestValue = 1  # from enum XlConditionValueTypes
    xlConditionValueNone = -1  # from enum XlConditionValueTypes
    xlConditionValueNumber = 0  # from enum XlConditionValueTypes
    xlConditionValuePercent = 3  # from enum XlConditionValueTypes
    xlConditionValuePercentile = 5  # from enum XlConditionValueTypes
class ConnectionType:
    """XlConnectionType enumeration constants."""
    xlConnectionTypeODBC = 2  # from enum XlConnectionType
    xlConnectionTypeOLEDB = 1  # from enum XlConnectionType
    xlConnectionTypeTEXT = 4  # from enum XlConnectionType
    xlConnectionTypeWEB = 5  # from enum XlConnectionType
    xlConnectionTypeXMLMAP = 3  # from enum XlConnectionType
class ConsolidationFunction:
    """XlConsolidationFunction enumeration constants."""
    xlAverage = -4106  # from enum XlConsolidationFunction
    xlCount = -4112  # from enum XlConsolidationFunction
    xlCountNums = -4113  # from enum XlConsolidationFunction
    xlMax = -4136  # from enum XlConsolidationFunction
    xlMin = -4139  # from enum XlConsolidationFunction
    xlProduct = -4149  # from enum XlConsolidationFunction
    xlStDev = -4155  # from enum XlConsolidationFunction
    xlStDevP = -4156  # from enum XlConsolidationFunction
    xlSum = -4157  # from enum XlConsolidationFunction
    xlUnknown = 1000  # from enum XlConsolidationFunction
    xlVar = -4164  # from enum XlConsolidationFunction
    xlVarP = -4165  # from enum XlConsolidationFunction
class ContainsOperator:
    """XlContainsOperator enumeration constants."""
    xlBeginsWith = 2  # from enum XlContainsOperator
    xlContains = 0  # from enum XlContainsOperator
    xlDoesNotContain = 1  # from enum XlContainsOperator
    xlEndsWith = 3  # from enum XlContainsOperator
class CopyPictureFormat:
    """XlCopyPictureFormat enumeration constants."""
    xlBitmap = 2  # from enum XlCopyPictureFormat
    xlPicture = -4147  # from enum XlCopyPictureFormat
class CorruptLoad:
    """XlCorruptLoad enumeration constants."""
    xlExtractData = 2  # from enum XlCorruptLoad
    xlNormalLoad = 0  # from enum XlCorruptLoad
    xlRepairFile = 1  # from enum XlCorruptLoad
class Creator:
    """XlCreator enumeration constants."""
    xlCreatorCode = 1480803660  # from enum XlCreator
class CredentialsMethod:
    """XlCredentialsMethod enumeration constants."""
    xlCredentialsMethodIntegrated = 0  # from enum XlCredentialsMethod
    xlCredentialsMethodNone = 1  # from enum XlCredentialsMethod
    xlCredentialsMethodStored = 2  # from enum XlCredentialsMethod
class CubeFieldSubType:
    """XlCubeFieldSubType enumeration constants."""
    xlCubeAttribute = 4  # from enum XlCubeFieldSubType
    xlCubeCalculatedMeasure = 5  # from enum XlCubeFieldSubType
    xlCubeHierarchy = 1  # from enum XlCubeFieldSubType
    xlCubeKPIGoal = 7  # from enum XlCubeFieldSubType
    xlCubeKPIStatus = 8  # from enum XlCubeFieldSubType
    xlCubeKPITrend = 9  # from enum XlCubeFieldSubType
    xlCubeKPIValue = 6  # from enum XlCubeFieldSubType
    xlCubeKPIWeight = 10  # from enum XlCubeFieldSubType
    xlCubeMeasure = 2  # from enum XlCubeFieldSubType
    xlCubeSet = 3  # from enum XlCubeFieldSubType
class CubeFieldType:
    """XlCubeFieldType enumeration constants."""
    xlHierarchy = 1  # from enum XlCubeFieldType
    xlMeasure = 2  # from enum XlCubeFieldType
    xlSet = 3  # from enum XlCubeFieldType
class CutCopyMode:
    """XlCutCopyMode enumeration constants."""
    xlCopy = 1  # from enum XlCutCopyMode
    xlCut = 2  # from enum XlCutCopyMode
class DVAlertStyle:
    """XlDVAlertStyle enumeration constants."""
    xlValidAlertInformation = 3  # from enum XlDVAlertStyle
    xlValidAlertStop = 1  # from enum XlDVAlertStyle
    xlValidAlertWarning = 2  # from enum XlDVAlertStyle
class DVType:
    """XlDVType enumeration constants."""
    xlValidateCustom = 7  # from enum XlDVType
    xlValidateDate = 4  # from enum XlDVType
    xlValidateDecimal = 2  # from enum XlDVType
    xlValidateInputOnly = 0  # from enum XlDVType
    xlValidateList = 3  # from enum XlDVType
    xlValidateTextLength = 6  # from enum XlDVType
    xlValidateTime = 5  # from enum XlDVType
    xlValidateWholeNumber = 1  # from enum XlDVType
class DataBarAxisPosition:
    """XlDataBarAxisPosition enumeration constants."""
    xlDataBarAxisAutomatic = 0  # from enum XlDataBarAxisPosition
    xlDataBarAxisMidpoint = 1  # from enum XlDataBarAxisPosition
    xlDataBarAxisNone = 2  # from enum XlDataBarAxisPosition
class DataBarBorderType:
    """XlDataBarBorderType enumeration constants."""
    xlDataBarBorderNone = 0  # from enum XlDataBarBorderType
    xlDataBarBorderSolid = 1  # from enum XlDataBarBorderType
class DataBarFillType:
    """XlDataBarFillType enumeration constants."""
    xlDataBarFillGradient = 1  # from enum XlDataBarFillType
    xlDataBarFillSolid = 0  # from enum XlDataBarFillType
class DataBarNegativeColorType:
    """XlDataBarNegativeColorType enumeration constants."""
    xlDataBarColor = 0  # from enum XlDataBarNegativeColorType
    xlDataBarSameAsPositive = 1  # from enum XlDataBarNegativeColorType
class DataLabelPosition:
    """XlDataLabelPosition enumeration constants."""
    xlLabelPositionAbove = 0  # from enum XlDataLabelPosition
    xlLabelPositionBelow = 1  # from enum XlDataLabelPosition
    xlLabelPositionBestFit = 5  # from enum XlDataLabelPosition
    xlLabelPositionCenter = -4108  # from enum XlDataLabelPosition
    xlLabelPositionCustom = 7  # from enum XlDataLabelPosition
    xlLabelPositionInsideBase = 4  # from enum XlDataLabelPosition
    xlLabelPositionInsideEnd = 3  # from enum XlDataLabelPosition
    xlLabelPositionLeft = -4131  # from enum XlDataLabelPosition
    xlLabelPositionMixed = 6  # from enum XlDataLabelPosition
    xlLabelPositionOutsideEnd = 2  # from enum XlDataLabelPosition
    xlLabelPositionRight = -4152  # from enum XlDataLabelPosition
class DataLabelSeparator:
    """XlDataLabelSeparator enumeration constants."""
    xlDataLabelSeparatorDefault = 1  # from enum XlDataLabelSeparator
class DataLabelsType:
    """XlDataLabelsType enumeration constants."""
    xlDataLabelsShowBubbleSizes = 6  # from enum XlDataLabelsType
    xlDataLabelsShowLabel = 4  # from enum XlDataLabelsType
    xlDataLabelsShowLabelAndPercent = 5  # from enum XlDataLabelsType
    xlDataLabelsShowNone = -4142  # from enum XlDataLabelsType
    xlDataLabelsShowPercent = 3  # from enum XlDataLabelsType
    xlDataLabelsShowValue = 2  # from enum XlDataLabelsType
class DataSeriesDate:
    """XlDataSeriesDate enumeration constants."""
    xlDay = 1  # from enum XlDataSeriesDate
    xlMonth = 3  # from enum XlDataSeriesDate
    xlWeekday = 2  # from enum XlDataSeriesDate
    xlYear = 4  # from enum XlDataSeriesDate
    # NOTE(review): the four constants below are tagged 'XlDataSeriesType' and
    # appear misfiled in this XlDataSeriesDate class (a DataSeriesType class
    # seems to be missing). Moving them would break existing references, so
    # they are only flagged here.
    xlAutoFill = 4  # from enum XlDataSeriesType
    xlChronological = 3  # from enum XlDataSeriesType
    xlDataSeriesLinear = -4132  # from enum XlDataSeriesType
    xlGrowth = 2  # from enum XlDataSeriesType
class DeleteShiftDirection:
    """XlDeleteShiftDirection enumeration constants."""
    xlShiftToLeft = -4159  # from enum XlDeleteShiftDirection
    xlShiftUp = -4162  # from enum XlDeleteShiftDirection
class Direction:
    """XlDirection enumeration constants."""
    xlDown = -4121  # from enum XlDirection
    xlToLeft = -4159  # from enum XlDirection
    xlToRight = -4161  # from enum XlDirection
    xlUp = -4162  # from enum XlDirection
class DisplayBlanksAs:
    """XlDisplayBlanksAs enumeration constants."""
    xlInterpolated = 3  # from enum XlDisplayBlanksAs
    xlNotPlotted = 1  # from enum XlDisplayBlanksAs
    xlZero = 2  # from enum XlDisplayBlanksAs
class DisplayDrawingObjects:
xlDisplayShapes = -4104 # from enum XlDisplayDrawingObjects
xlHide = 3 # from enum XlDisplayDrawingObjects
xlPlaceholders = 2 # from enum | |
import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session, url_for
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, create_room
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded (development convenience; re-renders on change)
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Stamp every response with headers that disable client-side caching."""
    anti_cache_headers = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Expires": 0,
        "Pragma": "no-cache",
    }
    for header_name, header_value in anti_cache_headers.items():
        response.headers[header_name] = header_value
    return response
# Configure session to use filesystem (instead of signed cookies);
# mkdtemp() gives each process a fresh, private session directory.
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///database.db")
# Search for rooms
@app.route("/", methods=["GET", "POST"])
@login_required
def index():
    """Home page: welcome message plus a room-search box (POST looks up a room)."""
    if request.method != "GET":
        # POST: look up the requested room by its id.
        room_id = request.form.get("room_id")
        room = db.execute("SELECT * FROM rooms WHERE room_id=:room_id", room_id=room_id)
        if len(room) == 1:
            return render_template("index.html", room=room, search=True)
        return render_template("index.html", search=False, message="Room not Found")
    return render_template("index.html", search=False)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log user in"""
    # Forget any previously authenticated user.
    session.clear()
    if request.method != "POST":
        # GET (link or redirect): just show the login form.
        return render_template("login.html")
    username = request.form.get("username")
    password = request.form.get("password")
    # Validate the submitted credentials.
    if not username:
        return apology("must provide username", 403)
    if not password:
        return apology("must provide password", 403)
    # Look up the account and verify the password hash.
    rows = db.execute("SELECT * FROM users WHERE username = :username",
                      username=username)
    if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
        return apology("invalid username and/or password", 403)
    # Remember which user has logged in.
    session["user_id"] = rows[0]["id"]
    session["username"] = rows[0]["username"]
    return redirect(url_for('index'))
@app.route("/logout")
def logout():
    """Log user out"""
    session.clear()  # forget any user_id
    # Back to the landing page (login_required will bounce to the login form).
    return redirect(url_for('index'))
@app.route("/create", methods=["GET", "POST"])
@login_required
def createroom():
    """Create Room"""
    # if Method is GET Display Room creation form
    if request.method == "GET":
        return render_template("create.html")
    # Else: create the room and jump straight into editing its option list
    else:
        room_name = request.form.get("room_name")
        create_room(db, room_name, session['user_id'])
        # NOTE(review): reading MAX(room_id) after the insert is racy if two
        # users create rooms concurrently — consider having create_room
        # return the new row id instead.
        room_id = db.execute("SELECT MAX (room_id) AS room_id FROM rooms")
        session['edit_room'] = room_id[0]['room_id']
        return redirect(url_for('add_list'))
@app.route("/rooms", methods=["GET", "POST"])
@login_required
def show_rooms():
    """List the user's rooms (GET) or apply a room-management action (POST).

    POST dispatches on the submitted 'option' / 'option_joins' form field:
    edit, reset, close, delete (owner actions) or dashboard, leave (member
    actions).
    """
    if request.method == "GET":
        # Rooms the user owns and moderates
        rooms = db.execute("SELECT * FROM rooms WHERE user_id=:user_id", user_id=session['user_id'])
        # Rooms the user is part of and can vote in
        rooms_joins = db.execute("SELECT * FROM rooms WHERE room_id IN (SELECT room_id FROM roomjoins WHERE user_id=:user_id AND status='join')", user_id=session['user_id'])
        return render_template("rooms.html", rooms=rooms, rooms_joins=rooms_joins)
    else:
        room_id = request.form.get("room_id")
        if(request.form.get("option") == "edit"):
            # Go to create list:
            session['edit_room'] = room_id
            return redirect(url_for('add_list'))
        elif (request.form.get("option") == "reset"):
            # Reset all votes on the list
            db.execute("DELETE FROM voting WHERE room_id=:room_id", room_id=room_id)
            # Change Status of room to open
            db.execute("UPDATE rooms SET status='open' WHERE room_id=:room_id", room_id=room_id)
            # reset user vote voted to no
            db.execute("UPDATE roomjoins SET voted='no' WHERE room_id=:room_id",
                       room_id=room_id
                       )
            return redirect(url_for('show_rooms'))
        elif (request.form.get("option") == "close"):
            # Close Room (no further voting)
            db.execute("UPDATE rooms SET status='close' WHERE room_id=:room_id", room_id=room_id)
            return redirect(url_for('show_rooms'))
        elif (request.form.get("option") == "delete"):
            # Delete Room, Votes and Options (child rows first)
            # Delete Options:
            db.execute("DELETE FROM options WHERE room_id=:room_id", room_id=room_id)
            # Delete roomsjoins table rows
            db.execute("DELETE FROM roomjoins WHERE room_id=:room_id", room_id=room_id)
            # delete votes
            db.execute("DELETE FROM voting WHERE room_id=:room_id", room_id=room_id)
            # Delete room table row
            db.execute("DELETE FROM rooms WHERE room_id=:room_id", room_id=room_id)
            return redirect(url_for('show_rooms'))
        elif request.form.get("option_joins") == "dashboard":
            session['edit_room'] = room_id
            return redirect(url_for('dashboard'))
        elif request.form.get("option_joins") == "leave":
            # leave room and render /rooms again
            room_id = request.form.get("room_id")
            user_id = session['user_id']
            db.execute("UPDATE roomjoins SET status='leave' WHERE room_id=:room_id AND user_id=:user_id", room_id=room_id, user_id=user_id)
            return redirect(url_for('show_rooms'))
        # NOTE(review): an unrecognized option falls through and implicitly
        # returns None, which Flask turns into a server error.
@app.route("/modifylist", methods=["GET", "POST"])
@login_required
def edit_list():
    """Show a room's option list (GET) or reopen it for editing (POST)."""
    if request.method =="GET":
        room_id = session['edit_room']
        room_options = db.execute("SELECT * FROM options WHERE room_id=:room_id", room_id=room_id)
        room = db.execute("SELECT * FROM rooms WHERE room_id=:room_id", room_id=room_id)
        return render_template("showlist.html", room=room_options, room_name=room[0]['room_name'], room_id=room_id)
    else:
        # Reopening for edit invalidates any existing votes.
        room_id = request.form.get("room_id")
        db.execute("UPDATE rooms SET status='edit' WHERE room_id=:room_id", room_id=room_id)
        session['edit_room'] = room_id
        # Update every member in the room to 'not voted'
        db.execute("UPDATE roomjoins SET voted='no' WHERE room_id=:room_id",
                   room_id=room_id
                   )
        # Delete the now-stale votes
        db.execute("DELETE FROM voting WHERE room_id=:room_id", room_id=room_id)
        return redirect(url_for('add_list'))
@app.route("/createlist", methods=["GET", "POST"])
@login_required
def add_list():
    """Edit the option list of the room in session['edit_room'].

    GET renders the editor while the room is in 'edit' status; POST either
    adds an option ('add' button) or removes one (any other button).
    """
    if request.method == "GET":
        # check if room is already voting or closed, else it can be modified
        room_id = session['edit_room']
        room_options = db.execute("SELECT * FROM options WHERE room_id=:room_id", room_id=room_id)
        room = db.execute("SELECT * FROM rooms WHERE room_id=:room_id", room_id=room_id)
        if room[0]['status'] == 'edit':
            return render_template("createlist.html", room=room_options, room_name=room[0]['room_name'], room_id=room_id)
        else:
            # Not editable: bounce to the read-only list view.
            return redirect(url_for('edit_list'))
    else:
        if request.form.get("add") == "add":
            new_option = request.form.get("option")
            room_id = session['edit_room']
            # Add Option to option table
            db.execute("INSERT INTO options (option_name, room_id) VALUES (:option_name, :room_id)", option_name=new_option, room_id=room_id)
            return redirect(url_for('add_list'))
        else:
            option_id = request.form.get("change")  # option_id to remove
            # Remove Option from options table by option_id
            db.execute("DELETE FROM options WHERE option_id=:option_id", option_id=option_id)
            return redirect(url_for('add_list'))
@app.route("/register", methods=["GET", "POST"])
def register():
    """Register user.

    GET renders the registration form; POST validates the submitted
    credentials, rejects duplicates, and stores a salted password hash.
    """
    if request.method == "GET":
        return render_template("register.html", error="")
    # Get new user details
    username = request.form.get("username")
    password = request.form.get("password")
    # Fix: validate input before hashing (consistent with login's checks);
    # previously a missing password crashed inside generate_password_hash.
    if not username:
        return render_template("register.html", error="must provide username")
    if not password:
        return render_template("register.html", error="must provide password")
    hash_pass = generate_password_hash(password)
    # Check if user already exists
    user_registered = db.execute("SELECT * FROM users WHERE users.username=:username", username=username)
    if len(user_registered) != 0:
        return render_template("register.html", error="Sorry..user Name already exists")
    # Else Continue: create the account
    db.execute("INSERT INTO users (username, hash)\
                VALUES (:username, :hash)",
               username=username,
               hash=hash_pass)
    return redirect(url_for('login'))
@app.route("/godashboard")
@login_required
def godashboard():
    """Join (or re-join) a room and redirect to its dashboard.

    Expects 'room_id_join' and 'index_join' query parameters; a missing
    'room_id_join' raises a KeyError (400).
    """
    room_id = request.args['room_id_join']
    session["edit_room"] = room_id
    # Check if user is already in this room or has ever been in the room:
    in_room = db.execute("SELECT * FROM roomjoins WHERE room_id=:room_id AND user_id=:user_id", room_id=room_id, user_id=session['user_id'])
    if len(in_room) != 1:
        # First visit: add user to the room
        db.execute("INSERT INTO roomjoins (room_id, user_id, status)\
                    VALUES (:room_id, :user_id, :status)",
                   room_id=room_id,
                   user_id=session['user_id'],
                   status="join"
                   )
    else:
        # Returning member: flip their status back to 'join'
        db.execute("UPDATE roomjoins SET status='join' WHERE room_id=:room_id AND user_id=:user_id",
                   room_id=room_id,
                   user_id=session['user_id']
                   )
    # NOTE(review): when not arriving from the index search ('index_join' != "yes"),
    # the room is reopened — but only if the current user owns it (user_id match).
    if request.args["index_join"] != "yes":
        db.execute("UPDATE rooms SET status='open' WHERE room_id=:room_id AND user_id=:user_id",
                   room_id=room_id,
                   user_id=session['user_id']
                   )
    return redirect(url_for('dashboard'))
@app.route("/dashboard", methods=["GET", "POST"])
@login_required
def dashboard():
if request.method == "GET":
# Get Room and Room Options information
room_id = session['edit_room']
room = db.execute("SELECT * FROM rooms WHERE room_id=:room_id", room_id=room_id)
options = db.execute("SELECT * FROM options WHERE room_id=:room_id", room_id=room_id)
room_user_data = db.execute("SELECT * FROM roomjoins WHERE room_id=:room_id AND user_id=:user_id", room_id=room_id, user_id=session['user_id'])
# Room is open:
room_status = room[0]['status']
user_voted = room_user_data[0]['voted'] == 'yes'
in_room = room_user_data[0]['status'] == 'join'
# Check if user is in the room:
if not in_room:
# User not in the room display apology
return apology("you are not in this room")
# User is in the room, continue
# if room is open
if room_status == "open":
if not user_voted:
# Ask user to vote:
session['edit_room'] = room_id
return render_template('dashboard_vote.html', room=room, options=options)
# user voted -> Show user result only
else:
message = "These are your Votes!, to see the Room Results \
you have to wait the voting finishes!"
# Show dashboard wiht user result
user_votes = db.execute("SELECT options.option_id, options.option_name, voting.vote \
FROM voting\
JOIN options ON voting.option_id=options.option_id\
WHERE voting.user_id=:user_id AND voting.room_id=:room_id",
user_id=session["user_id"],
room_id=room_id)
return render_template("dashboard_result.html",
user_votes=user_votes,
room=room,
room_close=False,
user_voted=user_voted,
message=message
)
# else if Room is open or close and user voted -> Show Dashboard with ALL results
elif room_status == "close":
if not user_voted:
# Show results and tell user it did not vote:
message = "You did not Vote! but these are the Results!"
room_votes = db.execute("SELECT options.option_name, SUM (voting.vote) AS all_votes \
FROM voting \
JOIN options ON voting.option_id=options.option_id \
WHERE options.room_id=:room_id\
GROUP BY (options.option_name) \
ORDER BY all_votes DESC",
room_id=room_id)
return render_template("dashboard_result.html",
room_votes=room_votes,
room=room,
room_close=True,
user_voted=user_voted,
message=message
)
# Show all results
else:
# Show dashboard with user result
message = "Here are your votes and the Results!"
# User Votes:
user_votes = db.execute("SELECT options.option_id, options.option_name, voting.vote\
FROM voting\
JOIN options ON voting.option_id=options.option_id\
WHERE voting.user_id=:user_id AND voting.room_id=:room_id",
user_id=session["user_id"],
room_id=room_id)
# Room Results
#TODO check if there is a tie
room_votes = db.execute("SELECT options.option_name, SUM (voting.vote) AS all_votes\
FROM voting \
JOIN options ON voting.option_id=options.option_id \
WHERE options.room_id=:room_id\
GROUP BY (options.option_name) \
ORDER BY all_votes DESC",
room_id=room_id
)
# Create pie chart list
chart_list = [["Option", "Vote"]]
for row in room_votes:
temp = [row['option_name'], row['all_votes']]
chart_list.append(temp)
return render_template("dashboard_result.html",
room_votes=room_votes,
user_votes=user_votes,
room=room,
room_close=True,
user_voted=user_voted,
message=message,
chart_list=chart_list)
# Else room is being edited
else:
| |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 10:32:22 2018
@author: Shin2018
"""
import numpy as np
def generate_traindata_for_train(traindata_all,traindata_labels,input_size,label_size,batch_size,Setting02_AngualrViews):
    """
    Generate one randomly-augmented training batch of directional EPI image
    stacks (0d / 90d / -45d) plus label patches from full light-field images.

    input: traindata_all     (N x 512 x 512 x 9 x 9 x 3) uint8  LF images
           traindata_labels  float32 disparity labels; indexed below as
                             [imid, x, y, corner_index] with corner_index in
                             {0, 8, 72, 80}, i.e. apparently a flattened
                             9x9 (=81) view axis -- TODO confirm with loader
           input_size        23~ int, spatial size of the input patch
           label_size        1~  int, spatial size of the label patch
           batch_size        16  int
           Setting02_AngualrViews [0,1,2,3,4,5,6,7,8] for 9x9 views

    Per-sample random augmentations:
        1. gray image: random R,G,B --> R*img_R + G*img_G + B*img_B
        2. patch-wise learning: random x,y --> LFimage[x:x+size1,y:y+size2]
        3. scale augmentation: scale 1,2,3 --> ex> LFimage[x:x+2*size1:2,y:y+2*size2:2]
        4. corner selection: one of the NW/NE/SW/SE view corners, mapped back
           to a canonical orientation with np.rot90

    output: traindata_batch_90d   (batch_size x input_size x input_size x len(Setting02_AngualrViews)) float32
            traindata_batch_0d    (batch_size x input_size x input_size x len(Setting02_AngualrViews)) float32
            traindata_batch_m45d  (batch_size x input_size x input_size x len(Setting02_AngualrViews)) float32
            traindata_batch_label (batch_size x label_size x label_size) float32

    NOTE(review): unlike the original EPINET code, the 45-degree stack is
    commented out below and is NOT produced or returned by this version.
    """
    """ initialize image_stack & label """
    traindata_batch_90d=np.zeros((batch_size,input_size,input_size,len(Setting02_AngualrViews)),dtype=np.float32)
    traindata_batch_0d=np.zeros((batch_size,input_size,input_size,len(Setting02_AngualrViews)),dtype=np.float32)
#    traindata_batch_45d=np.zeros((batch_size,input_size,input_size,len(Setting02_AngualrViews)),dtype=np.float32)
    traindata_batch_m45d=np.zeros((batch_size,input_size,input_size,len(Setting02_AngualrViews)),dtype=np.float32)
    traindata_batch_label=np.zeros((batch_size,label_size,label_size))

    """ inital variable """
    # First/last angular index, and the offset of the label crop inside the input crop.
    start1=Setting02_AngualrViews[0]
    end1=Setting02_AngualrViews[-1]
    crop_half1=int(0.5*(input_size-label_size))

    """ Generate image stacks"""
    for ii in range(0,batch_size):
        sum_diff=0
        valid=0

        # Re-draw random parameters until the crop is valid and has enough
        # texture (sum of abs. differences from the center pixel above threshold).
        while( sum_diff<0.01*input_size*input_size or valid<1 ):

            """//Variable for gray conversion//"""
            # Random RGB->gray weights, normalized to sum to 1.
            rand_3color=0.05+np.random.rand(3)
            rand_3color=rand_3color/np.sum(rand_3color)
            R=rand_3color[0]
            G=rand_3color[1]
            B=rand_3color[2]

            # Pick a random LF image from the training set.
            aa_arr =np.array(range(traindata_all.shape[0]))
            imid=np.random.choice(aa_arr)

            """
                //Shift augmentation for 7x7, 5x5 viewpoints,.. //
                Details in our epinet paper.
            """
            #randomly decide the scale##
            # scale=1 with p=8/17, scale=2 with p=6/17, scale=3 with p=3/17
            kk=np.random.randint(17)
            if(kk<8):
                scale=1
            elif(kk<14):
                scale=2
            elif(kk<17):
                scale=3
            ############################

            # Random top-left corner of the (scale-enlarged) crop window.
            idx_start = np.random.randint(0,512-scale*input_size)
            idy_start = np.random.randint(0,512-scale*input_size)
            valid=1

            """
                boolmask: reflection masks for images(4,6,15)
            """
            # Reflection-mask rejection is disabled below, so valid stays 1 here.
#            if(imid==4 or 6 or 15):
#                if(imid==4):
#                    a_tmp=boolmask_img4
#                if(imid==6):
#                    a_tmp=boolmask_img6
#                if(imid==15):
#                    a_tmp=boolmask_img15
#                if( np.sum(a_tmp[idx_start+scale*crop_half1: idx_start+scale*crop_half1+scale*label_size:scale,
#                                 idy_start+scale*crop_half1: idy_start+scale*crop_half1+scale*label_size:scale])>0
#                    or np.sum(a_tmp[idx_start: idx_start+scale*input_size:scale,
#                                    idy_start: idy_start+scale*input_size:scale])>0 ):
#                    valid=0

            if(valid>0):
#                seq0to8=np.array(Setting02_AngualrViews)#+ix_rd
#                seq8to0=np.array(Setting02_AngualrViews[::-1])#+iy_rd

                # Gray center view, used only for the texture check below.
                image_center=(1/255)*np.squeeze(R*traindata_all[imid, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, 4, 4, 0].astype('float32')+
                                                G*traindata_all[imid, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, 4, 4, 1].astype('float32')+
                                                B*traindata_all[imid, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, 4, 4, 2].astype('float32'))
                sum_diff=np.sum(np.abs(image_center-np.squeeze(image_center[int(0.5*input_size),int(0.5*input_size)])))

                '''
                    Four image stacks are selected from LF full(512x512) images.
                    gray-scaled, cropped and scaled

                    traindata_batch_0d   <-- RGBtoGray( traindata_all[random_index, scaled_input_size, scaled_input_size, 4(center), 0to8 ] )
                    traindata_batch_90d  <-- RGBtoGray( traindata_all[random_index, scaled_input_size, scaled_input_size, 8to0, 4(center) ] )
                    traindata_batch_45d  <-- RGBtoGray( traindata_all[random_index, scaled_input_size, scaled_input_size, 8to0, 0to8 ] )
                    traindata_batch_m45d <-- RGBtoGray( traindata_all[random_index, scaled_input_size, scaled_input_size, 0to8, 0to8 ] )
                '''
                # Pick one of the four view-grid corners; corner_dict maps the
                # corner name to an index into the label's flattened view axis.
                corner_code = ["NW","NE","SW","SE"][np.random.randint(0,4)]
                corner_dict = {"NW":0,"NE":8,"SW":72,"SE":80}
                # Per-corner settings: rotm is the np.rot90 count mapping this
                # corner onto the canonical (NW) orientation; iars_*/iacs_* are
                # the angular row/column indices used for each stack; ksignr /
                # ksignc give the diagonal stepping direction for the -45d stack.
                if corner_code == "NW":
                    rotm = 0
                    iars_0d = 0
                    iacs_0d = list(range(0,9))
                    iars_90d = list(range(0,9)[::-1])
                    iacs_90d = 0
                    iars_m45d = 0
                    iacs_m45d = 0
                    ksignr = 1
                    ksignc = 1
                elif corner_code == "NE":
                    rotm = 1
                    iars_0d = list(range(0,9))
                    iacs_0d = 8
                    iars_90d = 0
                    iacs_90d = list(range(0,9))
                    iars_m45d = 0
                    iacs_m45d = 8
                    ksignr = 1
                    ksignc = -1
                elif corner_code == "SW":
                    rotm = -1
                    iars_0d = list(range(0,9))[::-1]
                    iacs_0d = 0
                    iars_90d = 8
                    iacs_90d = list(range(0,9))[::-1]
                    iars_m45d = 8
                    iacs_m45d = 0
                    ksignr = -1
                    ksignc = 1
                elif corner_code == "SE":
                    rotm = 2
                    iars_0d = 8
                    iacs_0d = list(range(0,9))[::-1]
                    iars_90d = list(range(0,9))
                    iacs_90d = 8
                    iars_m45d = 8
                    iacs_m45d = 8
                    ksignr = -1
                    ksignc = -1

                # Horizontal (0d) stack: gray-converted, cropped, scaled, rotated.
                traindata_batch_0d[ii,:,:,:]=np.rot90(np.squeeze(R*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, iars_0d, iacs_0d, 0].astype('float32')+
                                                                 G*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, iars_0d,iacs_0d, 1].astype('float32')+
                                                                 B*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, iars_0d, iacs_0d, 2].astype('float32')),rotm,(0,1))
                # Vertical (90d) stack.
                traindata_batch_90d[ii,:,:,:]=np.rot90(np.squeeze(R*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, iars_90d, iacs_90d, 0].astype('float32')+
                                                                  G*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, iars_90d, iacs_90d, 1].astype('float32')+
                                                                  B*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, iars_90d, iacs_90d, 2].astype('float32')),rotm,(0,1))
                # Diagonal (-45d) stack: one view per angular step along the diagonal.
                for kkk in range(start1,end1+1):
                    traindata_batch_m45d[ii,:,:,kkk-start1]=np.rot90(np.squeeze(R*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, ksignr*kkk + iars_m45d, ksignc*kkk + iacs_m45d, 0].astype('float32')+
                                                                                G*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, ksignr*kkk + iars_m45d, ksignc*kkk + iacs_m45d, 1].astype('float32')+
                                                                                B*traindata_all[imid:imid+1, idx_start: idx_start+scale*input_size:scale, idy_start: idy_start+scale*input_size:scale, ksignr*kkk + iars_m45d, ksignc*kkk + iacs_m45d, 2].astype('float32')),rotm,(0,1))

                '''
                    traindata_batch_label <-- scale_factor*traindata_label[random_index, scaled_label_size, scaled_label_size]
                '''
                # 1.0/scale: spatial downsampling by `scale` shrinks disparities
                # by the same factor -- presumably why the label is rescaled here.
                traindata_batch_label[ii,:,:]=(1.0/scale)*np.rot90(traindata_labels[imid, idx_start+scale*crop_half1: idx_start+scale*crop_half1+scale*label_size:scale,
                                                                                    idy_start+scale*crop_half1: idy_start+scale*crop_half1+scale*label_size:scale,corner_dict[corner_code]],rotm,(0,1))
#                if(len(traindata_labels.shape)==5):
#                    traindata_batch_label[ii,:,:]=(1.0/scale)*traindata_labels[imid, idx_start+scale*crop_half1: idx_start+scale*crop_half1+scale*label_size:scale,
#                                                                               idy_start+scale*crop_half1: idy_start+scale*crop_half1+scale*label_size:scale,0,0]
#                else:
#                    traindata_batch_label[ii,:,:]=(1.0/scale)*traindata_labels[imid, idx_start+scale*crop_half1: idx_start+scale*crop_half1+scale*label_size:scale,
#                                                                               idy_start+scale*crop_half1: idy_start+scale*crop_half1+scale*label_size:scale]

    # Normalize uint8 pixel values to [0,1] floats.
    traindata_batch_90d=np.float32((1/255)*traindata_batch_90d)
    traindata_batch_0d =np.float32((1/255)*traindata_batch_0d)
#    traindata_batch_45d=np.float32((1/255)*traindata_batch_45d)
    traindata_batch_m45d=np.float32((1/255)*traindata_batch_m45d)

    return traindata_batch_90d,traindata_batch_0d,traindata_batch_m45d, traindata_batch_label #,usage_check
def data_augmentation_for_train(traindata_batch_90d, traindata_batch_0d,
traindata_batch_m45d, traindata_label_batchNxN, batch_size):
"""
For Data augmentation
(rotation, transpose and gamma)
"""
for batch_i in range(batch_size):
gray_rand=0.4*np.random.rand()+0.8
traindata_batch_90d[batch_i,:,:,:]=pow(traindata_batch_90d[batch_i,:,:,:],gray_rand)
traindata_batch_0d[batch_i,:,:,:]=pow(traindata_batch_0d[batch_i,:,:,:],gray_rand)
traindata_batch_m45d[batch_i,:,:,:]=pow(traindata_batch_m45d[batch_i,:,:,:],gray_rand)
rotation_or_transp_rand=np.random.randint(0,2)
if rotation_or_transp_rand==1:
##take the transpose of each view seperately:
traindata_batch_90d_tmp6=np.copy(np.transpose(np.squeeze(traindata_batch_90d[batch_i,:,:,:]),(1, 0, 2)) )
traindata_batch_0d_tmp6=np.copy(np.transpose(np.squeeze(traindata_batch_0d[batch_i,:,:,:]),(1, 0, 2)) )
traindata_batch_m45d_tmp6=np.copy(np.transpose(np.squeeze(traindata_batch_m45d[batch_i,:,:,:]),(1, 0, 2)) )
##take the transpose of the whole view grid:
traindata_batch_0d[batch_i,:,:,:]=np.copy(traindata_batch_90d_tmp6[:,:,::-1])
traindata_batch_90d[batch_i,:,:,:]=np.copy(traindata_batch_0d_tmp6[:,:,::-1])
traindata_batch_m45d[batch_i,:,:,:]=np.copy(traindata_batch_m45d_tmp6)#[:,:,::-1])
##############################################
traindata_label_batchNxN[batch_i,:,:]=np.copy(np.transpose(traindata_label_batchNxN[batch_i,:,:],(1, 0)))
return traindata_batch_90d, traindata_batch_0d,traindata_batch_m45d, traindata_label_batchNxN
def generate_traindata512(traindata_all,traindata_label,Setting02_AngualrViews,corner_code):
    """
    Generate a validation or test set ( = full size (512x512) LF images).

    input: traindata_all   (16x512x512x9x9x3) uint8  LF images
           traindata_label float32 disparity labels; indexed below as
                           [imid, x, y, corner_index] with corner_index in
                           {0, 8, 72, 80}, i.e. apparently a flattened
                           9x9 (=81) view axis -- TODO confirm with loader
           Setting02_AngualrViews [0,1,2,3,4,5,6,7,8] for 9x9
           corner_code     one of "NW","NE","SW","SE": which view-grid corner
                           to extract and how to rotate it back to canonical
                           orientation (any other value leaves rotm/iars_*
                           unset and raises NameError below)

    output: traindata_batch_90d      (N x 512 x 512 x len(Setting02_AngualrViews)) float32, clipped to [0,1]
            traindata_batch_0d       (N x 512 x 512 x len(Setting02_AngualrViews)) float32, clipped to [0,1]
            traindata_batch_m45d     (N x 512 x 512 x len(Setting02_AngualrViews)) float32, clipped to [0,1]
            traindata_label_batchNxN (N x 512 x 512) float32

    NOTE(review): the 45-degree stack is not produced by this version.  Gray
    conversion uses fixed 0.299/0.587/0.114 weights here instead of the
    random weights used in generate_traindata_for_train.
    """
    corner_dict = {"NW":0,"NE":8,"SW":72,"SE":80}
    # Full-resolution "patches": input and label both cover the whole image.
    input_size=512; label_size=512;
    traindata_batch_90d=np.zeros((len(traindata_all),input_size,input_size,len(Setting02_AngualrViews)),dtype=np.float32)
    traindata_batch_0d=np.zeros((len(traindata_all),input_size,input_size,len(Setting02_AngualrViews)),dtype=np.float32)
    traindata_batch_m45d=np.zeros((len(traindata_all),input_size,input_size,len(Setting02_AngualrViews)),dtype=np.float32)
    traindata_label_batchNxN=np.zeros((len(traindata_all),label_size,label_size))

    """ inital setting """
    ### sz = (16, 27, 9, 512, 512)
    crop_half1=int(0.5*(input_size-label_size))
    start1=Setting02_AngualrViews[0]
    end1=Setting02_AngualrViews[-1]
    # starttime=time.process_time()  # takes about 0.375 s (measured on an i5)

    # Per-corner settings: rotm is the np.rot90 count mapping this corner onto
    # the canonical (NW) orientation; iars_*/iacs_* are the angular row/column
    # indices used for each stack; ksignr/ksignc give the diagonal stepping
    # direction for the -45d stack.
    if corner_code == "NW":
        rotm = 0
        iars_0d = 0
        iacs_0d = list(range(0,9))
        iars_90d = list(range(0,9)[::-1])
        iacs_90d = 0
        iars_m45d = 0
        iacs_m45d = 0
        ksignr = 1
        ksignc = 1
    elif corner_code == "NE":
        rotm = 1
        iars_0d = list(range(0,9))
        iacs_0d = 8
        iars_90d = 0
        iacs_90d = list(range(0,9))
        iars_m45d = 0
        iacs_m45d = 8
        ksignr = 1
        ksignc = -1
    elif corner_code == "SW":
        rotm = -1
        iars_0d = list(range(0,9))[::-1]
        iacs_0d = 0
        iars_90d = 8
        iacs_90d = list(range(0,9))[::-1]
        iars_m45d = 8
        iacs_m45d = 0
        ksignr = -1
        ksignc = 1
    elif corner_code == "SE":
        rotm = 2
        iars_0d = 8
        iacs_0d = list(range(0,9))[::-1]
        iars_90d = list(range(0,9))
        iacs_90d = 8
        iars_m45d = 8
        iacs_m45d = 8
        ksignr = -1
        ksignc = -1

    for ii in range(0,len(traindata_all)):
        # Fixed luminance weights (no random gray augmentation at eval time).
        R = 0.299 ### 0,1,2,3 = R, G, B, Gray // 0.299 0.587 0.114
        G = 0.587
        B = 0.114
        imid = ii
        idx_start = 0
        idy_start = 0

        # Horizontal (0d) stack: gray-converted full image, rotated to canonical orientation.
        traindata_batch_0d[ii,:,:,:]=np.rot90(np.squeeze(R*traindata_all[imid:imid+1, idx_start: idx_start+input_size, idy_start: idy_start+input_size, iars_0d, iacs_0d, 0].astype('float32')+
                                                         G*traindata_all[imid:imid+1, idx_start: idx_start+input_size, idy_start: idy_start+input_size, iars_0d, iacs_0d, 1].astype('float32')+
                                                         B*traindata_all[imid:imid+1, idx_start: idx_start+input_size, idy_start: idy_start+input_size, iars_0d, iacs_0d, 2].astype('float32')),rotm,(0,1))
        # Vertical (90d) stack.
        traindata_batch_90d[ii,:,:,:]=np.rot90(np.squeeze(R*traindata_all[imid:imid+1, idx_start: idx_start+input_size,idy_start: idy_start+input_size, iars_90d, iacs_90d, 0].astype('float32')+
                                                          G*traindata_all[imid:imid+1, idx_start: idx_start+input_size,idy_start: idy_start+input_size, iars_90d, iacs_90d, 1].astype('float32')+
                                                          B*traindata_all[imid:imid+1, idx_start: idx_start+input_size,idy_start: idy_start+input_size, iars_90d, iacs_90d, 2].astype('float32')),rotm,(0,1))
        # Diagonal (-45d) stack: one view per angular step along the diagonal.
        for kkk in range(start1,end1+1):
            traindata_batch_m45d[ii,:,:,kkk-start1]=np.rot90(np.squeeze(R*traindata_all[imid:imid+1, idx_start: idx_start+input_size,idy_start: idy_start+input_size, ksignr*kkk + iars_m45d, ksignc*kkk + iacs_m45d, 0].astype('float32')+
                                                                        G*traindata_all[imid:imid+1, idx_start: idx_start+input_size,idy_start: idy_start+input_size, ksignr*kkk + iars_m45d, ksignc*kkk + iacs_m45d, 1].astype('float32')+
                                                                        B*traindata_all[imid:imid+1, idx_start: idx_start+input_size,idy_start: idy_start+input_size, ksignr*kkk + iars_m45d, ksignc*kkk + iacs_m45d, 2].astype('float32')),rotm,(0,1))
        # Label for the chosen corner, rotated the same way as the inputs.
        traindata_label_batchNxN[ii,:,:]=np.rot90(traindata_label[imid ,idx_start+crop_half1: idx_start+crop_half1+label_size,idy_start+crop_half1: idy_start+crop_half1+label_size, corner_dict[corner_code]],rotm,(0,1))

    # Normalize uint8 pixel values to [0,1] floats and clip.
    traindata_batch_90d=np.float32((1/255)*traindata_batch_90d)
    traindata_batch_0d =np.float32((1/255)*traindata_batch_0d)
#    traindata_batch_45d=np.float32((1/255)*traindata_batch_45d)
    traindata_batch_m45d=np.float32((1/255)*traindata_batch_m45d)

    traindata_batch_90d=np.minimum(np.maximum(traindata_batch_90d,0),1)
    traindata_batch_0d=np.minimum(np.maximum(traindata_batch_0d,0),1)
#    traindata_batch_45d=np.minimum(np.maximum(traindata_batch_45d,0),1)
    traindata_batch_m45d=np.minimum(np.maximum(traindata_batch_m45d,0),1)

    return traindata_batch_90d,traindata_batch_0d,traindata_batch_m45d, traindata_label_batchNxN
##############################################################
##############################################################
if __name__ == "__main__":
#traindata_all = np.ones((16,512,512,9,9,3), dtype=np.uint8)
#traindata_label = np.ones((16,512,512,9,9), dtype=np.float32)
input_size=25
label_size=input_size-22
batch_size=1
Setting02_AngualrViews = [0,1,2,3,4,5,6,7,8]
nviews = len(Setting02_AngualrViews)
#boolmask_img4 = np.ndarray((512,512),np.bool)
#boolmask_img6 = np.ndarray((512,512),np.bool)
#boolmask_img15 = np.ndarray((512,512),np.bool)
#
#(traindata_batch_90d, traindata_batch_0d,
# traindata_batch_45d, traindata_batch_m45d,
# traindata_label_batchNxN)= generate_traindata_for_train(traindata_all,traindata_label,
# input_size,label_size,batch_size,
# Setting02_AngualrViews,
# boolmask_img4,boolmask_img6,boolmask_img15)
#(traindata_batch_90d, traindata_batch_0d,
# traindata_batch_45d,traindata_batch_m45d,
# traindata_label_batchNxN) = data_augmentation_for_train(traindata_batch_90d,
# traindata_batch_0d,
# traindata_batch_45d,
# traindata_batch_m45d,
# traindata_label_batchNxN,
# batch_size)
from matplotlib import pyplot as plt
import cv2
dog=plt.imread('C:\\Local\\vhemka\\Data\\dog.png')
dog = cv2.cvtColor(cv2.resize(dog,(input_size,input_size)),cv2.COLOR_RGB2GRAY)
plt.imshow(dog)
plt.show()
traindata_batch_90d = np.tile(dog[np.newaxis,:,:,np.newaxis],(1,1,1,nviews))
##########################################################
cat=plt.imread('C:\\Local\\vhemka\\Data\\cat.png')
cat = cv2.cvtColor(cv2.resize(cat,(input_size,input_size)),cv2.COLOR_RGB2GRAY)
plt.imshow(cat)
plt.show()
traindata_batch_0d = np.tile(cat[np.newaxis,:,:,np.newaxis],(1,1,1,nviews))
##########################################################
car=plt.imread('C:\\Local\\vhemka\\Data\\car.png')
car = cv2.cvtColor(cv2.resize(car,(input_size,input_size)),cv2.COLOR_RGB2GRAY)
plt.imshow(car)
plt.show()
traindata_batch_45d = np.tile(car[np.newaxis,:,:,np.newaxis],(1,1,1,nviews))
##########################################################
house=plt.imread('C:\\Local\\vhemka\\Data\\house.png')
house = cv2.cvtColor(cv2.resize(house,(input_size,input_size)),cv2.COLOR_RGB2GRAY)
plt.imshow(house)
plt.show()
traindata_batch_m45d = np.tile(house[np.newaxis,:,:,np.newaxis],(1,1,1,nviews))
traindata_label_batchNxN=np.zeros((batch_size,label_size,label_size))
"""
For Data augmentation
(rotation, transpose and gamma)
"""
#for batch_i in range(batch_size):
batch_i=0
gray_rand=0.4*np.random.rand()+0.8
traindata_batch_90d[batch_i,:,:,:]=pow(traindata_batch_90d[batch_i,:,:,:],gray_rand)
traindata_batch_0d[batch_i,:,:,:]=pow(traindata_batch_0d[batch_i,:,:,:],gray_rand)
traindata_batch_45d[batch_i,:,:,:]=pow(traindata_batch_45d[batch_i,:,:,:],gray_rand)
traindata_batch_m45d[batch_i,:,:,:]=pow(traindata_batch_m45d[batch_i,:,:,:],gray_rand)
##take the | |
<gh_stars>1-10
from copy import copy
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast
from pypika import Table
from pypika.terms import Criterion
from tortoise.exceptions import FieldError, OperationalError
from tortoise.fields.relational import BackwardFKRelation, ManyToManyFieldInstance, RelationalField
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
from tortoise.queryset import QuerySet
def _process_filter_kwarg(
    model: "Type[Model]", key: str, value: Any, table: Table
) -> Tuple[Criterion, Optional[Tuple[Table, Criterion]]]:
    """
    Translate one ``key=value`` filter kwarg into a pypika criterion, plus an
    optional ``(table, join_condition)`` pair when the filter lives on another
    table.

    A ``None`` value is rewritten to the field's ``__isnull`` filter when the
    model declares one.
    """
    isnull_key = f"{key}__isnull"
    if value is None and isnull_key in model._meta.filters:
        param = model._meta.get_filter(isnull_key)
        value = True
    else:
        param = model._meta.get_filter(key)

    if param.get("table"):
        # Filter resolves on a separate table: join it via the model's PK.
        pk_db_field = model._meta.db_pk_column
        join = (
            param["table"],
            table[pk_db_field] == param["table"][param["backward_key"]],
        )
        encoder = param.get("value_encoder")
        if encoder:
            value = encoder(value, model)
        return param["operator"](param["table"][param["field"]], value), join

    # Filter resolves on the model's own table: encode the value for the DB.
    field_object = model._meta.fields_map[param["field"]]
    encoder = param.get("value_encoder")
    if encoder:
        encoded_value = encoder(value, model, field_object)
    else:
        encoded_value = model._meta.db.executor_class._field_to_db(field_object, value, model)
    return param["operator"](table[param["source_field"]], encoded_value), None
def _get_joins_for_related_field(
    table: Table, related_field: RelationalField, related_field_name: str
) -> List[Tuple[Table, Criterion]]:
    """
    Build the ordered list of ``(table, join_condition)`` pairs required to
    reach ``related_field``'s table from ``table``.

    Many-to-many relations need two joins (via the through table); backward
    and forward FK/O2O relations need one.
    """
    related_table: Table = related_field.related_model._meta.basetable

    if isinstance(related_field, ManyToManyFieldInstance):
        through_table = Table(related_field.through)
        first_hop = (
            through_table,
            table[related_field.model._meta.db_pk_column]
            == through_table[related_field.backward_key],
        )
        second_hop = (
            related_table,
            through_table[related_field.forward_key]
            == related_table[related_field.related_model._meta.db_pk_column],
        )
        return [first_hop, second_hop]

    if isinstance(related_field, BackwardFKRelation):
        to_field_source = (
            related_field.to_field_instance.source_field
            or related_field.to_field_instance.model_field_name
        )
        if table == related_table:
            # Self-referential join: alias the related side so both are distinct.
            related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
        return [
            (
                related_table,
                table[to_field_source] == related_table[related_field.relation_source_field],
            )
        ]

    # Forward FK / O2O relation.
    to_field_source = (
        related_field.to_field_instance.source_field
        or related_field.to_field_instance.model_field_name
    )
    from_field = related_field.model._meta.fields_map[related_field.source_field]  # type: ignore
    from_field_source = from_field.source_field or from_field.model_field_name
    related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
    return [(related_table, related_table[to_field_source] == table[from_field_source])]
class EmptyCriterion(Criterion):  # type: ignore
    """
    Identity element for criterion composition: combining it with any other
    criterion via ``|`` or ``&`` yields the other criterion unchanged, and it
    is falsy so callers can test whether a real criterion is present.
    """

    def __or__(self, other: Criterion) -> Criterion:
        return other

    def __and__(self, other: Criterion) -> Criterion:
        return other

    def __bool__(self) -> bool:
        return False
def _and(left: Criterion, right: Criterion) -> Criterion:
    """AND-combine two criteria, passing ``left`` through unchanged when ``right`` is empty."""
    if right or not left:
        return left & right
    return left
def _or(left: Criterion, right: Criterion) -> Criterion:
    """OR-combine two criteria, passing ``left`` through unchanged when ``right`` is empty."""
    if right or not left:
        return left | right
    return left
class QueryModifier:
    """
    Internal structure used to generate SQL Queries.

    Bundles a WHERE criterion, a list of JOIN clauses and a HAVING criterion,
    and implements ``&``, ``|`` and ``~`` so Q-expression trees can be folded
    into a single modifier.
    """

    def __init__(
        self,
        where_criterion: Optional[Criterion] = None,
        joins: Optional[List[Tuple[Table, Criterion]]] = None,
        having_criterion: Optional[Criterion] = None,
    ) -> None:
        # Missing criteria are represented by the falsy EmptyCriterion so the
        # _and/_or helpers can combine them without special-casing None.
        self.where_criterion: Criterion = where_criterion or EmptyCriterion()
        self.joins = joins or []
        self.having_criterion: Criterion = having_criterion or EmptyCriterion()

    def __and__(self, other: "QueryModifier") -> "QueryModifier":
        """AND-merge: combine criteria pairwise and concatenate the joins."""
        combined_joins = self.joins + other.joins
        return QueryModifier(
            _and(self.where_criterion, other.where_criterion),
            combined_joins,
            _and(self.having_criterion, other.having_criterion),
        )

    def __or__(self, other: "QueryModifier") -> "QueryModifier":
        """OR-merge, pushing WHERE into HAVING when either side has a HAVING."""
        combined_joins = self.joins + other.joins
        if self.having_criterion or other.having_criterion:
            # TODO: This could be optimized?
            merged_having = _or(
                _and(self.where_criterion, self.having_criterion),
                _and(other.where_criterion, other.having_criterion),
            )
            return QueryModifier(joins=combined_joins, having_criterion=merged_having)
        if self.where_criterion and other.where_criterion:
            return QueryModifier(
                where_criterion=self.where_criterion | other.where_criterion,
                joins=combined_joins,
            )
        return QueryModifier(
            where_criterion=self.where_criterion or other.where_criterion,
            joins=combined_joins,
        )

    def __invert__(self) -> "QueryModifier":
        """Negate the criteria, keeping joins untouched."""
        if not self.where_criterion and not self.having_criterion:
            return QueryModifier(joins=self.joins)
        if self.having_criterion:
            # TODO: This could be optimized?
            return QueryModifier(
                joins=self.joins,
                having_criterion=_and(self.where_criterion, self.having_criterion).negate(),
            )
        return QueryModifier(where_criterion=self.where_criterion.negate(), joins=self.joins)

    def get_query_modifiers(self) -> Tuple[Criterion, List[Tuple[Table, Criterion]], Criterion]:
        """
        Returns a tuple of the query criterion.
        """
        return self.where_criterion, self.joins, self.having_criterion
class Q:
    """
    Q Expression container.
    Q Expressions are a useful tool to compose a query from many small parts.

    :param join_type: Is the join an AND or OR join type?
    :param args: Inner ``Q`` expressions that you want to wrap.
    :param kwargs: Filter statements that this Q object should encapsulate.
    """

    __slots__ = (
        "children",
        "filters",
        "join_type",
        "_is_negated",
        "_annotations",
        "_custom_filters",
    )

    AND = "AND"
    OR = "OR"

    def __init__(self, *args: "Q", join_type: str = AND, **kwargs: Any) -> None:
        if args and kwargs:
            # Mixed usage: fold the kwargs into an extra child Q node so that
            # children and filters are never populated at the same time.
            newarg = Q(join_type=join_type, **kwargs)
            args = (newarg,) + args
            kwargs = {}
        if not all(isinstance(node, Q) for node in args):
            raise OperationalError("All ordered arguments must be Q nodes")
        #: Contains the sub-Q's that this Q is made up of
        self.children: Tuple[Q, ...] = args
        #: Contains the filters applied to this Q
        self.filters: Dict[str, Any] = kwargs
        if join_type not in {self.AND, self.OR}:
            raise OperationalError("join_type must be AND or OR")
        #: Specifies if this Q does an AND or OR on its children
        self.join_type = join_type
        self._is_negated = False
        # Populated later by resolve(); empty until then.
        self._annotations: Dict[str, Any] = {}
        self._custom_filters: Dict[str, Dict[str, Any]] = {}

    def __and__(self, other: "Q") -> "Q":
        """
        Returns a binary AND of Q objects, use ``AND`` operator.

        :raises OperationalError: AND operation requires a Q node
        """
        if not isinstance(other, Q):
            raise OperationalError("AND operation requires a Q node")
        return Q(self, other, join_type=self.AND)

    def __or__(self, other: "Q") -> "Q":
        """
        Returns a binary OR of Q objects, use ``OR`` operator.

        :raises OperationalError: OR operation requires a Q node
        """
        if not isinstance(other, Q):
            raise OperationalError("OR operation requires a Q node")
        return Q(self, other, join_type=self.OR)

    def __invert__(self) -> "Q":
        """
        Returns a negated instance of the Q object, use ``~`` operator.
        """
        q = Q(*self.children, join_type=self.join_type, **self.filters)
        q.negate()
        return q

    def negate(self) -> None:
        """
        Negates the current Q object. (mutation)
        """
        # Toggling (rather than setting) makes double negation a no-op.
        self._is_negated = not self._is_negated

    def _resolve_nested_filter(
        self, model: "Type[Model]", key: str, value: Any, table: Table
    ) -> QueryModifier:
        """
        Resolve a filter that traverses a relation (e.g. ``author__name``):
        build the joins to the related table, then resolve the remainder of
        the key against the related model on the joined table.
        """
        related_field_name = key.split("__")[0]
        related_field = cast(RelationalField, model._meta.fields_map[related_field_name])
        required_joins = _get_joins_for_related_field(table, related_field, related_field_name)
        modifier = Q(**{"__".join(key.split("__")[1:]): value}).resolve(
            model=related_field.related_model,
            annotations=self._annotations,
            custom_filters=self._custom_filters,
            table=required_joins[-1][0],
        )
        return QueryModifier(joins=required_joins) & modifier

    def _resolve_custom_kwarg(
        self, model: "Type[Model]", key: str, value: Any, table: Table
    ) -> QueryModifier:
        """
        Resolve a filter on an annotation: aggregate annotations produce a
        HAVING criterion, non-aggregates a WHERE criterion.
        """
        having_info = self._custom_filters[key]
        annotation = self._annotations[having_info["field"]]
        annotation_info = annotation.resolve(model, table)
        operator = having_info["operator"]
        # Dialect-specific executors may override the filter function.
        overridden_operator = model._meta.db.executor_class.get_overridden_filter_func(
            filter_func=operator
        )
        if overridden_operator:
            operator = overridden_operator
        if annotation_info["field"].is_aggregate:
            modifier = QueryModifier(having_criterion=operator(annotation_info["field"], value))
        else:
            modifier = QueryModifier(where_criterion=operator(annotation_info["field"], value))
        return modifier

    def _resolve_regular_kwarg(
        self, model: "Type[Model]", key: str, value: Any, table: Table
    ) -> QueryModifier:
        """
        Resolve a plain field filter; falls through to nested resolution when
        the key is not a known filter but starts with a relation name.
        """
        if key not in model._meta.filters and key.split("__")[0] in model._meta.fetch_fields:
            modifier = self._resolve_nested_filter(model, key, value, table)
        else:
            criterion, join = _process_filter_kwarg(model, key, value, table)
            joins = [join] if join else []
            modifier = QueryModifier(where_criterion=criterion, joins=joins)
        return modifier

    def _get_actual_filter_params(
        self, model: "Type[Model]", key: str, value: Any
    ) -> Tuple[str, Any]:
        """
        Normalize one raw filter pair: map relational keys to their source
        column and model-instance values to their primary key.

        :raises FieldError: If the key is not a recognised filter parameter.
        """
        filter_key = key
        if key in model._meta.fk_fields or key in model._meta.o2o_fields:
            field_object = model._meta.fields_map[key]
            if hasattr(value, "pk"):
                filter_value = value.pk
            else:
                filter_value = value
            filter_key = cast(str, field_object.source_field)
        elif key in model._meta.m2m_fields:
            if hasattr(value, "pk"):
                filter_value = value.pk
            else:
                filter_value = value
        elif (
            key.split("__")[0] in model._meta.fetch_fields
            or key in self._custom_filters
            or key in model._meta.filters
        ):
            filter_value = value
        else:
            allowed = sorted(
                model._meta.fields | model._meta.fetch_fields | set(self._custom_filters)
            )
            raise FieldError(f"Unknown filter param '{key}'. Allowed base values are {allowed}")
        return filter_key, filter_value

    def _resolve_kwargs(self, model: "Type[Model]", table: Table) -> QueryModifier:
        """Resolve all kwarg filters of this node and merge them with join_type."""
        modifier = QueryModifier()
        for raw_key, raw_value in self.filters.items():
            key, value = self._get_actual_filter_params(model, raw_key, raw_value)
            if key in self._custom_filters:
                filter_modifier = self._resolve_custom_kwarg(model, key, value, table)
            else:
                filter_modifier = self._resolve_regular_kwarg(model, key, value, table)
            if self.join_type == self.AND:
                modifier &= filter_modifier
            else:
                modifier |= filter_modifier
        if self._is_negated:
            modifier = ~modifier
        return modifier

    def _resolve_children(self, model: "Type[Model]", table: Table) -> QueryModifier:
        """Resolve all child Q nodes and merge them with join_type."""
        modifier = QueryModifier()
        for node in self.children:
            node_modifier = node.resolve(model, self._annotations, self._custom_filters, table)
            if self.join_type == self.AND:
                modifier &= node_modifier
            else:
                modifier |= node_modifier
        if self._is_negated:
            modifier = ~modifier
        return modifier

    def resolve(
        self,
        model: "Type[Model]",
        annotations: Dict[str, Any],
        custom_filters: Dict[str, Dict[str, Any]],
        table: Table,
    ) -> QueryModifier:
        """
        Resolves the logical Q chain into the parts of a SQL statement.

        :param model: The Model this Q Expression should be resolved on.
        :param annotations: Extra annotations one wants to inject into the resultset.
        :param custom_filters: Pre-resolved filters to be passed though.
        :param table: ``pypika.Table`` to keep track of the virtual SQL table
            (to allow self referential joins)
        """
        self._annotations = annotations
        self._custom_filters = custom_filters
        # A Q node holds either kwargs filters or child nodes, never both
        # (see __init__, which folds kwargs into a child when mixed).
        if self.filters:
            return self._resolve_kwargs(model, table)
        return self._resolve_children(model, table)
class Prefetch:
"""
Prefetcher container. One would directly use this when wanting to attach a custom QuerySet
for specialised prefetching.
:param relation: Related field name.
:param queryset: Custom QuerySet to use for prefetching.
"""
__slots__ = ("relation", "queryset")
def __init__(self, relation: str, queryset: "QuerySet") -> None:
self.relation = relation
self.queryset = queryset
self.queryset.query = copy(self.queryset.model._meta.basequery)
def resolve_for_queryset(self, queryset: "QuerySet") -> None:
"""
Called internally to generate prefetching query.
:param queryset: Custom QuerySet to use for prefetching.
:raises OperationalError: If field does not exist in model.
"""
relation_split = self.relation.split("__")
first_level_field = relation_split[0]
if first_level_field not in queryset.model._meta.fetch_fields:
raise OperationalError(
f"relation {first_level_field} for {queryset.model._meta.db_table} not found"
)
forwarded_prefetch = "__".join(relation_split[1:])
if forwarded_prefetch:
if | |
six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_folder got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_folder_details,
response_type="Folder")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_folder_details,
response_type="Folder")
def create_folder_tag(self, catalog_id, data_asset_key, folder_key, create_folder_tag_details, **kwargs):
    """
    Creates a new folder tag.

    :param str catalog_id: (required) Unique catalog identifier.
    :param str data_asset_key: (required) Unique data asset key.
    :param str folder_key: (required) Unique folder key.
    :param CreateTagDetails create_folder_tag_details: (required)
        The information used to create the folder tag.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in
        case of a timeout or server error without risk of executing that same
        action again. Retry tokens expire after 24 hours, but can be
        invalidated before then due to conflicting operations.
    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any retry strategy set at the client level. Use one of the strategies
        available in the :py:mod:`~oci.retry` module; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_catalog.models.FolderTag`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/folders/{folderKey}/tags"
    method = "POST"

    # Reject kwargs this operation does not understand.
    expected_kwargs = {"retry_strategy", "opc_request_id", "opc_retry_token"}
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_folder_tag got unknown kwargs: {!r}".format(extra_kwargs))

    raw_path_params = {
        "catalogId": catalog_id,
        "dataAssetKey": data_asset_key,
        "folderKey": folder_key
    }
    path_params = {name: value for (name, value) in six.iteritems(raw_path_params)
                   if value is not missing}
    for (name, value) in six.iteritems(path_params):
        is_blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    raw_header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(raw_header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy (if truthy) wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=create_folder_tag_details,
        response_type="FolderTag")
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Real retries need an opc-retry-token so replays stay idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def create_glossary(self, catalog_id, create_glossary_details, **kwargs):
    """
    Creates a new glossary.

    :param str catalog_id: (required) Unique catalog identifier.
    :param CreateGlossaryDetails create_glossary_details: (required)
        The information used to create the glossary.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in
        case of a timeout or server error without risk of executing that same
        action again. Retry tokens expire after 24 hours, but can be
        invalidated before then due to conflicting operations.
    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any retry strategy set at the client level. Use one of the strategies
        available in the :py:mod:`~oci.retry` module; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_catalog.models.Glossary`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/catalogs/{catalogId}/glossaries"
    method = "POST"

    # Reject kwargs this operation does not understand.
    expected_kwargs = {"retry_strategy", "opc_request_id", "opc_retry_token"}
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_glossary got unknown kwargs: {!r}".format(extra_kwargs))

    raw_path_params = {
        "catalogId": catalog_id
    }
    path_params = {name: value for (name, value) in six.iteritems(raw_path_params)
                   if value is not missing}
    for (name, value) in six.iteritems(path_params):
        is_blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    raw_header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(raw_header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy (if truthy) wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=create_glossary_details,
        response_type="Glossary")
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Real retries need an opc-retry-token so replays stay idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def create_job(self, catalog_id, create_job_details, **kwargs):
    """
    Creates a new job.

    :param str catalog_id: (required) Unique catalog identifier.
    :param CreateJobDetails create_job_details: (required)
        The information used to create the job.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in
        case of a timeout or server error without risk of executing that same
        action again. Retry tokens expire after 24 hours, but can be
        invalidated before then due to conflicting operations.
    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any retry strategy set at the client level. Use one of the strategies
        available in the :py:mod:`~oci.retry` module; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_catalog.models.Job`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/catalogs/{catalogId}/jobs"
    method = "POST"

    # Reject kwargs this operation does not understand.
    expected_kwargs = {"retry_strategy", "opc_request_id", "opc_retry_token"}
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_job got unknown kwargs: {!r}".format(extra_kwargs))

    raw_path_params = {
        "catalogId": catalog_id
    }
    path_params = {name: value for (name, value) in six.iteritems(raw_path_params)
                   if value is not missing}
    for (name, value) in six.iteritems(path_params):
        is_blank = isinstance(value, six.string_types) and not value.strip()
        if value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    raw_header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(raw_header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy (if truthy) wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_api_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=create_job_details,
        response_type="Job")
    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Real retries need an opc-retry-token so replays stay idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_api_kwargs)
    return self.base_client.call_api(**call_api_kwargs)
def create_job_definition(self, catalog_id, create_job_definition_details, **kwargs):
"""
Creates a new job definition.
:param str catalog_id: (required)
Unique catalog identifier.
:param CreateJobDefinitionDetails create_job_definition_details: (required)
The information used to create the job definition.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be | |
<filename>spots_objects.py
import glob
import html
import io
import json
import logging
import os
import shutil
import subprocess

import spotipy
import spotipy.util as util
from jinja2 import Environment, PackageLoader, select_autoescape
# Keyword arguments passed into every Jinja template render
# (key is the Jinja variable name, value is the Jinja variable value).
# NOTE(review): the odd name "kwargkward" is referenced below via
# **kwargkward; renaming it would be a cross-cutting change, so it stays.
kwargkward = dict(
BASEURL = '/spots/'
)
# Static configuration defaults, mirrored onto each
# SpotifyMasterList instance in __init__.
# User-editable list of playlist IDs exported by export_ids_list_to_file.
IDS_JSON_FILE = 'master_list.json'
# Cache of full playlist details fetched from the Spotify API.
DETAILS_JSON_FILE = 'master_list_details.json'
# Directory the static site is rendered into.
OUTPUT_DIR = 'output'
# Directory containing static assets copied into the output dir.
ASSETS_DIR = 'assets'
class SpotifyMasterList(object):
"""
This object represents a file
containing a list of spotify
playlists and IDs.
This is a JSON file with a list
of maps/dicts, with title, description,
and spotify ID.
The user edits this file to remove the
playlists they don't want.
"""
def __init__(self, token, username):
    """
    Build a master-list manager for one Spotify user.

    Creates an authenticated Spotify API client and initializes the
    (initially empty) playlist containers, plus per-instance copies of
    the module-level file/directory configuration.
    """
    self.username = username
    self.sp = spotipy.Spotify(auth=token)
    # IDs-only master list (filled by import_ids_list_from_file).
    self.master_list = None
    # Full per-playlist details (filled by import/export_playlist_details).
    self.playlists = []
    # Mirror module-level configuration onto the instance.
    self.IDS_JSON_FILE = IDS_JSON_FILE
    self.DETAILS_JSON_FILE = DETAILS_JSON_FILE
    self.OUTPUT_DIR = OUTPUT_DIR
    self.ASSETS_DIR = ASSETS_DIR
#############################
# Function to make a static site
# using the info obtained from
# the Spotify API and the playlists
# the user has selected.
def static_site(self, overwrite=False):
    """
    Render the whole static site into self.OUTPUT_DIR.

    :param overwrite: if False (default), raise when the output
        directory already exists instead of clobbering it.
    """
    if os.path.exists(self.OUTPUT_DIR) and overwrite is False:
        raise Exception("Error: output dir %s already exists!" % (self.OUTPUT_DIR))
    # Load cached playlist details into self.playlists.
    logging.debug('importing playlist details')
    self.import_playlist_details()
    # One shared Jinja environment for every page render.
    jinja_env = Environment(
        loader=PackageLoader('spots', 'templates'),
        autoescape=select_autoescape(['html', 'xml'])
    )
    self._copy_assets(jinja_env, overwrite)
    self._static_master_list(jinja_env)
    self._static_playlist_pages(jinja_env)
def _copy_assets(self, env, overwrite=False):
    """
    Copy static assets into the output dir, (re)creating it first.

    :param env: Jinja environment (unused here; kept so all _static/_copy
        helpers share the same call signature)
    :param overwrite: if True and the output dir exists, wipe it first

    Uses shutil/os instead of shelling out to `rm`/`mkdir`/`cp`, so it
    works on platforms without those Unix tools and surfaces copy errors
    as exceptions instead of silently ignored return codes.
    """
    output_dir = self.OUTPUT_DIR
    if os.path.exists(output_dir) and overwrite is True:
        # Equivalent of the old `rm -fr output_dir`.
        shutil.rmtree(output_dir)
    # Equivalent of `mkdir -p`: no-op if the dir already exists.
    os.makedirs(output_dir, exist_ok=True)
    # Equivalent of `cp -r assets/* output/.`
    for source in glob.glob('%s/*' % (self.ASSETS_DIR)):
        destination = os.path.join(output_dir, os.path.basename(source))
        if os.path.isdir(source):
            shutil.copytree(source, destination)
        else:
            shutil.copy2(source, destination)
def _static_master_list(self, env):
    """Write the static index page that lists every playlist."""
    template = env.get_template('playlists.html')
    rendered = template.render(
        playlists_items=self.playlists,
        **kwargkward
    )
    index_path = '%s/index.html' % (self.OUTPUT_DIR)
    with open(index_path, 'w') as handle:
        handle.write(rendered)
def _static_playlist_pages(self, env):
    """Write one static page per playlist, named <playlist id>.html."""
    template = env.get_template('playlist.html')
    for playlist in self.playlists:
        rendered = template.render(
            playlist=playlist,
            **kwargkward
        )
        page_path = '%s/%s.html' % (self.OUTPUT_DIR, playlist['id'])
        with open(page_path, 'w') as handle:
            handle.write(rendered)
#############################
# Functions to import/export
# a list of playlist IDs
# easily edited by the user
# to control which playlists
# are rendered on the final
# static site.
#
# The export_ method is intended
# to be called by the user to export
# all their playlists, and edit that
# file.
#
# The import_ method is not called by
# the user directly, it is called by
# other methods (below).
def export_ids_list_to_file(self):
    """
    Step 1 for the user: dump the full list of Spotify playlists owned
    by the user into a JSON file, which the user then edits by hand to
    pick which playlists appear on the site.
    """
    target = self.IDS_JSON_FILE
    if os.path.exists(target):
        raise Exception('Error: %s already exists.' % (target))
    # Slow: walks the paginated Spotify API.
    logging.debug('running method to load my playlists')
    owned_playlists = self._get_my_playlists()
    logging.debug('done loading my playlists')
    logging.debug('dumping playlist ids to json file %s' % (target))
    with open(target, 'w') as handle:
        json.dump(owned_playlists, handle, indent=4)
    logging.debug('done dumping playlist ids to json file %s' % (target))
def _get_my_playlists(self):
    """
    Call the Spotify API and return one dict per playlist *created by*
    this user, each with spotify_id, name, and description keys.
    """
    # Page size for the paginated playlist listing.
    step = 50
    followed = []
    # Step 1: fetch every playlist the user follows, page by page.
    logging.debug('getting every playlist from user %s' % (self.username))
    logging.debug(f'first {step} api call')
    response = self.sp.user_playlists(
        self.username,
        limit=step,
        offset=0
    )
    followed += response['items']
    total = response['total']
    offset = step
    while offset < total:
        logging.debug('next %d api call' % (offset))
        response = self.sp.user_playlists(
            self.username,
            limit=step,
            offset=offset
        )
        followed += response['items']
        offset += step
    logging.debug('finished with api calls to get all playlists')
    # Step 2: keep only the playlists this user owns.
    logging.debug('reducing playlists to those created by this user')
    owned = []
    logging.debug('About to process %d playlists' % (len(followed)))
    for playlist in followed:
        if playlist['owner']['id'] != self.username:
            continue
        playlist_id = playlist['id']
        playlist_name = playlist['name']
        logging.debug('Processing playlist %s (%s)' % (playlist_id, playlist_name))
        # Extra per-playlist call: the listing response lacks descriptions.
        details = self.sp.user_playlist(
            self.username,
            playlist_id,
            fields="description"
        )
        raw_descr = details.get('description')
        descr = html.unescape(raw_descr) if raw_descr is not None else ""
        owned.append(dict(
            spotify_id=playlist_id,
            name=playlist_name,
            description=descr
        ))
    logging.debug('Reduced all %d playlists to %d owned by user' % (len(followed), len(owned)))
    return owned
def import_ids_list_from_file(self):
    """
    Step 2 for the user: load the (possibly hand-edited) playlist list
    and remember just the Spotify IDs on self.master_list.
    """
    source = self.IDS_JSON_FILE
    if not os.path.exists(source):
        raise Exception('Error: %s does not exist.' % (source))
    logging.debug('opening json file %s' % (source))
    with open(source, 'r') as handle:
        entries = json.load(handle)
    logging.debug('done opening json file %s' % (source))
    self.master_list = [entry['spotify_id'] for entry in entries]
    logging.debug('master list has %d playlists' % (len(self.master_list)))
#############################
# Functions to get details about
# each playlist in the master list
# by calling the Spotify API or
# using cached details.
#
# The user will not call export_ or
# import_ methods for playlist details.
#
# The user will ask to create the static
# site, which will create the master list
# and individual playlists.
#
# Those methods will call these methods.
def export_playlist_details(self):
    """
    Fetch full details for every playlist on the master list via the
    API and cache them in the details JSON file.
    """
    cache_file = self.DETAILS_JSON_FILE
    if os.path.exists(cache_file):
        raise Exception('Error: %s already exists.' % (cache_file))
    logging.debug('assembling the master list of playlist IDs')
    if self.master_list is None:
        # Fails if the user has not exported/edited the IDs file yet.
        self.import_ids_list_from_file()
    self.playlists = []
    total = len(self.master_list)
    for index, playlist_id in enumerate(self.master_list):
        logging.debug('now processing playlist %d of %d' % (index + 1, total))
        logging.debug('calling api for playlist details...')
        raw_details = self.sp.user_playlist(
            self.username,
            playlist_id,
            fields="id,name,tracks,images,description,uri,external_urls"
        )
        logging.debug('extracting playlist details...')
        self.playlists.append(self._extract_details(raw_details))
        logging.debug('done processing playlist %d of %d' % (index + 1, total))
        # (For faster test runs, insert a break here to shorten the loop.)
    # Persist the flattened details for later static-site builds.
    with open(cache_file, 'w') as handle:
        json.dump(self.playlists, handle)
    logging.debug('done exporting playlist details to file')
def _extract_details(self,playlist_json):
"""
Keys:
- name
- descr
- images
- external_urls
- uri
- tracks
"""
# Extract everything we will need
# to make both the master list
# and each individual list.
#
# Do this by flattening maps/dicts
# as much as possible.
list_details = {}
# spotify id
list_details['id'] = playlist_json['id']
list_details['name'] = playlist_json['name']
list_details['url'] = playlist_json['external_urls']['spotify']
list_details['uri'] = playlist_json['uri']
list_details['count'] = len(playlist_json['tracks']['items'])
# image
if playlist_json['images'] is not None:
list_details['image'] = playlist_json['images'][0]['url']
else:
list_details['image'] = "http://placehold.it/500x400"
# description
if 'description' in playlist_json.keys() and playlist_json['description'] is not None:
list_details['description'] = html.unescape(playlist_json['description'])
else:
list_details['description'] = ''
# We also need to flatten maps/dicts
# for individual tracks on this playlist
# to make it easier to construct the
# individual playlist pages.
# Iterate over items and flatten them
playlist_items = []
for p in playlist_json['tracks']['items']:
it = {}
it['name'] = p['track']['name']
it['artist'] = ", ".join([j['name'] for j in p['track']['artists']])
it['url_listen'] = p['track']['preview_url']
try:
it['url_spotify'] = p['track']['external_urls']['spotify']
except KeyError:
it['url_spotify'] = '#'
it['uri'] = p['track']['uri']
playlist_items.append(it)
list_details['items'] = playlist_items
list_details['total'] = playlist_json['tracks']['total']
return list_details
def import_playlist_details(self):
"""
Import the playlist details from
the details JSON file.
"""
json_file = self.DETAILS_JSON_FILE
if os.path.exists(json_file):
logging.debug('found json file %s, importing'%(json_file))
# Details are stored in the details json file
try:
# try to open the details json file
with open(json_file,'w') as f:
self.playlists = json.load(f)
| |
<reponame>tarrade/proj_NLP_text_classification_with_GCP
"""
Created on Wed Nov 7 2018
@author: <NAME> <EMAIL>
<NAME> <EMAIL>
<NAME> <EMAIL>
"""
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import (log_loss, f1_score, accuracy_score, average_precision_score, precision_score,
recall_score, roc_auc_score, mean_squared_error, r2_score)
from sklearn.model_selection import train_test_split
from gensim.models import FastText
from scipy.optimize import minimize
import nltk
import time
class W2VTransformer(BaseEstimator, TransformerMixin):
    """
    Scikit-learn wrapper for gensim.models.FastText.

    Fits a FastText model on tokenized input texts and transforms each
    text into the average of its known word vectors.

    Parameters
    ----------
    sentences : iterable of iterables
        The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
        consider an iterable that streams the sentences directly from disk/network.
        If you don't supply `sentences`, the model is left uninitialized.
    sg : int {1, 0}
        Defines the training algorithm. If 1, skip-gram is used, otherwise, CBOW is employed.
    size : int
        Dimensionality of the feature vectors.
    window : int
        The maximum distance between the current and predicted word within a sentence.
    alpha : float
        The initial learning rate.
    min_alpha : float
        Learning rate will linearly drop to `min_alpha` as training progresses.
    seed : int
        Seed for the random number generator. Note that a fully deterministically-reproducible
        run also requires `workers=1` and, on Python 3, the `PYTHONHASHSEED` env variable.
    min_count : int
        Ignores all words with total frequency lower than this.
    max_vocab_size : int
        Limits the RAM during vocabulary building; prune infrequent words beyond this
        many unique words. Set to `None` for no limit.
    sample : float
        The threshold for configuring which higher-frequency words are randomly downsampled,
        useful range is (0, 1e-5).
    workers : int
        Use these many worker threads to train the model.
    hs : int {1,0}
        If 1, hierarchical softmax will be used for model training.
        If set to 0, and `negative` is non-zero, negative sampling will be used.
    negative : int
        If > 0, negative sampling is used with this many "noise words" (usually 5-20).
        If set to 0, no negative sampling is used.
    cbow_mean : int {1,0}
        If 0, use the sum of the context word vectors. If 1, use the mean (CBOW only).
    iter : int
        Number of iterations (epochs) over the corpus.
    trim_rule : function
        Vocabulary trimming rule: None (use min_count), or a callable accepting
        (word, count, min_count) and returning one of the gensim RULE_* constants.
        Only used during build_vocab() and not stored as part of the model.
    sorted_vocab : int {1,0}
        If 1, sort the vocabulary by descending frequency before assigning word indexes.
    batch_words : int
        Target size (in words) for batches of examples passed to worker threads.
    min_n : int
        Min length of char ngrams used for training word representations.
    max_n : int
        Max length of char ngrams used for training word representations. Set `max_n`
        lower than `min_n` to avoid char ngrams being used.
    word_ngrams : int {1,0}
        If 1, enriches word vectors with subword (ngram) information.
        If 0, this is equivalent to word2vec.
    bucket : int
        Number of hash buckets for character ngrams (limits model memory).
    callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`
        List of callbacks executed at specific stages during training.
    """
    def __init__(self, sentences=None, sg=0, hs=0, size=100, alpha=0.025,
                 window=5, min_count=5, max_vocab_size=None, word_ngrams=1,
                 sample=0.001, seed=1, workers=3, min_alpha=0.0001, negative=5,
                 cbow_mean=1, iter=5, null_word=0, min_n=3, max_n=6,
                 sorted_vocab=1, bucket=2000000, trim_rule=None,
                 batch_words=10000, callbacks=()):
        # NOTE: parameter names (including `iter`, which shadows the
        # builtin) mirror gensim's FastText API and must stay unchanged
        # so sklearn get_params()/clone() keep working.
        self.sentences = sentences
        self.sg = sg
        self.hs = hs
        self.size = size
        self.alpha = alpha
        self.window = window
        self.min_count = min_count
        self.max_vocab_size = max_vocab_size
        self.word_ngrams = word_ngrams
        self.sample = sample
        self.seed = seed
        self.workers = workers
        self.min_alpha = min_alpha
        self.negative = negative
        self.cbow_mean = cbow_mean
        self.iter = iter
        self.null_word = null_word
        self.min_n = min_n
        self.max_n = max_n
        self.sorted_vocab = sorted_vocab
        self.bucket = bucket
        self.trim_rule = trim_rule
        self.batch_words = batch_words
        self.callbacks = callbacks
        # Set by fit(); used by check_is_fitted in transform().
        self.n_features_ = None
        self.model_ = None

    def fit(self, X, y=None):
        """
        Tokenize the input texts and train the FastText model.

        Parameters
        ----------
        X : list
            list of input texts
        y : None
            Unused; present only for pipeline API compatibility.

        Returns
        -------
        self : object
            Returns self.
        """
        self.n_features_ = 1
        tokenized_corpus = self.tokenize(X)
        self.model_ = FastText(sentences=tokenized_corpus,
                               sg=self.sg,
                               hs=self.hs,
                               size=self.size,
                               alpha=self.alpha,
                               window=self.window,
                               min_count=self.min_count,
                               max_vocab_size=self.max_vocab_size,
                               word_ngrams=self.word_ngrams,
                               sample=self.sample,
                               seed=self.seed,
                               workers=self.workers,
                               min_alpha=self.min_alpha,
                               negative=self.negative,
                               cbow_mean=self.cbow_mean,
                               iter=self.iter,
                               null_word=self.null_word,
                               min_n=self.min_n,
                               max_n=self.max_n,
                               sorted_vocab=self.sorted_vocab,
                               bucket=self.bucket,
                               trim_rule=self.trim_rule,
                               batch_words=self.batch_words,
                               callbacks=self.callbacks
                               )
        return self

    def transform(self, X):
        """
        Transform texts into averaged word-vector features.

        Parameters
        ----------
        X : list
            list of input texts

        Returns
        -------
        X_transformed : array, shape (n_samples, size)
        """
        # Raises if fit() has not been called.
        check_is_fitted(self, 'n_features_')
        tokenized_corpus = self.tokenize(X)
        return self.averaged_word_vectorizer(tokenized_corpus)

    def average_word_vectors(self, words, vocabulary):
        """
        Return the mean feature vector of a text based on its tokens.

        Parameters
        ----------
        words : list
            list of tokens of which a feature vector is calculated
        vocabulary : list
            list of known words

        Returns
        -------
        Feature vector representation (zeros if no token is known).
        """
        # Vectorized replacement for the old manual accumulate/divide loop.
        known_vectors = [self.model_.wv[word] for word in words
                         if word in vocabulary]
        if not known_vectors:
            # No known words: keep the original zero-vector fallback.
            return np.zeros((self.size,), dtype="float64")
        return np.asarray(known_vectors, dtype="float64").mean(axis=0)

    def averaged_word_vectorizer(self, corpus):
        """
        Calculate the feature representation of a corpus (list of list of words).

        Parameters
        ----------
        corpus : list
            list of list of words. Input features of which a vector representation is calculated

        Returns
        -------
        np.array
            array of feature representations
        """
        vocabulary = set(self.model_.wv.index2word)
        features = [self.average_word_vectors(tokenized_sentence, vocabulary)
                    for tokenized_sentence in corpus]
        return np.array(features)

    def tokenize(self, X):
        """
        Tokenize X using nltk.word_tokenize.

        Parameters
        ----------
        X : list
            list of strings which are tokenized

        Returns
        -------
        list
            list of token lists
        """
        # Comprehension instead of the old copy-then-append loop
        # (`[doc for doc in X]` built a pointless intermediate list).
        return [nltk.word_tokenize(doc) for doc in X]
# Mapping from metric name to the sklearn scoring callable; used to look
# up the scoring function requested when optimizing ensemble weights.
scorer = {'accuracy': accuracy_score, 'log_loss': log_loss, 'f1': f1_score,
'average_precision': average_precision_score, 'precision': precision_score,
'roc_auc': roc_auc_score, 'mean_squared_error': mean_squared_error,
'r2': r2_score, 'recall': recall_score}
class VotingClassifier(BaseEstimator, ClassifierMixin):
"""
A classifier that taks a bunch of classifiers (pipelines) and creates a combined one.
Applies a soft voting rule based on a weighted average of the class probabilities. Thereby,
the weights found such that the scoring function is optimized.
Parameters
----------
models : list,
list of scikit-learn pipelines or list of tuples (pipeline, fitting_params)
weights: optional, array-like, shape (n_models,)
weights used for the final classifier. If specified
fit does not optimize the weights further.
split_data: bool, optional
default True, if True, then different data sets are used
for training and optimizing the weights. If False, all passed data
is used for both.
test_size: float, optional
default 0.33, only relevant if split_data=True, relative size of the
data-set used for optimizing the weights.
Attributes
----------
X_ : ndarray, shape (n_samples, n_features)
The input passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
classes_ : ndarray, shape (n_classes,)
The classes | |
<reponame>jovial/refstack
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import base64
import hashlib
import sys
import uuid
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from refstack.api import constants as api_const
from refstack.db.sqlalchemy import models
# Global oslo.config object shared across this module.
CONF = cfg.CONF
# Lazily-created EngineFacade singleton; see _create_facade_lazily().
_FACADE = None
LOG = log.getLogger(__name__)
# Register the standard oslo.db options on the global config.
db_options.set_defaults(cfg.CONF)
class NotFound(Exception):
    """Raised when a requested item is not found in the db."""
class Duplication(Exception):
    """Raised when a unique constraint is violated."""
def _create_facade_lazily():
    """Return the memoized EngineFacade, building it on first use."""
    global _FACADE
    if _FACADE is not None:
        return _FACADE
    # Built lazily so configuration is fully loaded before the engine exists.
    _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine from the shared facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a new DB session from the shared facade."""
    return _create_facade_lazily().get_session(**kwargs)
def get_backend():
    """Return this module, which implements the DB backend interface."""
    backend = sys.modules[__name__]
    return backend
def _to_dict(sqlalchemy_object, allowed_keys=None):
    """Recursively convert SQLAlchemy results into plain dicts/lists.

    Handles lists of objects, row/tuple-like results, model objects that
    declare ``default_allowed_keys``, and query objects exposing ``all()``.
    Anything else is returned unchanged.
    """
    if isinstance(sqlalchemy_object, list):
        return [_to_dict(obj, allowed_keys=allowed_keys)
                for obj in sqlalchemy_object]
    # Row/namedtuple-like results expose both keys() and index().
    if (hasattr(sqlalchemy_object, 'keys')
            and hasattr(sqlalchemy_object, 'index')):
        return {key: getattr(sqlalchemy_object, key)
                for key in sqlalchemy_object.keys()}
    if hasattr(sqlalchemy_object, 'default_allowed_keys'):
        # NOTE(review): iteritems() here is presumably a method supplied by
        # the model base class, not the py2 dict method — TODO confirm in
        # refstack.db.sqlalchemy.models / oslo.db ModelBase.
        items = sqlalchemy_object.iteritems()
        if not allowed_keys:
            allowed_keys = sqlalchemy_object.default_allowed_keys
        if allowed_keys:
            items = filter(lambda item: item[0] in allowed_keys, items)
        result = {}
        for key, value in items:
            if key in sqlalchemy_object.metadata_keys:
                # Collapse a list of metadata rows into a key/value mapping
                # as described by the model's metadata_keys declaration.
                result[key] = {
                    item.get(sqlalchemy_object.metadata_keys[key]['key']):
                    item.get(sqlalchemy_object.metadata_keys[key]['value'])
                    for item in value}
            elif hasattr(value, 'default_allowed_keys'):
                # Nested single model object.
                result[key] = _to_dict(value)
            elif (isinstance(value, list) and value
                    and hasattr(value[0], 'default_allowed_keys')):
                # Non-empty list of nested model objects.
                result[key] = [_to_dict(item) for item in value]
            else:
                result[key] = value
        return result
    # Query-like object: materialize and convert its rows.
    if hasattr(sqlalchemy_object, 'all'):
        return _to_dict(sqlalchemy_object.all())
    return sqlalchemy_object
def store_test_results(results):
    """Persist a test record plus its results and metadata; return its id."""
    new_id = str(uuid.uuid4())
    test = models.Test()
    test.id = new_id
    test.cpid = results.get('cpid')
    test.duration_seconds = results.get('duration_seconds')
    test.product_version_id = results.get('product_version_id')
    session = get_session()
    with session.begin():
        for item in results.get('results', []):
            row = models.TestResults()
            row.test_id = new_id
            row.name = item['name']
            row.uuid = item.get('uuid', None)
            test.results.append(row)
        for meta_key, meta_value in results.get('meta', {}).items():
            meta_row = models.TestMeta()
            meta_row.meta_key = meta_key
            meta_row.value = meta_value
            test.meta.append(meta_row)
        test.save(session)
    return new_id
def get_test_result(test_id, allowed_keys=None):
    """Fetch a single test record as a dict; raise NotFound if missing."""
    session = get_session()
    record = (session.query(models.Test)
              .filter_by(id=test_id)
              .first())
    if not record:
        raise NotFound('Test result %s not found' % test_id)
    return _to_dict(record, allowed_keys)
def delete_test_result(test_id):
    """Remove a test and its dependent rows; raise NotFound if missing."""
    session = get_session()
    with session.begin():
        test = session.query(models.Test).filter_by(id=test_id).first()
        if not test:
            raise NotFound('Test result %s not found' % test_id)
        # Children first so no orphan rows are left behind.
        for child_model in (models.TestMeta, models.TestResults):
            session.query(child_model).filter_by(test_id=test_id).delete()
        session.delete(test)
def update_test_result(test_info):
    """Apply whitelisted fields from test_info to an existing test.

    Returns the updated record as a dict; raises NotFound if the id is
    unknown.
    """
    session = get_session()
    _id = test_info.get('id')
    test = session.query(models.Test).filter_by(id=_id).first()
    if test is None:
        raise NotFound('Test result with id %s not found' % _id)
    # Only these fields may be changed after upload.
    for field in ('product_version_id', 'verification_status'):
        if field in test_info:
            setattr(test, field, test_info[field])
    with session.begin():
        test.save(session=session)
    return _to_dict(test)
def get_test_result_meta_key(test_id, key, default=None):
    """Return the metadata value for (test_id, key), or default if absent."""
    session = get_session()
    found = (session.query(models.TestMeta)
             .filter_by(test_id=test_id)
             .filter_by(meta_key=key)
             .first())
    if not found:
        return default
    return found.value
def save_test_result_meta_item(test_id, key, value):
    """Create or update one metadata item of a test run."""
    session = get_session()
    existing = (session.query(models.TestMeta)
                .filter_by(test_id=test_id)
                .filter_by(meta_key=key)
                .first())
    # Reuse the existing row when present, otherwise start a new one.
    meta_item = existing or models.TestMeta()
    meta_item.test_id = test_id
    meta_item.meta_key = key
    meta_item.value = value
    with session.begin():
        meta_item.save(session)
def delete_test_result_meta_item(test_id, key):
    """Delete one metadata item of a test run; raise NotFound if absent."""
    session = get_session()
    target = (session.query(models.TestMeta)
              .filter_by(test_id=test_id)
              .filter_by(meta_key=key)
              .first())
    if not target:
        raise NotFound('Metadata key %s '
                       'not found for test run %s' % (key, test_id))
    with session.begin():
        session.delete(target)
def get_test_results(test_id):
    """Return all individual results of a test as a list of dicts."""
    session = get_session()
    rows = (session.query(models.TestResults)
            .filter_by(test_id=test_id)
            .all())
    return [_to_dict(row) for row in rows]
def _apply_filters_for_query(query, filters):
    """Apply filters for DB query.

    Narrows a Test query by date range, cpid, verification status and
    product, then restricts visibility: either to the requesting user's
    own (signed) results, or to anonymous plus explicitly shared results.
    Returns the narrowed query.
    """
    start_date = filters.get(api_const.START_DATE)
    if start_date:
        query = query.filter(models.Test.created_at >= start_date)
    end_date = filters.get(api_const.END_DATE)
    if end_date:
        query = query.filter(models.Test.created_at <= end_date)
    cpid = filters.get(api_const.CPID)
    if cpid:
        query = query.filter(models.Test.cpid == cpid)
    verification_status = filters.get(api_const.VERIFICATION_STATUS)
    if verification_status:
        query = query.filter(models.Test.verification_status ==
                             verification_status)
    if api_const.PRODUCT_ID in filters:
        # Restrict to tests attached to any version of the given product.
        query = (query
                 .join(models.ProductVersion)
                 .filter(models.ProductVersion.product_id ==
                         filters[api_const.PRODUCT_ID]))
    all_product_tests = filters.get(api_const.ALL_PRODUCT_TESTS)
    signed = api_const.SIGNED in filters
    # If we only want to get the user's test results.
    if signed:
        query = (query
                 .join(models.Test.meta)
                 .filter(models.TestMeta.meta_key == api_const.USER)
                 .filter(models.TestMeta.value == filters[api_const.OPENID])
                 )
    elif not all_product_tests:
        # Get all non-signed (aka anonymously uploaded) test results
        # along with signed but shared test results.
        signed_results = (query.session
                          .query(models.TestMeta.test_id)
                          .filter_by(meta_key=api_const.USER))
        shared_results = (query.session
                          .query(models.TestMeta.test_id)
                          .filter_by(meta_key=api_const.SHARED_TEST_RUN))
        query = (query.filter(models.Test.id.notin_(signed_results))
                 .union(query.filter(models.Test.id.in_(shared_results))))
    return query
def get_test_result_records(page, per_page, filters):
    """Return one page of filtered test records, newest first."""
    session = get_session()
    query = _apply_filters_for_query(session.query(models.Test), filters)
    page_rows = (query.order_by(models.Test.created_at.desc())
                 .offset(per_page * (page - 1))
                 .limit(per_page)
                 .all())
    return _to_dict(page_rows)
def get_test_result_records_count(filters):
    """Count test records matching the given filters."""
    session = get_session()
    base_query = session.query(models.Test.id)
    return _apply_filters_for_query(base_query, filters).count()
def user_get(user_openid):
    """Return the user model for an OpenID; raise NotFound if unknown."""
    session = get_session()
    found = (session.query(models.User)
             .filter_by(openid=user_openid)
             .first())
    if found is None:
        raise NotFound('User with OpenID %s not found' % user_openid)
    return found
def user_save(user_info):
    """Insert or update a user record from user_info and return it."""
    try:
        user = user_get(user_info['openid'])
    except NotFound:
        # First time this OpenID is seen: start a fresh record.
        user = models.User()
    session = get_session()
    with session.begin():
        user.update(user_info)
        user.save(session=session)
    return user
def get_pubkey(key):
    """Look up a stored public key by its base64-encoded body.

    Keys are indexed by the md5 of the decoded key material, so the hash
    narrows the search before full key strings are compared.
    """
    session = get_session()
    digest = hashlib.md5(base64.b64decode(key)).hexdigest()
    candidates = (session.query(models.PubKey)
                  .filter_by(md5_hash=digest)
                  .all())
    if len(candidates) == 1:
        return candidates[0]
    # Zero or several rows share this hash: compare the full key text.
    for candidate in candidates:
        if candidate['pubkey'] == key:
            return candidate
    return None
def store_pubkey(pubkey_info):
    """Store a public key; raise Duplication if it is already stored.

    Returns the id of the newly created row.
    """
    pubkey = models.PubKey()
    pubkey.openid = pubkey_info['openid']
    pubkey.format = pubkey_info['format']
    pubkey.pubkey = pubkey_info['pubkey']
    # md5 of the decoded key material, used as a lookup index.
    pubkey.md5_hash = hashlib.md5(
        base64.b64decode(pubkey_info['pubkey'])).hexdigest()
    pubkey.comment = pubkey_info['comment']
    session = get_session()
    with session.begin():
        collisions = (session.query(models.PubKey)
                      .filter_by(md5_hash=pubkey.md5_hash)
                      .filter_by(pubkey=pubkey.pubkey)
                      .all())
        if collisions:
            raise Duplication('Public key already exists.')
        pubkey.save(session)
    return pubkey.id
def delete_pubkey(id):
    """Delete a public key from the DB.

    :param id: primary key of the PubKey row to delete.
    :raises NotFound: if no public key with the given id exists.
    """
    session = get_session()
    with session.begin():
        key = session.query(models.PubKey).filter_by(id=id).first()
        if key is None:
            # Previously session.delete(None) raised an opaque SQLAlchemy
            # UnmappedInstanceError; fail explicitly, consistent with the
            # other delete_* helpers in this module.
            raise NotFound('Public key %s not found' % id)
        session.delete(key)
def get_user_pubkeys(user_openid):
    """Return all public keys owned by the given OpenID as dicts."""
    session = get_session()
    keys = (session.query(models.PubKey)
            .filter_by(openid=user_openid)
            .all())
    return _to_dict(keys)
def add_user_to_group(user_openid, group_id, created_by_user):
    """Create a membership record linking a user to a group."""
    session = get_session()
    with session.begin():
        membership = models.UserToGroup()
        membership.user_openid = user_openid
        membership.group_id = group_id
        membership.created_by_user = created_by_user
        membership.save(session=session)
def remove_user_from_group(user_openid, group_id):
    """Delete all membership rows matching the user/group pair."""
    session = get_session()
    with session.begin():
        membership_query = (session.query(models.UserToGroup)
                            .filter_by(user_openid=user_openid)
                            .filter_by(group_id=group_id))
        membership_query.delete(synchronize_session=False)
def add_organization(organization_info, creator):
    """Create an organization plus its admin group; return it as a dict.

    The dedicated group is created first, the creator becomes its first
    member, and the organization is then linked to that group.
    """
    session = get_session()
    with session.begin():
        group = models.Group()
        group.name = 'Group for %s' % organization_info['name']
        group.save(session=session)

        membership = models.UserToGroup()
        membership.user_openid = creator
        membership.group_id = group.id
        membership.created_by_user = creator
        membership.save(session=session)

        organization = models.Organization()
        organization.type = organization_info.get(
            'type', api_const.PRIVATE_VENDOR)
        organization.name = organization_info['name']
        organization.description = organization_info.get('description')
        organization.group_id = group.id
        organization.created_by_user = creator
        organization.properties = organization_info.get('properties')
        organization.save(session=session)
    return _to_dict(organization)
def update_organization(organization_info):
    """Update mutable fields of an organization; return the new state.

    Raises NotFound if no organization with the given id exists.
    """
    session = get_session()
    _id = organization_info['id']
    organization = (session.query(models.Organization)
                    .filter_by(id=_id)
                    .first())
    if organization is None:
        raise NotFound('Organization with id %s not found' % _id)
    with session.begin():
        # Fields absent from organization_info keep their current values.
        for field in ('type', 'name', 'description', 'properties'):
            current = getattr(organization, field)
            setattr(organization, field,
                    organization_info.get(field, current))
        organization.save(session=session)
    return _to_dict(organization)
def get_organization(organization_id, allowed_keys=None):
    """Return an organization as a dict; raise NotFound if missing."""
    session = get_session()
    organization = (session.query(models.Organization)
                    .filter_by(id=organization_id)
                    .first())
    if organization is None:
        raise NotFound('Organization with id %s not found' % organization_id)
    return _to_dict(organization, allowed_keys=allowed_keys)
def delete_organization(organization_id):
    """Delete an organization and all its products and product versions."""
    session = get_session()
    with session.begin():
        # Subquery of the organization's product ids; children are removed
        # first so no dangling references remain.
        product_ids = (session.query(models.Product.id)
                       .filter_by(organization_id=organization_id))
        (session.query(models.ProductVersion)
         .filter(models.ProductVersion.product_id.in_(product_ids))
         .delete(synchronize_session=False))
        (session.query(models.Product)
         .filter_by(organization_id=organization_id)
         .delete(synchronize_session=False))
        (session.query(models.Organization)
         .filter_by(id=organization_id)
         .delete(synchronize_session=False))
def add_product(product_info, creator):
    """Create a product and its initial version; return the product dict."""
    product = models.Product()
    product.id = str(uuid.uuid4())
    # Required attributes come straight from product_info...
    for field in ('type', 'product_type', 'name', 'organization_id'):
        setattr(product, field, product_info[field])
    # ...optional ones default to None.
    for field in ('product_ref_id', 'description', 'properties'):
        setattr(product, field, product_info.get(field))
    product.created_by_user = creator
    product.public = product_info.get('public', False)
    session = get_session()
    with session.begin():
        product.save(session=session)
        # Every product starts with one (possibly versionless) version row.
        first_version = models.ProductVersion()
        first_version.created_by_user = creator
        first_version.version = product_info.get('version')
        first_version.product_id = product.id
        first_version.save(session=session)
    return _to_dict(product)
def update_product(product_info):
"""Update product by id."""
session = get_session()
_id = product_info.get('id')
product = session.query(models.Product).filter_by(id=_id).first()
if product is None:
raise NotFound('Product with id %s not found' % _id)
keys = ['name', 'description', 'product_ref_id', 'public', 'properties']
for key in keys:
if key in product_info:
setattr(product, key, product_info[key])
| |
string
:param ParameterGroupName: **[REQUIRED]**
The name of the parameter group to be deleted.
Constraints:
* Must be the name of an existing cluster parameter group.
* Cannot delete a default cluster parameter group.
:returns: None
"""
pass
def delete_cluster_security_group(self, ClusterSecurityGroupName: str):
"""
Deletes an Amazon Redshift security group.
.. note::
You cannot delete a security group that is associated with any clusters. You cannot delete the default security group.
For information about managing security groups, go to `Amazon Redshift Cluster Security Groups <https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html>`__ in the *Amazon Redshift Cluster Management Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DeleteClusterSecurityGroup>`_
**Request Syntax**
::
response = client.delete_cluster_security_group(
ClusterSecurityGroupName='string'
)
:type ClusterSecurityGroupName: string
:param ClusterSecurityGroupName: **[REQUIRED]**
The name of the cluster security group to be deleted.
:returns: None
"""
pass
def delete_cluster_snapshot(self, SnapshotIdentifier: str, SnapshotClusterIdentifier: str = None) -> Dict:
"""
Deletes the specified manual snapshot. The snapshot must be in the ``available`` state, with no other users authorized to access the snapshot.
Unlike automated snapshots, manual snapshots are retained even after you delete your cluster. Amazon Redshift does not delete your manual snapshots. You must delete manual snapshot explicitly to avoid getting charged. If other accounts are authorized to access the snapshot, you must revoke all of the authorizations before you can delete the snapshot.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DeleteClusterSnapshot>`_
**Request Syntax**
::
response = client.delete_cluster_snapshot(
SnapshotIdentifier='string',
SnapshotClusterIdentifier='string'
)
**Response Syntax**
::
{
'Snapshot': {
'SnapshotIdentifier': 'string',
'ClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Status': 'string',
'Port': 123,
'AvailabilityZone': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'ClusterVersion': 'string',
'SnapshotType': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'DBName': 'string',
'VpcId': 'string',
'Encrypted': True|False,
'KmsKeyId': 'string',
'EncryptedWithHSM': True|False,
'AccountsWithRestoreAccess': [
{
'AccountId': 'string',
'AccountAlias': 'string'
},
],
'OwnerAccount': 'string',
'TotalBackupSizeInMegaBytes': 123.0,
'ActualIncrementalBackupSizeInMegaBytes': 123.0,
'BackupProgressInMegaBytes': 123.0,
'CurrentBackupRateInMegaBytesPerSecond': 123.0,
'EstimatedSecondsToCompletion': 123,
'ElapsedTimeInSeconds': 123,
'SourceRegion': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'RestorableNodeTypes': [
'string',
],
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'ManualSnapshotRetentionPeriod': 123,
'ManualSnapshotRemainingDays': 123,
'SnapshotRetentionStartTime': datetime(2015, 1, 1)
}
}
**Response Structure**
- *(dict) --*
- **Snapshot** *(dict) --*
Describes a snapshot.
- **SnapshotIdentifier** *(string) --*
The snapshot identifier that is provided in the request.
- **ClusterIdentifier** *(string) --*
The identifier of the cluster for which the snapshot was taken.
- **SnapshotCreateTime** *(datetime) --*
The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
- **Status** *(string) --*
The snapshot status. The value of the status depends on the API operation used:
* CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
* DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
* DeleteClusterSnapshot returns status as "deleted".
- **Port** *(integer) --*
The port that the cluster is listening on.
- **AvailabilityZone** *(string) --*
The Availability Zone in which the cluster was created.
- **ClusterCreateTime** *(datetime) --*
The time (UTC) when the cluster was originally created.
- **MasterUsername** *(string) --*
The master user name for the cluster.
- **ClusterVersion** *(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **SnapshotType** *(string) --*
The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
- **NodeType** *(string) --*
The node type of the nodes in the cluster.
- **NumberOfNodes** *(integer) --*
The number of nodes in the cluster.
- **DBName** *(string) --*
The name of the database that was created when the cluster was created.
- **VpcId** *(string) --*
The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
- **Encrypted** *(boolean) --*
If ``true`` , the data in the snapshot is encrypted at rest.
- **KmsKeyId** *(string) --*
The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
- **EncryptedWithHSM** *(boolean) --*
A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. ``true`` indicates that the data is encrypted using HSM keys.
- **AccountsWithRestoreAccess** *(list) --*
A list of the AWS customer accounts authorized to restore the snapshot. Returns ``null`` if no accounts are authorized. Visible only to the snapshot owner.
- *(dict) --*
Describes an AWS customer account authorized to restore a snapshot.
- **AccountId** *(string) --*
The identifier of an AWS customer account authorized to restore a snapshot.
- **AccountAlias** *(string) --*
The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is ``amazon-redshift-support`` .
- **OwnerAccount** *(string) --*
For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
- **TotalBackupSizeInMegaBytes** *(float) --*
The size of the complete set of backup data that would be used to restore the cluster.
- **ActualIncrementalBackupSizeInMegaBytes** *(float) --*
The size of the incremental backup.
- **BackupProgressInMegaBytes** *(float) --*
The number of megabytes that have been transferred to the snapshot backup.
- **CurrentBackupRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second being transferred to the snapshot backup. Returns ``0`` for a completed backup.
- **EstimatedSecondsToCompletion** *(integer) --*
The estimate of the time remaining before the snapshot backup will complete. Returns ``0`` for a completed backup.
- **ElapsedTimeInSeconds** *(integer) --*
The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
- **SourceRegion** *(string) --*
The source region from which the snapshot was copied.
- **Tags** *(list) --*
The list of tags for the cluster snapshot.
- *(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **RestorableNodeTypes** *(list) --*
The list of node types that this cluster snapshot is able to restore into.
- *(string) --*
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track for the snapshot.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
- **ManualSnapshotRemainingDays** *(integer) --*
The number of days until a manual snapshot will pass its retention period.
- **SnapshotRetentionStartTime** *(datetime) --*
A timestamp representing the start of the retention period for the snapshot.
:type SnapshotIdentifier: string
:param SnapshotIdentifier: **[REQUIRED]**
The unique identifier of the manual snapshot to be deleted.
Constraints: Must be the name of an existing snapshot that is in the ``available`` , ``failed`` , or ``cancelled`` state.
:type SnapshotClusterIdentifier: string
:param SnapshotClusterIdentifier:
The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
Constraints: Must be the name of valid cluster.
:rtype: dict
:returns:
"""
| |
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
register: sample_com_challenge
# Alternative first step:
- name: Create a challenge for sample.com using a account key file.
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
register: sample_com_challenge
# perform the necessary steps to fulfill the challenge
# for example:
#
# - copy:
# dest: /var/www/html/{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource'] }}
# content: "{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource_value'] }}"
# when: sample_com_challenge is changed and 'sample.com' in sample_com_challenge['challenge_data']
#
# Alternative way:
#
# - copy:
# dest: /var/www/{{ item.key }}/{{ item.value['http-01']['resource'] }}
# content: "{{ item.value['http-01']['resource_value'] }}"
# loop: "{{ sample_com_challenge.challenge_data | dictsort }}"
# when: sample_com_challenge is changed
- name: Let the challenge be validated and retrieve the cert and intermediate certificate
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
chain_dest: /etc/httpd/ssl/sample.com-intermediate.crt
data: "{{ sample_com_challenge }}"
### Example with DNS challenge against production ACME server ###
- name: Create a challenge for sample.com using a account key file.
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
account_email: <EMAIL>
src: /etc/pki/cert/csr/sample.com.csr
cert: /etc/httpd/ssl/sample.com.crt
challenge: dns-01
acme_directory: https://acme-v01.api.letsencrypt.org/directory
# Renew if the certificate is at least 30 days old
remaining_days: 60
register: sample_com_challenge
# perform the necessary steps to fulfill the challenge
# for example:
#
# - route53:
# zone: sample.com
# record: "{{ sample_com_challenge.challenge_data['sample.com']['dns-01'].record }}"
# type: TXT
# ttl: 60
# state: present
# wait: yes
# # Note: route53 requires TXT entries to be enclosed in quotes
# value: "{{ sample_com_challenge.challenge_data['sample.com']['dns-01'].resource_value | regex_replace('^(.*)$', '\"\\1\"') }}"
# when: sample_com_challenge is changed and 'sample.com' in sample_com_challenge.challenge_data
#
# Alternative way:
#
# - route53:
# zone: sample.com
# record: "{{ item.key }}"
# type: TXT
# ttl: 60
# state: present
# wait: yes
# # Note: item.value is a list of TXT entries, and route53
# # requires every entry to be enclosed in quotes
# value: "{{ item.value | map('regex_replace', '^(.*)$', '\"\\1\"' ) | list }}"
# loop: "{{ sample_com_challenge.challenge_data_dns | dictsort }}"
# when: sample_com_challenge is changed
- name: Let the challenge be validated and retrieve the cert and intermediate certificate
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
account_email: <EMAIL>
src: /etc/pki/cert/csr/sample.com.csr
cert: /etc/httpd/ssl/sample.com.crt
fullchain: /etc/httpd/ssl/sample.com-fullchain.crt
chain: /etc/httpd/ssl/sample.com-intermediate.crt
challenge: dns-01
acme_directory: https://acme-v01.api.letsencrypt.org/directory
remaining_days: 60
data: "{{ sample_com_challenge }}"
when: sample_com_challenge is changed
'''
RETURN = '''
cert_days:
description: The number of days the certificate remains valid.
returned: success
type: int
challenge_data:
description:
- Per identifier / challenge type challenge data.
- Since Ansible 2.8.5, only challenges which are not yet valid are returned.
returned: changed
type: list
elements: dict
contains:
resource:
description: The challenge resource that must be created for validation.
returned: changed
type: str
sample: .well-known/acme-challenge/evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA
resource_original:
description:
- The original challenge resource including type identifier for C(tls-alpn-01)
challenges.
returned: changed and challenge is C(tls-alpn-01)
type: str
sample: DNS:example.com
version_added: "2.8"
resource_value:
description:
- The value the resource has to produce for the validation.
- For C(http-01) and C(dns-01) challenges, the value can be used as-is.
- "For C(tls-alpn-01) challenges, note that this return value contains a
Base64 encoded version of the correct binary blob which has to be put
into the acmeValidation x509 extension; see
U(https://www.rfc-editor.org/rfc/rfc8737.html#section-3)
for details. To do this, you might need the C(b64decode) Jinja filter
to extract the binary blob from this return value."
returned: changed
type: str
sample: IlirfxKKXA...17Dt3juxGJ-PCt92wr-oA
record:
description: The full DNS record's name for the challenge.
returned: changed and challenge is C(dns-01)
type: str
sample: _acme-challenge.example.com
version_added: "2.5"
challenge_data_dns:
description:
- List of TXT values per DNS record, in case challenge is C(dns-01).
- Since Ansible 2.8.5, only challenges which are not yet valid are returned.
returned: changed
type: dict
version_added: "2.5"
authorizations:
description:
- ACME authorization data.
- Maps an identifier to ACME authorization objects. See U(https://tools.ietf.org/html/rfc8555#section-7.1.4).
returned: changed
type: dict
sample: '{"example.com":{...}}'
order_uri:
description: ACME order URI.
returned: changed
type: str
version_added: "2.5"
finalization_uri:
description: ACME finalization URI.
returned: changed
type: str
version_added: "2.5"
account_uri:
description: ACME account URI.
returned: changed
type: str
version_added: "2.5"
all_chains:
description:
- When I(retrieve_all_alternates) is set to C(yes), the module will query the ACME server
for alternate chains. This return value will contain a list of all chains returned,
the first entry being the main chain returned by the server.
- See L(Section 7.4.2 of RFC8555,https://tools.ietf.org/html/rfc8555#section-7.4.2) for details.
returned: when certificate was retrieved and I(retrieve_all_alternates) is set to C(yes)
type: list
elements: dict
contains:
cert:
description:
- The leaf certificate itself, in PEM format.
type: str
returned: always
chain:
description:
- The certificate chain, excluding the root, as concatenated PEM certificates.
type: str
returned: always
full_chain:
description:
- The certificate chain, excluding the root, but including the leaf certificate,
as concatenated PEM certificates.
type: str
returned: always
'''
from ansible.module_utils.acme import (
ModuleFailException,
write_file, nopad_b64, pem_to_der,
ACMEAccount,
HAS_CURRENT_CRYPTOGRAPHY,
cryptography_get_csr_identifiers,
openssl_get_csr_identifiers,
cryptography_get_cert_days,
set_crypto_backend,
process_links,
)
import base64
import hashlib
import locale
import os
import re
import textwrap
import time
import urllib
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
from ansible.module_utils.compat import ipaddress as compat_ipaddress
def get_cert_days(module, cert_file):
    '''
    Return the number of days the certificate in cert_file remains valid,
    or -1 if the file does not exist. If cert_file contains more than one
    certificate, only the first one is considered.
    '''
    if HAS_CURRENT_CRYPTOGRAPHY:
        return cryptography_get_cert_days(module, cert_file)
    if not os.path.exists(cert_file):
        return -1
    # Fall back to parsing the text output of `openssl x509`.
    openssl_bin = module.get_bin_path('openssl', True)
    cmd = [openssl_bin, "x509", "-in", cert_file, "-noout", "-text"]
    dummy, out, dummy = module.run_command(cmd, check_rc=True, encoding=None)
    match = re.search(r"\s+Not After\s*:\s+(.*)", out.decode('utf8'))
    if match is None:
        raise ModuleFailException("No 'Not after' date found in {0}".format(cert_file))
    try:
        parsed = time.strptime(match.group(1), '%b %d %H:%M:%S %Y %Z')
        not_after = datetime.fromtimestamp(time.mktime(parsed))
    except ValueError:
        raise ModuleFailException("Failed to parse 'Not after' date of {0}".format(cert_file))
    return (not_after - datetime.utcnow()).days
class ACMEClient(object):
'''
ACME client class. Uses an ACME account object and a CSR to
start and validate ACME challenges and download the respective
certificates.
'''
    def __init__(self, module):
        """Initialize client state from module params and, for ACME v2 or
        when modify_account is set, make sure the ACME account exists."""
        self.module = module
        self.version = module.params['acme_version']
        self.challenge = module.params['challenge']
        self.csr = module.params['csr']
        self.dest = module.params.get('dest')
        self.fullchain_dest = module.params.get('fullchain_dest')
        self.chain_dest = module.params.get('chain_dest')
        self.account = ACMEAccount(module)
        self.directory = self.account.directory
        # 'data' carries the challenge state from a previous module run.
        self.data = module.params['data']
        self.authorizations = None
        self.cert_days = -1
        self.order_uri = self.data.get('order_uri') if self.data else None
        self.finalize_uri = None
        # Make sure account exists
        modify_account = module.params['modify_account']
        if modify_account or self.version > 1:
            contact = []
            if module.params['account_email']:
                contact.append('mailto:' + module.params['account_email'])
            # allow_creation mirrors modify_account: only create/update the
            # account when the user explicitly allows modification.
            created, account_data = self.account.setup_account(
                contact,
                agreement=module.params.get('agreement'),
                terms_agreed=module.params.get('terms_agreed'),
                allow_creation=modify_account,
            )
            if account_data is None:
                raise ModuleFailException(msg='Account does not exist or is deactivated.')
            updated = False
            if not created and account_data and modify_account:
                updated, account_data = self.account.update_account(account_data, contact)
            self.changed = created or updated
        else:
            # This happens if modify_account is False and the ACME v1
            # protocol is used. In this case, we do not call setup_account()
            # to avoid accidental creation of an account. This is OK
            # since for ACME v1, the account URI is not needed to send a
            # signed ACME request.
            pass
        if not os.path.exists(self.csr):
            raise ModuleFailException("CSR %s not found" % (self.csr))
        self._openssl_bin = module.get_bin_path('openssl', True)
        # Extract list of identifiers from CSR
        self.identifiers = self._get_csr_identifiers()
def _get_csr_identifiers(self):
'''
Parse the CSR and return the list of requested identifiers
'''
if HAS_CURRENT_CRYPTOGRAPHY:
return cryptography_get_csr_identifiers(self.module, self.csr)
else:
return openssl_get_csr_identifiers(self._openssl_bin, self.module, self.csr)
def _add_or_update_auth(self, identifier_type, identifier, auth):
'''
Add or update the given authorization in the global authorizations list.
Return True if the auth was updated/added and False if no change was
necessary.
'''
if self.authorizations.get(identifier_type + ':' + identifier) == auth:
return False
self.authorizations[identifier_type + ':' + identifier] = auth
return True
def _new_authz_v1(self, identifier_type, identifier):
'''
Create a new authorization for the given identifier.
Return the authorization object of the new authorization
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
'''
new_authz = {
"resource": "new-authz",
"identifier": {"type": identifier_type, "value": identifier},
}
result, info = self.account.send_signed_request(self.directory['new-authz'], new_authz)
if info['status'] not in [200, 201]:
raise ModuleFailException("Error requesting challenges: CODE: {0} RESULT: {1}".format(info['status'], result))
else:
result['uri'] = info['location']
return result
def _get_challenge_data(self, auth, identifier_type, identifier):
'''
Returns a dict with the data for all proposed (and supported) challenges
of the given authorization.
'''
data = {}
# no need to choose a specific challenge here as this module
# is not responsible for fulfilling the challenges. Calculate
# and return the required information for each challenge.
for challenge in auth['challenges']:
challenge_type = challenge['type']
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = self.account.get_keyauthorization(token)
if challenge_type == 'http-01':
# https://tools.ietf.org/html/rfc8555#section-8.3
resource = '.well-known/acme-challenge/' + token
data[challenge_type] = {'resource': resource, 'resource_value': keyauthorization}
| |
'return_failure_list': [ return_failure_list, 'return-failure-list', [ bool, 'None' ], False ],
'query': [ query, 'query', [ SnapmirrorInfo, 'None' ], False ],
}, {
'num-succeeded': [ int, False ],
'num-failed': [ int, False ],
'success-list': [ SnapmirrorPromoteIterInfo, True ],
'failure-list': [ SnapmirrorPromoteIterInfo, True ],
} )
def snapmirror_initialize(self, source_vserver=None, source_volume=None, destination_snapshot=None, transfer_priority=None, source_cluster=None, destination_vserver=None, destination_location=None, destination_volume=None, source_location=None, source_snapshot=None, max_transfer_rate=None, destination_cluster=None):
"""
Performs the initial update of a SnapMirror relationship.
You must specify the destination endpoint when using
snapmirror-initialize.
This API must be used from the destination storage system on
Data ONTAP operating in 7-Mode, from the destination cluster
on Data ONTAP 8.1 operating in Cluster-Mode, and from the
destination Vserver on Data ONTAP 8.2 or later operating
in Cluster-Mode.
<p>
On Data ONTAP operating in 7-Mode, If the destination endpoint
is a volume, the volume must be in the restricted state.
If the destination endpoint is a qtree, the qtree must not
already exist.
<p>
On Data ONTAP operating in Cluster-Mode, this API is usually
used after the snapmirror-create API, but it can be used alone,
that is, without the snapmirror-create API, to create and
initially update a SnapMirror relationship.
<p>
On Data ONTAP 8.1 operating in Cluster-Mode, and on Data ONTAP
8.2 operating in Cluster-Mode for relationships using
a control plane compatible with Data ONTAP 8.1 operating
Cluster-Mode (The relationship-control-plane field is set to
'v1'), a job will be spawned to operate on the SnapMirror
relationship, and the job id will be returned. The progress of
the job can be tracked using the job APIs.
<p>
On Data ONTAP 8.2 or later operating in Cluster-Mode, for
vault relationships, a 32-bit volume cannot be the source
or destination of the relationship.
<p>
On Data ONTAP 8.2 or later operating in Cluster-Mode, you
can track the progress of the operation using the
snapmirror-get API, except for relationships using a control
plane compatible with Data ONTAP 8.1 operating in Cluster-Mode.
:param source_vserver: Specifies the source Vserver of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the source volume.
<li> The name of the source cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param source_volume: Specifies the source volume of the SnapMirror relationship. The
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the source Vserver.
<li> The name of the source cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param destination_snapshot: Creates the specified snapshot (in addition to the regular
SnapMirror snapshot) on the destination after the qtree
SnapMirror transfer is over.
:param transfer_priority: Specifies the priority at which the transfer runs.
Possible values are: "normal", and "low". The default
value is the value specified in the snapmirror policy which
is associated with the relationship.
<p>This parameter only applies on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control plane
is 'v2'.
:param source_cluster: Specifies the source cluster of the SnapMirror relationship. The
source Vserver and source volume must also be specified if using
this parameter.
:param destination_vserver: Specifies the destination Vserver of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the destination volume.
<li> The name of the destination cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param destination_location: Specifies the destination endpoint of the SnapMirror
relationship in the following formats:
<ul>
<li> <system>:/vol/<volume>[/<qtree>]
On Data ONTAP operating in 7-Mode.
<li> [<cluster>:]//<vserver>/<volume>
On Data ONTAP 8.1 operating in Cluster-Mode, and on Data
ONTAP 8.2 operating in Cluster-Mode for relationships using
a control plane compatible with Data ONTAP 8.1 operating in
Cluster-Mode.
<li> <[vserver:]volume>
On Data ONTAP 8.2 or later operating in Cluster-Mode except
for relationships using a control plane compatible with Data
ONTAP 8.1 operating in Cluster-Mode. This format depends
on the Vserver peering setup between the source and
destination Vservers.
<ul>
This format may change in the future.
On Data ONTAP operating in Cluster-Mode, when specifying a
destination endpoint, you must use either the destination
location, or the destination cluster, destination Vserver, and
destination volume.
On Data ONTAP operating in 7-Mode, if the destination endpoint
is a volume, the volume must be in the restricted state.
If the destination endpoint is a qtree, the qtree must not
already exist.
<p> This parameter is mandatory on Data ONTAP operating in
7-mode.
:param destination_volume: Specifies the destination volume of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the destination Vserver.
<li> The name of the destination cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param source_location: Specifies the source endpoint of the SnapMirror
relationship in the following formats:
<ul>
<li> <system>:/vol/<volume>[/<qtree>]
On Data ONTAP operating in 7-Mode.
<li> [<cluster>:]//<vserver>/<volume>
On Data ONTAP 8.1 operating in Cluster-Mode, and on Data
ONTAP 8.2 operating in Cluster-Mode for relationships using
a control plane compatible with Data ONTAP 8.1 operating in
Cluster-Mode.
<li> <[vserver:]volume>
On Data ONTAP 8.2 or later operating in Cluster-Mode except
for relationships using a control plane compatible with Data
ONTAP 8.1 operating in Cluster-Mode. This format depends
on the Vserver peering setup between the source and
destination Vservers.
<ul>
This format may change in the future.
On Data ONTAP operating in Cluster-Mode when specifying a
source endpoint, you must use either the source location, or the
source cluster, source Vserver, and source volume.
On Data ONTAP operating in 7-Mode, If the source-location is not
specified, then the source in /etc/snapmirror.conf for the
destination path is used.
:param source_snapshot: Designates the source snapshot to use for a qtree update
on Data ONTAP operating in 7-mode, and the snapshot on the
source volume to use for the baseline transfer
on Data ONTAP 8.2 or later operating in
Cluster-Mode.
The default creates new snapshot on the source for the
transfer.
<p>This parameter only applies on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control
plane is 'v2'.
:param max_transfer_rate: Specifies the upper bound, in kilobytes per second, at which data
is transferred. The default is unlimited (0) which permits the
SnapMirror relationship to fully utilize the available network
bandwidth.
On Data ONTAP operating in Cluster-Mode, the max-transfer-rate
option does not affect load-sharing transfers and transfers for
other relationships with Relationship Capability of Pre 8.2
confined to a single cluster.
:param destination_cluster: Specifies the destination cluster of the SnapMirror relationship.
The destination Vserver and destination volume must also be
specified if using this parameter.
"""
return self.request( "snapmirror-initialize", {
'source_vserver': [ source_vserver, 'source-vserver', [ basestring, 'None' ], False ],
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
'destination_snapshot': [ destination_snapshot, 'destination-snapshot', [ basestring, 'None' ], False ],
'transfer_priority': [ transfer_priority, 'transfer-priority', [ basestring, 'None' ], False ],
'source_cluster': [ source_cluster, 'source-cluster', [ basestring, 'None' ], False ],
'destination_vserver': [ destination_vserver, 'destination-vserver', [ basestring, 'None' ], False ],
'destination_location': [ destination_location, 'destination-location', [ basestring, 'None' ], False ],
'destination_volume': [ destination_volume, 'destination-volume', [ basestring, 'None' ], False ],
'source_location': [ source_location, 'source-location', [ basestring, 'None' ], False ],
'source_snapshot': [ source_snapshot, 'source-snapshot', [ basestring, 'None' ], False ],
'max_transfer_rate': [ max_transfer_rate, 'max-transfer-rate', [ int, 'None' ], False ],
'destination_cluster': [ destination_cluster, 'destination-cluster', [ basestring, 'None' ], False ],
}, {
| |
import os
from os.path import getsize
import logging
import sys
import copy
import clique
import errno
import six
from pymongo import DeleteOne, InsertOne
import pyblish.api
from avalon import io
from avalon.vendor import filelink
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
from speedcopy import copyfile
else:
from shutil import copyfile
log = logging.getLogger(__name__)
class IntegrateAssetNew(pyblish.api.InstancePlugin):
    """Resolve any dependency issues

    This plug-in resolves any paths which, if not updated might break
    the published file.

    The order of families is important, when working with lookdev you want to
    first publish the texture, update the texture paths in the nodes and then
    publish the shading network. Same goes for file dependent assets.

    Requirements for instance to be correctly integrated

    instance.data['representations'] - must be a list and each member
    must be a dictionary with following data:
        'files': list of filenames for sequence, string for single file.
            Only the filename is allowed, without the folder path.
        'stagingDir': "path/to/folder/with/files"
        'name': representation name (usually the same as extension)
        'ext': file extension
    optional data
        "frameStart"
        "frameEnd"
        'fps'
    """

    label = "Integrate Asset New"
    order = pyblish.api.IntegratorOrder
    # Families this integrator accepts.
    # NOTE: the original list contained "source" twice; the duplicate has
    # been removed (membership semantics are unchanged).
    families = ["workfile",
                "pointcache",
                "camera",
                "animation",
                "model",
                "mayaAscii",
                "setdress",
                "layout",
                "ass",
                "vdbcache",
                "scene",
                "vrayproxy",
                "render",
                "prerender",
                "imagesequence",
                "review",
                "rendersetup",
                "rig",
                "plate",
                "look",
                "lut",
                "audio",
                "yetiRig",
                "yeticache",
                "nukenodes",
                "gizmo",
                "source",
                "matchmove",
                "image",
                "assembly",
                "fbx",
                "textures",
                "action",
                "harmony.template",
                "harmony.palette",
                "editorial"
                ]
    # Families that are never integrated by this plug-in.
    exclude_families = ["clip"]
    # Keys copied from the filled template data into each representation's
    # "context" document in the database.
    # NOTE: the original list contained "task" twice; the duplicate has
    # been removed (the loop over these keys just reassigned the value).
    db_representation_context_keys = [
        "project", "asset", "task", "subset", "version", "representation",
        "family", "hierarchy", "username"
    ]
    # Anatomy template used when no profile matches the instance.
    default_template_name = "publish"
    # Optional list of profiles mapping instance data to template names;
    # None means "always use the default template".
    template_name_profiles = None
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
self.log.info("instance.data: {}".format(instance.data))
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
anatomy_data = instance.data["anatomyData"]
io.install()
context = instance.context
project_entity = instance.data["projectEntity"]
context_asset_name = context.data["assetEntity"]["name"]
asset_name = instance.data["asset"]
asset_entity = instance.data.get("assetEntity")
if not asset_entity or asset_entity["name"] != context_asset_name:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name \"{0}\" in project \"{1}\""
).format(asset_name, project_entity["name"])
instance.data["assetEntity"] = asset_entity
# update anatomy data with asset specific keys
# - name should already been set
hierarchy = ""
parents = asset_entity["data"]["parents"]
if parents:
hierarchy = "/".join(parents)
anatomy_data["hierarchy"] = hierarchy
task_name = instance.data.get("task")
if task_name:
anatomy_data["task"] = task_name
anatomy_data["family"] = instance.data.get("family")
stagingdir = instance.data.get("stagingDir")
if not stagingdir:
self.log.info((
"{0} is missing reference to staging directory."
" Will try to get it from representation."
).format(instance))
else:
self.log.debug(
"Establishing staging directory @ {0}".format(stagingdir)
)
# Ensure at least one file is set up for transfer in staging dir.
repres = instance.data.get("representations")
assert repres, "Instance has no files to transfer"
assert isinstance(repres, (list, tuple)), (
"Instance 'files' must be a list, got: {0} {1}".format(
str(type(repres)), str(repres)
)
)
subset = self.get_subset(asset_entity, instance)
instance.data["subsetEntity"] = subset
version_number = instance.data["version"]
self.log.debug("Next version: v{}".format(version_number))
version_data = self.create_version_data(context, instance)
version_data_instance = instance.data.get('versionData')
if version_data_instance:
version_data.update(version_data_instance)
# TODO rename method from `create_version` to
# `prepare_version` or similar...
version = self.create_version(
subset=subset,
version_number=version_number,
data=version_data
)
self.log.debug("Creating version ...")
new_repre_names_low = [_repre["name"].lower() for _repre in repres]
existing_version = io.find_one({
'type': 'version',
'parent': subset["_id"],
'name': version_number
})
if existing_version is None:
version_id = io.insert_one(version).inserted_id
else:
# Check if instance have set `append` mode which cause that
# only replicated representations are set to archive
append_repres = instance.data.get("append", False)
# Update version data
# TODO query by _id and
io.update_many({
'type': 'version',
'parent': subset["_id"],
'name': version_number
}, {
'$set': version
})
version_id = existing_version['_id']
# Find representations of existing version and archive them
current_repres = list(io.find({
"type": "representation",
"parent": version_id
}))
bulk_writes = []
for repre in current_repres:
if append_repres:
# archive only duplicated representations
if repre["name"].lower() not in new_repre_names_low:
continue
# Representation must change type,
# `_id` must be stored to other key and replaced with new
# - that is because new representations should have same ID
repre_id = repre["_id"]
bulk_writes.append(DeleteOne({"_id": repre_id}))
repre["orig_id"] = repre_id
repre["_id"] = io.ObjectId()
repre["type"] = "archived_representation"
bulk_writes.append(InsertOne(repre))
# bulk updates
if bulk_writes:
io._database[io.Session["AVALON_PROJECT"]].bulk_write(
bulk_writes
)
version = io.find_one({"_id": version_id})
instance.data["versionEntity"] = version
existing_repres = list(io.find({
"parent": version_id,
"type": "archived_representation"
}))
instance.data['version'] = version['name']
intent_value = instance.context.data.get("intent")
if intent_value and isinstance(intent_value, dict):
intent_value = intent_value.get("value")
if intent_value:
anatomy_data["intent"] = intent_value
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
template_name = self.template_name_from_instance(instance)
published_representations = {}
for idx, repre in enumerate(instance.data["representations"]):
published_files = []
# create template data for Anatomy
template_data = copy.deepcopy(anatomy_data)
if intent_value is not None:
template_data["intent"] = intent_value
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_width:
template_data["resolution_height"] = resolution_height
if resolution_width:
template_data["fps"] = fps
files = repre['files']
if repre.get('stagingDir'):
stagingdir = repre['stagingDir']
if repre.get("outputName"):
template_data["output"] = repre['outputName']
template = os.path.normpath(
anatomy.templates[template_name]["path"])
sequence_repre = isinstance(files, list)
repre_context = None
if sequence_repre:
self.log.debug(
"files: {}".format(files))
src_collections, remainder = clique.assemble(files)
self.log.debug(
"src_tail_collections: {}".format(str(src_collections)))
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = src_collection.format("{tail}")
# fix dst_padding
valid_files = [x for x in files if src_collection.match(x)]
padd_len = len(
valid_files[0].replace(src_head, "").replace(src_tail, "")
)
src_padding_exp = "%0{}d".format(padd_len)
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = repre['ext']
template_data["frame"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
if repre_context is None:
repre_context = template_filled.used_values
test_dest_files.append(
os.path.normpath(template_filled)
)
template_data["frame"] = repre_context["frame"]
self.log.debug(
"test_dest_files: {}".format(str(test_dest_files)))
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
index_frame_start = None
if repre.get("frameStart"):
frame_start_padding = int(
anatomy.templates["render"].get(
"frame_padding",
anatomy.templates["render"].get("padding")
)
)
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if index_frame_start and "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
# TODO 1.) do not count padding in each index iteration
# 2.) do not count dst_padding from src_padding before
# index_frame_start check
src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = src_padding_exp % i
if index_frame_start:
dst_padding_exp = "%0{}d".format(frame_start_padding)
dst_padding = dst_padding_exp % index_frame_start
index_frame_start += 1
dst = "{0}{1}{2}".format(
dst_head,
dst_padding,
dst_tail).replace("..", ".")
self.log.debug("destination: `{}`".format(dst))
src = os.path.join(stagingdir, src_file_name)
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
published_files.append(dst)
# for adding first frame into db
if not dst_start_frame:
dst_start_frame = dst_padding
# Store used frame value to template data
template_data["frame"] = dst_start_frame
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
dst_tail
).replace("..", ".")
repre['published_path'] = dst
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
template_data.pop("frame", None)
fname = files
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
template_data["representation"] = repre['ext']
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
repre_context = template_filled.used_values
dst = os.path.normpath(template_filled).replace("..", ".")
instance.data["transfers"].append([src, dst])
published_files.append(dst)
repre['published_path'] = dst
self.log.debug("__ dst: {}".format(dst))
repre["publishedFiles"] = published_files
for key in self.db_representation_context_keys:
value = template_data.get(key)
if not value:
continue
repre_context[key] = template_data[key]
# Use previous representation's id if there are any
repre_id = None
repre_name_low = repre["name"].lower()
for _repre in existing_repres:
# NOTE should we check lowered names?
if repre_name_low == _repre["name"]:
repre_id = _repre["orig_id"]
break
# Create new id if existing representations does not match
if repre_id is None:
repre_id = io.ObjectId()
representation = {
"_id": repre_id,
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": repre['name'],
"data": {'path': dst, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": repre_context
}
if repre.get("outputName"):
representation["context"]["output"] = repre['outputName']
if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = (
dst_padding_exp % int(repre.get("frameStart"))
)
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
self.log.debug("__ destination_list: {}".format(destination_list))
instance.data['destination_list'] = destination_list
representations.append(representation)
published_representations[repre_id] = {
"representation": representation,
"anatomy_data": template_data,
"published_files": published_files
}
self.log.debug("__ representations: {}".format(representations))
# Remove old representations if there are any (before insertion of new)
if existing_repres:
repre_ids_to_remove = []
for repre in existing_repres:
repre_ids_to_remove.append(repre["_id"])
io.delete_many({"_id": {"$in": repre_ids_to_remove}})
self.log.debug("__ representations: {}".format(representations))
for rep in instance.data["representations"]:
self.log.debug("__ represNAME: {}".format(rep['name']))
| |
dataset_0
Id: 6
Wavelength: 10
Number of columns: 2
label #valid %valid min max type
column_9 0 0.00% None None B: BATCH number
column_10 0 0.00% None None ?: *** UNDEFINED column type ***
Dataset 2:
Name: dataset_1
Id: 7
Wavelength: 9
Number of columns: 3
label #valid %valid min max type
column_11 0 0.00% None None F: amplitude
column_12 0 0.00% None None B: BATCH number
column_13 0 0.00% None None ?: *** UNDEFINED column type ***
Dataset 3:
Name: dataset_2
Id: 8
Wavelength: 8
Number of columns: 1
label #valid %valid min max type
column_14 0 0.00% None None ?: *** UNDEFINED column type ***
Dataset 4:
Name: dataset_3
Id: 9
Wavelength: 7
Number of columns: 2
label #valid %valid min max type
column_15 0 0.00% None None B: BATCH number
column_16 0 0.00% None None ?: *** UNDEFINED column type ***
Crystal 3:
Name: crystal_2
Project: project_2
Id: 3
Unit cell: (12, 20, 20, 90, 90, 120)
Number of datasets: 3
Dataset 1:
Name: dataset_0
Id: 10
Wavelength: 10
Number of columns: 3
label #valid %valid min max type
column_17 0 0.00% None None ?: *** UNDEFINED column type ***
column_18 0 0.00% None None F: amplitude
column_19 0 0.00% None None B: BATCH number
Dataset 2:
Name: dataset_1
Id: 11
Wavelength: 9
Number of columns: 1
label #valid %valid min max type
column_20 0 0.00% None None B: BATCH number
Dataset 3:
Name: dataset_2
Id: 12
Wavelength: 8
Number of columns: 2
label #valid %valid min max type
column_21 0 0.00% None None F: amplitude
column_22 0 0.00% None None B: BATCH number
""")
for column in mtz_object.columns():
assert column.array_size() == 2000
assert column.array_capacity() == 2402
mtz_object.reserve(5000)
for column in mtz_object.columns():
assert column.array_size() == 2000
assert column.array_capacity() == 5000
mtz_object.reserve(100)
for column in mtz_object.columns():
assert column.array_size() == 2000
assert column.array_capacity() == 5000
#
mtz_object = mtz.object() \
.set_title(title="exercise") \
.set_space_group_name("sg") \
.set_space_group_number(123) \
.set_point_group_name("pg") \
.set_lattice_centring_type("pg") \
.set_space_group(sgtbx.space_group_info(number=123).group())
unit_cell = uctbx.unit_cell((10,10,10,90,90,90))
mtz_object.set_hkl_base(unit_cell=unit_cell)
dataset = mtz_object.add_crystal(
name="crystal_1",
project_name="crystal_1",
unit_cell=unit_cell).add_dataset(
name="crystal_1",
wavelength=0)
try: dataset.add_column(label="a,b,c", type="H")
except RuntimeError as e:
assert str(e) == 'mtz::dataset::add_column(label="a,b,c", ...):' \
' label must not include commas.'
else: raise Exception_expected
for label in "HKL":
dataset.add_column(label=label, type="H")
column = dataset.add_column(label="F", type="F")
mtz_reflection_indices = column.set_reals(
miller_indices=flex.miller_index([(1,2,3),(2,3,4),(3,4,5)]),
data=flex.double([10,20,30]))
assert list(mtz_reflection_indices) == [0,1,2]
column = dataset.add_column(label="SigF", type="Q")
column.set_reals(
mtz_reflection_indices=mtz_reflection_indices,
data=flex.double([1,2,3]))
group = mtz_object.extract_observations(
column_label_data="F",
column_label_sigmas="SigF")
assert list(group.indices) == [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
assert approx_equal(group.data, [10, 20, 30])
assert approx_equal(group.sigmas, [1, 2, 3])
column = dataset.add_column(label="I", type="F")
mtz_reflection_indices = column.set_reals(
miller_indices=flex.miller_index([(2,3,5),(1,2,3),(3,4,5)]),
data=flex.double([11,21,31]))
assert list(mtz_reflection_indices) == [3, 0, 2]
column = dataset.add_column(label="SigI", type="Q")
column.set_reals(
mtz_reflection_indices=mtz_reflection_indices,
data=flex.double([4,5,6]))
group = mtz_object.extract_observations(
column_label_data="I",
column_label_sigmas="SigI")
assert list(group.indices) == [(1, 2, 3), (3, 4, 5), (2, 3, 5)]
assert approx_equal(group.data, [21, 31, 11])
assert approx_equal(group.sigmas, [5, 6, 4])
if (not verbose): out = StringIO()
mtz_object.show_summary(out=out)
if (not verbose):
assert not show_diff(out.getvalue(), """\
Title: exercise
Space group symbol from file: sg
Space group number from file: 123
Space group from matrices: P 4/m m m (No. 123)
Point group symbol from file: pg
Number of crystals: 2
Number of Miller indices: 4
Resolution range: 2.67261 1.41421
History:
Crystal 1:
Name: HKL_base
Project: HKL_base
Id: 0
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: HKL_base
Id: 0
Wavelength: 0
Number of columns: 0
Crystal 2:
Name: crystal_1
Project: crystal_1
Id: 1
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: crystal_1
Id: 1
Wavelength: 0
Number of columns: 7
label #valid %valid min max type
H 4 100.00% 1.00 3.00 H: index h,k,l
K 4 100.00% 2.00 4.00 H: index h,k,l
L 4 100.00% 3.00 5.00 H: index h,k,l
F 3 75.00% 10.00 30.00 F: amplitude
SigF 3 75.00% 1.00 3.00 Q: standard deviation
I 3 75.00% 11.00 31.00 F: amplitude
SigI 3 75.00% 4.00 6.00 Q: standard deviation
""")
if (not verbose): out = StringIO()
assert mtz_object.show_column_data(out=out) is mtz_object
if (not verbose):
assert not show_diff(out.getvalue(), """\
Column data:
-------------------------------------------------------------------------------
F SigF I SigI
1 2 3 10 1 21 5
2 3 4 20 2 None None
3 4 5 30 3 31 6
2 3 5 None None 11 4
-------------------------------------------------------------------------------
""")
mtz_object.write(file_name="tmp_iotbx_mtz_ext.mtz")
if (not verbose): out = StringIO()
mtz.object(file_name="tmp_iotbx_mtz_ext.mtz").show_summary(out=out)
if (not verbose):
assert not show_diff(out.getvalue(), """\
Title: exercise
Space group symbol from file: sg
Space group number from file: 123
Space group from matrices: P 4/m m m (No. 123)
Point group symbol from file: pg
Number of crystals: 2
Number of Miller indices: 4
Resolution range: 2.67261 1.41421
History:
Crystal 1:
Name: HKL_base
Project: HKL_base
Id: 0
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: HKL_base
Id: 0
Wavelength: 0
Number of columns: 0
Crystal 2:
Name: crystal_1
Project: crystal_1
Id: 2
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: crystal_1
Id: 1
Wavelength: 0
Number of columns: 7
label #valid %valid min max type
H 4 100.00% 1.00 3.00 H: index h,k,l
K 4 100.00% 2.00 4.00 H: index h,k,l
L 4 100.00% 3.00 5.00 H: index h,k,l
F 3 75.00% 10.00 30.00 F: amplitude
SigF 3 75.00% 1.00 3.00 Q: standard deviation
I 3 75.00% 11.00 31.00 F: amplitude
SigI 3 75.00% 4.00 6.00 Q: standard deviation
""")
#
original_miller_indices = mtz_object.extract_miller_indices()
assert list(original_miller_indices) \
== [(1, 2, 3), (2, 3, 4), (3, 4, 5), (2, 3, 5)]
new_miller_indices = flex.miller_index(
[(3, -1, 2), (-4, 2, -3), (5, -3, 4), (-5, 2, -3)])
assert not mtz_object.extract_miller_indices().all_eq(new_miller_indices)
mtz_object.replace_miller_indices(miller_indices=new_miller_indices)
assert mtz_object.extract_miller_indices().all_eq(new_miller_indices)
mtz_object.replace_miller_indices(miller_indices=original_miller_indices)
assert not mtz_object.extract_miller_indices().all_eq(new_miller_indices)
assert mtz_object.extract_miller_indices().all_eq(original_miller_indices)
#
c = mtz_object.get_column(label="F")
s = c.selection_valid()
v = c.extract_values(not_a_number_substitute=-1)
assert list(s) == [True, True, True, False]
assert approx_equal(v, [10.0, 20.0, 30.0, -1.0])
c.set_values(values=flex.float([5,9,3,7]), selection_valid=None)
v = c.extract_values(not_a_number_substitute=-1)
assert approx_equal(v, [5.0, 9.0, 3.0, 7.0])
c.set_values(values=flex.float([7,8,2,0]))
v = c.extract_values(not_a_number_substitute=-1)
assert approx_equal(v, [7.0, 8.0, 2.0, 0.0])
c.set_values(
values=flex.float([5,9,3,7]),
selection_valid=flex.bool([False]*4))
v = c.extract_values(not_a_number_substitute=-1)
assert approx_equal(v, [-1]*4)
for i_trial in range(10):
s = flex.random_bool(size=4, threshold=0.5)
v = flex.float(list(flex.random_double(size=4)*10-5))
c.set_values(values=v, selection_valid=s)
sx = c.selection_valid()
vx = c.extract_values(not_a_number_substitute=99)
assert list(s) == list(sx)
assert vx.select(s).all_eq(v.select(s))
assert vx.select(~s).all_ne(v.select(~s))
assert vx.select(~s).all_eq(99)
#
values_in = count()
values_out = count()
for i_batch in range(10):
batch = mtz_object.add_batch()
assert batch.num() == i_batch+1
assert batch.set_num(value=next(values_in)) is batch
assert batch.num() == next(values_out)
assert batch.set_num(value=i_batch+1) is batch
assert batch.title() == " "
assert batch.set_title("Hello MTZ") is batch
assert batch.title() == "Hello MTZ"
assert batch.set_title("Hello MTZ"*10) is batch
assert len(batch.title()) == 70
assert list(batch.gonlab()) == ["", "", ""]
assert batch.set_gonlab(
flex.std_string(["what", "ever", "this_is....."])) is batch
assert list(batch.gonlab()) == ["what", "ever", "this_is"]
assert batch.iortyp() == 0
assert batch.set_iortyp(value=next(values_in)) is batch
assert batch.iortyp() == next(values_out)
assert list(batch.lbcell()) == [0, 0, 0, 0, 0, 0]
assert batch.set_lbcell(flex.int(range(3,9))) is batch
assert list(batch.lbcell()) == list(range(3,9))
assert batch.misflg() == 0
assert batch.set_misflg(value=next(values_in)) is batch
assert batch.misflg() == next(values_out)
assert batch.jumpax() == 0
assert batch.set_jumpax(value=next(values_in)) is batch
assert batch.jumpax() == next(values_out)
assert batch.ncryst() == 0
assert batch.set_ncryst(value=next(values_in)) is batch
assert batch.ncryst() == next(values_out)
assert batch.lcrflg() == 0
assert batch.set_lcrflg(value=next(values_in)) is batch
assert batch.lcrflg() == next(values_out)
assert batch.ldtype() == 0
assert batch.set_ldtype(value=next(values_in)) is batch
assert batch.ldtype() == next(values_out)
assert batch.jsaxs() == 0
assert batch.set_jsaxs(value=next(values_in)) is batch
assert batch.jsaxs() == next(values_out)
assert batch.nbscal() == 0
assert batch.set_nbscal(value=next(values_in)) is batch
assert batch.nbscal() == next(values_out)
assert batch.ngonax() == 0
assert batch.set_ngonax(value=next(values_in)) is batch
assert batch.ngonax() == next(values_out)
assert batch.lbmflg() == 0
assert batch.set_lbmflg(value=next(values_in)) is batch
assert batch.lbmflg() == next(values_out)
assert batch.ndet() == 0
assert batch.set_ndet(value=next(values_in) % 3) is batch
assert batch.ndet() == next(values_out) % 3
assert batch.nbsetid() == 0
assert batch.set_nbsetid(value=next(values_in)) is batch
assert batch.nbsetid() == next(values_out)
assert list(batch.cell()) == [0]*6
assert batch.set_cell(flex.float(range(18,24))) is batch
assert list(batch.cell()) == list(range(18,24))
assert list(batch.umat()) == [0]*9
assert batch.set_umat(flex.float(range(16,25))) is batch
assert list(batch.umat()) == list(range(16,25))
assert list(batch.phixyz()) == [0]*6
assert batch.set_phixyz(flex.float(range(28,34))) is batch
assert list(batch.phixyz()) == list(range(28,34))
assert list(batch.crydat()) == [0]*12
assert batch.set_crydat(flex.float(range(26,38))) is batch
assert list(batch.crydat()) == list(range(26,38))
assert list(batch.datum()) == [0]*3
assert batch.set_datum(flex.float(range(26,29))) is batch
assert list(batch.datum()) == list(range(26,29))
assert batch.phistt() == 0
| |
sibling hidden state and add to parent state
# This way we can choose to add some sibling information in parent information
# Think about the case when there are two func declarations,
# without this add gate, e.g. the function names would get exactly the same parent state
sibling_mul_state = self.sigmoid(self.W_s_mul(sibling_state))
sibling_add_state = torch.tanh(self.W_s_add(sibling_state))
parent_state_updated = parent_state + sibling_mul_state * sibling_add_state
h_parent[-1], c_parent[-1] = torch.split(parent_state_updated, int(parent_state_updated.shape[-1]/2), dim=-1)
# Calculate parent and sibling loss
parent_loss = self.bce_loss(
self.depth_pred(h_pred), is_parent) / total_nodes
sibling_loss = self.bce_loss(
self.width_pred(h_pred), has_sibling) / total_nodes
loss += parent_loss + sibling_loss
individual_losses['PARENT'] += parent_loss.item()
individual_losses['SIBLING'] += sibling_loss.item()
accuracies['PARENT'] += sum(
(self.sigmoid(self.depth_pred(h_pred)) >= 0.5) == (is_parent == 1)).item()
accuracies['SIBLING'] += sum(
(self.sigmoid(self.width_pred(h_pred)) >= 0.5) == (has_sibling == 1)).item()
# Get true label values
label = features[current_nodes_indices].long()
# print(label[label == 21], is_parent[torch.where(label == 21)], current_nodes_indices[torch.where(label == 21)[0]])
# Iterate over possible node types and predict labels for each node type
for k, prediction_layer in list(self.prediction_layers.items()) + [('LITERAL', _)]:
# Only do actual calculations when the amount of nodes for this node type > 0
if len(h_pred[vocabs_mask == k]) > 0:
# Get label predictions
if k == 'LITERAL':
# TODO look into how we can incorporate offset parent and offset sibling here (see below)
# or if it even influences our loss since we work with the vocab types so maybe we can remove it altogether
label_pred, label_loss = self.label_losses[k](
h_pred[vocabs_mask == k], label[vocabs_mask == k].view(-1))
label_loss /= sum(target['vocabs'] == k)
accuracies[k] += sum(self.label_losses[k].predict(h_pred[vocabs_mask == k])
== label[vocabs_mask == k].view(-1)).item()
else:
label_pred = prediction_layer(h_pred[vocabs_mask == k])
# Calculate cross entropy loss of label prediction
label_loss = self.label_losses[k]((label_pred
# + self.offset_parent(is_parent[vocabs_mask == k])
# + self.offset_sibling(has_sibling[vocabs_mask == k])
), label[vocabs_mask == k].view(-1)) / sum(target['vocabs'] == k)
accuracies[k] += sum(torch.argmax(self.softmax(label_pred
# + self.offset_parent(is_parent[vocabs_mask == k])
# + self.offset_sibling(has_sibling[vocabs_mask == k])
), dim=-1) == label[vocabs_mask == k].view(-1)).item()
# pred_labels = torch.argmax(self.softmax(label_pred), dim=-1)
# print(sibling_index, iteration, pred_labels[pred_labels != label[vocabs_mask == k].view(-1)].tolist(), pred_labels.tolist(), label[vocabs_mask == k].view(-1).tolist()) #, self.softmax(label_pred).topk(3, sorted=True)[1], self.softmax(label_pred).topk(3, sorted=True)[0], h_pred[:, :5],)
loss += label_loss
individual_losses[k] += label_loss.item()
# Calculate embedding of true label -> teacher forcing
if 'RES' in k or not self.params['INDIV_LAYERS_VOCABS']:
embedding_dim = self.params['EMBEDDING_DIM']
else:
embedding_dim = self.params['LEAF_EMBEDDING_DIM']
emb_label = self.embedding_layers[k](
label[vocabs_mask == k]).view(-1, embedding_dim)
if 'RES' in k:
for i in range(self.params['NUM_LSTM_LAYERS']):
# Compute hidden and cell values of current nodes
if i == 0:
h_parent_new, c_parent_new = self.lstms_parent[i](
emb_label,
(h_parent[i][vocabs_mask == k],
c_parent[i][vocabs_mask == k])
)
h_sibling_new, c_sibling_new = self.lstms_sibling[i](
emb_label,
(h_prev_sibling[i][vocabs_mask == k],
c_prev_sibling[i][vocabs_mask == k])
)
else:
h_parent_new, c_parent_new = self.lstms_parent[i](
h_parent_new,
(h_parent[i][vocabs_mask == k],
c_parent[i][vocabs_mask == k])
)
h_sibling_new, c_sibling_new = self.lstms_sibling[i](
h_sibling_new,
(h_prev_sibling[i][vocabs_mask == k],
c_prev_sibling[i][vocabs_mask == k])
)
# Update the hidden and cell values matrices
h_p[i][current_nodes_indices[vocabs_mask == k]] = h_parent_new
c_p[i][current_nodes_indices[vocabs_mask == k]] = c_parent_new
h_s[i][current_nodes_indices[vocabs_mask == k]] = h_sibling_new
c_s[i][current_nodes_indices[vocabs_mask == k]] = c_sibling_new
else:
# Compute hidden and cell values of current nodes for previous siblings only (since we are not parents in the leafs)
if self.params['INDIV_LAYERS_VOCABS']:
h, c = self.leaf_lstms_sibling[k](
emb_label,
(h_prev_sibling[vocabs_mask == k],
c_prev_sibling[vocabs_mask == k])
)
else:
for i in range(self.params['NUM_LSTM_LAYERS']):
if i == 0:
h_sibling_new, c_sibling_new = self.lstms_sibling[i](
emb_label,
(h_prev_sibling[i][vocabs_mask == k],
c_prev_sibling[i][vocabs_mask == k])
)
else:
h_sibling_new, c_sibling_new = self.lstms_sibling[i](
h_sibling_new,
(h_prev_sibling[i][vocabs_mask == k],
c_prev_sibling[i][vocabs_mask == k])
)
# Update the hidden and cell values matrices
h_s[i][current_nodes_indices[vocabs_mask == k]] = h_sibling_new
c_s[i][current_nodes_indices[vocabs_mask == k]] = c_sibling_new
# h, c = self.lstm_sibling(
# emb_label, (h_prev_sibling[vocabs_mask == k], c_prev_sibling[vocabs_mask == k]))
# Update hidden and cell values matrices for siblings only (leafs cannot be parents)
# h_s[current_nodes_indices[vocabs_mask == k]] = h
# c_s[current_nodes_indices[vocabs_mask == k]] = c
return loss
    def get_hidden_values(self, iteration, adj_list, edge_order, h_p, c_p, h_s, c_s, sibling_index,
                          first_sibling_indices, current_indices, node_order, parent_indices, vocabs):
        """Gather the recurrent state needed to predict the nodes sitting at
        position `sibling_index` inside their sibling groups.

        Returns per-layer parent (h, c) states, per-layer previous-sibling
        (h, c) states, the binary supervision targets `is_parent` /
        `has_sibling`, the indices of the nodes being predicted, and the
        vocab-type mask of those nodes.
        """
        # At sibling index 0, there should not be any previous siblings
        if sibling_index == 0:
            num_first_siblings = len(first_sibling_indices)
            # Initialize hidden, cell of (non-existent) previous siblings to zero
            h_prev_sibling = []
            c_prev_sibling = []
            for i in range(self.params['NUM_LSTM_LAYERS']):
                h_prev_sibling.append(torch.zeros(
                    num_first_siblings, self.latent_dim, device=self.device))
                c_prev_sibling.append(torch.zeros(
                    num_first_siblings, self.latent_dim, device=self.device))
            current_nodes_indices = current_indices[first_sibling_indices]
            parent_indices_siblings = parent_indices[first_sibling_indices]
        else:
            # Keep only the sibling groups that still have a node at this
            # sibling position; `indices` then points at the *previous*
            # sibling of each surviving node.
            indices = []
            for i, first_sibling_index in enumerate(first_sibling_indices):
                if (i + 1 < len(first_sibling_indices) and sibling_index + first_sibling_index < first_sibling_indices[i + 1])\
                        or (i + 1 == len(first_sibling_indices) and sibling_index + first_sibling_index < len(current_indices)):
                    indices.append(first_sibling_index + sibling_index - 1)
            prev_siblings_indices = current_indices[indices]
            h_prev_sibling = []
            c_prev_sibling = []
            for i in range(self.params['NUM_LSTM_LAYERS']):
                h_prev_sibling.append(h_s[i][prev_siblings_indices, :])
                c_prev_sibling.append(c_s[i][prev_siblings_indices, :])
            parent_indices_siblings = parent_indices[indices]
            # The nodes to predict are the ones right after the previous siblings.
            current_nodes_indices = current_indices[[
                ind + 1 for ind in indices]]
        # Per-layer parent states for the selected nodes.
        h_parent = []
        c_parent = []
        for i in range(self.params['NUM_LSTM_LAYERS']):
            h_parent.append(h_p[i][parent_indices_siblings, :])
            c_parent.append(c_p[i][parent_indices_siblings, :])
        # Edges introduced at this iteration; a node is a parent iff it
        # appears as a source node in these edges.
        adj_list_curr = adj_list[edge_order == iteration, :]
        # Group boundaries: consecutive entries delimit one sibling group.
        sib = list(first_sibling_indices) + [len(parent_indices)]
        vocabs_mask = np.atleast_1d(vocabs[current_nodes_indices.cpu()])
        is_parent = torch.tensor([[1.] if index in adj_list_curr[:, 0] else [
            0.] for index in current_nodes_indices], device=self.device)
        # has_sibling is 1 iff the group (length j-i) has another node after
        # this sibling position; groups already exhausted are filtered out.
        has_sibling = torch.tensor([[1.] if j-i - 1 > sibling_index else [0.]
                                    for i, j in zip(sib[:-1], sib[1:]) if j-i > sibling_index], device=self.device)
        return h_parent, c_parent, h_prev_sibling, c_prev_sibling, is_parent, has_sibling, current_nodes_indices, vocabs_mask
    def init_decode_eval(self, parent_state, sibling_state, label_to_idx):
        """Prime the decoder for evaluation: push the 'root' token through the
        parent LSTM stack and wrap the result in a root `BeamSearchNode`."""
        # Get the root node ID
        root_id = label_to_idx['root']
        # Embed the root label and update the parent state with it.
        emb_label = self.embedding_layers['RES'](torch.tensor([root_id], device=self.device)).view(-1, self.params['EMBEDDING_DIM'])
        for i in range(self.params['NUM_LSTM_LAYERS']):
            if i == 0:
                parent_state[i] = self.lstms_parent[i](emb_label, parent_state[i])
            else:
                # Deeper layers consume the hidden state of the layer below.
                # NOTE(review): passing None discards parent_state[i] (the cell
                # starts from zeros) instead of updating it — confirm intended.
                parent_state[i] = self.lstms_parent[i](parent_state[i-1][0], None)
        # Create root node of the tree and wrap it for beam search; the root
        # is a reserved token with log-prob 0 and no previous sibling.
        root_node = Node(root_id, is_reserved=True, parent=None)
        root_node = BeamSearchNode(root_node, root_id, 0, 'RES', parent_state, parent_state, sibling_state, True, False, None)
        return root_node, parent_state
    def decode_eval(self, parent_state, sibling_state, idx_to_label, label_to_idx, placeholderid_to_nameid, parent_node=None):
        """Predict one tree node at inference time.

        Combines the topmost parent and previous-sibling LSTM states into a
        prediction vector, decides whether the node has children and/or a next
        sibling, predicts its label, and returns one candidate per beam
        (a list of BEAM_WIDTH `BeamSearchNode`s).
        """
        # Top-layer (h, c) of the parent LSTM stack.
        h_parent, c_parent = parent_state[-1]
        if not None in sibling_state:
            h_prev_sibling, c_prev_sibling = sibling_state[-1]
        else:
            # No previous sibling: initialize hidden, cell of siblings to zero
            h_prev_sibling = torch.zeros(
                1, self.latent_dim, device=self.device)
            c_prev_sibling = torch.zeros(
                1, self.latent_dim, device=self.device)
        # Concatenate (h, c) into a single vector per state, as in training.
        parent_state_combined = torch.cat([h_parent, c_parent], dim=-1)
        sibling_state_combined = torch.cat([h_prev_sibling, c_prev_sibling], dim=-1)
        h_pred = torch.tanh(self.U_parent(parent_state_combined) +
                            self.U_sibling(sibling_state_combined))
        # Sibling add-gate: fold gated sibling information into the parent state.
        sibling_mul_state = self.sigmoid(self.W_s_mul(sibling_state_combined))
        sibling_add_state = torch.tanh(self.W_s_add(sibling_state_combined))
        parent_state_updated = parent_state_combined + sibling_mul_state * sibling_add_state
        # Split the combined vector back into its (h, c) halves.
        parent_state_updated = torch.split(parent_state_updated, int(parent_state_updated.shape[-1]/2), dim=-1)
        # Probability of the node having children
        p_parent = self.sigmoid(self.depth_pred(h_pred))
        # Probability of the node having successor siblings
        p_sibling = self.sigmoid(self.width_pred(h_pred))
        # Threshold at 0.5 rather than sampling (sampling variant kept below).
        is_parent = True if p_parent >= 0.5 else False
        has_sibling = True if p_sibling >= 0.5 else False
        # is_parent = torch.distributions.bernoulli.Bernoulli(p_parent).sample()
        # has_sibling = torch.distributions.bernoulli.Bernoulli(
        #     p_sibling).sample()
        # TODO Look into changing AST parser such that ACCESS SPECIFIER child, such as public, is not a terminal anymore or not a reserved node
        # As well as ARGUMENTS that have NO children, and RETURN statement that has no children, these all do not get seen as reserved keywords here.
        # Choose which vocabulary the new node belongs to, based on the
        # predicted is_parent flag and the parent's label.
        if is_parent or 'ACCESS_SPECIFIER' in idx_to_label[parent_node.node.token] or 'COMPOUND_STMT' in idx_to_label[parent_node.node.token] or 'CALL_EXPR' in idx_to_label[parent_node.node.token]:
            node_type = 'RES'
        elif 'LITERAL' in idx_to_label[parent_node.node.token]:
            node_type = 'LITERAL'
        elif 'TYPE' == idx_to_label[parent_node.node.token]:
            node_type = 'TYPE'
        else:
            node_type = 'NAME'
        # Node label prediction: literals use their dedicated loss head,
        # other vocabularies use a plain linear layer + softmax.
        if node_type == 'LITERAL':
            predicted_label = self.label_losses[node_type].log_prob(h_pred)
        else:
            label_pred = self.prediction_layers[node_type](h_pred)
            predicted_label = self.softmax(
                label_pred
                # + self.offset_parent(is_parent) + self.offset_sibling(has_sibling)
            )
        # TODO beam search
        # predicted_label = torch.distributions.categorical.Categorical(torch.exp(predicted_label)).sample()
        # Keep the BEAM_WIDTH most likely labels and expand one node per beam.
        topk_log_prob, topk_labels = predicted_label.topk(self.params['BEAM_WIDTH'], sorted=True)
        # predicted_label = torch.argmax(predicted_label, dim=-1)
        nodes = []
        for beam_idx in range(self.params['BEAM_WIDTH']):
            # Each beam gets its own copy of the tree built so far.
            new_parent_node = deepcopy(parent_node)
            predicted_label = topk_labels[0][beam_idx].view(1)
            node = self.build_tree(predicted_label, node_type, placeholderid_to_nameid, parent_node.node)
            new_parent_node.update(node, predicted_label, topk_log_prob[0][beam_idx], node_type, parent_state, parent_state_updated, sibling_state, is_parent, has_sibling, parent_node)
            nodes.append(new_parent_node)
        # Return the per-beam candidates (each contains the entire tree so far).
        return nodes
def eval_step_sibling(self, emb_label, node, idx_to_label, label_to_idx, placeholderid_to_nameid):
if node.is_parent or not self.params['INDIV_LAYERS_VOCABS']:
# Calculate next hidden sibling state
for i in range(self.params['NUM_LSTM_LAYERS']):
if i == 0:
node.sibling_state[i] = self.lstms_sibling[i](emb_label, node.sibling_state[i])
else:
node.sibling_state[i] = self.lstms_sibling[i](node.sibling_state[i-1][0], node.sibling_state[i])
else:
# Calculate next hidden sibling state
node.sibling_state = self.leaf_lstms_sibling[node.node_type](
emb_label, node.sibling_state)
# Pass the same parent state, but updated sibling state
return self.decode_eval(node.parent_state, node.sibling_state, idx_to_label, label_to_idx,
placeholderid_to_nameid, node.parent_node)
def eval_step_parent(self, emb_label, node, idx_to_label, label_to_idx, placeholderid_to_nameid):
# We set | |
# INFO : ini merupakan copy source code dari repo one4ubot, dan sudah mendapatkan izin dari pemilik.
# INFO : This is a copy of the source code from the One4ubot repo, and has the permission of the owner.
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module which contains afk-related commands """
import time
from datetime import datetime
from random import choice, randint
from telethon.events import StopPropagation
from userbot import (
AFKREASON,
COUNT_MSG,
CMD_HELP,
ISAFK,
BOTLOG,
BOTLOG_CHATID,
USERS,
PM_AUTO_BAN) # pylint: disable=unused-imports
from userbot.events import register
# ========================= CONSTANTS ============================
# Pool of auto-reply messages (Indonesian) sent at random while AFK.
AFKSTR = [
    "Gua sibuk sekarang. Tolong bicara di dalam hati dan ketika saya kembali Anda bisa memberi saya itu!",
    "Saya pergi sekarang. Jika Anda butuh sesuatu, tinggalkan pesan setelah bunyi beep:\n`beeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeep`!",
    "Anda merindukan saya, lain kali bidik lebih baik.",
    "Saya akan kembali dalam beberapa menit dan jika tidak...,\ntunggu lebih lama.",
    "Saya tidak di sini sekarang, jadi saya mungkin di tempat lain. ",
    "Mawar itu merah, \nBiola itu biru, \nTinggalkan aku pesan, \nDan aku akan menghubungi kamu kembali.",
    "Terkadang hal terbaik dalam hidup layak untuk ditunggu… \nAku akan segera kembali.",
    "Aku akan segera kembali, \ntapi jika aku tidak segera kembali, \nAku akan kembali nanti.",
    "Jika kamu belum mengetahuinya, \nAku tidak di sini.",
    "Halo, selamat datang di pesan tandang saya, bagaimana saya bisa mengabaikan Anda hari ini?",
    "Saya berada di 7 lautan dan 7 negara, \n7 perairan dan 7 benua, \n7 gunung dan 7 bukit, \n7 dataran dan 7 gundukan, \n7 kolam dan 7 danau, \n7 mata air dan 7 padang rumput, \n7 kota dan 7 lingkungan, \n7 blok dan 7 rumah ... \n\nDi mana bahkan pesan Anda tidak bisa sampai ke saya! ",
    "Saya sedang tidak menggunakan keyboard saat ini, tetapi jika Anda akan berteriak cukup keras di layar, saya mungkin akan mendengar Anda.",
    "Saya pergi ke sana \n ---->",
    "Aku pergi ke sini \n <----",
    "Silakan tinggalkan pesan dan buat saya merasa lebih penting daripada sebelumnya.",
    "Saya tidak di sini jadi berhentilah menulis kepada saya, \nAnda juga tidak akan menemukan diri Anda dengan layar yang penuh dengan pesan Anda sendiri.",
    "Jika aku ada di sini, \nAku akan memberitahumu di mana aku berada. \n\nTapi aku tidak, \njadi tanya aku kapan aku kembali ...",
    "Aku pergi! \nAku tidak tahu kapan aku akan kembali! \nSemoga beberapa menit dari sekarang!",
    "Saya tidak ada saat ini jadi tolong tinggalkan nama, nomor, dan alamat Anda dan saya akan menguntit Anda nanti.",
    "Maaf, saya tidak di sini sekarang. \nJangan ragu untuk berbicara dengan userbot saya selama Anda suka. \nSaya akan menghubungi Anda lagi nanti.",
    "Saya yakin Anda mengharapkan pesan tandang!",
    "Hidup ini sangat singkat, begitu banyak hal yang harus dilakukan ... \nSaya akan melakukan salah satunya ..",
    "Aku tidak di sini sekarang ... \ntapi jika aku ... \n\nbukankah itu keren?",
]

# Module-level AFK state shared by the handlers below.
# (The original `global` statements here were removed: `global` is a no-op at
# module scope and only has meaning inside a function body.)
USER_AFK = {}    # reserved per-user AFK data (currently unused by handlers)
afk_time = None  # datetime when AFK mode was last enabled, None when not AFK
afk_start = {}   # replaced by a datetime in set_afk(); dict is just the initial placeholder
# =================================================================
@register(outgoing=True, pattern="^.afk(?: |$)(.*)", disable_errors=True)
async def set_afk(afk_e):
    """Mark yourself as AFK via `.afk [optional reason]`.

    Resets the shared AFK state, records the start time, optionally stores a
    reason, edits the command message as confirmation, and logs to BOTLOG.
    Raises StopPropagation so the un-AFK handler below does not immediately
    clear the status for this very message.
    """
    global ISAFK
    global AFKREASON
    global USER_AFK
    global afk_time
    global afk_start
    global afk_end
    # Optional reason captured by the command pattern.
    # (Removed: a bare `afk_e.text` expression that had no effect, and an
    # unused `global reason` declaration.)
    string = afk_e.pattern_match.group(1)
    # Reset shared AFK bookkeeping before (re)entering AFK mode.
    USER_AFK = {}
    afk_time = None
    afk_end = {}
    start_1 = datetime.now()
    afk_start = start_1.replace(microsecond=0)
    if string:
        AFKREASON = string
        await afk_e.edit(
            f"Gua AFK Bro!\
            \nAlasan: `{string}`"
        )
    else:
        await afk_e.edit("Gua AFK Bro!")
    if BOTLOG:
        await afk_e.client.send_message(BOTLOG_CHATID, "#AFK\nKamu Telah AFK!")
    ISAFK = True
    afk_time = datetime.now()
    raise StopPropagation
@register(outgoing=True)
async def type_afk_is_not_true(notafk):
    """Automatically clear AFK status when the user sends any outgoing message.

    Announces the return, then (if BOTLOG is enabled) reports how many
    messages were received while AFK, broken down per sender.
    """
    import asyncio  # stdlib; local import keeps the module header untouched

    global ISAFK
    global COUNT_MSG
    global USERS
    global AFKREASON
    global afk_end
    back_alive = datetime.now()
    afk_end = back_alive.replace(microsecond=0)
    if ISAFK:
        ISAFK = False
        msg = await notafk.respond("I'm no longer AFK.")
        # BUGFIX: was time.sleep(3), which blocks the whole asyncio event
        # loop; asyncio.sleep yields control to other handlers while waiting.
        await asyncio.sleep(3)
        await msg.delete()
        if BOTLOG:
            # Summary of messages received while AFK.
            await notafk.client.send_message(
                BOTLOG_CHATID,
                "Kamu Mendapatkan Pesan "
                + str(COUNT_MSG)
                + " Pesan Dari "
                + str(len(USERS))
                + " Pesan Saat Kamu AFK",
            )
            # Per-sender breakdown: numeric keys are user ids, other keys are
            # chat titles (anonymous admins).
            for i in USERS:
                if str(i).isnumeric():
                    name = await notafk.client.get_entity(i)
                    name0 = str(name.first_name)
                    await notafk.client.send_message(
                        BOTLOG_CHATID,
                        "[" + name0 + "](tg://user?id=" + str(i) + ")" +
                        " Mengirim Kamu " + "`" + str(USERS[i]) + " Pesan`",
                    )
                else:  # anon admin
                    await notafk.client.send_message(
                        BOTLOG_CHATID,
                        "Admin anonim di `" + i + "` mengirim kamu " + "`" +
                        str(USERS[i]) + " pesan`",
                    )
        # Reset counters for the next AFK session.
        COUNT_MSG = 0
        USERS = {}
        AFKREASON = None
@register(incoming=True, disable_edited=True)
async def mention_afk(mention):
    """Auto-reply in group chats when someone mentions/tags you while AFK.

    Builds a human-readable "AFK since ..." string, skips bots, replies with
    the stored reason (or a random AFKSTR message), and counts messages per
    sender (falling back to the chat title for anonymous admins).
    """
    from datetime import timedelta  # in this module `datetime` is the class, not the module

    global COUNT_MSG
    global USERS
    global ISAFK
    global afk_end
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "Beberapa saat yang lalu...."
    if ISAFK and mention.message.mentioned:
        now = datetime.now()
        datime_since_afk = now - afk_time
        # Break the elapsed time down into d/h/m/s; `elapsed` (was `time`)
        # avoids shadowing the imported `time` module.
        elapsed = float(datime_since_afk.seconds)
        days = elapsed // (24 * 3600)
        elapsed = elapsed % (24 * 3600)
        hours = elapsed // 3600
        elapsed %= 3600
        minutes = elapsed // 60
        elapsed %= 60
        seconds = elapsed
        if days == 1:
            afk_since = "Kemarin"
        elif days > 1:
            if days > 6:
                # BUGFIX: was `datetime.timedelta(...)` which raises
                # AttributeError (`datetime` here is the class), and "%H:%I"
                # printed the hour twice instead of hour:minute.
                date = now + timedelta(
                    days=-days, hours=-hours, minutes=-minutes
                )
                afk_since = date.strftime("%A, %Y %B %m, %H:%M")
            else:
                wday = now + timedelta(days=-days)
                afk_since = wday.strftime("%A")
        elif hours > 1:
            afk_since = f"`{int(hours)}jam{int(minutes)}` lalu"
        elif minutes > 0:
            afk_since = f"`{int(minutes)}menit{int(seconds)}` lalu"
        else:
            afk_since = f"`{int(seconds)}detik` lalu"
        # Never auto-reply to bots.
        is_bot = False
        if (sender := await mention.get_sender()):
            is_bot = sender.bot
        if is_bot:
            return  # ignore bot
        chat_obj = await mention.client.get_entity(mention.chat_id)
        chat_title = chat_obj.title
        # NOTE(review): with `or`, this first branch fires for nearly every
        # sender in a titled chat; `and` may have been intended — confirm
        # against the upstream repo before changing.
        if mention.sender_id not in USERS or chat_title not in USERS:
            if AFKREASON:
                await mention.reply(
                    f"Saya AFK sejak {afk_since}.\
                    \nAlasan: `{AFKREASON}`"
                )
            else:
                await mention.reply(str(choice(AFKSTR)))
            # Count by user id when available, else by chat title (anon admin).
            if mention.sender_id is not None:
                USERS.update({mention.sender_id: 1})
            else:
                USERS.update({chat_title: 1})
        else:
            # Known sender: only re-reply occasionally, but always count.
            if USERS[mention.sender_id] % randint(2, 4) == 0:
                if AFKREASON:
                    await mention.reply(
                        f"Saya masih AFK sejak {afk_since}.\
                        \nAlasan: `{AFKREASON}`"
                    )
                else:
                    await mention.reply(str(choice(AFKSTR)))
            if mention.sender_id is not None:
                USERS[mention.sender_id] += 1
            else:
                USERS[chat_title] += 1
        COUNT_MSG += 1
@register(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
    """Auto-reply to private messages while AFK.

    Skips the Telegram service account (777000) and bots, honours the
    PM-permit approval list when PM_AUTO_BAN is enabled, replies with the
    stored reason (or a random AFKSTR message), and counts messages per
    sender. (Duplicate `global` declarations from the original were removed.)
    """
    from datetime import timedelta  # in this module `datetime` is the class, not the module

    global ISAFK
    global USERS
    global COUNT_MSG
    global afk_end
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "Beberapa saat yang lalu"
    if (
        sender.is_private
        and sender.sender_id != 777000
        and not (await sender.get_sender()).bot
    ):
        if PM_AUTO_BAN:
            try:
                from userbot.modules.sql_helper.pm_permit_sql import is_approved
                apprv = is_approved(sender.sender_id)
            except AttributeError:
                # SQL helpers unavailable -> treat everyone as approved.
                apprv = True
        else:
            apprv = True
        if apprv and ISAFK:
            now = datetime.now()
            datime_since_afk = now - afk_time
            # Break the elapsed time down; `elapsed` (was `time`) avoids
            # shadowing the imported `time` module.
            elapsed = float(datime_since_afk.seconds)
            days = elapsed // (24 * 3600)
            elapsed = elapsed % (24 * 3600)
            hours = elapsed // 3600
            elapsed %= 3600
            minutes = elapsed // 60
            elapsed %= 60
            seconds = elapsed
            if days == 1:
                afk_since = "Kemarin"
            elif days > 1:
                if days > 6:
                    # BUGFIX: was `datetime.timedelta(...)` which raises
                    # AttributeError (`datetime` here is the class), and
                    # "%H:%I" printed the hour twice instead of hour:minute.
                    date = now + timedelta(
                        days=-days, hours=-hours, minutes=-minutes
                    )
                    afk_since = date.strftime("%A, %Y %B %m, %H:%M")
                else:
                    wday = now + timedelta(days=-days)
                    afk_since = wday.strftime("%A")
            elif hours > 1:
                afk_since = f"`{int(hours)}jam{int(minutes)}` lalu"
            elif minutes > 0:
                afk_since = f"`{int(minutes)}menit{int(seconds)}` lalu"
            else:
                afk_since = f"`{int(seconds)}detik` lalu"
            if sender.sender_id not in USERS:
                if AFKREASON:
                    await sender.reply(
                        f"Saya AFK Sejak {afk_since}.\
                        \nAlasan: `{AFKREASON}`"
                    )
                else:
                    await sender.reply(str(choice(AFKSTR)))
                USERS.update({sender.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            else:
                # Was `elif apprv and sender.sender_id in USERS:` — both
                # conditions are always true on this path, so plain `else`.
                # Known sender: only re-reply occasionally, but always count.
                if USERS[sender.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await sender.reply(
                            f"Saya masih AFK sejak {afk_since}.\
                            \nAlasan: `{AFKREASON}`"
                        )
                    else:
                        await sender.reply(str(choice(AFKSTR)))
                    USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
                else:
                    USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
CMD_HELP.update(
{
"afk": ".afk [Alasan Optional]\
\nDigunakan: Menetapkan Anda sebagai afk.\nBalasan untuk siapa saja yang tag / PM \
Anda memberi | |
<filename>src/medigan/generators.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# ! /usr/bin/env python
""" Base class providing user-library interaction methods for config management, and model selection and execution.
.. codeauthor:: <NAME> <<EMAIL>>
.. codeauthor:: <NAME> <<EMAIL>>
"""
# Import python native libs
from __future__ import absolute_import
import logging
# Import library internal modules
from .config_manager import ConfigManager
from .constants import CONFIG_FILE_KEY_EXECUTION, MODEL_ID
from .model_executor import ModelExecutor
from .model_selector import ModelSelector
# Import pypi libs
class Generators:
""" `Generators` class: Contains medigan's public methods to facilitate users' automated sample generation workflows.
Parameters
----------
config_manager: ConfigManager
Provides the config dictionary, based on which `model_ids` are retrieved and models are selected and executed
model_selector: ModelSelector
Provides model comparison, search, and selection based on keys/values in the selection part of the config dict
model_executors: list
List of initialized `ModelExecutor` instances that handle model package download, init, and sample generation
initialize_all_models: bool
Flag indicating, if True, that one `ModelExecutor` for each `model_id` in the config dict should be
initialized triggered by creation of `Generators` class instance. Note that, if False, the `Generators` class
will only initialize a `ModelExecutor` on the fly when need be i.e. when the generate method for the respective
model is called.
Attributes
----------
config_manager: ConfigManager
Provides the config dictionary, based on which model_ids are retrieved and models are selected and executed
model_selector: ModelSelector
Provides model comparison, search, and selection based on keys/values in the selection part of the config dict
model_executors: list
List of initialized `ModelExecutor` instances that handle model package download, init, and sample generation
"""
def __init__(
self,
config_manager: ConfigManager = None,
model_selector: ModelSelector = None,
model_executors: list = None,
initialize_all_models: bool = False
):
if config_manager is None:
self.config_manager = ConfigManager()
logging.debug(f"Initialized ConfigManager instance: {self.config_manager}")
else:
self.config_manager = config_manager
if model_selector is None:
self.model_selector = ModelSelector(config_manager=self.config_manager)
logging.debug(f"Initialized ModelSelector instance: {self.model_selector}")
else:
self.model_selector = model_selector
if model_executors is None:
self.model_executors = []
else:
self.model_executors = model_executors
if initialize_all_models:
self.add_all_model_executors()
############################ CONFIG MANAGER METHODS ############################
def get_config_by_id(self, model_id: str, config_key: str = None) -> dict:
""" Get and return the part of the config below a `config_key` for a specific `model_id`.
The config_key parameters can be separated by a '.' (dot) to allow for retrieval of nested config keys, e.g,
'execution.generator.name'
This function calls an identically named function in a `ConfigManager` instance.
Parameters
----------
model_id: str
The generative model's unique id
config_key: str
A key of interest present in the config dict
Returns
-------
dict
a dictionary from the part of the config file corresponding to `model_id` and `config_key`.
"""
return self.config_manager.get_config_by_id(model_id=model_id, config_key=config_key)
############################ MODEL SELECTOR METHODS ############################
def get_selection_criteria_by_id(self, model_id: str, is_model_id_removed: bool = True) -> dict:
""" Get and return the selection config dict for a specific model_id.
This function calls an identically named function in a `ModelSelector` instance.
Parameters
----------
model_id: str
The generative model's unique id
is_model_id_removed: bool
flag to to remove the model_ids from first level of dictionary.
Returns
-------
dict
a dictionary corresponding to the selection config of a model
"""
return self.model_selector.get_selection_criteria_by_id(model_id=model_id)
def get_selection_criteria_by_ids(self, model_ids: list = None, are_model_ids_removed: bool = True) -> list:
""" Get and return a list of selection config dicts for each of the specified model_ids.
This function calls an identically named function in a `ModelSelector` instance.
Parameters
----------
model_ids: list
A list of generative models' unique ids
are_model_ids_removed: bool
flag to remove the model_ids from first level of dictionary.
Returns
-------
list
a list of dictionaries each corresponding to the selection config of a model
"""
return self.model_selector.get_selection_criteria_by_ids(model_ids=model_ids,
are_model_ids_removed=are_model_ids_removed)
def get_selection_values_for_key(self, key: str, model_id: str = None) -> list:
""" Get and return the value of a specified key of the selection dict in the config for a specific model_id.
The key param can contain '.' (dot) separations to allow for retrieval of nested config keys such as
'execution.generator.name'
This function calls an identically named function in a `ModelSelector` instance.
Parameters
----------
key: str
The key in the selection dict
model_id: str
The generative model's unique id
Returns
-------
list
a list of the values that correspond to the key in the selection config of the `model_id`.
"""
return self.model_selector.get_selection_values_for_key(key=key, model_id=model_id)
def get_selection_keys(self, model_id: str = None) -> list:
""" Get and return all first level keys from the selection config dict for a specific model_id.
This function calls an identically named function in a `ModelSelector` instance.
Parameters
----------
model_id: str
The generative model's unique id
Returns
-------
list
a list containing the keys as strings of the selection config of the `model_id`.
"""
return self.model_selector.get_selection_keys(model_id=model_id)
def get_models_by_key_value_pair(self, key1: str, value1: str, is_case_sensitive: bool = False) -> list:
""" Get and return a list of model_id dicts that contain the specified key value pair in their selection config.
The key param can contain '.' (dot) separations to allow for retrieval of nested config keys such as
'execution.generator.name'
This function calls an identically named function in a `ModelSelector` instance.
Parameters
----------
key1: str
The key in the selection dict
value1: str
The value in the selection dict that corresponds to key1
is_case_sensitive: bool
flag to evaluate keys and values with case sensitivity if set to True
Returns
-------
list
a list of the dictionaries each containing a models id and the found key-value pair in the models config
"""
return self.model_selector.get_models_by_key_value_pair(key1=key1, value1=value1,
is_case_sensitive=is_case_sensitive)
def rank_models_by_performance(self, model_ids: list = None, metric: str = 'SSIM', order: str = "asc") -> list:
    """ Rank models by `metric` and return the sorted list of model dicts.

    `metric` may be dot-separated to address nested metric keys such as
    'downstream_task.CLF.accuracy'.  Thin delegation wrapper around the
    identically named `ModelSelector` method.

    Parameters
    ----------
    model_ids: list
        only evaluate the `model_ids` in this list. If None, evaluate all available `model_ids`
    metric: str
        The key in the selection dict that corresponds to the metric of interest
    order: str
        sorting order of the ranked results: "asc" (ascending) or "desc" (descending)

    Returns
    -------
    list
        model dictionaries containing metric and `model_id`, sorted by metric.
    """
    selector = self.model_selector
    return selector.rank_models_by_performance(model_ids=model_ids, metric=metric, order=order)
def find_matching_models_by_values(self, values: list, target_values_operator: str = 'AND',
                                   are_keys_also_matched: bool = False, is_case_sensitive: bool = False) -> list:
    """ Search model configs for `values` (and optionally keys); return matching `ModelMatchCandidate`s.

    Thin delegation wrapper around the identically named `ModelSelector` method.

    Parameters
    ----------
    values: list
        values used to search and find models corresponding to these `values`
    target_values_operator: str
        operator relating the `values` during evaluation: "AND", "OR", or "XOR"
    are_keys_also_matched: bool
        if True, config keys are searched in addition to values
    is_case_sensitive: bool
        if True, the search is case-sensitive

    Returns
    -------
    list
        `ModelMatchCandidate` instances that matched the search values.
    """
    selector = self.model_selector
    return selector.find_matching_models_by_values(values=values,
                                                   target_values_operator=target_values_operator,
                                                   are_keys_also_matched=are_keys_also_matched,
                                                   is_case_sensitive=is_case_sensitive)
def find_models_and_rank(self, values: list, target_values_operator: str = 'AND',
are_keys_also_matched: bool = False, is_case_sensitive: bool = False,
metric: str = 'SSIM', order: str = "asc") -> list:
""" Search for values (and keys) in model configs, rank results and return sorted list of model dicts.
This function calls an identically named function in a `ModelSelector` instance.
Parameters
----------
values: list`
list of values used to search and find models corresponding to these `values`
target_values_operator: str
the operator indicating the relationship between `values` in the evaluation of model search results.
Should be either "AND", "OR", or "XOR".
are_keys_also_matched: bool
flag indicating whether, apart from values, the keys in the model config should also be searchable
is_case_sensitive: bool
flag indicating whether the search for values (and) keys in the model config should be case-sensitive.
metric: str
The key in the selection dict that corresponds to the metric of interest
order: | |
visibility.
# TODO: Let renderer actually use the visibility to decide whether to render or not.
def isVisible(self) -> bool:
    """Whether this node is visible: requires the node's own flag to be set
    and, when a parent exists, the parent chain to be visible as well."""
    parent = self._parent
    if parent is None or not self._visible:
        return self._visible
    return parent.isVisible()
## Set the visibility of this SceneNode.
#  \param visible New visibility flag; read back (recursively) via isVisible().
def setVisible(self, visible: bool) -> None:
    self._visible = visible
## \brief Get the (original) mesh data from the scene node/object.
#  \returns MeshData The mesh previously set via setMeshData(), or None if no mesh is set.
def getMeshData(self) -> Optional[MeshData]:
    return self._mesh_data
## \brief Get the transformed mesh data from the scene node/object, based on the transformation of scene nodes wrt root.
#  If this node is a group, it will recursively concatenate all child nodes/objects.
#  \returns MeshData A new MeshData built from the world-space vertices and normals.
def getMeshDataTransformed(self) -> Optional[MeshData]:
    return MeshData(vertices = self.getMeshDataTransformedVertices(), normals = self.getMeshDataTransformedNormals())
## \brief Get the transformed vertices from this scene node/object, based on the transformation of scene nodes wrt root.
#  If this node is a group, the vertices of all children are recursively concatenated.
#  \return numpy.ndarray World-space vertices, or None when no mesh data is available.
def getMeshDataTransformedVertices(self) -> numpy.ndarray:
    if self.callDecoration("isGroup"):
        combined = None
        for child in self._children:
            child_vertices = child.getMeshDataTransformedVertices()
            if combined is None:
                combined = child_vertices
            else:
                combined = numpy.concatenate((combined, child_vertices), axis = 0)
        return combined
    if self._mesh_data:
        return self._mesh_data.getTransformed(self.getWorldTransformation()).getVertices()
    return None
## \brief Get the transformed normals from this scene node/object, based on the transformation of scene nodes wrt root.
#  If this node is a group, the normals of all children are recursively concatenated.
#  \return numpy.ndarray World-space normals, or None when no mesh data is available.
def getMeshDataTransformedNormals(self) -> numpy.ndarray:
    if self.callDecoration("isGroup"):
        combined = None
        for child in self._children:
            child_normals = child.getMeshDataTransformedNormals()
            if combined is None:
                combined = child_normals
            else:
                combined = numpy.concatenate((combined, child_normals), axis = 0)
        return combined
    if self._mesh_data:
        return self._mesh_data.getTransformed(self.getWorldTransformation()).getNormals()
    return None
## \brief Set the mesh of this node/object.
#  \param mesh_data MeshData object, or None to clear the mesh.
def setMeshData(self, mesh_data: Optional[MeshData]) -> None:
    self._mesh_data = mesh_data
    self._resetAABB()  # cached bounding box no longer matches the new mesh
    self.meshDataChanged.emit(self)
## Emitted whenever the attached mesh data object changes.
#  Child signals are chained to this one in addChild(), so a change anywhere
#  below this node also fires here.
meshDataChanged = Signal()
# Re-emit meshDataChanged with this node as the sender (forwards change notifications).
def _onMeshDataChanged(self) -> None:
    self.meshDataChanged.emit(self)
## \brief Add a child to this node and set its parent to this node.
#  Chains the child's change signals to this node's signals so changes
#  propagate up the tree, and reparents the child when necessary.
#  \param scene_node SceneNode to add; adding an existing child is a no-op.
def addChild(self, scene_node: "SceneNode") -> None:
    if scene_node in self._children:
        return
    scene_node.transformationChanged.connect(self.transformationChanged)
    scene_node.childrenChanged.connect(self.childrenChanged)
    scene_node.meshDataChanged.connect(self.meshDataChanged)
    self._children.append(scene_node)
    self._resetAABB()  # bounding box must include the new child
    self.childrenChanged.emit(self)
    # Fix: idiomatic "is not" instead of "not ... is" (same behavior).
    if scene_node._parent is not self:
        scene_node._parent = self
        scene_node._transformChanged()
        scene_node.parentChanged.emit(self)
## \brief Remove a single child.
#  Disconnects the child's chained signals, detaches it and notifies listeners.
#  \param child Scene node that needs to be removed; no-op when it is not a child.
def removeChild(self, child: "SceneNode") -> None:
    if child not in self._children:
        return
    # undo the signal chaining done in addChild
    child.transformationChanged.disconnect(self.transformationChanged)
    child.childrenChanged.disconnect(self.childrenChanged)
    child.meshDataChanged.disconnect(self.meshDataChanged)
    self._children.remove(child)
    child._parent = None
    child._transformChanged()
    child.parentChanged.emit(self)
    self._resetAABB()  # bounding box must be recomputed without this child
    self.childrenChanged.emit(self)
## \brief Remove all children and, recursively, their children.
#  Fix: iterate over a *copy* of the child list. removeChild() mutates
#  self._children, and removing elements from a list while iterating it
#  skips every other element, leaving some children attached.
def removeAllChildren(self) -> None:
    for child in list(self._children):
        child.removeAllChildren()
        self.removeChild(child)
    self.childrenChanged.emit(self)
## \brief Get the list of direct children.
#  \returns List of children (the internal list itself, not a copy — callers
#           should not mutate it directly).
def getChildren(self) -> List["SceneNode"]:
    return self._children
def hasChildren(self) -> bool:
    """Return True when this node has at least one direct child."""
    # Idiom fix: bool() instead of the redundant "True if ... else False".
    return bool(self._children)
## \brief Get a flat list of all descendants (children, grandchildren, ...).
#  \returns list All nodes below this one: direct children first, then each
#           child's descendants in order.
def getAllChildren(self) -> List["SceneNode"]:
    descendants = list(self._children)
    for child in self._children:
        descendants.extend(child.getAllChildren())
    return descendants
## \brief Emitted whenever the list of children of this object or any child object changes.
#  Child signals are chained to this one in addChild(), so the event bubbles up.
#  \param object The object that triggered the change.
childrenChanged = Signal()
## \brief Computes and returns the transformation from world to local space.
#  \returns 4x4 transformation matrix (a copy — callers may mutate it freely).
def getWorldTransformation(self) -> Matrix:
    if self._world_transformation is None:
        self._updateWorldTransformation()  # lazily recomputed after invalidation
    return self._world_transformation.copy()
## \brief Returns the local transformation with respect to its parent. (from parent to local)
#  \returns transformation 4x4 (homogeneous) matrix (a copy — callers may mutate it freely).
def getLocalTransformation(self) -> Matrix:
    if self._transformation is None:
        self._updateLocalTransformation()  # lazily recomputed after invalidation
    return self._transformation.copy()
## Set the local transformation of this node.
#  \param transformation 4x4 matrix; copied so the caller's instance is never mutated.
def setTransformation(self, transformation: Matrix) -> None:
    self._transformation = transformation.copy()  # Make a copy to ensure we never change the given transformation
    self._transformChanged()
## Get the local orientation value.
#  Returns a deep copy so callers cannot mutate the node's state.
def getOrientation(self) -> Quaternion:
    return deepcopy(self._orientation)
## Get the derived (world-space) orientation value, as a deep copy.
def getWorldOrientation(self) -> Quaternion:
    return deepcopy(self._derived_orientation)
## \brief Rotate the scene object (and thus its children) by given amount
#
#  \param rotation \type{Quaternion} A quaternion indicating the amount of rotation.
#  \param transform_space The space relative to which to rotate. Can be any one of the constants in SceneNode::TransformSpace.
def rotate(self, rotation: Quaternion, transform_space: int = TransformSpace.Local) -> None:
    if not self._enabled:  # disabled nodes ignore transform requests
        return
    orientation_matrix = rotation.toMatrix()
    if transform_space == SceneNode.TransformSpace.Local:
        self._transformation.multiply(orientation_matrix)  # post-multiply for Local
    elif transform_space == SceneNode.TransformSpace.Parent:
        self._transformation.preMultiply(orientation_matrix)  # pre-multiply for Parent
    elif transform_space == SceneNode.TransformSpace.World:
        # sandwich the rotation between inverse-world and world so it is
        # applied in world space
        self._transformation.multiply(self._world_transformation.getInverse())
        self._transformation.multiply(orientation_matrix)
        self._transformation.multiply(self._world_transformation)
    self._transformChanged()
## Set the local orientation of this scene node.
#
#  \param orientation \type{Quaternion} The new orientation of this scene node.
#  \param transform_space The space relative to which to rotate. Can be Local or World from SceneNode::TransformSpace.
def setOrientation(self, orientation: Quaternion, transform_space: int = TransformSpace.Local) -> None:
    if not self._enabled or orientation == self._orientation:
        return
    if transform_space == SceneNode.TransformSpace.World:
        if self.getWorldOrientation() == orientation:
            return
        # convert the requested world orientation into an equivalent local one
        new_orientation = orientation * (self.getWorldOrientation() * self._orientation.getInverse()).invert()
        orientation_matrix = new_orientation.toMatrix()
    else:  # Local
        orientation_matrix = orientation.toMatrix()
    euler_angles = orientation_matrix.getEuler()
    # rebuild the local matrix: keep scale/position/shear, replace the rotation
    new_transform_matrix = Matrix()
    new_transform_matrix.compose(scale = self._scale, angles = euler_angles, translate = self._position, shear = self._shear)
    self._transformation = new_transform_matrix
    self._transformChanged()
## Get the local scaling value.
#  Note: returns the stored Vector itself, not a copy.
def getScale(self) -> Vector:
    return self._scale
## Get the derived (world-space) scaling value; not a copy.
def getWorldScale(self) -> Vector:
    return self._derived_scale
## Scale the scene object (and thus its children) by given amount
#
#  \param scale \type{Vector} A Vector with three scale values
#  \param transform_space The space relative to which to scale. Can be any one of the constants in SceneNode::TransformSpace.
def scale(self, scale: Vector, transform_space: int = TransformSpace.Local) -> None:
    if not self._enabled:  # disabled nodes ignore transform requests
        return
    scale_matrix = Matrix()
    scale_matrix.setByScaleVector(scale)
    if transform_space == SceneNode.TransformSpace.Local:
        self._transformation.multiply(scale_matrix)  # post-multiply for Local
    elif transform_space == SceneNode.TransformSpace.Parent:
        self._transformation.preMultiply(scale_matrix)  # pre-multiply for Parent
    elif transform_space == SceneNode.TransformSpace.World:
        # sandwich between inverse-world and world so scaling applies in world space
        self._transformation.multiply(self._world_transformation.getInverse())
        self._transformation.multiply(scale_matrix)
        self._transformation.multiply(self._world_transformation)
    self._transformChanged()
## Set the local scale value.
#
#  \param scale \type{Vector} The new scale value of the scene node.
#  \param transform_space The space relative to which to rotate. Can be Local or World from SceneNode::TransformSpace.
def setScale(self, scale: Vector, transform_space: int = TransformSpace.Local) -> None:
    if not self._enabled or scale == self._scale:
        return
    if transform_space == SceneNode.TransformSpace.Local:
        # apply the ratio between the requested and current local scale
        self.scale(scale / self._scale, SceneNode.TransformSpace.Local)
        return
    if transform_space == SceneNode.TransformSpace.World:
        if self.getWorldScale() == scale:
            return
        # NOTE(review): divides by the *local* scale even in World space —
        # confirm this is intended rather than scale / self._derived_scale.
        self.scale(scale / self._scale, SceneNode.TransformSpace.World)
## Get the local position.
#  Note: returns the stored Vector itself, not a copy.
def getPosition(self) -> Vector:
    return self._position
## Get the position of this scene node relative to the world; not a copy.
def getWorldPosition(self) -> Vector:
    return self._derived_position
## Translate the scene object (and thus its children) by given amount.
#
#  \param translation \type{Vector} The amount to translate by.
#  \param transform_space The space relative to which to translate. Can be any one of the constants in SceneNode::TransformSpace.
def translate(self, translation: Vector, transform_space: int = TransformSpace.Local) -> None:
    if not self._enabled:  # disabled nodes ignore transform requests
        return
    translation_matrix = Matrix()
    translation_matrix.setByTranslation(translation)
    if transform_space == SceneNode.TransformSpace.Local:
        self._transformation.multiply(translation_matrix)  # post-multiply for Local
    elif transform_space == SceneNode.TransformSpace.Parent:
        self._transformation.preMultiply(translation_matrix)  # pre-multiply for Parent
    elif transform_space == SceneNode.TransformSpace.World:
        # keep a copy of the world matrix for the final multiply of the sandwich
        world_transformation = self._world_transformation.copy()
        self._transformation.multiply(self._world_transformation.getInverse())
        self._transformation.multiply(translation_matrix)
        self._transformation.multiply(world_transformation)
    self._transformChanged()
## Set the local position value.
#
#  \param position The new position value of the SceneNode.
#  \param transform_space The space relative to which to rotate. Can be Local or World from SceneNode::TransformSpace.
def setPosition(self, position: Vector, transform_space: int = TransformSpace.Local) -> None:
    if not self._enabled or position == self._position:
        return
    if transform_space == SceneNode.TransformSpace.Local:
        # move by the delta, expressed relative to the parent
        self.translate(position - self._position, SceneNode.TransformSpace.Parent)
    if transform_space == SceneNode.TransformSpace.World:
        if self.getWorldPosition() == position:
            return
        # move by the delta from the current world position
        self.translate(position - self._derived_position, SceneNode.TransformSpace.World)
## Signal. Emitted whenever the transformation of this object or any child object changes.
#  Child signals are chained to this one in addChild(), so the event bubbles up.
#  \param object The object that caused the change.
transformationChanged = Signal()
## Rotate this scene node in such a way that it is looking at target.
#
#  \param target \type{Vector} The target to look at.
#  \param up \type{Vector} The vector to consider up. Defaults to Vector.Unit_Y, i.e. (0, 1, 0).
def lookAt(self, target: Vector, up: Vector = Vector.Unit_Y) -> None:
    if not self._enabled:  # disabled nodes ignore transform requests
        return
    eye = self.getWorldPosition()
    # build an orthonormal basis: f = forward, s = side, u = re-orthogonalized up
    f = (target - eye).normalized()
    up = up.normalized()
    s = f.cross(up).normalized()
    u = s.cross(f).normalized()
    # column-style rotation matrix from the basis (forward negated)
    m = Matrix([
        [ s.x, u.x, -f.x, 0.0],
        [ s.y, u.y, -f.y, 0.0],
        [ s.z, u.z, -f.z, 0.0],
        [ 0.0, 0.0, 0.0, 1.0]
    ])
    self.setOrientation(Quaternion.fromMatrix(m))
## Can be overridden by child nodes if they need to perform special rendering.
# If | |
import ctypes
import numpy
from numpy.ctypeslib import ndpointer
import pkg_resources
import enum
import os
import platform
import sys
import struct
import json
from typing import List, Set, Dict, Tuple
from nptyping import NDArray, Float64
from brainflow.exit_codes import BrainflowExitCodes
class BoardIds(enum.IntEnum):
    """Enum to store all supported Board Ids.

    Negative ids are virtual boards (playback / streaming / synthetic).
    Note: NOTION_OSC_BOARD and NOTION_1_BOARD share the value 13, so
    NOTION_1_BOARD is an alias of NOTION_OSC_BOARD (IntEnum aliasing).
    """
    PLAYBACK_FILE_BOARD = -3 #:
    STREAMING_BOARD = -2 #:
    SYNTHETIC_BOARD = -1 #:
    CYTON_BOARD = 0 #:
    GANGLION_BOARD = 1 #:
    CYTON_DAISY_BOARD = 2 #:
    GALEA_BOARD = 3 #:
    GANGLION_WIFI_BOARD = 4 #:
    CYTON_WIFI_BOARD = 5 #:
    CYTON_DAISY_WIFI_BOARD = 6 #:
    BRAINBIT_BOARD = 7 #:
    UNICORN_BOARD = 8 #:
    CALLIBRI_EEG_BOARD = 9 #:
    CALLIBRI_EMG_BOARD = 10 #:
    CALLIBRI_ECG_BOARD = 11 #:
    FASCIA_BOARD = 12 #:
    NOTION_OSC_BOARD = 13 #:
    NOTION_1_BOARD = 13 #:
    NOTION_2_BOARD = 14 #:
    IRONBCI_BOARD = 15 #:
    GFORCE_PRO_BOARD = 16 #:
    FREEEEG32_BOARD = 17 #:
class LogLevels(enum.IntEnum):
    """Enum to store all log levels supported by BrainFlow.

    Passed to BoardShim.set_log_level(); higher values are less verbose,
    LEVEL_OFF disables logging entirely.
    """
    LEVEL_TRACE = 0 #:
    LEVEL_DEBUG = 1 #:
    LEVEL_INFO = 2 #:
    LEVEL_WARN = 3 #:
    LEVEL_ERROR = 4 #:
    LEVEL_CRITICAL = 5 #:
    LEVEL_OFF = 6 #:
class IpProtocolType(enum.IntEnum):
    """Enum to store Ip Protocol types.

    Used for BrainFlowInputParams.ip_protocol; NONE is the default.
    """
    NONE = 0 #:
    UDP = 1 #:
    TCP = 2 #:
class BrainFlowInputParams(object):
    """ inputs parameters for prepare_session method

    :param serial_port: serial port name is used for boards which reads data from serial port
    :type serial_port: str
    :param mac_address: mac address for example its used for bluetooth based boards
    :type mac_address: str
    :param ip_address: ip address is used for boards which reads data from socket connection
    :type ip_address: str
    :param ip_port: ip port for socket connection, for some boards where we know it in front you dont need this parameter
    :type ip_port: int
    :param ip_protocol: ip protocol type from IpProtocolType enum
    :type ip_protocol: int
    :param other_info: other info
    :type other_info: str
    :param timeout: timeout value (board specific semantics; 0 means the library default)
    :type timeout: int
    :param serial_number: serial number
    :type serial_number: str
    :param file: file
    :type file: str
    """

    def __init__(self) -> None:
        self.serial_port = ''
        self.mac_address = ''
        self.ip_address = ''
        self.ip_port = 0
        self.ip_protocol = IpProtocolType.NONE.value
        self.other_info = ''
        self.timeout = 0
        self.serial_number = ''
        self.file = ''

    def to_json(self) -> str:
        # Fix: the annotation claimed "-> None" but the method returns the
        # serialized JSON string (all public attributes, sorted, indented).
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)
class BrainFlowError(Exception):
    """Raised when the native BrainFlow API returns a non-zero exit code.

    :param message: human-readable description of the failed call
    :type message: str
    :param exit_code: exit code returned by the low level API
    :type exit_code: int
    """

    def __init__(self, message: str, exit_code: int) -> None:
        code_name = BrainflowExitCodes(exit_code).name
        super(BrainFlowError, self).__init__('%s:%d %s' % (code_name, exit_code, message))
        self.exit_code = exit_code
class BoardControllerDLL(object):
    """Lazy singleton that loads the native BoardController library and binds
    its C entry points as ctypes functions.  Every bound function returns a
    c_int exit code; argument signatures are declared in __init__."""

    __instance = None

    @classmethod
    def get_instance(cls):
        """Return the process-wide instance, creating it on first use."""
        if cls.__instance is None:
            cls.__instance = cls()
        return cls.__instance

    def __init__(self):
        # pick the library file matching OS and pointer width
        if platform.system() == 'Windows':
            if struct.calcsize("P") * 8 == 64:
                dll_path = 'lib\\BoardController.dll'
            else:
                dll_path = 'lib\\BoardController32.dll'
        elif platform.system() == 'Darwin':
            dll_path = 'lib/libBoardController.dylib'
        else:
            dll_path = 'lib/libBoardController.so'
        full_path = pkg_resources.resource_filename(__name__, dll_path)
        if not os.path.isfile(full_path):
            raise FileNotFoundError(
                'Dynamic library %s is missed, did you forget to compile brainflow before installation of python package?' % full_path)
        # The library may depend on sibling dlls which the loader would not
        # find on its own; extending the search-path env var once covers all
        # of those dependencies.
        dir_path = os.path.abspath(os.path.dirname(full_path))
        path_var = 'PATH' if platform.system() == 'Windows' else 'LD_LIBRARY_PATH'
        os.environ[path_var] = dir_path + os.pathsep + os.environ.get(path_var, '')
        self.lib = ctypes.cdll.LoadLibrary(full_path)

        int_t = ctypes.c_int
        str_t = ctypes.c_char_p
        int32_arr = ndpointer(ctypes.c_int32)
        double_arr = ndpointer(ctypes.c_double)
        byte_arr = ndpointer(ctypes.c_ubyte)
        # name -> argtypes; restype is always c_int (exit code)
        signatures = {
            'prepare_session': [int_t, str_t],
            'is_prepared': [int32_arr, int_t, str_t],
            'start_stream': [int_t, str_t, int_t, str_t],
            'stop_stream': [int_t, str_t],
            'get_current_board_data': [int_t, double_arr, int32_arr, int_t, str_t],
            'get_board_data': [int_t, double_arr, int_t, str_t],
            'release_session': [int_t, str_t],
            'get_board_data_count': [int32_arr, int_t, str_t],
            'set_log_level': [int_t],
            'set_log_file': [str_t],
            'log_message': [int_t, str_t],
            'config_board': [str_t, byte_arr, int32_arr, int_t, str_t],
            'get_sampling_rate': [int_t, int32_arr],
            'get_battery_channel': [int_t, int32_arr],
            'get_package_num_channel': [int_t, int32_arr],
            'get_timestamp_channel': [int_t, int32_arr],
            'get_num_rows': [int_t, int32_arr],
            'get_eeg_names': [int_t, byte_arr, int32_arr],
            'get_device_name': [int_t, byte_arr, int32_arr],
        }
        # every channel-list getter shares the same (board_id, out_array, out_len) shape
        for channel_getter in ('get_eeg_channels', 'get_exg_channels', 'get_emg_channels',
                               'get_ecg_channels', 'get_eog_channels', 'get_ppg_channels',
                               'get_eda_channels', 'get_accel_channels', 'get_analog_channels',
                               'get_gyro_channels', 'get_other_channels',
                               'get_temperature_channels', 'get_resistance_channels'):
            signatures[channel_getter] = [int_t, int32_arr, int32_arr]
        for name, argtypes in signatures.items():
            func = getattr(self.lib, name)
            func.restype = int_t
            func.argtypes = argtypes
            setattr(self, name, func)
class BoardShim(object):
"""BoardShim class is a primary interface to all boards
:param board_id: Id of your board
:type board_id: int
:param input_params: board specific structure to pass required arguments
:type input_params: BrainFlowInputParams
"""
def __init__(self, board_id: int, input_params: BrainFlowInputParams) -> None:
    # to_json() yields a str; encode it to bytes for the C API. The bare
    # except keeps already-encoded (bytes-like) params working unchanged.
    try:
        self.input_json = input_params.to_json().encode()
    except:
        self.input_json = input_params.to_json()
    self.board_id = board_id
    # we need it for streaming board
    # STREAMING_BOARD / PLAYBACK_FILE_BOARD wrap another ("master") board whose
    # id must be passed via other_info; that id determines the data layout.
    if board_id == BoardIds.STREAMING_BOARD.value or board_id == BoardIds.PLAYBACK_FILE_BOARD.value:
        try:
            self._master_board_id = int(input_params.other_info)
        except:
            raise BrainFlowError('set master board id using params.other_info',
                                 BrainflowExitCodes.INVALID_ARGUMENTS_ERROR.value)
    else:
        self._master_board_id = self.board_id
@classmethod
def set_log_level(cls, log_level: int) -> None:
    """set BrainFlow log level, use it only if you want to write your own messages to BrainFlow logger,
    otherwise use enable_board_logger, enable_dev_board_logger or disable_board_logger

    :param log_level: log level, to specify it you should use values from LogLevels enum
    :type log_level: int
    :raises BrainFlowError: if the native call returns a non-zero exit code
    """
    res = BoardControllerDLL.get_instance().set_log_level(log_level)
    if res != BrainflowExitCodes.STATUS_OK.value:
        raise BrainFlowError('unable to enable logger', res)
@classmethod
def enable_board_logger(cls) -> None:
    """enable BrainFlow Logger with level INFO, uses stderr for log messages by default"""
    # convenience wrapper around set_log_level
    cls.set_log_level(LogLevels.LEVEL_INFO.value)
@classmethod
def disable_board_logger(cls) -> None:
    """disable BrainFlow Logger"""
    # LEVEL_OFF suppresses all log output
    cls.set_log_level(LogLevels.LEVEL_OFF.value)
@classmethod
def enable_dev_board_logger(cls) -> None:
    """enable BrainFlow Logger with level TRACE, uses stderr for log messages by default"""
    # TRACE is the most verbose level (see LogLevels)
    cls.set_log_level(LogLevels.LEVEL_TRACE.value)
@classmethod
def log_message(cls, log_level: int, message: str) -> None:
    """write your own log message to BrainFlow logger, use it if you wanna have single logger for your own code and BrainFlow's code

    :param log_level: log level
    :type log_level: int
    :param message: message
    :type message: str
    :raises BrainFlowError: if the native call returns a non-zero exit code
    """
    # str.encode() works for str input; the bare except keeps bytes input working
    try:
        msg = message.encode()
    except:
        msg = message
    res = BoardControllerDLL.get_instance().log_message(log_level, msg)
    if res != BrainflowExitCodes.STATUS_OK.value:
        raise BrainFlowError('unable to write log message', res)
@classmethod
def set_log_file(cls, log_file: str) -> None:
    """redirect logger from stderr to file, can be called any time

    :param log_file: log file name
    :type log_file: str
    :raises BrainFlowError: if the native call returns a non-zero exit code
    """
    # str.encode() works for str input; the bare except keeps bytes input working
    try:
        file = log_file.encode()
    except:
        file = log_file
    res = BoardControllerDLL.get_instance().set_log_file(file)
    if res != BrainflowExitCodes.STATUS_OK.value:
        raise BrainFlowError('unable to redirect logs to a file', res)
@classmethod
def get_sampling_rate(cls, board_id: int) -> int:
"""get sampling rate for a board
| |
import math
from itertools import combinations
from collections import namedtuple, defaultdict
import cvxpy as cvx
import numpy as np
import numpy.linalg as LA
import scipy.optimize
import matplotlib.pyplot as plt
PI2 = math.pi * 2  # one full turn in radians
MAX_ANGLE = PI2 / 8 / 10  # angular tolerance for line/vanishing-point matching (~0.0785 rad)
cmap = plt.get_cmap("brg")  # colormap used by the (commented-out) debug plots below
def vp_from_lines(x1, y1, x2, y2):
    """Estimate the point (x, y) closest to all lines through (x1, y1)-(x2, y2)
    pairs by minimizing the sum of absolute cross-product residuals (cvxpy L1 fit)."""
    vx, vy = cvx.Variable(), cvx.Variable()
    assert len(x1) == len(x2) == len(y1) == len(y2)
    # residual of each line: cross product of direction and point offset
    residual = cvx.multiply(x2 - x1, y1 - vy) - cvx.multiply(y2 - y1, x1 - vx)
    cvx.Problem(cvx.Minimize(cvx.sum(cvx.abs(residual))), []).solve()
    return vx.value, vy.value
def estimate_intrinsic_from_depth(vertices, line):
    """Estimate a diagonal intrinsic matrix diag(fx, fy, 1) by optimizing an
    orthogonality residual over edge pairs that share a vertex.

    vertices : indexable by the ids in `line`; each entry a 3-vector
    line : iterable of (v0, v1) vertex-id pairs

    Returns a 3x3 numpy array [[fx, 0, 0], [0, fy, 0], [0, 0, 1]].
    Raises AssertionError if the optimizer returns a non-positive solution
    (no real focal length).
    """
    # adjacency map: vertex id -> list of neighbor ids
    edges = defaultdict(list)
    for v0, v1 in line:
        edges[v0].append(v1)
        edges[v1].append(v0)
    # drop degree-1 vertices: a single incident edge gives no angle constraint
    for v in edges.copy():
        if len(edges[v]) == 1:
            del edges[v]
    def objective(x):
        # x holds two per-axis terms; consistent with f = (1/x)**0.5 below these
        # are presumably inverse squared focal lengths — TODO confirm.
        inv2f = np.array([x[0], x[1], 1])
        o = 0
        for v0 in edges:
            for v1, v2 in combinations(edges[v0], 2):
                dv1 = vertices[v1] - vertices[v0]
                dv2 = vertices[v2] - vertices[v0]
                # scaled dot product: zero when the two edges are orthogonal
                o += abs((inv2f * dv1) @ dv2)
        return o
    inv2f = scipy.optimize.minimize(
        objective, np.array([1, 1]), options={"disp": True}
    ).x
    assert (inv2f > 0).all()
    f = (1 / inv2f) ** 0.5
    return np.array([[f[0], 0, 0], [0, f[1], 0], [0, 0, 1]])
def estimate_intrinsic_from_vp(vp1, vp2, vp3):
def objective(x):
inv2f = np.array([x[0], x[0], 1])
o = 0
o += abs((inv2f * vp1) @ vp2)
o += abs((inv2f * vp2) @ vp3)
o += abs((inv2f * vp3) @ vp1)
return o
inv2f = scipy.optimize.minimize(objective, np.array([1]), method="COBYLA").x
if inv2f[0] < 0:
return np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]]), objective(inv2f)
f = (1 / inv2f) ** 0.5
return np.array([[f[0], 0, 0], [0, f[0], 0], [0, 0, 1]]), objective(inv2f)
def estimate_3intrinsic_from_vp(vp1, vp2, vp3):
    """Estimate intrinsics K = [[f, 0, ox], [0, f, oy], [0, 0, 1]] (focal length
    plus principal point) from three vanishing points.

    Returns (K, residual).  Raises AssertionError when the optimum implies a
    non-real focal length (S[2] - ox**2 - oy**2 <= 0).
    """
    def objective(x):
        # x = (ox, oy, s); S is built so that f = sqrt(s - ox^2 - oy^2) below —
        # presumably a parameterization of the absolute-conic image, TODO confirm.
        S = np.array([[1, 0, -x[0]], [0, 1, -x[1]], [-x[0], -x[1], x[2]]])
        o = (vp1 @ S @ vp2) ** 2 + (vp2 @ S @ vp3) ** 2 + (vp3 @ S @ vp1) ** 2
        return o
    S = scipy.optimize.minimize(objective, np.array([1, 1, 1]), method="COBYLA").x
    ox, oy = S[0], S[1]
    assert S[2] - ox ** 2 - oy ** 2 > 0
    f = math.sqrt(S[2] - ox ** 2 - oy ** 2)
    return np.array([[f, 0, ox], [0, f, oy], [0, 0, 1]]), objective(S)
def to_world(junctions, juncdepth, lines, K):
    """Back-project 2D junctions into 3D using per-junction depths and the
    intrinsic matrix K.

    junctions : (N, 2) array of image coordinates
    juncdepth : (N,) array of depths
    lines : unused here (kept for interface compatibility)
    K : 3x3 intrinsic matrix

    Returns (vertices, projection): the (N, 3) back-projected points and a
    fixed 4x4 float32 projection matrix built from K's entries.
    """
    homogeneous = np.c_[junctions, np.ones(len(junctions))]
    vertices = (homogeneous * juncdepth[:, None]) @ LA.inv(K).T
    projection = np.array(
        [
            [K[0, 0], 0, K[0, 2], 0],
            [0, K[1, 1], K[1, 2], 0],
            [0, 0, 0, -1],
            [0, 0, 1, 0],
        ],
        dtype=np.float32,
    )
    return vertices, projection
def vanish_point_clustering(junctions, lines):
    """Greedily cluster 2D line segments by shared vanishing point.

    junctions : (N, 2) array of endpoints; lines : (i1, i2) index pairs.
    Returns a list of (vp, cluster) tuples, where vp is a unit 3-vector
    (homogeneous vanishing point) and cluster an array of line indices.
    """
    junctions_ = np.concatenate([junctions, np.ones([junctions.shape[0], 1])], axis=1)
    normals, weights = [], []
    for i1, i2 in lines:
        # homogeneous line through the two junctions, normalized for stable angles
        n = np.cross(junctions_[i1], junctions_[i2])
        n = n / LA.norm(n)
        normals.append(n)
        # segment length is used as the line's weight below
        weights.append(LA.norm(junctions[i1] - junctions[i2]))
    normals, weights = np.array(normals), np.array(weights)
    weights /= np.amax(weights)
    clusters = []
    for i, (n1, (i1, i2)) in enumerate(zip(normals, lines)):
        for j, (n2, (j1, j2)) in enumerate(zip(normals[:i], lines[:i])):
            w = np.cross(n1, n2)  # candidate vanishing point of lines i and j
            if LA.norm(w) < 1e-4:
                continue
            w /= LA.norm(w)
            # angular deviation of every line's normal from the candidate point
            theta = np.abs(np.arcsin((w[None, :] * normals).sum(axis=1)))
            c = np.where(theta < PI2 / 8 / 15)[0]
            intersected = False
            # NOTE(review): 'intersected' is always False, so this guard only
            # checks cluster size — confirm whether a real intersection test
            # was intended here.
            if not intersected and len(c) >= 7:
                clusters.append((w, c))
                # plt.scatter(w[0] / w[2], w[1] / w[2])
                # for l in c:
                #     v1, v2 = junctions[lines[l][0]], junctions[lines[l][1]]
                #     plt.plot([v1[0], v2[0]], [v1[1], v2[1]])
                # plt.show()
    vp = []
    while True:
        # pick the strongest remaining cluster (member count + total weight)
        w0, c = max(clusters, key=lambda x: len(x[1]) + weights[x[1]].sum())
        sc = set(c)
        # refine the vanishing point as a sign-aligned average of pairwise
        # cross products of the member line normals
        w = np.zeros(3)
        weight = 0
        for l1 in c:
            for l2 in c:
                if l1 == l2:
                    continue
                w_ = np.cross(normals[l1], normals[l2])
                if w_ @ w > 0:
                    w += w_
                else:
                    w -= w_
                weight += LA.norm(w_)
        w /= weight
        vp.append((w, c))
        # drop consumed lines; keep only clusters that remain large enough
        clusters = [
            (ww, list(set(cc) - sc)) for ww, cc in clusters if len(set(cc) - sc) >= 7
        ]
        if len(clusters) == 0:
            break
    # plt.figure(), plt.tight_layout()
    # for i, (w, c) in enumerate(vp):
    #     plt.xlim([-5, 5])
    #     plt.ylim([-5, 5])
    #     plt.scatter(w[0] / w[2], w[1] / w[2], c=cmap(i / len(vp)))
    #     for l in c:
    #         v1, v2 = junctions[lines[l][0]], junctions[lines[l][1]]
    #         plt.plot([v1[0], v2[0]], [v1[1], v2[1]], c=cmap(i / len(vp)))
    # plt.show()
    return vp
def vanish_point_clustering2(junctions, lines):
    """Improved version of orthogonal vanish point clustering.

    Builds vanishing-point candidates from pairs of sufficiently long
    lines, prunes subset/duplicate clusters, then (when possible) selects
    the orthogonal triple that best explains the lines via the estimated
    camera intrinsics.  Returns [(w, assigned_line_id_set), ...].
    """
    # Homogeneous junction coordinates.
    junctions_ = np.concatenate([junctions, np.ones([junctions.shape[0], 1])], axis=1)
    normals, weights = [], []
    def nearby_lines(w):
        # Indices of lines whose interpretation-plane normal is nearly
        # orthogonal to w, i.e. lines approximately through vanishing point w.
        # MAX_ANGLE is presumably a module-level threshold -- TODO confirm.
        theta = np.abs(np.arcsin((w[None, :] * normals).sum(axis=1)))
        return np.where(theta < MAX_ANGLE)[0]
    for i1, i2 in lines:
        # Unit normal of the plane through the camera center and the line.
        n = np.cross(junctions_[i1], junctions_[i2])
        n = n / LA.norm(n)
        normals.append(n)
        weights.append(LA.norm(junctions[i1] - junctions[i2]))
    normals, weights = np.array(normals), np.array(weights)
    weights /= np.amax(weights)
    # Only consider lines longer than 5% of the longest line.
    candidates = set([i for i in range(len(lines)) if weights[i] > 0.05])
    clusters = []
    for i, j in combinations(candidates, 2):
        # Candidate vanishing point from a line pair.
        w = np.cross(normals[i], normals[j])
        if LA.norm(w) < 1e-4:
            continue
        w /= LA.norm(w)
        line_candidates = set(nearby_lines(w)) & candidates
        if len(line_candidates) > 3:
            # Refine w as a sign-consistent sum of all pairwise intersections
            # among the supporting lines.
            w = np.zeros(3)
            for p, q in combinations(line_candidates, 2):
                wp = np.cross(normals[p], normals[q])
                w += wp if wp @ w > 0 else -wp
            w /= LA.norm(w)
            # Keep only if well separated from every existing cluster
            # (the 0.99 factor guards acos against domain errors).
            if all(math.acos(abs(w @ wp * 0.99)) > 2 * MAX_ANGLE for wp, _ in clusters):
                clusters.append((w, line_candidates))
    # Mark clusters whose line set is a subset of another cluster's.
    tbd = set()
    for i, j in combinations(range(len(clusters)), 2):
        if i in tbd or j in tbd:
            continue
        c1 = clusters[i][1]
        c2 = clusters[j][1]
        if c1 >= c2:
            tbd.add(j)
        elif c1 <= c2:
            tbd.add(i)
    # Junction -> incident candidate line ids.
    adj = defaultdict(list)
    for lineid in candidates:
        v1, v2 = lines[lineid]
        adj[v1].append(lineid)
        adj[v2].append(lineid)
    # Discard clusters containing two lines that share a junction: lines
    # toward one vanishing point should not meet inside the image.
    for i in range(len(clusters)):
        if i in tbd:
            continue
        for ls in adj.values():
            count = 0
            for l in ls:
                if l in clusters[i][1]:
                    count += 1
            if count > 1:
                tbd.add(i)
                break
    clusters = [clusters[i] for i in range(len(clusters)) if i not in tbd]
    print("Len of clusters", len(clusters), clusters)
    # for i, (w, c) in enumerate(clusters):
    #     plt.figure()
    #     plt.xlim([-1, 1])
    #     plt.ylim([-1, 1])
    #     plt.scatter(w[0] / w[2], w[1] / w[2], c=cmap(i / len(clusters)))
    #     for l in c:
    #         v1, v2 = junctions[lines[l][0]], junctions[lines[l][1]]
    #         plt.plot([v1[0], v2[0]], [v1[1], v2[1]])
    W = []
    if len(clusters) < 3:
        W = [c for c, _ in clusters]
    else:
        # Choose the triple of nearly-disjoint clusters whose vanishing
        # points yield the cheapest plausible camera intrinsics
        # (estimate_intrinsic_from_vp defined elsewhere; the 1.8..4 range
        # bounds the focal length K[0, 0] -- TODO confirm units).
        best_cost = 1e8
        best_coverage = 0
        for (w1, c1), (w2, c2), (w3, c3) in combinations(clusters, 3):
            if max(len(c1 & c2), len(c2 & c3), len(c3 & c1)) > 2:
                continue
            coverage = len(c1 | c2 | c3)
            if coverage < best_coverage - 1:
                continue
            K, cost = estimate_intrinsic_from_vp(w1, w2, w3)
            if K is None:
                continue
            if 1.8 <= K[0, 0] < 4 and cost < best_cost:
                W = [w1, w2, w3]
                best_cost = cost
                best_coverage = coverage
                print(K, cost)
    # Final assignment: each candidate line goes to the closest selected
    # vanishing point (within MAX_ANGLE * 5), if any.
    vp = [(w, set()) for w in W]
    for i in sorted(list(candidates)):
        best = MAX_ANGLE * 5
        bestc = None
        for w, c in vp:
            degree = abs(math.asin(w @ normals[i]))
            if degree < best:
                best = degree
                bestc = c
        if bestc is not None:
            bestc.add(i)
    # plt.figure(), plt.tight_layout()
    # for i, (w, c) in enumerate(vp):
    #     plt.xlim([-1, 1])
    #     plt.ylim([-1, 1])
    #     plt.scatter(w[0] / w[2], w[1] / w[2], c=cmap(i / len(vp)))
    #     for l in c:
    #         v1, v2 = junctions[lines[l][0]], junctions[lines[l][1]]
    #         plt.plot([v1[0], v2[0]], [v1[1], v2[1]], c=cmap(i / len(vp)))
    # plt.show()
    return vp
def vanish_point_refine(junctions, lines, vps, blacklist=None, total_iter=4, plot=False):
    """Alternate between assigning lines to vanishing points and refitting.

    junctions: (N, 2) image points.  lines: (M, 2) junction-index pairs.
    vps: (3, 3) homogeneous vanishing points.  blacklist: line ids to skip
    during assignment.  Returns three (unit homogeneous vp, [line ids])
    pairs, one per vanishing point.
    """
    # BUG FIX: the default used to be a shared mutable list ([]); use a
    # None sentinel with an immutable fallback instead.
    if blacklist is None:
        blacklist = ()
    vps = vps[:, :2] / vps[:, 2:]  # de-homogenize to 2D image points
    assignment = [[], [], []]
    for niter in range(total_iter):
        # Refit each vanishing point from the best 60% (lowest normalized
        # distance) of the lines currently assigned to it.  On the first
        # iteration the assignment is empty, so the initial vps are kept.
        for i in range(3):
            if len(assignment[i]) <= 1:
                continue
            assignment[i].sort(key=lambda x: x[1])
            c = assignment[i][: math.ceil(len(assignment[i]) * 0.6)]
            c = [i for i, score in c]
            x1, y1 = junctions[lines[c, 0], :].T
            x2, y2 = junctions[lines[c, 1], :].T
            vps[i] = vp_from_lines(x1, y1, x2, y2)
        # Reassign each line to the vanishing point with the smallest
        # point-to-line distance (2D cross product with the line direction).
        assignment = [[], [], []]
        for i, (a, b) in enumerate(lines):
            if i in blacklist:
                continue
            bestd = 1e100
            v = junctions[a] - junctions[b]
            for j in range(3):
                dist = abs(np.cross(v, vps[j] - junctions[b]))
                if dist < bestd:
                    bestd = dist
                    bestv = v
                    bestj = j
            # Normalize the distance by the line length.
            assignment[bestj].append((i, bestd / LA.norm(bestv)))
    if plot:
        plt.figure(), plt.tight_layout()
        for i, (c, vp) in enumerate(zip(assignment, vps)):
            plt.xlim([-1, 1])
            plt.ylim([-1, 1])
            plt.scatter(vp[0], vp[1], c=cmap(i / len(vp)))
            for l, _ in c:
                v1, v2 = junctions[lines[l][0]], junctions[lines[l][1]]
                plt.plot([v1[0], v2[0]], [v1[1], v2[1]], c=cmap(i / len(vp)))
        plt.show()
    # Return unit-norm homogeneous vanishing points with their line ids.
    vps = np.c_[vps, np.ones(3)]
    for i in range(3):
        vps[i] /= LA.norm(vps[i])
    return [(vps[i], [i for i, score in assignment[i]]) for i in range(3)]
def | |
"""
"MooseGesture 0.1" a mouse gestures recognition library.
<NAME> <EMAIL>
http://coffeeghost.net/2011/05/09/moosegesture-python-mouse-gestures-module
Usage:
import moosegesture
gesture = moosegesture.getGesture(points)
Where "points" is a list of x, y coordinate tuples, e.g. [(100, 200), (1234, 5678), ...]
getGesture returns a list of integers for the recognized mouse gesture. The integers
correspond to the 8 cardinal and diagonal directions:
up-left up up-right
7 8 9
left 4 6 right
1 2 3
down-left down down-right
Second usage:
strokes = [2, 4, 6]
gestures = [[2, 4, 2], [2, 6, 9]]
gesture = moosegesture.findClosestMatchingGesture(strokes, gestures)
gesture == [2, 4, 2]
Where "strokes" is a list of the directional integers that are returned from
getGesture(). This returns the closest resembling gesture from the list of
gestures that is passed to the function.
The optional "tolerance" parameter can ensure that the "closest" identified
gesture isn't too different.
Explanation of the nomenclature in this module:
A "point" is a 2D tuple of x, y values. These values can be ints or floats,
MooseGesture supports both.
A "point pair" is a point and its immediately subsequent point, i.e. two
points that are next to each other.
A "segment" is two or more ordered points forming a series of lines.
A "stroke" is a segment going in a single direction (one of the 8 cardinal or
diagonal directions: up, upright, left, etc.)
A "gesture" is one or more strokes in a specific pattern, e.g. up then right
then down then left.
# Copyright (c) 2011, <NAME>
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the MooseGesture nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY <NAME> "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Al Sweigart BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from math import sqrt
from sys import maxsize
# This is the minimum distance the mouse must travel (in pixels) before a
# segment will be considered for stroke interpretation.
_MIN_SEG_LEN = 60
# The integers-to-directions mapping matches the keypad:
#   7 8 9
#   4   6
#   1 2 3
# (5 is deliberately unused: it would mean "no movement".)
DOWNLEFT = 1
DOWN = 2
DOWNRIGHT = 3
LEFT = 4
RIGHT = 6
UPLEFT = 7
UP = 8
UPRIGHT = 9
# Short mnemonic for each direction integer, used by getGestureStr().
_strokesStrings = {1:'DL', 2:'D', 3:'DR', 4:'L', 6:'R', 7:'UL', 8:'U', 9:'UR'}
def getGesture(points):
    """Return the recognized gesture as a list of directional integers.

    *points* is a sequence of (x, y) tuples tracing the mouse path; the
    result uses the keypad layout, e.g. [2, 6, 4] for down-right-left.
    """
    strokes, _segments = _identifyStrokes(points)
    return strokes
def getSegments(points):
    """Return the (start, end) index pairs into *points*, one per stroke,
    describing which points make up each consistent stroke."""
    _strokes, segments = _identifyStrokes(points)
    return segments
def getGestureAndSegments(points):
    """Return [(direction, (start, end)), ...], pairing each stroke's
    directional integer with the index range of the points composing it."""
    directions, segments = _identifyStrokes(points)
    return list(zip(directions, segments))
def getGestureStr(strokes):
    """Return space-delimited mnemonics for the strokes.

    E.g. getGestureStr([2, 6, 4]) == 'D R L'.  Accepts either a list of
    directional integers (as returned by getGesture()) or a list of raw
    points, in which case the strokes are identified first.
    """
    # isinstance is the idiomatic type test (was: type(strokes[0]) == type(0))
    if len(strokes) and isinstance(strokes[0], int):
        # strokes is a list of directional integers from getGesture()
        return ' '.join(_strokesStrings[x] for x in strokes)
    else:
        # strokes is a list of points; identify the strokes ourselves
        return ' '.join(_strokesStrings[x] for x in _identifyStrokes(strokes)[0])
def findClosestMatchingGesture(strokes, gestureList, tolerance=maxsize):
    """Return the gesture in *gestureList* closest to *strokes*, or None.

    Both *strokes* and each member of *gestureList* are sequences of the
    directional integers returned by getGesture().  Returns None when
    *gestureList* is empty, when there is a tie for the closest gesture,
    or when the closest gesture needs more than *tolerance* edits.
    """
    if len(gestureList) == 0:
        return None
    # Compare gestures as digit strings so the string edit distance applies.
    # BUG FIX: the inputs hold ints, so they must be converted before
    # joining (''.join on ints raised TypeError).
    strokes = ''.join(str(s) for s in strokes)
    gestureList = [''.join(str(s) for s in g) for g in gestureList]
    gestureList = list(frozenset(gestureList))  # make a unique list
    # Bucket candidate gestures by their edit distance from strokes.
    distances = {}
    for g in gestureList:
        dist = levenshteinDistance(strokes, g)
        distances.setdefault(dist, []).append(g)
    smallestKey = min(distances.keys())
    if len(distances[smallestKey]) == 1 and smallestKey <= tolerance:
        # BUG FIX: convert the single best digit string back to a list of
        # per-stroke ints (was int('242') -> [242] instead of [2, 4, 2]).
        return [int(ch) for ch in distances[smallestKey][0]]
    else:
        return None
def levenshteinDistance(s1, s2):
    """Return the Levenshtein distance between two strings as an integer.

    http://en.wikipedia.org/wiki/Levenshtein_distance
    The Levenshtein (edit) distance is how many single-character changes
    (insertions, deletions, substitutions) convert one string into the
    other, e.g. levenshteinDistance('kitten', 'sitting') == 3.
    """
    len1 = len(s1)
    len2 = len(s2)
    # matrix[i][j] = distance between s2[:i] and s1[:j].
    # BUG FIX: the old init was `list(range(len1 + 1)) * (len2 + 1)` -- a
    # flat list whose tail was dead weight and only worked because every
    # usable row was immediately overwritten.  Build the rows directly:
    # row i starts as [i, i+1, ...] so column 0 correctly holds i.
    matrix = [list(range(i, i + len1 + 1)) for i in range(len2 + 1)]
    for i in range(len2):
        for j in range(len1):
            cost = 0 if s1[j] == s2[i] else 1
            matrix[i + 1][j + 1] = min(
                matrix[i + 1][j] + 1,   # insertion
                matrix[i][j + 1] + 1,   # deletion
                matrix[i][j] + cost,    # substitution (or free match)
            )
    return matrix[len2][len1]
def setMinStrokeLen(val):
    """Set the length (in pixels) a segment must span to count as a stroke.

    BUG FIX: the assignment used to create a function-local variable, so
    the module-level _MIN_SEG_LEN was never actually updated; the global
    declaration makes the setter effective.
    """
    global _MIN_SEG_LEN
    _MIN_SEG_LEN = val
def getMinStrokeLen():
    # Return the current minimum stroke length (the module-level
    # _MIN_SEG_LEN, in pixels).
    return _MIN_SEG_LEN
# Private Functions:
def _identifyStrokes(points):
    """Split the mouse path *points* into directional strokes.

    Returns (strokes, strokeSegments): the list of directional integers
    and the matching [startIndex, endIndex] pairs into *points*.
    """
    strokes = []
    strokeSegments = []
    # calculate lengths between each sequential points
    distances = []
    for i in range(len(points)-1):
        distances.append( _distance(points[i], points[i+1]) )
    # keeps getting points until we go past the min. segment length
    #startSegPoint = 0
    #while startSegPoint < len(points)-1:
    for startSegPoint in range(len(points)-1):
        segmentDist = 0
        curDir = None
        consistent = True
        direction = None
        # Grow the segment from startSegPoint until it spans _MIN_SEG_LEN.
        for curSegPoint in range(startSegPoint, len(points)-1):
            segmentDist += distances[curSegPoint]
            if segmentDist >= _MIN_SEG_LEN:
                # check if all points are going the same direction.
                for i in range(startSegPoint, curSegPoint):
                    direction = _getDir(points[i], points[i+1])
                    if curDir is None:
                        curDir = direction
                    elif direction != curDir:
                        consistent = False
                        break
                break
        # A segment with mixed directions is discarded entirely.
        if not consistent:
            continue
        # Start a new stroke when the direction changed (or it is the first
        # stroke); otherwise this segment extends the previous stroke.
        # NOTE(review): 'direction' holds the last pair's direction here,
        # which equals curDir whenever the segment was consistent.
        elif (direction is not None and ( (not len(strokes)) or (len(strokes) and strokes[-1] != direction) )):
            strokes.append(direction)
            strokeSegments.append( [startSegPoint, curSegPoint] )
        elif len(strokeSegments):
            # update and lengthen the latest stroke since this stroke is being lengthened.
            strokeSegments[-1][1] = curSegPoint
    return strokes, strokeSegments
def _getDir(coord1, coord2):
# Return the integer of one of the 8 directions this line is going in.
# coord1 and coord2 are (x, y) integers coordinates.
x1, y1 = coord1
x2, y2 = coord2
if x1 == x2 and y1 == y2:
return None # two coordinates are the same.
elif x1 == x2 and y1 > y2:
return UP
elif x1 == x2 and y1 < y2:
return DOWN
elif x1 > x2 and y1 == y2:
return LEFT
elif x1 < x2 and y1 == y2:
return RIGHT
slope = float(y2 - y1) / float(x2 - x1)
# Figure out which quadrant the line is going in, and then
# determine the closest direction by calculating the slope
if x2 > x1 and y2 < y1: # up right quadrant
if slope > -0.4142:
return RIGHT | |
from __future__ import print_function
import os
import re
import subprocess
import io
import threading
import shlex
import sys
import shutil
import filecmp
from optparse import OptionParser
import stat
import glob
import platform
import zipfile
from xml.dom.minidom import parse
import contextlib
def abort(msg):
    """Report *msg* on stderr with a "Setup is not complete" notice, then
    terminate the process with exit status 1."""
    print("\n", msg, "\nSetup is not complete\n", file=sys.stderr)
    sys.exit(1)
def getProperties(fileName, needed):
    """Parse a Java-style .properties file and return a key->value dict.

    Keys and values may be separated by '=', ':' or plain whitespace (with
    optional surrounding whitespace); the earliest separator on the line
    wins.  Lines starting with '#' or '!' are comments; a line with no
    separator becomes a key with an empty value.  Aborts when the file is
    missing or when any key listed in *needed* is absent.
    """
    if not os.path.exists(fileName):
        abort(fileName + " file not found")
    # (removed an unused `re.compile(r"")` left over in the original)
    props = {}
    with open(fileName) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith(("#", "!")):
                continue
            # Locate the earliest of the three separator styles.
            nfirst = len(line)
            nlast = len(line)
            for sep in [r"\s*=\s*", r"\s*:\s*", r"\s+"]:
                match = re.search(sep, line)
                if match and match.start() < nfirst:
                    nfirst = match.start()
                    nlast = match.end()
            if nfirst == len(line):
                key = line
                value = ""
            else:
                key = line[:nfirst]
                value = line[nlast:]
            props[key] = value
    for item in needed:
        if item not in props:
            abort(item + " must be specified in " + fileName)
    return props
def getActions(file_name=None, required=None, binDir=False, appDir=False):
    """Parse command-line options plus the setup properties file.

    Returns (actions, arg, props): a container-specific Actions instance,
    the upper-cased command (CONFIGURE/INSTALL/UNINSTALL) and the parsed
    properties.  Must be run from the unpacked distribution directory;
    aborts with a message on any user error.
    """
    # BUG FIX: 'required' used to default to a shared mutable list.
    if required is None: required = []
    if not os.path.exists ("setup"): abort ("This must be run from the unpacked distribution directory")
    parser = OptionParser("usage: %prog [options] configure | install | uninstall")
    try:
        root = os.getuid() == 0
    except: # Probably windows
        root = 1
    if binDir:
        if root: default = '/usr/bin'
        else: default = '~/bin'
        parser.add_option("--binDir", "-b", help="location to store executables [" + default + "]", default=default)
    if appDir:
        if root: default = '/usr/share'
        else: default = '~/java'
        parser.add_option("--appDir", "-a", help="location to store java applications [" + default + "]", default=default)
    parser.add_option("--verbose", "-v", help="produce more output - this may appear twice to get even more", action="count")
    options, args = parser.parse_args()
    # BUG FIX: the message was missing the opening quote around 'install'.
    if len(args) != 1: abort("Must have one argument: 'install' or 'uninstall'")
    arg = args[0].upper()
    if arg not in ["CONFIGURE", "INSTALL", "UNINSTALL"]: abort("Must have one argument: 'install' or 'uninstall'")
    if binDir and not os.path.isdir(os.path.expanduser(options.binDir)): abort("Please create directory " + options.binDir + " or specify --binDir")
    if appDir and not os.path.isdir(os.path.expanduser(options.appDir)): abort("Please create directory " + options.appDir + " or specify --appDir")
    if file_name:
        if not os.path.exists(file_name):
            # Seed the properties file from its example and stop so the
            # user can edit it before re-running.
            shutil.copy(file_name + ".example", file_name)
            if platform.system() != "Windows": os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
            abort ("\nPlease edit " + file_name + " to meet your requirements then re-run the command")
        # The file may contain credentials: keep it private to the owner.
        if os.stat(file_name).st_mode & stat.S_IROTH:
            if platform.system() == "Windows":
                print("Warning: '" + file_name + "' should not be world readable")
            else:
                os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
                print("'" + file_name + "' mode changed to 0600")
        props = getProperties(file_name, required + ["secure", "home", "container", "port"])
        if props["secure"].lower() == "true": secure = True
        elif props["secure"].lower() == "false": secure = False
        else : abort ("Secure must be true or false")
        if "db.vendor" in props:
            abort("db.vendor should no longer be specified in " + file_name + " - consider setting db.target")
        container = props["container"]
        if container == "Glassfish": actions = GlassfishActions(props, options)
        elif container == "JBoss": actions = WildflyActions(props, options)
        else : abort ("container must be Glassfish or JBoss")
    else:
        props = {"secure":"NA"}
        actions = Actions(props, options)
    return actions, arg, props
class Actions(object):
    """Container-independent setup actions.

    Provides the shared machinery for configure/install/uninstall runs:
    copying executables and application directories, re-packing the
    distribution war after editing its configuration, and validating
    properties files.  Container-specific subclasses (e.g. WildflyActions)
    add deployment commands such as enableApp/disableApp.
    """
    def __init__(self, props, options):
        # options.verbose is None when -v was not given at all
        self.verbosity = options.verbose or 0
        self.secure = props["secure"].lower() == "true"
        # binDir/appDir only exist on options when requested via getActions()
        try: self.binDir = os.path.expanduser(options.binDir)
        except: pass
        try: self.appDir = os.path.expanduser(options.appDir)
        except: pass
        # number of configuration problems found so far (see checkNoErrors)
        self.clashes = 0
    def _fix_persistence_xml(self, container, target, logging):
        """Point persistence.xml inside the unpacked war at *container*,
        optionally setting the target database and eclipselink logging."""
        f = os.path.join("unzipped", "WEB-INF", "classes", "META-INF", "persistence.xml")
        if os.path.exists(f):
            with open(f) as fi:
                doc = parse(fi)
            for prop in doc.getElementsByTagName("property"):
                if prop.getAttribute("name") == "eclipselink.target-server":
                    prop.setAttribute("value", container)
                if target and prop.getAttribute("name") == "eclipselink.target-database":
                    prop.setAttribute("value", target)
                # Logging settings are silenced unless a level was requested.
                if prop.getAttribute("name") == "eclipselink.logging.level":
                    if logging:
                        prop.setAttribute("value", logging)
                    else:
                        prop.setAttribute("value", "OFF")
                if prop.getAttribute("name") == "eclipselink.logging.level.sql":
                    if logging:
                        prop.setAttribute("value", logging)
                    else:
                        prop.setAttribute("value", "OFF")
                if prop.getAttribute("name") == "eclipselink.logging.parameters":
                    if logging:
                        prop.setAttribute("value", "true")
                    else:
                        prop.setAttribute("value", "false")
            with open(f, "w") as fi:
                fi.write(doc.toxml())
    def _zip(self, war):
        """Re-pack the 'unzipped' tree into *war* and delete the tree."""
        z = zipfile.ZipFile("zip", "w")
        for dirName, subdirList, fileList in os.walk("unzipped"):
            shortd = dirName[9:]  # drop the leading "unzipped/" path prefix
            for fname in fileList:
                z.write(os.path.join(dirName, fname), os.path.join(shortd, fname))
        z.close()
        # os.rename cannot overwrite an existing file on Windows
        if platform.system() == "Windows": os.remove(war)
        os.rename("zip", war)
        shutil.rmtree("unzipped")
        if self.verbosity:
            print("\nConverted ", war)
    def _unzip(self):
        """Extract the single war in the cwd into 'unzipped'; return its name."""
        if os.path.exists("unzipped"):
            shutil.rmtree("unzipped")
        files = glob.glob("*.war")
        if len(files) != 1: abort("Exactly one war file must be present")
        war = files[0]
        with contextlib.closing(zipfile.ZipFile(war)) as z: z.extractall("unzipped")
        return war
    def restartApp(self, appName):
        """Bounce *appName* via the subclass disable/enable hooks."""
        self.disableApp(appName)
        self.enableApp(appName)
    def getBinDir(self):
        """Return the directory executables are installed into."""
        return self.binDir
    def execute(self, cmd):
        """Run *cmd*, teeing its output; return (stdout, stderr, returncode).

        Tee (defined elsewhere in this module) is assumed to mirror a
        process stream into the given buffer -- TODO confirm.
        """
        if platform.system() == "Windows":
            cmd = cmd.split()
            proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            cmd = shlex.split(cmd)
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stringOut = io.StringIO()
        mstdout = Tee(proc.stdout, stringOut)
        mstdout.start()
        stringErr = io.StringIO()
        mstderr = Tee(proc.stderr, stringErr)
        mstderr.start()
        rc = proc.wait()
        mstdout.join()
        mstderr.join()
        out = stringOut.getvalue().strip()
        stringOut.close()
        err = stringErr.getvalue().strip()
        stringErr.close()
        return out, err, rc
    def configure(self, file_name, expected):
        """Ensure *file_name* exists (seeding it from its .example) and that
        every key in *expected* is set; at high verbosity also report
        differences against the .example file."""
        if not os.path.exists(file_name):
            shutil.copy(file_name + ".example", file_name)
            print("\nCopied " + file_name + ".example" + " to " + file_name)
            print("Please edit", file_name, "to meet your requirements")
            abort("... and then re-run the command")
        props = getProperties(file_name, [])
        example = getProperties(file_name + ".example", [])
        for key in expected:
            prop = props.get(key)
            if not prop:
                self.clashes += 1
                print("Error: property", key, "is not set in", file_name)
        if self.verbosity > 1:
            for key in props.keys():
                if key in example:
                    if props[key] != example[key]: print("\nValue for" , key, "in", file_name, "is", "'" + props[key] + "'", "which differs from example:", "'" + example[key] + "'")
                else: print("\nValue for" , key, "in", file_name, "is", "'" + props[key] + "'", "is not in example")
            for key in example.keys():
                if key not in props: print("\nValue for" , key, "not in", file_name, "but is in example:", "'" + example[key] + "'")
    def configureFileForWar(self, f):
        """Seed *f* from its .example when missing and count it as a clash so
        the run stops only after every missing file has been reported."""
        if not os.path.exists(f):
            shutil.copy(f + ".example", f)
            print("\nCopied", f + ".example", "to", f)
            print("Please edit", f, "to meet your requirements")
            self.clashes += 1
    def checkNoErrors(self):
        """Abort when any configuration clashes were recorded."""
        if self.clashes:
            if self.clashes == 1:
                abort("Please edit configuration file and try again as an error was reported.")
            else:
                abort("Please edit configuration files and try again as " + str(self.clashes) + " errors were reported.")
    def installFile(self, file, dir):
        """Copy *file* into directory *dir*; abort if either is missing."""
        if not os.path.isdir(dir): abort ("Please create directory " + dir + " to install " + file)
        if not os.path.exists(file): abort (file + " not found")
        shutil.copy(file , dir)
        if self.verbosity:
            print("\n", file, "copied to", dir)
    def removeFile(self, file, dir):
        """Delete *file* from directory *dir* if it is present."""
        dest = os.path.join(dir, file)
        if os.path.exists(dest):
            os.remove(dest)
            if self.verbosity:
                print("\n", file, "removed from", dir)
    def installDir(self, file, dir):
        """Copy directory *file* into *dir*, replacing any existing copy."""
        if not os.path.isdir(dir): abort ("Please create directory " + dir + " to install " + file)
        if not os.path.exists(file): abort (file + " not found")
        if not os.path.isdir(file): abort (file + " is not a directory")
        dest = os.path.join(dir, file)
        if os.path.exists(dest): shutil.rmtree(dest)
        shutil.copytree(file , dest)
        if self.verbosity:
            print("\n", file, "copied to", dir)
    def removeDir(self, file, dir):
        """Delete directory *file* from *dir* if it is present."""
        dest = os.path.join(dir, file)
        if os.path.exists(dest):
            shutil.rmtree(dest)
            if self.verbosity:
                # BUG FIX: previously printed undefined name 'directory',
                # raising NameError whenever a verbose removal was logged.
                print("\n", file, "removed from", dir)
class WildflyActions(Actions):
def __init__(self, props, options):
super(WildflyActions, self).__init__(props, options)
wildfly = props["home"]
if not os.path.exists(wildfly): abort("wildfly directory " + wildfly + " does not exist")
self.cliCommand = os.path.join(wildfly, "bin", "jboss-cli.sh -c")
cmd = self.cliCommand + " --version"
out, err, rc = self.execute(cmd)
if rc: abort(out + err)
version = "Unknown!!!"
for line in out.splitlines():
if line.startswith("JBoss AS product"): version = line[18:]
if self.verbosity: print("You are using", version)
def enableApp(self, appName):
self._cli("deploy --name=" + appName)
def disableApp(self, appName):
self._cli("undeploy " + appName + " --keep-content", tolerant=True, printOutput=True)
def getAppName(self, app):
cmd = self.cliCommand + " " + "'ls deployment'"
out, err, rc = self.execute(cmd)
if rc: abort(err)
for line in out.splitlines():
if (line.startswith(app + "-")):
return line
def _cli(self, command, tolerant=False, printOutput=False):
cmd = self.cliCommand + " '" + command + "'"
if self.verbosity: print("\nexecute: " + cmd )
out, err, rc = self.execute(cmd)
if self.verbosity > 1 or | |
db_refs={'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp1, True)])
nrasgtp2 = Agent('NRAS', db_refs={'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp2, True)])
st1 = Phosphorylation(src, nrasgtp1, 'tyrosine', '32')
st2 = Phosphorylation(src, nrasgtp2, 'tyrosine', '32')
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
assert len(stmts) == 1
assert stmts[0].equals(st2)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st1)
def test_complex_refinement():
    """A two-member and a three-member Complex sharing agents stay as two
    separate top-level statements (neither refines the other)."""
    ras, raf, mek = Agent('RAS'), Agent('RAF'), Agent('MEK')
    pair = Complex([ras, raf])
    triple = Complex([mek, ras, raf])
    pa = Preassembler(bio_ontology, stmts=[pair, triple])
    pa.combine_related()
    assert len(pa.unique_stmts) == 2
    assert len(pa.related_stmts) == 2
def test_complex_agent_refinement():
    """Complexes whose RAF members carry opposite ubiquitination flags are
    distinct and unrelated."""
    ras = Agent('RAS')
    raf_ub = Agent('RAF', mods=[ModCondition('ubiquitination', None, None, True)])
    raf_no_ub = Agent('RAF', mods=[ModCondition('ubiquitination', None, None, False)])
    pa = Preassembler(bio_ontology,
                      stmts=[Complex([ras, raf_ub]), Complex([ras, raf_no_ub])])
    pa.combine_related()
    assert len(pa.unique_stmts) == 2
    assert len(pa.related_stmts) == 2
def test_mod_sites_refinement():
    """Placeholder: a statement with more specific modification context
    should be supported by a less-specific statement."""
    # TODO: implement the actual refinement assertions.
    assert True
def test_binding_site_refinement():
    """Placeholder: a statement naming the binding site of a two-protein
    interaction should be supported by one without that information."""
    # TODO: implement the actual refinement assertions.
    assert True
def test_activating_substitution_refinement():
    """Refinement holds only when the entities refine AND every other field
    (mutation, activity type, polarity) matches."""
    mc_g12d = MutCondition('12', 'G', 'D')
    mc_q61l = MutCondition('61', 'Q', 'L')
    nras_g12d = Agent('NRAS', mutations=[mc_g12d], db_refs={'HGNC': '7989'})
    nras_q61l = Agent('NRAS', mutations=[mc_q61l], db_refs={'HGNC': '7989'})
    ras_g12d = Agent('RAS', mutations=[mc_g12d], db_refs={'FPLX': 'RAS'})
    stmts = [
        ActiveForm(ras_g12d, 'gtpbound', True, evidence=Evidence(text='bar')),
        ActiveForm(nras_g12d, 'gtpbound', True, evidence=Evidence(text='foo')),
        ActiveForm(nras_q61l, 'gtpbound', True, evidence=Evidence(text='bar')),
        ActiveForm(nras_g12d, 'phosphatase', True, evidence=Evidence(text='bar')),
        ActiveForm(nras_g12d, 'gtpbound', False, evidence=Evidence(text='bar')),
    ]
    # Among all ordered pairs, only st2 (NRAS G12D) refines st1 (RAS G12D).
    for i, si in enumerate(stmts):
        for j, sj in enumerate(stmts):
            if i == j:
                continue
            if (i, j) == (1, 0):
                assert si.refinement_of(sj, bio_ontology)
            else:
                assert not si.refinement_of(sj, bio_ontology)
def test_translocation():
    """Translocations to different destinations remain separate; the one
    without a destination supports both."""
    stmts = [
        Translocation(Agent('AKT'), None, None),
        Translocation(Agent('AKT'), None, 'plasma membrane'),
        Translocation(Agent('AKT'), None, 'nucleus'),
    ]
    pa = Preassembler(bio_ontology, stmts=stmts)
    pa.combine_related()
    assert len(pa.related_stmts) == 2, pa.related_stmts
def test_grounding_aggregation():
    """Statements whose BRAF groundings are incompatible stay distinct,
    while TEXT-only variants collapse onto compatible groundings."""
    groundings = [
        {'TEXT': 'braf', 'HGNC': '1097'},
        {'TEXT': 'BRAF'},
        {'TEXT': 'Braf', 'UP': 'P15056'},
        {'TEXT': 'B-raf', 'UP': 'P15056', 'HGNC': '1097'},
    ]
    stmts = [Phosphorylation(None, Agent('BRAF', db_refs=dr))
             for dr in groundings]
    pa = Preassembler(bio_ontology, stmts=stmts)
    unique_stmts = pa.combine_duplicates()
    assert len(unique_stmts) == 3, unique_stmts
def test_grounding_aggregation_complex():
    """Complex member order does not matter for duplicate detection, but
    incompatible BRAF groundings still yield distinct statements."""
    mek = Agent('MEK')
    braf_hgnc = Agent('BRAF', db_refs={'TEXT': 'braf', 'HGNC': '1097'})
    braf_text = Agent('BRAF', db_refs={'TEXT': 'BRAF', 'dummy': 'dummy'})
    braf_up = Agent('BRAF', db_refs={'TEXT': 'Braf', 'UP': 'P15056'})
    pa = Preassembler(bio_ontology,
                      stmts=[Complex([mek, braf_hgnc]),
                             Complex([braf_text, mek]),
                             Complex([mek, braf_up])])
    unique_stmts = pa.combine_duplicates()
    assert len(unique_stmts) == 3, unique_stmts
def test_render_stmt_graph():
    """The rendered support graph has one node per statement and one edge
    per direct or transitive support relation."""
    braf = Agent('BRAF', db_refs={'HGNC': '1097'})
    mek1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
    mek_family = Agent('MEK', db_refs={'FPLX':'MEK'})
    stmts = [
        Phosphorylation(braf, mek_family),                 # p0: most generic
        Phosphorylation(braf, mek1),                       # p1
        Phosphorylation(braf, mek1, position='218'),       # p2
        Phosphorylation(braf, mek1, position='222'),       # p3
        Phosphorylation(braf, mek1, 'serine'),             # p4
        Phosphorylation(braf, mek1, 'serine', '218'),      # p5
        Phosphorylation(braf, mek1, 'serine', '222'),      # p6
    ]
    pa = Preassembler(bio_ontology, stmts=stmts)
    pa.combine_related()
    graph = render_stmt_graph(pa.related_stmts, reduce=False)
    # One node for each statement.
    assert len(graph.nodes()) == 7
    # Edges: p0 supports p1-p6 (6), p1 supports p2-p6 (5), p2->p5 (1),
    # p3->p6 (1), p4 supports p5-p6 (2); p5/p6 are top-level.
    # 6 + 5 + 1 + 1 + 2 = 15 edges.
    assert len(graph.edges()) == 15
def test_flatten_evidence_hierarchy():
    """flatten_evidence copies supporting evidence up to the top-level
    statement without linking the underlying Evidence objects."""
    braf = Agent('BRAF')
    mek = Agent('MAP2K1')
    generic = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
    specific = Phosphorylation(braf, mek, 'S', '218',
                               evidence=[Evidence(text='bar')])
    pa = Preassembler(bio_ontology, stmts=[generic, specific])
    pa.combine_related()
    assert len(pa.related_stmts) == 1
    flattened = flatten_evidence(pa.related_stmts)
    assert len(flattened) == 1
    top_stmt = flattened[0]
    assert len(top_stmt.evidence) == 2
    texts = [e.text for e in top_stmt.evidence]
    assert 'bar' in texts
    assert 'foo' in texts
    assert len(top_stmt.supported_by) == 1
    supporting_stmt = top_stmt.supported_by[0]
    assert len(supporting_stmt.evidence) == 1
    assert supporting_stmt.evidence[0].text == 'foo'
    # Mutating the supporting statement's evidence must not leak into the
    # flattened copies held by the top-level statement.
    supporting_stmt.evidence[0].text = 'changed_foo'
    assert supporting_stmt.evidence[0].text == 'changed_foo'
    top_texts = [e.text for e in top_stmt.evidence]
    assert 'changed_foo' not in top_texts
    assert 'foo' in top_texts
    assert {ev.annotations.get('support_type') for ev in top_stmt.evidence} \
        == {'direct', 'supported_by'}
def test_flatten_evidence_multilevel():
    """Evidence is gathered transitively through multiple support levels."""
    braf = Agent('BRAF')
    mek = Agent('MAP2K1')
    stmts = [
        Phosphorylation(braf, mek, evidence=[Evidence(text='foo')]),
        Phosphorylation(braf, mek, 'S', evidence=[Evidence(text='bar')]),
        Phosphorylation(braf, mek, 'S', '218', evidence=[Evidence(text='baz')]),
    ]
    pa = Preassembler(bio_ontology, stmts=stmts)
    pa.combine_related()
    assert len(pa.related_stmts) == 1
    flattened = flatten_evidence(pa.related_stmts)
    assert len(flattened) == 1
    top_stmt = flattened[0]
    assert len(top_stmt.evidence) == 3, len(top_stmt.evidence)
    anns = [ev.annotations['support_type'] for ev in top_stmt.evidence]
    assert anns.count('direct') == 1
    assert anns.count('supported_by') == 2
def test_flatten_evidence_hierarchy_supports():
    """With collect_from='supports', evidence flows down to the supporting
    (less specific) statement instead of up to the top level."""
    braf = Agent('BRAF')
    mek = Agent('MAP2K1')
    generic = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
    specific = Phosphorylation(braf, mek, 'S', '218',
                               evidence=[Evidence(text='bar')])
    pa = Preassembler(bio_ontology, stmts=[generic, specific])
    pa_stmts = pa.combine_related(return_toplevel=False)
    assert len(pa_stmts) == 2
    flattened = flatten_evidence(pa_stmts, collect_from='supports')
    assert len(flattened) == 2
    top_stmt = flattened[1]
    assert len(top_stmt.evidence) == 1
    assert 'bar' in [e.text for e in top_stmt.evidence]
    assert len(top_stmt.supported_by) == 1
    supporting_stmt = top_stmt.supported_by[0]
    assert len(supporting_stmt.evidence) == 2
    assert {e.text for e in supporting_stmt.evidence} == {'foo', 'bar'}
def test_flatten_stmts():
    """flatten_stmts returns every unique statement in the hierarchy, from
    either the unique or the top-level (related) view."""
    stmts = [
        Phosphorylation(Agent('MAP3K5'), Agent('RAF1'), 'S', '338'),
        Phosphorylation(None, Agent('RAF1'), 'S', '338'),
        Phosphorylation(None, Agent('RAF1')),
        Phosphorylation(Agent('PAK1'), Agent('RAF1'), 'S', '338'),
        Phosphorylation(None, Agent('RAF1'), evidence=Evidence(text='foo')),
    ]
    pa = Preassembler(bio_ontology, stmts=stmts)
    pa.combine_duplicates()
    pa.combine_related()
    assert len(pa.related_stmts) == 2
    assert len(flatten_stmts(pa.unique_stmts)) == 4
    assert len(flatten_stmts(pa.related_stmts)) == 4
def test_complex_refinement_order():
    """Member order must not prevent a Complex refinement being detected."""
    plain = Complex([Agent('MED23'), Agent('ELK1')])
    phospho = Complex([Agent('ELK1', mods=[ModCondition('phosphorylation')]),
                       Agent('MED23')])
    pa = Preassembler(bio_ontology, stmts=[plain, phospho])
    pa.combine_duplicates()
    pa.combine_related()
    assert len(pa.related_stmts) == 1
def test_activation_refinement():
    """Activation and Inhibition of the same pair must not refine each other."""
    alcohol = Agent('alcohol', db_refs={'CHEBI': 'CHEBI:16236',
                                        'HMDB': 'HMDB00108',
                                        'PUBCHEM': '702',
                                        'TEXT': 'alcohol'})
    endotoxin = Agent('endotoxin', db_refs={'TEXT': 'endotoxin'})
    pa = Preassembler(bio_ontology,
                      stmts=[Inhibition(alcohol, endotoxin),
                             Activation(alcohol, endotoxin)])
    pa.combine_duplicates()
    assert len(pa.unique_stmts) == 2
    pa.combine_related()
    # Opposite-polarity statements stay as two separate top-level statements.
    assert len(pa.related_stmts) == 2
def test_homodimer_refinement():
    """A homodimer is not refined by a heterodimer containing the same protein."""
    erbb = Agent('ERBB2')
    homodimer = Complex([erbb, erbb])
    heterodimer = Complex([erbb, Agent('EGFR')])
    pa = Preassembler(bio_ontology, stmts=[homodimer, heterodimer])
    pa.combine_duplicates()
    assert len(pa.unique_stmts) == 2
    pa.combine_related()
    # Both complexes remain top-level; neither refines the other.
    assert len(pa.related_stmts) == 2
def test_return_toplevel():
    """combine_related honors the return_toplevel flag in both modes."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    specific = Phosphorylation(src, nras, 'tyrosine', '32')
    generic = Phosphorylation(src, nras)
    pa = Preassembler(bio_ontology, stmts=[specific, generic])
    top_only = pa.combine_related(return_toplevel=True)
    assert len(top_only) == 1
    assert len(top_only[0].supported_by) == 1
    assert len(top_only[0].supported_by[0].supports) == 1
    all_stmts = pa.combine_related(return_toplevel=False)
    assert len(all_stmts) == 2
    # Locate the generic (no-residue) statement; the other one is top level.
    ix = 1 if all_stmts[0].residue else 0
    assert len(all_stmts[1 - ix].supported_by) == 1
    assert len(all_stmts[1 - ix].supported_by[0].supports) == 1
    assert len(all_stmts[ix].supports) == 1
    assert len(all_stmts[ix].supports[0].supported_by) == 1
def test_conversion_refinement():
    """Conversions refine along the family hierarchy, ignoring reactant order."""
    ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
    hras = Agent('HRAS', db_refs={'HGNC': '5173'})
    gtp = Agent('GTP')
    gdp = Agent('GDP')
    stmts = [Conversion(ras, gtp, gdp),
             Conversion(hras, gtp, gdp),
             Conversion(hras, [gtp, gdp], gdp),
             Conversion(hras, [gdp, gtp], gdp)]
    pa = Preassembler(bio_ontology, stmts=stmts)
    toplevel_stmts = pa.combine_related()
    assert len(toplevel_stmts) == 2
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_influence_duplicate():
    """Same-direction Influences are duplicates; the reversed one is not."""
    gov = 'wm/concept/causal_factor/social_and_political/government'
    agr = 'wm/concept/causal_factor/agriculture/crop_production'
    cgov = Event(Concept('government', db_refs={'WM': [(gov, 1.0)]}))
    cagr = Event(Concept('agriculture', db_refs={'WM': [(agr, 1.0)]}))
    print(cgov.matches_key())
    stmts = [Influence(cgov, cagr, evidence=[Evidence(source_api='eidos1')]),
             Influence(cagr, cgov, evidence=[Evidence(source_api='eidos2')]),
             Influence(cgov, cagr, evidence=[Evidence(source_api='eidos3')])]
    pa = Preassembler(world_ontology, stmts)
    unique_stmts = sorted(pa.combine_duplicates(),
                          key=lambda stmt: len(stmt.evidence))
    assert len(unique_stmts) == 2
    assert len(unique_stmts[0].evidence) == 1
    assert len(unique_stmts[1].evidence) == 2, unique_stmts
    # The merged statement keeps the evidence of both duplicates.
    sources = [ev.source_api for ev in unique_stmts[1].evidence]
    assert set(sources) == {'eidos1', 'eidos3'}
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_influence_refinement():
tran = 'wm/concept/causal_factor/access/infrastructure_access/'\
'transportation'
ship = 'wm/concept/causal_factor/access/infrastructure_access/' \
'transportation/shipping'
agr = 'wm/concept/causal_factor/economic_and_commerce/' \
'economic_activity/livelihood'
ctran = Event(Concept('transportation', db_refs={'WM': [(tran, 1.0)]}))
cship = Event(Concept('trucking', db_refs={'WM': [(ship, 1.0)]}))
cagr = Event(Concept('agriculture', db_refs={'WM': [(agr, 1.0)]}))
stmt1 = Influence(ctran, cagr, evidence=[Evidence(source_api='eidos1')])
stmt2 = Influence(cship, cagr, evidence=[Evidence(source_api='eidos2')])
stmt3 = Influence(cagr, ctran, evidence=[Evidence(source_api='eidos3')])
pa = Preassembler(world_ontology, [stmt1, stmt2, stmt3])
rel_stmts = pa.combine_related()
assert len(rel_stmts) == 2, rel_stmts
truck_stmt = [st for st in rel_stmts if st.subj.concept.name | |
#
# Copyright 2021-2022 <NAME>
# 2021 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import asyncio
import logging
import os
import shutil
import traceback
import urllib.parse
from functools import reduce
from gi.repository import Gio, GLib, Gtk, GtkSource
import dtoolcore.utils
from dtool_info.utils import sizeof_fmt
import dtool_lookup_api.core.config
from dtool_lookup_api.core.LookupClient import ConfigurationBasedLookupClient
# As of dtool-lookup-api 0.5.0, the following line still is a necessity to
# disable prompting for credentials on the command line. This behavior
# will change in future versions.
dtool_lookup_api.core.config.Config.interactive = False
from ..models.base_uris import all, LocalBaseURIModel
from ..models.datasets import DatasetModel
from ..models.settings import settings
from ..utils.copy_manager import CopyManager
from ..utils.date import date_to_string
from ..utils.dependency_graph import DependencyGraph
from ..utils.logging import FormattedSingleMessageGtkInfoBarHandler, DefaultFilter, _log_nested
from ..utils.query import (is_valid_query, dump_single_line_query_text)
from ..utils.subprocess import launch_default_app_for_uri
from ..widgets.base_uri_list_box import LOOKUP_BASE_URI
from ..widgets.base_uri_row import DtoolBaseURIRow
from ..widgets.search_popover import DtoolSearchPopover
from ..widgets.search_results_row import DtoolSearchResultsRow
from .dataset_name_dialog import DatasetNameDialog
from .about_dialog import AboutDialog
from .settings_dialog import SettingsDialog
from .log_window import LogWindow
_logger = logging.getLogger(__name__)
def _fill_manifest_tree_store(store, manifest, parent=None):
    """Populate a Gtk tree store from a dataset manifest.

    Directory nodes are created lazily from each item's relpath; leaf rows
    carry name, human-readable size, timestamp string, and item UUID.
    """
    known_dirs = {}
    store.clear()

    def _dir_node(path, top_parent):
        # Return the node for a directory path, creating ancestors on demand.
        if not path:
            return top_parent
        if path in known_dirs:
            return known_dirs[path]
        head, tail = os.path.split(path)
        node = store.append(_dir_node(head, top_parent), [tail, '', '', ''])
        known_dirs[path] = node
        return node

    # Sort by relpath so siblings appear in a stable, readable order.
    for uuid, values in sorted(manifest, key=lambda kv: kv[1]['relpath']):
        head, tail = os.path.split(values['relpath'])
        row = [tail,
               sizeof_fmt(values['size_in_bytes']).strip(),
               f'{date_to_string(values["utc_timestamp"])}',
               uuid]
        store.append(_dir_node(head, parent), row)
@Gtk.Template(filename=f'{os.path.dirname(__file__)}/main_window.ui')
class MainWindow(Gtk.ApplicationWindow):
    """Main application window, instantiated from the `main_window.ui` template."""
    __gtype_name__ = 'DtoolMainWindow'
    # Hard cap on how many datasets are listed / returned by a search.
    _max_nb_datasets = 100
    # Widgets bound to children declared in the UI template file.
    create_dataset_button = Gtk.Template.Child()
    menu_button = Gtk.Template.Child()
    search_entry = Gtk.Template.Child()
    #copy_dataset_spinner = Gtk.Template.Child()
    base_uri_list_box = Gtk.Template.Child()
    dataset_list_box = Gtk.Template.Child()
    main_stack = Gtk.Template.Child()
    main_paned = Gtk.Template.Child()
    main_label = Gtk.Template.Child()
    main_spinner = Gtk.Template.Child()
    dataset_stack = Gtk.Template.Child()
    dataset_box = Gtk.Template.Child()
    dataset_label = Gtk.Template.Child()
    uuid_label = Gtk.Template.Child()
    uri_label = Gtk.Template.Child()
    name_label = Gtk.Template.Child()
    created_by_label = Gtk.Template.Child()
    frozen_at_label = Gtk.Template.Child()
    size_label = Gtk.Template.Child()
    show_button = Gtk.Template.Child()
    add_items_button = Gtk.Template.Child()
    freeze_button = Gtk.Template.Child()
    copy_button = Gtk.Template.Child()
    progress_revealer = Gtk.Template.Child()
    progress_button = Gtk.Template.Child()
    progress_popover = Gtk.Template.Child()
    edit_readme_switch = Gtk.Template.Child()
    save_metadata_button = Gtk.Template.Child()
    dependency_stack = Gtk.Template.Child()
    dependency_view = Gtk.Template.Child()
    dependency_spinner = Gtk.Template.Child()
    dependency_graph_widget = Gtk.Template.Child()
    readme_source_view = Gtk.Template.Child()
    readme_spinner = Gtk.Template.Child()
    readme_stack = Gtk.Template.Child()
    readme_view = Gtk.Template.Child()
    manifest_spinner = Gtk.Template.Child()
    manifest_stack = Gtk.Template.Child()
    manifest_tree_view = Gtk.Template.Child()
    manifest_tree_store = Gtk.Template.Child()
    manifest_view = Gtk.Template.Child()
    settings_button = Gtk.Template.Child()
    error_bar = Gtk.Template.Child()
    error_label = Gtk.Template.Child()
    def __init__(self, *args, **kwargs):
        """Set up widgets, logging, helper dialogs and window-scoped actions."""
        super().__init__(*args, **kwargs)
        self.application = self.get_application()
        # Show placeholder labels until data has been loaded.
        self.main_stack.set_visible_child(self.main_label)
        self.dataset_stack.set_visible_child(self.dataset_label)
        # README editor: YAML syntax highlighting.
        self.readme_buffer = self.readme_source_view.get_buffer()
        lang_manager = GtkSource.LanguageManager()
        self.readme_buffer.set_language(lang_manager.get_language("yaml"))
        self.readme_buffer.set_highlight_syntax(True)
        self.readme_buffer.set_highlight_matching_brackets(True)
        self.error_bar.set_revealed(False)
        self.progress_revealer.set_reveal_child(False)
        # connect log handler to error bar
        root_logger = logging.getLogger()
        self.log_handler = FormattedSingleMessageGtkInfoBarHandler(info_bar=self.error_bar, label=self.error_label)
        # exclude unwanted log messages from being displayed in error bar
        self.log_handler.addFilter(DefaultFilter())
        root_logger.addHandler(self.log_handler)
        # connect a search popover with search entry
        self.search_popover = DtoolSearchPopover(search_entry=self.search_entry)
        self.log_window = LogWindow(application=self.application)
        self.settings_dialog = SettingsDialog(application=self.application)
        self.about_dialog = AboutDialog(application=self.application)
        # window-scoped actions; the dummy variants only define parameter types
        # search action
        search_text_variant = GLib.Variant.new_string("dummy")
        search_action = Gio.SimpleAction.new("search", search_text_variant.get_type())
        search_action.connect("activate", self.do_search)
        self.add_action(search_action)
        # select row by row index in dataset list box action
        row_index_variant = GLib.Variant.new_uint32(0)
        select_dataset_action = Gio.SimpleAction.new("select-dataset", row_index_variant.get_type())
        select_dataset_action.connect("activate", self.do_select_dataset_row_by_row_index)
        self.add_action(select_dataset_action)
        # select row by uri in dataset list box action
        uri_variant = GLib.Variant.new_string('dummy')
        select_dataset_by_uri_action = Gio.SimpleAction.new("select-dataset-by-uri", uri_variant.get_type())
        select_dataset_by_uri_action.connect("activate", self.do_select_dataset_row_by_uri)
        self.add_action(select_dataset_by_uri_action)
        # show details of dataset by row index in dataset list box action
        row_index_variant = GLib.Variant.new_uint32(0)
        show_dataset_action = Gio.SimpleAction.new("show-dataset", row_index_variant.get_type())
        show_dataset_action.connect("activate", self.do_show_dataset_details_by_row_index)
        self.add_action(show_dataset_action)
        # show details of dataset by uri in dataset list box action
        uri_variant = GLib.Variant.new_string("dummy")
        show_dataset_by_uri_action = Gio.SimpleAction.new("show-dataset-by-uri", uri_variant.get_type())
        show_dataset_by_uri_action.connect("activate", self.do_show_dataset_details_by_uri)
        self.add_action(show_dataset_by_uri_action)
        # search, select and show first search result subsequently
        row_index_variant = GLib.Variant.new_string("dummy")
        search_select_show_action = Gio.SimpleAction.new("search-select-show", row_index_variant.get_type())
        search_select_show_action.connect("activate", self.do_search_select_and_show)
        self.add_action(search_select_show_action)
        # get item
        dest_file_variant = GLib.Variant.new_string("dummy")
        get_item_action = Gio.SimpleAction.new("get-item", dest_file_variant.get_type())
        get_item_action.connect("activate", self.do_get_item)
        self.add_action(get_item_action)
        # refresh view
        refresh_view_action = Gio.SimpleAction.new("refresh-view")
        refresh_view_action.connect("activate", self.do_refresh_view)
        self.add_action(refresh_view_action)
        # Allow the dependency graph widget to trigger UUID searches.
        self.dependency_graph_widget.search_by_uuid = self._search_by_uuid
        self._copy_manager = CopyManager(self.progress_revealer, self.progress_popover)
        _logger.debug(f"Constructed main window for app '{self.application.get_application_id()}'")
# utility methods
    def refresh(self):
        """Refresh view.

        Remembers the currently selected dataset URI, then refreshes the base
        URI list asynchronously.
        """
        dataset_row = self.dataset_list_box.get_selected_row()
        dataset_uri = None
        if dataset_row is not None:
            dataset_uri = dataset_row.dataset.uri
        _logger.debug(f"Keep '{dataset_uri}' for dataset refresh.")
        async def _refresh():
            # first, refresh base uri list and its selection
            await self._refresh_base_uri_list_box()
            _logger.debug(f"Done refreshing base URIs.")
            # on_base_uri_selected(self, list_box, row) called by selection
            # above already
            # TODO: following restoration of selected dataset needs to happen
            #   after base URI has been loaded, but on_base_uri_selected
            #   spawns another task, hence above "await" won't wait for the
            #   process to complete. Need a signal instead.
            # if dataset_uri is not None:
            #     _logger.debug(f"Select and show '{dataset_uri}'.")
            #     self._select_and_show_by_uri(dataset_uri)
        asyncio.create_task(_refresh())
async def _refresh_base_uri_list_box(self):
# bookkeeping of current state
base_uri_row = self.base_uri_list_box.get_selected_row()
base_uri = None
if isinstance(base_uri_row, DtoolBaseURIRow):
base_uri = str(base_uri_row.base_uri)
elif isinstance(base_uri_row, DtoolSearchResultsRow):
base_uri = LOOKUP_BASE_URI
# first, refresh list box
await self.base_uri_list_box.refresh()
# second, refresh base uri list selection
if base_uri is not None:
_logger.debug(f"Reselect base URI '{base_uri}")
self._select_base_uri_row_by_uri(base_uri)
# removed these utility functions from inner scope of on_search_activate
# in order to decouple actual signal handler and functionality
def _update_search_summary(self, datasets):
row = self.base_uri_list_box.search_results_row
total_size = sum([0 if dataset.size_int is None else dataset.size_int for dataset in datasets])
row.info_label.set_text(f'{len(datasets)} datasets, {sizeof_fmt(total_size).strip()}')
    async def _fetch_search_results(self, keyword, on_show=None):
        """Fetch datasets for `keyword` and fill the dataset list box.

        Structured queries are dispatched to `DatasetModel.query`, free text
        to `DatasetModel.search`, and an empty keyword lists all datasets.
        Errors are surfaced via `show_error`; the spinner is always stopped.
        """
        row = self.base_uri_list_box.search_results_row
        row.start_spinner()
        try:
            # datasets = await DatasetModel.search(keyword)
            if keyword:
                if is_valid_query(keyword):
                    _logger.debug("Valid query specified.")
                    datasets = await DatasetModel.query(keyword)
                else:
                    _logger.debug("Specified search text is not a valid query, just perform free text search.")
                    # NOTE: server side allows a dict with the key-value pairs
                    # "free_text", "creator_usernames", "base_uris", "uuids", "tags",
                    # via route '/dataset/search', where all except "free_text"
                    # can be lists and are translated to logical "and" or "or"
                    # constructs on the server side. With the special treatment
                    # of the 'uuid' keyword above, should we introduce similar
                    # options for the other available keywords?
                    datasets = await DatasetModel.search(keyword)
            else:
                _logger.debug("No keyword specified, list all datasets.")
                datasets = await DatasetModel.query_all()
            if len(datasets) > self._max_nb_datasets:
                _logger.warning(
                    f"{len(datasets)} search results exceed allowed displayed maximum of {self._max_nb_datasets}. "
                    f"Only the first {self._max_nb_datasets} results are shown. Narrow down your search.")
                datasets = datasets[:self._max_nb_datasets]  # Limit number of datasets that are shown
            row.search_results = datasets  # Cache datasets
            self._update_search_summary(datasets)
            if self.base_uri_list_box.get_selected_row() == row:
                # Only update if the row is still selected
                self.dataset_list_box.fill(datasets, on_show=on_show)
        except Exception as e:
            self.show_error(e)
        self.base_uri_list_box.select_search_results_row()
        self.main_stack.set_visible_child(self.main_paned)
        row.stop_spinner()
def _search_by_uuid(self, uuid):
search_text = dump_single_line_query_text({"uuid": uuid})
self._search_by_search_text(search_text)
def _search_by_search_text(self, search_text):
self.activate_action('search-select-show', GLib.Variant.new_string(search_text))
# utility methods - dataset selection
def _select_dataset_row_by_row_index(self, index):
"""Select dataset row in dataset list box by index."""
row = self.dataset_list_box.get_row_at_index(index)
if row is not None:
_logger.debug(f"Dataset row {index} selected.")
self.dataset_list_box.select_row(row)
else:
_logger.info(f"No dataset row with index {index} available for selection.")
def _select_dataset_row_by_uri(self, uri):
"""Select dataset row in dataset list box by uri."""
index = self.dataset_list_box.get_row_index_from_uri(uri)
self._select_dataset_row_by_row_index(index)
def _show_dataset_details(self, dataset):
asyncio.create_task(self._update_dataset_view(dataset))
self.dataset_stack.set_visible_child(self.dataset_box)
def _show_dataset_details_by_row_index(self, index):
row = self.dataset_list_box.get_row_at_index(index)
if row is not None:
_logger.debug(f"{row.dataset.name} shown.")
self._show_dataset_details(row.dataset)
else:
_logger.info(f"No dataset row with index {index} available for selection.")
    def _show_dataset_details_by_uri(self, uri):
        """Show details for the dataset identified by its URI.

        (Docstring previously said "Select ..."; this method shows details.)
        """
        index = self.dataset_list_box.get_row_index_from_uri(uri)
        self._show_dataset_details_by_row_index(index)
def _select_and_show_by_row_index(self, index=0):
self._select_dataset_row_by_row_index(index)
self._show_dataset_details_by_row_index(index)
def _select_and_show_by_uri(self, uri):
self._select_dataset_row_by_uri(uri)
self._show_dataset_details_by_uri(uri)
def _search(self, search_text, on_show=None):
_logger.debug(f"Evoke search with search text {search_text}.")
self.main_stack.set_visible_child(self.main_spinner)
row = self.base_uri_list_box.search_results_row
row.search_results = None
asyncio.create_task(self._fetch_search_results(search_text, on_show))
def _search_select_and_show(self, search_text):
_logger.debug(f"Search '{search_text}'...")
self._search(search_text, on_show=lambda _: self._select_and_show_by_row_index())
def _get_selected_items(self):
"""Returns (name uuid) tuples of items selected in manifest tree store."""
selection = self.manifest_tree_view.get_selection()
model, paths = selection.get_selected_rows()
items = []
for path in paths:
column_iter = model.get_iter(path)
item_name = model.get_value(column_iter, 0)
item_uuid = model.get_value(column_iter, 3)
items.append((item_name, item_uuid))
return | |
import sys
sys.path.append('..')
import copy
import numpy as np
import sys
import random
import config
import threading
from warnings import simplefilter
from datetime import datetime
from sklearn import metrics
from agent import Agent
from message import Message
from utils.dp_mechanisms import laplace
import utils.diffie_hellman as dh
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import MinMaxScaler
from pyspark.ml.classification import LogisticRegression
simplefilter(action='ignore', category=FutureWarning)
class ClientAgentServerless(Agent):
    def __init__(self, agent_number, train_datasets, evaluator, active_clients):
        """
        Initializes an instance of client agent
        :param agent_number: id for agent
        :type agent_number: int
        :param train_datasets: dictionary mapping iteration to dataset for given iteration
        :type train_datasets: dictionary indexed by ints mapping to pyspark dataframes
        :param evaluator: evaluator instance used to evaluate new weights
        :type evaluator: evaluator, defined in parallelized.py
        :param active_clients: Clients currently in simulation. Will be updated if clients drop out
        :type active_clients: set
        """
        super(ClientAgentServerless, self).__init__(agent_number=agent_number, agent_type="client_agent")
        self.train_datasets = train_datasets
        self.evaluator = evaluator
        self.active_clients = active_clients
        # Assigned externally after construction; required before initializations().
        self.directory = None
        # Diffie-Hellman key material, populated by initializations() and the
        # subsequent key exchange (all keyed by client name).
        self.pubkeyList = None
        self.seckeyList = None
        self.otherkeyList = None
        self.commonkeyList = None
        self.seeds = None
        self.deltas = None
        self.computation_times = {}
        self.personal_weights = {}  # personal weights. Maps iteration (int) to weights (numpy array)
        self.personal_intercepts = {}
        self.weights_dp_noise = {}  # keyed by iteration; noise added at each iteration
        self.intercepts_dp_noise = {}
        # Updates received from peers: iteration -> {client name -> value}.
        self.other_client_weights = {}
        self.other_client_intercepts = {}
        self.federated_weights = {}  # averaged weights
        self.federated_intercepts = {}
        # Accuracy per iteration for the personal vs. the federated model.
        self.personal_accuracy = {}
        self.federated_accuracy = {}
def initializations(self):
"""
Preforms initializions that have to be done after initializing instance
:return: None
:rtype: None
"""
assert (self.directory is not None)
clients = self.directory.clients
num_clients = len(clients)
pubkeyList, seckeyList = dh.keygeneration(num_clients, self.agent_number)
# note this works because dicts are ordered in Python 3.6+
self.pubkeyList = dict(zip(clients.keys(), pubkeyList))
self.seckeyList = dict(zip(clients.keys(), seckeyList))
# these dictionaries will be populated after key exchange
self.otherkeyList = {agent_name: None for agent_name, __ in clients.items()}
self.otherkeyList[self.name] = 0 # set to zero for yourself!
self.commonkeyList = {agent_name: None for agent_name, __ in clients.items()}
self.commonkeyList[self.name] = 0
self.seeds = {agent_name: None for agent_name, __ in clients.items()}
self.seeds[self.name] = 0
self.deltas = {agent_name: None for agent_name, __ in clients.items()}
self.deltas[self.name] = 0
def send_pubkeys(self):
"""
Sends public keys to other clients in simulations as required by diffie-helman protocol.
"""
for agent_name, agent in self.directory.clients.items():
pubkey = self.pubkeyList[agent_name] # retrieve pubkey for client we're sending to
body = {'pubkey': pubkey}
msg = Message(sender_name=self.name, recipient_name=agent_name, body=body)
agent.receive_pubkey(msg) # invoke method of receiving agent
def receive_pubkey(self, message):
"""
Receives public key from another client
:param message: message containing pubkey from another client
:type message: instance of Message defined in message.py
"""
sender = message.sender
body = message.body
pubkey = body["pubkey"]
self.otherkeyList[sender] = pubkey
    def initialize_common_keys(self):
        """
        Initializes common key list to be used as offsets for sending weights.
        Must run after all public keys have been exchanged.
        """
        pubkeyList = list(self.pubkeyList.values())
        seckeyList = list(self.seckeyList.values())
        otherkeyList = list(self.otherkeyList.values())
        # Derive one shared (common) key per peer via Diffie-Hellman.
        commonkeyList = dh.keyexchange(len(self.directory.clients), self.agent_number, pubkeyList, seckeyList,
                                       otherkeyList)  # generates common keys
        # Relies on dict insertion order matching the list returned by keyexchange.
        for i, agent in enumerate(self.commonkeyList):
            self.commonkeyList[agent] = commonkeyList[i]
        self.update_deltas()  # this method generates seeds and deltas from the common keys
def produce_weights(self, iteration, lock):
start_time = datetime.now()
if iteration - 1 > len(self.train_datasets): # iteration is indexed starting from 1
raise (ValueError(
'Not enough data to support a {}th iteration. Either change iteration data length in config.py or decrease amount of iterations.'.format(
iteration)))
if config.USING_PYSPARK:
weights, intercepts = self.compute_weights_pyspark(iteration)
else:
weights, intercepts = self.compute_weights_sklearn(iteration)
self.personal_weights[iteration] = weights
self.personal_intercepts[iteration] = intercepts
# create copies of weights and intercepts since we may be adding to them
final_weights, final_intercepts = copy.deepcopy(weights), copy.deepcopy(intercepts)
if config.USE_DP_PRIVACY:
lock.acquire() # for random seed
final_weights, final_intercepts = \
self.add_noise(weights=weights, intercepts=intercepts, iteration=iteration)
lock.release()
if config.USE_SECURITY: # adding security via diffie-helman offsets
final_weights, final_intercepts = \
self.add_security_offsets(weights=final_weights, intercepts=final_intercepts)
end_time = datetime.now()
computation_time = end_time - start_time
self.computation_times[iteration] = computation_time
# multiply latency by two: first the server has to request the value, then the client has to return it
body = {'weights': final_weights, 'intercepts': final_intercepts, 'iteration': iteration} # generate body
for client_name in self.active_clients:
client_agent = self.directory.clients[client_name]
if client_name != self.name:
message = Message(sender_name=self.name, recipient_name=client_name, body=body)
client_agent.receive_weights(message)
return None
    def compute_weights_pyspark(self, iteration):
        '''
        Train a logistic regression model on this iteration's pyspark dataframe.
        Corresponds to algorithm 2 in the paper.

        NOTE(review): `LogisticRegression` here resolves to the pyspark class,
        which is imported after (and shadows) the sklearn import at module top.

        :return: (weights, intercepts) of the fitted model
        '''
        dataset = self.train_datasets[iteration]
        lr = LogisticRegression(maxIter=config.LOG_MAX_ITER)
        lrModel = lr.fit(dataset)
        weights = lrModel.coefficientMatrix.toArray()
        intercepts = lrModel.interceptVector
        return weights, intercepts
    def compute_weights_sklearn(self, iteration):
        '''
        Train an SGD logistic-regression classifier on this iteration's data,
        warm-started from the previous round's federated model.
        Corresponds to algorithm 1 in the paper.

        :return: (local_weights, local_intercepts) of the fitted classifier
        '''
        X, y = self.train_datasets[iteration]
        # Seeded per client and iteration for reproducibility.
        lr = SGDClassifier(alpha=0.0001, loss="log", random_state=config.RANDOM_SEEDS[self.name][iteration])
        # Assign prev round coefficients
        if iteration > 1:
            # Deep copies so fit() cannot mutate the stored federated model.
            federated_weights = copy.deepcopy(self.federated_weights[iteration - 1])
            federated_intercepts = copy.deepcopy(self.federated_intercepts[iteration - 1])
        else:
            federated_weights = None
            federated_intercepts = None
        lr.fit(X, y, coef_init=federated_weights, intercept_init=federated_intercepts)
        local_weights = lr.coef_
        local_intercepts = lr.intercept_
        return local_weights, local_intercepts
def add_noise(self, weights, intercepts, iteration):
# preparing value to send to server by adding deltas and DP noise
weights_shape = weights.shape
weights_dp_noise = np.zeros(weights_shape)
intercepts_shape = intercepts.shape
intercepts_dp_noise = np.zeros(intercepts_shape)
# generate DP parameters
active_clients_lens = [config.LENS_PER_ITERATION[client_name] for client_name in self.active_clients]
smallest_dataset = min(active_clients_lens)
if config.USING_CUMULATIVE:
smallest_dataset *= iteration
sensitivity = 2 / (
len(self.active_clients) * smallest_dataset * config.alpha)
epsilon = config.EPSILONS[self.name]
random.seed(config.RANDOM_SEEDS[self.name][iteration])
# adding differentially private noise
for i in range(weights_shape[0]): # weights_modified is 2-D
for j in range(weights_shape[1]):
if config.DP_ALGORITHM == 'Laplace':
dp_noise = laplace(mean=config.mean, sensitivity=sensitivity, epsilon=epsilon)
elif config.DP_ALGORITHM == 'Gamma':
scale = sensitivity / epsilon
num_clients = len(self.directory.clients)
dp_noise = random.gammavariate(1 / num_clients, scale) - random.gammavariate(1 / num_clients,
scale)
else:
raise AssertionError('Need to specify config.DP_ALGORITHM as Laplace or Gamma')
weights_dp_noise[i][j] = dp_noise
if config.INTERCEPTS_DP_NOISE:
for i in range(intercepts_shape[0]):
if config.DP_ALGORITHM == 'Laplace':
dp_noise = laplace(mean=config.mean, sensitivity=sensitivity, epsilon=epsilon)
elif config.DP_ALGORITHM == 'Gamma':
scale = sensitivity / epsilon
num_clients = len(self.directory.clients)
dp_noise = random.gammavariate(1 / num_clients, scale) - random.gammavariate(1 / num_clients, scale)
else:
raise AssertionError('Need to specify config.DP_ALGORITHM as Laplace or Gamma')
intercepts_dp_noise[i] = dp_noise
weights_with_noise = copy.deepcopy(weights) # make a copy to not mutate weights
intercepts_with_noise = copy.deepcopy(intercepts)
self.weights_dp_noise[iteration] = weights_dp_noise
weights_with_noise += weights_dp_noise
self.intercepts_dp_noise[iteration] = intercepts_dp_noise
intercepts_with_noise += intercepts_dp_noise
return weights_with_noise, intercepts_with_noise
def add_security_offsets(self, weights, intercepts):
adding = True # Controls flow of loop. When other agent number is greater, subtract offset instead of add it
for agent_name, offset in self.deltas.items(): # dictionary but should be ordered since Python 3
if agent_name == self.name:
adding = False # from here on out subtract offsets for next clients
elif agent_name in self.active_clients:
if adding == True:
weights += offset
intercepts += offset
else:
weights -= offset
intercepts -= offset
else:
# client no longer in simulation so don't add offset
pass
self.update_deltas() # update the deltas after using them
return weights, intercepts
    def update_deltas(self):
        """
        Derives fresh per-peer deltas (offsets) and seeds. Called after each
        iteration; the first call consumes the common keys, later calls evolve
        the stored seeds.
        """
        if None not in self.commonkeyList.values():  # first call: all common keys are set
            # items() view keeps the old dict alive after the reassignment below
            agents_and_seeds = self.commonkeyList.items()
            self.commonkeyList = self.commonkeyList.fromkeys(self.commonkeyList.keys(), None)
        else:
            # use existing seeds to generate new seeds and offsets
            agents_and_seeds = self.seeds.items()
        for agent, seed in agents_and_seeds:
            # uses current seeds to generate new deltas and new seeds
            if agent != self.name:
                seed_b = bin(seed)  # binary string, e.g. '0b1010...'
                delta_b = seed_b[:20]  # first 20 chars (incl. '0b' prefix) form the offset
                delta = int(delta_b, 2)  # convert back to decimal from base 2
                seed_b = seed_b[20:]  # remainder becomes the basis of the next seed
                seed = int(seed_b, 2)
                random.seed(seed)  # generate new seed
                seed = random.randint(-sys.maxsize, sys.maxsize)
                self.seeds[agent] = seed
                self.deltas[agent] = delta
def receive_weights(self, message):
body = message.body
iteration, client_weight, client_intercept = body['iteration'], body['weights'], body[
'intercepts']
if iteration not in self.other_client_weights:
self.other_client_weights[iteration] = {}
self.other_client_intercepts[iteration] = {}
self.other_client_weights[iteration][message.sender] = client_weight
self.other_client_intercepts[iteration][message.sender] = client_intercept
if len(self.other_client_weights[iteration]) == len(self.active_clients) - 1: # -1 because of yourself
self.federate_weights(iteration)
return None
def federate_weights(self, iteration):
while iteration not in self.personal_weights.keys():
pass # waiting until you've also finished computation
all_weights = list(self.other_client_weights[iteration].values())
all_weights.append(self.personal_weights[iteration])
all_intercepts = list(self.other_client_intercepts[iteration].values())
all_intercepts.append(self.personal_intercepts[iteration])
federated_weights = np.average(all_weights, axis = 0) # the weights for this iteration!
federated_intercepts = np.average(all_intercepts, axis=0)
self.federated_weights[iteration] = federated_weights
self.federated_intercepts[iteration] = federated_intercepts
personal_weights = self.personal_weights[iteration]
personal_intercepts = self.personal_intercepts[iteration]
personal_accuracy = self.evaluator.accuracy(personal_weights, personal_intercepts)
federated_accuracy = self.evaluator.accuracy(federated_weights, federated_intercepts)
self.personal_accuracy[iteration] = personal_accuracy
self.federated_accuracy[iteration] = federated_accuracy
args = [self.name, iteration, personal_accuracy, federated_accuracy]
iteration_report = 'Performance Metrics for {} on iteration {} \n' \
'------------------------------------------- \n' \
'Personal accuracy: {} \n' \
'Federated accuracy: {} | |
latency data
is now available as Distribution Metrics. Existing monitors have been migrated automatically but all terraformed
monitors can still use the existing metrics. We strongly recommend updating monitor definitions to query the new
metrics. To learn more, or to see examples of how to update your terraform definitions to utilize the new distribution
metrics, see the [detailed doc](https://docs.datadoghq.com/tracing/guide/ddsketch_trace_metrics/).
"""
return pulumi.get(self, "query")
    @query.setter
    def query(self, value: pulumi.Input[str]):
        """Set the monitor query (see the `query` getter for details)."""
        pulumi.set(self, "query", value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of the monitor.

        The mapping from these types to the types found in the Datadog Web UI can be found in the
        Datadog API [documentation page](https://docs.datadoghq.com/api/v1/monitors/#create-a-monitor). Note: The monitor type
        cannot be changed after a monitor is created.
        """
        return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="enableLogsSample")
def enable_logs_sample(self) -> Optional[pulumi.Input[bool]]:
    """
    A boolean indicating whether or not to include a list of log values which triggered the alert. This is only used by log
    monitors. Defaults to `false`.
    """
    return pulumi.get(self, "enable_logs_sample")

@enable_logs_sample.setter
def enable_logs_sample(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "enable_logs_sample" resource input value.
    pulumi.set(self, "enable_logs_sample", value)
@property
@pulumi.getter(name="escalationMessage")
def escalation_message(self) -> Optional[pulumi.Input[str]]:
    """
    A message to include with a re-notification. Supports the `@username` notification allowed elsewhere.
    """
    return pulumi.get(self, "escalation_message")

@escalation_message.setter
def escalation_message(self, value: Optional[pulumi.Input[str]]):
    # Replace the stored "escalation_message" resource input value.
    pulumi.set(self, "escalation_message", value)
@property
@pulumi.getter(name="evaluationDelay")
def evaluation_delay(self) -> Optional[pulumi.Input[int]]:
    """
    (Only applies to metric alert) Time (in seconds) to delay evaluation, as a non-negative integer. For example, if the
    value is set to `300` (5min), the `timeframe` is set to `last_5m` and the time is 7:00, the monitor will evaluate data
    from 6:50 to 6:55. This is useful for AWS CloudWatch and other backfilled metrics to ensure the monitor will always have
    data during evaluation.
    """
    return pulumi.get(self, "evaluation_delay")

@evaluation_delay.setter
def evaluation_delay(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "evaluation_delay" resource input value.
    pulumi.set(self, "evaluation_delay", value)
@property
@pulumi.getter(name="forceDelete")
def force_delete(self) -> Optional[pulumi.Input[bool]]:
    """
    A boolean indicating whether this monitor can be deleted even if it’s referenced by other resources (e.g. SLO,
    composite monitor).
    """
    return pulumi.get(self, "force_delete")

@force_delete.setter
def force_delete(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "force_delete" resource input value.
    pulumi.set(self, "force_delete", value)
@property
@pulumi.getter(name="groupbySimpleMonitor")
def groupby_simple_monitor(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether or not to trigger one alert if any source breaches a threshold. This is only used by log monitors. Defaults to
    `false`.
    """
    return pulumi.get(self, "groupby_simple_monitor")

@groupby_simple_monitor.setter
def groupby_simple_monitor(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "groupby_simple_monitor" resource input value.
    pulumi.set(self, "groupby_simple_monitor", value)
@property
@pulumi.getter(name="includeTags")
def include_tags(self) -> Optional[pulumi.Input[bool]]:
    """
    A boolean indicating whether notifications from this monitor automatically insert its triggering tags into the title.
    Defaults to `true`.
    """
    return pulumi.get(self, "include_tags")

@include_tags.setter
def include_tags(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "include_tags" resource input value.
    pulumi.set(self, "include_tags", value)
@property
@pulumi.getter
def locked(self) -> Optional[pulumi.Input[bool]]:
    """
    A boolean indicating whether changes to this monitor should be restricted to the creator or admins. Defaults to `false`.
    """
    return pulumi.get(self, "locked")

@locked.setter
def locked(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "locked" resource input value.
    pulumi.set(self, "locked", value)
@property
@pulumi.getter(name="monitorThresholdWindows")
def monitor_threshold_windows(self) -> Optional[pulumi.Input['MonitorMonitorThresholdWindowsArgs']]:
    """
    A mapping containing `recovery_window` and `trigger_window` values, e.g. `last_15m` . Can only be used for, and are
    required for, anomaly monitors.
    """
    return pulumi.get(self, "monitor_threshold_windows")

@monitor_threshold_windows.setter
def monitor_threshold_windows(self, value: Optional[pulumi.Input['MonitorMonitorThresholdWindowsArgs']]):
    # Replace the stored "monitor_threshold_windows" resource input value.
    pulumi.set(self, "monitor_threshold_windows", value)
@property
@pulumi.getter(name="monitorThresholds")
def monitor_thresholds(self) -> Optional[pulumi.Input['MonitorMonitorThresholdsArgs']]:
    """
    Alert thresholds of the monitor.
    """
    return pulumi.get(self, "monitor_thresholds")

@monitor_thresholds.setter
def monitor_thresholds(self, value: Optional[pulumi.Input['MonitorMonitorThresholdsArgs']]):
    # Replace the stored "monitor_thresholds" resource input value.
    pulumi.set(self, "monitor_thresholds", value)
@property
@pulumi.getter(name="newGroupDelay")
def new_group_delay(self) -> Optional[pulumi.Input[int]]:
    """
    The time (in seconds) to skip evaluations for new groups. `new_group_delay` overrides `new_host_delay` if it is set to a
    nonzero value.
    """
    return pulumi.get(self, "new_group_delay")

@new_group_delay.setter
def new_group_delay(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "new_group_delay" resource input value.
    pulumi.set(self, "new_group_delay", value)
@property
@pulumi.getter(name="newHostDelay")
def new_host_delay(self) -> Optional[pulumi.Input[int]]:
    """
    **Deprecated**. See `new_group_delay`. Time (in seconds) to allow a host to boot and applications to fully start before
    starting the evaluation of monitor results. Should be a non-negative integer. This value is ignored for simple monitors
    and monitors not grouped by host. Defaults to `300`. The only case when this should be used is to override the default
    and set `new_host_delay` to zero for monitors grouped by host.
    """
    return pulumi.get(self, "new_host_delay")

@new_host_delay.setter
def new_host_delay(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "new_host_delay" resource input value.
    pulumi.set(self, "new_host_delay", value)
@property
@pulumi.getter(name="noDataTimeframe")
def no_data_timeframe(self) -> Optional[pulumi.Input[int]]:
    """
    The number of minutes before a monitor will notify when data stops reporting. Provider defaults to 10 minutes. We
    recommend at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
    """
    return pulumi.get(self, "no_data_timeframe")

@no_data_timeframe.setter
def no_data_timeframe(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "no_data_timeframe" resource input value.
    pulumi.set(self, "no_data_timeframe", value)
@property
@pulumi.getter(name="notifyAudit")
def notify_audit(self) -> Optional[pulumi.Input[bool]]:
    """
    A boolean indicating whether tagged users will be notified on changes to this monitor. Defaults to `false`.
    """
    return pulumi.get(self, "notify_audit")

@notify_audit.setter
def notify_audit(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "notify_audit" resource input value.
    pulumi.set(self, "notify_audit", value)
@property
@pulumi.getter(name="notifyNoData")
def notify_no_data(self) -> Optional[pulumi.Input[bool]]:
    """
    A boolean indicating whether this monitor will notify when data stops reporting. Defaults to `false`.
    """
    return pulumi.get(self, "notify_no_data")

@notify_no_data.setter
def notify_no_data(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "notify_no_data" resource input value.
    pulumi.set(self, "notify_no_data", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
    """
    Integer from 1 (high) to 5 (low) indicating alert severity.
    """
    return pulumi.get(self, "priority")

@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "priority" resource input value.
    pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="renotifyInterval")
def renotify_interval(self) -> Optional[pulumi.Input[int]]:
    """
    The number of minutes after the last notification before a monitor will re-notify on the current status. It will only
    re-notify if it's not resolved.
    """
    return pulumi.get(self, "renotify_interval")

@renotify_interval.setter
def renotify_interval(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "renotify_interval" resource input value.
    pulumi.set(self, "renotify_interval", value)
@property
@pulumi.getter(name="renotifyOccurrences")
def renotify_occurrences(self) -> Optional[pulumi.Input[int]]:
    """
    The number of re-notification messages that should be sent on the current status.
    """
    return pulumi.get(self, "renotify_occurrences")

@renotify_occurrences.setter
def renotify_occurrences(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "renotify_occurrences" resource input value.
    pulumi.set(self, "renotify_occurrences", value)
@property
@pulumi.getter(name="renotifyStatuses")
def renotify_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    The types of statuses for which re-notification messages should be sent.
    """
    return pulumi.get(self, "renotify_statuses")

@renotify_statuses.setter
def renotify_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    # Replace the stored "renotify_statuses" resource input value.
    pulumi.set(self, "renotify_statuses", value)
@property
@pulumi.getter(name="requireFullWindow")
def require_full_window(self) -> Optional[pulumi.Input[bool]]:
    """
    A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set
    this to `false` for sparse metrics, otherwise some evaluations will be skipped. Default: `true` for `on average`, `at
    all times` and `in total` aggregation. `false` otherwise.
    """
    return pulumi.get(self, "require_full_window")

@require_full_window.setter
def require_full_window(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "require_full_window" resource input value.
    pulumi.set(self, "require_full_window", value)
@property
@pulumi.getter(name="restrictedRoles")
def restricted_roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    # No docstring in the generated source; presumably the role IDs allowed
    # to edit this monitor — confirm against the Datadog provider schema.
    return pulumi.get(self, "restricted_roles")

@restricted_roles.setter
def restricted_roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    # Replace the stored "restricted_roles" resource input value.
    pulumi.set(self, "restricted_roles", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    A list of tags to associate with your monitor. This can help you categorize and filter monitors in the manage monitors
    page of the UI. Note: it's not currently possible to filter by these tags when querying via the API
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    # Replace the stored "tags" resource input value.
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="timeoutH")
def timeout_h(self) -> Optional[pulumi.Input[int]]:
    """
    The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
    """
    return pulumi.get(self, "timeout_h")

@timeout_h.setter
def timeout_h(self, value: Optional[pulumi.Input[int]]):
    # Replace the stored "timeout_h" resource input value.
    pulumi.set(self, "timeout_h", value)
@property
@pulumi.getter
def validate(self) -> Optional[pulumi.Input[bool]]:
    """
    If set to `false`, skip the validation call done during plan.
    """
    return pulumi.get(self, "validate")

@validate.setter
def validate(self, value: Optional[pulumi.Input[bool]]):
    # Replace the stored "validate" resource input value.
    pulumi.set(self, "validate", value)
@pulumi.input_type
class _MonitorState:
def __init__(__self__, *,
enable_logs_sample: Optional[pulumi.Input[bool]] = None,
escalation_message: Optional[pulumi.Input[str]] = None,
evaluation_delay: Optional[pulumi.Input[int]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
groupby_simple_monitor: Optional[pulumi.Input[bool]] = None,
include_tags: Optional[pulumi.Input[bool]] = None,
locked: Optional[pulumi.Input[bool]] = None,
message: Optional[pulumi.Input[str]] = None,
monitor_threshold_windows: Optional[pulumi.Input['MonitorMonitorThresholdWindowsArgs']] = None,
monitor_thresholds: Optional[pulumi.Input['MonitorMonitorThresholdsArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
new_group_delay: Optional[pulumi.Input[int]] = None,
new_host_delay: Optional[pulumi.Input[int]] = None,
no_data_timeframe: Optional[pulumi.Input[int]] = None,
notify_audit: Optional[pulumi.Input[bool]] = None,
notify_no_data: Optional[pulumi.Input[bool]] = None,
priority: Optional[pulumi.Input[int]] = None,
query: Optional[pulumi.Input[str]] = None,
renotify_interval: Optional[pulumi.Input[int]] = None,
renotify_occurrences: Optional[pulumi.Input[int]] = None,
renotify_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
require_full_window: Optional[pulumi.Input[bool]] = None,
restricted_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
timeout_h: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
validate: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering Monitor resources.
:param pulumi.Input[bool] enable_logs_sample: A boolean indicating whether or not to include a list of log values which triggered the alert. This is only used by log
monitors. Defaults to `false`.
:param pulumi.Input[str] escalation_message: A message to include with a re-notification. Supports the `@username` notification allowed elsewhere.
:param pulumi.Input[int] evaluation_delay: | |
now
# Set the path if applicable
if '_path' in session_data:
self._path = session_data['_path']
self.update(session_data)
self.accessed_dict = session_data.copy()
finally:
self.namespace.release_read_lock()
if timed_out:
self.invalidate()
def save(self, accessed_only=False):
    """Saves the data for this session to persistent storage

    If accessed_only is True, then only the original data loaded
    at the beginning of the request will be saved, with the updated
    last accessed time.
    """
    # Look to see if its a new session that was only accessed
    # Don't save it under that case
    if accessed_only and (self.is_new or not self.save_atime):
        return None

    # this session might not have a namespace yet or the session id
    # might have been regenerated
    if not hasattr(self, 'namespace') or self.namespace.namespace != self.id:
        self.namespace = self.namespace_class(
            self.id,
            data_dir=self.data_dir,
            digest_filenames=False,
            **self.namespace_args)

    self.namespace.acquire_write_lock(replace=True)
    try:
        if accessed_only:
            # Persist only the snapshot captured at load time.
            data = dict(self.accessed_dict.items())
        else:
            data = dict(self.items())

        if self.encrypt_key:
            data = self._encrypt_data(data)

        # Save the data
        if not data and 'session' in self.namespace:
            # Session emptied during this request: remove the stale entry.
            del self.namespace['session']
        else:
            self.namespace['session'] = data
    finally:
        self.namespace.release_write_lock()
    if self.use_cookies and self.is_new:
        # First save of a brand-new session: ask the response layer to set the cookie.
        self.request['set_cookie'] = True
def revert(self):
    """Throw away every change made during this request, restoring the
    data captured when the session was first accessed."""
    self.clear()
    self.update(self.accessed_dict)
def regenerate_id(self):
    """Create a fresh session id while retaining all session data.

    It is good security practice to regenerate the id after a client
    elevates privileges.
    """
    self._create_id(set_new=False)
# TODO: I think both these methods should be removed. They're from
# the original mod_python code i was ripping off but they really
# have no use here.
def lock(self):
    """Take this session's write lock explicitly, shutting out other
    processes/threads.

    Locking already happens automatically inside load/save. Use this
    directly only with great care, and always pair it with `unlock`
    inside a ``finally:`` block — a stray lock typically cannot be
    released without shutting down the whole application.
    """
    self.namespace.acquire_write_lock()
def unlock(self):
    """Release this session's write lock.

    Unlocking happens automatically inside load/save. Call this directly
    only with great care and always from a ``finally:`` block, since a
    stray lock typically cannot be released without shutting down the
    whole application.
    """
    self.namespace.release_write_lock()
class CookieSession(Session):
    """Pure cookie-based session

    Options recognized when using cookie-based sessions are slightly
    more restricted than general sessions.

    :param key: The name the cookie should be set to.
    :param timeout: How long session data is considered valid. This is used
                    regardless of the cookie being present or not to determine
                    whether session data is still valid.
    :type timeout: int
    :param save_accessed_time: Whether beaker should save the session's access
                               time (True) or only modification time (False).
                               Defaults to True.
    :param cookie_expires: Expiration date for cookie
    :param cookie_domain: Domain to use for the cookie.
    :param cookie_path: Path to use for the cookie.
    :param data_serializer: If ``"json"`` or ``"pickle"`` should be used
                            to serialize data. Can also be an object with
                            ``loads`` and ``dumps`` methods. By default
                            ``"pickle"`` is used.
    :param secure: Whether or not the cookie should only be sent over SSL.
    :param httponly: Whether or not the cookie should only be accessible by
                     the browser not by JavaScript.
    :param encrypt_key: The key to use for the local session encryption, if not
                        provided the session will not be encrypted.
    :param validate_key: The key used to sign the local encrypted session
    :param invalidate_corrupt: How to handle corrupt data when loading. When
                               set to True, then corrupt data will be silently
                               invalidated and a new session created,
                               otherwise invalid data will cause an exception.
    :type invalidate_corrupt: bool
    """

    def __init__(self, request, key='beaker.session.id', timeout=None,
                 save_accessed_time=True, cookie_expires=True, cookie_domain=None,
                 cookie_path='/', encrypt_key=None, validate_key=None, secure=False,
                 httponly=False, data_serializer='pickle',
                 encrypt_nonce_bits=DEFAULT_NONCE_BITS, invalidate_corrupt=False,
                 **kwargs):
        # All session data lives client-side in the cookie, so an encryption
        # request without an AES backend must fail loudly up front.
        if not crypto.has_aes and encrypt_key:
            raise InvalidCryptoBackendError("No AES library is installed, can't generate "
                                            "encrypted cookie-only Session.")

        self.request = request
        self.key = key
        self.timeout = timeout
        self.save_atime = save_accessed_time
        self.cookie_expires = cookie_expires
        self.encrypt_key = encrypt_key
        self.validate_key = validate_key
        self.encrypt_nonce_size = get_nonce_size(encrypt_nonce_bits)
        self.request['set_cookie'] = False
        self.secure = secure
        self.httponly = httponly
        self._domain = cookie_domain
        self._path = cookie_path
        self.invalidate_corrupt = invalidate_corrupt
        self._set_serializer(data_serializer)

        try:
            cookieheader = request['cookie']
        except KeyError:
            cookieheader = ''

        # Signing is mandatory for cookie-only sessions: without it the
        # client could tamper with its own session data undetected.
        if validate_key is None:
            raise BeakerException("No validate_key specified for Cookie only "
                                  "Session.")
        if timeout and not save_accessed_time:
            raise BeakerException("timeout requires save_accessed_time")

        try:
            self.cookie = SignedCookie(
                validate_key,
                input=cookieheader,
            )
        except http_cookies.CookieError:
            # Malformed cookie header: fall back to an empty signed cookie.
            self.cookie = SignedCookie(
                validate_key,
                input=None,
            )

        self['_id'] = _session_id()
        self.is_new = True

        # If we have a cookie, load it
        if self.key in self.cookie and self.cookie[self.key].value is not None:
            self.is_new = False
            try:
                cookie_data = self.cookie[self.key].value
                if cookie_data is InvalidSignature:
                    raise BeakerException("Invalid signature")
                self.update(self._decrypt_data(cookie_data))
                self._path = self.get('_path', '/')
            except Exception as e:
                if self.invalidate_corrupt:
                    util.warn(
                        "Invalidating corrupt session %s; "
                        "error was: %s. Set invalidate_corrupt=False "
                        "to propagate this exception." % (self.id, e))
                    self.invalidate()
                else:
                    raise
            if self.timeout is not None:
                now = time.time()
                last_accessed_time = self.get('_accessed_time', now)
                if now - last_accessed_time > self.timeout:
                    # Stale session: drop the data but keep the session object.
                    self.clear()
            self.accessed_dict = self.copy()
            self._create_cookie()

    def created(self):
        return self['_creation_time']
    created = property(created)

    def id(self):
        return self['_id']
    id = property(id)

    def _set_domain(self, domain):
        # Mirrored in the session dict so it survives a round-trip.
        self['_domain'] = domain
        self._domain = domain

    def _get_domain(self):
        return self._domain

    domain = property(_get_domain, _set_domain)

    def _set_path(self, path):
        self['_path'] = self._path = path

    def _get_path(self):
        return self._path

    path = property(_get_path, _set_path)

    def save(self, accessed_only=False):
        """Saves the data for this session to persistent storage"""
        if accessed_only and (self.is_new or not self.save_atime):
            return
        if accessed_only:
            # Re-save only what was loaded at the start of the request.
            self.clear()
            self.update(self.accessed_dict)
        self._create_cookie()

    def expire(self):
        """Delete the 'expires' attribute on this Session, if any."""
        self.pop('_expires', None)

    def _create_cookie(self):
        # Ensure the bookkeeping keys exist before serializing.
        if '_creation_time' not in self:
            self['_creation_time'] = time.time()
        if '_id' not in self:
            self['_id'] = _session_id()
        self['_accessed_time'] = time.time()

        val = self._encrypt_data()
        if len(val) > 4064:
            # Browsers cap cookie size around 4KB; refuse rather than
            # emit a cookie that would be silently truncated/dropped.
            raise BeakerException("Cookie value is too long to store")
        self.cookie[self.key] = val

        if '_expires' in self:
            expires = self['_expires']
        else:
            expires = None
        expires = self._set_cookie_expires(expires)
        if expires is not None:
            self['_expires'] = expires

        if '_domain' in self:
            self.cookie[self.key]['domain'] = self['_domain']
        elif self._domain:
            self.cookie[self.key]['domain'] = self._domain
        if self.secure:
            self.cookie[self.key]['secure'] = True
        self._set_cookie_http_only()
        self.cookie[self.key]['path'] = self.get('_path', '/')

        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def delete(self):
        """Delete the cookie, and clear the session"""
        # Send a delete cookie request
        self._delete_cookie()
        self.clear()

    def invalidate(self):
        """Clear the contents and start a new session"""
        self.clear()
        self['_id'] = _session_id()
class SessionObject(object):
"""Session proxy/lazy creator
This object proxies access to the actual session object, so that in
the case that the session hasn't been used before, it will be
setup. This avoid creating and loading the session from persistent
storage unless its actually used during the request.
"""
def __init__(self, environ, **params):
    """Remember the WSGI environ and session params; touch no storage yet."""
    # Assign through __dict__ so the attribute proxy (__setattr__, which
    # forwards to the real session) is bypassed during construction.
    state = self.__dict__
    state['_params'] = params
    state['_environ'] = environ
    state['_sess'] = None
    state['_headers'] = {}
def _session(self):
    """Create the real session object on first access and cache it."""
    state = self.__dict__
    if state['_sess'] is None:
        params = state['_params']
        req = {'cookie_out': None,
               'cookie': state['_environ'].get('HTTP_COOKIE')}
        state['_headers'] = req
        session_cls = params.get('session_class', None)
        if session_cls is not None:
            assert issubclass(session_cls, Session),\
                "Not a Session: " + session_cls
        elif params.get('type') == 'cookie':
            session_cls = CookieSession
        else:
            session_cls = Session
        state['_sess'] = session_cls(req, **params)
    return state['_sess']
def __getattr__(self, attr):
    # Any attribute not found on the proxy is looked up on the real
    # (lazily created) session.
    return getattr(self._session(), attr)

def __setattr__(self, attr, value):
    setattr(self._session(), attr, value)

def __delattr__(self, name):
    self._session().__delattr__(name)

def __getitem__(self, key):
    # Dict-style item access also proxies straight to the session.
    return self._session()[key]

def __setitem__(self, key, value):
    self._session()[key] = value

def __delitem__(self, key):
    self._session().__delitem__(key)

def __repr__(self):
    return self._session().__repr__()
def __iter__(self):
    """Only works for proxying to a dict"""
    return iter(self._session().keys())

def __contains__(self, key):
    return key in self._session()

def has_key(self, key):
    # Legacy dict API kept for backwards compatibility; same as __contains__.
    return key in self._session()
def get_by_id(self, id):
    """Load an existing session by its session ID.

    Returns None when no persisted session exists for that id.
    """
    params = self.__dict__['_params']
    loaded = Session({}, use_cookies=False, id=id, **params)
    return None if loaded.is_new else loaded
def save(self):
    """Mark the session dirty so that persist() performs a full save."""
    vars(self)['_dirty'] = True
def delete(self):
    """Mark the proxy dirty and delete the underlying session's contents."""
    vars(self)['_dirty'] = True
    self._session().delete()
def persist(self):
    """Write the session out to storage when needed.

    A session on which save() or delete() was called is always fully
    saved. Otherwise: with ``auto`` set, the whole session is saved
    anyway; with ``save_accessed_time`` true (the default) only the
    refreshed access time is saved; with it false, nothing is written.
    """
    params = self.__dict__['_params']
    if params.get('auto'):
        self._session().save()
    elif not params.get('save_accessed_time', True):
        # save_accessed_time is false: only an explicit save()/delete()
        # (which sets the dirty flag) triggers a write.
        if self.dirty():
            self._session().save()
    elif self.dirty():
        self._session().save()
    else:
        self._session().save(accessed_only=True)
def dirty(self):
"""Returns True if save() or delete() | |
<reponame>andyzt/convai_router_bot
import json
import os
import random
from collections import defaultdict
from datetime import datetime, timedelta
from typing import TextIO
from uuid import uuid4
import yaml
from mongoengine import errors, QuerySet
from mongoengine.queryset.visitor import Q
from . import Bot, PersonProfile, User, UserPK, BannedPair, Conversation, ConversationPeer, Message, Complaint, Settings
def fill_db_with_stub(n_bots=5,
                      n_bots_banned=2,
                      n_humans=10,
                      n_humans_banned=2,
                      n_banned_pairs=3,
                      n_profiles=20,
                      n_topics=3,
                      n_conversations=20,
                      n_msg_per_conv=15,
                      n_complaints_new=3,
                      n_complaints_processed=2):
    """Populate the database with random stub documents for testing/demo.

    Creates and saves person profiles, bots and users (including banned
    ones), banned user/bot pairs, conversations with messages, and
    complaints. The count of each entity kind is controlled by the
    corresponding keyword argument. Output is random (no fixed seed).
    """
    from random import choice, randint
    # lorem_ipsum.txt (next to this module) supplies word material for
    # personas and message texts.
    with open(os.path.join(os.path.split(__file__)[0], "lorem_ipsum.txt"), 'r') as f:
        lorem_ipsum = f.read().split(' ')
    profiles = [PersonProfile(persona=[' '.join(lorem_ipsum[i * 10:(i + 1) * 10])],
                              link_uuid=str(uuid4()),
                              topics=[f'Topic_{i}' for i in range(random.randrange(n_topics + 1))]
                              ).save() for i in range(n_profiles)]
    bots = [Bot(token='stub' + str(uuid4()),
                bot_name='stub bot #' + str(i)).save() for i in range(n_bots)]
    banned_bots = [Bot(token='stub' + str(uuid4()),
                       bot_name='stub banned bot #' + str(i),
                       banned=True).save() for i in range(n_bots_banned)]
    humans = [User(user_key=UserPK(user_id='stub' + str(uuid4()),
                                   platform=choice(UserPK.PLATFORM_CHOICES)),
                   username='stub user #' + str(i)).save() for i in range(n_humans)]
    banned_humans = [User(user_key=UserPK(user_id='stub' + str(uuid4()),
                                          platform=choice(UserPK.PLATFORM_CHOICES)),
                          username='stub banned user #' + str(i),
                          banned=True).save() for i in range(n_humans_banned)]
    all_humans = humans + banned_humans
    all_bots = bots + banned_bots
    all_peers = all_humans + all_bots
    banned_pairs = []
    for _ in range(n_banned_pairs):
        # A (user, bot) pair must be unique; retry with new random picks
        # until the save does not collide with an existing pair.
        while True:
            try:
                banned_pairs.append(BannedPair(user=choice(all_humans),
                                               bot=choice(all_bots)).save())
                break
            except errors.NotUniqueError:
                # retry...
                pass
    conversations = []
    for i in range(n_conversations):
        # participant1 is always a human; participant2 may be human or bot.
        human_peer = ConversationPeer(peer=choice(all_humans),
                                      assigned_profile=choice(profiles),
                                      dialog_evaluation_score=randint(1, 5),
                                      other_peer_profile_options=[choice(profiles) for _ in range(2)])
        human_peer.other_peer_profile_selected = choice(human_peer.other_peer_profile_options)
        other_peer = ConversationPeer(peer=choice(all_peers),
                                      assigned_profile=choice(human_peer.other_peer_profile_options),
                                      dialog_evaluation_score=randint(1, 5),
                                      other_peer_profile_options=[choice(profiles)] + [human_peer.assigned_profile])
        other_peer.other_peer_profile_selected = choice(other_peer.other_peer_profile_options)
        conv = Conversation(participant1=human_peer, participant2=other_peer, conversation_id=i + 1)
        msgs = [Message(msg_id=i,
                        text=' '.join(lorem_ipsum[i * 10:(i + 1) * 10]),
                        sender=choice([human_peer.peer, other_peer.peer]),
                        time=datetime.now() + timedelta(hours=i),
                        evaluation_score=randint(0, 1)) for i in range(n_msg_per_conv)]
        conv.messages = msgs
        conversations.append(conv.save())
    complaints_new = [Complaint(complainer=c.participants[0].peer,
                                complain_to=c.participants[1].peer,
                                conversation=c).save() for c in map(lambda _: choice(conversations),
                                                                    range(n_complaints_new))]
    complaints_processed = [Complaint(complainer=c.participants[0].peer,
                                      complain_to=c.participants[1].peer,
                                      conversation=c,
                                      processed=True).save() for c in map(lambda _: choice(conversations),
                                                                          range(n_complaints_processed))]
def get_inactive_bots(n_bots, threshold=None):
    """Yield (bot, conversation_count) pairs for the least active bots.

    With `threshold` set, yields every bot whose conversation count is
    <= threshold; otherwise yields the `n_bots` bots with the fewest
    conversations, least active first.

    NOTE(review): only conversations where the bot is participant2 are
    counted — confirm bots never appear as participant1.
    """
    pipeline = [
        {'$match': {'participant2.peer._cls': 'Bot'}},
        {'$group': {'_id': '$participant2.peer',
                    'count': {'$sum': 1}}},
        {'$sort': {'count': 1}}
    ]
    if threshold is not None:
        pipeline.append({'$match': {'count': {'$lte': threshold}}})
    else:
        pipeline.append({'$limit': n_bots})
    # The grouped _id is a DBRef-like struct; pull the raw ObjectId out of it.
    ids, counts = zip(*[(group['_id']['_ref'].as_doc()['$id'], group['count'])
                        for group in Conversation.objects.aggregate(*pipeline)])
    bots = Bot.objects.in_bulk(ids)
    for id, count in zip(ids, counts):
        yield bots[id], count
def register_bot(token, name):
    """Create and persist a new Bot with the given token and display name."""
    new_bot = Bot(token=token, bot_name=name)
    return new_bot.save()
def get_complaints(include_processed=False):
    """Return complaints; only unprocessed ones unless include_processed is True."""
    if include_processed:
        return Complaint.objects()
    return Complaint.objects(processed=False)
def mark_complaints_processed(all=False, *ids):
    """Mark complaints processed: every complaint when `all` is set,
    otherwise only those with the given ids. Returns the update count."""
    if all:
        targets = Complaint.objects
    else:
        targets = Complaint.objects(id__in=ids)
    return targets.update(processed=True)
def ban_human(platform, user_id):
    """Set the banned flag on the user identified by (platform, user_id)."""
    matched = User.objects(user_key__platform=platform, user_key__user_id=user_id)
    return matched.update(banned=True)
def ban_bot(token):
    """Set the banned flag on the bot identified by its token."""
    matched = Bot.objects(token=token)
    return matched.update(banned=True)
def ban_human_bot(platform, user_id, token):
    """Persist a BannedPair forbidding the given user/bot combination."""
    key = UserPK(user_id=user_id, platform=platform)
    banned_user = User.objects.get(user_key=key)
    banned_bot = Bot.objects.with_id(token)
    return BannedPair(user=banned_user, bot=banned_bot).save()
def set_default_bot(platform, user_id, token):
    """Assign the bot identified by `token` as the test bot for the given user."""
    key = UserPK(user_id=user_id, platform=platform)
    target_user = User.objects.get(user_key=key)
    test_bot = Bot.objects.with_id(token)
    return target_user.update(assigned_test_bot=test_bot)
def import_profiles(stream: TextIO):
    """Load linked groups of person profiles from a JSON or YAML stream.

    The stream's filename extension selects the parser. Profiles within
    one group share a freshly generated ``link_uuid`` so they can be
    cross-referenced later.

    :param stream: open text file whose ``.name`` ends in .json/.yaml/.yml
                   (extension matching is case-insensitive)
    :return: the inserted PersonProfile documents
    :raises ValueError: for any other file extension
    """
    _, ext = os.path.splitext(stream.name)
    # Normalize so ".JSON" / ".Yml" etc. are accepted as well.
    ext = ext.lower()
    if ext == '.json':
        linked_groups = json.load(stream)
    elif ext == '.yaml' or ext == '.yml':
        linked_groups = yaml.safe_load(stream)
    else:
        raise ValueError(f'file extension "{ext}" is not supported, it should be either `json` or `yaml/yml`')
    profiles = []
    for linked_group in linked_groups:
        link_uuid = str(uuid4())
        for linked_profile in linked_group:
            profiles.append(PersonProfile(persona=linked_profile['persona'],
                                          tags=linked_profile.get('tags', []),
                                          link_uuid=link_uuid,
                                          topics=linked_profile.get('topics', [])))
    return PersonProfile.objects.insert(profiles)
def manage_tags(action: str, tag: str) -> str:
    """Add/remove/list active tags kept in the singleton 'tags' Settings document.

    :param action: one of 'add', 'remove' or 'list'
    :param tag: the tag to add or remove (ignored for 'list')
    :return: a human-readable result message
    :raises ValueError: on an unknown action or if multiple 'tags'
                        Settings documents exist
    """
    tags_set: QuerySet = Settings.objects(name='tags')
    num_of_tags = tags_set.count()
    if num_of_tags == 0:
        # First use: create the (empty) singleton document.
        tags = Settings(name='tags', value=list())
        tags.save()
    elif num_of_tags == 1:
        tags = tags_set.first()
    else:
        raise ValueError('multiple documents in "Settings" collection with name "tags"')
    tag_count = tags.value.count(tag)
    if action == 'add':
        if tag_count:
            response = f'"{tag}" is already in active tags list'
        else:
            tags.update(add_to_set__value=tag)
            response = f'"{tag}" was added to the active tags list'
    elif action == 'remove':
        # pull is issued once per stored occurrence to clear duplicates.
        for _ in range(tag_count):
            tags.update(pull__value=tag)
        if tag_count:
            response = f'"{tag}" was removed from the active tags list'
        else:
            # Fixed grammar of the user-facing message ("does not in" -> "is not in").
            response = f'"{tag}" is not in active tags list'
    elif action == 'list':
        if tags.value:
            response = f"{len(tags.value)} tags in active tags list: {', '.join(tags.value)}"
        else:
            response = 'active tags list is empty'
    else:
        raise ValueError(f'unexpected action argument value: {action}')
    return response
def export_training_conversations(date_begin=None, date_end=None, reveal_sender=False, reveal_ids=False):
    """Export conversations in the given date range as plain training dicts.

    :param date_begin: 'YYYY-MM-DD' start date; when both dates are None
                       the full history is exported
    :param date_end: 'YYYY-MM-DD' end date; defaults to date_begin
    :param reveal_sender: currently unused  # NOTE(review): confirm before removal
    :param reveal_ids: currently unused  # NOTE(review): confirm before removal
    :return: list of dicts with 'dialog_id', 'dialog', 'start_time',
             'end_time' and 'users' keys
    """
    # TODO: need to process to human conversation scenario
    # TODO: merge with export_bot_scores
    training_convs = []
    if (date_begin is None) and (date_end is None):
        date_begin = '1900-01-01'
        date_end = '2500-12-31'
    elif (date_begin is not None) and (date_end is None):
        date_end = date_begin
    datetime_begin = datetime.strptime(f'{date_begin}_00:00:00.000000', "%Y-%m-%d_%H:%M:%S.%f")
    datetime_end = datetime.strptime(f'{date_end}_23:59:59.999999', "%Y-%m-%d_%H:%M:%S.%f")
    args = {'start_time__gte': datetime_begin, 'start_time__lte': datetime_end}
    convs = Conversation.objects(**args)
    for conv in convs:
        conv: Conversation = conv
        training_conv = {
            'dialog_id': str(hex(conv.conversation_id)),
            'dialog': [],
            'start_time': str(conv.start_time),
            'end_time': str(conv.end_time),
            'users': []
        }
        users = [conv.participant1, conv.participant2]
        user_map = {}
        for i in range(len(users)):
            # other user's id
            j = (i + 1) % 2
            u = users[i]
            obj = {}
            try:
                try:
                    uid = str(u.peer.user_key.user_id)
                    uclass = 'Human'
                except AttributeError:
                    # Bots have no user_key; fall back to the document id.
                    uid = str(u.peer.id)
                    uclass = 'Bot'
                obj['user_class'] = uclass
                obj['user_id'] = uid
                obj['user_external_id'] = u.peer_conversation_guid
                user_map[u.peer] = (uid, uclass)
                other_profile_true = users[j].assigned_profile.persona
                if u.other_peer_profile_selected is not None:
                    other_profile_hyp = u.other_peer_profile_selected.persona
                else:
                    other_profile_hyp = None
                # profile_match: 0 = no guess, 1 = correct, -1 = wrong
                if other_profile_hyp is None:
                    obj['profile_match'] = 0
                elif other_profile_hyp == other_profile_true:
                    obj['profile_match'] = 1
                else:
                    obj['profile_match'] = -1
                other_profile_options = [pr.persona for pr in u.other_peer_profile_options]
                ended_dialog = False
                if 'triggered_dialog_end' in u:
                    if u['triggered_dialog_end']:
                        ended_dialog = True
                obj['dialog_evaluation'] = u.dialog_evaluation_score
                obj['profile'] = u.assigned_profile.persona
                obj['topics'] = u.assigned_profile.topics
                obj['other_profile_options'] = other_profile_options
                obj['ended_dialog'] = ended_dialog
            except Exception:
                # Was a bare `except:` (would also swallow KeyboardInterrupt
                # and SystemExit). Malformed peer documents fall back to
                # best-effort placeholder values.
                obj['dialog_evaluation'] = 5
                obj['profile'] = ''
                obj['topics'] = ['']
                obj['other_profile_options'] = ['']
                obj['ended_dialog'] = True
            training_conv['users'].append(obj)
        for msg in conv.messages:
            msg: Message = msg
            try:
                usr = user_map[msg.sender]
            except Exception:
                # Was a bare `except:`; sender missing from the user map.
                print(msg)
                usr = ['unknown','unknown']
            training_message = {
                #'id': msg.msg_id,
                'sender': usr[0],
                'sender_class': usr[1],
                'text': msg.text,
                'evaluation_score': msg.evaluation_score,
                'system': msg.system,
                'time': msg.time.strftime("%Y-%m-%d %H:%M:%S"),
            }
            training_conv['dialog'].append(training_message)
        training_convs.append(training_conv)
    return training_convs
def export_bot_scores(date_begin=None, date_end=None, daily_stats=False):
# TODO: refactor this shit with pipeline
bot_scores = {}
# ===== maint =====
convs = {}
profiles_obj = PersonProfile.objects
profiles = {str(profile.pk): list(profile.persona) for profile in profiles_obj}
bot_daily_stats = {}
for bot in Bot.objects:
if bot.banned:
continue
bot_id = str(bot.id)
bot_scores[bot_id] = {}
# ===== maint =====
convs[bot_id] = {}
if (date_begin is None) and (date_end is None):
date_begin = '1900-01-01'
date_end = '2500-12-31'
elif (date_begin is not None) and (date_end is None):
date_end = date_begin
datetime_begin = datetime.strptime(f'{date_begin}_00:00:00.000000', "%Y-%m-%d_%H:%M:%S.%f")
datetime_end = datetime.strptime(f'{date_end}_23:59:59.999999', "%Y-%m-%d_%H:%M:%S.%f")
date_args = {'start_time__gte': datetime_begin, 'start_time__lte': datetime_end}
q_date = Q(**date_args)
q_participant1 = Q(participant1__peer=bot)
q_participant2 = Q(participant2__peer=bot)
bot_convs = Conversation.objects(q_date & (q_participant1 | q_participant2))
user_eval_scores = []
profile_selected_scores = []
scored_dialogs = 0
convs_long_short = defaultdict(list)
for bot_conv in bot_convs:
bot_conv: Conversation = bot_conv
bot_conv_id = str(bot_conv.id)
num_messages = len(bot_conv.messages)
count_as_scored = False
conv_date = str(datetime.date(bot_conv.start_time))
num_user_messages = 0
num_bot_messages = 0
if isinstance(bot_conv.participant1.peer, Bot):
peer_bot = bot_conv.participant1
peer_user = bot_conv.participant2
else:
peer_bot = bot_conv.participant2
peer_user = bot_conv.participant1
for message in bot_conv.messages:
message: Message = message
if message.sender == peer_user.peer:
num_user_messages += 1
elif message.sender == peer_bot.peer:
num_bot_messages += 1
long_conv = True if (num_user_messages > 3 and num_bot_messages > 3) else False
convs_long_short[conv_date].append(long_conv)
user_eval_score = peer_user.dialog_evaluation_score
bot_profile = peer_bot.assigned_profile
user_selected_profile = peer_user.other_peer_profile_selected
user_selected_profile_parts = peer_user.other_peer_profile_selected_parts
if user_eval_score is not None:
eval_score_norm = (int(user_eval_score) - 1) / 4
user_eval_scores.append(eval_score_norm)
count_as_scored = count_as_scored | True
if user_selected_profile is not None:
profile_selected_score = int(user_selected_profile == bot_profile)
profile_selected_scores.append(profile_selected_score)
count_as_scored = count_as_scored | True
elif len(user_selected_profile_parts) > 0:
profile_set = set(list(bot_profile.persona))
selected_set = set(list(user_selected_profile_parts))
matched_set = profile_set.intersection(selected_set)
profile_selected_score = len(matched_set) / len(profile_set)
profile_selected_scores.append(profile_selected_score)
count_as_scored = count_as_scored | True
else:
profile_selected_score = None
scored_dialogs = scored_dialogs + (int(count_as_scored))
# ===== maint =====
convs[bot_id][bot_conv_id] = {
'user_eval_score': user_eval_score,
'profile_selected_score': profile_selected_score,
'profile_set': list(bot_profile.persona),
'selected_set': list(user_selected_profile_parts),
'num_messages': num_messages
}
daily_statistics = {}
for date, daily_convs_log_short in convs_long_short.items():
daily_statistics[date] = {}
daily_statistics[date]['dialogs_total'] = len(daily_convs_log_short)
daily_statistics[date]['dialogs_long'] = len([conv for conv in daily_convs_log_short if conv])
daily_statistics[date]['dialogs_short'] = len([conv for conv in daily_convs_log_short if not conv])
bot_daily_stats[bot_id] = daily_statistics
if daily_stats:
bot_scores[bot_id]['daily_statistics'] = bot_daily_stats[bot_id]
bot_scores[bot_id]['user_eval_score'] = 0 if len(user_eval_scores) == 0 else \
sum(user_eval_scores) / len(user_eval_scores)
bot_scores[bot_id]['profile_selected_score'] = 0 if len(profile_selected_scores) == 0 else \
sum(profile_selected_scores) / len(profile_selected_scores)
bot_scores[bot_id]['scored_dialogs'] = scored_dialogs
bot_scores[bot_id]['dialogs_total'] = sum([daily_statistics[date]['dialogs_total']
for date in daily_statistics.keys()])
bot_scores[bot_id]['dialogs_long'] = sum([daily_statistics[date]['dialogs_long']
for date in daily_statistics.keys()])
bot_scores[bot_id]['dialogs_short'] = sum([daily_statistics[date]['dialogs_short']
for date in daily_statistics.keys()])
# ===== maint =====
# return {'scores': bot_scores, 'convs': convs}
def get_default_dict():
return defaultdict(int)
total_daily_statistics = defaultdict(get_default_dict)
for bot in bot_daily_stats.values():
for date, stats in bot.items():
total_daily_statistics[date]['dialogs_total'] += stats['dialogs_total']
total_daily_statistics[date]['dialogs_long'] += stats['dialogs_long']
total_daily_statistics[date]['dialogs_short'] += stats['dialogs_short']
bot_scores['total'] = {}
if daily_stats:
bot_scores['total']['daily_statistics'] = total_daily_statistics
bot_scores['total']['dialogs_total'] = sum([day['dialogs_total'] for day in | |
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#删除股价过低的票
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#暂时不用的列
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
    def real_FE(self):
        """Build the live (latest-trade-date-only) feature set for prediction.

        Reads the real-time snapshot CSVs ('real_now.csv', 'real_adj_now.csv',
        'real_moneyflow_now.csv', 'real_long_now.csv'), derives the same style
        of features as the training pipeline, keeps only the most recent
        trade_date, and writes the result to 'today_train.csv'.

        Side effects: prints intermediate DataFrames; writes 'today_train.csv'.
        Returns: None.
        """
        # Planned version for the new model
        df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
        df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
        df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
        df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        # NOTE(review): 'buy_md_vol'/'sell_md_vol' appear twice in this drop list — harmless duplicates.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        # Net money flow of small and large orders (buy minus sell)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        # 20-day rolling rank position of each money-flow series, per stock
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Lag money-flow features by one day so today's row only uses yesterday's flow
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Left-join so daily quote rows are kept even when a side table has gaps
        df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
        print(df_all)
        #df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Open question left by the original author ("question mark here")
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        #df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
        #df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to text
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required here
        print(df_all)
        ## Exclude STAR Market (688xxx) listings
        #print(df_all)
        df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board classification: 1=ChiNext (30x), 2=Shanghai main (60x), 3=Shenzhen main (00x)
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted (split/dividend corrected) price; rebuilt from yesterday's value plus today's pct_chg
        df_all['adj_factor']=df_all['adj_factor'].fillna(0)
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional (per trade_date) percentile ranks, lagged one day, bucketed into 0..19
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is NOT in free float, then ranked cross-sectionally
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        # Close-vs-high/low positional features over 8- and 25-day windows
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
        df_all,_=FEsingle.HighLowRange(df_all,8)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        #===================================================================================================================================#
        # Limit-hit flag (pct_chg > 9.4 ~ daily limit-up; 4.8..5.2 band presumably for ST-style 5% limits — TODO confirm)
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs pre_close)
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*10//2
        #df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks whose price / turnover is too low
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['chg_rank']>0.7]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['total_mv_rank']<12]
        df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
        df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Keep only the most recent trade_date — this is the "today" prediction set
        month_sec=df_all['trade_date'].max()
        df_all=df_all[df_all['trade_date']==month_sec]
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
        dwdw=1  # leftover debug assignment; has no effect
class FE_a31(FEbase):
    """Feature-engineering variant a31.

    core() builds the training feature set from historical CSVs; real_FE()
    builds the live prediction set for the latest trade date.
    NOTE(review): the original comment said "3-day prediction", but core()
    calls FEsingle.PredictDaysTrend(df_all,5) — confirm which is intended.
    """
    def __init__(self):
        # Stateless: all configuration is hard-coded in core()/real_FE().
        pass

    def core(self,DataSetName):
        """Build the training DataFrame from the five dataset CSV paths.

        DataSetName: sequence of 5 CSV paths — [0] daily quotes, [1] adjust
        factors, [2] up/down limits, [3] money flow, [4] long-horizon data.
        Returns the filtered, feature-complete DataFrame (index reset).
        Side effects: prints intermediate DataFrames.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        # NOTE(review): 'buy_md_vol'/'sell_md_vol' appear twice in this drop list — harmless duplicates.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        # Net money flow of small and large orders (buy minus sell)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
        #df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        #df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        #df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Rolling money-flow sums over 5/12/25-day windows for each flow series
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
        #df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
        #df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
        #df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
        #df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
        #df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
        #df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
        print(df_money_all)
        # Inner joins: training rows must exist in every table
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag ST / otherwise-abnormal stocks: 1 = normal limit band, 0 = suspicious
        df_all['st_or_otherwrong']=0
        df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        # Day-of-week feature (0=Monday) from the YYYYMMDD trade_date
        df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
        df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688xxx) listings
        #print(df_all)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board classification: 1=ChiNext (30x), 2=Shanghai main (60x), 3=Shenzhen main (00x)
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted (split/dividend corrected) price
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional (per trade_date) percentile ranks, lagged one day; total_mv bucketed into 0..19
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is NOT in free float, then ranked cross-sectionally
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Close-vs-high/low positional features over 5/12/25-day windows, plus window-to-window differences
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange(df_all,5)
        df_all,_=FEsingle.HighLowRange(df_all,12)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
        df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
        df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
        df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
        df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
        df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-hit flag (pct_chg > 9.4 ~ daily limit-up)
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguish actually-high vs actually-low share prices)
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        # Multi-day momentum ranks/sums over 3/6/12/24-day windows, plus their differences
        df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSumRank(df_all,24)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,24)
        df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
        df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
        df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
        df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
        df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
        df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs pre_close)
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Build the prediction target over the next 5 days
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low
        df_all=df_all[df_all['close']>2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        # Keep only the smallest market-cap buckets (rank < 6 of 0..19)
        df_all=df_all[df_all['total_mv_rank']<6]
        #df_all=df_all[df_all['total_mv_rank']>2]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now; also exclude limit-hit and abnormal stocks
        df_all=df_all[df_all['high_stop']==0]
        df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
        df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        #df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all

    def real_FE(self):
        """Build the live (latest-trade-date-only) feature set for prediction.

        Reads the real-time snapshot CSVs, derives the live feature columns,
        keeps only the most recent trade_date, and writes 'today_train.csv'.
        Side effects: prints intermediate DataFrames; writes 'today_train.csv'.
        Returns: None.
        """
        # Planned version for the new model
        df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
        df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
        df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
        df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        # NOTE(review): 'buy_md_vol'/'sell_md_vol' appear twice in this drop list — harmless duplicates.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        # Net money flow of small and large orders (buy minus sell)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        # 20-day rolling rank position of each money-flow series, per stock; lagged one day below
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Left-join so daily quote rows are kept even when a side table has gaps
        df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
        print(df_all)
        #df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Open question left by the original author ("question mark here")
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        #df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
        #df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to text
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # the .str accessor prefix is required here
        print(df_all)
        ## Exclude STAR Market (688xxx) listings
        #print(df_all)
        df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # Board classification: 1=ChiNext (30x), 2=Shanghai main (60x), 3=Shenzhen main (00x)
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Adjusted (split/dividend corrected) price; rebuilt from yesterday's value plus today's pct_chg
        df_all['adj_factor']=df_all['adj_factor'].fillna(0)
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional (per trade_date) percentile ranks, lagged one day; total_mv bucketed into 0..19
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        # Fraction of market cap that is NOT in free float, then ranked cross-sectionally
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        # Close-vs-high/low positional features over 8- and 25-day windows
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
        df_all,_=FEsingle.HighLowRange(df_all,8)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        #===================================================================================================================================#
        # Limit-hit flag (pct_chg > 9.4 ~ daily limit-up; 4.8..5.2 band presumably for ST-style 5% limits — TODO confirm)
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute cross-sectional ranks for the three price ratios (open/high/low vs pre_close)
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*10//2
        #df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop stocks whose price / turnover is too low
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['chg_rank']>0.7]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['total_mv_rank']<12]
        df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now
        df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Keep only the most recent trade_date — this is the "today" prediction set
        month_sec=df_all['trade_date'].max()
        df_all=df_all[df_all['trade_date']==month_sec]
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
        dwdw=1  # leftover debug assignment; has no effect
class FE_a31_full(FEbase):
    """Feature-engineering pipeline, variant 'a31_full'.

    core() builds the training feature set from five historical CSV datasets;
    real_FE() builds the matching inference feature set from the 'real_*_now'
    snapshot CSVs and writes it to 'today_train.csv'.

    Depends on the project-level FEsingle helper module and rollingRankSciPyB,
    both defined elsewhere in this file/package.

    Original author's note: "this version switches to a 3-day prediction".
    NOTE(review): core() labels the target via PredictDaysTrend(df_all, 5) —
    confirm the intended prediction horizon.
    """
    def __init__(self):
        # Stateless: all work happens inside core()/real_FE().
        pass
    def core(self,DataSetName):
        """Build the training feature DataFrame.

        DataSetName: sequence of 5 CSV paths —
            [0] daily bars, [1] adjustment factors, [2] daily price limits,
            [3] money flow, [4] additional long-format data.
        Returns the engineered DataFrame with its index reset.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        # Reduce money-flow data to net small/large buy amounts; drop the raw
        # volume and extra-large columns that are not used downstream.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
        #df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        #df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        #df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Rolling money-flow sums over 5/12/25-day windows for each series.
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
        #df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
        #df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
        #df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
        #df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
        #df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
        #df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
        print(df_money_all)
        # Inner-join all datasets on (ts_code, trade_date).
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # ST (special-treatment) flag or other anomalies — currently disabled.
        #df_all['st_or_otherwrong']=0
        #df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
        df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688*) stocks.
        #print(df_all)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # class1: exchange/board category inferred from the ts_code prefix.
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Price after applying the adjustment factor.
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        # 20-day rolling rank of the adjusted price per stock.
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional percentile ranks, shifted one day to avoid lookahead.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Close-vs-high/low positional features over 5/12/25-day windows.
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange(df_all,5)
        df_all,_=FEsingle.HighLowRange(df_all,12)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        # Differences between window lengths capture trend acceleration.
        df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
        df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
        df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
        df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
        df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
        df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up / near-limit flag — disabled in this variant.
        #df_all['high_stop']=0
        #df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actually-high vs -low share prices).
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        # Multi-horizon momentum ranks and raw sums (3/6/12/24 days).
        df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSumRank(df_all,24)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,24)
        df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
        df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
        df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
        df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
        df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
        df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute cross-sectional ranks for the three price ratios (vs pre_close).
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # Lagged ("old") copies of selected features from 1 and 2 days back.
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Label: forward trend over the next 5 days (see class docstring note).
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low.
        df_all=df_all[df_all['close']>2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        df_all=df_all[df_all['total_mv_rank']<6]
        #df_all=df_all[df_all['total_mv_rank']>2]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now.
        #df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all
    def real_FE(self):
        """Build inference features from the 'real_*_now' snapshot CSVs.

        Keeps only the most recent trade_date and writes the result to
        'today_train.csv'. Returns nothing.
        NOTE(review): the feature lists here differ from core() (e.g. this
        method includes 'pst_amount_rank_12' in OldFeaturesRank) — confirm
        train/inference feature parity.
        """
        # Planned version for the new model.
        df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
        df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
        df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
        df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
        # Same money-flow reduction as core().
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        # 20-day rolling rank positions of the money-flow series, lagged 1 day.
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
        df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
        df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
        df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Left-joins here (core() uses inner joins) to keep every live stock.
        df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
        print(df_all)
        #df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        # Question mark here (original author unsure).
        #df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        #df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
        #df_all.drop(['change','vol'],axis=1,inplace=True)
        df_all['ts_code'] = df_all['ts_code'].astype('str') # convert the original int dtype to string
        df_all['ts_code'] = df_all['ts_code'].str.zfill(6) # must use the .str accessor when operating on it
        print(df_all)
        ## Exclude STAR Market (688*) stocks.
        #print(df_all)
        df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # class1: exchange/board category inferred from the ts_code prefix.
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Price after applying the adjustment factor; reconstructed from the
        # prior day's adjusted close and today's pct_chg.
        df_all['adj_factor']=df_all['adj_factor'].fillna(0)
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
        df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
        #===================================================================================================================================#
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        # NOTE(review): windows here are 8/25 vs 5/12/25 in core() — confirm.
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
        df_all,_=FEsingle.HighLowRange(df_all,8)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        #===================================================================================================================================#
        # Limit-up / near-limit flag.
        df_all['high_stop']=0
        df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        # 1-day features.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.AmountChgRank(df_all,12)
        # Compute cross-sectional ranks for the three price ratios (vs pre_close).
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*10//2
        #df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
        # Drop low-priced stocks (original comment said "low market cap";
        # the filter is actually on close price).
        df_all=df_all[df_all['close']>3]
        #df_all=df_all[df_all['chg_rank']>0.7]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['total_mv_rank']<12]
        df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        # Columns not used for now.
        df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['high_stop'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Keep only the latest trade date (today's snapshot).
        month_sec=df_all['trade_date'].max()
        df_all=df_all[df_all['trade_date']==month_sec]
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        df_all.to_csv('today_train.csv')
        dwdw=1  # dummy statement; presumably a debugger breakpoint anchor — TODO confirm
class FE_a29_full(FEbase):
#这个版本变为3天预测
def __init__(self):
pass
    def core(self,DataSetName):
        """Build the training feature DataFrame for variant 'a29_full'.

        DataSetName: sequence of 5 CSV paths —
            [0] daily bars, [1] adjustment factors, [2] daily price limits,
            [3] money flow, [4] additional long-format data.
        Returns the engineered DataFrame with its index reset.

        Near-identical to FE_a31_full.core(); the visible difference is that
        the total_mv_rank<6 filter near the end is disabled here.
        Relies on the project-level FEsingle helpers and rollingRankSciPyB.
        """
        df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
        df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
        df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
        df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
        df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
        # Reduce money-flow data to net small/large buy amounts; drop the raw
        # volume and extra-large columns that are not used downstream.
        df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
        df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
        df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
        df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
        df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
        df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        #df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
        #df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
        #df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
        #df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
        #df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
        #df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
        # Rolling money-flow sums over 5/12/25-day windows for each series.
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
        df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
        #df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
        #df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
        #df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
        #df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
        #df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
        #df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
        print(df_money_all)
        # Inner-join all datasets on (ts_code, trade_date).
        df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
        df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
        df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # ST (special-treatment) flag or other anomalies — currently disabled.
        #df_all['st_or_otherwrong']=0
        #df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
        df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
        df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
        ## Exclude STAR Market (688*) stocks.
        #print(df_all)
        df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
        # class1: exchange/board category inferred from the ts_code prefix.
        df_all['class1']=0
        df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
        df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
        df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
        #===================================================================================================================================#
        # Price after applying the adjustment factor.
        df_all['real_price']=df_all['close']*df_all['adj_factor']
        #df_all['real_open']=df_all['adj_factor']*df_all['open']
        #===================================================================================================================================#
        # 20-day rolling rank of the adjusted price per stock.
        df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
        # Cross-sectional percentile ranks, shifted one day to avoid lookahead.
        df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
        df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
        df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
        df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
        df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
        #df_all['pb_rank']=df_all['pb_rank']*10//1
        df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
        df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
        df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
        #df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
        df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
        df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
        #df_all['ps_ttm']=df_all['ps_ttm']*10//1
        #===================================================================================================================================#
        # Close-vs-high/low positional features over 5/12/25-day windows.
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
        df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
        df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
        df_all,_=FEsingle.HighLowRange(df_all,5)
        df_all,_=FEsingle.HighLowRange(df_all,12)
        df_all,_=FEsingle.HighLowRange(df_all,25)
        # Differences between window lengths capture trend acceleration.
        df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
        df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
        df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
        df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
        df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
        df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
        df_all.drop(['change','vol'],axis=1,inplace=True)
        #===================================================================================================================================#
        #df_all['mvadj']=1
        #df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
        #df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
        #df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
        #df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
        #df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Limit-up / near-limit flag — disabled in this variant.
        #df_all['high_stop']=0
        #df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
        #df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actually-high vs -low share prices).
        #df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
        #df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features.
        df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
        #df_all['chg_rank']=df_all['chg_rank']*10//2
        df_all['pct_chg_abs']=df_all['pct_chg'].abs()
        df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
        # Multi-horizon momentum ranks and raw sums (3/6/12/24 days).
        df_all=FEsingle.PctChgAbsSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,3)
        df_all=FEsingle.PctChgSumRank(df_all,6)
        df_all=FEsingle.PctChgSumRank(df_all,12)
        df_all=FEsingle.PctChgSumRank(df_all,24)
        df_all=FEsingle.PctChgSum(df_all,3)
        df_all=FEsingle.PctChgSum(df_all,6)
        df_all=FEsingle.PctChgSum(df_all,12)
        df_all=FEsingle.PctChgSum(df_all,24)
        df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
        df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
        df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
        df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
        df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
        df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
        #df_all=FEsingle.AmountChgRank(df_all,12)
        #df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute cross-sectional ranks for the three price ratios (vs pre_close).
        dolist=['open','high','low']
        df_all['pct_chg_r']=df_all['pct_chg']
        for curc in dolist:
            buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
            df_all[curc]=buffer
            df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
            #df_all[curc]=df_all[curc]*9.9//2
        # Lagged ("old") copies of selected features from 1 and 2 days back.
        df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
        df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
        #df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
        df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
        #df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        # Label: forward trend over the next 5 days.
        df_all=FEsingle.PredictDaysTrend(df_all,5)
        #df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
        #df_all.drop(['mvadj'],axis=1,inplace=True)
        df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Drop stocks whose price is too low.
        df_all=df_all[df_all['close']>2]
        #df_all=df_all[df_all['8_pct_rank_min']>0.1]
        #df_all=df_all[df_all['25_pct_rank_max']>0.1]
        #df_all=df_all[df_all['total_mv_rank']<6]
        #df_all=df_all[df_all['total_mv_rank']>2]
        df_all=df_all[df_all['amount']>15000]
        #df_all=df_all[df_all['circ_mv_pct']>3]
        #df_all=df_all[df_all['ps_ttm']>3]
        #df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now.
        #df_all=df_all[df_all['high_stop']==0]
        #df_all=df_all[df_all['st_or_otherwrong']==1]
        #'tomorrow_chg'
        df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
        #df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
        df_all.dropna(axis=0,how='any',inplace=True)
        print(df_all)
        df_all=df_all.reset_index(drop=True)
        return df_all
def real_FE(self):
#新模型预定版本
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#这里打一个问号
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #将原本的int数据类型转换为文本
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #用的时候必须加上.str前缀
print(df_all)
##排除科创版
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# adjusted price: close scaled by the adjustment factor, lagged one day per
# stock, then rolled forward by the day's percent change
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
# rolling 20-day rank position of the adjusted price within each stock
# NOTE(review): rollingRankSciPyB is an external helper — semantics assumed from its name
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
# cross-sectional percentile rank of total market value, lagged one day,
# then bucketed into integer bins 0..19
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
# cross-sectional percentile rank of price-to-book, lagged one day
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
# share of market value that is NOT free-float, ranked cross-sectionally, lagged
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
# cross-sectional percentile rank of price-to-sales (TTM), lagged one day
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
# close vs rolling high/low features over 8- and 25-day windows
# (FEsingle helpers — behavior assumed from names; verify in FEsingle module)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# limit-move flag: pct_chg > 9.4 (near the 10% daily limit) or ~5%
# (presumably the 5% ST limit) — NOTE(review): both cases set the same flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day return ranks (cross-sectional, per trade date)
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
# multi-day cumulative-return features (3/6/12 days) and amount-change rank
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# compute cross-sectional ranks for three price ratios:
# open/high/low expressed as percent change vs previous close
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
    buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
    df_all[curc]=buffer
    df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
    #df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
# carry selected features forward by one day
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
| |
is the rebinning factor, e.g it returns::
xr = array([x[k*pack:k*(pack+1)].sum()/pack for k in range(int(floor((stop-start)/pack)))])
strstp = [start,stop] is a list of slice indices
rebinning of x, y [,e] is done on the slice truncated to the approrpiate pack multiple, stopm
x[start:stopm], y[start:stopm], [e[start:stopm]]
use either::
xr,yr = rebin(x,y,strstp,pack)
or::
xr,yr,eyr = rebin(x,y,strstp,pack,ey) # the 5th is y error
'''
from numpy import floor, sqrt, empty
start,stop = strstp
m = int(floor((stop-start)/pack)) # length of rebinned xb
mn = m*pack # length of x slice
xx =x[start:start+mn] # slice of the first 1d array
xx = xx.reshape(m,pack) # temporaty 2d array
xr = xx.sum(1)/pack # rebinned first ndarray
if len(y.shape)==1:
yb = empty(m)
yy = y[start:start+mn] # slice row
yy = yy.reshape(m,pack) # temporaty 2d
yr = yy.sum(1)/pack # rebinned row
if e is not None:
ey = e[start:start+mn] # slice row
ey = ey.reshape(m,pack) # temporaty 2d
er = sqrt((ey**2).sum(1))/pack # rebinned row - only good for ISIS
elif len(y.shape)==2:
nruns = y.shape[0] # number of runs
yr = empty((nruns,m))
if e is not None:
er = empty((nruns,m))
for k in range(nruns): # each row is a run
yy = y[k][start:start+mn] # slice row
yy = yy.reshape(m,pack) # temporaty 2d
yr[k] = yy.sum(1)/pack # rebinned row
if e is not None:
ey = e[k][start:start+mn] # slice row
ey = ey.reshape(m,pack) # temporaty 2d
er[k] = sqrt((ey**2).sum(1))/pack # rebinned row
elif len(y.shape)==3:
ngroups,nruns = y.shape[0:2] # number of groups, runs
yr = empty((ngroups,nruns,m))
if e is not None:
er = empty((ngroups,nruns,m))
for k in range(ngroups):
for j in range(nruns):
yy = y[k][j][start:start+mn] # slice row
yy = yy.reshape(m,pack) # temporaty 2d
yr[k][j] = yy.sum(1)/pack # rebinned row
if e is not None:
ey = e[k][j][start:start+mn] # slice row
ey = ey.reshape(m,pack) # temporaty 2d
er[k][j] = sqrt((ey**2).sum(1))/pack # rebinned row
if e is not None:
return xr,yr,er
else:
return xr,yr
def rebin_decay(x,yf,yb,bf,bb,strstp,pack):
    '''
    Rebin decay histograms (extensive counts) together with their time axis.
    input:
        x is 1D intensive (time)
        yf, yb 1D, 2D, 3D extensive arrays to be rebinned
        bf, bb are scalars or arrays (see musuite.single_for_back_counts and musuite.single_multigroup_for_back_counts)
        pack > 1 is the rebinning factor, e.g it returns::
            xr = array([x[k*pack:k*(pack+1)].sum()/pack for k in range(int(floor((stop-start)/pack)))])
            yr = array([y[k*pack:k*(pack+1)].sum() for k in range(int(floor((stop-start)/pack)))])
        strstp = [start,stop] is a list of slice indices
    rebinning of x,y is done on the slice truncated to the appropriate pack multiple, stopm
        x[start:stopm], y[start:stopm]
    use::
        xr,yfr, ybr, bfr, bbr, yfmr, ybmr = rebin_decay(x,yf,yb,bf,bb,strstp,pack)
    NOTE(review): for 2D/3D input, bfr, bbr, yfmr, ybmr keep only the values of
    the last row processed, as in the original implementation — confirm intended.
    '''
    from numpy import floor, sqrt, exp, empty, mean
    from mujpy.aux.aux import TauMu_mus
    start,stop = strstp
    m = int(floor((stop-start)/pack)) # length of rebinned xb
    mn = m*pack                       # length of x slice (truncated to a pack multiple)
    xx = x[start:start+mn]            # slice of the time array
    xx = xx.reshape(m,pack)           # temporary 2d array
    xr = xx.sum(1)/pack               # rebinned time: intensive, so averaged
    bfr, bbr = bf*pack, bb*pack       # backgrounds are per-bin, so they scale with pack
    if len(yf.shape)==1:
        yfr = yf[start:start+mn].reshape(m,pack).sum(1)  # rebinned row, extensive: summed
        ybr = yb[start:start+mn].reshape(m,pack).sum(1)
        # background-subtracted, muon-lifetime-corrected means
        yfmr, ybmr = mean((yfr-bfr)*exp(xr/TauMu_mus())), mean((ybr-bbr)*exp(xr/TauMu_mus()))
    elif len(yf.shape)==2:
        nruns = yf.shape[0]           # number of runs
        yfr = empty((nruns,m))
        ybr = empty((nruns,m))
        for k in range(nruns):        # each row is a run, or a group
            yfr[k] = yf[k][start:start+mn].reshape(m,pack).sum(1)  # rebinned row, extensive
            ybr[k] = yb[k][start:start+mn].reshape(m,pack).sum(1)
            bfr, bbr = bf[k]*pack, bb[k]*pack
            yfmr, ybmr = mean((yfr[k]-bfr)*exp(xr/TauMu_mus())), mean((ybr[k]-bbr)*exp(xr/TauMu_mus()))
    elif len(yf.shape)==3: # probably never used unless calib mode becomes a C2 case
        ngroups,nruns = yf.shape[0:2] # number of groups, runs
        yfr = empty((ngroups,nruns,m))
        # BUGFIX: ybr was allocated (nruns,m) but is indexed [k][j] over
        # ngroups below — it must have the same shape as yfr
        ybr = empty((ngroups,nruns,m))
        for k in range(ngroups):
            for j in range(nruns):
                yfr[k][j] = yf[k][j][start:start+mn].reshape(m,pack).sum(1)  # rebinned row, extensive
                ybr[k][j] = yb[k][j][start:start+mn].reshape(m,pack).sum(1)
                bfr, bbr = bf[k][j]*pack, bb[k][j]*pack
                yfmr, ybmr = mean((yfr[k][j]-bfr)*exp(xr/TauMu_mus())), mean((ybr[k][j]-bbr)*exp(xr/TauMu_mus()))
    return xr,yfr,ybr,bfr,bbr,yfmr,ybmr
def safetry(string):
    '''
    Used by muvalid
    Evaluate *string* in a restricted namespace containing only math
    functions/constants and the dummy parameter name 'a' (= 0.3).
    NOTE(review): still uses eval; do not feed it untrusted input.
    '''
    from math import acos,asin,atan,atan2,ceil,cos,cosh,degrees,e,exp,floor,log,log10,pi,pow,radians,sin,sinh,sqrt,tan,tanh
    safe_list = ['a','acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'degrees', 'e',
                 'exp', 'floor', 'log', 'log10', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh']
    a = 0.3
    # snapshot the local namespace once, then keep only the whitelisted names
    namespace = locals()
    safe_dict = {name: namespace.get(name) for name in safe_list}
    return eval(string,{"__builtins__":None},safe_dict)
def scanms(y,n):
    '''
    Guess the t=0 bin for HIFI data, to be fed to a step fit function.
    Slides a (n bins, n skipped, n bins) window along y, computing the
    two window means m1, m2 and comparing m2-m1 against the combined
    spread sqrt(s21+s22); a step is flagged where the difference exceeds it.
    Returns the step bin index, or -1 if no clean step of width n is found.
    '''
    from numpy import sqrt
    istart, istop = [], []
    for k in range(y.shape[0]-n):
        head = y[k:k+n]
        tail = y[k+2*n:k+3*n]
        m1, m2 = head.sum()/n, tail.sum()/n
        spread = sqrt(((head-m1)**2).sum()/(n-1) + ((tail-m2)**2).sum()/(n-1))
        if m2-m1 > spread:
            if not istart:                 # first detection: open the candidate
                istart = k+n
            elif not istop:                # second detection: start the run
                istop = k+n
            elif istop == k+n-1:           # extend only contiguous runs
                istop = k+n
        # a contiguous run exactly n long marks the step
        if istop and istart and istop-istart == n:
            return istop
    return -1
def spec_prec(a):
    '''
    format specifier precision::
        0 for a > 1.0
        1 for 1.0 > a > 0.1
        2 for 0.1 > a > 0.01 etc.
    NOTE(review): assumes a > 0 — log10 of zero/negative would fail.
    '''
    import numpy as np
    magnitude = np.floor(np.log10(a))       # order of magnitude of a
    return int(abs(min(0., magnitude)))     # only sub-unity values need digits
def shorten(path,subpath):
    '''
    shortens path
    e.g. path, subpath = '/home/myname/myfolder', '/home/myname'
    short = './myfolder'
    Returns path unchanged when subpath does not occur exactly once.
    '''
    parts = path.split(subpath)
    if len(parts)==2:
        return '.'+parts[1]
    # BUGFIX: previously returned the split *list* here (not a string),
    # which would crash any caller doing string operations on the result
    return path
def exit_safe():
    '''
    opens an are you sure box?
    Returns True when the user confirms quitting.
    '''
    from tkinter.messagebox import askyesno
    return askyesno(title='Exit mujpy', message='Really quit?')
def step(x,a,n,dn,b):
    '''
    Error function used as a step model for t=0 in HIFI:
    returns a + b*Phi(x), with Phi the normal CDF centred
    at n with standard deviation dn.
    '''
    from scipy.stats import norm
    rise = norm.cdf(x, n, dn)
    return a + b*rise
def tlog_exists(path,run,ndigits):
    '''
    check if tlog exists under various known filenames types
    '''
    import os
    # PSI bulk naming scheme; add definitions for e.g. filename_isis
    fname = 'run_'+muzeropad(run,ndigits)+'.mon'
    return os.path.exists(os.path.join(path,fname))
def translate(nint,lmin,function_in):
    '''
    input:
        nint: dashbord index,
        lmin: list of minuit indices replacement, one for each dashboard index, -1 is blanck
        function_in: single function string, of dashboard index nint, to be translated
    output:
        function_out: single translated function
    Used in int2_method_key and min2int to replace parameter indices contained in function[nint] e.g.
    ::
        p[0]*2+p[3]
    by translating the internal parameter indices 0 and 3 (written according to the dashboard dict order)
    into the corresponding minuit parameter list indices, skipping shared parameters.
    e.g. if parameter 1 is shared with parameter 0, the minuit parameter index 3
    will be translated to 2 (skipping internal index 1)
    '''
    # search for integers between '[' and ']'
    start = [i+1 for i in findall('[',function_in)]
    # finds index of number after all occurencies of '['
    stop = [i for i in findall(']',function_in)]
    # same for ']'
    # BUGFIX: the previous str.replace-based version replaced *every*
    # occurrence of each digit string, corrupting indices translated
    # earlier (e.g. 0->3 then 3->5 turned p[0]*2+p[3] into p[5]*2+p[5]).
    # Rebuild the string by position so only bracketed indices change.
    pieces = []
    previous = 0
    for i,j in zip(start,stop):
        pieces.append(function_in[previous:i])           # text up to and including '['
        pieces.append(str(lmin[int(function_in[i:j])]))  # translated index
        previous = j                                     # resume at the ']'
    pieces.append(function_in[previous:])
    return ''.join(pieces)
def translate_nint(nint,lmin,function): # NOT USED any more?!!
'''
Used in int2_int and min2int to parse parameters contained in function[nint].value e.g.
::
p[4]*2+p[7]
and translate the internal parameter indices 4 and 7 (written according to the gui parameter list order)
into the corresponding minuit parameter list indices, that skips shared and fixed parameters.
e.g. if parameter 6 is shared with parameter 4 and parameter 2 is fixed, the minuit parameter indices
will be 3 instead of 4 (skipping internal index 2) and 5 instead of 7 (skipping both 2 and 6)
Returns lmin[nint]
'''
string = function[nint].value
# | |
<reponame>dendisuhubdy/tomlplusplus<gh_stars>0
#!/usr/bin/env python3
# This file is a part of toml++ and is subject to the the terms of the MIT license.
# Copyright (c) 2019-2020 <NAME> <<EMAIL>>
# See https://github.com/marzer/tomlplusplus/blob/master/LICENSE for the full license text.
import sys
import re
import os
import os.path as path
import math
import requests
import traceback
#### SETTINGS / MISC #########################################
class Settings:
	# Global code-generation switches for this script.
	# binary_bitmasks: emit bitmask constants in binary (0b...) instead of hex (0x...)
	binary_bitmasks = False
	# switch_case_limits: maximum number of switch cases per nesting level;
	# levels beyond the end of the list reuse the last entry
	switch_case_limits = [64, 8]
def make_literal(codepoint):
	# Render a codepoint as a C++ char32_t literal: codepoints beyond the
	# BMP need the 8-digit \U escape, the rest use the 4-digit \u form.
	if (codepoint > 0xFFFF):
		return "U'\\U{:08X}'".format(codepoint)
	return "U'\\u{:04X}'".format(codepoint)
def make_bitmask(codepoint, bits = 64):
	# Render a value as a C++ bitmask constant.  Settings.binary_bitmasks
	# selects binary (0b...) vs hex (0x...) form; bits selects the 64-bit
	# (ull) vs 32-bit (u) suffix and zero-padding width.
	wide = bits > 32
	if (Settings.binary_bitmasks):
		fmt = "0b{:064b}ull" if wide else "0b{:032b}u"
	else:
		fmt = "0x{:X}ull" if wide else "0x{:X}u"
	return fmt.format(codepoint)
def make_mask_from_indices(indices):
	# OR together one set bit per index.
	mask = 0
	for bit in indices:
		mask |= (1 << bit)
	return mask
def range_first(r):
	# Ranges are stored either as a bare int or an inclusive (first, last) tuple.
	return r if isinstance(r, int) else r[0]
def range_last(r):
	# Ranges are stored either as a bare int or an inclusive (first, last) tuple.
	return r if isinstance(r, int) else r[1]
def is_pow2(v):
	# Classic bit trick: clearing the lowest set bit leaves zero iff at
	# most one bit was set (note this is also true for v == 0).
	return (v & (v - 1)) == 0
def calculate_subdivisions(span_size, level):
	'''
	Choose how many subchunks a span of codepoints should be split into.
	Small-ish spans are divided so each subchunk fits a 64-bit bitmask;
	larger spans try to find a case count (bounded by
	Settings.switch_case_limits per nesting level) yielding power-of-2
	subchunk sizes, falling back to any size-reducing divisor.
	Always returns an int (possibly 1, meaning "don't subdivide").
	'''
	# if it's a relatively small span, divide it such the effective size of each subchunk
	# would be less than or equal to 64 so we'll generate bitmask ops
	if (64 < span_size <= 4096):
		return int(math.ceil(span_size / 64))
	case_limit = Settings.switch_case_limits[min(len(Settings.switch_case_limits)-1, level)]
	# try to find a divisor that will yield a power-of-2 size
	subdivs = case_limit
	while (subdivs > 1):
		subdiv_size = int(math.ceil(span_size / float(subdivs)))
		if (subdiv_size > 1 and subdiv_size < span_size and is_pow2(subdiv_size)):
			return subdivs
		subdivs -= 1
	# couldn't find divisor that would yield a power-of-2 size
	subdivs = case_limit
	while (subdivs > 1):
		subdiv_size = int(math.ceil(span_size / float(subdivs)))
		if (subdiv_size > 1 and subdiv_size < span_size):
			return subdivs
		# BUGFIX: was 'subdivs /= 2', which made subdivs a float; a float
		# return value would crash the caller's range(subchunk_count)
		subdivs //= 2
	return subdivs
#### CHUNK ###################################################
class Chunk:
	'''
	A contiguous span of codepoints [first, last] holding the set of
	contained ranges, used to emit a C++ membership-test function.
	Large chunks subdivide recursively into subchunks so the generated
	code becomes nested switches over cheap bitmask/comparison leaves.
	'''
	def __init__(self, first, last, level=0):
		'''Create an empty chunk covering [first, last] at the given nesting level.'''
		self.first = int(first)
		self.last = int(last)
		self.level = level
		self.span_size = (self.last - self.first) + 1
		self.count = 0					# number of codepoints actually set
		self.ranges = []				# ascending ints or inclusive (first, last) tuples
		self.subchunks = None			# populated by subdivide()
		self.subchunk_size = 0
		self.first_set = self.last + 1	# lowest set codepoint (sentinel: nothing set yet)
		self.last_set = -1				# highest set codepoint (sentinel: nothing set yet)
		self.first_unset = self.first	# first gap at the low end of the span
		self.all_div_by = None			# common divisor of all set codepoints, if any
		self.all_div_by_add = None		# offset applied before the divisor test
	def low_range_mask(self):
		'''
		Build a mask of which of the (up to) 64 slots starting at the
		lowest set codepoint are set; used for the bitmask leaf forms.
		'''
		if self.count == 0:
			return 0
		mask = 0
		bits = 0
		prev_last_unset = -1
		for r in self.ranges:
			first = range_first(r)
			last = range_last(r)
			count = (last - first) + 1
			# advance over the unset gap since the previous range
			while (prev_last_unset >= 0 and prev_last_unset < first and bits < 64):
				prev_last_unset += 1
				bits += 1
			if (bits >= 64):
				break
			while (count > 0 and bits < 64):
				mask |= (1 << bits)
				bits += 1
				count -= 1
			if (bits >= 64):
				break
			prev_last_unset = last + 1
		return mask
	def add(self, first, last = None):
		'''Mark codepoint first (or the inclusive range [first, last]) as set.'''
		f = int(first)
		num_added = 0	# NOTE(review): never used
		if (last is None or first == last):
			self.ranges.append(f)
			self.count += 1
			self.last_set = max(self.last_set, f)
			if (self.first_unset == f):
				self.first_unset = f + 1
		else:
			l = int(last)
			self.ranges.append((f, l))
			self.count += (l - f) + 1
			self.last_set = max(self.last_set, l)
			if (self.first_unset == f):
				self.first_unset = l + 1
		self.first_set = min(self.first_set, f)
	def analyze(self):
		'''
		Search for a divisor div (and offset add) such that every set
		codepoint satisfies (cp + add) % div == 0, enabling a modulo
		expression instead of a table/switch in the generated code.
		'''
		if (self.count > 0 and (self.first != self.first_set or self.last != self.last_set)):
			raise Exception('cannot call analyze() on an untrimmed Chunk')
		self.all_div_by = None
		self.all_div_by_add = None
		if (self.span_size <= 1):
			return
		for div in range(2, 51):
			for add in range(0, 50):
				divisible = None
				for r in self.ranges:
					first = range_first(r)
					last = range_last(r)
					if (last < self.first_set):
						continue
					if (first > self.last_set):
						break
					first = max(first, self.first_set)
					last = min(last, self.last_set)
					if (divisible is None):
						divisible = True
					for cp in range(first, last+1):
						divisible = divisible and (((cp + add) % div) == 0)
						if not divisible:
							break
					if not divisible:
						break
				if divisible is not None and divisible:
					self.all_div_by = div
					if add != 0:
						self.all_div_by_add = add
					return
	def trim(self):
		'''Shrink [first, last] to the actually-set extremes (no-op once subdivided).'''
		if (self.subchunks is not None
			or self.count == 0
			or (self.first_set == self.first and self.last_set == self.last)):
			return
		self.first = self.first_set
		self.last = self.last_set
		self.span_size = (self.last - self.first) + 1
	def subdivide(self):
		'''
		Split this chunk into subchunks when no single cheap expression
		(see return_value_string()) can cover it; distributes the ranges
		among the children, then trims, analyzes and subdivides each.
		'''
		if (self.count > 0 and (self.first != self.first_set or self.last != self.last_set)):
			raise Exception('cannot call subdivide() on an untrimmed Chunk')
		# bail out when a single-expression leaf will handle this chunk
		if (self.subchunks is not None
			or self.count >= self.span_size - 1
			or self.count <= 1
			or (self.last_set - self.first_set) + 1 <= 64
			or self.count == (self.last - self.first_set) + 1
			or self.count == (self.first_unset - self.first)
			or self.count == (self.last_set - self.first_set) + 1
			or (len(self.ranges) == 2 and range_first(self.ranges[0]) == self.first and range_last(self.ranges[1]) == self.last)
			or len(self.ranges) <= 4
			or self.all_div_by is not None
			):
			return
		subchunk_count = calculate_subdivisions(self.span_size, self.level)
		if (subchunk_count <= 1):
			return
		subchunk_size = int(math.ceil(self.span_size / float(subchunk_count)))
		if (subchunk_size <= 4):
			return
		self.subchunks = []
		self.subchunk_size = subchunk_size
		for subchunk in range(subchunk_count):
			self.subchunks.append(
				Chunk(
					self.first + (subchunk * self.subchunk_size),
					min(self.first + (((subchunk + 1) * self.subchunk_size) - 1), self.last),
					self.level + 1
				)
			)
		# distribute ranges, splitting those straddling a subchunk boundary
		for r in self.ranges:
			if (isinstance(r, int)):
				subchunk = int((r - self.first) / self.subchunk_size)
				self.subchunks[subchunk].add(r)
			else:
				start_chunk = int((r[0] - self.first) / self.subchunk_size)
				end_chunk = int((r[1] - self.first) / self.subchunk_size)
				for subchunk in range(start_chunk, end_chunk+1):
					self.subchunks[subchunk].add(
						max(r[0], self.subchunks[subchunk].first),
						min(r[1], self.subchunks[subchunk].last),
					)
		#self.ranges = None
		for subchunk in self.subchunks:
			subchunk.trim()
			subchunk.analyze()
			subchunk.subdivide()
	def always_returns_true(self):
		'''True when every codepoint in the span is set.'''
		return self.count == self.span_size
	def always_returns_false(self):
		'''True when no codepoint in the span is set.'''
		return self.count == 0
	def print_subchunk_case(self, subchunk_index, output_file, level, indent):
		'''Emit one 'case N:' line (with braces when the subchunk nests a switch).'''
		print("{}\tcase {}: ".format(indent, subchunk_index), end='', file=output_file)
		if (self.subchunks[subchunk_index].count == self.subchunks[subchunk_index].span_size):
			self.subchunks[subchunk_index].print(output_file, level + 1, (self.first, self.last))
		else:
			if (self.subchunks[subchunk_index].subchunks is not None and self.subchunks[subchunk_index].span_size > 64):
				print("\n{}\t{{".format(indent), file=output_file)
			self.subchunks[subchunk_index].print(output_file, level + 1, (self.first, self.last))
			if (self.subchunks[subchunk_index].subchunks is not None and self.subchunks[subchunk_index].span_size > 64):
				print("{}\t}}".format(indent), file=output_file)
	def return_value_string(self):
		'''
		Return a single C++ boolean expression deciding membership for
		the whole chunk, or None when no single cheap expression applies.
		'''
		# return true; (completely full range)
		if (self.always_returns_true()):
			return 'true'
		# return false; (completely empty range)
		elif (self.always_returns_false()):
			return 'false'
		# return cp == A
		elif (self.count == 1):
			return 'codepoint == {}'.format(make_literal(self.ranges[0]))
		# return cp != A
		elif (self.count == self.span_size - 1):
			return 'codepoint != {}'.format(make_literal(self.first_unset))
		# return cp < A
		elif (self.count == (self.first_unset - self.first)):
			return 'codepoint < {}'.format(make_literal(self.first_unset))
		# return cp >= A
		elif (self.count == (self.last - self.first_set) + 1):
			return 'codepoint >= {}'.format(make_literal(self.first_set))
		# return cp >= A && cp <= B
		elif (self.count == (self.last_set - self.first_set) + 1):
			return 'codepoint >= {} && codepoint <= {}'.format(make_literal(self.first_set), make_literal(self.last_set))
		# return cp <= A || cp >= B
		elif (len(self.ranges) == 2 and range_first(self.ranges[0]) == self.first and range_last(self.ranges[1]) == self.last):
			return 'codepoint <= {} || codepoint >= {}'.format(make_literal(range_last(self.ranges[0])), make_literal(range_first(self.ranges[1])))
		# return cp % X == 0
		elif (self.all_div_by is not None):
			if (self.all_div_by_add is not None):
				return '(static_cast<uint_least64_t>(codepoint) {} {}ull) % {}ull == 0ull'.format(
					'-' if self.all_div_by_add < 0 else '+',
					abs(self.all_div_by_add),
					self.all_div_by
				)
			else:
				return 'static_cast<uint_least64_t>(codepoint) % {}ull == 0ull'.format(self.all_div_by)
		# return cp & A (32-bit)
		elif ((self.last_set - self.first_set) + 1 <= 32):
			if (self.first_set == self.first):
				return '(1u << (static_cast<uint_least32_t>(codepoint) - 0x{:X}u)) & {}'.format(self.first_set, make_bitmask(self.low_range_mask(), 32))
			else:
				return 'codepoint >= {} && ((1u << (static_cast<uint_least32_t>(codepoint) - 0x{:X}u)) & {})'.format(
					make_literal(self.first_set), self.first_set, make_bitmask(self.low_range_mask(), 32))
		# return cp & A (64-bit)
		elif ((self.last_set - self.first_set) + 1 <= 64):
			if (self.first_set == self.first):
				return '(1ull << (static_cast<uint_least64_t>(codepoint) - 0x{:X}ull)) & {}'.format(self.first_set, make_bitmask(self.low_range_mask()))
			else:
				return 'codepoint >= {} && ((1ull << (static_cast<uint_least64_t>(codepoint) - 0x{:X}ull)) & {})'.format(
					make_literal(self.first_set), self.first_set, make_bitmask(self.low_range_mask()))
		return None
	def print(self, output_file, level = 0, parent_range = None):
		'''
		Emit the C++ code for this chunk: a single return expression when
		possible, otherwise a switch over subchunks or an OR-chain of
		per-range comparisons.
		'''
		indent = '\t\t' + ('\t' * (2 * level))
		if (parent_range is None):
			parent_range = (0, 0x7FFFFFFF)
		rvs = self.return_value_string()
		# return ______;
		if (rvs is not None):
			print("return {};".format(rvs), file=output_file)
		# switch (cp)
		elif (self.subchunks is not None):
			# guard against non-exhaustive ranges (we may have been trimmed)
			if (self.first > parent_range[0] and self.last < parent_range[1]):
				print("{}if (codepoint < {} || codepoint > {})\n{}\treturn false;\n".format(indent, make_literal(self.first), make_literal(self.last), indent), file=output_file)
			elif (self.first > parent_range[0]):
				print("{}if (codepoint < {})\n{}\treturn false;\n".format(indent, make_literal(self.first), indent), file=output_file)
			elif (self.last < parent_range[1]):
				print("{}if (codepoint > {})\n{}\treturn false;\n".format(indent, make_literal(self.last), indent), file=output_file)
			# see if we can avoid emitting a switch altogether, or reduce its scope
			always_true = []
			always_false = []
			not_always_true_or_false = []
			for subchunk_index in range(len(self.subchunks)):
				even = (subchunk_index % 2) == 0
				if self.subchunks[subchunk_index].always_returns_true():
					always_true.append(subchunk_index)
				elif self.subchunks[subchunk_index].always_returns_false():
					always_false.append(subchunk_index)
				else:
					not_always_true_or_false.append(subchunk_index)
			selector = '(static_cast<uint_least32_t>(codepoint) - 0x{:X}u) / {}u'.format(self.first, self.subchunk_size)
			# return selector & mask
			if (len(always_true) + len(always_false) == len(self.subchunks) and len(self.subchunks) <= 64):
				print("{}return ({}) & {};".format(indent, selector, make_bitmask(make_mask_from_indices(always_true))), file=output_file)
			# return selector == A ? true : selector & mask
			#elif (len(not_always_true_or_false) == 1
			#	and (len(always_true) + len(always_false)) == len(self.subchunks)-1
			#	and len(self.subchunks) <= 64):
			#	print('{}const auto selector = {}; //kek'.format(indent, selector), file=output_file)
			#	print('{}return selector == {}u ? true : selector & {};'.format(
			#		indent,
			#		not_always_true_or_false[0],
			#		make_bitmask(make_mask_from_indices(always_true))
			#		),
			#		file=output_file
			#	)
			# switch(selector)
			else:
				print("{}TOML_ASSUME_CODEPOINT_BETWEEN({}, {});".format(indent, make_literal(self.first), make_literal(self.last)), file=output_file)
				print("{}switch ({})\n{}{{".format(indent, selector, indent), file=output_file)
				if (len(always_true) == 0 and len(always_false) == 0):
					for subchunk_index in range(len(self.subchunks)):
						self.print_subchunk_case(subchunk_index, output_file, level, indent)
					print("{}\tTOML_NO_DEFAULT_CASE;".format(indent), file=output_file)
				elif (len(always_true) > len(always_false)):
					for subchunk_index in range(len(self.subchunks)):
						if not self.subchunks[subchunk_index].always_returns_true():
							self.print_subchunk_case(subchunk_index, output_file, level, indent)
					print("{}\tdefault: return true;".format(indent), file=output_file)
				else:
					for subchunk_index in range(len(self.subchunks)):
						if not self.subchunks[subchunk_index].always_returns_false():
							self.print_subchunk_case(subchunk_index, output_file, level, indent)
					print("{}\tdefault: return false;".format(indent), file=output_file)
				print("{}}}".format(indent), file=output_file)
			print("{}//# chunk summary: {} codepoints from {} ranges (spanning a search area of {})".format(indent, self.count, len(self.ranges), self.span_size), file=output_file)
		# return cp == A || cp == B ...
		else:
			print("return", end='', file=output_file)
			line_weight = 0
			first_line = True
			for range_idx in range(0, len(self.ranges)):
				r = self.ranges[range_idx]
				# single comparisons weigh 1, two-sided range tests weigh 2
				range_weight = (1 if (
					isinstance(r, int)
					or (range_idx == 0 and r[0] == self.first)
					or (range_idx == (len(self.ranges)-1) and r[1] == self.last))
					else 2
				)
				needs_space = True
				if ((line_weight + range_weight) > (4 - (1 if first_line else 0))):
					print("\n\t{}".format(indent), end='', file=output_file)
					line_weight = range_weight
					needs_space = False
					first_line = False
				else:
					line_weight += range_weight
				if (needs_space):
					print(" ", end='', file=output_file)
				if (range_idx > 0):
					print("|| ", end='', file=output_file)
				if (isinstance(r, int)):
					print("codepoint == {}".format(make_literal(r)), end='', file=output_file)
				elif (range_idx == 0 and r[0] == self.first):
					print("codepoint <= {}".format(make_literal(r[1])), end='', file=output_file)
				elif (range_idx == (len(self.ranges)-1) and r[1] == self.last):
					print("codepoint >= {}".format(make_literal(r[0])), end='', file=output_file)
				else:
					print("{}codepoint >= {} && codepoint <= {}{}".format(
						'(' if len(self.ranges) > 1 else '',
						make_literal(r[0]),
						make_literal(r[1]),
						')' if len(self.ranges) > 1 else ''
						),
						end='',
						file=output_file
					)
			print(";", file=output_file)
#### FUNCTION GENERATOR #####################################
def emit_function(name, categories, file, codepoints):
# divide the codepoints up into chunks of ranges
root_chunk = Chunk(codepoints[0][0], codepoints[-1][0])
first_codepoint = -1
last_codepoint = -1
for codepoint, category in codepoints:
if (category in | |
be
parsed by :func:`time.strptime` or if it returns a value which isn't a
time tuple.
Class attributes:
.. attribute:: min
The earliest representable :class:`DateTime`.
.. attribute:: max
The latest representable :class:`DateTime`.
.. attribute:: resolution
The smallest possible difference between non-equal :class:`DateTime`
objects, ``timedelta(microseconds=1)``.
Instance attributes (read-only):
.. attribute:: year
Between :const:`MINYEAR` and :const:`MAXYEAR` inclusive.
.. attribute:: month
Between 1 and 12 inclusive.
.. attribute:: day
Between 1 and the number of days in the given month of the given year.
.. attribute:: hour
In ``range(24)``.
.. attribute:: minute
In ``range(60)``.
.. attribute:: second
In ``range(60)``.
.. attribute:: microsecond
In ``range(1000000)``.
.. attribute:: tzinfo
The object passed as the *tzinfo* argument to the :class:`DateTime`
constructor, or ``None`` if none was passed.
Supported operations:
+---------------------------------------+-------------------------------+
| Operation | Result |
+=======================================+===============================+
| ``datetime2 = datetime1 + timedelta`` | \(1) |
+---------------------------------------+-------------------------------+
| ``datetime2 = datetime1 - timedelta`` | \(2) |
+---------------------------------------+-------------------------------+
| ``timedelta = datetime1 - datetime2`` | \(3) |
+---------------------------------------+-------------------------------+
| ``datetime1 < datetime2`` | Compares :class:`DateTime` to |
| | :class:`DateTime`. \(4) |
+---------------------------------------+-------------------------------+
#. datetime2 is a duration of timedelta removed from datetime1, moving
forward in time if ``timedelta.days`` > 0, or backward if
``timedelta.days`` < 0. The result has the same :attr:`tzinfo`
attribute as the input datetime, and datetime2 - datetime1 == timedelta
after. :exc:`OverflowError` is raised if datetime2.year would be smaller
than :const:`MINYEAR` or larger than :const:`MAXYEAR`. Note that no time
zone adjustments are done even if the input is an aware object.
#. Computes the datetime2 such that datetime2 + timedelta == datetime1. As
for addition, the result has the same :attr:`tzinfo` attribute as the
input datetime, and no time zone adjustments are done even if the input
is aware. This isn't quite equivalent to datetime1 + (-timedelta),
because -timedelta in isolation can overflow in cases where datetime1 -
timedelta does not.
#. Subtraction of a :class:`DateTime` from a :class:`DateTime` is defined
only if both operands are naive, or if both are aware. If one is aware
and the other is naive, :exc:`TypeError` is raised.
If both are naive, or both are aware and have the same :attr:`tzinfo`
attribute, the :attr:`tzinfo` attributes are ignored, and the result is
a :class:`timedelta` object *t* such that ``datetime2 + t ==
datetime1``. No time zone adjustments are done in this case.
If both are aware and have different :attr:`tzinfo` attributes, ``a-b``
      acts as if *a* and *b* were first converted to naive UTC datetimes.
      The result is ``(a.replace(tzinfo=None) - a.utcoffset()) -
(b.replace(tzinfo=None)
- b.utcoffset())`` except that the implementation never overflows.
#. *datetime1* is considered less than *datetime2* when *datetime1*
precedes *datetime2* in time.
If one comparand is naive and the other is aware, :exc:`TypeError` is
raised. If both comparands are aware, and have the same :attr:`tzinfo`
attribute, the common :attr:`tzinfo` attribute is ignored and the base
datetimes are compared. If both comparands are aware and have different
:attr:`tzinfo` attributes, the comparands are first adjusted by
subtracting their UTC offsets (obtained from ``self.utcoffset()``).
.. note::
In order to stop comparison from falling back to the default scheme
of comparing object addresses, datetime comparison normally raises
:exc:`TypeError` if the other comparand isn't also a
:class:`DateTime` object. However, ``NotImplemented`` is returned
instead if the other comparand has a :meth:`timetuple` attribute.
This hook gives other kinds of date objects a chance at implementing
mixed-type comparison. If not, when a :class:`DateTime` object is
compared to an object of a different type, :exc:`TypeError` is raised
unless the comparison is ``==`` or ``!=``. The latter cases return
:const:`False` or :const:`True`, respectively.
:class:`DateTime` objects can be used as dictionary keys. In Boolean
contexts, all :class:`DateTime` objects are considered to be true.
Instance methods:
.. method:: date()
Return :class:`date` object with same year, month and day.
.. method:: time()
Return :class:`Time` object with same hour, minute, second and
microsecond. :attr:`tzinfo` is ``None``. See also method
:meth:`timetz`.
.. method:: timetz()
Return :class:`Time` object with same hour, minute, second,
microsecond, and tzinfo attributes. See also method :meth:`time`.
.. method:: replace([year[, month[, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]]]]])
Return a DateTime with the same attributes, except for those attributes
given new values by whichever keyword arguments are specified. Note
that ``tzinfo=None`` can be specified to create a naive DateTime from an
aware DateTime with no conversion of date and time data.
.. method:: astimezone(tz)
Return a :class:`DateTime` object with new :attr:`tzinfo` attribute
*tz*, adjusting the date and time data so the result is the same UTC
time as *self*, but in *tz*'s local time.
*tz* must be an instance of a :class:`tzinfo` subclass, and its
:meth:`utcoffset` and :meth:`dst` methods must not return ``None``.
*self* must be aware (``self.tzinfo`` must not be ``None``, and
``self.utcoffset()`` must not return ``None``).
If ``self.tzinfo`` is *tz*, ``self.astimezone(tz)`` is equal to *self*:
no adjustment of date or time data is performed. Else the result is
local time in time zone *tz*, representing the same UTC time as *self*:
after ``astz = dt.astimezone(tz)``, ``astz - astz.utcoffset()`` will
usually have the same date and time data as ``dt - dt.utcoffset()``. The
discussion of class :class:`tzinfo` explains the cases at Daylight
Saving Time transition boundaries where this cannot be achieved (an
issue only if *tz* models both standard and daylight time).
If you merely want to attach a time zone object *tz* to a DateTime *dt*
without adjustment of date and time data, use ``dt.replace(tzinfo=tz)``.
If you merely want to remove the time zone object from an aware DateTime
*dt* without conversion of date and time data, use
``dt.replace(tzinfo=None)``.
Note that the default :meth:`tzinfo.fromutc` method can be overridden in
a :class:`tzinfo` subclass to affect the result returned by
:meth:`astimezone`. Ignoring error cases, :meth:`astimezone` acts
like::
def astimezone(self, tz):
if self.tzinfo is tz:
return self
# Convert self to UTC, and attach the new time zone object.
utc = (self - self.utcoffset()).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
.. method:: utcoffset()
If :attr:`tzinfo` is ``None``, returns ``None``, else returns
``self.tzinfo.utcoffset(self)``, and raises an exception if the latter
doesn't return ``None``, or a :class:`timedelta` object representing a
whole number of minutes with magnitude less than one day.
.. method:: dst()
If :attr:`tzinfo` is ``None``, returns ``None``, else returns
``self.tzinfo.dst(self)``, and raises an exception if the latter doesn't
return ``None``, or a :class:`timedelta` object representing a whole
number of minutes with magnitude less than one day.
.. method:: tzname()
If :attr:`tzinfo` is ``None``, returns ``None``, else returns
``self.tzinfo.tzname(self)``, and raises an exception if the latter doesn't
return ``None`` or a string object.
.. method:: weekday()
Return the day of the week as an integer, where Monday is 0 and Sunday
is 6. The same as ``self.date().weekday()``. See also
:meth:`isoweekday`.
.. method:: isoweekday()
Return the day of the week as an integer, where Monday is 1 and Sunday
is 7. The same as ``self.date().isoweekday()``. See also
:meth:`weekday`, :meth:`isocalendar`.
.. method:: isocalendar()
Return a 3-tuple, (ISO year, ISO week number, ISO weekday). The same as
``self.date().isocalendar()``.
.. method:: isoformat([sep])
Return a string representing the date and time in ISO 8601 format,
YYYY-MM-DDTHH:MM:SS.mmmmmm or, if :attr:`microsecond` is 0,
YYYY-MM-DDTHH:MM:SS
If :meth:`utcoffset` does not return ``None``, a 6-character string is
appended, giving the UTC offset in (signed) hours and minutes:
YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM or, if :attr:`microsecond` is 0
YYYY-MM-DDTHH:MM:SS+HH:MM
The optional argument *sep* (default ``'T'``) is a one-character
separator, placed between the date and time portions of the result. For
example,
>>> from datetime import tzinfo, timedelta, datetime
>>> class TZ(tzinfo):
... def utcoffset(self, dt): return timedelta(minutes=-399)
...
>>> datetime(2002, 12, 25, tzinfo=TZ()).isoformat(' ')
'2002-12-25 00:00:00-06:39'
"""
pass
class Date(dt.date):
r"""
Represents a date.
This type is returned by the :func:`date` function and represents a date.
A :class:`Date` object represents a date (year, month and day) in an
idealized calendar, the current Gregorian calendar indefinitely | |
= self.elements
if empty in self.show.flatten(): # not even finished yet
return False
for i in range(n ** 2):
if elements != set(self.submatrix(i + 1).flatten()):
return False
elif elements != set(self.row(i).flatten()):
return False
elif elements != set(self.col(i).flatten()):
return False
return True
def itemset(self, entry, value):
    '''(Sudoku, (int, int), int/str) -> None
    Precondition:
    1. value in self.elements
    2. each int in entry is from 0 to self.n ** 2 - 1 inclusive.
    Mutate entry number of self to value.
    >>> q_small = [
    ...     [ 1, '.',   3, '.'],
    ...     ['.',   2, '.', '.'],
    ...     ['.', '.', '.', '.'],
    ...     ['.', '.', '.',   4]
    ... ]
    ...
    >>> q_small = Sudoku(q_small)
    >>> q_small
    Sudoku(
        1 . | 3 .
        . 2 | . .
        --------------+--------------
        . . | . .
        . . | . 4
    n: 2
    elements: 1, 2, 3, 4
    empty: .
    )
    >>> q_small.itemset((0, 1), 4)
    >>> q_small
    Sudoku(
        1 4 | 3 .
        . 2 | . .
        --------------+--------------
        . . | . .
        . . | . 4
    n: 2
    elements: 1, 2, 3, 4
    empty: .
    )
    '''
    # Delegate straight to the underlying grid array; `entry` is a
    # (row, col) pair and `value` overwrites whatever that cell held.
    self.show.itemset(entry, value)
def itemsets(self, entries):
    '''(Sudoku, Candidate or {(int, int): set of ints/strs}) -> None
    Precondition: each element of the value sets is one element of
    self.elements.
    Write every entry of `entries` whose candidate set has been narrowed
    down to exactly one value into self; entries with two or more
    remaining candidates are left untouched. Does nothing for an empty
    collection.
    '''
    if type(entries) == dict:
        empty_form = {}
    elif 'Candidate' in str(type(entries)):
        empty_form = candidate.Candidate({}, elements = self.elements)
    else:
        # Unsupported container type: silently ignore, as before.
        return None
    if entries == empty_form:
        return None
    for cell, remaining in entries.items():
        if len(remaining) == 1:
            # Single candidate left -- commit it to the grid.
            self.itemset(cell, next(iter(remaining)))
def melt(self, include_empty = True):
    '''(Sudoku, bool) -> Candidate
    Return self as a Candidate that maps every cell coordinate to the
    singleton set holding that cell's current symbol. If include_empty
    is False, cells still holding the empty symbol are omitted.
    '''
    size = self.n ** 2
    cells = {
        (r, c): {self.show[(r, c)]}
        for r in range(size)
        for c in range(size)
    }
    if not include_empty:
        blank = self.empty
        # Keep only cells that already carry a real value.
        cells = {
            coord: symbols
            for coord, symbols in cells.items()
            if next(iter(symbols)) != blank
        }
    return candidate.Candidate(cells, elements = self.elements)
def missing(self, s = None, r = None, c = None):
    '''(Sudoku[, int, int, int]) -> set of str
    Precondition:
    1. 1 <= s <= self.n ** 2
    2. 0 <= r <= self.n ** 2 - 1
    3. 0 <= c <= self.n ** 2 - 1
    Return the elements not yet present in submatrix s, row r, or
    column c. Exactly one region is consulted: s takes precedence over
    r, and r over c. If none of the three is given, return None.
    '''
    pool = self.elements
    if s is not None:
        seen = self.submatrix(s).flatten()
    elif r is not None:
        seen = self.row(r).flatten()
    elif c is not None:
        seen = self.col(c).flatten()
    else:
        return None
    return pool.difference(set(seen))
def row(self, r):
    '''(Sudoku, int) -> Array
    Precondition: 0 <= r <= self.n ** 2 - 1
    Return row number r (0-indexed) of the puzzle grid.
    '''
    board = self.show
    # Full-row slice of the underlying 2-D array.
    return board[r, :]
def solve(self, max_trial = 200, quietly = False, seed = None):
'''(Sudoku, int, bool[, int]) -> str, int
Mutate self to the answer form, or until max_trial is met, and
return the time it took to compute the answer and the number of
trials used. seed can be given for reproducibility. Set
quietly = True to display no messages.
'''
trial = 0
import datetime
n = self.n
empty = self.empty
start = datetime.datetime.now()
self.solve_logically()
sudoku_copy = self.copy()
sudoku_copy_melted = sudoku_copy.melt()
if empty in self.show.flatten():
if not quietly:
msg = "Logical approaches weren't enough. " +\
"Solving with | |
# Source: repo 0x326/clingo, file examples/clingo/robots/visualize.py
#!/usr/bin/env python
import clingo
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
# {{{1 class Board
class Board:
    """Ricochet-Robots game board.

    Loads the static board description by grounding ``board.lp`` with
    clingo and extracting facts from its model, then tracks the mutable
    game state: robot positions, the current target, and the remaining
    tail of a computed solution (if any).
    """

    def __init__(self):
        self.size = 1
        self.blocked = set()       # (x, y, dx, dy): moving from (x, y) in (dx, dy) is blocked
        self.barriers = set()      # ('north'|'west', x, y): wall segments for drawing
        self.targets = set()       # (color, shape, x, y) available targets
        self.pos = dict()          # robot color -> current (x, y), 0-based
        self.robots = [{}]
        self.moves = []
        self.current_target = None
        self.solution = None
        # Ground the board encoding once and harvest its facts.
        ctl = clingo.Control()
        ctl.load("board.lp")
        ctl.ground([("base", [])])
        ctl.solve(on_model=self.__on_model)

    def __on_model(self, m):
        """Populate board state from the atoms of the single model.

        Coordinates in the ASP facts are 1-based; everything stored on
        self is converted to 0-based.
        """
        for atom in m.symbols(atoms=True):
            if atom.name == "barrier" and len(atom.arguments) == 4:
                x, y, dx, dy = [n.number for n in atom.arguments]
                # A wall blocks movement in both directions on both of
                # the cells it separates.
                self.blocked.add((x - 1     , y - 1     ,  dx,  dy))
                self.blocked.add((x - 1 + dx, y - 1     , -dx,  dy))
                self.blocked.add((x - 1     , y - 1 + dy,  dx, -dy))
                self.blocked.add((x - 1 + dx, y - 1 + dy, -dx, -dy))
                if dy == 0:
                    self.barriers.add(('west', x if dx == 1 else x - 1, y - 1))
                else:
                    self.barriers.add(('north', x - 1, y if dy == 1 else y - 1))
            elif atom.name == "dim" and len(atom.arguments) == 1:
                self.size = max(self.size, atom.arguments[0].number)
            elif atom.name == "available_target" and len(atom.arguments) == 4:
                c, s, x, y = [(n.number if n.type == clingo.SymbolType.Number else str(n)) for n in atom.arguments]
                self.targets.add((c, s, x - 1, y - 1))
            elif atom.name == "initial_pos" and len(atom.arguments) == 3:
                c, x, y = [(n.number if n.type == clingo.SymbolType.Number else str(n)) for n in atom.arguments]
                self.pos[c] = (x - 1, y - 1)
        # Close off the outer border of the board in all four directions.
        for d in range(0, self.size):
            self.blocked.add((d, 0, 0, -1))
            self.blocked.add((d, self.size - 1, 0, 1))
            self.blocked.add((0, d, -1, 0))
            self.blocked.add((self.size - 1, d, 1, 0))

    def move(self, robot, dx, dy):
        """Slide `robot` in direction (dx, dy) until a wall or another
        robot stops it, and advance/invalidate the stored solution."""
        x, y = self.pos[robot]
        while (not (x, y, dx, dy) in self.blocked and
               not (x + dx, y + dy) in self.pos.values()):
            x += dx
            y += dy
        self.pos[robot] = (x, y)
        # If this move matches the next step of the computed solution,
        # consume it; any other move invalidates the solution.
        if (self.solution is not None and
                len(self.solution) > 0 and
                self.solution[0][0] == robot and
                self.solution[0][1] == dx and
                self.solution[0][2] == dy):
            self.solution.pop(0)
            if len(self.solution) == 0:
                self.solution = None
        else:
            self.solution = None

    def won(self):
        """True when the target's robot stands on the target cell."""
        r, _, x, y = self.current_target
        return self.pos[r] == (x, y)
# {{{1 class Solver
# NOTE: it would be a nice gimmick to make the search interruptible
class Solver:
    """Incremental, asynchronous clingo search for a robot-move plan.

    The plan length (horizon) is grounded incrementally: when the current
    horizon admits no solution, one more time step is grounded and the
    search restarted. Callers drive it via start() / busy() / stop() /
    get().
    """

    def __init__(self, horizon=0):
        self.__horizon = horizon
        self.__prg = clingo.Control(['-t4'])  # 4 solver threads
        self.__future = None                  # handle of the running async solve
        self.__solution = None                # last model's move list, if any
        self.__assign = []                    # externals set for the current query
        self.__prg.load("board.lp")
        self.__prg.load("robots.lp")
        # Ground the base program plus check/state parts up to the
        # initial horizon.
        parts = [("base", []),
                 ("check", [0]),
                 ("state", [0]),
                 ]
        for t in range(1, self.__horizon+1):
            parts.extend([("trans", [t]),
                          ("check", [t]),
                          ("state", [t]),
                          ])
        self.__prg.ground(parts)
        self.__prg.assign_external(clingo.Function("horizon", [self.__horizon]), True)

    def __next(self):
        """Extend the grounded program by one more time step."""
        assert(self.__horizon < 30)  # sanity bound on plan length
        # Retire the old horizon external before activating the new one.
        self.__prg.assign_external(clingo.Function("horizon", [self.__horizon]), False)
        self.__horizon += 1
        self.__prg.ground([("trans", [self.__horizon]),
                           ("check", [self.__horizon]),
                           ("state", [self.__horizon]),
                           ])
        self.__prg.assign_external(clingo.Function("horizon", [self.__horizon]), True)

    def start(self, board):
        """Begin an asynchronous search from the board's current state.

        Robot positions and the current target are injected as external
        atoms (1-based coordinates).
        """
        self.__assign = []
        for robot, (x, y) in board.pos.items():
            self.__assign.append(clingo.Function("pos", [clingo.Function(robot), x+1, y+1, 0]))
        self.__assign.append(clingo.Function("target",
            [ clingo.Function(board.current_target[0])
            , board.current_target[2] + 1
            , board.current_target[3] + 1
            ]))
        for x in self.__assign:
            self.__prg.assign_external(x, True)
        self.__solution = None
        self.__future = self.__prg.solve(on_model=self.__on_model, async_=True)

    def busy(self):
        """Poll the search; returns True while still solving.

        When a search finishes without a model, the horizon is extended
        and the search restarted transparently.
        """
        if self.__future is None:
            return False
        if self.__future.wait(0):
            if self.__solution is None:
                # Unsatisfiable at this horizon: lengthen the plan and retry.
                self.__next()
                self.__future = self.__prg.solve(on_model=self.__on_model, async_=True)
                return True
            else:
                self.__future = None
                return False
        return True

    def stop(self):
        """Cancel a running search and release its externals."""
        if self.__future is not None:
            self.__future.cancel()
            self.__future.wait()
            self.__future = None
            self.get()

    def get(self):
        """Return the found solution (or None) and reset query state."""
        solution = self.__solution
        self.__solution = None
        for x in self.__assign:
            self.__prg.assign_external(x, False)
        self.__assign = []
        return solution

    def __on_model(self, m):
        """Extract the move plan from a model, ordered by time step."""
        self.__solution = []
        for atom in m.symbols(atoms=True):
            if atom.name == "move" and len(atom.arguments) == 4:
                c, x, y, t = [(n.number if n.type == clingo.SymbolType.Number else str(n)) for n in atom.arguments]
                self.__solution.append((c, x, y, t))
        self.__solution.sort(key=lambda x: x[3])
        # Truncate at the first immediately-repeated move; presumably
        # steps past the goal are solver padding -- TODO confirm against
        # robots.lp.
        p = None
        i = 0
        for x in self.__solution:
            if p is not None and \
               p[0] == x[0] and \
               p[1] == x[1] and \
               p[2] == x[2]:
                break
            p = x
            i += 1
        del self.__solution[i:]
# {{{1 class Visualization
class Visualization:
def __init__(self, master, board):
self.__margin = 20
self.__tile_size = 40
self.__canvas_width = None
self.__canvas_height = None
self.__robot_images = {}
self.__target_images = {}
self.__solution_images = []
self.__direction_images = []
self.__entered = set()
self.__slots = {}
self.__highlights = {}
self.__targets = {}
self.__moves = {}
self.__moves_short = {}
self.__robots = {}
self.__barriers = {}
self.__tiles = []
self.__canvas_width = board.size * self.__tile_size + 2 * self.__margin
self.__canvas_height = (1 + board.size) * self.__tile_size + 3 * self.__margin
self.__canvas = Tkinter.Canvas(master, width=self.__canvas_width, height=self.__canvas_height)
self.__canvas.pack()
colors = ['green', 'red', 'blue', 'yellow']
shapes = ['moon', 'sun', 'star', 'saturn']
directions = [('north', 0, -1), ("east", 1, 0), ('south', 0, 1), ('west', -1, 0)]
for orientation in ['left', 'right']:
path = 'img/tile_{orientation}.gif'.format(orientation=orientation)
self.__tiles.append(Tkinter.PhotoImage(file=path))
for direction in ['north', 'west']:
path = 'img/wall_{direction}.gif'.format(direction=direction)
self.__barriers[direction] = (Tkinter.PhotoImage(file=path), -6, -6)
for color in colors:
path = 'img/robot_{color}.gif'.format(color=color)
self.__robots[color] = Tkinter.PhotoImage(file=path)
for shape in shapes:
path = "img/{shape}_{color}.gif".format(shape=shape, color=color)
self.__targets[(color, shape)] = Tkinter.PhotoImage(file=path)
for (direction, dx, dy) in directions:
path = "img/arrow_{color}_{direction}.gif".format(color=color, direction=direction)
self.__moves[(color, dx, dy)] = Tkinter.PhotoImage(file=path)
path = "img/move_{color}_{direction}.gif".format(color=color, direction=direction)
self.__moves_short[(color, dx, dy)] = Tkinter.PhotoImage(file=path)
for x in range(0, board.size):
for y in range(0, board.size):
self.__canvas.create_image(
self.__margin + self.__tile_size * x,
self.__margin + self.__tile_size * y,
anchor=Tkinter.NW,
image=self.__tiles[(x + y) % len(self.__tiles)])
for (t, m, x, y) in board.targets:
self.__target_images[(x, y)] = self.__canvas.create_image(
self.__margin + self.__tile_size * x,
self.__margin + self.__tile_size * y,
anchor=Tkinter.NW,
image=self.__targets[(t,m)])
self.__canvas.itemconfig(
self.__target_images[(x, y)],
state=Tkinter.HIDDEN)
for (r, (x, y)) in board.pos.items():
self.__robot_images[r] = self.__canvas.create_image(
self.__margin + self.__tile_size * x,
self.__margin + self.__tile_size * y,
anchor=Tkinter.NW,
image=self.__robots[r])
for (d, x, y) in board.barriers:
(img, dx, dy) = self.__barriers[d]
self.__canvas.create_image(
self.__margin + self.__tile_size * x + dx,
self.__margin + self.__tile_size * y + dy,
anchor=Tkinter.NW,
image=img)
self.__solve_button = self.__canvas.create_text(
board.size * self.__tile_size / 2 + self.__margin,
(0.5 + board.size) * self.__tile_size + 2 * self.__margin,
text="Solve!",
activefill="blue",
state=Tkinter.HIDDEN)
self.__solving_text = self.__canvas.create_text(
board.size * self.__tile_size / 2 + self.__margin,
(0.5 + board.size) * self.__tile_size + 2 * self.__margin,
text="Solving...",
state=Tkinter.HIDDEN)
self.__canvas.bind('<Motion>', self.__mouse_move_event)
self.__canvas.bind('<Button-1>', self.__mouse_click_event)
def __mouse_over(self, tag, mx, my):
if self.__canvas.itemcget(tag, "state") == Tkinter.HIDDEN:
return False
x, y, xx, yy = self.__canvas.bbox(tag)
return mx >= x and mx < xx and \
my >= y and my < yy
def __mouse_over_triangle(self, tag, mx, my, dx, dy):
if self.__mouse_over(tag, mx, my):
px, py = self.__canvas.coords(tag)
px = (mx - px) / self.__tile_size
py = (my - py) / self.__tile_size
rx = px - py
ry = px + py - 1
if (dx - dy) * rx < 0 and (dx + dy) * ry < 0:
return True
return False
def __mouse_click_event(self, e):
clicked = set()
for (x, y), t in self.__target_images.items():
if self.__mouse_over(t, e.x, e.y):
clicked.add(("target", (x, y)))
for (t, val) in self.__direction_images:
r, x, y, dx, dy = val
if self.__mouse_over_triangle(t, e.x, e.y, dx, dy):
clicked.add(("robot", val))
if self.__mouse_over(self.__solve_button, e.x, e.y):
clicked.add(("solve", None))
for tag, val in clicked:
for slot in self.__slots.get(tag, []):
slot("click", val)
def __mouse_move_event(self, e):
entered = set()
for ((x, y), t) in self.__target_images.items():
if self.__mouse_over(t, e.x, e.y):
entered.add(("target", (x, y)))
for (t, val) in self.__direction_images:
r, x, y, dx, dy = val
if self.__mouse_over_triangle(t, e.x, e.y, dx, dy):
entered.add(("robot", val))
for (tag, val) in self.__entered - entered:
for slot in self.__slots.get(tag, []):
slot("leave", val)
for (tag, val) in entered - self.__entered:
for slot in self.__slots.get(tag, []):
slot("enter", val)
self.__entered = entered
def highlight(self, x, y, active):
if active and not (x, y) in self.__highlights:
m = 8
xx = self.__margin + x * self.__tile_size + m
yy = self.__margin + y * self.__tile_size + m
self.__highlights[(x, y)] = self.__canvas.create_rectangle(
(xx, yy, xx + self.__tile_size - 2 * m, yy + self.__tile_size - 2 * m),
width=3,
outline="blue")
elif not active and (x, y) in self.__highlights:
self.__canvas.delete(self.__highlights[(x, y)])
del self.__highlights[(x, y)]
def highlight_direction(self, x, y, dx, dy, active):
if active and not (x, y, dx, dy) in self.__highlights:
m = 8
xx = self.__margin + x * self.__tile_size + m
yy = self.__margin + y * self.__tile_size + m
xxx = xx + self.__tile_size - 2 * m
yyy = | |
{
i.split("== ")[-1][1:-2]: {
"ylim": tt.split("\n")[n + 1].split("=")[-1].split()[0:2]
}
for (n, i) in enumerate(tt.split("\n"))
if "==" in i
}
def gas_plot(self):
    """Map the gas named in self.par to scatter-plot marker settings.

    Returns a dict with 'marker' and 'alpha' keys: triangles for N2,
    circles for O2, and stars for anything else.
    """
    label = self.par
    if "N2" in label:
        marker_style, opacity = "^", 0.6
    elif "O2" in label:
        marker_style, opacity = "o", 0.9
    else:
        marker_style, opacity = "*", 0.4
    return {"marker": marker_style, "alpha": opacity}
def AmdImp():
    """Return plotting column specifications keyed by representation.

    'Y' maps to the admittance columns, 'Z' to the impedance columns;
    each spec names the data/fit columns and the axis labels.
    """
    admittance = {
        "x_data": "DATA_Yre",
        "y_data": "DATA_Yim",
        "x_fit": "FIT2_Yre",
        "y_fit": "FIT2_Yim",
        "x_label": "$\mathrm{Y_{Re}}$",
        "y_label": "$\mathrm{Y_{Im}}$",
    }
    impedance = {
        "x_data": "DATA_Zre",
        "y_data": "DATA_-Zim",
        "x_fit": "FIT2_Zre",
        "y_fit": "FIT2_Zim",
        "x_label": "$\mathrm{Z_{Re}}$",
        "y_label": "$\mathrm{Z_{Im}}$",
    }
    return {"Y": admittance, "Z": impedance}
def ReFit():
    """Stub for re-running EIS fits that are known to be bad.

    NOTE(review): the list of bad fit identifiers is built but never
    used or returned -- the actual refit logic appears unimplemented.
    """
    bad_fittings = ["N2_EIS-range_1500rpm_JOS3_high-load_267"]
def StandardMeasurements():
    """Stub for collecting standard measurement files.

    NOTE(review): `files` is created but never populated, used or
    returned -- implementation appears to be pending.
    """
    files = []
def read_varnames(grp):
    """Collect lmfit variable names for a result group.

    grp is a DataFrame with a column whose name contains
    'lmfit_var_names', holding comma-separated variable-name strings.

    Returns a pair:
    - the variable names (order of first appearance), plus the derived
      names 'Rct_kin' (when any Rct variable is present) and 'Qad+Cdlp'
      (when both 'Qad' and 'Cdlp' are present);
    - the grp columns matching any of those names, excluding stderr
      columns.
    """
    names_col = [col for col in grp.columns if "lmfit_var_names" in col][0]
    var_names = []
    for joined in grp[names_col].unique():
        var_names.extend(joined.split(", "))
    # Derived names, decided from the raw variable list.
    derived = []
    if any("Rct" in name for name in var_names):
        derived.append("Rct_kin")
    if "Qad" in var_names and "Cdlp" in var_names:
        derived.append("Qad+Cdlp")
    var_names += derived
    matching_cols = [
        col
        for col in grp.columns
        if any(name in col for name in var_names) and not "stderr" in col
    ]
    return var_names, matching_cols
def EIS_ParsPlotting_Rs_per_Sample(SampleData, PDDirEIScom):
    """Plot fitted EIS parameters versus applied potential for one sample.

    One figure per parameter (Rct, Rs, Rorr, Rct_kin, Cdlp), with one
    scatter series per (Gas, postAST) group, saved as a .png into
    PDDirEIScom. The Cdlp figure additionally overlays Qad, Cdl+Qad and
    the exponents nDL/nAd on a twin y-axis.

    SampleData: DataFrame with columns SampleCode, SampleID, Gas,
        postAST, E_AppV_RHE and the parameter columns used below.
    PDDirEIScom: pathlib.Path destination directory for the figures.
    """
    for yPar in ["Rct", "Rs", "Rorr", "Rct_kin", "Cdlp"]:
        fig, ax = plt.subplots(1, 1)
        SampleLabel, SampleID = (
            SampleData.SampleCode.unique()[0],
            SampleData.SampleID.unique()[0],
        )
        for Gas, gasGr in SampleData.groupby(by="Gas"):
            for Status, stGr in gasGr.groupby(by="postAST"):
                # NOTE(review): yPar never equals "Qad" in the loop above,
                # and ax.twinx() is re-created for every (Gas, Status)
                # group, stacking twin axes -- confirm intent.
                if yPar == "Cdlp" or yPar == "Qad":
                    ax2 = ax.twinx()
                    sc1 = ax.scatter(
                        stGr["E_AppV_RHE"].values,
                        stGr["Cdlp"].values,
                        label=str("Cdl" + ": " + "%s, %s" % (Gas, Status)),
                        s=80,
                        marker="D",
                    )
                    sc2 = ax.scatter(
                        stGr["E_AppV_RHE"].values,
                        stGr["Qad"].values,
                        label=str("Qad" + ": " "%s, %s" % (Gas, Status)),
                        s=80,
                        marker="h",
                    )
                    sc_12 = ax.scatter(
                        stGr["E_AppV_RHE"].values,
                        stGr["Qad"].values + stGr["Cdlp"].values,
                        label=str("Cdl+Qad" + ": " + "%s, %s" % (Gas, Status)),
                        s=80,
                        marker="h",
                    )
                    # sc1 = ax1.scatter(Pgr['E_AppV_RHE'].values,Pgr[yPar[0]].values,label=str(yPar[0]+': '+Path(fn).stem),s=80)
                    # Exponents go on the twin axis (0..1 range).
                    sc3 = ax2.scatter(
                        stGr["E_AppV_RHE"].values,
                        stGr["nDL"].values,
                        label=str("n_Cdl" + ": " + "%s, %s" % (Gas, Status)),
                        s=40,
                        alpha=0.6,
                        marker="o",
                    )
                    sc4 = ax2.scatter(
                        stGr["E_AppV_RHE"].values,
                        stGr["nAd"].values,
                        label=str("n_Qad" + ": " + "%s, %s" % (Gas, Status)),
                        s=40,
                        alpha=0.6,
                        marker="h",
                    )
                    ax.set_ylim([0, 0.03])
                    ax2.set_ylim([0, 1])
                    ax.legend(bbox_to_anchor=(0.5, 1.5), ncol=2, loc="upper center")
                else:
                    ax.scatter(
                        stGr["E_AppV_RHE"].values,
                        stGr[yPar].values,
                        label="%s, %s" % (Gas, Status),
                        s=80,
                    )
                    ax.legend(ncol=1, loc="upper left", fontsize=10)
        ax.set_ylabel(yPar)
        ax.set_xlabel("E / V v RHE")
        ax.set_title(SampleLabel)
        ax.grid(True)
        DestFile = PDDirEIScom.joinpath(
            "_".join([SampleID, yPar, SampleLabel])
        ).with_suffix(".png")
        plt.savefig(DestFile, dpi=300, bbox_inches="tight")
        plt.close()
#
def EIS_ParsPlotting(AllData_E_file, Cdl, DestFile, SaveFigsC=False):
    """Overlay EIS capacitance fits (Cdlp, Qad, their sum; left axis)
    and the corresponding exponents nDL/nAd (right axis) versus
    potential, one series per file, optionally with CV-derived Cdl.

    AllData_E_file: DataFrame with columns File, Sample, E_AppV_RHE,
        Cdlp, Qad, nDL, nAd.
    Cdl: DataFrame of CV-derived Cdl values (columns E_AppV_RHE, Cdl,
        Filename); may be empty.
    DestFile: pathlib.Path used for the title and the save location.
    SaveFigsC: when True, save as 'Cdl_<DestFile.name>' next to DestFile.
    """
    SampleLabel = AllData_E_file.Sample.unique()[0]
    fig1, ax1 = plt.subplots(1, 1)
    ax2 = ax1.twinx()
    # Common y-limits spanning both capacitance columns.
    # NOTE(review): minl is computed but never used below.
    minl, maxl = (
        AllData_E_file[["Cdlp", "Qad"]].min().min(),
        1.1 * AllData_E_file[["Cdlp", "Qad"]].max().max(),
    )
    # for yPar in [('Cdlp','nDL'),('Qad','nAd')]:
    # minl, maxl = 0.5*AllData_E_file[yPar[0]].min(),1.1*AllData_E_file[yPar[0]].max()
    scts = []
    for fn, Pgr in AllData_E_file.groupby("File"):
        sc1 = ax1.scatter(
            Pgr["E_AppV_RHE"].values,
            Pgr["Cdlp"].values,
            label=str("Cdl" + ": " + Path(fn).stem),
            s=80,
            c="royalblue",
            marker="D",
        )
        sc2 = ax1.scatter(
            Pgr["E_AppV_RHE"].values,
            Pgr["Qad"].values,
            label=str("Qad" + ": " + Path(fn).stem),
            s=80,
            c="tomato",
            marker="h",
        )
        sc_12 = ax1.scatter(
            Pgr["E_AppV_RHE"].values,
            Pgr["Qad"].values + Pgr["Cdlp"].values,
            label=str("Cdl+Qad" + ": " + Path(fn).stem),
            s=80,
            c="darkviolet",
            marker="h",
        )
        # sc1 = ax1.scatter(Pgr['E_AppV_RHE'].values,Pgr[yPar[0]].values,label=str(yPar[0]+': '+Path(fn).stem),s=80)
        # Exponents on the twin axis (0..1 range).
        sc3 = ax2.scatter(
            Pgr["E_AppV_RHE"].values,
            Pgr["nDL"].values,
            label=str("n_Cdl" + ": " + Path(fn).stem),
            s=40,
            c="lightgrey",
            alpha=0.6,
            marker="o",
        )
        sc4 = ax2.scatter(
            Pgr["E_AppV_RHE"].values,
            Pgr["nAd"].values,
            label=str("n_Qad" + ": " + Path(fn).stem),
            s=40,
            c="black",
            alpha=0.6,
            marker="h",
        )
        # sc2 = ax2.scatter(Pgr['E_AppV_RHE'].values,Pgr[yPar[1]].values,label=str(yPar[1]+': '+Path(fn).stem),s=40,c='lightgrey',alpha=0.5, marker='^')
        scts.append([sc1, sc2, sc_12, sc3, sc4])
    if not Cdl.empty:
        # Overlay the CV-derived Cdl points and widen the y-limit if the
        # CV values exceed the fitted ones.
        sc5 = ax1.scatter(
            Cdl["E_AppV_RHE"].values,
            Cdl["Cdl"].values,
            label=str([Path(i).stem + "\n" for i in Cdl.Filename.unique()]),
            s=20,
            alpha=0.8,
            marker="o",
            c="aqua",
        )
        Cdl_CV_max = Cdl["Cdl"].mean() + 2 * Cdl["Cdl"].std()
        if Cdl_CV_max > maxl:
            maxl = Cdl_CV_max
        scts.append([sc5])
    # ax1.set_ylabel(yPar[0])
    # ax2.set_ylabel(yPar[1])
    ax1.set_ylabel("Cdl / Qad")
    ax2.set_ylabel("n_CdL / n_Qad")
    ax1.set_xlabel("E / V v RHE")
    ax1.set_ylim([0, maxl])
    ax2.set_ylim([0, 1])
    ax1.grid(True)
    # lns = lns1+lns2+lns3
    # One combined legend for all series from both axes.
    flat_scts = list(itertools.chain.from_iterable(scts))
    labs = [l.get_label() for l in flat_scts]
    ax1.legend(
        flat_scts,
        labs,
        bbox_to_anchor=(0.4, 1.1 + 0.1 * len(labs)),
        ncol=1,
        loc="upper center",
        fontsize=10,
    )
    # ax1.set_title('%s (%s)'%(SampleLabel,DestFile.stem))
    fig1.suptitle(("%s (%s)" % (SampleLabel, DestFile.stem)))
    # ax2.legend(bbox_to_anchor=(0.4,1.24),ncol=1, loc="upper center",fontsize=10)
    if SaveFigsC:
        # plt.savefig(dest_dir.joinpath('EIS_%s.png'%(yPar[0])),dpi=300,bbox_inches='tight')
        plt.savefig(
            DestFile.parent.joinpath("Cdl" + "_" + DestFile.name),
            dpi=300,
            bbox_inches="tight",
        )
    plt.show()
    plt.close()
def PlotParsEIS(AllData_E_file, EISovv, SampleCode, DestFile, xEIS="Y", yEIS="Rct"):
    """Plot stacked EIS spectra (data + two fits) for every potential.

    Each potential's spectrum is offset vertically by maxYim * index so
    all spectra fit in one axes. xEIS selects admittance ('Y') or
    impedance ('Z') columns.

    AllData_E_file: DataFrame with columns E_AppV_RHE, Frequency(Hz),
        DATA_/FIT1_/FIT2_ re/im columns for the chosen representation.
    EISovv: overview DataFrame supplying title metadata.
    SampleCode: DataFrame with a Sample column; when empty, the SampleID
        from EISovv is used instead.
    DestFile: output path for the saved figure.
    NOTE(review): the yEIS parameter is unused in this function body.
    """
    # maxLim = (AllData_E_file[['DATA_Yim','DATA_Yre']].max()).max()
    #%%
    EvRHE = "E_AppV_RHE"
    # Offsets and x-limits are derived from the largest |Im| / |Re| values.
    maxYim = np.abs(AllData_E_file["DATA_%sim" % xEIS]).max()
    maxYre = np.abs(AllData_E_file["DATA_%sre" % xEIS]).max()
    # AllData_E_file['DATA_Yre'].max()
    # maxZim = AllData_E_file['DATA_Yim'].max()
    # maxZre = AllData_E_file['DATA_Yre'].max()
    Lenrows = len(AllData_E_file[EvRHE].unique())
    # fig,axes = plt.subplots(nrows=Lenrows ,sharex=True,sharey=True)
    ht, wd = 10, 15
    fig, ax = plt.subplots(figsize=(ht, wd))
    if SampleCode.empty:
        Scode = EISovv["SampleID"].unique()[0]
    else:
        Scode = SampleCode.Sample.values[0]
    fig.suptitle(
        "%s %s, in \n %s saturated %s \n %s \n %s"
        % (
            Scode,
            EISovv.postAST.values[0],
            EISovv["Gas"].unique()[0],
            EISovv["Electrolyte"].unique()[0],
            EISovv.EXP_date.values[0],
            Path(EISovv["SourceFilename"].unique()[0]).stem,
        )
    )
    dataC, fitC, extraC, initC = "tab:blue", "tab:red", "gold", "gray"
    # ax.set_xlim(0,maxYre)
    # ax.set_ylim(0,maxYre+maxYim*Lenrows)
    ax.grid(True)
    # ax.axis('equal')
    for En, Ev in enumerate(AllData_E_file[EvRHE].unique()):
        Edata = AllData_E_file.loc[(AllData_E_file[EvRHE] == Ev)].sort_values(
            by="Frequency(Hz)"
        )
        # Shift this spectrum up by En offsets; impedance columns are
        # plotted as absolute values.
        if xEIS == "Y":
            FIT2_Im = Edata["FIT2_%sim" % xEIS].values + len(Edata) * [maxYim * En]
            FIT1_Im = Edata["FIT1_%sim" % xEIS].values + len(Edata) * [maxYim * En]
            DATA_Im = Edata["DATA_%sim" % xEIS].values + len(Edata) * [maxYim * En]
            xText = 0.06
        elif xEIS == "Z":
            FIT2_Im = np.abs(Edata["FIT2_%sim" % xEIS].values) + len(Edata) * [
                maxYim * En
            ]
            FIT1_Im = np.abs(Edata["FIT1_%sim" % xEIS].values) + len(Edata) * [
                maxYim * En
            ]
            DATA_Im = np.abs(Edata["DATA_%sim" % xEIS].values) + len(Edata) * [
                maxYim * En
            ]
            xText = maxYre * 0.9
        # Primary fit in red, secondary fit greyed out, data as points.
        ax.plot(Edata["FIT2_%sre" % xEIS].values, FIT2_Im, c=fitC, lw=2.5)
        ax.plot(
            Edata["FIT1_%sre" % xEIS].values,
            FIT1_Im,
            c="lightgrey",
            lw=2.5,
            alpha=0.5,
        )
        ax.scatter(Edata["DATA_%sre" % xEIS].values, DATA_Im, c=dataC, s=150)
        ax.annotate(
            "$\mathrm{%.2f \/ V_{RHE} }$" % Ev,
            xy=(xText, 0.001 + maxYim * En),
            xycoords="data",
        )
        ax.set_ylim(0, maxYim * (En + 2))
    if xEIS == "Y":
        ax.set_xlim(0, 0.07)
        xunit = "mS"
    elif xEIS == "Z":
        ax.set_xlim(0, maxYre * 1.1)
        xunit = "\Omega"
    ax.set_ylabel("$\mathrm{%s_{Im}\/ offset \//\/ %s}$" % (xEIS, xunit))
    ax.set_xlabel("$\mathrm{%s_{Re} \//\/ %s}$" % (xEIS, xunit))
    # *(ht/wd)
    # fig_path = EIS_dest_dir.with_suffix('.png')
    # plt.show()
    #%%
    plt.savefig(DestFile, bbox_inches="tight", dpi=200)
    plt.close()
def PlotCombinedEIS(
AllData_E_file_spectras, EISovv, SampleCode, DestFile, xEIS="Y"
):
# maxLim = (AllData_E_file[['DATA_Yim','DATA_Yre']].max()).max()
# AllData_E_file_spectras,EISovv,SampleCode,DestFile = spectras_comb_bestmods, bgrp,SampleCodes, DestFile
#%%
EvRHE = "E_AppV_RHE"
if EISovv.Model_EEC.nunique() == 1:
plot_Model_EEC = EISovv.Model_EEC.unique()[0]
else:
plot_Model_EEC = f"{EISovv.Model_EEC.unique()[0]} and more"
# '; '.join(str(i) for i in EISovv.Model_EEC.unique())
AllData_E_file = AllData_E_file_spectras
# .loc[AllData_E_file_spectras.Model_EEC.isin(EISovv.Model_EEC.unique())]
if xEIS in ["Y", "Z"]:
colIm, colRe = f"{xEIS}im", f"{xEIS}re"
maxYre = np.abs(AllData_E_file[f"DATA_{colRe}"]).max()
elif "-Zangle" in xEIS:
colIm, colRe = f"{xEIS}", "Frequency(Hz)"
maxYre = np.abs(AllData_E_file[f"{colRe}"]).max()
# AllData_E_file['DATA_Yre'].max()
# maxZim = AllData_E_file['DATA_Yim'].max()
# maxZre = AllData_E_file['DATA_Yre'].max()
# Lenrows = len(AllData_E_file[EvRHE].unique())
# fig,axes = plt.subplots(nrows=Lenrows ,sharex=True,sharey=True)
ht, wd = 15, 20
fig, ax = plt.subplots(figsize=(ht, wd))
sID = EISovv["SampleID"].unique()[0]
if not EISovv.empty:
Scode = EISovv.query("SampleID == @sID").SampleCode.values[0]
else:
Scode = ""
sID = f"{sID}({Scode})"
def ___convert_to_datetime(d):
return datetime.strptime(
np.datetime_as_string(d, unit="s"), "%Y-%m-%dT%H:%M:%S"
)
fig.suptitle(
f"{sID} {EISovv.postAST.unique()[0]}, in"
+ " \n"
+ f"{EISovv.Gas.unique()[0]} saturated {EISovv.Electrolyte.unique()[0]}"
+ " \n"
+ f"{___convert_to_datetime(EISovv.PAR_date.values[0]):%c}"
+ "\n"
+ f"{Path(EISovv.PAR_file.unique()[0]).stem}"
+ "\n"
+ f"{plot_Model_EEC}"
)
dataC, fitC, extraC, initC = "tab:blue", "tab:red", "gold", "gray"
# ax.set_xlim(0,maxYre)
# ax.set_ylim(0,maxYre+maxYim*Lenrows)
ax.grid(True)
# ax.axis('equal')
maxYim = np.abs(AllData_E_file[f"DATA_{colIm}"]).max()
_splitter = "Segment #"
_uniqsegs = AllData_E_file["Segment #"].nunique()
_uniq = [
k
for k, val in AllData_E_file.nunique().to_dict().items()
if val == _uniqsegs and any([k in c for c in ["E_AppV_RHE", "RPM_DAC"]])
]
# E_AppV_RHE'
for En, Ev in enumerate(AllData_E_file[_splitter].unique()):
Edata = AllData_E_file.loc[(AllData_E_file[_splitter] == Ev)].sort_values(
by="Frequency(Hz)"
)
_ERHE = Edata[EvRHE].unique()[0]
_RPM = Edata["RPM_DAC"].unique()[0]
_MOD = Edata["Model_EEC"].unique()[0]
# print(_ERHE,_MOD)
if xEIS == "Y":
# FIT2_Im = Edata['FIT2_%sim'%xEIS].values+len(Edata)*[maxYim*En]
FIT1_Im = Edata[f"FIT_{colIm}"].values + len(Edata) * [maxYim * En]
DATA_Im = Edata[f"DATA_{colIm}"].values + len(Edata) * [maxYim * En]
xText = 0.06
_xtra = 0.02
elif xEIS == "Z":
# FIT2_Im = np.abs(Edata['FIT2_%sim'%xEIS].values)+len(Edata)*[maxYim*En]
FIT1_Im = np.abs(Edata[f"FIT_{colIm}"].values) + len(Edata) * [
maxYim * En
]
DATA_Im = np.abs(Edata[f"DATA_{colIm}"].values) + len(Edata) * [
maxYim * En
]
xText = maxYre * 0.9
_xtra = maxYre * 0.3
elif xEIS == "-Zangle":
FIT1_Im = np.abs(Edata[f"FIT_{colIm}"].values) + len(Edata) * [
maxYim * En
]
DATA_Im = np.abs(Edata[f"DATA_{colIm}"].values) + len(Edata) * [
maxYim * En
]
xText = maxYre * 0.9
_xtra = maxYre * 0.3
# ax.plot(Edata['FIT2_%sre' %xEIS].values,FIT2_Im,c=fitC,lw=2.5)
if not "Frequency" in colRe:
ax.plot(
Edata[f"FIT_{colRe}"].values, FIT1_Im, c=fitC, lw=3.5, alpha=0.7
)
ax.scatter(Edata[f"DATA_{colRe}"].values, DATA_Im, c=dataC, | |
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Utils for file and directory operations.
This provides enhanced and more error resilient forms of standard
stuff. It will also frequently add sorting for determism.
"""
from __future__ import print_function
import glob
import os
import shutil
import stat
import tempfile
import time
from contextlib import contextmanager
from nuitka.__past__ import ( # pylint: disable=I0021,redefined-builtin
basestring,
)
from nuitka.PythonVersions import python_version
from nuitka.Tracing import my_print
from .Importing import importFromInlineCopy
from .ThreadedExecutor import RLock, getThreadIdent
from .Utils import getOS, isWin32Windows
# Locking seems to be only required for Windows mostly, but we can keep
# it for all.
# Re-entrant module-wide lock; every helper below serializes its file
# operations through this via "withFileLock".
file_lock = RLock()
# Use this in case of dead locks or even to see file operations being done.
# When True, each acquire/release of "file_lock" is printed with the thread id.
_lock_tracing = False
@contextmanager
def withFileLock(reason="unknown"):
    """Acquire file handling lock.

    Args:
        reason: What is being done.

    Notes: This is most relevant for Windows, but prevents concurrent access
    from threads generally, which could lead to observing half ready things.
    """
    if _lock_tracing:
        my_print(getThreadIdent(), "Want file lock for %s" % reason)
    file_lock.acquire()
    if _lock_tracing:
        my_print(getThreadIdent(), "Acquired file lock for %s" % reason)
    try:
        yield
    finally:
        # The release must also happen when the guarded block raises,
        # otherwise the module-wide lock would be held forever and every
        # later file operation would dead lock.
        if _lock_tracing:
            my_print(getThreadIdent(), "Released file lock for %s" % reason)
        file_lock.release()
def areSamePaths(path1, path2):
    """Decide if two paths are the same.

    Args:
        path1: First path
        path2: Second path

    Returns:
        Boolean value indicating if the two paths point to the
        same path.

    Notes:
        Both paths are normalized, made absolute, and case-normalized
        where the platform ignores case, so the decision is a plain
        string comparison afterwards.
    """
    canonical = [
        os.path.normcase(os.path.abspath(os.path.normpath(candidate)))
        for candidate in (path1, path2)
    ]

    return canonical[0] == canonical[1]
def haveSameFileContents(path1, path2):
    """Check two files for equal contents."""
    # Deferred import, the normal use cases do not need file comparison.
    import filecmp

    return filecmp.cmp(path1, path2)
def getFileSize(path):
    """Return the size of the file in bytes."""
    return os.stat(path).st_size
def relpath(path, start="."):
    """Make it a relative path, if possible.

    Args:
        path: path to work on
        start: where to start from, defaults to current directory

    Returns:
        Changed path, pointing to the same path relative to current
        directory if possible.

    Notes:
        On Windows, a relative path is not possible across device
        names, therefore it may have to return the absolute path
        instead.
    """
    if start == ".":
        start = os.curdir

    try:
        return os.path.relpath(path, start)
    except ValueError:
        # Windows refuses relative paths across drive letters, fall back
        # to the absolute path in that case, and re-raise elsewhere.
        if getOS() != "Windows":
            raise

        return os.path.abspath(path)
def makePath(path):
    """Create a directory if it doesn't exist.

    Args:
        path: path to create as a directory

    Notes:
        This also is thread safe on Windows, i.e. no race is
        possible.
    """
    with withFileLock("creating directory %s" % path):
        if os.path.isdir(path):
            return

        os.makedirs(path)
def _getRealPathWindows(path):
    # Resolve a Windows link target by asking PowerShell for the "Target"
    # property of the item. Slow, because we are using an external process,
    # but it's only for standalone and Python2, which is slow already.
    import subprocess

    # NOTE(review): "path" is interpolated into the PowerShell command string,
    # paths containing a single quote would break the command - confirm inputs.
    result = subprocess.check_output(
        """powershell -NoProfile "Get-Item '%s' | Select-Object -ExpandProperty Target" """
        % path
    )
    # Python3 "check_output" gives bytes, decode those to str there.
    if str is not bytes:
        result = result.decode("utf8")
    # Join the reported target with the link's own directory; absolute targets
    # take over entirely per "os.path.join" semantics. Strip the trailing
    # newline PowerShell output carries.
    return os.path.join(os.path.dirname(path), result.rstrip("\r\n"))
def getDirectoryRealPath(path):
    """Get os.path.realpath with Python2 and Windows symlink workaround applied.

    Args:
        path: path to get realpath of

    Returns:
        path with symlinks resolved

    Notes:
        Workaround for Windows symlink is applied.
    """
    resolved = os.path.realpath(path)

    # On Windows a directory symlink may remain unresolved, in which case
    # the result is not recognized as a directory and the PowerShell based
    # helper is consulted instead.
    if os.name == "nt" and not os.path.isdir(resolved):
        resolved = _getRealPathWindows(resolved)

    return resolved
def listDir(path):
    """Give a sorted listing of a path.

    Args:
        path: directory to create a listing from

    Returns:
        Sorted list of tuples of full filename, and basename of
        a directory.

    Notes:
        Typically the full name and the basename are both needed
        so this function simply does both, for ease of use on the
        calling side.

        This should be used, because it makes sure to resolve the
        symlinks to directories on Windows, that a naive "os.listdir"
        won't do by default.
    """
    real_path = getDirectoryRealPath(path)

    entries = [
        (os.path.join(path, entry), entry) for entry in os.listdir(real_path)
    ]
    entries.sort()

    return entries
def getFileList(path, ignore_dirs=(), ignore_suffixes=(), normalize=True):
    """Get all files below a given path.

    Args:
        path: directory to create a recursive listing from
        ignore_dirs: Don't descend into these directory, ignore them
        ignore_suffixes: Don't return files with these suffixes
        normalize: Apply "os.path.normpath" to each resulting filename

    Returns:
        Sorted list of all filenames below that directory,
        relative to it.

    Notes:
        This function descends into directories, but does
        not follow symlinks.
    """
    result = []

    # Normalize "ignore_dirs" for better matching.
    ignore_dirs = [os.path.normcase(ignore_dir) for ignore_dir in ignore_dirs]

    for root, dirnames, filenames in os.walk(path):
        dirnames.sort()
        filenames.sort()

        # Prune ignored directories in place, so "os.walk" does not descend
        # into them. Compare case-normalized names, but drop the original
        # entries; removing the normalized name from "dirnames" would raise
        # ValueError for names that differ only in case.
        dirnames[:] = [
            dirname
            for dirname in dirnames
            if os.path.normcase(dirname) not in ignore_dirs
        ]

        for filename in filenames:
            if os.path.normcase(filename).endswith(ignore_suffixes):
                continue

            fullname = os.path.join(root, filename)

            if normalize:
                fullname = os.path.normpath(fullname)

            result.append(fullname)

    return result
def getSubDirectories(path):
    """Get all directories below a given path.

    Args:
        path: directory to create a recursive listing from

    Returns:
        Sorted list of all directories below that directory,
        relative to it.

    Notes:
        This function descends into directories, but does
        not follow symlinks.
    """
    result = []

    for root, dirnames, _filenames in os.walk(path):
        dirnames.sort()

        result.extend(os.path.join(root, dirname) for dirname in dirnames)

    result.sort()

    return result
def deleteFile(path, must_exist):
    """Delete a file, potentially making sure it exists.

    Args:
        path: file to delete
        must_exist: raise if the file was not there or could not be removed

    Notes:
        This also is thread safe on Windows, i.e. no race is
        possible.
    """
    with withFileLock("deleting file %s" % path):
        if not (os.path.islink(path) or os.path.isfile(path)):
            if must_exist:
                raise OSError("Does not exist", path)
            return

        try:
            os.unlink(path)
        except OSError:
            if must_exist:
                raise
def splitPath(path):
    """ Split path into head and tail, skipping empty elements. """
    head, tail = os.path.split(path)

    return tuple(part for part in (head, tail) if part)
def hasFilenameExtension(path, extensions):
    """ Check if a filename has one of the given extensions. """
    _root, extension = os.path.splitext(os.path.normcase(path))

    return extension in extensions
def removeDirectory(path, ignore_errors):
    """Remove a directory recursively.

    On Windows, it happens that operations fail, and succeed when retried,
    so added a retry and small delay, then another retry. Should make it
    much more stable during tests.

    All kinds of programs that scan files might cause this, but they do
    it hopefully only briefly.
    """

    def onError(func, path, exc_info):
        # Try again immediately, ignore what happened, pylint: disable=unused-argument
        try:
            func(path)
        except OSError:
            # Second attempt after a brief delay, giving file scanners a
            # chance to let go of the entry.
            time.sleep(0.1)
            func(path)

    with withFileLock("removing directory %s" % path):
        if os.path.exists(path):
            try:
                shutil.rmtree(path, ignore_errors=False, onerror=onError)
            except OSError:
                # Retried removal still failed; optionally swallow remaining
                # errors by letting "shutil" ignore them wholesale.
                if ignore_errors:
                    shutil.rmtree(path, ignore_errors=ignore_errors)
                else:
                    raise
@contextmanager
def withTemporaryFile(suffix="", mode="w", delete=True):
    """ Provide a named temporary file, by default deleted when closed. """
    temp_file = tempfile.NamedTemporaryFile(suffix=suffix, mode=mode, delete=delete)

    with temp_file:
        yield temp_file
def getFileContentByLine(filename, mode="r", encoding=None):
    """ Return the lines of a file, reading it whole at once.

    Reading everything in one go keeps lock times minimal; only small
    files are expected here.
    """
    contents = getFileContents(filename, mode, encoding=encoding)

    return contents.splitlines()
def getFileContents(filename, mode="r", encoding=None):
    """Get the contents of a file.

    Args:
        filename: str with the file to be read
        mode: "r" for str, "rb" for bytes result
        encoding: optional encoding to used when reading the file, e.g. "utf8"

    Returns:
        str or bytes - depending on mode.
    """
    with withFileLock("reading file %s" % filename):
        if encoding is None:
            with open(filename, mode) as f:
                return f.read()

        # Use "codecs" for Python2 compatible encoded reading.
        import codecs

        with codecs.open(filename, mode, encoding=encoding) as f:
            return f.read()
def putTextFileContents(filename, contents, encoding=None):
    """Write a text file from given contents.

    Args:
        filename: str with the file to be created
        contents: str or iterable of strings with what should be written into the file
        encoding: optional encoding to used when writing the file

    Returns:
        None
    """

    def _writeContents(output_file):
        # A plain string becomes a single line, any other iterable is
        # written line by line.
        if isinstance(contents, basestring):
            print(contents, file=output_file)
        else:
            for line in contents:
                print(line, file=output_file)

    with withFileLock("writing file %s" % filename):
        if encoding is None:
            with open(filename, "w") as output_file:
                _writeContents(output_file)
        else:
            # Use "codecs" for Python2 compatible encoded writing.
            import codecs

            with codecs.open(filename, "w", encoding=encoding) as output_file:
                _writeContents(output_file)
@contextmanager
def withPreserveFileMode(filename):
    """Restore the mode of a file after the context exits.

    Args:
        filename: file whose "st_mode" is captured and re-applied
    """
    old_mode = os.stat(filename).st_mode

    try:
        yield
    finally:
        # Restore even if the guarded block raised, otherwise a temporary
        # mode change would leak past the context.
        os.chmod(filename, old_mode)
@contextmanager
def withMadeWritableFileMode(filename):
    """ Temporarily force file mode 0644, restoring the old mode afterwards. """
    with withPreserveFileMode(filename):
        os.chmod(filename, 0o644)

        yield
def removeFileExecutablePermission(filename):
old_stat = os.stat(filename)
mode = old_stat.st_mode
mode &= ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if | |
"""
This is an object-orient plotting library.
A procedural interface is provided by the companion pylab module,
which may be imported directly, e.g::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming rather than working interactively. The
exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
    defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
    a global dictionary of default configuration settings.  It is
    initialized by code which may be overridden by a matplotlibrc
    file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib is written by <NAME> (<EMAIL>) and a
host of others.
"""
from __future__ import generators
__version__ = '0.98.3'
__revision__ = '$Revision$'
__date__ = '$Date$'
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
# Flag for a newer configuration system; stays False here.
NEWCONFIG = False

# Needed for toolkit setuptools support
# NOTE: "if 0" keeps the namespace declaration permanently disabled.
if 0:
    try:
        __import__('pkg_resources').declare_namespace(__name__)
    except ImportError:
        pass # must not have setuptools

# mod_python embeds the interpreter without sys.argv, provide a stub.
if not hasattr(sys, 'argv'): # for modpython
    sys.argv = ['modpython']

"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile

from rcsetup import defaultParams, validate_backend, validate_toolbar
from rcsetup import validate_cairo_format

major, minor1, minor2, s, tmp = sys.version_info

# NOTE(review): the "minor1>=4" test misfires for Python 3.0-3.3
# (major=3, minor1<4); this code base only anticipated the 2.x line.
_python24 = major>=2 and minor1>=4

# the havedate check was a legacy from old matplotlib which preceeded
# datetime support
_havedate = True

#try:
#    import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True

if not _python24:
    raise ImportError('matplotlib requires Python 2.4 or later')

import numpy
nn = numpy.__version__.split('.')
# NOTE(review): same per-component comparison flaw as above, a numpy
# "2.0" would fail the "minor >= 1" part of this check.
if not (int(nn[0]) >= 1 and int(nn[1]) >= 1):
    raise ImportError(
        'numpy 1.1 or later is required; you have %s' % numpy.__version__)
def is_string_like(obj):
    """Return 1 if *obj* behaves like a string, else 0.

    Arrays (anything exposing a "shape" attribute) are explicitly
    rejected; otherwise string-ness is probed by concatenation.
    """
    if hasattr(obj, 'shape'):
        return 0

    try:
        obj + ''
    except (TypeError, ValueError):
        return 0

    return 1
def _is_writable_dir(p):
    """
    p is a string pointing to a putative writable dir -- return True p
    is such a string, else False
    """
    # String-ness probed by concatenation, mirroring is_string_like above.
    try: p + '' # test is string like
    except TypeError: return False
    # Probe writability by actually creating a temporary file there.
    # NOTE(review): Python2-era code; under Python3 "TemporaryFile" defaults
    # to binary mode, so t.write('1') would raise an uncaught TypeError.
    try:
        t = tempfile.TemporaryFile(dir=p)
        t.write('1')
        t.close()
    except OSError: return False
    else: return True
class Verbose:
    """
    A class to handle reporting. Set the fileo attribute to any file
    instance to handle the output. Default is sys.stdout
    """
    levels = ('silent', 'helpful', 'debug', 'debug-annoying')
    # Map of level name -> numeric rank, used by ge() for ordering.
    vald = dict( [(level, i) for i,level in enumerate(levels)])

    # parse the verbosity from the command line; flags look like
    # --verbose-silent or --verbose-helpful
    # NOTE: this loop runs once at class-definition time, scanning sys.argv;
    # the last matching flag wins.
    _commandLineVerbose = None
    for arg in sys.argv[1:]:
        if not arg.startswith('--verbose-'): continue
        _commandLineVerbose = arg[10:]

    def __init__(self):
        # Default to quiet output on stdout.
        self.set_level('silent')
        self.fileo = sys.stdout

    def set_level(self, level):
        'set the verbosity to one of the Verbose.levels strings'
        # A command-line flag always overrides the programmatic setting.
        if self._commandLineVerbose is not None:
            level = self._commandLineVerbose
        if level not in self.levels:
            raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels))
        self.level = level

    def set_fileo(self, fname):
        # The two symbolic names map to the standard streams, anything else
        # is treated as a log file path to open for writing.
        std = {
            'sys.stdout': sys.stdout,
            'sys.stderr': sys.stderr,
        }
        if fname in std:
            self.fileo = std[fname]
        else:
            try:
                fileo = file(fname, 'w')
            except IOError:
                raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
            else:
                self.fileo = fileo

    def report(self, s, level='helpful'):
        """
        print message s to self.fileo if self.level>=level. Return
        value indicates whether a message was issued
        """
        if self.ge(level):
            print >>self.fileo, s
            return True
        return False

    def wrap(self, fmt, func, level='helpful', always=True):
        """
        return a callable function that wraps func and reports it
        output through the verbose handler if current verbosity level
        is higher than level

        if always is True, the report will occur on every function
        call; otherwise only on the first time the function is called
        """
        assert callable(func)
        def wrapper(*args, **kwargs):
            ret = func(*args, **kwargs)

            # "_spoke" remembers whether a report was ever issued, so the
            # always=False case reports only once.
            if (always or not wrapper._spoke):
                spoke = self.report(fmt%ret, level)
                if not wrapper._spoke: wrapper._spoke = spoke
            return ret
        wrapper._spoke = False
        wrapper.__doc__ = func.__doc__
        return wrapper

    def ge(self, level):
        'return true if self.level is >= level'
        return self.vald[self.level]>=self.vald[level]

# Module-level singleton used throughout for reporting.
verbose=Verbose()
def checkdep_dvipng():
    """Return the installed dvipng version string, or None if unusable."""
    try:
        s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Version is the last word of the second output line.
        line = s.stdout.readlines()[1]
        v = line.split()[-1]
        return v
    except (IndexError, ValueError, OSError):
        # OSError covers dvipng not being installed at all, which previously
        # escaped as an uncaught exception.
        return None
def checkdep_ghostscript():
    """Return the installed ghostscript version string, or None if unusable."""
    try:
        # Windows installs use a differently named executable.
        if sys.platform == 'win32':
            command_args = ['gswin32c', '--version']
        else:
            command_args = ['gs', '--version']
        s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Whole stdout is the version, minus the trailing newline.
        v = s.stdout.read()[:-1]
        return v
    except (IndexError, ValueError, OSError):
        # OSError covers ghostscript not being installed at all, which
        # previously escaped as an uncaught exception.
        return None
def checkdep_tex():
    """Return the installed TeX version string, or None if unusable."""
    try:
        s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        line = s.stdout.readlines()[0]
        # Raw string for the regex, the version looks like "3.14159...".
        pattern = r'3\.1\d+'
        match = re.search(pattern, line)
        v = match.group(0)
        return v
    except (IndexError, ValueError, AttributeError, OSError):
        # OSError covers TeX not being installed at all, which previously
        # escaped as an uncaught exception.
        return None
def checkdep_pdftops():
    """Return the installed pdftops version string, or None if unusable."""
    try:
        s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # pdftops prints its version banner on stderr.
        for line in s.stderr:
            if 'version' in line:
                v = line.split()[-1]
                return v
    except (IndexError, ValueError, UnboundLocalError, OSError):
        # OSError covers pdftops not being installed at all, which
        # previously escaped as an uncaught exception.
        return None
def compare_versions(a, b):
    "return True if a is greater than or equal to b"
    # An empty/None version always compares as not satisfying the requirement.
    if not a:
        return False

    return distutils.version.LooseVersion(a) >= distutils.version.LooseVersion(b)
def checkdep_ps_distiller(s):
    """Validate the ps.usedistiller setting against the installed tools.

    Returns the setting unchanged when usable, otherwise False.
    """
    if not s:
        return False

    gs_req = '7.07'
    gs_sugg = '7.07'

    flag = True

    gs_v = checkdep_ghostscript()
    if compare_versions(gs_v, gs_sugg):
        pass
    elif compare_versions(gs_v, gs_req):
        verbose.report(('ghostscript-%s found. ghostscript-%s or later '
                        'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
    else:
        flag = False
        warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
                       'unless ghostscript-%s or later is installed on your system') % gs_req)

    if s == 'xpdf':
        pdftops_req = '3.0'
        pdftops_req_alt = '0.9' # poppler version numbers, ugh
        pdftops_v = checkdep_pdftops()

        # Either a real xpdf, or an old-numbered poppler build, is fine.
        usable = compare_versions(pdftops_v, pdftops_req) or (
            compare_versions(pdftops_v, pdftops_req_alt)
            and not compare_versions(pdftops_v, '1.0'))

        if not usable:
            flag = False
            warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
                           'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)

    return s if flag else False
def checkdep_usetex(s):
    """Validate the text.usetex setting against TeX, dvipng and ghostscript.

    Returns True only when all required tools are present in adequate
    versions, otherwise False (warning about each missing piece).
    """
    if not s:
        return False

    tex_req = '3.1415'
    gs_req = '7.07'
    gs_sugg = '7.07'
    dvipng_req = '1.5'

    flag = True

    if not compare_versions(checkdep_tex(), tex_req):
        flag = False
        warnings.warn(('matplotlibrc text.usetex option can not be used '
                       'unless TeX-%s or later is '
                       'installed on your system') % tex_req)

    if not compare_versions(checkdep_dvipng(), dvipng_req):
        flag = False
        warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
                       'backend unless dvipng-1.5 or later is '
                       'installed on your system')

    gs_v = checkdep_ghostscript()
    if compare_versions(gs_v, gs_sugg):
        pass
    elif compare_versions(gs_v, gs_req):
        verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
                        'recommended for use with the text.usetex '
                        'option.') % (gs_v, gs_sugg))
    else:
        flag = False
        warnings.warn(('matplotlibrc text.usetex can not be used '
                       'unless ghostscript-%s or later is '
                       'installed on your system') % gs_req)

    return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
# Wrapped accessor: reports "$HOME=..." only once (always=False).
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration dir.
default is HOME/.matplotlib. you can override this with the
MPLCONFIGDIR environment variable
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not _is_writable_dir(configdir):
raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir)
return configdir
h = get_home()
p = os.path.join(get_home(), | |
Relation fields
Accomplishes: RelationField = RelationField(
name='Accomplishes',
rule='min 1 Task',
inverse_of=['Is_Accomplished_By'],
semantic_manager=semantic_manager)
"""
A Relationship Between A Certain Entity (E.G., A Device) And The Task It
Accomplishes
"""
Consists_Of: RelationField = RelationField(
name='Consists_Of',
rule='only Device',
semantic_manager=semantic_manager)
"""
A Relationship Indicating A Composite Entity That Consists Of Other Entities
(E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
And A Humidity Sensor)
"""
Controls_Property: RelationField = RelationField(
name='Controls_Property',
rule='only Property',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Property That Can Be Controlled By A Certain
Device
"""
Has_Function: RelationField = RelationField(
name='Has_Function',
rule='some Actuating_Function, min 1 Function',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Type Of Function Of A Device
"""
Has_Profile: RelationField = RelationField(
name='Has_Profile',
rule='only Profile',
semantic_manager=semantic_manager)
"""
A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
"""
Has_State: RelationField = RelationField(
name='Has_State',
rule='only State',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Type Of State Of A Device
"""
Has_Typical_Consumption: RelationField = RelationField(
name='Has_Typical_Consumption',
rule='only (Energy or Power)',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
Device
"""
Is_Used_For: RelationField = RelationField(
name='Is_Used_For',
rule='only Commodity',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
Controlling A Commodity)
"""
Makes_Measurement: RelationField = RelationField(
name='Makes_Measurement',
rule='only Measurement',
semantic_manager=semantic_manager)
"""
A Relation Between A Device And The Measurements It Makes. Such Measurement
Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
Property To Which It Relates.
"""
Measures_Property: RelationField = RelationField(
name='Measures_Property',
rule='only Property',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Property That Can Be Measured By A Certain
Device
"""
Offers: RelationField = RelationField(
name='Offers',
rule='only Service',
inverse_of=['Is_Offered_By'],
semantic_manager=semantic_manager)
"""
A Relationship Between A Device And A Service
"""
class Door_Switch(Switch):
    """
    A Device Of Category Saref:Actuator That Consists Of A Switch, Accomplishes
    The Task Saref:Safety, Performs The Saref:Openclosefunction, Is Used For
    Controlling A Door, And Can Be Found In The State Saref:Openclosestate.

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules and identifiers are wired up only on first construction;
        # an already-initialised instance carries an 'id' entry.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # Cardinality/type rules mirroring the "rule" strings on the
            # class-level field declarations below.
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Has_Manufacturer._rules = [('max|1', [['string']])]
            self.Has_Model._rules = [('max|1', [['string']])]
            self.Accomplishes._rules = [('value', [[Safety]]), ('min|1', [[Task]])]
            self.Consists_Of._rules = [('some', [[Switch]]), ('only', [[Device]])]
            self.Controls_Property._rules = [('only', [[Property]])]
            self.Has_Function._rules = [('some', [[Open_Close_Function]]), ('some', [[Actuating_Function]]), ('min|1', [[Function]])]
            self.Has_Profile._rules = [('only', [[Profile]])]
            self.Has_State._rules = [('some', [[Open_Close_State]]), ('only', [[State]])]
            self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
            self.Is_Used_For._rules = [('only', [[Commodity]])]
            self.Makes_Measurement._rules = [('only', [[Measurement]])]
            self.Measures_Property._rules = [('only', [[Property]])]
            self.Offers._rules = [('only', [[Service]])]

            # Bind each field to this instance's identifier.
            self.Accomplishes._instance_identifier = self.get_identifier()
            self.Consists_Of._instance_identifier = self.get_identifier()
            self.Controls_Property._instance_identifier = self.get_identifier()
            self.Has_Function._instance_identifier = self.get_identifier()
            self.Has_Profile._instance_identifier = self.get_identifier()
            self.Has_State._instance_identifier = self.get_identifier()
            self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
            self.Is_Used_For._instance_identifier = self.get_identifier()
            self.Makes_Measurement._instance_identifier = self.get_identifier()
            self.Measures_Property._instance_identifier = self.get_identifier()
            self.Offers._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()
            self.Has_Manufacturer._instance_identifier = self.get_identifier()
            self.Has_Model._instance_identifier = self.get_identifier()

            # Satisfies the 'value Safety' restriction declared above.
            self.Accomplishes.add(Safety())

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """
    Has_Manufacturer: DataField = DataField(
        name='Has_Manufacturer',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Manufacturer Of An Entity (E.G., Device)
    """
    Has_Model: DataField = DataField(
        name='Has_Model',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Model Of An Entity (E.G., Device)
    """

    # Relation fields
    Accomplishes: RelationField = RelationField(
        name='Accomplishes',
        rule='value Safety, min 1 Task',
        inverse_of=['Is_Accomplished_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Certain Entity (E.G., A Device) And The Task It
    Accomplishes
    """
    Consists_Of: RelationField = RelationField(
        name='Consists_Of',
        rule='some Switch, only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Indicating A Composite Entity That Consists Of Other Entities
    (E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
    And A Humidity Sensor)
    """
    Controls_Property: RelationField = RelationField(
        name='Controls_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Controlled By A Certain
    Device
    """
    Has_Function: RelationField = RelationField(
        name='Has_Function',
        rule='some Open_Close_Function, some Actuating_Function, min 1 Function',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of Function Of A Device
    """
    Has_Profile: RelationField = RelationField(
        name='Has_Profile',
        rule='only Profile',
        semantic_manager=semantic_manager)
    """
    A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
    """
    Has_State: RelationField = RelationField(
        name='Has_State',
        rule='some Open_Close_State, only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of State Of A Device
    """
    Has_Typical_Consumption: RelationField = RelationField(
        name='Has_Typical_Consumption',
        rule='only (Energy or Power)',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
    Device
    """
    Is_Used_For: RelationField = RelationField(
        name='Is_Used_For',
        rule='only Commodity',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
    Controlling A Commodity)
    """
    Makes_Measurement: RelationField = RelationField(
        name='Makes_Measurement',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relation Between A Device And The Measurements It Makes. Such Measurement
    Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
    Property To Which It Relates.
    """
    Measures_Property: RelationField = RelationField(
        name='Measures_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Measured By A Certain
    Device
    """
    Offers: RelationField = RelationField(
        name='Offers',
        rule='only Service',
        inverse_of=['Is_Offered_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Device And A Service
    """
class Light_Switch(Switch):
    """
    A Device Of Category Saref:Actuator That Consists Of A Switch, Accomplishes
    The Task Saref:Lighting, Performs The Saref:Onofffunction, Measures The
    Property Saref:Light, And Can Be Found In The State Saref:Onoffstate. It Can
    Offer A Switch On Service.

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules and identifiers are wired up only on first construction;
        # an already-initialised instance carries an 'id' entry.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # Cardinality/type rules mirroring the "rule" strings on the
            # class-level field declarations below.
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Has_Manufacturer._rules = [('max|1', [['string']])]
            self.Has_Model._rules = [('max|1', [['string']])]
            self.Accomplishes._rules = [('value', [[Lighting]]), ('min|1', [[Task]])]
            self.Consists_Of._rules = [('some', [[Switch]]), ('only', [[Device]])]
            self.Controls_Property._rules = [('only', [[Property]])]
            self.Has_Function._rules = [('some', [[On_Off_Function]]), ('some', [[Actuating_Function]]), ('min|1', [[Function]])]
            self.Has_Profile._rules = [('only', [[Profile]])]
            self.Has_State._rules = [('some', [[On_Off_State]]), ('only', [[State]])]
            self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
            self.Is_Used_For._rules = [('only', [[Commodity]])]
            self.Makes_Measurement._rules = [('only', [[Measurement]])]
            self.Measures_Property._rules = [('some', [[Light]]), ('only', [[Property]])]
            self.Offers._rules = [('some', [[Switch_On_Service]]), ('only', [[Service]])]

            # Bind each field to this instance's identifier.
            self.Accomplishes._instance_identifier = self.get_identifier()
            self.Consists_Of._instance_identifier = self.get_identifier()
            self.Controls_Property._instance_identifier = self.get_identifier()
            self.Has_Function._instance_identifier = self.get_identifier()
            self.Has_Profile._instance_identifier = self.get_identifier()
            self.Has_State._instance_identifier = self.get_identifier()
            self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
            self.Is_Used_For._instance_identifier = self.get_identifier()
            self.Makes_Measurement._instance_identifier = self.get_identifier()
            self.Measures_Property._instance_identifier = self.get_identifier()
            self.Offers._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()
            self.Has_Manufacturer._instance_identifier = self.get_identifier()
            self.Has_Model._instance_identifier = self.get_identifier()

            # Satisfies the 'value Lighting' restriction declared above.
            self.Accomplishes.add(Lighting())

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """
    Has_Manufacturer: DataField = DataField(
        name='Has_Manufacturer',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Manufacturer Of An Entity (E.G., Device)
    """
    Has_Model: DataField = DataField(
        name='Has_Model',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Model Of An Entity (E.G., Device)
    """

    # Relation fields
    Accomplishes: RelationField = RelationField(
        name='Accomplishes',
        rule='value Lighting, min 1 Task',
        inverse_of=['Is_Accomplished_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Certain Entity (E.G., A Device) And The Task It
    Accomplishes
    """
    Consists_Of: RelationField = RelationField(
        name='Consists_Of',
        rule='some Switch, only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Indicating A Composite Entity That Consists Of Other Entities
    (E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
    And A Humidity Sensor)
    """
    Controls_Property: RelationField = RelationField(
        name='Controls_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Controlled By A Certain
    Device
    """
    Has_Function: RelationField = RelationField(
        name='Has_Function',
        rule='some On_Off_Function, some Actuating_Function, min 1 Function',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of Function Of A Device
    """
    Has_Profile: RelationField = RelationField(
        name='Has_Profile',
        rule='only Profile',
        semantic_manager=semantic_manager)
    """
    A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
    """
    Has_State: RelationField = RelationField(
        name='Has_State',
        rule='some On_Off_State, only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of State Of A Device
    """
    Has_Typical_Consumption: RelationField = RelationField(
        name='Has_Typical_Consumption',
        rule='only (Energy or Power)',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
    Device
    """
    Is_Used_For: RelationField = RelationField(
        name='Is_Used_For',
        rule='only Commodity',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
    Controlling A Commodity)
    """
    Makes_Measurement: RelationField = RelationField(
        name='Makes_Measurement',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relation Between A Device And The Measurements It Makes. Such Measurement
    Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
    Property To Which It Relates.
    """
    Measures_Property: RelationField = RelationField(
        name='Measures_Property',
        rule='some Light, only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Measured By A Certain
    Device
    """
    Offers: RelationField = RelationField(
        name='Offers',
        rule='some Switch_On_Service, only Service',
        inverse_of=['Is_Offered_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Device And A Service
    """
class Switch_On_Service(Service):
    """
    A type of service that represents an on/off function to the network.

    Source:
        https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # 'id' only exists once an instance has been initialised before;
        # the per-instance rules must be attached exactly once.
        already_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if already_initialised:
            return
        # Attach rule sets and the instance identifier per relation field.
        self.Is_Offered_By._rules = [('some', [[Light_Switch]]), ('min|1', [[Device]])]
        self.Is_Offered_By._instance_identifier = self.get_identifier()
        self.Represents._rules = [('some', [[On_Off_Function]]), ('min|1', [[Function]])]
        self.Represents._instance_identifier = self.get_identifier()

    # Relation fields
    Is_Offered_By: RelationField = RelationField(
        name='Is_Offered_By',
        rule='some Light_Switch, min 1 Device',
        inverse_of=['Offers'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Service And A Device That Offers The Service
    """
    Represents: RelationField = RelationField(
        name='Represents',
        rule='some On_Off_Function, min 1 Function',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Service And A Function.
    """
class Task(Thing):
    """
    The goal for which a device is designed (from a user perspective), e.g.
    a washing machine is designed for the task of washing. The list of tasks
    is open for extension.

    Source:
        https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules are attached only on first construction of the instance.
        already_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if already_initialised:
            return
        self.Is_Accomplished_By._rules = [('min|1', [[Device]])]
        self.Is_Accomplished_By._instance_identifier = self.get_identifier()

    # Relation fields
    Is_Accomplished_By: RelationField = RelationField(
        name='Is_Accomplished_By',
        rule='min 1 Device',
        inverse_of=['Accomplishes'],
        semantic_manager=semantic_manager)
    """
    A Relationship Indentifying The Task Accomplished By A Certain Entity (E.G.,
    A Device)
    """
class Temperature(Property):
    """
    A saref:Property for measurements characterized by a value expressed in
    a temperature unit (degree_Celsius, degree_Fahrenheit or degree_Kelvin).

    Source:
        https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules are attached only on first construction of the instance.
        already_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if already_initialised:
            return
        # Attach rule sets and the instance identifier per relation field.
        self.Is_Controlled_By_Device._rules = [('only', [[Device]])]
        self.Is_Controlled_By_Device._instance_identifier = self.get_identifier()
        self.Is_Measured_By_Device._rules = [('only', [[Device]])]
        self.Is_Measured_By_Device._instance_identifier = self.get_identifier()
        self.Relates_To_Measurement._rules = [('only', [[Measurement]])]
        self.Relates_To_Measurement._instance_identifier = self.get_identifier()

    # Relation fields
    Is_Controlled_By_Device: RelationField = RelationField(
        name='Is_Controlled_By_Device',
        rule='only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Devices That Can Control A Certain Property
    """
    Is_Measured_By_Device: RelationField = RelationField(
        name='Is_Measured_By_Device',
        rule='only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Devices That Can Measure A Certain Property
    """
    Relates_To_Measurement: RelationField = RelationField(
        name='Relates_To_Measurement',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Property And The Measurements It Relates To
    """
class Temperature_Sensor(Sensor):
"""
A Device That Consists Of A Sensor, Has Category Saref:Sensor, Performs The
Saref:Sensingfunction And Is Used For The Purpose Of Sensing A Property Of
Type Saref:Temperature
Source:
https://w3id.org/saref (saref.ttl)
"""
def __init__(self, *args, **kwargs):
is_initialised = 'id' in self.__dict__
super().__init__(*args, **kwargs)
if not is_initialised:
self.Has_Description._rules = [('max|1', [['string']])]
self.Has_Manufacturer._rules = [('max|1', [['string']])]
self.Has_Model._rules = [('max|1', [['string']])]
self.Accomplishes._rules = [('value', [[Comfort]]), ('min|1', [[Task]])]
self.Consists_Of._rules = [('some', [[Sensor]]), ('only', [[Device]])]
self.Controls_Property._rules = [('only', [[Property]])]
self.Has_Function._rules = [('some', [[Sensing_Function]]), ('some', [[Sensing_Function]]), ('min|1', [[Function]])]
self.Has_Profile._rules = [('only', [[Profile]])]
self.Has_State._rules = [('only', [[State]])]
self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
self.Is_Used_For._rules = [('only', [[Commodity]])]
self.Makes_Measurement._rules = [('only', [[Measurement]])]
self.Measures_Property._rules = [('some', [[Temperature]]), ('only', [[Property]])]
self.Offers._rules = [('only', [[Service]])]
self.Accomplishes._instance_identifier = | |
<gh_stars>1-10
import praw
import prawcore
import postgresConfig
import psycopg2
import time
import unidecode
import os
# COMMENT THIS OUT FOR PRODUCTION
#import config
def authenticate():
    """Create an authenticated praw Reddit client.

    Uses Heroku config vars when IS_HEROKU is set, otherwise the local
    (git-ignored) ``config`` module.

    Returns:
        praw.Reddit: an authenticated Reddit session.
    """
    is_prod = os.environ.get('IS_HEROKU', None)
    # Credentials are intentionally not committed: on Heroku they come from
    # config vars, locally from config.py (see commented-out import above).
    if is_prod:
        return praw.Reddit(username=os.environ['username'],
                           password=os.environ['password'],
                           client_id=os.environ['client_id'],
                           client_secret=os.environ['client_secret'],
                           user_agent="Arsenal Goal Bot v0.1",
                           )
    # BUG FIX: the password was left as a redacted "<PASSWORD>" placeholder,
    # which is not even valid Python; read it from the config module like
    # the other credentials.
    return praw.Reddit(username=config.username,
                       password=config.password,
                       client_id=config.client_id,
                       client_secret=config.client_secret,
                       user_agent="Arsenal Goal Bot v0.1",
                       )
# Markdown footer appended to every bot reply; "^^" renders as superscript
# on Reddit. The leading "___" draws a horizontal rule above the links.
FOOTER = '''___\n\n
^^[Wiki](https://www.reddit.com/r/arsenal_goal_bot/wiki/index)
^^| ^^[Feedback](/r/arsenal_goal_bot)
^^| ^^[Creator](/u/BSUWolf)'''
def parse_body(body):
    """Extract the query following the '!arsenalgoal ' keyword in a comment.

    The query runs from the end of the keyword to the end of the line and is
    split into sections at each comma (sections keep surrounding whitespace).

    Returns:
        list[str]: the comma-separated query sections.
    """
    keyword = '!arsenalgoal '
    start_index = body.find(keyword)
    # Drop everything up to and including the keyword.
    body = body[start_index + len(keyword):]
    # BUG FIX: the newline index was computed but never applied, so text on
    # following lines leaked into the query. Truncate at the first newline.
    end_index = body.find('\n')
    if end_index != -1:
        body = body[:end_index]
    print('user query: {}'.format(body))
    # Split the query into different sections at each comma
    return body.split(',')
def parse_body_assist(body):
    """Extract the query following the '!arsenalassist ' keyword in a comment.

    Same contract as parse_body() but for the assist keyword (15 characters,
    not 13 — the original comment was copy-pasted from parse_body).

    Returns:
        list[str]: the comma-separated query sections.
    """
    keyword = '!arsenalassist '
    start_index = body.find(keyword)
    # Drop everything up to and including the keyword.
    body = body[start_index + len(keyword):]
    # BUG FIX: the newline index was computed but never applied; truncate
    # the query at the first newline so only the keyword line is parsed.
    end_index = body.find('\n')
    if end_index != -1:
        body = body[:end_index]
    print('user query: {}'.format(body))
    # Split the query into different sections at each comma
    return body.split(',')
def get_sql_items(query):
    """Translate a parsed '!arsenalgoal' query into SQL + parameters.

    `query` holds comma-separated sections: the scorer's name, then
    optionally a competition, a season, or an opposition (the latter may be
    followed by a season).

    Returns:
        tuple[str, list]: (sqlquery, params) ready for cursor.execute().
    """
    # Shared SELECT; trimmed columns (indices 4-7) are what the reply uses.
    select = ("SELECT opposition, competition, season, url, "
              "trim(both ' ' from opposition), trim(both ' ' from competition), "
              "trim(both ' ' from season), trim(both ' ' from url) "
              "FROM mens_goals WHERE scorer = %s")
    competitions = {"league cup", "community shield", "premier league",
                    "fa cup", "europa league", "champions league"}
    seasons = {"2019-2020", "2018-2019"}
    # Player name: strip whitespace and fold accented characters so that
    # e.g. "özil" matches the ASCII-stored name.
    params = [unidecode.unidecode(query[0].strip())]
    if len(query) < 2:
        # BUG FIX: a bare player query previously fell off the end of the
        # function and returned None, crashing the caller's unpacking.
        return select + "; ", params
    second_query = query[1].strip()
    # (The old "elif second_query is None" branch was dead code: str.strip()
    # never returns None.)
    if second_query in competitions:
        params.append(second_query)
        clause = " AND competition = %s"
    elif second_query in seasons:
        params.append(second_query)
        return select + " AND season = %s; ", params
    else:
        # Anything else is treated as an opposition name.
        params.append(second_query)
        clause = " AND opposition = %s"
    if len(query) > 2:
        # Optional trailing season filter.
        params.append(query[2].strip())
        return select + clause + " AND season = %s; ", params
    return select + clause + "; ", params
def get_assist_items(query):
    """Translate a parsed '!arsenalassist' query into SQL + parameters.

    Mirrors get_sql_items() but filters on the `assist` column. Also brings
    the season list in line with get_sql_items ("2019-2020" was missing
    here, so that season was silently treated as an opposition name).

    Returns:
        tuple[str, list]: (sqlquery, params) ready for cursor.execute().
    """
    # Shared SELECT; trimmed columns (indices 4-7) are what the reply uses.
    select = ("SELECT opposition, competition, season, url, "
              "trim(both ' ' from opposition), trim(both ' ' from competition), "
              "trim(both ' ' from season), trim(both ' ' from url) "
              "FROM mens_goals WHERE assist = %s")
    competitions = {"league cup", "community shield", "premier league",
                    "fa cup", "europa league", "champions league"}
    seasons = {"2019-2020", "2018-2019"}
    params = [query[0].strip()]
    if len(query) < 2:
        # BUG FIX: a bare player query previously fell off the end of the
        # function and returned None, crashing the caller's unpacking.
        return select + "; ", params
    second_query = query[1].strip()
    if second_query in competitions:
        params.append(second_query)
        clause = " AND competition = %s"
    elif second_query in seasons:
        params.append(second_query)
        return select + " AND season = %s; ", params
    else:
        # Anything else is treated as an opposition name.
        params.append(second_query)
        clause = " AND opposition = %s"
    if len(query) > 2:
        # Optional trailing season filter.
        params.append(query[2].strip())
        return select + clause + " AND season = %s; ", params
    return select + clause + "; ", params
def get_urls(sqlquery, params):
    """Run *sqlquery* with *params* against the goals DB and build the reply.

    Connects to Postgres using Heroku config vars in production, or a local
    connection otherwise. Each row becomes a markdown link
    "[opposition: competition (season)](https://imgur.com/<id>)"; the bot
    FOOTER is appended at the end.

    Returns:
        str: the assembled markdown reply.
    """
    is_prod = os.environ.get('IS_HEROKU', None)
    print("is prod?? ", is_prod)
    if is_prod:
        host = os.environ['DB_HOST']
        dbname = os.environ['DB_NAME']
        user = os.environ['DB_USER']
        # BUG FIX: the password was left as a redacted "<PASSWORD>"
        # placeholder; read it from the environment like the other values.
        password = os.environ['DB_PASSWORD']
        conn_string = "host='{}' dbname='{}' user='{}' password='{}'".format(host, dbname, user, password)
        # Do not log the full connection string in production — it would
        # leak the database password into the logs.
        print("Connecting to database\n ->host='{}' dbname='{}' user='{}'".format(host, dbname, user))
    else:
        host = 'localhost'
        dbname = 'arsenal_bot'
        user = 'nic'
        conn_string = "host='{}' dbname='{}' user='{}'".format(host, dbname, user)
        print("Connecting to database\n ->%s" % (conn_string,))
    # get a connection; if one cannot be made an exception is raised here
    conn = psycopg2.connect(conn_string)
    try:
        cursor = conn.cursor()
        print("Connected!\n")
        cursor.execute(sqlquery, params)
        reply = ''
        # For each record that comes back, loop through and build the reply.
        # (The old "if cursor:" guard was useless — a cursor object is
        # always truthy.)
        for record in cursor:
            # Trimmed columns are at indices 4-7:
            # opposition, competition, season, imgur id.
            reply += '[{}: {} ({})](https://imgur.com/{})'.format(record[4], record[5], record[6], record[7])
            reply += '\n\n'
        reply += FOOTER
        return reply
    finally:
        # BUG FIX: the connection was never closed, leaking one DB
        # connection per query.
        conn.close()
def run(r):
# Get all comments from designated subreddits
for comment in r.subreddit('arsenal_goal_bot+Gunners').stream.comments():
body = comment.body
# listen | |
c = atoms.get_cell()[2][2] / self.properties[structure + '_size'][2]
self.values[('lattice_constant_a', structure, elements)] = a
self.values[('lattice_constant_c', structure, elements)] = c
#self.latticeconstants[(structure, elements)] = [a, c]
E = atoms.get_potential_energy() / len(atoms)
self.values[('energy', structure, elements)] = E
if self.debug:
print "a: %.5f A" % (a,)
if structure in ['hcp', 'l10']:
print "c: %.5f A" % (c,)
print "E: %.5f eV" % (E,)
print 40 * "-" + "\n"
def calc_lattice_ratio_ca(self, structure, elements):
    """Store the c/a lattice-constant ratio under
    ('lattice_ratio_ca', structure, elements)."""
    const_a = self.get_lattice_constant_a(structure, elements)
    const_c = self.get_lattice_constant_c(structure, elements)
    self.values[('lattice_ratio_ca', structure, elements)] = const_c / const_a
def calc_volume_per_atom(self, structure, elements):
    """Compute and store the equilibrium volume per atom.

    fcc: a^3 / 4 (four atoms per cubic cell);
    hcp: sqrt(3) * a^2 * c / 4 (two atoms per hexagonal a-a-c cell).
    The result is stored under ('volume_per_atom', structure, elements).

    Raises:
        ValueError: for structures other than 'fcc' or 'hcp'.
    """
    if structure == 'fcc':
        a = self.get_lattice_constant_a(structure, elements)
        volume = a**3 / 4.0
    elif structure == 'hcp':
        a = self.get_lattice_constant_a(structure, elements)
        c = self.get_lattice_constant_c(structure, elements)
        volume = np.sqrt(3) * a**2 * c / 4.0
    else:
        # BUG FIX: the message previously said "lattice constants" —
        # copy-pasted from calc_lattice_constants.
        raise ValueError("Cannot calculate volume per atom for '%s'" % (structure,))
    self.values[('volume_per_atom', structure, elements)] = volume
def calc_surface_energy(self, structure, elements):
    """Compute the surface energy for a supported surface structure.

    Builds one slab per layer count in self.properties['surface_layers']
    and fits a SurfaceEnergy object to the series; the result is stored
    under ('surface_energy', structure, elements).

    Raises ValueError for unsupported surface names.
    """
    if structure not in ['fcc100', 'fcc111', 'hcp0001', 'l12100',
                         'l12111']:#, 'hcp1010A', 'hcp1010B']:
        raise ValueError("Cannot calculate surface energy for '%s'" % (structure,))
    if self.debug:
        print "Calculating surface energy..."
        print 40 * "*"
        print "Structure: %s" % (structure,)
        print "Elements: %s\n" % (elements,)
    # One slab image per requested layer count.
    images = []
    for n in self.properties['surface_layers']:
        images.append(self.get_structure(structure, elements, l=n))
    # Atoms per layer, derived from the thickest slab.
    # NOTE(review): Python 2 integer division — assumes the slab atom count
    # is an exact multiple of the layer count; confirm.
    natoms = len(images[-1]) / self.properties['surface_layers'][-1]
    e = SurfaceEnergy(images, natoms, self.get_calc(), fmax=0.01, debug=self.debug)
    self.values[('surface_energy', structure, elements)] = e
    if self.debug:
        print "\nSurface energy: %.5f eV/atom" % (e,)
        print 40 * "-" + "\n"
def calc_surface_ratio(self, structure, elements):
    """Store the ratio of two surface energies.

    *structure* names the two surfaces joined by '-', e.g. 'fcc100-fcc111';
    the value stored is energy(first) / energy(second).
    """
    parts = structure.split('-')
    if len(parts) != 2:
        raise ValueError("You must specify exactly two surfaces")
    numerator = self.get_surface_energy(parts[0], elements)
    denominator = self.get_surface_energy(parts[1], elements)
    self.values[('surface_ratio', structure, elements)] = numerator / denominator
def calc_heat_of_formation(self, structure, elements):
    """Heat of formation per atom of an ordered two-element alloy.

    Computed as E_alloy - sum(x_i * E_single_i), with stoichiometry
    x = (0.25, 0.75) for L1_2 and (0.5, 0.5) for L1_0 / B2. Per-element
    reference energies use each element's reference crystal structure.
    The result is stored under ('heat_of_formation', structure, elements).
    """
    if structure not in ['l12', 'l10', 'B2']:
        raise ValueError("Cannot calculate heat of formation for '%s'" % (structure,))
    if self.debug:
        print "Calculating heat of formation..."
        print 40 * "*"
        print "Structure: %s" % (structure,)
        print "Elements: %s\n" % (elements,)
    e_single = {}
    for symbol in elements:
        # Reference crystal symmetry for the pure element (ASE data tables).
        sym = reference_states[atomic_numbers[symbol]]['symmetry']
        #atoms = self.get_structure(sym, (symbol,))
        #atoms.set_calculator(self.get_calc())
        #e_single[symbol] = atoms.get_potential_energy() / len(atoms)
        e_single[symbol] = self.get_value('energy', sym, (symbol,),
                                          self.calc_lattice_constants)
    #atoms = self.get_structure(structure, elements)
    #atoms.set_calculator(self.get_calc())
    #e_alloy = atoms.get_potential_energy() / len(atoms)
    e_alloy = self.get_value('energy', structure, elements,
                             self.calc_lattice_constants)
    if structure == 'l12':
        # L1_2 stoichiometry is A B_3.
        e = e_alloy - 0.25 * e_single[elements[0]] \
            - 0.75 * e_single[elements[1]]
    else:
        # L1_0 and B2 are 50/50 compositions.
        e = e_alloy - 0.5 * e_single[elements[0]] \
            - 0.5 * e_single[elements[1]]
    self.values[('heat_of_formation', structure, elements)] = e
    if self.debug:
        print "Heat of formation: %.5f eV/atom" % (e,)
        print 40 * "-" + "\n"
#def calc_heat_of_solution(self, structure, elements):
# lim(N->inf) {E(Pt_N Y) - N*E(Pt) - E(Y)}
def calc_impurity_energy(self, structure, elements):
    """Energy cost of substituting one elements[0] atom into a cluster of
    elements[1].

    *structure* selects cluster and substitution site ('oct38-center',
    'oct38-face' or 'oct38-edge'). Both the clean and the substituted
    cluster are relaxed with BFGS; the stored value is
    E(impurity cluster) - E(clean cluster).
    """
    if structure not in ['oct38-center', 'oct38-face', 'oct38-edge']:
        raise ValueError("Cannot calculate impurity energy for '%s'" % (structure,))
    if len(elements) != 2:
        raise ValueError("Tuple of elements must be of length two")
    if self.debug:
        print "Calculating impurity energy..."
        print 40 * "*"
        print "Structure: %s" % (structure,)
        print "Elements: %s\n" % (elements,)
    name, impurity = structure.split('-')
    # Lattice constant of the host element's reference crystal.
    sym = reference_states[atomic_numbers[elements[1]]]['symmetry']
    latticeconstant = self.get_lattice_constant_a(sym, (elements[1],))
    if name == 'oct38':
        # Atom index of each named substitution site in the 38-atom octahedron.
        sites = {'center': 10, 'face': 9, 'edge': 0}
        atoms = Octahedron(elements[1], 4, 1, latticeconstant)
    atoms.set_calculator(self.get_calc())
    dyn = BFGS(atoms, logfile=None, trajectory=None)
    dyn.run(fmax=0.001, steps=100)
    assert dyn.converged()
    s_clean = dyn.get_number_of_steps()
    e_clean = atoms.get_potential_energy()
    # Substitute the impurity atom and relax again with the same optimizer
    # (step counts are cumulative on the reused BFGS instance).
    atoms[sites[impurity]].symbol = elements[0]
    dyn.run(fmax=0.001, steps=100)
    assert dyn.converged()
    s_impurity = dyn.get_number_of_steps()
    e_impurity = atoms.get_potential_energy()
    self.values[('impurity_energy', structure, elements)] = e_impurity - e_clean
    if self.debug:
        print "BFGS steps: %i %i" % (s_clean, s_impurity)
        print "Impurity energy: %.5f - %.5f = %.5f eV" % (e_impurity, e_clean,
                                                          e_impurity - e_clean)
        print 40 * "-" + "\n"
def calc_impurity_ratio(self, structure, elements):
    """Store the ratio of two impurity energies.

    *structure* names the two impurity structures joined by '/'; the value
    stored is energy(first) / energy(second).
    """
    parts = structure.split('/')
    if len(parts) != 2:
        raise ValueError("You must specify exactly two structures")
    numerator = self.get_impurity_energy(parts[0], elements)
    denominator = self.get_impurity_energy(parts[1], elements)
    self.values[('impurity_ratio', structure, elements)] = numerator / denominator
def calc_cutoff_energy(self, structure, elements):
    """Ratio of the potential energy in a strongly expanded cell to the
    equilibrium energy.

    The cell is scaled by f = 1.73 (presumably chosen to push neighbor
    distances toward the potential cutoff — TODO confirm) and the stored
    value is e_cutoff / e_equilibrium.
    """
    if structure not in ['fcc', 'hcp', 'l12']:
        raise ValueError("Cannot calculate cutoff energy for '%s'" % (structure,))
    if self.debug:
        print "Calculating cutoff energy..."
        print 40 * "*"
        print "Structure: %s" % (structure,)
        print "Elements: %s\n" % (elements,)
    # NOTE(review): reads self.latticeconstants directly — assumes lattice
    # constants were computed and cached beforehand (the code that fills
    # this dict appears to be commented out elsewhere; verify).
    if structure in ['fcc', 'l12']:
        # Cubic structures: a single lattice constant, no c axis.
        a = self.latticeconstants[(structure, elements)]
        c = None
        f = 1.73
    else:
        # hcp: (a, c) pair.
        lc = self.latticeconstants[(structure, elements)]
        a = lc[0]
        c = lc[1]
        f = 1.73
    atoms = self.get_structure(structure, elements, a=a, c=c)
    atoms.set_calculator(self.get_calc())
    e_equilibrium = atoms.get_potential_energy()
    # Scale all cell vectors uniformly; True scales atom positions too.
    new_cell = f * atoms.get_cell()
    atoms.set_cell(new_cell, True)
    e_cutoff = atoms.get_potential_energy()
    self.values[('cutoff_energy', structure, elements)] = e_cutoff / e_equilibrium
    if self.debug:
        print "Equilibrium: %.5f eV" % (e_equilibrium,)
        print "Cutoff: %.5f eV" % (e_cutoff,)
        print "Ratio: %.5f" % (e_cutoff / e_equilibrium,)
        print 40 * "-" + "\n"
def calc_scaling_energy(self, structure, elements):
t = structure.split('-')
structure = t[0]
scaling = float(t[1])
if structure not in ['fcc', 'hcp', 'l12']:
raise ValueError("Cannot calculate scaling energy for '%s'" % (structure,))
if self.debug:
print "Calculating scaling energy..."
print 40 * "*"
print "Structure: %s" % (structure,)
print "Elements: %s\n" % (elements,)
if structure in ['fcc', 'l12']:
a = self.latticeconstants[(structure, elements)]
c = None
f = 1.73
else:
lc = self.latticeconstants[(structure, elements)]
a = lc[0]
c = lc[1]
f = 1.73
atoms = self.get_structure(structure, elements, a=a, c=c)
atoms.set_calculator(self.get_calc())
new_cell = scaling * atoms.get_cell()
atoms.set_cell(new_cell, True)
energy = atoms.get_potential_energy() / len(atoms)
structure = '%s-%.2f' % (structure, scaling)
self.values[('scaling_energy', structure, elements)] = energy
if self.debug:
print "Energy: %.5f eV" % (energy,)
print 40 * "-" + "\n"
def calc_force_match(self, structure, elements):
    """Relative RMS error between calculator forces and reference forces.

    Here *structure* is a *filename* readable by ase.io.read whose stored
    forces serve as the (DFT) reference. The cell is repeated 2x2x2 before
    evaluating the model and only the original atoms are compared.
    """
    if self.debug:
        print "Calculating force matching..."
        print 40 * "*"
        print "Structure: %s" % (structure,)
        print "Elements: %s\n" % (elements,)
    atoms = read(structure)
    f_dft = atoms.get_forces()
    atoms = atoms.repeat((2,2,2))
    atoms.set_calculator(self.get_calc())
    # Compare only the first len(f_dft) atoms — the original cell.
    f = atoms.get_forces()[:len(f_dft)]
    df = f_dft - f
    # Frobenius norm of the difference, normalized by the reference norm.
    err = np.sqrt((df*df).sum())
    err /= np.sqrt((f_dft*f_dft).sum())
    self.values[('force_match', structure, elements)] = err
    if self.debug:
        print "Mean force error: %.5f eV/A" % (err,)
        print 40 * "-" + "\n"
def calc_stacking_fault(self, structure, elements):
    """Stacking-fault energy of an fcc crystal.

    Relaxes a 5-layer fcc(111) stack (faulted when periodically repeated)
    and a 6-layer reference stack, subtracts the size-normalized bulk
    energy, and divides by fault count and area. The final division by
    1e-3 * kJ * 1e-20 converts the eV/A^2 value to an SI-style unit
    (mJ/m^2, presumably — TODO confirm). Stored under
    ('stacking_fault', structure, elements).
    """
    if structure != 'fcc':
        raise ValueError("Cannot calculate stacking fault energy of structure "+structure)
    a = self.latticeconstants[(structure, elements)]
    # 5-layer (111) slab: repeating 5 layers breaks ABCABC... stacking.
    atoms = fcc111(elements[0], (1,2,5), orthogonal=True, a=a)
    atoms.set_pbc(True)
    atoms = atoms.repeat(self.properties['sf_repeat'])
    atoms.set_calculator(self.get_calc())
    dyn = QuasiNewton(atoms, logfile=None, trajectory=None)
    dyn.run(fmax=0.02)
    e_sf = atoms.get_potential_energy()
    n_sf = len(atoms)
    # Reference: 6 layers keeps perfect fcc stacking under repetition.
    atoms = fcc111(elements[0], (1,2,6), orthogonal=True, a=a)
    atoms.set_pbc(True)
    atoms = atoms.repeat(self.properties['sf_repeat'])
    atoms.set_calculator(self.get_calc())
    dyn = QuasiNewton(atoms, logfile=None, trajectory=None)
    dyn.run(fmax=0.02)
    e_bulk = atoms.get_potential_energy()
    n_bulk = len(atoms)
    # One fault per z-repeat of the 5-layer stack.
    layers = self.properties['sf_repeat'][2]
    uc = atoms.get_cell()
    # In-plane cell area (orthogonal cell: product of x and y lengths).
    area = uc[0,0] * uc[1,1]
    # Excess energy per fault plane, per unit area.
    result = (e_sf - e_bulk * n_sf / n_bulk) / layers / area
    result /= 1e-3 * kJ * 1e-20
    self.values[('stacking_fault', structure, elements)] = result
### Bulk structures ###
def get_structure(self, name, elements, a=None, c= None, l=None):
# Check number of elements
if name[:3] in ['fcc', 'hcp']:
if len(elements) != 1:
raise ValueError("Tuple of elements must be of length one")
if name[:3] in ['l12', 'l10'] or name[:2] == 'B2':
if len(elements) != 2:
raise ValueError("Tuple of elements must be of length two")
# Get lattice constants
if a is None:
if name[:2] == 'B2':
a = self.get_lattice_constant_a(name[:2], elements)
elif name[:3] in ['fcc', 'hcp', 'bcc', 'l12', 'l10']:
a = self.get_lattice_constant_a(name[:3], elements)
if c is None:
if name[:3] in ['hcp', 'l10']:
c = self.get_lattice_constant_c(name[:3], elements)
# Get size
if name in ['fcc', 'hcp', 'bcc', 'l12', 'l10', 'B2']:
size = self.properties[name + '_size']
elif name in ['fcc100', 'fcc111', 'hcp0001']:
size = self.properties[name + '_size'][:2] + (l,)
# Make structure
if name == 'fcc':
atoms = FaceCenteredCubic(symbol=elements[0],
size=size,
latticeconstant=a)
elif name == 'hcp':
atoms = HexagonalClosedPacked(symbol=elements[0], size=size,
directions=[[2,-1,-1,0],[0,1,-1,0],[0,0,0,1]],
latticeconstant=(a, c))
elif name == 'bcc':
atoms = BodyCenteredCubic(symbol=elements[0],
size=size,
latticeconstant=a)
elif name == 'B2':
atoms = B2(symbol=elements, size=size, latticeconstant=a)
elif name == 'l12':
atoms = L1_2(symbol=elements, size=size, latticeconstant=a)
elif name == 'l10':
atoms = L1_0(symbol=elements, size=size, latticeconstant=(a, c))
elif name == 'fcc100':
atoms = fcc100(symbol=elements[0], size=size, a=a, vacuum=10.0)
elif name == 'fcc111':
atoms = fcc111(symbol=elements[0], size=size, a=a,
vacuum=10.0, orthogonal=True)
elif name == 'hcp0001':
atoms = hcp0001(symbol=elements[0], size=size, a=a, c=c,
vacuum=10.0, orthogonal=True)
elif name == 'hcp1010A':
raise ValueError("Structure '%s' not supported" % (name,))
atoms = None
elif name == 'hcp1010B':
raise ValueError("Structure '%s' not supported" % (name,))
atoms = None
elif name == 'l12100':
n = (l + 1) / 2
atoms = L1_2(symbol=elements, size=(8, 8, n), latticeconstant=a)
atoms.set_pbc([True, True, False])
# Remove layers
atoms = atoms[atoms.get_positions()[:,2] > 0.1 * | |
line options
which have arguments ('--my-opt somepath') we might get some
false positives. All builtin and 3rd party plugins will have
been loaded, however, so common options will not confuse our logic
here.
"""
current = py.path.local()
self._confcutdir = (
current.join(namespace.confcutdir, abs=True)
if namespace.confcutdir
else None
)
self._noconftest = namespace.noconftest
self._using_pyargs = namespace.pyargs
testpaths = namespace.file_or_dir
foundanchor = False
for path in testpaths:
path = str(path)
# remove node-id syntax
i = path.find("::")
if i != -1:
path = path[:i]
anchor = current.join(path, abs=1)
if exists(anchor): # we found some file object
self._try_load_conftest(anchor)
foundanchor = True
if not foundanchor:
self._try_load_conftest(current)
def _try_load_conftest(self, anchor):
    """Load conftest modules for *anchor* and for its test* sub-directories."""
    self._getconftestmodules(anchor)
    if not anchor.check(dir=1):
        return
    # Also pick up conftests living in test* subdirectories of the anchor.
    for candidate in anchor.listdir("test*"):
        if candidate.check(dir=1):
            self._getconftestmodules(candidate)
@lru_cache(maxsize=128)
def _getconftestmodules(self, path):
    """Return the list of conftest modules relevant for *path*.

    Walks from the filesystem root down to *path*'s directory, importing
    every conftest.py found along the way, honoring --noconftest and
    --confcutdir. The computed list is also recorded in
    self._dirpath2confmods.

    NOTE(review): lru_cache on a bound method keys on ``self`` and keeps
    the manager alive for the cache's lifetime — acceptable only because
    this plugin manager is effectively a process-wide singleton.
    """
    if self._noconftest:
        return []
    # Conftests are looked up per-directory; normalize files to their dir.
    if path.isfile():
        directory = path.dirpath()
    else:
        directory = path
    # XXX these days we may rather want to use config.rootdir
    # and allow users to opt into looking into the rootdir parent
    # directories instead of requiring to specify confcutdir
    clist = []
    for parent in directory.realpath().parts():
        # Skip every directory above the configured cut directory.
        if self._confcutdir and self._confcutdir.relto(parent):
            continue
        conftestpath = parent.join("conftest.py")
        if conftestpath.isfile():
            mod = self._importconftest(conftestpath)
            clist.append(mod)
    self._dirpath2confmods[directory] = clist
    return clist
def _rget_with_confmod(self, name, path):
    """Return ``(module, value)`` for the closest conftest defining *name*.

    Conftests nearer to *path* win (the module list is root-first, hence
    the reversed scan). Raises KeyError when no conftest defines *name*.
    """
    missing = object()
    for mod in reversed(self._getconftestmodules(path)):
        value = getattr(mod, name, missing)
        if value is not missing:
            return mod, value
    raise KeyError(name)
def _importconftest(self, conftestpath):
    """Import (at most once) and register the conftest at *conftestpath*.

    Returns the cached module when this conftest was imported before;
    otherwise imports it, records it in the per-directory bookkeeping and
    announces it via consider_conftest(). Raises ConftestImportFailure
    when the import itself fails.
    """
    # Use a resolved Path object as key to avoid loading the same conftest twice
    # with build systems that create build directories containing
    # symlinks to actual files.
    # Using Path().resolve() is better than py.path.realpath because
    # it resolves to the correct path/drive in case-insensitive file systems (#5792)
    key = Path(str(conftestpath)).resolve()
    try:
        return self._conftestpath2mod[key]
    except KeyError:
        pkgpath = conftestpath.pypkgpath()
        if pkgpath is None:
            # Not inside a package: drop any stale sys.modules entry so the
            # import below produces a fresh module object.
            _ensure_removed_sysmodule(conftestpath.purebasename)
        try:
            mod = conftestpath.pyimport()
            if (
                hasattr(mod, "pytest_plugins")
                and self._configured
                and not self._using_pyargs
            ):
                # pytest_plugins in a non-top-level conftest is an error
                # once configuration has happened.
                _fail_on_non_top_pytest_plugins(conftestpath, self._confcutdir)
        except Exception:
            raise ConftestImportFailure(conftestpath, sys.exc_info())
        self._conftest_plugins.add(mod)
        self._conftestpath2mod[key] = mod
        dirpath = conftestpath.dirpath()
        if dirpath in self._dirpath2confmods:
            # Retro-fit the new module into every already-computed module
            # list for this directory and its subdirectories.
            for path, mods in self._dirpath2confmods.items():
                if path and path.relto(dirpath) or path == dirpath:
                    assert mod not in mods
                    mods.append(mod)
        self.trace("loading conftestmodule {!r}".format(mod))
        self.consider_conftest(mod)
        return mod
#
# API for bootstrapping plugin loading
#
#
def consider_preparse(self, args, *, exclude_only=False):
    """Pre-scan command-line *args* for ``-p`` plugin specs.

    Handles both ``-p name`` and ``-pname`` forms. When *exclude_only* is
    true, only ``no:<name>`` (blocking) specs are acted upon.
    """
    arg_iter = iter(args)
    for opt in arg_iter:
        if not isinstance(opt, str):
            continue
        if opt == "-p":
            # The spec is the next argument; a trailing bare "-p" simply
            # ends the scan (mirrors the original index-based loop).
            try:
                parg = next(arg_iter)
            except StopIteration:
                return
        elif opt.startswith("-p"):
            parg = opt[2:]
        else:
            continue
        if exclude_only and not parg.startswith("no:"):
            continue
        self.consider_pluginarg(parg)
def consider_pluginarg(self, arg):
    """Act on a single ``-p`` plugin spec.

    ``no:NAME`` blocks the plugin (and its ``pytest_``-prefixed alias);
    any other value unblocks a previously-blocked plugin of that name and
    imports it, considering setuptools entry points.
    """
    if arg.startswith("no:"):
        name = arg[3:]
        if name in essential_plugins:
            raise UsageError("plugin %s cannot be disabled" % name)
        # PR #4304 : remove stepwise if cacheprovider is blocked
        if name == "cacheprovider":
            self.set_blocked("stepwise")
            self.set_blocked("pytest_stepwise")
        self.set_blocked(name)
        # Also block the conventional pytest_-prefixed module name.
        if not name.startswith("pytest_"):
            self.set_blocked("pytest_" + name)
    else:
        name = arg
        # Unblock the plugin. None indicates that it has been blocked.
        # There is no interface with pluggy for this.
        if self._name2plugin.get(name, -1) is None:
            del self._name2plugin[name]
        if not name.startswith("pytest_"):
            if self._name2plugin.get("pytest_" + name, -1) is None:
                del self._name2plugin["pytest_" + name]
        self.import_plugin(arg, consider_entry_points=True)
def consider_conftest(self, conftestmodule):
    """Register a conftest module as a plugin, keyed by its file path."""
    plugin_name = conftestmodule.__file__
    self.register(conftestmodule, name=plugin_name)
def consider_env(self):
    """Import plugin specs named in the PYTEST_PLUGINS environment variable."""
    spec = os.environ.get("PYTEST_PLUGINS")
    self._import_plugin_specs(spec)
def consider_module(self, mod):
    """Import plugin specs declared in *mod*'s ``pytest_plugins`` attribute."""
    specs = getattr(mod, "pytest_plugins", [])
    self._import_plugin_specs(specs)
def _import_plugin_specs(self, spec):
    """Normalize *spec* into a list of plugin names and import each one."""
    for plugin_name in _get_plugin_specs_as_list(spec):
        self.import_plugin(plugin_name)
def import_plugin(self, modname, consider_entry_points=False):
    """
    Imports a plugin with ``modname``. If ``consider_entry_points`` is True, entry point
    names are also considered to find a plugin.
    """
    # most often modname refers to builtin modules, e.g. "pytester",
    # "terminal" or "capture". Those plugins are registered under their
    # basename for historic purposes but must be imported with the
    # _pytest prefix.
    assert isinstance(modname, str), (
        "module name as text required, got %r" % modname
    )
    modname = str(modname)
    # Already blocked, or already registered: nothing to do.
    if self.is_blocked(modname) or self.get_plugin(modname) is not None:
        return
    importspec = "_pytest." + modname if modname in builtin_plugins else modname
    # Make sure assertion rewriting is applied to the plugin module.
    self.rewrite_hook.mark_rewrite(importspec)
    if consider_entry_points:
        loaded = self.load_setuptools_entrypoints("pytest11", name=modname)
        if loaded:
            return
    try:
        __import__(importspec)
    except ImportError as e:
        # Re-raise with the plugin name in the message while preserving
        # the original traceback.
        raise ImportError(
            'Error importing plugin "{}": {}'.format(modname, str(e.args[0]))
        ).with_traceback(e.__traceback__)
    except Skipped as e:
        from _pytest.warnings import _issue_warning_captured
        # A plugin may skip itself (e.g. OS-specific); surface that as a
        # config warning rather than an error.
        _issue_warning_captured(
            PytestConfigWarning("skipped plugin {!r}: {}".format(modname, e.msg)),
            self.hook,
            stacklevel=2,
        )
    else:
        mod = sys.modules[importspec]
        self.register(mod, modname)
def _get_plugin_specs_as_list(specs):
"""
Parses a list of "plugin specs" and returns a list of plugin names.
Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in
which case it is returned as a list. Specs can also be `None` in which case an
empty list is returned.
"""
if specs is not None and not isinstance(specs, types.ModuleType):
if isinstance(specs, str):
specs = specs.split(",") if specs else []
if not isinstance(specs, (list, tuple)):
raise UsageError(
"Plugin specs must be a ','-separated string or a "
"list/tuple of strings for plugin names. Given: %r" % specs
)
return list(specs)
return []
def _ensure_removed_sysmodule(modname):
try:
del sys.modules[modname]
except KeyError:
pass
class Notset:
    """Sentinel type whose repr marks a value as "not supplied"."""

    def __repr__(self):
        return "<NOTSET>"
# Module-level singleton used as the "no value supplied" marker.
notset = Notset()
def _iter_rewritable_modules(package_files):
    """Yield importable names eligible for assertion rewriting.

    ``package_files`` is an iterable of file names from a source
    distribution.  Top-level modules (``foo.py`` -> ``foo``, except
    ``setup.py``) and top-level packages (``pkg/__init__.py`` -> ``pkg``)
    are yielded.  If nothing matches, the first path component is stripped
    once and the scan retried, to cope with egg/"editable" layouts that
    nest everything under e.g. ``src/``.

    More information:
    https://github.com/pytest-dev/pytest-mock/issues/167
    """
    package_files = list(package_files)
    found_any = False
    for filename in package_files:
        if "/" not in filename and filename.endswith(".py"):
            stem, _ext = os.path.splitext(filename)
            # "setup.py" at the distribution root is not a plugin module.
            if stem != "setup":
                found_any = True
                yield stem
        elif filename.count("/") == 1 and filename.endswith("__init__.py"):
            found_any = True
            yield os.path.dirname(filename)
    if not found_any:
        # No modules/packages found at the top level: retry with the first
        # path component removed (handles "src"-based source trees). The
        # common dist-info case stays fast because we only recurse here.
        stripped = ["/".join(name.split("/")[1:]) for name in package_files]
        stripped = [name for name in stripped if name]
        if stripped:
            yield from _iter_rewritable_modules(stripped)
class Config:
"""
Access to configuration values, pluginmanager and plugin hooks.
:param PytestPluginManager pluginmanager:
:param InvocationParams invocation_params:
Object containing the parameters regarding the ``pytest.main``
invocation.
"""
    @attr.s(frozen=True)
    class InvocationParams:
        """Holds parameters passed during ``pytest.main()``
        The object attributes are read-only.
        .. versionadded:: 5.1
        .. note::
            Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts``
            ini option are handled by pytest, not being included in the ``args`` attribute.
            Plugins accessing ``InvocationParams`` must be aware of that.
        """
        # frozen=True: attrs generates an immutable class (read-only attributes).
        args = attr.ib(converter=tuple)
        """tuple of command-line arguments as passed to ``pytest.main()``."""
        plugins = attr.ib()
        """list of extra plugins, might be `None`."""
        dir = attr.ib(type=Path)
        """directory where ``pytest.main()`` was invoked from."""
def __init__(
self,
pluginmanager: PytestPluginManager,
*,
invocation_params: Optional[InvocationParams] = None
) -> None:
from .argparsing import Parser, FILE_OR_DIR
if invocation_params is None:
invocation_params = self.InvocationParams(
args=(), plugins=None, dir=Path().resolve()
)
self.option = argparse.Namespace()
"""access to command line option as attributes.
:type: argparse.Namespace"""
self.invocation_params = invocation_params
_a = FILE_OR_DIR
self._parser = Parser(
usage="%(prog)s [options] [{}] [{}] [...]".format(_a, _a),
processopt=self._processopt,
)
self.pluginmanager = pluginmanager
"""the plugin manager handles plugin registration and hook invocation.
:type: PytestPluginManager"""
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
self._inicache = {} # type: Dict[str, Any]
self._override_ini = () # type: Sequence[str]
self._opt2dest = {} # type: Dict[str, str]
self._cleanup = [] # type: List[Callable[[], None]]
# A place where plugins can store information on the config for their
# own use. Currently only intended for internal plugins.
self._store = Store()
self.pluginmanager.register(self, "pytestconfig")
self._configured = False
self.hook.pytest_addoption.call_historic(
kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager)
| |
    grp --> list, groups to which the user belongs
Returns
-------
(file['_id'], file_id, h, inserted_documents, updatedone_documents, deletedone_documents) --> tuple(bson.objectid.ObjectId class, bson.objectid.ObjectId class, list, list, list, list), the source object id, the destination object id, the list of the datanodes which handle a replica of any chunk for the source file, the list of the documents to insert and the collections in which they must be inserted, the list of the conditions for updating MongoDB documents, the values which have to be updated and the collection in which perform the update, the list of the conditions for deleting MongoDB documents and the collection in which perform the delete
"""
#for master namenode
inserted_documents = []
updatedone_documents = []
deletedone_documents = []
#get the source file to copy into a new path
try:
file = get_file(client, orig_file, required_by, grp)
except AccessDeniedException as e:
logging.warning(e.message)
raise e
except NotFoundException as e:
logging.warning(e.message)
raise e
for c in file['chunks'].keys():
tmp_c = file['chunks'][c]
del file['chunks'][c]
file['chunks'][c.replace('.', '[dot]').replace(':', '[colon]')] = tmp_c
for r in file['replicas'].keys():
tmp_dn = []
for dn in file['replicas'][r]:
tmp_dn.append(dn.replace('.', '[dot]').replace(':', '[colon]'))
file['replicas'][r] = tmp_dn
#get fs (filesystem) MongoDB collection
fs = get_fs(client)
#navigate in the file system until the parent directory
try:
curr_dir = navigate_through(dest_path, fs, required_by, grp, 'cp')
except AccessDeniedException as e:
logging.warning(e.message)
raise e
except NotFoundException as e:
logging.warning(e.message)
raise e
#the last part of the path is a directory or is the root directory, so the orig file must be copied in the destination directory with the same orig file name
if dest_path.name in curr_dir['directories'] or dest_path.name == '':
if dest_path.name != '':
curr_dir = fs.find_one({'name': dest_path.name, 'parent': curr_dir['_id'], 'type': 'd'})
#check the permissions with parent role on the final directory
if not check_permissions(curr_dir, 'parent', required_by, grp, 'cp'):
logging.warning('Access denied: the operation required is not allowed on {}'.format(curr_dir['name']))
raise AccessDeniedException(curr_dir['name'])
#a file with the same name already exists into this directory
if file['name'] in curr_dir['files']:
logging.warning('The resource already exists')
raise AlreadyExistsException()
#a directory with the same name already exists into this directory
if file['name'] in curr_dir['directories']:
logging.warning('A directory with the same name already exists')
raise AlreadyExistsDirectoryException()
#insert the new file into the parent directory
fs.update_one({ '_id': curr_dir['_id'] }, {'$push': { 'files': file['name']}})
#insert into the list needed for aligning the other namenodes
updatedone_documents.append(({ '_id': curr_dir['_id'] }, {'$push': { 'files': file['name']}}, 'fs'))
#create the file node and insert it into fs collection
new_file = create_file_node(file['name'], curr_dir['_id'], required_by, required_by, size=file['size'])
file_id = fs.insert_one(new_file).inserted_id
curr_file = fs.find_one({'_id': file_id})
#insert into the list needed for aligning the other namenodes
inserted_documents.append((curr_file, 'fs'))
dest_chunks = {}
dest_replicas = {}
dest_chunks_bkp = {}
dest_replicas_bkp = {}
orig_chunks = file['chunks']
orig_replicas = file['replicas']
orig_chunks_bkp = file['chunks_bkp']
orig_replicas_bkp = file['replicas_bkp']
#when a file is copied, also the chunks in the datanodes must be copied as they are, but with the prefix of the new file
for dn in orig_chunks:
for c in orig_chunks[dn]:
try:
dest_chunks[dn].append('{}_{}'.format(str(file_id),c.split('_')[1]))
except:
dest_chunks[dn] = ['{}_{}'.format(str(file_id),c.split('_')[1])]
dest_chunks_bkp['{}_{}'.format(str(file_id),c.split('_')[1])] = dn
for r in orig_replicas:
for dn in orig_replicas[r]:
try:
dest_replicas['{}_{}'.format(str(file_id),r.split('_')[1])].append(dn)
except:
dest_replicas['{}_{}'.format(str(file_id),r.split('_')[1])] = [dn]
try:
dest_replicas_bkp[dn].append('{}_{}'.format(str(file_id),r.split('_')[1]))
except:
dest_replicas_bkp[dn] = ['{}_{}'.format(str(file_id),r.split('_')[1])]
#update the fs collection
fs.update_one({ '_id': file_id }, {'$set': {'chunks': dest_chunks, 'chunks_bkp': dest_chunks_bkp, 'replicas': dest_replicas, 'replicas_bkp': dest_replicas_bkp}})
#insert into the list needed for aligning the other namenodes
updatedone_documents.append(({ '_id': file_id }, {'$set': {'chunks': dest_chunks, 'chunks_bkp': dest_chunks_bkp, 'replicas': dest_replicas, 'replicas_bkp': dest_replicas_bkp}}, 'fs'))
for c in orig_chunks.keys():
tmp_c = orig_chunks[c]
del orig_chunks[c]
orig_chunks[c.replace('[dot]', '.').replace('[colon]', ':')] = tmp_c
for r in orig_replicas.keys():
tmp_dn = []
for dn in orig_replicas[r]:
tmp_dn.append(dn.replace('[dot]', '.').replace('[colon]', ':'))
orig_replicas[r] = tmp_dn
#get the list of the chunks to copy, without repetitions
hs = list(set(chain(*list(orig_replicas.values()))))
hm = list(orig_chunks.keys())
h = list(set(hm + hs))
logging.info('Copied {} content into {}'.format(orig_file, dest_path))
#return the list of chunks to copy as they are with the new prefix into the interested datanodes
return (file['_id'], file_id, h, inserted_documents, updatedone_documents, deletedone_documents)
#the last part of the path is a file name
else:
#check the permissions on the parent directory with parent role
if not check_permissions(curr_dir, 'parent', required_by, grp, 'cp'):
logging.warning('Access denied: the operation required is not allowed on {}'.format(curr_dir['name']))
raise AccessDeniedException(curr_dir['name'])
#a file with the same name already exists
if dest_path.name in curr_dir['files']:
logging.warning('The resource already exists')
raise AlreadyExistsException()
        #a directory with the same name already exists
elif dest_path.name in curr_dir['directories']:
logging.warning('A directory with the same name already exists')
raise AlreadyExistsDirectoryException()
#the file does not exist into the directory, so it must be created
#update the fs collection
fs.update_one({ '_id': curr_dir['_id'] }, {'$push': { 'files': dest_path.name}})
#insert into the list needed for aligning the other namenodes
updatedone_documents.append(({ '_id': curr_dir['_id'] }, {'$push': { 'files': dest_path.name}}, 'fs'))
#create the file node and update the fs collection
new_file = create_file_node(dest_path.name, curr_dir['_id'], required_by, required_by, size=file['size'])
file_id = fs.insert_one(new_file).inserted_id
curr_file = fs.find_one({'_id': file_id})
#insert into the list needed for aligning the other namenodes
inserted_documents.append((curr_file, 'fs'))
dest_chunks = {}
dest_replicas = {}
dest_chunks_bkp = {}
dest_replicas_bkp = {}
orig_chunks = file['chunks']
orig_replicas = file['replicas']
orig_chunks_bkp = file['chunks_bkp']
orig_replicas_bkp = file['replicas_bkp']
#when a file is copied, also the chunks in the datanodes must be copied as they are, but with the prefix of the new file
for dn in orig_chunks:
for c in orig_chunks[dn]:
try:
dest_chunks[dn].append('{}_{}'.format(str(file_id),c.split('_')[1]))
except:
dest_chunks[dn] = ['{}_{}'.format(str(file_id),c.split('_')[1])]
dest_chunks_bkp['{}_{}'.format(str(file_id),c.split('_')[1])] = dn
for r in orig_replicas:
for dn in orig_replicas[r]:
try:
dest_replicas['{}_{}'.format(str(file_id),r.split('_')[1])].append(dn)
except:
dest_replicas['{}_{}'.format(str(file_id),r.split('_')[1])] = [dn]
try:
dest_replicas_bkp[dn].append('{}_{}'.format(str(file_id),r.split('_')[1]))
except:
dest_replicas_bkp[dn] = ['{}_{}'.format(str(file_id),r.split('_')[1])]
#update the fs collection
fs.update_one({ '_id': file_id }, {'$set': {'chunks': dest_chunks, 'chunks_bkp': dest_chunks_bkp, 'replicas': dest_replicas, 'replicas_bkp': dest_replicas_bkp}})
#insert into the list needed for aligning the other namenodes
updatedone_documents.append(({ '_id': file_id }, {'$set': {'chunks': dest_chunks, 'chunks_bkp': dest_chunks_bkp, 'replicas': dest_replicas, 'replicas_bkp': dest_replicas_bkp}}, 'fs'))
for c in orig_chunks.keys():
tmp_c = orig_chunks[c]
del orig_chunks[c]
orig_chunks[c.replace('[dot]', '.').replace('[colon]', ':')] = tmp_c
for r in orig_replicas.keys():
tmp_dn = []
for dn in orig_replicas[r]:
tmp_dn.append(dn.replace('[dot]', '.').replace('[colon]', ':'))
orig_replicas[r] = tmp_dn
#get the list of the chunks to copy, without repetitions
hs = list(set(chain(*list(orig_replicas.values()))))
hm = list(orig_chunks.keys())
h = list(set(hm + hs))
logging.info('Copied {} content into {}'.format(orig_file, dest_path))
#return the list of chunks to copy as they are with the new prefix into the interested datanodes
return (file['_id'], file_id, h, inserted_documents, updatedone_documents, deletedone_documents)
def mv(client, orig_path, dest_path, required_by, grp):
"""Move/Rename a resource into a destination path.
Parameters
----------
    client --> pymongo MongoClient class, MongoDB client
orig_path --> pathlib.PosixPath class, path to the resource you want to move/rename, the origin path
dest_path --> pathlib.PosixPath class, path to the resource you want to move the origin resource or new name for the origin resource, the destination path
required_by --> str, user who required the operation
    grp --> list, groups to which the user belongs
Returns
-------
updatedone_documents --> list, the list of the conditions for updating MongoDB documents, the values which have to be updated and the collection in which perform the update
"""
#for master namenode
updatedone_documents = []
#get the source file/directory to rename/move into a new path
try:
to_move = get_file(client, orig_path, required_by, grp)
except NotFoundException as e:
try:
to_move = get_directory(client, orig_path, required_by, grp)
except AccessDeniedException as e:
logging.warning(e.message)
raise e
except NotFoundException as e:
logging.warning(e.message)
raise e
except RootDirectoryException as e:
logging.warning(e.message)
raise e
except AccessDeniedException as e:
logging.warning(e.message)
raise e
#the file or the directory cannot be moved because already exists
if orig_path == dest_path:
if to_move['type'] == 'f':
logging.warning('The resource already exists')
raise AlreadyExistsException()
else:
logging.warning('A directory with the same name already exists')
raise AlreadyExistsDirectoryException()
#get fs (filesystem) MongoDB collection
fs = get_fs(client)
#navigate in the file system until the parent directory for the orig path
try:
curr_dir_from = navigate_through(orig_path, fs, required_by, grp, 'mv_source')
except AccessDeniedException as e:
logging.warning(e.message)
raise e
except NotFoundException as e:
logging.warning(e.message)
raise e
#navigate in the file system until the parent directory for the destination path
try:
curr_dir_to = navigate_through(dest_path, fs, required_by, grp, | |
'async with'"):
await python_script.stderr(CAPTURE)
async def test_redirect_error_to_logger(python_script, capfd, caplog):
    """Redirect stderr to a Logger: stdout is returned, stderr hits the log."""
    logger = logging.getLogger("test_logger")
    output = await python_script.stderr(logger)
    assert output == "hi stdout\ngoodbye!"
    # Nothing should leak to the real stdout/stderr.
    assert _readouterr(capfd) == ("", "")
    records = [entry for entry in caplog.record_tuples if entry[0] == "test_logger"]
    # 40 == logging.ERROR
    assert records == [("test_logger", 40, "hi stderr")]
async def test_async_context_manager(sh):
    """Incrementally feed stdin and read stdout using `async with`."""
    upper = sh("tr", "[:lower:]", "[:upper:]").stdin(CAPTURE)
    async with upper.run() as run:
        assert run.stderr is None
        # A single byte cannot fill the pipe buffer, so no write/read deadlock.
        run.stdin.write(b"a")
        run.stdin.close()
        data = await run.stdout.read()
    assert data == b"A"
async def test_async_iteration(sh):
    """Read stdout line by line with `async for`."""
    cmd = sh("echo", "-n", "line1\n", "line2\n", "line3").stderr(DEVNULL)
    async with cmd.run() as run:
        lines = [text async for text in run]
    # echo inserts a space between its arguments.
    assert lines == ["line1\n", " line2\n", " line3"]
async def test_manually_created_pipeline(sh):
    """Build a pipeline by hand from os.pipe() and fd redirection."""
    read_fd, write_fd = os.pipe()
    producer = sh("echo", "-n", "abc").stdout(write_fd, close=True)
    consumer = sh("tr", "[:lower:]", "[:upper:]").stdin(read_fd, close=True)
    # Run both concurrently so neither end of the pipe blocks the other.
    assert await asyncio.gather(consumer, producer) == ["ABC", ""]
async def test_pipeline(sh):
    """`|` between two commands builds a working pipeline."""
    upper = sh("tr", "[:lower:]", "[:upper:]")
    assert await (sh("echo", "-n", "xyz") | upper) == "XYZ"
async def test_pipeline_with_env(sh):
    """An env() override on the first command does not break the pipeline."""
    first = sh("echo", "-n", "xyz").env(FOO=1)
    assert await (first | sh("tr", "[:lower:]", "[:upper:]")) == "XYZ"
async def test_pipeline_with_result(sh):
    "Test a simple pipeline with `return_result` set to True."
    echo = sh("echo", "-n", "xyz")
    tr = sh("tr", "[:lower:]", "[:upper:]").set(return_result=True)
    result = await (echo | tr)
    # With return_result=True the await yields a full Result object instead
    # of just the decoded output; `extra` holds one PipeResult per command.
    assert result == Result(
        output_bytes=b"XYZ",
        exit_code=0,
        cancelled=False,
        encoding="utf-8",
        extra=(
            PipeResult(
                exit_code=0,
                cancelled=False,
            ),
            PipeResult(
                exit_code=0,
                cancelled=False,
            ),
        ),
    )
async def test_pipeline_single_cmd(sh):
    """A string piped into a single command acts as its stdin."""
    upper = sh("tr", "[:lower:]", "[:upper:]")
    assert await ("abc" | upper) == "ABC"
async def test_pipeline_invalid_cmd1(sh):
    """The pipeline fails when its first command does not exist."""
    upper = sh("tr", "[:lower:]", "[:upper:]")
    # uvloop's FileNotFoundError message omits the filename, so no match=.
    with pytest.raises(FileNotFoundError):
        await (sh(" non_existant ", "xyz") | upper)
async def test_pipeline_invalid_cmd2(sh):
    """The pipeline fails when its second command does not exist."""
    head = sh("echo", "abc")
    # uvloop's FileNotFoundError message omits the filename, so no match=.
    with pytest.raises(FileNotFoundError):
        await (head | sh(" non_existant ", "xyz"))
async def test_exit_codes(sh):
    """`exit_codes` lets a non-zero exit (cat of a missing file) succeed."""
    runner = sh.stderr(STDOUT).set(exit_codes={1})
    output = await runner("cat", "/tmp/__does_not_exist__")
    # Some systems display the resolved path, e.g. "/usr/bin/cat: ...".
    assert re.fullmatch(
        r".*cat: .*__does_not_exist__'?: No such file or directory\n", output
    )
async def test_pipeline_async_iteration(sh):
    """Read a pipeline's stdout line by line with `async for`."""
    pipe = sh("echo", "-n", "line1\n", "line2\n", "line3") | sh("cat")
    async with pipe.run() as run:
        lines = [text async for text in run]
    # echo inserts a space between its arguments.
    assert lines == ["line1\n", " line2\n", " line3"]
async def test_pipeline_async_context_manager(sh):
    "Use `async with` to read/write bytes incrementally."
    tr = sh("tr", "[:lower:]", "[:upper:]")
    pipe = (tr | sh("cat")).stdin(CAPTURE)
    async with pipe.run() as run:
        assert run.stderr is None
        # N.B. We won't deadlock writing/reading a single byte.
        run.stdin.write(b"a")
        run.stdin.close()
        result = await run.stdout.read()
    # After the context exits, both processes have completed; the runner's
    # name and repr reflect each command in the pipeline.
    assert result == b"A"
    assert run.name == "tr|cat"
    assert (
        repr(run) == "<PipeRunner 'tr|cat' results=["
        "Result(output_bytes=b'', exit_code=0, cancelled=False, encoding='utf-8', extra=None), "
        "Result(output_bytes=b'', exit_code=0, cancelled=False, encoding='utf-8', extra=None)]>"
    )
async def test_gather_same_cmd(sh):
    """harvest_results() runs the same command object twice independently.

    Plain `asyncio.gather` fails this case.
    """
    rand = sh(sys.executable, "-c", "import secrets; print(secrets.randbits(31))")
    first, second = await harvest_results(rand, rand)
    assert first != second
async def test_cancelled_antipattern(sh):
    """Test the ResultError/cancellation anti-pattern.
    This occurs *only* when incomplete_result=True.
    Catching `ResultError` using try/except conceals the `CancelledError`
    when you want to cancel the current task.
    """
    sleep_cmd = sh("sleep", 3600).set(return_result=True, incomplete_result=True)
    async def _subtask():
        try:
            await sleep_cmd
        except ResultError as ex:
            # sleep_cmd has incomplete_result set to True.
            assert ex.result.cancelled
            # CancelledError is lost!
        # Because the except block above swallowed the cancellation, this
        # second await still runs -- that is the anti-pattern.
        await sleep_cmd
    task = asyncio.create_task(_subtask())
    # Wait for task to start, then inject a CancelledError.
    await asyncio.sleep(0.1)
    task.cancel()
    # Catch ResultError from second sleep command. We have to use a timeout,
    # otherwise we'd wait an hour for the 2nd sleep command to finish.
    with pytest.raises(ResultError) as exc_info:
        await asyncio.wait_for(task, timeout=0.1)
    assert exc_info.value.result.cancelled
async def test_cancelled_antipattern_fix(sh):
    """Test the ResultError/cancellation anti-pattern fix.
    Catching `ResultError` using try/except conceals the `CancelledError`
    when you want to cancel the current task.
    The fix is to explicitly re-raise the CancelledError when you are done with
    the `ResultError`.
    """
    sleep_cmd = sh("sleep", 3600).set(return_result=True, incomplete_result=True)
    async def _subtask():
        try:
            await sleep_cmd
        except ResultError as ex:
            assert ex.result.cancelled
            if ex.result.cancelled:
                # The fix: propagate the cancellation explicitly.
                raise asyncio.CancelledError() from ex
        await sleep_cmd
    task = asyncio.create_task(_subtask())
    # Wait for task to start, then inject a CancelledError.
    await asyncio.sleep(0.1)
    task.cancel()
    # Wait for task to report it is cancelled.
    with pytest.raises(asyncio.CancelledError):
        await task
    # BUG FIX: `Task.cancelled` is a method; the bare attribute is always
    # truthy, so the original `assert task.cancelled` could never fail.
    assert task.cancelled()
async def test_multiple_capture(sh):
    """Capture stdin and stdout at the same time (documentation example)."""
    cat = sh("cat").stdin(CAPTURE)
    async with cat.run() as run:
        run.stdin.write(b"abc\n")
        # Read and drain concurrently to avoid a pipe deadlock.
        line, _ = await asyncio.gather(run.stdout.readline(), run.stdin.drain())
        run.stdin.close()
        result = run.result(line)
    assert result == "abc\n"
async def test_cancel_timeout(sh):
    "Test the `cancel_timeout` setting."
    # nohup makes the child ignore SIGHUP -- the cancel_signal chosen here --
    # so cancellation has to fall back to the cancel_timeout path.
    sleep = sh("nohup", "sleep").set(
        cancel_timeout=0.25,
        cancel_signal=signal.SIGHUP,
    )
    # Calling sleep(10.0) appends the duration argument to the command.
    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(sleep(10.0), 0.25)
async def test_shell_cmd(sh):
    "Test a shell command. (https://bugs.python.org/issue43884)"
    shell = sh("/bin/sh", "-c").set(
        return_result=True,
        incomplete_result=True,
        exit_codes={0, _CANCELLED_EXIT_CODE},
    )
    task = asyncio.create_task(shell("sleep 2 && echo done").coro())
    # Cancel the shell command while it is still sleeping.
    await asyncio.sleep(0.25)
    task.cancel()
    # incomplete_result=True: cancellation surfaces as a ResultError carrying
    # the partial Result instead of a bare CancelledError.
    with pytest.raises(ResultError) as exc_info:
        await task
    assert exc_info.type is ResultError
    assert exc_info.value.result == Result(
        output_bytes=b"",
        exit_code=_CANCELLED_EXIT_CODE,
        cancelled=True,
        encoding="utf-8",
        extra=None,
    )
async def test_large_cat(sh):
    """Round-trip a 4 MiB + 1 byte payload through `cat` in binary mode."""
    size = 4 * 1024 * 1024 + 1
    payload = b"x" * size
    binary_cat = sh("cat").set(encoding=None)
    echoed = await (payload | binary_cat)
    assert len(echoed) == size
    assert echoed == payload
async def test_env_ellipsis_unix(sh):
    """env(PATH=...) copies PATH from the parent environment."""
    cmd = sh("env").set(inherit_env=False).env(PATH=...)
    assert (await cmd).startswith("PATH=")
async def test_env_ellipsis_unix_wrong_case(sh):
    """env(path=...) fails: the real environment key is 'PATH', not 'path'."""
    with pytest.raises(KeyError):
        sh("env").set(inherit_env=False).env(path=...)
async def test_process_substitution(sh):
    """Process substitution: diff <(echo a) <(echo b)."""
    diff = sh("diff", sh("echo", "a"), sh("echo", "b")).set(exit_codes={0, 1})
    output = await diff
    if _IS_ALPINE:
        # busybox diff emits unified-diff output.
        assert output.endswith("@@ -1 +1 @@\n-a\n+b\n")
    else:
        assert output == "1c1\n< a\n---\n> b\n"
async def test_process_substitution_with_pipe(sh):
    """Process substitution of pipelines: diff <(echo a | cat) <(echo b | cat)."""
    left = sh("echo", "a") | sh("cat")
    right = sh("echo", "b") | sh("cat")
    output = await sh("diff", left, right).set(exit_codes={0, 1})
    if _IS_ALPINE:
        # busybox diff emits unified-diff output.
        assert output.endswith("@@ -1 +1 @@\n-a\n+b\n")
    else:
        assert output == "1c1\n< a\n---\n> b\n"
async def test_process_substitution_write(sh, tmp_path):
    """Write-mode process substitution: echo a | tee >(cat > tmpfile)."""
    target = tmp_path / "test_process_sub_write"
    sink = sh("cat") | target
    tee = sh("echo", "a") | sh("tee", sink().writable)
    assert await tee == "a\n"
    assert target.read_bytes() == b"a\n"
async def test_process_substitution_write_pipe(sh, tmp_path):
    """Write-mode substitution through a pipe: echo b | tee >(cat | cat > tmpfile)."""
    target = tmp_path / "test_process_sub_write"
    sink = sh("cat") | sh("cat") | target
    tee = sh("echo", "b") | sh("tee", sink().writable)
    assert await tee == "b\n"
    assert target.read_bytes() == b"b\n"
async def test_process_substitution_write_pipe_alt(sh, tmp_path):
    """Write-mode substitution with `.writable` on the sink's first command.

    Same shell equivalent as test_process_substitution_write_pipe:
    echo b | tee >(cat | cat > tmpfile)
    """
    target = tmp_path / "test_process_sub_write"
    sink = sh("cat").writable | sh("cat") | target
    tee = sh("echo", "b") | sh("tee", sink()).stderr(INHERIT)
    assert await tee == "b\n"
    assert target.read_bytes() == b"b\n"
async def test_process_substitution_error_filenotfound(sh):
    """A missing command inside a process substitution raises FileNotFoundError."""
    diff = sh("diff", sh("echo", "a"), sh("_unknown_", "b")).set(exit_codes={0, 1})
    with pytest.raises(FileNotFoundError, match="_unknown_"):
        await diff
async def test_process_substitution_error_exit_1(sh):
    """Test process substitution where the substituted command exits non-zero."""
    # sleep exits with code 1 if no argument passed.
    cmd = sh("diff", sh("echo", "a"), sh("sleep")).set(exit_codes={0, 1})
    with pytest.raises(ResultError) as exc_info:
        await cmd
    result = exc_info.value.result
    assert result == Result(
        output_bytes=b"",
        exit_code=1,
        cancelled=False,
        encoding="utf-8",
        extra=None,  # FIXME: This should indicate that "sleep" failed...
    )
async def test_start_new_session(sh):
"""Test `_start_new_session` option."""
script = """import os; print(os.getsid(0) == os.getpid())"""
cmd = sh(sys.executable, "-c", script)
result = await cmd
assert result == "False\n"
result = await cmd.set(_start_new_session=False)
assert result == "False\n"
result = await cmd.set(_start_new_session=True)
assert | |
+ m.x1308 + m.x1408 + m.x1508 + m.x1608 + m.x1708 + m.x1808
+ m.x1908 + m.x2008 + m.x2108 + m.x2208 + m.x2308 + m.x2408 + m.x2508 + m.x2608 + m.x2708
+ m.x2808 + m.x2908 == 1)
m.c3010 = Constraint(expr= m.x9 + m.x109 + m.x209 + m.x309 + m.x409 + m.x509 + m.x609 + m.x709 + m.x809 + m.x909
+ m.x1009 + m.x1109 + m.x1209 + m.x1309 + m.x1409 + m.x1509 + m.x1609 + m.x1709 + m.x1809
+ m.x1909 + m.x2009 + m.x2109 + m.x2209 + m.x2309 + m.x2409 + m.x2509 + m.x2609 + m.x2709
+ m.x2809 + m.x2909 == 1)
m.c3011 = Constraint(expr= m.x10 + m.x110 + m.x210 + m.x310 + m.x410 + m.x510 + m.x610 + m.x710 + m.x810 + m.x910
+ m.x1010 + m.x1110 + m.x1210 + m.x1310 + m.x1410 + m.x1510 + m.x1610 + m.x1710 + m.x1810
+ m.x1910 + m.x2010 + m.x2110 + m.x2210 + m.x2310 + m.x2410 + m.x2510 + m.x2610 + m.x2710
+ m.x2810 + m.x2910 == 1)
m.c3012 = Constraint(expr= m.x11 + m.x111 + m.x211 + m.x311 + m.x411 + m.x511 + m.x611 + m.x711 + m.x811 + m.x911
+ m.x1011 + m.x1111 + m.x1211 + m.x1311 + m.x1411 + m.x1511 + m.x1611 + m.x1711 + m.x1811
+ m.x1911 + m.x2011 + m.x2111 + m.x2211 + m.x2311 + m.x2411 + m.x2511 + m.x2611 + m.x2711
+ m.x2811 + m.x2911 == 1)
m.c3013 = Constraint(expr= m.x12 + m.x112 + m.x212 + m.x312 + m.x412 + m.x512 + m.x612 + m.x712 + m.x812 + m.x912
+ m.x1012 + m.x1112 + m.x1212 + m.x1312 + m.x1412 + m.x1512 + m.x1612 + m.x1712 + m.x1812
+ m.x1912 + m.x2012 + m.x2112 + m.x2212 + m.x2312 + m.x2412 + m.x2512 + m.x2612 + m.x2712
+ m.x2812 + m.x2912 == 1)
m.c3014 = Constraint(expr= m.x13 + m.x113 + m.x213 + m.x313 + m.x413 + m.x513 + m.x613 + m.x713 + m.x813 + m.x913
+ m.x1013 + m.x1113 + m.x1213 + m.x1313 + m.x1413 + m.x1513 + m.x1613 + m.x1713 + m.x1813
+ m.x1913 + m.x2013 + m.x2113 + m.x2213 + m.x2313 + m.x2413 + m.x2513 + m.x2613 + m.x2713
+ m.x2813 + m.x2913 == 1)
m.c3015 = Constraint(expr= m.x14 + m.x114 + m.x214 + m.x314 + m.x414 + m.x514 + m.x614 + m.x714 + m.x814 + m.x914
+ m.x1014 + m.x1114 + m.x1214 + m.x1314 + m.x1414 + m.x1514 + m.x1614 + m.x1714 + m.x1814
+ m.x1914 + m.x2014 + m.x2114 + m.x2214 + m.x2314 + m.x2414 + m.x2514 + m.x2614 + m.x2714
+ m.x2814 + m.x2914 == 1)
m.c3016 = Constraint(expr= m.x15 + m.x115 + m.x215 + m.x315 + m.x415 + m.x515 + m.x615 + m.x715 + m.x815 + m.x915
+ m.x1015 + m.x1115 + m.x1215 + m.x1315 + m.x1415 + m.x1515 + m.x1615 + m.x1715 + m.x1815
+ m.x1915 + m.x2015 + m.x2115 + m.x2215 + m.x2315 + m.x2415 + m.x2515 + m.x2615 + m.x2715
+ m.x2815 + m.x2915 == 1)
m.c3017 = Constraint(expr= m.x16 + m.x116 + m.x216 + m.x316 + m.x416 + m.x516 + m.x616 + m.x716 + m.x816 + m.x916
+ m.x1016 + m.x1116 + m.x1216 + m.x1316 + m.x1416 + m.x1516 + m.x1616 + m.x1716 + m.x1816
+ m.x1916 + m.x2016 + m.x2116 + m.x2216 + m.x2316 + m.x2416 + m.x2516 + m.x2616 + m.x2716
+ m.x2816 + m.x2916 == 1)
m.c3018 = Constraint(expr= m.x17 + m.x117 + m.x217 + m.x317 + m.x417 + m.x517 + m.x617 + m.x717 + m.x817 + m.x917
+ m.x1017 + m.x1117 + m.x1217 + m.x1317 + m.x1417 + m.x1517 + m.x1617 + m.x1717 + m.x1817
+ m.x1917 + m.x2017 + m.x2117 + m.x2217 + m.x2317 + m.x2417 + m.x2517 + m.x2617 + m.x2717
+ m.x2817 + m.x2917 == 1)
m.c3019 = Constraint(expr= m.x18 + m.x118 + m.x218 + m.x318 + m.x418 + m.x518 + m.x618 + m.x718 + m.x818 + m.x918
+ m.x1018 + m.x1118 + m.x1218 + m.x1318 + m.x1418 + m.x1518 + m.x1618 + m.x1718 + m.x1818
+ m.x1918 + m.x2018 + m.x2118 + m.x2218 + m.x2318 + m.x2418 + m.x2518 + m.x2618 + m.x2718
+ m.x2818 + m.x2918 == 1)
m.c3020 = Constraint(expr= m.x19 + m.x119 + m.x219 + m.x319 + m.x419 + m.x519 + m.x619 + m.x719 + m.x819 + m.x919
+ m.x1019 + m.x1119 + m.x1219 + m.x1319 + m.x1419 + m.x1519 + m.x1619 + m.x1719 + m.x1819
+ m.x1919 + m.x2019 + m.x2119 + m.x2219 + m.x2319 + m.x2419 + m.x2519 + m.x2619 + m.x2719
+ m.x2819 + m.x2919 == 1)
m.c3021 = Constraint(expr= m.x20 + m.x120 + m.x220 + m.x320 + m.x420 + m.x520 + m.x620 + m.x720 + m.x820 + m.x920
+ m.x1020 + m.x1120 + m.x1220 + m.x1320 + m.x1420 + m.x1520 + m.x1620 + m.x1720 + m.x1820
+ m.x1920 + m.x2020 + m.x2120 + m.x2220 + m.x2320 + m.x2420 + m.x2520 + m.x2620 + m.x2720
+ m.x2820 + m.x2920 == 1)
m.c3022 = Constraint(expr= m.x21 + m.x121 + m.x221 + m.x321 + m.x421 + m.x521 + m.x621 + m.x721 + m.x821 + m.x921
+ m.x1021 + m.x1121 + m.x1221 + m.x1321 + m.x1421 + m.x1521 + m.x1621 + m.x1721 + m.x1821
+ m.x1921 + m.x2021 + m.x2121 + m.x2221 + m.x2321 + m.x2421 + m.x2521 + m.x2621 + m.x2721
+ m.x2821 + m.x2921 == 1)
m.c3023 = Constraint(expr= m.x22 + m.x122 + m.x222 + m.x322 + m.x422 + m.x522 + m.x622 + m.x722 + m.x822 + m.x922
+ m.x1022 + m.x1122 + m.x1222 + m.x1322 + m.x1422 + m.x1522 + m.x1622 + m.x1722 + m.x1822
+ m.x1922 + m.x2022 + m.x2122 + m.x2222 + m.x2322 + m.x2422 + m.x2522 + m.x2622 + m.x2722
+ m.x2822 + m.x2922 == 1)
m.c3024 = Constraint(expr= m.x23 + m.x123 + m.x223 + m.x323 + m.x423 + m.x523 + m.x623 + m.x723 + m.x823 + m.x923
+ m.x1023 + m.x1123 + m.x1223 + m.x1323 + m.x1423 + m.x1523 + m.x1623 + m.x1723 + m.x1823
+ m.x1923 + m.x2023 + m.x2123 + m.x2223 + m.x2323 + m.x2423 + m.x2523 + m.x2623 + m.x2723
+ m.x2823 + m.x2923 == 1)
m.c3025 = Constraint(expr= m.x24 + m.x124 + m.x224 + m.x324 + m.x424 + m.x524 + m.x624 + m.x724 + m.x824 + m.x924
+ m.x1024 + m.x1124 + m.x1224 + m.x1324 + m.x1424 + m.x1524 + m.x1624 + m.x1724 + m.x1824
+ m.x1924 + m.x2024 + m.x2124 + m.x2224 + m.x2324 + m.x2424 + m.x2524 + m.x2624 + m.x2724
+ m.x2824 + m.x2924 == 1)
m.c3026 = Constraint(expr= m.x25 + m.x125 + m.x225 + m.x325 + m.x425 + m.x525 + m.x625 + m.x725 + m.x825 + m.x925
+ m.x1025 + m.x1125 + m.x1225 + m.x1325 + m.x1425 + m.x1525 + m.x1625 + m.x1725 + m.x1825
+ m.x1925 + m.x2025 + m.x2125 + m.x2225 + m.x2325 + m.x2425 + m.x2525 + m.x2625 + m.x2725
+ m.x2825 + m.x2925 == 1)
m.c3027 = Constraint(expr= m.x26 + m.x126 + m.x226 + m.x326 + m.x426 + m.x526 + m.x626 + m.x726 + m.x826 + m.x926
+ m.x1026 + m.x1126 + m.x1226 + m.x1326 + m.x1426 + m.x1526 + m.x1626 + m.x1726 + m.x1826
+ m.x1926 + m.x2026 + m.x2126 + m.x2226 + m.x2326 + m.x2426 + m.x2526 + m.x2626 + m.x2726
+ m.x2826 + m.x2926 == 1)
m.c3028 = Constraint(expr= m.x27 + m.x127 + m.x227 + m.x327 + m.x427 + m.x527 + m.x627 + m.x727 + m.x827 + m.x927
+ m.x1027 + m.x1127 + m.x1227 + m.x1327 + m.x1427 + m.x1527 + m.x1627 + m.x1727 + m.x1827
+ m.x1927 + m.x2027 + m.x2127 + m.x2227 + m.x2327 + m.x2427 + m.x2527 + m.x2627 + m.x2727
+ m.x2827 + m.x2927 == 1)
m.c3029 = Constraint(expr= m.x28 + m.x128 + m.x228 + m.x328 + m.x428 + m.x528 + m.x628 + m.x728 + m.x828 + m.x928
+ m.x1028 + m.x1128 + m.x1228 + m.x1328 + m.x1428 + m.x1528 + m.x1628 + m.x1728 + m.x1828
+ m.x1928 + m.x2028 + m.x2128 + m.x2228 | |
<gh_stars>1000+
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import base64
import logging
import sys
import inspect
import re
import traceback
import json
from functools import partial
import numpy as np
###############################################################################
# LOGGING (some adapted from mne-python)
def _get_vispy_caller():
    """Return a "module:Class.func(line): " string for the vispy frame that
    triggered the current log call, or 'unknown' if none is found."""
    # The first few stack records belong to the logging machinery itself,
    # so start the scan five frames up.
    for frame_info in inspect.stack()[5:]:
        frame = frame_info[0]
        module_name = frame.f_globals['__name__']
        if not module_name.startswith('vispy'):
            continue
        bound_self = frame.f_locals.get('self', None)
        cls_prefix = "" if bound_self is None else bound_self.__class__.__name__ + '.'
        return "{0}:{1}{2}({3}): ".format(
            module_name, cls_prefix, frame_info[3], str(frame.f_lineno))
    return 'unknown'
# class _WrapStdOut(object):
# """Class to work around how doctest captures stdout"""
# def __getattr__(self, name):
# # Even more ridiculous than this class, this must be sys.stdout (not
# # just stdout) in order for this to work (tested on OSX and Linux)
# return getattr(sys.stdout, name)
class _VispyFormatter(logging.Formatter):
    """Formatter emitting "LEVEL: message", optionally prefixed with the
    vispy caller that produced the record."""

    def __init__(self):
        logging.Formatter.__init__(self, '%(levelname)s: %(message)s')
        # When True, format() prefixes each message with the vispy caller.
        self._vispy_prepend_caller = False

    def _vispy_set_prepend(self, prepend):
        """Enable/disable prepending of the caller description."""
        self._vispy_prepend_caller = prepend

    def format(self, record):
        formatted = logging.Formatter.format(self, record)
        if not self._vispy_prepend_caller:
            return formatted
        return _get_vispy_caller() + formatted
class _VispyStreamHandler(logging.StreamHandler):
    """Stream handler allowing matching and recording

    This handler has two useful optional additions:

        1. Recording emitted messages.
        2. Performing regexp substring matching.

    Prepending of traceback information is done in _VispyFormatter.

    Implementation note: ``self.emit`` is rebound at runtime (see
    ``_vispy_set_match`` / ``_vispy_set_emit_record``) so the plain, fast
    ``StreamHandler.emit`` is used whenever no matching or recording is
    active.
    """
    def __init__(self):
        logging.StreamHandler.__init__(self, sys.stderr)
        # _lf is the module-level shared formatter instance.
        self._vispy_formatter = _lf
        self.setFormatter(self._vispy_formatter)
        self._vispy_match = None
        # Recorded (formatted) messages when recording is enabled.
        self._vispy_emit_list = list()
        self._vispy_set_emit_record(False)
        self._vispy_set_match(None)
        # When False, matched records are recorded but not written to stderr.
        self._vispy_print_msg = True
    def _vispy_emit_match_andor_record(self, record):
        """Log message emitter that optionally matches and/or records"""
        test = record.getMessage()
        match = self._vispy_match
        # A record passes if there is no filter, or the regexp matches either
        # the message text or the vispy caller description.
        if (match is None or re.search(match, test) or
                re.search(match, _get_vispy_caller())):
            if self._vispy_emit_record:
                fmt_rec = self._vispy_formatter.format(record)
                self._vispy_emit_list.append(fmt_rec)
            if self._vispy_print_msg:
                return logging.StreamHandler.emit(self, record)
            else:
                return
    def _vispy_set_match(self, match):
        """Set the regexp filter; return the previously active match."""
        old_match = self._vispy_match
        self._vispy_match = match
        # Triage here to avoid a bunch of if's later (more efficient):
        # only install the match/record emitter when it is actually needed.
        if match is not None or self._vispy_emit_record:
            self.emit = self._vispy_emit_match_andor_record
        else:
            self.emit = partial(logging.StreamHandler.emit, self)
        return old_match
    def _vispy_set_emit_record(self, record):
        """Enable or disable recording of formatted messages."""
        self._vispy_emit_record = record
        match = self._vispy_match
        # Triage here to avoid a bunch of if's later (more efficient)
        if match is not None or self._vispy_emit_record:
            self.emit = self._vispy_emit_match_andor_record
        else:
            self.emit = partial(logging.StreamHandler.emit, self)
    def _vispy_reset_list(self):
        """Discard all recorded messages."""
        self._vispy_emit_list = list()
# Module-level logging state: a single 'vispy' logger wired to one stream
# handler.  The formatter instance is shared so the caller-prepend flag can
# be toggled globally.
logger = logging.getLogger('vispy')
_lf = _VispyFormatter()
_lh = _VispyStreamHandler()  # needs _lf to exist
logger.addHandler(_lh)
# Map of user-facing level names to stdlib logging level constants.
logging_types = dict(debug=logging.DEBUG, info=logging.INFO,
                     warning=logging.WARNING, error=logging.ERROR,
                     critical=logging.CRITICAL)
def set_log_level(verbose, match=None, return_old=False):
    """Convenience function for setting the logging level

    Parameters
    ----------
    verbose : bool, str, int, or None
        The verbosity of messages to print.  A str may be one of DEBUG,
        INFO, WARNING, ERROR or CRITICAL (case-insensitive); these map to
        the corresponding ``logging`` constants.  ``True`` is equivalent
        to 'INFO' and ``False`` to 'WARNING'.
    match : str | None
        Regexp filter: only messages whose text (or vispy caller) matches
        will be displayed.
    return_old : bool
        If True, return a tuple ``(old_level, old_match)``.

    Notes
    -----
    At DEBUG level the emitting vispy function is prepended to each log
    message, which adds a small performance overhead (as does ``match``).
    Message recording is configured by the ``use_log_level`` context
    manager, not here.

    See also
    --------
    vispy.util.use_log_level
    """
    if isinstance(verbose, bool):
        verbose = 'info' if verbose else 'warning'
    if not isinstance(verbose, str):
        raise TypeError('verbose must be a bool or string')
    level_name = verbose.lower()
    if level_name not in logging_types:
        raise ValueError('Invalid argument "%s"' % level_name)
    level = logging_types[level_name]
    vispy_logger = logging.getLogger('vispy')
    previous_level = vispy_logger.level
    previous_match = _lh._vispy_set_match(match)
    vispy_logger.setLevel(level)
    # Prepend the caller only when debugging (it is comparatively slow).
    _lf._vispy_set_prepend(level <= logging.DEBUG)
    if return_old:
        return (previous_level, previous_match)
    return None
class use_log_level(object):
    """Context manager that temporarily sets logging level

    Parameters
    ----------
    level : str
        See ``set_log_level`` for options.
    match : str | None
        The string to match.
    record : bool
        If True, the context manager will keep a record of the logging
        messages generated by vispy. Otherwise, an empty list will
        be returned.
    print_msg : bool
        If False, printing of (all) messages will be suppressed. This is
        mainly useful in testing. False only works in `record=True` mode, if
        not recording messages, consider setting `level` appropriately.

    Returns
    -------
    records : list
        As a context manager, an empty list or the list of logging messages
        will be returned (depending on the input ``record``).
    """
    # This class mostly wraps set_log_level, but also takes
    # care of enabling/disabling message recording in the handler.
    def __init__(self, level, match=None, record=False, print_msg=True):
        self._new_level = level
        self._new_match = match
        self._print_msg = print_msg
        self._record = record
        if match is not None and not isinstance(match, str):
            raise TypeError('match must be None or str')
    def __enter__(self):
        # set the log level (and remember the previous settings)
        old_level, old_match = set_log_level(self._new_level,
                                             self._new_match, return_old=True)
        # Map the numeric old level back to its name so it can be passed to
        # set_log_level() again on exit.
        for key, value in logging_types.items():
            if value == old_level:
                old_level = key
        self._old_level = old_level
        self._old_match = old_match
        if not self._print_msg:
            _lh._vispy_print_msg = False
        # set handler to record, if appropriate; the returned list is the
        # live list the handler appends to while the context is active.
        _lh._vispy_reset_list()
        if self._record:
            _lh._vispy_set_emit_record(True)
            return _lh._vispy_emit_list
        else:
            return list()
    def __exit__(self, type, value, traceback):
        # reset log level
        set_log_level(self._old_level, self._old_match)
        # reset handler
        if self._record:
            _lh._vispy_set_emit_record(False)
        if not self._print_msg:
            _lh._vispy_print_msg = True  # set it back
def log_exception(level='warning', tb_skip=2):
    """
    Send an exception and traceback to the logger.

    Must be called from within an exception handler: the active exception
    is read from ``sys.exc_info()``.  An extra marker line is inserted
    into the stack trace indicating where the exception was caught.

    Parameters
    ----------
    level : str
        See ``set_log_level`` for options.
    tb_skip : int
        The number of traceback entries to ignore, prior to the point where
        the exception was caught. The default is 2.
    """
    caught_stack = "".join(traceback.format_stack()[:-tb_skip])
    exc_lines = traceback.format_exception(*sys.exc_info())
    # exc_lines[0] is "Traceback (most recent call last):"; splice the
    # caller's stack and a marker in before the exception details.
    parts = [exc_lines[0],
             caught_stack,
             " << caught exception here: >>\n",
             "".join(exc_lines[1:]).rstrip()]
    logger.log(logging_types[level], "".join(parts))
# Convenience alias so callers can write ``logger.log_exception(...)``.
logger.log_exception = log_exception  # make this easier to reach
def _handle_exception(ignore_callback_errors, print_callback_errors, obj,
                      cb_event=None, node=None):
    """Helper for printing errors in callbacks

    Must be called from within an ``except`` block: it reads the active
    exception via ``sys.exc_info()`` and may re-raise it with a bare
    ``raise``.

    See EventEmitter._invoke_callback for a use example.

    Parameters
    ----------
    ignore_callback_errors : bool
        If False, the active exception is re-raised immediately.
    print_callback_errors : str
        One of "never", "first", "reminders", or anything else for
        "always print the full traceback".
    obj : object
        Object on which the per-callback error registry is stored.
    cb_event : tuple | None
        ``(callback, event)`` pair for callback errors; None for node errors.
    node : object | None
        The node being drawn when ``cb_event`` is None.
    """
    # Per-object registry counting how often each callback/node has failed;
    # used to throttle repeated error messages.
    if not hasattr(obj, '_vispy_err_registry'):
        obj._vispy_err_registry = {}
    registry = obj._vispy_err_registry
    if cb_event is not None:
        cb, event = cb_event
        exp_type = 'callback'
    else:
        exp_type = 'node'
    type_, value, tb = sys.exc_info()
    tb = tb.tb_next  # Skip *this* frame
    # Mimic the interpreter's post-mortem bookkeeping (e.g. for pdb.pm()).
    sys.last_type = type_
    sys.last_value = value
    sys.last_traceback = tb
    del tb  # Get rid of it in this namespace
    # Handle
    if not ignore_callback_errors:
        # Re-raise the exception currently being handled.
        raise
    if print_callback_errors != "never":
        this_print = 'full'
        if print_callback_errors in ('first', 'reminders'):
            # need to check to see if we've hit this yet
            if exp_type == 'callback':
                key = repr(cb) + repr(event)
            else:
                key = repr(node)
            if key in registry:
                registry[key] += 1
                if print_callback_errors == 'first':
                    this_print = None
                else:  # reminders
                    ii = registry[key]
                    # Only remind on exact powers of two
                    # (2, 4, 8, 16, ...) so output stays bounded.
                    if ii == (2 ** int(np.log2(ii))):
                        this_print = ii
                    else:
                        this_print = None
            else:
                registry[key] = 1
        if this_print == 'full':
            logger.log_exception()
            if exp_type == 'callback':
                logger.error("Invoking %s for %s" % (cb, event))
            else:  # == 'node':
                logger.error("Drawing node %s" % node)
        elif this_print is not None:
            # Reminder line only (no traceback), with the repeat count.
            if exp_type == 'callback':
                logger.error("Invoking %s repeat %s"
                             % (cb, this_print))
            else:  # == 'node':
                logger.error("Drawing node %s repeat %s"
                             % (node, this_print))
def _serialize_buffer(buffer, array_serialization=None):
    """Serialize a NumPy array.

    'binary' yields the raw flattened bytes; 'base64' yields a dict with a
    ``storage_type`` tag and an ASCII base64 ``buffer``.  Any other value
    raises ValueError.
    """
    if array_serialization == 'base64':
        return {'storage_type': 'base64',
                'buffer': base64.b64encode(buffer).decode('ascii')
                }
    if array_serialization == 'binary':
        return buffer.ravel().tobytes()
    raise ValueError("The array serialization method should be 'binary' or "
                     "'base64'.")
class NumPyJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return _serialize_buffer(obj, array_serialization='base64')
elif isinstance(obj, np.generic):
return obj.item()
| |
)
#-------------------------------------------------------------------------------
# Name: nodeInfo()
# Role: Return a dictionary having the primary key being the unique
# node names, and the values being the associated configID
# for each node.
# History:
# when who ver what
# -------- --- ---- ----------
# 10/04/16 rag 0.0 New - Write using configInfoAsDict()
#-------------------------------------------------------------------------------
def nodeInfo( scope = None ) :
    'nodeInfo( scope = None ) - Return a dictionary of the node names, and their configuration IDs.'
    #-----------------------------------------------------------------------------
    # Thin wrapper around configInfoAsDict(): keys are unique node names,
    # values are the associated Node configIDs.
    #-----------------------------------------------------------------------------
    return configInfoAsDict( 'Node', scope )
#-------------------------------------------------------------------------------
# Name: nvListAsDict()
# Role: To display the contents of a dictionary in a more readable format
# History:
# when who ver what
# -------- --- ---- ----------
# 09/11/15 rag 0.1 Add - docstring
# 09/08/18 rag 0.0 New - see stringAsNestedList()
#-------------------------------------------------------------------------------
def nvListAsDict( nvList ):
    'nvListAsDict( nvList ) - Convert a list containing name/value pairs into a dictionary'
    #-----------------------------------------------------------------------------
    # dict() accepts an iterable of 2-item sequences directly; a later
    # duplicate of a name overwrites the earlier value, exactly like the
    # explicit assignment loop it replaces.
    #-----------------------------------------------------------------------------
    return dict( nvList )
#-------------------------------------------------------------------------------
# Name: nvTextListAsDict
# Role: Convert a list of name/value pairs found in the specified text
# string into a dictionary.
# Note: Depends upon availability of WSAS Admin Objects via sys.modules
# History:
# when who ver what
# -------- --- ---- ----------
# 10/01/27 rag 0.5 Fix - Change "cmdName" to "funName"
# 09/11/15 rag 0.4 Add - docstring
# 09/04/07 rag 0.3 Add - Added displayDict() call to example
# 09/03/21 rag 0.2 Add - Add "role" description & corrected example
# 08/12/18 rag 0.1 Fix - Handle "no value" pair (e.g., "[nodeShortName ]")
# 08/12/17 rag 0.0 New - Based upon work for book
#-------------------------------------------------------------------------------
# Example use:
# > from WAuJ_utilities import nvTextListAsDict, displayDict
# > sDict = nvTextListAsDict( AdminTask.showServerTypeInfo( 'APPLICATION_SERVER' ) )
# > displayDict( sDict )
#-------------------------------------------------------------------------------
def nvTextListAsDict( text ) :
    'nvTextListAsDict( text ) - Convert a list of name/value pairs into a dictionary.'
    funName = callerName()
    #-----------------------------------------------------------------------------
    # Initialize the dictionary to be returned
    #-----------------------------------------------------------------------------
    result = {}
    #-----------------------------------------------------------------------------
    # Verify that the specified string "looks" right: balanced brackets, with
    # the whole text wrapped in one outer '[...]' pair.
    #-----------------------------------------------------------------------------
    if ( text.count( '[' ) == text.count( ']' ) ) and ( text[ 0 ] == '[' ) and ( text[ -1 ] == ']' ) :
        #---------------------------------------------------------------------------
        # Remove outer brackets (i.e., '[]') and then leading/trailing blanks
        #---------------------------------------------------------------------------
        innerText = text[ 1:-1 ].strip()
        #---------------------------------------------------------------------------
        # Locate a possible unused character so the list of values can
        # easily be split into name value pairs
        #---------------------------------------------------------------------------
        delimiters = ',.|!@#'    # Possible delimiter values
        for delim in delimiters :
            #-------------------------------------------------------------------------
            # If this char (delim) doesn't exist in the string, put it in,
            # between the close and open brackets so that it can be used to
            # split the line into a list of strings like '[name value]'.
            #-------------------------------------------------------------------------
            if innerText.count( delim ) == 0 :
                for pair in innerText.replace( '] [', ']%s[' % delim ).split( delim ) :
                    #---------------------------------------------------------------------
                    # verify that the string starts and ends with brackets...
                    # Note: a == b == c is only true if both a == b and b == c
                    #---------------------------------------------------------------------
                    if ( pair.count( '[' ) == pair.count( ']' ) == 1 ) and ( pair[ 0 ] == '[' ) and ( pair[ -1 ] == ']' ) :
                        #-------------------------------------------------------------------
                        # Occasionally, we have a situation where pair contains
                        # only a name, and not a name/value pair.
                        # So, this code was added to handle that rare situation.
                        #-------------------------------------------------------------------
                        contents = pair[ 1:-1 ].strip()
                        try :
                            ( name, value ) = contents.split( ' ', 1 )
                        except :
                            ( name, value ) = ( contents, '' )
                        result[ name ] = value
                    else :
                        print '%s error - Unexpected text: "%s" (ignored).' % ( funName, pair )
                #-----------------------------------------------------------------------
                # All name/pair sub-strings have been processed, we're done
                #-----------------------------------------------------------------------
                break
        else :
            # for/else: reached only when no unused delimiter was found above.
            print '%s error - Unable to split data, empty dictionary returned.' % funName
            return {}
    else :
        print '%s error - Unexpected data format: "%s", empty dictionary returned.' % ( funName, text )
    return result
#-------------------------------------------------------------------------------
# Name: parentTypes()
# Role: Return a multi-line string (i.e., containing newline separators) of all
# of the valid parent types for the specified configuration type.
# History:
# when who ver what
# -------- --- ---- ----------
# 12/05/18 rag 0.0 New
#-------------------------------------------------------------------------------
def parentTypes( Type, WSAStypes = None ) :
import os.linesep as newline
#---------------------------------------------------------------------------
# Local optimization - minimize the number of calls to AdminConfig.types()
#---------------------------------------------------------------------------
if not WSAStypes :
WSAStypes = AdminConfig.types().splitlines()
#---------------------------------------------------------------------------
# Use AdminConfig.parents() if we can, otherwise figure it out for ourself
#---------------------------------------------------------------------------
if Type in WSAStypes :
result = AdminConfig.parents( Type )
if result.startswith( 'WASX7351I' ) :
result = newline.join( [ kind for kind in WSAStypes if AdminConfig.attributes( kind ).find( Type ) > -1 ] )
else :
print 'parentTypes() error: unknown / unrecognized type:', Type
result = None
return result
#-------------------------------------------------------------------------------
# Name: scopeAsDict
# Role: Convert the specified scope configuration object into a dictionary
# History:
# when who ver what
# -------- --- ---- ----------
# 10/09/27 rag 0.1 Add - Include scopeName & scopeType values in result
# 10/09/24 rag 0.0 New - Based upon work down for certZilla
#-------------------------------------------------------------------------------
# Example use:
# > from WAuJ_utilities import WAuJ
# > ks = AdminTask.listKeyStores( '[-all true]' ).splitlines()[ 0 ]
# > attr = WAuJ.showAsDict( ks )
# > scope = WAuJ.scopeAsDict( attr[ 'managementScope' ] )
#-------------------------------------------------------------------------------
def scopeAsDict( configId ) :
    'scopeAsDict( configId ) - Return a dictionary of the specified scope config object.'
    funName = callerName()
    result = {}
    try :
        #-------------------------------------------------------------------------
        # Start from the attribute dictionary of the config object, then fold
        # in the parsed scopeName components plus two convenience keys:
        # 'configId' and 'name' (the value of whichever component matches
        # the object's scopeType).
        #-------------------------------------------------------------------------
        result = showAsDict( configId )
        Name = result[ 'scopeName' ]
        result.update( scopeNameAsDict( Name ) )
        result[ 'configId' ] = configId
        result[ 'name' ] = result[ result[ 'scopeType' ] ]
    except :
        # Report (but swallow) any failure; a partial dictionary may be
        # returned in this case.
        Type, value = sys.exc_info()[ :2 ]
        print '%s exception Type: %s\n\tvalue: %s' % ( funName, str( Type ), str( value ) )
    return result
#-------------------------------------------------------------------------------
# Name: scopeNameAsDict
# Role: Convert specified scopeName string into a dictionary
# History:
# when who ver what
# -------- --- ---- ----------
# 10/09/24 rag 0.0 New - Based upon work down for certZilla
#-------------------------------------------------------------------------------
# Example use:
# > from WAuJ_utilities import WAuJ
# > ks = AdminTask.listKeyStores( '[-all true]' ).splitlines()[ 0 ]
# > attr = WAuJ.showAsDict( ks )
# > info = WAuJ.showAsDict( attr[ 'managementScope' ] )
# > scope = WAuJ.scopeNameAsDict( info[ 'scopeName' ] )
#-------------------------------------------------------------------------------
def scopeNameAsDict( scopeName ) :
    'scopeNameAsDict( scopeName ) - Return a dictionary of the specified scopeName.'
    funName = callerName()
    result = {}
    try :
        #-------------------------------------------------------------------------
        # The scopeName appears to be a ':' separated alternation of
        # '(type)' and value tokens — TODO confirm against actual
        # managementScope data.
        #-------------------------------------------------------------------------
        nv = scopeName.split( ':' )
        # An odd number of tokens cannot be (type, value) pairs.
        if len( nv ) % 2 :
            raise ValueError, '%s error: Unrecognized input: %s' % ( funName, scopeName )
        for i in range( 0, len( nv ), 2 ) :
            n, v = nv[ i ], nv[ i + 1 ]
            # print '%s() name="%s" value="%s"' % ( funName, n[ 1:-1 ], v )
            # n[ 1:-1 ] strips the surrounding delimiters from the type name.
            result[ n[ 1:-1 ] ] = v
    except :
        # Any failure yields an empty dictionary (after reporting it).
        print '%s exception: %s' % ( funName, str( sys.exc_info()[ 1 ] ) )
        result = {}
    return result
#-------------------------------------------------------------------------------
# Name: scopedWSASvariables()
# Role: Return a dictionary of the specific WebSphere Application
# Server (WSAS) variables scoped by the specified configId.
# History:
# when who ver what
# -------- --- ---- ----------
# 10/05/19 rag 0.2 Fix handle KeyError for VMdict[ 'entries' ]
# Fix handle empty VMid
# 10/04/22 rag 0.1 Add docstring
# 10/04/18 rag 0.0 New
#-------------------------------------------------------------------------------
def scopedWSASvariables( configId ) :
'scopedWSASvariables( configId ) - Return a dictionary of the WSAS variables scoped by the specified configId.'
funName = callerName()
start = configId.find( '(' )
if start < 0 :
print '%s() error - no \'(\' found in configId: "%s"' % ( funName, configId )
return
fini = configId.find( '|' )
if fini < 0 :
print '%s*( error - no \'|\' found in configId: "%s"' % ( funName, configId )
return
#-----------------------------------------------------------------------------
# Quick and easy extraction of the part of the configId we need to
# filter the VariableMap configuration items.
#-----------------------------------------------------------------------------
prefix = configId[ start : fini + 1 ]
VMid = configIdFilter( 'VariableMap', prefix )
#-----------------------------------------------------------------------------
# Use this VariableMap configId to determine which entries exist.
#-----------------------------------------------------------------------------
result = {}
if VMid :
VMdict = showAsDict( VMid )
entries = VMdict.get( 'entries', '[]' )[ 1:-1 ]
#---------------------------------------------------------------------------
# | |
<reponame>ecoo-app/ecoo-backend
import base64
import secrets
import string
from enum import Enum
from urllib.parse import urlencode
import pysodium
import pytezos
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.db.models import Max, Q, Sum
from django.utils.crypto import get_random_string
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from fcm_django.models import FCMDevice
from pytezos.crypto.key import Key
from schwifty import IBAN
from apps.currency.mixins import CurrencyOwnedMixin
from apps.wallet.utils import create_message
from project.mixins import UUIDModel
class WALLET_STATES(Enum):
    """Lifecycle states of a wallet; stored as integers on Wallet.state."""
    UNVERIFIED = 0
    PENDING = 1
    VERIFIED = 2
    DEACTIVATED = 3
class WALLET_CATEGORIES(Enum):
    """Kinds of wallet; stored as integers on Wallet.category."""
    CONSUMER = 0
    COMPANY = 1
    OWNER = 2
# (value, translated label) pairs for the Wallet.state IntegerField choices.
WALLET_STATE_CHOICES = (
    (WALLET_STATES.UNVERIFIED.value, _("Unverified")),
    (WALLET_STATES.PENDING.value, _("Pending")),
    (WALLET_STATES.VERIFIED.value, _("Verified")),
    (WALLET_STATES.DEACTIVATED.value, _("Deactivated")),
)
# (value, translated label) pairs for the Wallet.category IntegerField choices.
WALLET_CATEGORY_CHOICES = (
    (WALLET_CATEGORIES.CONSUMER.value, _("Consumer")),
    (WALLET_CATEGORIES.COMPANY.value, _("Company")),
    (WALLET_CATEGORIES.OWNER.value, _("Owner")),
)
class Wallet(CurrencyOwnedMixin):
    """A currency-scoped wallet identified by an encoded tezos public key.

    Balances are never stored directly; they are derived from the related
    incoming/outgoing ``Transaction`` rows (see ``balance``).
    """

    # Django user controlling this wallet; nullable because wallets may be
    # created before being claimed (e.g. paper wallets).
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.DO_NOTHING,
        related_name="wallets",
    )
    # Human-friendly unique identifier (two letters + six digits, see
    # generate_wallet_id()).
    wallet_id = models.CharField(
        _("Wallet Id"), unique=True, blank=True, editable=False, max_length=128
    )
    public_key = models.CharField(
        _("Publickey"), unique=True, blank=True, editable=True, max_length=60
    )  # encoded public_key
    category = models.IntegerField(
        _("Category"),
        default=WALLET_CATEGORIES.CONSUMER.value,
        choices=WALLET_CATEGORY_CHOICES,
    )
    state = models.IntegerField(
        _("State"), default=WALLET_STATES.UNVERIFIED.value, choices=WALLET_STATE_CHOICES
    )
    @property
    def address(self):
        """Tezos address (public key hash) derived from ``public_key``."""
        return Key.from_encoded_key(self.public_key).public_key_hash()
    @property
    def balance(self):
        """Current balance: sum of incoming minus sum of outgoing amounts."""
        return (
            self.to_transactions.aggregate(Sum("amount")).get("amount__sum") or 0
        ) - (self.from_transactions.aggregate(Sum("amount")).get("amount__sum") or 0)
    @property
    def from_metatransactions(self):
        """All MetaTransactions sent from this wallet."""
        return MetaTransaction.objects.filter(from_wallet=self)
    @property
    def nonce(self):
        """Highest nonce among this wallet's outgoing meta transactions.

        Returns 0 when nothing has been sent yet.
        """
        # filter out amount==0
        transactions = self.from_metatransactions.filter(
            from_public_key=self.public_key, amount__gt=0
        )
        if transactions.count() == 0:
            return 0
        else:
            return transactions.aggregate(Max("nonce"))["nonce__max"]
    @property
    def is_in_public_key_transfer(self):
        """True if an unfinished public-key transfer request targets this wallet."""
        # NOTE(review): the magic number 2 looks like a transfer-request
        # "pending"-style state — confirm and replace with the proper enum.
        return self.transfer_requests.filter(state=2).exists()
    def __str__(self):
        return "{} - {}".format(self.wallet_id, self.currency)
    @staticmethod
    def generate_wallet_id():
        """Return a new candidate wallet id: 2 uppercase letters + 6 digits."""
        characters = get_random_string(2, string.ascii_uppercase)
        digits = str(secrets.randbelow(999999)).zfill(6)
        return characters + digits
    def notify_owner_receiving_money(self, from_wallet_id, amount):
        """Push-notify the owner that money arrived from ``from_wallet_id``."""
        # TODO: multi language support?
        # NOTE(review): the label "CHF" is hard-coded although the amount is
        # scaled by this wallet's own currency decimals — confirm.
        self.__notify_owner_devices(
            f"Sie haben {amount/pow(10,self.currency.decimals)} CHF von {from_wallet_id} erhalten"
        )
    def notify_transfer_successful(self, to_wallet_id, amount):
        """Push-notify the owner that a transfer to ``to_wallet_id`` succeeded."""
        self.__notify_owner_devices(
            f"Sie haben {amount/pow(10,self.currency.decimals)} CHF an {to_wallet_id} gesendet"
        )
    def notify_owner_verified(self):
        """Push-notify the owner that this wallet has been verified."""
        self.__notify_owner_devices(f"Wallet {self.wallet_id} wurde verifiziert")
    def notify_owner_transfer_request_done(self):
        """Push-notify the owner that a public-key transfer request completed."""
        self.__notify_owner_devices(
            f"PublicKeyRequest vollzogen für {self.wallet_id}",
            data={"wallet_id": self.wallet_id},
        )
    def __notify_owner_devices(self, message, data=None):
        # Send the message to every FCM device registered to the owner.
        devices = FCMDevice.objects.filter(user=self.owner)
        devices.send_message(
            title=settings.PUSH_NOTIFICATION_TITLE, body=message, data=data
        )
    def clean(self, *args, **kwargs):
        """Validate that ``public_key`` decodes to a tezos address."""
        super(Wallet, self).clean(*args, **kwargs)
        errors = {}
        # TODO: more to clean?
        # Key decoding raises for malformed keys; surface that as a
        # field-level validation error instead of an exception.
        try:
            self.address
        except:
            errors["public_key"] = ValidationError(
                _("Public key is not in valid format")
            )
        if len(errors) > 0:
            raise ValidationError(errors)
    def save(self, *args, **kwargs):
        # Newly created consumer wallets skip the verification flow and are
        # verified immediately.
        if self.category == WALLET_CATEGORIES.CONSUMER.value and self._state.adding:
            self.state = WALLET_STATES.VERIFIED.value
        super().save(*args, **kwargs)
    class Meta:
        # Stable ordering: oldest wallets first.
        ordering = ["created_at"]
class OwnerWallet(Wallet):
    """Wallet belonging to the currency issuer; it keeps its own private key."""

    # Secret key for the owner wallet; generated automatically in clean()
    # when missing.
    private_key = models.CharField(
        _("Privatekey"), unique=True, blank=True, editable=False, max_length=128
    )
    def save(self, *args, **kwargs):
        # Owner wallets are always VERIFIED and categorized as OWNER.
        self.state = WALLET_STATES.VERIFIED.value
        self.category = WALLET_CATEGORIES.OWNER.value
        super(OwnerWallet, self).save(*args, **kwargs)
    def clean(self, *args, **kwargs):
        """Generate a fresh key pair when no private key is present yet."""
        if self.private_key is None or len(self.private_key) <= 0:
            key = Key.generate()
            self.private_key = key.secret_key()
            self.public_key = key.public_key()
        # NOTE(review): super(Wallet, self) skips Wallet.clean() (and its
        # public-key validation) and calls the grandparent's clean — confirm
        # this is intentional and not a typo for super(OwnerWallet, self).
        super(Wallet, self).clean(*args, **kwargs)
class PaperWallet(Wallet):
    """Pre-generated wallet whose key pair is distributed on paper.

    The backend keeps the private key so it can be embedded (encrypted) in
    a deep link that the mobile app uses to import the wallet.
    """

    # Verification record that was consumed when this paper wallet was
    # generated (see generate_new_wallet()).
    user_verification = models.ForeignKey(
        "verification.UserVerification",
        null=True,
        on_delete=models.DO_NOTHING,
        blank=True,
    )
    private_key = models.CharField(unique=True, max_length=128)
    @property
    def can_be_used_for_verification(self):
        """Whether this wallet may still back a user verification.

        Company wallets must be completely unused; all other wallets must
        hold exactly one incoming transaction worth the currency's
        untouched starting capital.
        """
        if self.category == WALLET_CATEGORIES.COMPANY.value:
            return (
                self.from_transactions.count() == 0
                and self.to_transactions.count() == 0
                and self.balance == 0
            )
        return (
            self.from_transactions.count() == 0
            and self.to_transactions.count() == 1
            and self.balance == self.currency.starting_capital
        )
    @staticmethod
    def generate_new_wallet(
        currency,
        place_of_origin,
        user_verification,
        category=WALLET_CATEGORIES.CONSUMER.value,
        state=WALLET_STATES.VERIFIED.value,
    ):
        """Atomically create a paper wallet plus its backing user and profile.

        NOTE(review): the ``state`` parameter is accepted but never used —
        the wallet is always created VERIFIED; confirm whether intended.
        """
        with transaction.atomic():
            # Retry until an unused wallet_id is found.
            while True:
                wallet_id = Wallet.generate_wallet_id()
                if Wallet.objects.filter(wallet_id=wallet_id).exists():
                    continue
                else:
                    key = Key.generate()
                    private_key = key.secret_key()
                    public_key = key.public_key()
                    from apps.profiles.models import UserProfile
                    # Derive a unique slugified username from the verified
                    # person's name plus a random suffix.
                    username = slugify(
                        "%s %s %s"
                        % (
                            user_verification.first_name,
                            user_verification.last_name,
                            get_random_string(10),
                        )
                    )
                    while get_user_model().objects.filter(username=username).exists():
                        username = slugify(
                            "%s %s %s"
                            % (
                                user_verification.first_name,
                                user_verification.last_name,
                                get_random_string(10),
                            )
                        )
                    user = get_user_model().objects.create(
                        username=username,
                        password=get_user_model().objects.make_random_password(),
                    )
                    from apps.verification.models import VERIFICATION_STATES
                    # if we do not change the state here and save before we save the new user profile, then we trigger an SMS verification
                    user_verification.state = VERIFICATION_STATES.CLAIMED.value
                    user_verification.save()
                    profile = UserProfile(
                        owner=user,
                        first_name=user_verification.first_name,
                        last_name=user_verification.last_name,
                        address_street=user_verification.address_street,
                        address_town=user_verification.address_town,
                        address_postal_code=user_verification.address_postal_code,
                        # NOTE(review): placeholder phone number — confirm.
                        telephone_number="+417",
                        date_of_birth=user_verification.date_of_birth,
                        place_of_origin=place_of_origin.place_of_origin,
                    )
                    paper_wallet = PaperWallet.objects.create(
                        user_verification=user_verification,
                        owner=user,
                        wallet_id=wallet_id,
                        private_key=private_key,
                        public_key=public_key,
                        currency=currency,
                        state=WALLET_STATES.VERIFIED.value,
                        category=category,
                    )
                    profile.wallet = paper_wallet
                    profile.save()
                    user_verification.user_profile = profile
                    user_verification.save()
                    return paper_wallet
    def generate_deeplink(self):
        """Return a dynamic link (ecoo.page.link) embedding the encrypted key.

        The private key is encrypted with XChaCha20-Poly1305 using the
        backend's ENCRYPTION_KEY; nonce and ciphertext travel base64-encoded
        in the link payload.
        """
        encryption_key = bytes.fromhex(settings.ENCRYPTION_KEY)
        nonce = pysodium.randombytes(pysodium.crypto_secretbox_NONCEBYTES)
        pk = pysodium.crypto_aead_xchacha20poly1305_ietf_encrypt(
            self.private_key.encode("UTF-8"), None, nonce, encryption_key
        )
        # TODO: never used?!?
        # decrypted_pk = pysodium.crypto_aead_xchacha20poly1305_ietf_decrypt(
        #     pk, None, nonce, encryption_key)
        payload = {
            "nonce": base64.b64encode(nonce),
            "id": self.wallet_id,
            "pk": base64.b64encode(pk),
        }
        return (
            "https://ecoo.page.link/?"
            + urlencode(
                {
                    "link": "{}wallet/?{}".format(
                        settings.DEEPLINK_BASE_URL, urlencode(payload)
                    )
                }
            )
            + "&apn=ch.ecoupon.mobile.android&ibi=ch.ecoupon.mobile&isi="
            + settings.DEEPLINK_ISI_PARAM
        )
    def save(self, *args, **kwargs):
        # Paper wallets are always verified.
        self.state = WALLET_STATES.VERIFIED.value
        super(PaperWallet, self).save(*args, **kwargs)
    class Meta:
        verbose_name = _("Paper wallet")
        verbose_name_plural = _("Paper wallets")
class TRANSACTION_STATES(Enum):
    """Processing states of a transaction; stored as integers on Transaction.state."""
    OPEN = 1
    PENDING = 2
    DONE = 3
    FAILED = 4
# (value, label) pairs for Transaction.state choices; note these labels are
# plain strings (not translated, unlike the wallet choices above).
TRANSACTION_STATE_CHOICES = (
    (TRANSACTION_STATES.OPEN.value, "Open"),
    (TRANSACTION_STATES.PENDING.value, "Pending"),
    (TRANSACTION_STATES.DONE.value, "Done"),
    (TRANSACTION_STATES.FAILED.value, "Failed"),
)
class Transaction(UUIDModel):
    """Money transfer between two wallets.

    A transaction without a ``from_wallet`` is a minting transaction (new
    money credited to ``to_wallet``).
    """

    from_wallet = models.ForeignKey(
        Wallet,
        verbose_name=_("From Wallet"),
        on_delete=models.DO_NOTHING,
        related_name="from_transactions",
        blank=True,
        null=True,
    )
    to_wallet = models.ForeignKey(
        Wallet,
        verbose_name=_("To Wallet"),
        on_delete=models.DO_NOTHING,
        related_name="to_transactions",
    )
    # Amount in the currency's smallest unit (display code divides by
    # 10**currency.decimals).
    amount = models.IntegerField(
        verbose_name=_("Amount"),
    )
    state = models.IntegerField(
        verbose_name=_("State"),
        choices=TRANSACTION_STATE_CHOICES,
        default=TRANSACTION_STATES.OPEN.value,
    )
    # Set once the transaction has been submitted to the chain.
    submitted_to_chain_at = models.DateTimeField(
        verbose_name=_("Submitted to chain"), null=True, blank=True, editable=False
    )
    # Hash of the on-chain operation, filled in after submission.
    operation_hash = models.CharField(
        verbose_name=_("Operation hash"), max_length=128, blank=True, editable=False
    )
    notes = models.TextField(verbose_name=_("Notes"), blank=True)
    user_notes = models.TextField(verbose_name=_("User notes"), blank=True)
def __str__(self):
if self.from_wallet:
return "{} -{}-> {}".format(
self.from_wallet.wallet_id, self.amount, self.to_wallet.wallet_id
)
else:
return "-{}-> {}".format(self.amount, self.to_wallet.wallet_id)
    @property
    def is_cashout_transaction(self) -> bool:
        """True when the destination is the currency's dedicated cash-out wallet."""
        return self.to_wallet == self.to_wallet.currency.cashout_wallet
    @property
    def is_mint_transaction(self):
        """True when there is no source wallet, i.e. new money is minted."""
        return self.from_wallet is None
    @property
    def is_verification_transaction(self):
        """Heuristic: does this transaction empty a paper wallet?

        Before the row is saved (no pk) the paper wallet must be untouched
        and its whole balance must equal ``amount``; after saving (pk set)
        the wallet must show exactly one outgoing transaction and a zero
        balance.
        """
        # TODO: is this correct?
        from_wallet = self.from_wallet
        if from_wallet is None:
            return False
        # Only transactions originating from a paper wallet can qualify.
        if PaperWallet.objects.filter(wallet_id=from_wallet.wallet_id).count() == 0:
            return False
        if (
            from_wallet.from_transactions.count() == 0
            and from_wallet.balance == self.amount
            and not self.pk
        ):
            return True
        if (
            from_wallet.from_transactions.count() == 1
            and from_wallet.balance == 0
            and self.pk
        ):
            return True
        return False
    @property
    def tag(self):
        """Classify the transaction relative to the currency's owner wallet.

        Returns "from_owner", "to_owner", or "" for ordinary transfers.
        """
        if (
            self.from_wallet
            and self.from_wallet == self.from_wallet.currency.owner_wallet
        ):
            return "from_owner"
        if self.to_wallet == self.to_wallet.currency.owner_wallet:
            return "to_owner"
        return ""
    @staticmethod
    def get_belonging_to_user(user):
        """Return every transaction touching any wallet owned by ``user``."""
        belonging_wallets = user.wallets.all()
        return Transaction.objects.filter(
            Q(from_wallet__in=belonging_wallets) | Q(to_wallet__in=belonging_wallets)
        )
    def clean(self, *args, **kwargs):
        """Validate wallet states, balances, currencies and verification rules.

        Problems are collected per-field in ``errors`` and raised as a single
        ValidationError at the end, so forms/admin display all issues at once.
        May mutate state: zero-amount transactions are marked DONE, and a
        valid verification transaction verifies (and saves) the destination
        wallet as a side effect.
        """
        errors = {}
        if hasattr(self, "to_wallet"):
            # Destination must not be mid wallet-transfer.
            if self.to_wallet.transfer_requests.exclude(
                state=TRANSACTION_STATES.DONE.value
            ).exists():
                errors["to_wallet"] = ValidationError(
                    _(
                        "Wallet transfer ongoing for destination wallet, cannot send funds to this wallet at the moment."
                    )
                )
            # Minting (no from_wallet) is only allowed for opted-in currencies.
            if self.is_mint_transaction and not self.to_wallet.currency.allow_minting:
                errors["to_wallet"] = ValidationError(
                    _("Currency must allow minting if you want to mint")
                )
        if self.amount is not None and self.amount < 0:
            errors["amount"] = ValidationError(_("Amount must be >= 0"))
        if self.amount == 0:
            # Nothing to move on chain; mark as completed immediately.
            self.state = TRANSACTION_STATES.DONE.value
        if not self.is_mint_transaction:
            if hasattr(self, "from_wallet"):
                # Sender must be able to cover the amount.
                if self.amount and self.from_wallet.balance < self.amount:
                    errors["from_wallet"] = ValidationError(
                        _("Balance of from_wallet must be greater than amount")
                    )
                # Cross-currency transfers are not supported.
                if (
                    hasattr(self, "to_wallet")
                    and self.from_wallet.currency != self.to_wallet.currency
                ):
                    errors["from_wallet"] = ValidationError(
                        _('"From wallet" and "to wallet" need to use same currency')
                    )
                    errors["to_wallet"] = ValidationError(
                        _('"From wallet" and "to wallet" need to use same currency')
                    )
                # Source must not be mid wallet-transfer either.
                if self.from_wallet.transfer_requests.exclude(
                    state=TRANSACTION_STATES.DONE.value
                ).exists():
                    errors["from_wallet"] = ValidationError(
                        _(
                            "Wallet transfer ongoing for source wallet, cannot send funds from this wallet at the moment."
                        )
                    )
                if self.from_wallet.state != WALLET_STATES.VERIFIED.value:
                    errors["from_wallet"] = ValidationError(
                        _("Only verified addresses can send money")
                    )
        if self.is_verification_transaction and not self.pk:
            # check max_claim count if transaction would be created
            claim_count = sum(
                [
                    1
                    for tx in Transaction.objects.filter(
                        to_wallet=self.to_wallet
                    )
                    if tx.is_verification_transaction
                ]
            )
            if claim_count > self.to_wallet.currency.max_claims:
                raise ValidationError(_("Claim maximum reached"))
        if self.to_wallet.state != WALLET_STATES.VERIFIED.value:
            # wallet is not verified
            # Local import avoids a circular dependency with the verification app.
            from apps.verification.models import VERIFICATION_STATES
            if self.is_verification_transaction:
                # A verification transaction may verify the wallet itself,
                # provided the SMS pin was claimed (or the currency skips SMS).
                profile = (
                    self.to_wallet.user_profiles.first()
                    if self.to_wallet.category
                    == WALLET_CATEGORIES.CONSUMER.value
                    else self.to_wallet.company_profiles.first()
                )
                if (
                    profile
                    and profile.sms_pin_verifications
                    and profile.sms_pin_verifications.count() > 0
                    and profile.sms_pin_verifications.first().state
                    == VERIFICATION_STATES.CLAIMED.value
                ) or not self.to_wallet.currency.needs_sms_verification:
                    self.to_wallet.state = WALLET_STATES.VERIFIED.value
                    self.to_wallet.save()
                else:
                    errors["to_wallet"] = ValidationError(
                        "To wallet needs sms verification before a 'verification transaction' can be created"
                    )
        if len(errors) > 0:
            raise ValidationError(errors)
        super(Transaction, self).clean(*args, **kwargs)
@property
def currency_amount(self):
if self.from_wallet:
decimals = self.from_wallet.currency.decimals
elif self.to_wallet:
decimals = self.to_wallet.currency.decimals
else:
decimals = 2
return self.amount / 10 ** decimals
    class Meta:
        # Newest first. NOTE(review): relies on a created_at field presumably
        # defined on a base model/mixin outside this view — confirm.
        ordering = ["-created_at"]
        verbose_name = _("Transaction")
        verbose_name_plural = _("Transactions")
class MetaTransaction(Transaction):
    # A client-signed transaction relayed on the sender's behalf; extends
    # Transaction with the signing metadata needed for on-chain submission.
    # Per-wallet counter; clean() requires it to be from_wallet.nonce + 1.
    nonce = models.IntegerField()
    # Client-side signature over the transfer message; unique per operation.
    signature = models.CharField(max_length=128, unique=True)
    # Copied from from_wallet.public_key in clean(); not user-editable.
    from_public_key = models.CharField(
        _("Publickey"), blank=True, editable=False, max_length=60
    )
def to_meta_transaction_dictionary(self):
return {
"from_public_key": self.from_wallet.public_key,
"signature": self.signature,
"nonce": self.nonce,
"txs": [
{
"to_": self.to_wallet.address,
"amount": self.amount,
"token_id": self.from_wallet.currency.token_id,
}
],
}
def clean(self, *args, **kwargs):
super().clean(*args, **kwargs)
errors = {}
if self.is_mint_transaction:
errors["from_wallet"] = ValidationError(
_("Metatransaction always must have from")
)
if not self.nonce or self.nonce <= 0:
errors["nonce"] = ValidationError(_("Nonce must be > 0"))
if hasattr(self, "from_wallet"):
if self.from_wallet and self.nonce != self.from_wallet.nonce + 1:
errors["nonce"] = ValidationError(
_(
"Nonce must be 1 higher than from_wallet's last meta transaction nonce"
)
)
try:
message = create_message(
self.from_wallet,
self.to_wallet,
self.nonce,
self.from_wallet.currency.token_id,
self.amount,
)
except:
pass
if self.from_wallet and self.to_wallet:
self.from_public_key = self.from_wallet.public_key
| |
<chord>
... <duration base="1/2"/>
... <lyric>
... <verse i="0">das,</verse>
... <verse i="1">scherz,</verse>
... </lyric>
... <heads>
... <head pitch="G4"/>
... </heads>
... </chord>
... <chord>
... <duration base="1/2"/>
... <lyric>
... <verse i="0">so</verse>
... <verse i="1">der</verse>
... </lyric>
... <heads>
... <head pitch="A4"/>
... </heads>
... </chord>
... <barline type="end"/>
... </noteObjects>
... """
>>> noteObjectsElement = ci.domElementFromText(noteObjectsString)
>>> s = ci.streamFromNoteObjects(noteObjectsElement)
>>> s.show('text')
{0.0} <music21.clef.Treble8vbClef>
{0.0} <music21.key.KeySignature of 1 flat>
{0.0} <music21.note.Note G>
{2.0} <music21.note.Note A>
{4.0} <music21.bar.Barline style=final>
>>> s.highestTime
4.0
'''
if streamObj is None:
s = stream.Stream()
else:
s = streamObj
mapping = {'clefSign': self.clefFromClefSign,
'keySign': self.keySignatureFromKeySign,
'timeSign': self.timeSignatureFromTimeSign,
'rest': self.restFromRest,
'chord': self.chordOrNoteFromChord,
'barline': self.barlineListFromBarline,
}
for d in noteObjectsElement:
el = None
t = d.tag
if t not in mapping:
print("Unknown tag type: %s" % t)
else:
el = mapping[t](d)
if isinstance(el, list): #barlineList returns a list
for elSub in el:
s.coreAppend(elSub)
elif el is None:
pass
else:
s.coreAppend(el)
s.coreElementsChanged()
return s
def restFromRest(self, restElement):
'''
Returns a :class:`~music21.rest.Rest` object from a <rest> tag.
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> restElement = ci.domElementFromText('<rest><duration base="1/2"/></rest>')
>>> r = ci.restFromRest(restElement)
>>> r
<music21.note.Rest rest>
>>> r.duration.type
'half'
'''
r = note.Rest()
durationList = restElement.findall('duration')
r.duration = self.durationFromDuration(durationList[0])
return r
def chordOrNoteFromChord(self, chordElement):
'''
returns a :class:`~music21.note.Note` or :class:`~music21.chord.Chord`
from a chordElement -- a `Note`
is returned if the <chord> has one <head> element, a `Chord` is
returned if there are multiple <head> elements.
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> chordElement = ci.domElementFromText(
... '<chord><duration base="1/1"/><heads><head pitch="G4"/></heads></chord>')
>>> n = ci.chordOrNoteFromChord(chordElement)
>>> n
<music21.note.Note G>
>>> n.duration
<music21.duration.Duration 4.0>
This one is an actual chord
>>> chordElement = ci.domElementFromText(
... '<chord><duration base="1/8"/>' +
... '<heads><head pitch="G4"/><head pitch="A5"/></heads></chord>')
>>> c = ci.chordOrNoteFromChord(chordElement)
>>> c
<music21.chord.Chord G3 A4>
>>> c.duration
<music21.duration.Duration 0.5>
TODO: test Lyrics
'''
durationList = chordElement.findall('duration')
headsList = chordElement.findall('heads')
if len(durationList) != 1 or len(headsList) != 1:
raise CapellaImportException("Malformed chord!")
notesList = self.notesFromHeads(headsList[0])
noteOrChord = None
if not notesList:
raise CapellaImportException("Malformed chord!")
elif len(notesList) == 1:
noteOrChord = notesList[0] # a Note object
else:
noteOrChord = chord.Chord(notesList)
noteOrChord.duration = self.durationFromDuration(durationList[0])
lyricsList = chordElement.findall('lyric')
if lyricsList:
lyricsList = self.lyricListFromLyric(lyricsList[0])
noteOrChord.lyrics = lyricsList
return noteOrChord
def notesFromHeads(self, headsElement):
'''
returns a list of :class:`~music21.note.Note` elements for each <head> in <heads>
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> headsElement = ci.domElementFromText(
... '<heads><head pitch="B7"><alter step="-1"/></head><head pitch="C2"/></heads>')
>>> ci.notesFromHeads(headsElement)
[<music21.note.Note B->, <music21.note.Note C>]
'''
notes = []
headDomList = headsElement.findall('head')
for headElement in headDomList:
notes.append(self.noteFromHead(headElement))
return notes
def noteFromHead(self, headElement):
'''
return a :class:`~music21.note.Note` object from a <head> element. This will become
part of Chord._notes if there are multiple, but in any case, it needs to be a Note
not a Pitch for now, because it could have Tie information
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> headElement = ci.domElementFromText(
... '<head pitch="B7"><alter step="-1"/><tie end="true"/></head>')
>>> n = ci.noteFromHead(headElement)
>>> n
<music21.note.Note B->
>>> n.octave # capella octaves are one higher than written
6
>>> n.tie
<music21.tie.Tie stop>
'''
if 'pitch' not in headElement.attrib:
raise CapellaImportException("Cannot deal with <head> element without pitch!")
noteNameWithOctave = headElement.attrib['pitch']
n = note.Note()
n.nameWithOctave = noteNameWithOctave
n.octave = n.octave - 1 # capella octaves are 1 off...
alters = headElement.findall('alter')
if len(alters) > 1:
raise CapellaImportException("Cannot deal with multiple <alter> elements!")
elif len(alters) == 1:
acc = self.accidentalFromAlter(alters[0])
n.pitch.accidental = acc
ties = headElement.findall('tie')
if len(ties) > 1:
raise CapellaImportException("Cannot deal with multiple <tie> elements!")
elif len(ties) == 1:
thisTie = self.tieFromTie(ties[0])
n.tie = thisTie
return n
def accidentalFromAlter(self, alterElement):
'''
return a :class:`~music21.pitch.Accidental` object from an <alter> tag.
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> alter = ci.domElementFromText('<alter step="-1"/>')
>>> ci.accidentalFromAlter(alter)
<accidental flat>
The only known display type is "suppress"
>>> alter = ci.domElementFromText('<alter step="2" display="suppress"/>')
>>> acc = ci.accidentalFromAlter(alter)
>>> acc
<accidental double-sharp>
>>> acc.displayType
'never'
'''
if 'step' in alterElement.attrib:
alteration = int(alterElement.attrib['step'])
else:
print("No alteration...")
alteration = 0
acc = pitch.Accidental(alteration)
if 'display' in alterElement.attrib and alterElement.attrib['display'] == 'suppress':
acc.displayType = 'never'
return acc
def tieFromTie(self, tieElement):
'''
returns a :class:`~music21.tie.Tie` element from a <tie> tag
if begin == 'true' then Tie.type = start
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> tieEl = ci.domElementFromText('<tie begin="true"/>')
>>> ci.tieFromTie(tieEl)
<music21.tie.Tie start>
if end == 'true' then Tie.type = stop
>>> tieEl = ci.domElementFromText('<tie end="true"/>')
>>> ci.tieFromTie(tieEl)
<music21.tie.Tie stop>
if begin == 'true' and end == 'true' then Tie.type = continue (is this right???)
>>> tieEl = ci.domElementFromText('<tie begin="true" end="true"/>')
>>> ci.tieFromTie(tieEl)
<music21.tie.Tie continue>
'''
begin = False
end = False
if 'begin' in tieElement.attrib and tieElement.attrib['begin'] == 'true':
begin = True
if 'end' in tieElement.attrib and tieElement.attrib['end'] == 'true':
end = True
tieType = None
if begin is True and end is True:
tieType = 'continue'
elif begin is True:
tieType = 'start'
elif end is True:
tieType = 'stop'
else:
return None
tieObj = tie.Tie(tieType)
return tieObj
def lyricListFromLyric(self, lyricElement):
'''
returns a list of :class:`~music21.note.Lyric` objects from a <lyric> tag
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> lyricEl = ci.domElementFromText(
... '<lyric><verse i="0" hyphen="true">di</verse>' +
... '<verse i="1">man,</verse><verse i="2">frau,</verse></lyric>')
>>> ci.lyricListFromLyric(lyricEl)
[<music21.note.Lyric number=1 syllabic=begin text="di">,
<music21.note.Lyric number=2 syllabic=single text="man,">,
<music21.note.Lyric number=3 syllabic=single text="frau,">]
'''
lyricList = []
verses = lyricElement.findall('verse')
for d in verses:
thisLyric = self.lyricFromVerse(d)
if thisLyric is not None:
lyricList.append(thisLyric)
return lyricList
def lyricFromVerse(self, verse):
'''
returns a :class:`~music21.note.Lyric` object from a <verse> tag
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> verse = ci.domElementFromText('<verse i="0" hyphen="true">di"</verse>')
>>> ci.lyricFromVerse(verse)
<music21.note.Lyric number=1 syllabic=begin text="di"">
Does not yet support 'align' attribute
if the text is empty, returns None
'''
verseNumber = 1
syllabic = 'single'
if 'i' in verse.attrib:
verseNumber = int(verse.attrib['i']) + 1
if 'hyphen' in verse.attrib and verse.attrib['hyphen'] == 'true':
syllabic = 'begin'
text = verse.text
if text is None or text == "":
return None
else:
lyric = note.Lyric(text=text, number=verseNumber, syllabic=syllabic, applyRaw=True)
return lyric
# i = number - 1
# align
# hyphen = true
clefMapping = {'treble': clef.TrebleClef,
'bass': clef.BassClef,
'alto': clef.AltoClef,
'tenor': clef.TenorClef,
'G2-': clef.Treble8vbClef,
}
def clefFromClefSign(self, clefSign):
'''
returns a :class:`~music21.clef.Clef` object or subclass from a <clefSign> tag.
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> clefSign = ci.domElementFromText('<clefSign clef="treble"/>')
>>> ci.clefFromClefSign(clefSign)
<music21.clef.TrebleClef>
>>> clefSign = ci.domElementFromText('<clefSign clef="G2-"/>')
>>> ci.clefFromClefSign(clefSign)
<music21.clef.Treble8vbClef>
>>> clefSign = ci.domElementFromText('<clefSign clef="F1+"/>')
>>> clefObject = ci.clefFromClefSign(clefSign)
>>> clefObject
<music21.clef.FClef>
>>> clefObject.sign
'F'
>>> clefObject.line
1
>>> clefObject.octaveChange
1
'''
if 'clef' in clefSign.attrib:
clefValue = clefSign.attrib['clef']
if clefValue in self.clefMapping:
return self.clefMapping[clefValue]()
elif clefValue[0] == 'p':
return clef.PercussionClef()
elif len(clefValue) > 1:
clefSignAndLine = clefValue[0:2]
clefOctaveChange = 0
if len(clefValue) > 2:
if clefValue[2] == '+':
clefOctaveChange = 1
elif clefValue[2] == '-':
clefOctaveChange = -1
clefObj = clef.clefFromString(clefSignAndLine, clefOctaveChange)
return clefObj
return None
def keySignatureFromKeySign(self, keySign):
'''
Returns a :class:`~music21.key.KeySignature` object from a keySign tag.
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> keySign = ci.domElementFromText('<keySign fifths="-1"/>')
>>> ci.keySignatureFromKeySign(keySign)
<music21.key.KeySignature of 1 flat>
'''
if 'fifths' in keySign.attrib:
keyFifths = int(keySign.attrib['fifths'])
return key.KeySignature(keyFifths)
def timeSignatureFromTimeSign(self, timeSign):
'''
Returns a :class:`~music21.meter.TimeSignature` object from a timeSign tag.
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> timeSign = ci.domElementFromText('<timeSign time="4/4"/>')
>>> ci.timeSignatureFromTimeSign(timeSign)
<music21.meter.TimeSignature 4/4>
>>> timeSign = ci.domElementFromText('<timeSign time="infinite"/>')
>>> ci.timeSignatureFromTimeSign(timeSign) is None
True
'''
if 'time' in timeSign.attrib:
timeString = timeSign.attrib['time']
if timeString != 'infinite':
return meter.TimeSignature(timeString)
else:
return None
else:
return None
def durationFromDuration(self, durationElement):
'''
>>> ci = capella.fromCapellaXML.CapellaImporter()
>>> durationTag = ci.domElementFromText('<duration base="1/32" dots="1"/>')
>>> d = ci.durationFromDuration(durationTag)
>>> d
<music21.duration.Duration 0.1875>
>>> d.type
'32nd'
>>> d.dots
1
>>> durationTag2 = ci.domElementFromText(
... '<duration base="1/4"><tuplet count="3"/></duration>')
>>> d2 = ci.durationFromDuration(durationTag2)
>>> d2
<music21.duration.Duration 2/3>
>>> d2.type
'quarter'
>>> d2.tuplets
(<music21.duration.Tuplet 3/2>,)
Does not handle noDuration='true', display, churchStyle on rest durations
'''
dur = duration.Duration()
if 'base' in durationElement.attrib:
baseValue = durationElement.attrib['base']
slashIndex = baseValue.find("/")
if slashIndex != -1:
firstNumber = int(baseValue[0:slashIndex])
secondNumber = int(baseValue[slashIndex + 1:])
quarterLength = (4.0 * firstNumber)/secondNumber
dur.quarterLength = quarterLength
if 'dots' in durationElement.attrib:
dotNumber = int(durationElement.attrib['dots'])
dur.dots = dotNumber
tuplets = durationElement.findall('tuplet')
for d in tuplets:
| |
<filename>code/player.py
#<NAME>
#Settlers of Catan, 2020
#Imports
from board import *
import numpy as np
#Class definition for a player
class player():
'Class Definition for Game Player'
#Initialize a game player, we use A, B and C to identify
    def __init__(self, playerName, playerColor):
        """Create a game player with full building stock and empty build graph."""
        self.name = playerName
        self.color = playerColor
        self.victoryPoints = 0  # total VPs, including hidden VP dev cards
        self.isAI = False  # human player by default
        self.settlementsLeft = 5  # remaining building stock
        self.roadsLeft = 15
        self.citiesLeft = 4
        # NOTE(review): these starting resource counts look like debug/test
        # values rather than a standard starting hand — confirm.
        self.resources = {'ORE':5, 'BRICK':6, 'WHEAT':3, 'WOOD':6, 'SHEEP':3} #Dictionary that keeps track of resource amounts
        self.knightsPlayed = 0
        self.largestArmyFlag = False
        self.maxRoadLength = 0
        self.longestRoadFlag = False
        #Undirected Graph to keep track of which vertices and edges player has colonised
        #Every time a player's build graph is updated the gameBoardGraph must also be updated
        #Each of the 3 lists store vertex information - Roads are stores with tuples of vertex pairs
        self.buildGraph = {'ROADS':[], 'SETTLEMENTS':[], 'CITIES':[]}
        self.portList = [] #List of ports acquired
        #Dev cards in possession
        self.newDevCards = [] #List to keep the new dev cards draw - update the main list every turn
        self.devCards = {'KNIGHT':0, 'VP':0, 'MONOPOLY':0, 'ROADBUILDER':0, 'YEAROFPLENTY':0}
        self.devCardPlayedThisTurn = False
        # VPs opponents can see exclude hidden VP dev cards.
        self.visibleVictoryPoints = self.victoryPoints - self.devCards['VP']
#function to build a road from vertex v1 to vertex v2
def build_road(self, v1, v2, board):
'Update buildGraph to add a road on edge v1 - v2'
if(self.resources['BRICK'] > 0 and self.resources['WOOD'] > 0): #Check if player has resources available
if(self.roadsLeft > 0): #Check if player has roads left
self.buildGraph['ROADS'].append((v1,v2))
self.roadsLeft -= 1
#Update player resources
self.resources['BRICK'] -= 1
self.resources['WOOD'] -= 1
board.updateBoardGraph_road(v1, v2, self) #update the overall boardGraph
#Calculate current max road length and update
maxRoads = self.get_road_length(board)
self.maxRoadLength = maxRoads
print('{} Built a Road. MaxRoadLength: {}'.format(self.name, self.maxRoadLength))
else:
print("No roads available to build")
else:
print("Insufficient Resources to Build Road - Need 1 BRICK, 1 WOOD")
#function to build a settlement on vertex with coordinates vCoord
def build_settlement(self, vCoord, board):
'Update player buildGraph and boardgraph to add a settlement on vertex v'
#Take input from Player on where to build settlement
#Check if player has correct resources
#Update player resources and boardGraph with transaction
if(self.resources['BRICK'] > 0 and self.resources['WOOD'] > 0 and self.resources['SHEEP'] > 0 and self.resources['WHEAT'] > 0): #Check if player has resources available
if(self.settlementsLeft > 0): #Check if player has settlements left
self.buildGraph['SETTLEMENTS'].append(vCoord)
self.settlementsLeft -= 1
#Update player resources
self.resources['BRICK'] -= 1
self.resources['WOOD'] -= 1
self.resources['SHEEP'] -= 1
self.resources['WHEAT'] -= 1
self.victoryPoints += 1
board.updateBoardGraph_settlement(vCoord, self) #update the overall boardGraph
print('{} Built a Settlement'.format(self.name))
#Add port to players port list if it is a new port
if((board.boardGraph[vCoord].port != False) and (board.boardGraph[vCoord].port not in self.portList)):
self.portList.append(board.boardGraph[vCoord].port)
print("{} now has {} Port access".format(self.name, board.boardGraph[vCoord].port))
else:
print("No settlements available to build")
else:
print("Insufficient Resources to Build Settlement. Build Cost: 1 BRICK, 1 WOOD, 1 WHEAT, 1 SHEEP")
#function to build a city on vertex v
def build_city(self, vCoord, board):
'Upgrade existing settlement to city in buildGraph'
if(self.resources['WHEAT'] >= 2 and self.resources['ORE'] >= 3): #Check if player has resources available
if(self.citiesLeft > 0):
self.buildGraph['CITIES'].append(vCoord)
self.settlementsLeft += 1 #Increase number of settlements and decrease number of cities
self.citiesLeft -=1
#Update player resources
self.resources['ORE'] -= 3
self.resources['WHEAT'] -= 2
self.victoryPoints += 1
board.updateBoardGraph_city(vCoord, self) #update the overall boardGraph
print('{} Built a City'.format(self.name))
else:
print("No cities available to build")
else:
print("Insufficient Resources to Build City. Build Cost: 3 ORE, 2 WHEAT")
#function to move robber to a specific hex and steal from a player
    def move_robber(self, hexIndex, board, player_robbed):
        'Update boardGraph with Robber and steal resource'
        # Relocate the robber on the board, then rob the chosen player;
        # steal_resource handles player_robbed being None.
        board.updateBoardGraph_robber(hexIndex)
        #Steal a random resource from other players
        self.steal_resource(player_robbed)
        return
#Function to steal a random resource from player_2
def steal_resource(self, player_2):
if(player_2 == None):
print("No Player on this hex to Rob")
return
#Get all resources player 2 has in a list and use random list index to steal
p2_resources = []
for resourceName, resourceAmount in player_2.resources.items():
p2_resources += [resourceName]*resourceAmount
resourceIndexToSteal = np.random.randint(0, len(p2_resources))
#Get a random permutation and steal a card
p2_resources = np.random.permutation(p2_resources)
resourceStolen = p2_resources[resourceIndexToSteal]
#Update resources of both players
player_2.resources[resourceStolen] -= 1
self.resources[resourceStolen] += 1
print("Stole 1 {} from Player {}".format(resourceStolen, player_2.name))
return
#Function to calculate road length for longest road calculation
#Use both player buildgraph and board graph to compute recursively
def get_road_length(self, board):
roadLengths = [] #List to store road lengths from each starting edge
for road in self.buildGraph['ROADS']: #check for every starting edge
self.road_i_lengths = [] #List to keep track of all lengths of roads resulting from this root road
roadCount = 0
roadArr = []
vertexList = []
#print("Start road:", road)
self.check_path_length(road, roadArr, roadCount, vertexList, board.boardGraph)
road_inverted = (road[1], road[0])
roadCount = 0
roadArr = []
vertexList = []
self.check_path_length(road_inverted, roadArr, roadCount, vertexList, board.boardGraph)
roadLengths.append(max(self.road_i_lengths)) #Update roadLength with max starting from this road
#print(self.road_i_lengths)
#print("Road Lengths:", roadLengths, max(roadLengths))
return max(roadLengths)
#Function to checl the path length from a current edge to all possible other vertices not yet visited by t
def check_path_length(self, edge, edgeList, roadLength, vertexList, boardGraph):
#Append current edge to list and increment road count
edgeList.append(edge) #Append the road
roadLength += 1
vertexList.append(edge[0]) #Append the first vertex
#Get new neighboring forward edges from this edge - not visited by the search yet
road_neighbors_list = self.get_neighboring_roads(edge, boardGraph, edgeList, vertexList)
#print(neighboringRoads)
#if no neighboring roads exist append the roadLength upto this edge
if(road_neighbors_list == []):
#print("No new neighbors found")
self.road_i_lengths.append(roadLength)
return
else:
#check paths from left and right neighbors separately
for neighbor_road in road_neighbors_list:
#print("checking neighboring edge:", neighbor_road)
self.check_path_length(neighbor_road, edgeList, roadLength, vertexList, boardGraph)
#Helper function to get neighboring edges from this road that haven't already been explored
#We want forward neighbors only
def get_neighboring_roads(self, road_i, boardGraph, visitedRoads, visitedVertices):
#print("Getting neighboring roads for current road:", road_i)
newNeighbors = []
#Use v1 and v2 to get the vertices to expand from
v1 = road_i[0]
v2 = road_i[1]
for edge in self.buildGraph['ROADS']:
if(edge[1] in visitedVertices):
edge = (edge[1], edge[0]) #flip the edge if the orientation is reversed
if(edge not in visitedRoads): #If it is a new distinct edge
if(boardGraph[v2].state['Player'] in [self, None]):#Add condition for vertex to be not colonised by anyone else
if(edge[0] == v2 and edge[0] not in visitedVertices): #If v2 has neighbors, defined starting or finishing at v2
#print("Appending NEW neighbor:", edge)
newNeighbors.append(edge)
if(edge[0] == v1 and edge[0] not in visitedVertices):
newNeighbors.append(edge)
if(edge[1] == v2 and edge[1] not in visitedVertices): #If v1 has neighbors, defined starting or finishing at v2
newNeighbors.append((edge[1], edge[0]))
if(edge[1] == v1 and edge[1] not in visitedVertices):
newNeighbors.append((edge[1], edge[0]))
return newNeighbors
#function to end turn
def end_turn():
'Pass turn to next player and update game state'
#function to draw a Development Card
def draw_devCard(self, board):
'Draw a random dev card from stack and update self.devcards'
if(self.resources['WHEAT'] >= 1 and self.resources['ORE'] >= 1 and self.resources['SHEEP'] >= 1): #Check if player has resources available
#Get alldev cards available
devCardsToDraw = []
for cardName, cardAmount in board.devCardStack.items():
devCardsToDraw += [cardName]*cardAmount
#IF there are no devCards left
if(devCardsToDraw == []):
print("No Dev Cards Left!")
return
devCardIndex = np.random.randint(0, len(devCardsToDraw))
#Get a random permutation and draw a card
devCardsToDraw = np.random.permutation(devCardsToDraw)
cardDrawn = devCardsToDraw[devCardIndex]
#Update player resources
self.resources['ORE'] -= 1
self.resources['WHEAT'] -= 1
self.resources['SHEEP'] -= 1
#If card is a victory point apply immediately, else add to new card list
if(cardDrawn == 'VP'):
self.victoryPoints += 1
board.devCardStack[cardDrawn] -= 1
self.devCards[cardDrawn] += 1
self.visibleVictoryPoints = self.victoryPoints - self.devCards['VP']
else:#Update player dev card and the stack
self.newDevCards.append(cardDrawn)
board.devCardStack[cardDrawn] -= 1
print("{} drew a {} from Development Card Stack".format(self.name, cardDrawn))
else:
print("Insufficient Resources for Dev Card. Cost: 1 ORE, 1 WHEAT, 1 SHEEP")
#Function to update dev card stack with dev cards drawn from prior turn
def updateDevCards(self):
for newCard in self.newDevCards:
self.devCards[newCard] += 1
#Reset the new card list to blank
self.newDevCards = []
#function to play a development card
def play_devCard(self, game):
'Update game state'
#Check if player can play a devCard this | |
#
logger.debug("%s journal abbreviation issn %r current %r normalized %r", rcsbId, issn, curAbbrev, revAbbrev)
cObj.setValue(revAbbrev, atName, ii)
return True
except Exception as e:
logger.exception("Failing for %r with %s", dataContainer.getName(), str(e))
return False
    def __updateJournalAbbreviation(self, rcsbId, issn, curAbbrev):
        """Return a normalized journal abbreviation for the given citation.

        Resolution order when an ISSN is present: Medline abbreviation, then
        an abbreviation derived from the CrossRef journal title, then a
        capword-normalized form of the current abbreviation.  Without an ISSN,
        special-cases "to be published"/"thesis" markers, otherwise
        capword-normalizes the current abbreviation.  Returns None on failure.
        """
        revAbbrev = None
        try:
            if issn:
                # Prefer the Medline abbreviation keyed on the ISSN.
                medlineAbbrev = self.__crP.getMedlineJournalAbbreviation(issn)
                # medlineIsoAbbrev = self.__crP.getMedlineJournalIsoAbbreviation(issn)
                # CrossRef lookups use the ISSN without the hyphen.
                crIssn = issn.replace("-", "")
                crTitle = self.__crP.getCrossRefJournalTitle(crIssn)
                #
                revAbbrev = medlineAbbrev
                if not medlineAbbrev and not crTitle:
                    # No external data at all: normalize what we were given.
                    logger.debug("%s: missing information for issn %r curAbbrev %r", rcsbId, issn, curAbbrev)
                    revAbbrev = capwords(curAbbrev.replace(".", " "))
                elif not medlineAbbrev:
                    # Derive an abbreviation from the CrossRef title.
                    revAbbrev = self.__jtaP.getJournalAbbreviation(crTitle, usePunctuation=False)
            else:
                # No ISSN: handle placeholder citations, else normalize.
                if curAbbrev.upper() in ["TO BE PUBLISHED", "IN PREPARATION"]:
                    revAbbrev = "To be published"
                elif curAbbrev.upper().startswith("THESIS"):
                    revAbbrev = "Thesis"
                else:
                    revAbbrev = capwords(curAbbrev.replace(".", " "))
                    logger.debug("%r: missing issn and non-standard abbrev for %r", rcsbId, curAbbrev)
                if not curAbbrev:
                    logger.info("%r: missing issn and journal abbrev", rcsbId)
            #
            logger.debug("%s: revised: %r current: %r", rcsbId, revAbbrev, curAbbrev)
        except Exception as e:
            logger.exception("Failing on %r %r %r with %r", rcsbId, issn, curAbbrev, str(e))
        return revAbbrev
def assignPrimaryCitation(self, dataContainer, catName, atName, **kwargs):
"""Normalize citation journal abbrev.
Args:
dataContainer (object): mmif.api.DataContainer object instance
catName (str): Category name
atName (str): Attribute name
Returns:
bool: True for success or False otherwise
"""
logger.debug("Starting catName %s atName %s kwargs %r", catName, atName, kwargs)
try:
if not dataContainer.exists(catName):
return False
#
cObj = dataContainer.getObj(catName)
if not cObj.hasAttribute(atName):
cObj.appendAttribute(atName)
#
for ii in range(cObj.getRowCount()):
citId = cObj.getValue("id", ii)
if citId.upper() == "PRIMARY":
cObj.setValue("Y", atName, ii)
else:
cObj.setValue("N", atName, ii)
return True
except Exception as e:
logger.exception("Failing for %r with %s", dataContainer.getName(), str(e))
return False
def __getEmdbIdentifiers(self, dataContainer):
"""[summary]
Args:
dataContainer ([type]): [description]
Returns:
[type]: [description]
#
loop_
_database_2.database_id
_database_2.database_code
PDB 6QUY
WWPDB D_1292100913
EMDB EMD-4644
#
loop_
_pdbx_database_related.db_name
_pdbx_database_related.details
_pdbx_database_related.db_id
_pdbx_database_related.content_type
EMDB 'HsCKK (human CAMSAP1) decorated 13pf taxol-GDP microtubule (asymmetric unit)' EMD-4643 'other EM volume'
PDB 'HsCKK (human CAMSAP1) decorated 13pf taxol-GDP microtubule (asymmetric unit)' 6QUS unspecified
EMDB 'NgCKK (N.Gruberi CKK) decorated 13pf taxol-GDP microtubule' EMD-4644 'associated EM volume'
#
"""
emdbIdD = {}
emdbIdAltD = {}
if dataContainer.exists("database_2"):
dbObj = dataContainer.getObj("database_2")
for ii in range(dbObj.getRowCount()):
dbId = dbObj.getValue("database_id", ii)
dbCode = dbObj.getValue("database_code", ii)
if dbId.upper() == "EMDB":
emdbIdD[dbCode] = "associated EM volume"
if dataContainer.exists("pdbx_database_related"):
drObj = dataContainer.getObj("pdbx_database_related")
for ii in range(drObj.getRowCount()):
dbCode = drObj.getValue("db_id", ii)
dbName = drObj.getValue("db_name", ii)
contentType = drObj.getValue("content_type", ii)
if dbName.upper() == "EMDB" and contentType.upper() == "ASSOCIATED EM VOLUME" and dbCode not in emdbIdD:
emdbIdD[dbCode] = "associated EM volume"
elif dbName.upper() == "EMDB" and contentType.upper() != "ASSOCIATED EM VOLUME" and dbCode not in emdbIdAltD:
emdbIdAltD[dbCode] = contentType
return emdbIdD, emdbIdAltD
    def buildContainerEntryIds(self, dataContainer, catName, **kwargs):
        """Load the input category with rcsb_entry_container_identifiers content.

        Populates entry/rcsb ids, the comma-separated entity id lists split by
        entity type, assembly ids, EMDB ids (associated and related) and model
        ids.  Missing lists are stored as "?".

        Args:
            dataContainer (object): mmif.api.DataContainer object instance
            catName (str): Category name

        Returns:
            bool: True for success or False otherwise
        """
        logger.debug("Starting catName %s kwargs %r", catName, kwargs)
        try:
            if not dataContainer.exists("entry"):
                return False
            # Create the target category on first use.
            if not dataContainer.exists(catName):
                dataContainer.append(DataCategory(catName, attributeNameList=self.__dApi.getAttributeNameList(catName)))
            #
            cObj = dataContainer.getObj(catName)
            tObj = dataContainer.getObj("entry")
            entryId = tObj.getValue("id", 0)
            cObj.setValue(entryId, "entry_id", 0)
            cObj.setValue(entryId, "rcsb_id", 0)
            #
            # All entity ids, then per-type subsets ("?" when a type is absent).
            tObj = dataContainer.getObj("entity")
            entityIdL = tObj.getAttributeValueList("id")
            cObj.setValue(",".join(entityIdL), "entity_ids", 0)
            #
            #
            tIdL = tObj.selectValuesWhere("id", "polymer", "type")
            tV = ",".join(tIdL) if tIdL else "?"
            cObj.setValue(tV, "polymer_entity_ids", 0)
            tIdL = tObj.selectValuesWhere("id", "non-polymer", "type")
            tV = ",".join(tIdL) if tIdL else "?"
            cObj.setValue(tV, "non-polymer_entity_ids", 0)
            #
            tIdL = tObj.selectValuesWhere("id", "branched", "type")
            tV = ",".join(tIdL) if tIdL else "?"
            cObj.setValue(tV, "branched_entity_ids", 0)
            #
            # tIdL = tObj.selectValuesWhere("id", "water", "type")
            # tV = ",".join(tIdL) if tIdL else "?"
            # cObj.setValue(tV, "water_entity_ids", 0)
            #
            tObj = dataContainer.getObj("pdbx_struct_assembly")
            assemblyIdL = tObj.getAttributeValueList("id") if tObj else []
            tV = ",".join(assemblyIdL) if assemblyIdL else "?"
            cObj.setValue(tV, "assembly_ids", 0)
            #
            # Associated and related EMDB accession codes.
            emdbIdD, emdbIdAltD = self.__getEmdbIdentifiers(dataContainer)
            tV = ",".join([tId for tId in emdbIdD]) if emdbIdD else "?"
            cObj.setValue(tV, "emdb_ids", 0)
            tV = ",".join([tId for tId in emdbIdAltD]) if emdbIdAltD else "?"
            cObj.setValue(tV, "related_emdb_ids", 0)
            #
            modelIdList = self.__commonU.getModelIdList(dataContainer)
            tV = ",".join([str(tId) for tId in modelIdList]) if modelIdList else "?"
            cObj.setValue(tV, "model_ids", 0)
            #
            return True
        except Exception as e:
            logger.exception("For %s failing with %s", catName, str(e))
            return False
def consolidateAccessionDetails(self, dataContainer, catName, **kwargs):
    """Consolidate accession details into the rcsb_accession_info category. Also include
    a flag for the availability of any supporting experimental data.

    Args:
        dataContainer (object): mmcif.api.DataContainer object instance
        catName (str): Category name (rcsb_accession_info)

    Returns:
        bool: True for success or False otherwise

    For example:
        _rcsb_accession_info.entry_id 1ABC
        _rcsb_accession_info.status_code REL
        _rcsb_accession_info.deposit_date 2018-01-11
        _rcsb_accession_info.initial_release_date 2018-03-23
        _rcsb_accession_info.major_revision 1
        _rcsb_accession_info.minor_revision 2
        _rcsb_accession_info.revision_date 2018-10-25

    Taking data values from:
        _pdbx_database_status.entry_id 3OQP
        _pdbx_database_status.deposit_site RCSB
        _pdbx_database_status.process_site RCSB
        _pdbx_database_status.recvd_initial_deposition_date 2010-09-03
        _pdbx_database_status.status_code REL
        _pdbx_database_status.status_code_sf REL
        _pdbx_database_status.status_code_mr ?
        _pdbx_database_status.status_code_cs ?
        _pdbx_database_status.pdb_format_compatible Y
        _pdbx_database_status.methods_development_category ?
        _pdbx_database_status.SG_entry Y
        #
        loop_
        _pdbx_audit_revision_history.ordinal
        _pdbx_audit_revision_history.data_content_type
        _pdbx_audit_revision_history.major_revision
        _pdbx_audit_revision_history.minor_revision
        _pdbx_audit_revision_history.revision_date
        1 'Structure model' 1 0 2010-10-13
        2 'Structure model' 1 1 2011-07-13
        3 'Structure model' 1 2 2011-07-20
        4 'Structure model' 1 3 2014-11-12
        5 'Structure model' 1 4 2017-10-25
        #
        # - For EM and SAS -
        _pdbx_database_related.db_name EMDB
        _pdbx_database_related.details
        'pseudo-atomic model of the RNA polymerase lambda-based antitermination complex solved by cryo-EM'
        _pdbx_database_related.db_id EMD-3561
        _pdbx_database_related.content_type 'associated EM volume'
    """
    try:
        logger.debug("Starting with %r %r %r", dataContainer.getName(), catName, kwargs)
        #
        # Exit unless at least one source category is present.
        if not (dataContainer.exists("pdbx_database_status") or dataContainer.exists("pdbx_audit_revision_history")):
            return False
        # Create the new target category
        if not dataContainer.exists(catName):
            dataContainer.append(DataCategory(catName, attributeNameList=self.__dApi.getAttributeNameList(catName)))
        cObj = dataContainer.getObj(catName)
        #
        # Copy entry id, status and deposit date from pdbx_database_status.
        tObj = dataContainer.getObj("pdbx_database_status")
        entryId = tObj.getValue("entry_id", 0)
        statusCode = tObj.getValue("status_code", 0)
        depositDate = tObj.getValue("recvd_initial_deposition_date", 0)
        #
        cObj.setValue(entryId, "entry_id", 0)
        cObj.setValue(statusCode, "status_code", 0)
        cObj.setValue(depositDate, "deposit_date", 0)
        # cObj.setValue(depositDate[:4], "deposit_year", 0)
        #
        # -- Experimental data availability --
        #
        # "Y" when any of the _sf/_mr/_cs status codes is REL, or when a
        # related EM volume or SAS data set is recorded.
        expDataRelFlag = "N"
        statusSf = tObj.getValueOrDefault("status_code_sf", 0, defaultValue=None)
        statusMr = tObj.getValueOrDefault("status_code_mr", 0, defaultValue=None)
        statusCs = tObj.getValueOrDefault("status_code_cs", 0, defaultValue=None)
        #
        if statusSf == "REL" or statusMr == "REL" or statusCs == "REL":
            expDataRelFlag = "Y"
        else:
            if dataContainer.exists("pdbx_database_related"):
                rObj = dataContainer.getObj("pdbx_database_related")
                ctL = rObj.getAttributeValueList("content_type")
                if "associated EM volume" in ctL or "associated SAS data" in ctL:
                    expDataRelFlag = "Y"
        #
        cObj.setValue(expDataRelFlag, "has_released_experimental_data", 0)
        #
        tObj = dataContainer.getObj("pdbx_audit_revision_history")
        nRows = tObj.getRowCount()
        # Assuming the default sorting order from the release module -
        # first row holds the initial release, last row the latest revision.
        releaseDate = tObj.getValue("revision_date", 0)
        minorRevision = tObj.getValue("minor_revision", nRows - 1)
        majorRevision = tObj.getValue("major_revision", nRows - 1)
        revisionDate = tObj.getValue("revision_date", nRows - 1)
        cObj.setValue(releaseDate, "initial_release_date", 0)
        # cObj.setValue(releaseDate[:4], "initial_release_year", 0)
        cObj.setValue(minorRevision, "minor_revision", 0)
        cObj.setValue(majorRevision, "major_revision", 0)
        cObj.setValue(revisionDate, "revision_date", 0)
        #
        return True
    except Exception as e:
        logger.exception("In %s for %s failing with %s", dataContainer.getName(), catName, str(e))
        return False
def filterRedundantRecords(self, dataContainer, catName, **kwargs):
    """Filter redundant records from the input category subject to excluded/included attributes.

    Currently only pdbx_related_exp_data_set is deduplicated. Returns True when the
    category exists with at least two rows and processing succeeded, False otherwise.
    """
    try:
        logger.debug("Starting with %r %r %r", dataContainer.getName(), catName, kwargs)
        # Nothing to do when the category is absent or has fewer than two rows.
        if not dataContainer.exists(catName):
            return False
        catObj = dataContainer.getObj(catName)
        if catObj.getRowCount() < 2:
            return False
        if catName == "pdbx_related_exp_data_set":
            logger.debug("Filtering %r %r", dataContainer.getName(), catName)
            try:
                # Drop the ordinal so duplicate detection ignores it, then renumber.
                catObj.removeAttribute("ordinal")
                catObj.removeDuplicateRows()
                catObj.appendAttribute("ordinal")
                for rowIndex in range(catObj.getRowCount()):
                    catObj.setValue(rowIndex + 1, "ordinal", rowIndex)
            except Exception as e:
                logger.exception("%s failing with %s", dataContainer.getName(), str(e))
                return False
        return True
    except Exception as e:
        logger.exception("For %s %r failing with %s", dataContainer.getName(), catName, str(e))
    return False
def addEntryInfo(self, dataContainer, catName, **kwargs):
"""
Add _rcsb_entry_info, for example:
_rcsb_entry_info.entry_id 1ABC
_rcsb_entry_info.polymer_composition 'heteromeric protein'
_rcsb_entry_info.experimental_method 'multiple methods'
_rcsb_entry_info.experimental_method_count 2
_rcsb_entry_info.polymer_entity_count 2
_rcsb_entry_info.entity_count 2
_rcsb_entry_info.nonpolymer_entity_count 2
_rcsb_entry_info.branched_entity_count 0
_rcsb_entry_info.software_programs_combined 'Phenix;RefMac'
....
Also add the related field:
_entity_poly.rcsb_entity_polymer_type
'Protein' 'polypeptide(D) or polypeptide(L)'
'DNA' 'polydeoxyribonucleotide'
'RNA' 'polyribonucleotide'
'NA-hybrid' 'polydeoxyribonucleotide/polyribonucleotide hybrid'
'Other' 'polysaccharide(D), polysaccharide(L), cyclic-pseudo-peptide, peptide nucleic acid, or other'
#
_rcsb_entry_info.deposited_polymer_monomer_count
'polymer_entity_count_protein',
'polymer_entity_count_nucleic_acid',
'polymer_entity_count_nucleic_acid_hybrid',
'polymer_entity_count_DNA',
'polymer_entity_count_RNA',
"""
try:
logger.debug("Starting with %r %r %r", dataContainer.getName(), catName, kwargs)
# Exit if source categories are missing
if not (dataContainer.exists("entity") and dataContainer.exists("entry")):
return False
if not (dataContainer.exists("exptl") or dataContainer.exists("ma_model_list")):
return False
#
# Create the new target category rcsb_entry_info
if not dataContainer.exists(catName):
dataContainer.append(DataCategory(catName, attributeNameList=self.__dApi.getAttributeNameList(catName)))
# --------------------------------------------------------------------------------------------------------
# catName = rcsb_entry_info
cObj = dataContainer.getObj(catName)
#
# --------------------------------------------------------------------------------------------------------
# Filter experimental methods
#
methodCount = 0
expMethod = None
if dataContainer.exists("exptl"):
xObj = dataContainer.getObj("exptl")
entryId = xObj.getValue("entry_id", 0)
methodL = xObj.getAttributeValueList("method")
methodCount, expMethod = self.__commonU.filterExperimentalMethod(methodL)
elif dataContainer.exists("ma_model_list"):
tObj = dataContainer.getObj("entry")
entryId = tObj.getValue("id", 0)
| |
if not resource:
return None
res = job_pb2.RunAlphaJobTemplateTemplateContainersEnvValueSourceSecretKeyRef()
if Primitive.to_proto(resource.secret):
res.secret = Primitive.to_proto(resource.secret)
if Primitive.to_proto(resource.version):
res.version = Primitive.to_proto(resource.version)
return res
@classmethod
def from_proto(cls, resource):
    """Rebuild a JobTemplateTemplateContainersEnvValueSourceSecretKeyRef from a proto; None passes through."""
    if resource:
        return JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(
            secret=Primitive.from_proto(resource.secret),
            version=Primitive.from_proto(resource.version),
        )
    return None
class JobTemplateTemplateContainersEnvValueSourceSecretKeyRefArray(object):
    """List converter for JobTemplateTemplateContainersEnvValueSourceSecretKeyRef."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [
                JobTemplateTemplateContainersEnvValueSourceSecretKeyRef.to_proto(item)
                for item in resources
            ]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [
            JobTemplateTemplateContainersEnvValueSourceSecretKeyRef.from_proto(item)
            for item in resources
        ]
class JobTemplateTemplateContainersResources(object):
    """Wrapper for container resource settings (limits map and cpu_idle flag)."""

    def __init__(self, limits: dict = None, cpu_idle: bool = None):
        self.limits = limits
        self.cpu_idle = cpu_idle

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateContainersResources()
        limits = Primitive.to_proto(resource.limits)
        if limits:
            proto.limits = limits
        cpu_idle = Primitive.to_proto(resource.cpu_idle)
        if cpu_idle:
            proto.cpu_idle = cpu_idle
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateContainersResources(
            limits=Primitive.from_proto(resource.limits),
            cpu_idle=Primitive.from_proto(resource.cpu_idle),
        )
class JobTemplateTemplateContainersResourcesArray(object):
    """List converter for JobTemplateTemplateContainersResources."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobTemplateTemplateContainersResources.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobTemplateTemplateContainersResources.from_proto(item) for item in resources]
class JobTemplateTemplateContainersPorts(object):
    """Wrapper for a container port (name and port number)."""

    def __init__(self, name: str = None, container_port: int = None):
        self.name = name
        self.container_port = container_port

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateContainersPorts()
        name = Primitive.to_proto(resource.name)
        if name:
            proto.name = name
        container_port = Primitive.to_proto(resource.container_port)
        if container_port:
            proto.container_port = container_port
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateContainersPorts(
            name=Primitive.from_proto(resource.name),
            container_port=Primitive.from_proto(resource.container_port),
        )
class JobTemplateTemplateContainersPortsArray(object):
    """List converter for JobTemplateTemplateContainersPorts."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobTemplateTemplateContainersPorts.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobTemplateTemplateContainersPorts.from_proto(item) for item in resources]
class JobTemplateTemplateContainersVolumeMounts(object):
    """Wrapper for a container volume mount (volume name and mount path)."""

    def __init__(self, name: str = None, mount_path: str = None):
        self.name = name
        self.mount_path = mount_path

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateContainersVolumeMounts()
        name = Primitive.to_proto(resource.name)
        if name:
            proto.name = name
        mount_path = Primitive.to_proto(resource.mount_path)
        if mount_path:
            proto.mount_path = mount_path
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateContainersVolumeMounts(
            name=Primitive.from_proto(resource.name),
            mount_path=Primitive.from_proto(resource.mount_path),
        )
class JobTemplateTemplateContainersVolumeMountsArray(object):
    """List converter for JobTemplateTemplateContainersVolumeMounts."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [
                JobTemplateTemplateContainersVolumeMounts.to_proto(item)
                for item in resources
            ]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [
            JobTemplateTemplateContainersVolumeMounts.from_proto(item)
            for item in resources
        ]
class JobTemplateTemplateVolumes(object):
    """Wrapper for a template volume: name plus secret or Cloud SQL instance source."""

    def __init__(
        self, name: str = None, secret: dict = None, cloud_sql_instance: dict = None
    ):
        self.name = name
        self.secret = secret
        self.cloud_sql_instance = cloud_sql_instance

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateVolumes()
        name = Primitive.to_proto(resource.name)
        if name:
            proto.name = name
        # Message-typed fields use CopyFrom when present and ClearField otherwise.
        secret = JobTemplateTemplateVolumesSecret.to_proto(resource.secret)
        if secret:
            proto.secret.CopyFrom(secret)
        else:
            proto.ClearField("secret")
        instance = JobTemplateTemplateVolumesCloudSqlInstance.to_proto(
            resource.cloud_sql_instance
        )
        if instance:
            proto.cloud_sql_instance.CopyFrom(instance)
        else:
            proto.ClearField("cloud_sql_instance")
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateVolumes(
            name=Primitive.from_proto(resource.name),
            secret=JobTemplateTemplateVolumesSecret.from_proto(resource.secret),
            cloud_sql_instance=JobTemplateTemplateVolumesCloudSqlInstance.from_proto(
                resource.cloud_sql_instance
            ),
        )
class JobTemplateTemplateVolumesArray(object):
    """List converter for JobTemplateTemplateVolumes."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobTemplateTemplateVolumes.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobTemplateTemplateVolumes.from_proto(item) for item in resources]
class JobTemplateTemplateVolumesSecret(object):
    """Wrapper for a secret volume source (secret name, item list, default mode)."""

    def __init__(
        self, secret: str = None, items: list = None, default_mode: int = None
    ):
        self.secret = secret
        self.items = items
        self.default_mode = default_mode

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateVolumesSecret()
        secret = Primitive.to_proto(resource.secret)
        if secret:
            proto.secret = secret
        # Repeated message field: extend rather than assign.
        items = JobTemplateTemplateVolumesSecretItemsArray.to_proto(resource.items)
        if items:
            proto.items.extend(items)
        default_mode = Primitive.to_proto(resource.default_mode)
        if default_mode:
            proto.default_mode = default_mode
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateVolumesSecret(
            secret=Primitive.from_proto(resource.secret),
            items=JobTemplateTemplateVolumesSecretItemsArray.from_proto(resource.items),
            default_mode=Primitive.from_proto(resource.default_mode),
        )
class JobTemplateTemplateVolumesSecretArray(object):
    """List converter for JobTemplateTemplateVolumesSecret."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobTemplateTemplateVolumesSecret.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobTemplateTemplateVolumesSecret.from_proto(item) for item in resources]
class JobTemplateTemplateVolumesSecretItems(object):
    """Wrapper for a secret volume item (path, secret version, file mode)."""

    def __init__(self, path: str = None, version: str = None, mode: int = None):
        self.path = path
        self.version = version
        self.mode = mode

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateVolumesSecretItems()
        path = Primitive.to_proto(resource.path)
        if path:
            proto.path = path
        version = Primitive.to_proto(resource.version)
        if version:
            proto.version = version
        mode = Primitive.to_proto(resource.mode)
        if mode:
            proto.mode = mode
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateVolumesSecretItems(
            path=Primitive.from_proto(resource.path),
            version=Primitive.from_proto(resource.version),
            mode=Primitive.from_proto(resource.mode),
        )
class JobTemplateTemplateVolumesSecretItemsArray(object):
    """List converter for JobTemplateTemplateVolumesSecretItems."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobTemplateTemplateVolumesSecretItems.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobTemplateTemplateVolumesSecretItems.from_proto(item) for item in resources]
class JobTemplateTemplateVolumesCloudSqlInstance(object):
    """Wrapper for a Cloud SQL instance volume source (list of instance names)."""

    def __init__(self, instances: list = None):
        self.instances = instances

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateVolumesCloudSqlInstance()
        # Repeated primitive field: extend rather than assign.
        instances = Primitive.to_proto(resource.instances)
        if instances:
            proto.instances.extend(instances)
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateVolumesCloudSqlInstance(
            instances=Primitive.from_proto(resource.instances),
        )
class JobTemplateTemplateVolumesCloudSqlInstanceArray(object):
    """List converter for JobTemplateTemplateVolumesCloudSqlInstance."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [
                JobTemplateTemplateVolumesCloudSqlInstance.to_proto(item)
                for item in resources
            ]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [
            JobTemplateTemplateVolumesCloudSqlInstance.from_proto(item)
            for item in resources
        ]
class JobTemplateTemplateVPCAccess(object):
    """Wrapper for VPC access settings (connector name and egress enum)."""

    def __init__(self, connector: str = None, egress: str = None):
        self.connector = connector
        self.egress = egress

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateVPCAccess()
        connector = Primitive.to_proto(resource.connector)
        if connector:
            proto.connector = connector
        egress = JobTemplateTemplateVPCAccessEgressEnum.to_proto(resource.egress)
        if egress:
            proto.egress = egress
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTemplateTemplateVPCAccess(
            connector=Primitive.from_proto(resource.connector),
            egress=JobTemplateTemplateVPCAccessEgressEnum.from_proto(resource.egress),
        )
class JobTemplateTemplateVPCAccessArray(object):
    """List converter for JobTemplateTemplateVPCAccess."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobTemplateTemplateVPCAccess.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobTemplateTemplateVPCAccess.from_proto(item) for item in resources]
class JobTerminalCondition(object):
    """Wrapper for the Job terminal condition (type, state, timing and reasons)."""

    def __init__(
        self,
        type: str = None,
        state: str = None,
        message: str = None,
        last_transition_time: str = None,
        severity: str = None,
        reason: str = None,
        internal_reason: str = None,
        domain_mapping_reason: str = None,
        revision_reason: str = None,
        execution_reason: str = None,
    ):
        self.type = type
        self.state = state
        self.message = message
        self.last_transition_time = last_transition_time
        self.severity = severity
        self.reason = reason
        self.internal_reason = internal_reason
        self.domain_mapping_reason = domain_mapping_reason
        self.revision_reason = revision_reason
        self.execution_reason = execution_reason

    @staticmethod
    def _codecs():
        """Return (field name, codec class) pairs in proto field order.

        Resolved lazily so the enum codec classes may be defined later in
        this module.
        """
        return (
            ("type", Primitive),
            ("state", JobTerminalConditionStateEnum),
            ("message", Primitive),
            ("last_transition_time", Primitive),
            ("severity", JobTerminalConditionSeverityEnum),
            ("reason", JobTerminalConditionReasonEnum),
            ("internal_reason", JobTerminalConditionInternalReasonEnum),
            ("domain_mapping_reason", JobTerminalConditionDomainMappingReasonEnum),
            ("revision_reason", JobTerminalConditionRevisionReasonEnum),
            ("execution_reason", JobTerminalConditionExecutionReasonEnum),
        )

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTerminalCondition()
        for field, codec in cls._codecs():
            value = codec.to_proto(getattr(resource, field))
            if value:
                setattr(proto, field, value)
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobTerminalCondition(
            **{
                field: codec.from_proto(getattr(resource, field))
                for field, codec in cls._codecs()
            }
        )
class JobTerminalConditionArray(object):
    """List converter for JobTerminalCondition."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobTerminalCondition.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobTerminalCondition.from_proto(item) for item in resources]
class JobConditions(object):
    """Wrapper for a Job condition entry (type, state, timing and reasons)."""

    def __init__(
        self,
        type: str = None,
        state: str = None,
        message: str = None,
        last_transition_time: str = None,
        severity: str = None,
        reason: str = None,
        revision_reason: str = None,
        execution_reason: str = None,
    ):
        self.type = type
        self.state = state
        self.message = message
        self.last_transition_time = last_transition_time
        self.severity = severity
        self.reason = reason
        self.revision_reason = revision_reason
        self.execution_reason = execution_reason

    @staticmethod
    def _codecs():
        """Return (field name, codec class) pairs in proto field order.

        Resolved lazily so the enum codec classes may be defined later in
        this module.
        """
        return (
            ("type", Primitive),
            ("state", JobConditionsStateEnum),
            ("message", Primitive),
            ("last_transition_time", Primitive),
            ("severity", JobConditionsSeverityEnum),
            ("reason", JobConditionsReasonEnum),
            ("revision_reason", JobConditionsRevisionReasonEnum),
            ("execution_reason", JobConditionsExecutionReasonEnum),
        )

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobConditions()
        for field, codec in cls._codecs():
            value = codec.to_proto(getattr(resource, field))
            if value:
                setattr(proto, field, value)
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobConditions(
            **{
                field: codec.from_proto(getattr(resource, field))
                for field, codec in cls._codecs()
            }
        )
class JobConditionsArray(object):
    """List converter for JobConditions."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobConditions.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobConditions.from_proto(item) for item in resources]
class JobLatestSucceededExecution(object):
    """Wrapper for the latest succeeded execution reference (name and create time)."""

    def __init__(self, name: str = None, create_time: str = None):
        self.name = name
        self.create_time = create_time

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobLatestSucceededExecution()
        name = Primitive.to_proto(resource.name)
        if name:
            proto.name = name
        create_time = Primitive.to_proto(resource.create_time)
        if create_time:
            proto.create_time = create_time
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobLatestSucceededExecution(
            name=Primitive.from_proto(resource.name),
            create_time=Primitive.from_proto(resource.create_time),
        )
class JobLatestSucceededExecutionArray(object):
    """List converter for JobLatestSucceededExecution."""

    @classmethod
    def to_proto(cls, resources):
        """Map wrapper objects to protos; falsy input is passed through unchanged."""
        if resources:
            return [JobLatestSucceededExecution.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(cls, resources):
        """Map protos back to wrapper objects."""
        return [JobLatestSucceededExecution.from_proto(item) for item in resources]
class JobLatestCreatedExecution(object):
    """Wrapper for the latest created execution reference (name and create time)."""

    def __init__(self, name: str = None, create_time: str = None):
        self.name = name
        self.create_time = create_time

    @classmethod
    def to_proto(cls, resource):
        """Convert a wrapper instance to its proto message; None passes through."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobLatestCreatedExecution()
        name = Primitive.to_proto(resource.name)
        if name:
            proto.name = name
        create_time = Primitive.to_proto(resource.create_time)
        if create_time:
            proto.create_time = create_time
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a wrapper instance from a proto message; None passes through."""
        if not resource:
            return None
        return JobLatestCreatedExecution(
            name=Primitive.from_proto(resource.name),
            create_time=Primitive.from_proto(resource.create_time),
        )
class JobLatestCreatedExecutionArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
| |
database."""
if self.partials_display_pw.pane(self.partials_pw, "weight"):
# Size is assumed to be (1,1) as created at first call and grid
# will be filled after <Configure> event to size the widget.
# Subsequent calls are not associated with a <Configure> event so
# fill the widget by after_idle(...).
if self.base_partials.get_frame().winfo_width() != 1:
self.base_partials.set_partial_key()
self.partials_pw.after_idle(self.base_partials.load_new_index)
def show_partial_position_grid(self, database):
    """Show widget containing list of partial positions in database.

    Recreates the datasource for the partial-position grid, reconfigures
    the paned windows, then displays the grid and gives it the focus.
    """
    self._create_partial_position_datasource(database)
    ## Hack to cope with GUI use while Import in progress.
    # if self.base_partials.get_data_source().get_database() is not None:
    #     self.base_partials.clear_grid_keys()
    self.calculate_payload_availability()
    self.configure_panedwindows()
    self._show_base_partials()
    self.base_partials.set_focus()
def _create_repertoire_datasource(self, database):
    """Attach a new repertoire-list DataSource to the repertoire grid."""
    # Probably wrong because datasource = F(active item), but this is a
    # constant at present.
    source = DataSource(
        database,
        REPERTOIRE_FILE_DEF,
        OPENING_FIELD_DEF,
        make_ChessDBrowRepertoire(self),
    )
    self.base_repertoires.set_data_source(
        source,
        self.base_repertoires.on_data_change,
    )
    if self.repertoire_items.any_items_displayed_of_type():
        self.set_repertoire_data_source()
        self.repertoire_items.set_insert_or_delete_on_all_items()
def _show_base_repertoires(self):
    """Show widget containing list of repertoire games in database."""
    if self.repertoires_display_pw.pane(self.repertoires_pw, "weight"):
        # Size is assumed to be (1,1) as created at first call and grid
        # will be filled after <Configure> event to size the widget.
        # Subsequent calls are not associated with a <Configure> event so
        # fill the widget by after_idle(...).
        if self.base_repertoires.get_frame().winfo_width() != 1:
            self.base_repertoires.set_partial_key()
            self.repertoires_pw.after_idle(
                self.base_repertoires.load_new_index
            )
def show_repertoire_grid(self, database):
    """Show widget containing list of repertoire games in database.

    Recreates the datasource for the repertoire grid, reconfigures the
    paned windows, then displays the grid and gives it the focus.
    """
    self._create_repertoire_datasource(database)
    ## Hack to cope with GUI use while Import in progress.
    # if self.base_repertoires.get_data_source().get_database() is not None:
    #     self.base_repertoires.clear_grid_keys()
    self.calculate_payload_availability()
    self.configure_panedwindows()
    self._show_base_repertoires()
    self.base_repertoires.set_focus()
def set_import_subprocess(self, subprocess_id=None):
    """Record subprocess_id as the import subprocess.

    Raises ChessUIError if an import subprocess is already active.
    """
    if not self.is_import_subprocess_active():
        self._import_subprocess = subprocess_id
        return
    raise ChessUIError("Attempt to set import subprocess while active")
def get_import_subprocess(self):
    """Return the import subprocess object (None if none has been set)."""
    return self._import_subprocess
def get_import_subprocess_poll(self):
    """Poll the import subprocess and return the response.

    None indicates the subprocess has not terminated (see
    is_import_subprocess_active).
    """
    return self._import_subprocess.poll()
def is_import_subprocess_active(self):
    """Return True if an import subprocess exists and has not terminated."""
    subprocess_ = self._import_subprocess
    return subprocess_ is not None and subprocess_.poll() is None
def get_toplevel(self):
    """Return the toplevel widget containing the top panedwindow."""
    return self.top_pw.winfo_toplevel()
def process_uci_commands_from_engines_and_analysis_requests(self):
    """Periodically process UCI actions required and response queues.

    Collects engine responses and analysis requests, refreshes the
    analysis widgets, then reschedules itself via tkinter after() to
    run again in one second.
    """
    self.uci.uci.get_engine_responses()
    self.uci.uci.get_analysis_requests()
    self.refresh_analysis_widgets()
    self.top_pw.after(
        1000, self.process_uci_commands_from_engines_and_analysis_requests
    )
@staticmethod
def _configure_font(target, source):
"""Set target font properties from source font."""
for property_ in "family", "weight", "slant", "size":
target[property_] = source[property_]
def _set_board_colours(self, sbg, bbg, bfg):
    """Set colours and fonts used to display games.

    sbg == True - set game score colours
    bbg == True - set board square colours
    bfg == True - set board piece colours
    """
    # Widgets destroyed while iterating raise TclError; collect such items
    # and prune them from their containers afterwards.
    dead = []
    item_lists = (
        self.game_items.order,
        self.partial_items.order,
        self.repertoire_items.order,
        self.games_and_repertoires_in_toplevels,
        self.partials_in_toplevels,
    )
    for item_list in item_lists:
        for item in item_list:
            try:
                item.set_colours(sbg, bbg, bfg)
            except tkinter.TclError:
                dead.append((item, item_list))
    for item, item_list in dead:
        item_list.remove(item)
def get_export_filename(self, datatype, pgn=True):
    """Return filename to contain export of datatype, or None.

    Export is refused while an import subprocess is active or when no
    database is open; the user is told why via a message box.
    """
    title = f"Export {datatype}"
    if self.is_import_subprocess_active():
        tkinter.messagebox.showinfo(
            parent=self.get_toplevel(),
            title=title,
            message="An import of data is in progress",
        )
        return None
    if not self.database:
        tkinter.messagebox.showinfo(
            parent=self.get_toplevel(),
            title=title,
            message="Open the database from which the export is to be done.",
        )
        return None
    return self._get_export_filename(datatype, pgn, title)
def get_export_filename_for_single_item(self, datatype, pgn=True):
    """Return filename to contain export of a single datatype item, or None."""
    title = f"Export {datatype}"
    return self._get_export_filename(datatype, pgn, title)
def get_export_folder(self):
    """Return filenames to contain text export of the database, or None.

    Returns a 3-tuple of filenames (games, repertoires, partials) inside
    a user-chosen directory, or None when export cannot proceed: an
    import is in progress, no database is open, a dialog is cancelled,
    or one of the target files already exists.
    """
    title = "Export database as Text"
    if self.is_import_subprocess_active():
        tkinter.messagebox.showinfo(
            parent=self.get_toplevel(),
            title=title,
            message="An import of data is in progress",
        )
        return None
    if not self.database:
        tkinter.messagebox.showinfo(
            parent=self.get_toplevel(),
            title=title,
            message="".join(
                ("Open the database from which the export is to be done.",)
            ),
        )
        return None
    filename = tkinter.filedialog.askdirectory(
        parent=self.get_toplevel(), title=title, initialdir="~"
    )
    if not filename:
        tkinter.messagebox.showwarning(
            parent=self.get_toplevel(),
            title=title,
            message="Database not exported",
        )
        return None
    # Derive the three export file names from the chosen directory's basename.
    bfn = os.path.basename(filename)
    fns = (
        os.path.join(filename, "".join((bfn, "_games", ".txt"))),
        os.path.join(filename, "".join((bfn, "_repertoires", ".txt"))),
        os.path.join(filename, "".join((bfn, "_partials", ".txt"))),
    )
    if not tkinter.messagebox.askokcancel(
        parent=self.get_toplevel(),
        title=title,
        message="".join(
            (
                "The database will be exported to files:\n\n",
                "\n\n".join((os.path.basename(f) for f in fns)),
                "\n\nprovided none of these already exist.\n",
            )
        ),
    ):
        return None
    # Refuse to overwrite: abandon the export if any target file exists.
    for filename in fns:
        if os.path.exists(filename):
            tkinter.messagebox.showinfo(
                parent=self.get_toplevel(),
                title=title,
                message="\n".join(
                    (
                        "Cannot export because file\n",
                        filename,
                        "\nalready exists.\n",
                    )
                ),
            )
            return None
    return fns
def _get_export_filename(self, datatype, pgn, title):
    """Return filename to contain export of datatype or None."""
    # PGN exports get a ".pgn" suffix; everything else is plain text.
    extn = "pgn" if pgn else "txt"
    chosen = tkinter.filedialog.asksaveasfilename(
        parent=self.get_toplevel(),
        title=title,
        defaultextension="".join((".", extn)),
        filetypes=((datatype, ".".join(("*", extn))),),
    )
    if chosen:
        return chosen
    # The dialogue was cancelled: tell the user nothing was written.
    tkinter.messagebox.showwarning(
        parent=self.get_toplevel(),
        title=title,
        message=" ".join((datatype, "file not saved")),
    )
    return None
def refresh_analysis_widgets(self):
    """Refresh game widgets with updated chess engine analysis."""
    # Items whose widgets raise TclError (destroyed) are collected and
    # dropped after the scan so containers are not mutated mid-iteration.
    stale = []
    containers = (
        self.game_items.order,
        self.repertoire_items.order,
        self.games_and_repertoires_in_toplevels,
    )
    for container in containers:
        for item in container:
            try:
                if item.current is not None:
                    position = item.tagpositionmap[item.current]
                else:
                    try:
                        position = item.fen_tag_tuple_square_piece_map()
                    except ScoreNoGameException:
                        # No game in the score: nothing to refresh.
                        continue
                item.refresh_analysis_widget(item.get_analysis(*position))
            except tkinter.TclError:
                stale.append((item, container))
    for item, container in stale:
        container.remove(item)
@staticmethod
def _give_focus(current, traverse):
    """Give focus to adjacent widget type in traversal order."""
    # Walk the cyclic traversal map starting after *current*; stop at
    # the first visible widget, or give up after one full cycle.
    candidate = traverse[current]
    while candidate != current:
        if candidate.is_visible():
            candidate.set_focus()
            return
        candidate = traverse[candidate]
def give_focus_backward(self, current):
    """Give focus to previous widget type in traversal order."""
    # Delegate to the shared walker with the backward traversal map.
    backward_map = self.traverse_backward
    self._give_focus(current, backward_map)
def give_focus_forward(self, current):
    """Give focus to next widget type in traversal order."""
    # Delegate to the shared walker with the forward traversal map.
    forward_map = self.traverse_forward
    self._give_focus(current, forward_map)
def show_all_panedwindows(self):
    """Show all panes with weights prior to toggle to single pane view."""
    if not self.single_view:
        # Already in multi-pane view; nothing to restore.
        return
    weights = self.pw_current_weights
    for pane_widget, holder in self.pw_parent_map.items():
        holder.pane(pane_widget, weight=weights[pane_widget])
    self.single_view = False
    # Grids must be re-laid-out for the restored pane sizes.
    for reconfigure in (
        self.configure_game_grid,
        self.configure_partial_grid,
        self.configure_repertoire_grid,
        self.configure_selection_grid,
    ):
        reconfigure()
def show_just_panedwindow_with_focus(self, gainfocus):
    """Show pane containing widget with focus and hide the other panes."""
    # Invert payload_parent_map so a pane widget can be mapped back to
    # its payload.
    ppmi = {v: k for k, v in self.payload_parent_map.items()}
    # Climb the Tk widget tree from the widget gaining focus until a
    # payload pane or the top panedwindow is reached.
    widget = gainfocus
    while True:
        if widget in ppmi:
            if widget in self.pw_parent_map:
                # show_payload_panedwindows returns False when the
                # payload is unavailable, cancelling single-pane view.
                self.single_view = True
                self.single_view = self.show_payload_panedwindows(
                    ppmi[widget]
                )
            break
        if widget is self.top_pw:
            break
        widget = self.top_pw.nametowidget(widget.winfo_parent())
    if self.single_view:
        self.configure_game_grid()
        self.configure_partial_grid()
        self.configure_repertoire_grid()
def show_payload_panedwindows(self, payload):
    """Show just payload's ancestor's panes if payload available."""
    if not (self.single_view and self.payload_available[payload]):
        return False
    # Hide every pane, then re-enable only the chain of panes between
    # the payload and the top panedwindow.
    for pane_widget, holder in self.pw_parent_map.items():
        holder.pane(pane_widget, weight=0)
    ancestor = self.payload_parent_map[payload]
    while ancestor in self.pw_parent_map:
        self.pw_parent_map[ancestor].pane(ancestor, weight=1)
        ancestor = self.pw_parent_map[ancestor]
    return True
def calculate_payload_availability(self):
    """Calculate availability of data for display.

    Refreshes payload_available from each payload, zeroes the current
    weights of every ancestor pane, then restores weights on the
    ancestor chains of available payloads.  Finally, the three *_games
    child panes are enabled when their *_items payloads are available
    and games are present in the database.
    """
    pla = self.payload_available
    ppm = self.payload_parent_map
    pwpm = self.pw_parent_map
    pwcw = self.pw_current_weights
    pww = self.pw_weights
    # Pass 1: refresh availability and clear weights up each chain.
    for payload in pla:
        pla[payload] = payload.is_payload_available()
        widget = ppm[payload]
        while widget in pwpm:
            pwcw[widget] = 0
            widget = pwpm[widget]
    # Pass 2: restore configured weights for available payloads.
    for key, value in pla.items():
        widget = ppm[key]
        while widget in pwpm:
            if value:
                pwcw[widget] = pww[widget]
            widget = pwpm[widget]
    # Pass 3: the per-item games panes only matter when base_games has
    # data to show.
    if pwcw[ppm[self.base_games]]:
        if pwcw[ppm[self.game_items]]:
            # Same parent tree as base_games so just do child.
            widget = ppm[self.game_games]
            pwcw[widget] = pww[widget]
        if pwcw[ppm[self.repertoire_items]]:
            # Same parent tree as repertoire_items so just do child.
            widget = ppm[self.repertoire_games]
            pwcw[widget] = pww[widget]
        if pwcw[ppm[self.partial_items]]:
            # Same parent tree as partial_items so just do child.
            widget = ppm[self.partial_games]
            pwcw[widget] = pww[widget]
def configure_panedwindows(self):
    """Display available panedwindows subject to single_view status."""
    weights = self.pw_current_weights
    parents = self.pw_parent_map
    for payload in self.payload_available:
        holder = self.payload_parent_map[payload]
        # Insert the payload when its pane has a weight, otherwise take
        # it out of the panedwindow if it is currently shown.
        if weights[holder]:
            payload.insert_payload(holder)
        elif payload in holder.panes():
            payload.forget_payload(holder)
        # Propagate the weight up through every enclosing panedwindow.
        while holder in parents:
            parents[holder].pane(holder, weight=weights[holder])
            holder = parents[holder]
def set_properties_on_all_game_grids(self, game):
    """Set properties for game on all grids where it is visible."""
    # Each grid is responsible for ignoring a game it does not display.
    all_grids = (self.base_games, self.game_games,
                 self.repertoire_games, self.partial_games)
    for game_grid in all_grids:
        game_grid.set_properties(game)
def set_bindings_on_item_losing_focus_by_pointer_click(self):
    """Set bindings for an active item when it does not have focus.

    Binary moves of focus between two items by keyboard or popup menu
    action are dealt with elsewhere.

    This method deals with many-to-one possibilities implied by pointer
    click on a specific item. It could deal with the binary ones too, and
    maybe that is better, or not.
    """
    candidates = (
        self.game_items,
        self.repertoire_items,
        self.base_games,
        self.game_games,
        self.repertoire_games,
        self.base_repertoires,
        self.partial_games,
        self.partial_items,
        self.base_partials,
    )
    # Stop at the first widget group that accepts the binding; any()
    # short-circuits exactly like the original break.
    any(item.bind_for_widget_without_focus() for item in candidates)
def _set_position_analysis_data_source_all_items(self):
    """Set game and repertoire analysis data sources to match database.

    self.ui.database will be None if no database open, or the database
    instance if open.
    """
    item_collections = (
        self.game_items.order,
        self.repertoire_items.order,
        self.games_and_repertoires_in_toplevels,
    )
    for collection in item_collections:
        for entry in collection:
            entry.set_position_analysis_data_source()
def make_position_analysis_data_source(self):
"""Create a new DataSource for stored chess engine analysis."""
if self.database:
# Without the 'is not None' seems unreliable at 08 Nov 2015.
# What is wrong with 'if <obj>:' where obj is a bsddb3.DB instance?
# It does work sometimes, so some environment clutter perhaps.
# Not | |
for h in self.head:
r = Record(datfile = self.datfile,
idstr = h.strip().replace('<>', '_'))
if not (r.exists() or r.removed()):
try:
res = self.node.talk('/get/%s/%d/%s' %
(self.datfile, r.stamp, r.id))
first = re.sub(r'[\r\n]*$', '', iter(res).next())
yield first
except StopIteration, err:
sys.stderr.write('get %s: %s\n' % (self, err))
# End of RecordGetter
class Cache(dict):
    """Cache of BBS.

    Plain text (encode: UTF-8).
    One record per line.

    Maps record idstr -> Record for one thread (DATFILE); the backing
    files live under config.cache_dir/<file hash of datfile>.
    """

    # Class-level defaults; real values are set per instance in __init__.
    datfile = ""
    datpath = config.cache_dir
    stamp = 0                   # when the cache is modified
    size = 0                    # size of cache file
    count = 0                   # records count
    loaded = False              # loaded records
    type = ""                   # "thread"

    def __init__(self, datfile, sugtagtable=None, recentlist=None):
        """Bind the cache to *datfile* and read its status files.

        sugtagtable and recentlist may be passed in so a caller building
        many caches in a row (see CacheList) avoids re-reading them.
        """
        dict.__init__(self)
        self.datfile = datfile
        self.dathash = title.file_hash(datfile)
        # datpath starts as the class default config.cache_dir.
        self.datpath += "/" + self.dathash
        self.removed = {}
        self.stamp = self._load_status('stamp')
        self.valid_stamp = self._load_status('validstamp')
        self.recent_stamp = self.stamp
        if recentlist is None:
            recentlist = RecentList()
        if self.datfile in recentlist.lookup:
            # Prefer a newer stamp recorded in the recent list.
            recent_stamp = recentlist.lookup[self.datfile].stamp
            if self.recent_stamp < recent_stamp:
                self.recent_stamp = recent_stamp
        self.size = self._load_status('size')
        self.count = self._load_status('count')
        self.node = RawNodeList(os.path.join(self.datpath, 'node.txt'))
        self.tags = TagList(self.datfile,
                            os.path.join(self.datpath, 'tag.txt'))
        if sugtagtable is None:
            sugtagtable = SuggestedTagTable()
        if self.datfile in sugtagtable:
            self.sugtags = sugtagtable[self.datfile]
        else:
            self.sugtags = SuggestedTagList(sugtagtable, self.datfile)
        # The thread type is the first config.types entry prefixing datfile.
        for type in config.types:
            if self.datfile.startswith(type):
                self.type = type
                break
        self.save_record = config.save_record.get(self.type, 0)
        self.save_size = config.save_size.get(self.type, 1)
        self.get_range = config.get_range.get(self.type, 0)
        self.sync_range = config.sync_range.get(self.type, 0)
        self.save_removed = config.save_removed.get(self.type, 0)
        # NOTE(review): save_removed is forced beyond sync_range,
        # presumably so removal markers outlive the sync window — confirm.
        if self.sync_range == 0:
            self.save_removed = 0
        elif self.save_removed == 0:
            pass
        elif self.save_removed <= self.sync_range:
            self.save_removed = self.sync_range + 1

    def __str__(self):
        return self.datfile

    def __len__(self):
        # Report the persisted record count, not the loaded dict size.
        return self.count

    def keys(self):
        """Return sorted record idstrs, loading records on first use."""
        self.load()
        k = dict.keys(self)
        k.sort()
        return k

    def __iter__(self):
        # Iterate Record values (not keys), in sorted idstr order.
        for idstr in self.keys():
            yield self[idstr]

    def load(self):
        """Populate self from the record directory (runs once)."""
        if (not self.loaded) and self.exists():
            self.loaded = True
            try:
                for k in os.listdir(self.datpath + "/record"):
                    self[k] = Record(datfile=self.datfile, idstr=k)
            except OSError:
                sys.stderr.write("%s/record: OSError\n" % self.datpath)

    def exists(self):
        """Return True if the cache directory exists on disk."""
        return self.datpath and os.path.isdir(self.datpath)

    def has_record(self):
        """Return True if any record, live or removed, is present."""
        removed = self.datpath + "/removed"
        return bool(self) or \
            (os.path.exists(removed) and
             bool(os.listdir(removed)))

    def _load_status(self, key):
        """Read integer status value *key* from <datpath>/<key>.stat."""
        path = "%s/%s.stat" % (self.datpath, key)
        try:
            f = file(path)
            v = f.readline()
            f.close()
            return int(v.strip())
        except IOError:
            # A missing status file simply means 0; stay quiet.
            #sys.stderr.write(path + ": IOError\n")
            return 0
        except ValueError:
            sys.stderr.write(path + ": ValueError\n")
            return 0

    def _save_status(self, key, val):
        """Write status *key* if it changed; return True on success."""
        path = "%s/%s.stat" % (self.datpath, key)
        try:
            buf = str(val) + '\n'
            # fsdiff: skip the write when the file already holds buf.
            if not fsdiff(path, buf):
                try:
                    lock.acquire(True)
                    f = file(path, 'wb')
                    f.write(buf)
                    f.close()
                finally:
                    lock.release()
            return True
        except IOError:
            sys.stderr.write(path + ": IOError\n")
            return False

    def sync_status(self):
        """Persist stamp/validstamp/size/count (and dat.stat once)."""
        self._save_status('stamp', self.stamp)
        self._save_status('validstamp', self.valid_stamp)
        self._save_status('size', self.size)
        self._save_status('count', self.count)
        # dat.stat records the original thread name for re-hashing.
        if not os.path.exists(self.datpath + '/dat.stat'):
            self._save_status('dat', self.datfile)

    def standby_directories(self):
        """Create the cache directory tree if it does not exist."""
        for d in ('', '/attach', '/body', '/record', '/removed'):
            if not os.path.isdir(self.datpath + d):
                try:
                    os.makedirs(self.datpath + d)
                except (IOError, OSError):
                    sys.stderr.write(self.datfile + ": IOError/OSError\n")

    def check_data(self, res, stamp=None, id=None, begin=None, end=None):
        '''Check a data and add it cache.

        Each line of *res* is parsed as a record; records matching the
        optional stamp/id/time-range filters and the md5 check are added.
        Oversized or spam records are stored removed.  Returns the tuple
        (count of lines seen, got-any flag, spam-seen flag).
        '''
        flag_got = False
        flag_spam = False
        count = 0
        for i in res:
            count += 1
            r = Record(datfile=self.datfile)
            parse_ok = r.parse(i)
            if parse_ok and \
               ((stamp is None) or (r['stamp'] == str(stamp))) and \
               ((not id) or (r['id'] == id)) and \
               ((begin is None) or (begin <= r.stamp)) and \
               ((end is None) or (r.stamp <= end)) and \
               r.md5check():
                flag_got = True
                if (len(i) > config.record_limit*1024) or spam.check(i):
                    sys.stderr.write(
                        'Warning: %s/%s: too large or spam record.\n' %
                        (self.datfile, r.idstr))
                    # Count it (really=False) but keep only the removal.
                    self.add_data(r, False)
                    r.remove()
                    flag_spam = True
                else:
                    self.add_data(r)
            else:
                if stamp is not None:
                    str_stamp = '/%s' % stamp
                elif 'stamp' in r:
                    str_stamp = '/%s' % r['stamp']
                else:
                    str_stamp = ''
                sys.stderr.write("Warning: %s%s: broken record.\n" %
                                 (self.datfile, str_stamp))
                r.free()
        return count, flag_got, flag_spam

    def get_data(self, stamp=0, id="", node=None):
        """Get appointed data.

        Fetch the single record (stamp, id) from *node* and add it.
        Returns (flag_got, flag_spam).
        """
        res = node.talk("/get/" + self.datfile + "/" + str(stamp) + "/" + id)
        count, flag_got, flag_spam = self.check_data(res, stamp=stamp, id=id)
        if count:
            self.sync_status()
        else:
            sys.stderr.write("Warning: %s/%s: records not found.\n" %
                             (self.datfile, stamp))
        return flag_got, flag_spam

    def get_with_range(self, node=None):
        """Get data in range.

        Chooses a begin time from the last stamp, the sync window, and
        (for empty caches) the get window, then fetches either the full
        records (/get) or the headers followed by per-record fetches
        (/head + RecordGetter).  Returns True if anything arrived.
        """
        oldcount = len(self)
        now = int(time())
        if self.stamp > 0:
            begin = self.stamp
        else:
            begin = 0
        if self.sync_range > 0:
            begin2 = now - self.sync_range
        else:
            begin2 = 0
        if begin < 0:
            begin = 0
        elif begin2 < begin:
            # Never reach further back than the sync window.
            begin = begin2
        if (begin <= 0) and (len(self) <= 0):
            # Empty cache with no history: bulk-fetch the get window.
            if self.get_range > 0:
                begin = now - self.get_range
                if begin < 0:
                    begin = 0
            else:
                begin = 0
            res = node.talk('/get/%s/%d-' % (self.datfile, begin))
        else:
            # Incremental update: list heads, then fetch missing records.
            head = node.talk('/head/%s/%d-' % (self.datfile, begin))
            res = RecordGetter(self.datfile, node, head)
        count, flag_got, flag_spam = self.check_data(res, begin=begin, end=now)
        if count:
            self.sync_status()
            if oldcount == 0:
                self.loaded = True
        return bool(count)

    def add_data(self, rec, really=True):
        """Add new data cache.

        With really=False the record is only counted (used for spam),
        not stored in the dict and not allowed to move valid_stamp.
        """
        self.standby_directories()
        rec.sync()
        if really:
            self[rec.idstr] = rec
        self.size += len(str(rec)) + 1
        self.count += 1
        if really:
            if self.valid_stamp < rec.stamp:
                self.valid_stamp = rec.stamp
        if self.stamp < rec.stamp:
            self.stamp = rec.stamp

    def check_body(self):
        '''Remove body cache that is a field of removed record.'''
        try:
            dir = os.path.join(config.cache_dir, self.dathash, 'body')
            for idstr in os.listdir(dir):
                rec = Record(datfile=self.datfile, idstr=idstr)
                if not rec.exists():
                    try:
                        os.remove(os.path.join(dir, idstr))
                    except OSError, err:
                        sys.stderr.write("%s/%s: OSError: %s\n" %
                                         (dir, idstr, err))
        except (IOError, OSError), err:
            sys.stderr.write('IOError/OSError: %s\n' % err)

    def check_attach(self):
        """Remove attach cache that is a field of removed record."""
        try:
            dir = os.path.join(config.cache_dir, self.dathash, 'attach')
            for f in os.listdir(dir):
                # Attachment files are "<idstr>.<ext>"; thumbnails are
                # prefixed with 's'.
                idstr = f
                i = f.find(".")
                if i >= 0:
                    idstr = f[:i]
                if idstr.startswith('s'):
                    idstr = idstr[1:]
                rec = Record(datfile=self.datfile, idstr=idstr)
                if not rec.exists():
                    try:
                        os.remove(dir + "/" + f)
                    except OSError, err:
                        sys.stderr.write('OSError: %s\n' % err)
        except (IOError, OSError), err:
            sys.stderr.write('IOError/OSError: %s\n' % err)

    def remove(self):
        """Remove cache (a.k.a DATFILE).

        It is removed from disk.
        """
        try:
            shutil.rmtree(self.datpath)
            return True
        except (IOError, OSError), err:
            sys.stderr.write('IOError/OSError: %s\n' % err)
            return False

    def remove_records(self, now, limit):
        '''Remove records which are older than limit.

        The newest save_size records are always kept; among the rest,
        records older than *limit* seconds are removed.  A second pass
        removes duplicate records sharing the same id.
        '''
        # Remove old records.
        ids = self.keys()
        if self.save_size < len(ids):
            ids = ids[:-self.save_size]
            if limit > 0:
                for r in ids:
                    rec = self[r]
                    if rec.stamp + limit < now:
                        rec.remove()
        # Remove redundant records.
        once = Set()
        ids = self.keys()
        for r in ids:
            rec = self[r]
            if not rec.exists():
                pass
            elif rec.id in once:
                rec.remove()
            else:
                once.add(rec.id)

    def search(self, searchlist=None, myself=None):
        """Search node from network and get records.

        Finds a node carrying this thread, syncs records from it, and
        remembers the node in this thread's node list (bounded by
        config.share_nodes).  Returns True when a node was found.
        """
        self.standby_directories()
        if searchlist is None:
            searchlist = SearchList()
        if not myself:
            nodelist = NodeList()
            myself = nodelist.myself()
        lookuptable = LookupTable()
        node = searchlist.search(self,
                                 myself = myself,
                                 nodes = lookuptable.get(self.datfile, []))
        if node is not None:
            nodelist = NodeList()
            if node not in nodelist:
                nodelist.append(node)
                nodelist.sync()
            self.get_with_range(node)
            if node not in self.node:
                # Keep at most config.share_nodes nodes per thread,
                # evicting random entries to make room.
                while len(self.node) >= config.share_nodes:
                    n = self.node.random()
                    self.node.remove(n)
                self.node.append(node)
                self.node.sync()
            return True
        else:
            self.sync_status()
            return False

# End of Cache
class CacheList(list):
"""All cache."""
def __init__(self):
    """Initialize the empty list and fill it from the cache directory."""
    list.__init__(self)
    self.load()
def load(self):
    """Rebuild the list from the directories under config.cache_dir.

    Unless the 'asis' hash method is configured, the real thread name is
    read from the dat.stat file inside each sub-directory; when that
    file cannot be read, the directory name itself is used instead.
    """
    sugtagtable = SuggestedTagTable()
    recentlist = RecentList()
    del self[:]
    for i in os.listdir(config.cache_dir):
        if config.cache_hash_method == 'asis':
            self.append(Cache(i, sugtagtable, recentlist))
            continue
        try:
            f = open(config.cache_dir + "/" + i + "/dat.stat")
            try:
                # First line of dat.stat holds the original thread name.
                dat_stat = f.readlines()[0].strip()
            finally:
                # Close exactly once, even when the read raises.  (The
                # previous code closed the handle a second time after
                # appending and leaked it if readlines() failed.)
                f.close()
            self.append(Cache(dat_stat, sugtagtable, recentlist))
        except IOError:
            # No readable dat.stat: fall back to the directory name.
            self.append(Cache(i, sugtagtable, recentlist))
def rehash(self):
    """Rename file path hash if it is old.

    For each cache directory, recover the thread name (from dat.stat,
    writing one from the directory name when missing), recompute the
    current hash, and move the directory when the hash has changed.
    Reloads the list when anything moved.
    """
    to_reload = False
    for i in os.listdir(config.cache_dir):
        try:
            dat_stat_file = os.path.join(config.cache_dir, i, 'dat.stat')
            if os.path.isfile(dat_stat_file):
                f = open(dat_stat_file)
                dat_stat = f.readlines()[0].strip()
                f.close()
            else:
                # Legacy directory: its name is the thread name; record
                # it in a new dat.stat for future rehashes.
                dat_stat = i
                f = open(dat_stat_file, 'wb')
                f.write(i + '\n')
                f.close()
            hash = title.file_hash(dat_stat)
            if i == hash:
                continue
            sys.stderr.write('rehash %s to %s\n' % (i, hash))
            shutil.move(os.path.join(config.cache_dir, i),
                        os.path.join(config.cache_dir, hash))
            to_reload = True
        except (IOError, OSError, IndexError), err:
            sys.stderr.write('rehash error %s for %s\n' % (err, i))
    if to_reload:
        self.load()
def getall(self, timelimit=0):
"""Search nodes and update my cache."""
random.shuffle(self)
nodelist = NodeList()
myself = nodelist.myself()
searchlist = SearchList()
for cache in self:
if int(time()) > timelimit:
sys.stderr.write("client timeout\n")
return
elif not cache.exists():
pass
else:
cache.search(searchlist=searchlist, myself=myself)
cache.size = 0
cache.count = 0
cache.valid_stamp = 0
for rec in cache:
if not rec.exists():
continue
load_ok = rec.load()
if load_ok:
if cache.stamp < rec.stamp:
cache.stamp = rec.stamp
if cache.valid_stamp < rec.stamp:
cache.valid_stamp = rec.stamp
cache.size += len(str(rec))
cache.count | |
the root of the library
path) that indicates the actual resource file.
:param depends: optionally, a list of resources that this resource
depends on. Entries in the list are :py:class:`Resource`
instances.
:param supersedes: optionally, a list of :py:class:`Resource`
instances that this resource supersedes as a rollup
resource. If all these resources are required for render a page,
the superseding resource will be included instead.
:param bottom: indicate that this resource is "bottom safe": it
can be safely included on the bottom of the page (just before
``</body>``). This can be used to improve the performance of
page loads when Javascript resources are in use. Not all
Javascript-based resources can however be safely included that
way, so you have to set this explicitly (or use the
``force_bottom`` option on :py:class:`NeededResources`).
:param renderer: optionally, a callable that accepts an URL
argument and returns a rendered HTML snippet for this
resource. If no renderer is provided, a renderer is looked up
based on the resource's filename extension.
:param dont_bundle: Don't bundle this resource in any bundles
(if bundling is enabled).
"""
def __init__(self, library, relpath,
             depends=None,
             supersedes=None,
             bottom=False,
             renderer=None,
             debug=None,
             dont_bundle=False,
             minified=None,
             minifier=NOTHING,
             compiler=NOTHING,
             source=None,
             mode_parent=None):
    """See the class docstring for the meaning of the parameters."""
    self.relpath = relpath
    super(Resource, self).__init__(library, depends)
    self.dirname, self.filename = os.path.split(relpath)
    if self.dirname and not self.dirname.endswith('/'):
        self.dirname += '/'
    self.ext = os.path.splitext(self.relpath)[1]
    self.mode_parent = mode_parent
    # NOTHING (not None) marks "argument not passed": fall back to the
    # compiler/minifier registered on the library for this extension.
    if compiler is NOTHING:
        compiler = self.library.compilers.get(self.ext)
    self.compiler = fanstatic.registry.CompilerRegistry.instance()[
        compiler]
    self.source = source
    if minifier is NOTHING:
        if mode_parent is None:
            minifier = self.library.minifiers.get(self.ext)
        else:
            # Mode variants (debug/minified) are never minified again.
            minifier = None
    self.minifier = fanstatic.registry.MinifierRegistry.instance()[
        minifier]
    self.minified = minified
    # A minifier generates the minified file itself, so passing a
    # Resource (rather than a path string) as `minified` is an error.
    if (self.minified and not isinstance(self.minified, compat.basestring)
            and self.minifier.available):
        raise ConfigurationError(
            "Since %s specifies minifier %s, passing another "
            "Resource object as its minified version does not make sense"
            % (self.relpath, minifier))
    if not self.minified and self.minifier.available:
        self.minified = self.minifier.source_to_target(self)
    if _resource_file_existence_checking:
        # Fail fast at definition time when neither the file nor a way
        # to generate it (minifier on the parent, or a compiler) exists.
        path = self.fullpath()
        minified = (self.mode_parent
                    and self.mode_parent.minifier.available)
        if not (minified
                or self.compiler.available
                or os.path.exists(path)):
            raise UnknownResourceError(
                "Resource file does not exist: %s" % path)
        path = self.compiler.source_path(self)
        if self.compiler.available and not os.path.exists(path):
            raise UnknownResourceError(
                "Source file %s for %s does not exist" % (
                    path, self.fullpath()))
    self.bottom = bottom
    self.dont_bundle = dont_bundle
    if renderer is None:
        # No custom, ad-hoc renderer for this Resource, so lookup
        # the default renderer by resource filename extension.
        if self.ext not in inclusion_renderers:
            raise UnknownResourceExtensionError(
                "Unknown resource extension %s for resource: %s" %
                (self.ext, repr(self)))
        self.order, self.renderer = inclusion_renderers[self.ext]
    else:
        # Use the custom renderer.
        self.renderer = renderer
        # If we do not know about the filename extension inclusion
        # order, we render the resource after all others.
        self.order, _ = inclusion_renderers.get(
            self.ext, (compat.maxsize, None))
    self.modes = {}
    # Build the debug/minified mode variants; a string argument names a
    # path in the same library, anything else is taken as a Resource.
    for mode_name, argument in [(DEBUG, debug), (MINIFIED, self.minified)]:
        if argument is None:
            continue
        elif isinstance(argument, compat.basestring):
            # this if is kludgy, but better than unrolling the loop
            if mode_name == MINIFIED:
                mode_parent = self.minifier.available and self
            else:
                mode_parent = None
            mode_resource = Resource(
                library, argument, bottom=bottom, renderer=renderer,
                depends=depends, dont_bundle=dont_bundle,
                mode_parent=mode_parent)
        else:
            # The dependencies of a mode resource should be the same
            # or a subset of the dependencies this mode replaces.
            if len(argument.depends - self.depends) > 0:
                raise ModeResourceDependencyError
            mode_resource = argument
        self.modes[mode_name] = mode_resource
    assert not isinstance(supersedes, compat.basestring)
    self.supersedes = supersedes or []
    self.rollups = []
    # create a reference to the superseder in the superseded resource
    for resource in self.supersedes:
        resource.rollups.append(self)
def fullpath(self, path=None):
    """Return the normalized absolute path of *path* (default: this
    resource's relpath) inside the library directory."""
    target = self.relpath if path is None else path
    return os.path.normpath(os.path.join(self.library.path, target))
def compile(self, force=False):
    """Compile (and minify) this resource unless the library is released.

    A library with a version set has been packaged; its resources are
    assumed compiled already, so compilation is skipped (see registry.py:
    development-mode installs have no version).
    """
    if self.library.version is not None:
        return
    if self.mode_parent:
        # Mode variants are generated from their parent; compile that.
        self.mode_parent.compile(force=force)
        return
    self.compiler(self, force=force)
    self.minifier(self, force=force)
def render(self, library_url):
    """Return the HTML inclusion snippet for this resource served
    under *library_url*."""
    resource_url = '/'.join((library_url, self.relpath))
    return self.renderer(resource_url)
def __repr__(self):
    """Debug representation naming the resource and its library."""
    description = (self.relpath, self.library.name)
    return "<Resource '%s' in library '%s'>" % description
def mode(self, mode):
    """Get Resource in another mode.

    If the mode is ``None`` or if the mode cannot be found, this
    ``Resource`` instance is returned instead.

    :param mode: a string indicating the mode, or ``None``.
    """
    if mode is None:
        return self
    # Unknown modes silently fall back to this resource itself.
    return self.modes.get(mode, self)
def need(self, slots=None):
    """Declare that the application needs this resource.

    If you call ``.need()`` on ``Resource`` sometime during the
    rendering process of your web page, this resource and all its
    dependencies will be inserted as inclusions into the web page.

    :param slots: an optional dictionary mapping from
      :py:class:`Slot` instances to :py:class:`Resource`
      instances. This dictionary describes how to fill in the
      slots that this resource might depend on (directly or
      indirectly). If a slot is required, the dictionary must
      contain an entry for it.
    """
    # Register on the per-request NeededResources collector.
    get_needed().need(self, slots)
# Sentinel distinguishing "required not passed at all" from an explicit
# required=True/False in Slot.__init__ (an explicit True together with a
# default is rejected there).
REQUIRED_DEFAULT_MARKER = object()
class Slot(Asset):
    """A resource slot.

    Sometimes only the application has knowledge on how to fill in a
    dependency for a resource, and this cannot be known at resource
    definition time. In this case you can define a slot, and make your
    resource depend on that. This slot can then be filled in with a
    real resource by the application when you ``.need()`` that
    resource (or when you need something that depends on the slot
    indirectly).

    :param library: the :py:class:`Library` this slot is in.

    :param ext: the extension of the slot, for instance '.js'. This
      determines what kind of resources can be slotted in here.

    :param required: a boolean indicating whether this slot is
      required to be filled in when a resource that depends on a slot
      is needed, or whether it's optional. By default filling in a
      slot is required.

    :param depends: optionally, a list of resources that this slot
      depends on. Resources that are slotted in here need to have
      the same dependencies as that of the slot, or a strict subset.

    :param default: optionally, a fallback stored on the slot;
      presumably used to fill the slot when the application does not
      supply a resource (consumed where slots are resolved — not
      visible here).
    """
    def __init__(self, library, extension, depends=None,
                 required=REQUIRED_DEFAULT_MARKER,
                 default=None):
        super(Slot, self).__init__(library, depends)
        # We need to detect if required was set to true explicitly.
        if required is True and default is not None:
            raise ValueError('A slot with a default is not required and can '
                             'not be made required.')
        if required is REQUIRED_DEFAULT_MARKER:
            # When the caller did not specify `required`, a slot with a
            # default does not have to be filled in explicitly, so it is
            # required only when no default is given.  (Unconditionally
            # forcing True here contradicted the ValueError above, which
            # promises that a slot with a default is not required.)
            required = default is None
        self.default = default
        assert extension.startswith('.')
        self.ext = extension
        self.required = required
class FilledSlot(Renderable):
    """A slot filled in with a concrete resource.

    Wraps *resource* so it renders in place of *slot*, after validating
    that the resource's extension matches the slot and that its
    dependencies are a subset of the slot's dependencies.
    """
    def __init__(self, slot, resource):
        self.filledby = resource
        self.library = resource.library
        self.relpath = resource.relpath
        self.dirname, self.filename = resource.dirname, resource.filename
        self.bottom = resource.bottom
        self.rollups = resource.rollups
        self.dont_bundle = resource.dont_bundle
        if slot.ext != resource.ext:
            raise SlotError(
                "slot requires extension %s but filled with resource "
                "with extension %s" %
                (slot.ext, resource.ext))
        self.ext = resource.ext
        self.order = resource.order
        self.renderer = resource.renderer
        self.dependency_nr = slot.dependency_nr
        self.modes = {}
        # Use a distinct loop variable: the previous code rebound
        # ``resource`` here, so the dependency check below was applied
        # to the last mode variant instead of the filling resource.
        for key, mode_resource in compat.iteritems(resource.modes):
            self.modes[key] = FilledSlot(slot, mode_resource)
        if not resource.depends.issubset(slot.depends):
            raise SlotError(
                "slot filled in with resource that has dependencies that "
                "are not a strict subset of dependencies of slot")
        # XXX how do slots interact with rollups?

    def render(self, library_url):
        """Render by delegating to the resource that fills the slot."""
        return self.filledby.render(library_url)

    def compile(self, force=False):
        """Compile the filling resource."""
        self.filledby.compile(force=force)

    def __repr__(self):
        return "<FilledSlot '%s' in library '%s'>" % (
            self.relpath, self.library.name)

    def mode(self, mode):
        """Return the variant of this filled slot for *mode*.

        Falls back to this instance when *mode* is None or unknown.
        """
        if mode is None:
            return self
        # try getting the alternative
        try:
            return self.modes[mode]
        except KeyError:
            # fall back on the default mode if mode not found
            return self
class Group(Dependable):
"""A resource used to group resources together.
It doesn't define a resource file itself, but instead depends on
other resources. When a Group is depended on, all the resources
grouped together will be included.
:param depends: a list of resources that this resource depends
on. Entries in the list can be :py:class:`Resource` instances, or
:py:class:`Group` instances.
"""
def __init__(self, depends):
    """Create the group depending on *depends* (Resources or Groups)."""
    # supports: dependables that depend on this group; dependencies add
    # themselves to it in set_dependencies of the depender.
    self.supports = set()
    self.set_dependencies(depends)
def set_dependencies(self, depends):
    """Point this dependable at *depends* and propagate resource sets.

    Registers self with each dependency's ``supports`` set, collects
    the union of their resources, and pushes that union up to every
    dependable that (transitively) depends on self.
    """
    self.depends = set(depends)
    self.resources = set()
    for dependency in self.depends:
        dependency.supports.add(self)
        self.resources.update(dependency.resources)
    for supporter in self.list_supporting():
        supporter.resources.update(self.resources)
def list_assets(self):
    """Return the set of concrete assets reachable via this group."""
    collected = set()
    for dependency in self.depends:
        collected |= dependency.list_assets()
    return collected
def need(self, slots=None):
"""Need this group resource.
If you call ``.need()`` | |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
Photon scattering in quantum optical systems
This module includes a collection of functions for numerically computing photon
scattering in driven arbitrary systems coupled to some configuration of output
waveguides. The implementation of these functions closely follows the
mathematical treatment given in K. A. Fischer et al., Scattering of Coherent
Pulses from Quantum Optical Systems (2017, arXiv:1710.02875).
"""
# Author: <NAME>
# Contact: <EMAIL>
import numpy as np
from itertools import product, combinations_with_replacement
from ..core import basis, tensor, zero_ket, Qobj
from .propagator import propagator
from .solver import SolverOptions
__all__ = ['temporal_basis_vector',
'temporal_scattered_state',
'scattering_probability']
class Evolver:
    """
    A caching class which takes a Hamiltonian and a list of times to calculate
    and memoize propagators for the system between any two times as demanded.

    Parameters
    ----------
    H : :class: qutip.Qobj or list
        System-waveguide(s) Hamiltonian or effective Hamiltonian in `Qobj` or
        list-callback format. If construct_effective_hamiltonian is not
        specified, an effective Hamiltonian is constructed from H and c_ops.
    times : list-like
        List of times to evaluate propagators over.
    options : :class: qutip.SolverOptions
        Solver options to use when computing propagators.

    Attributes
    ----------
    H : :class: qutip.Qobj or list
        System-waveguide(s) Hamiltonian, may be time-dependent.
    tlist : list-like
        List of times to evaluate propagators over.
    propagators : (dict of float: (dict of float: :class: qutip.Qobj))
        Nested dictionary of cached propagators keyed by final then initial
        time, e.g. propagators[t2][t1] returns U[t2,t1].
    """

    def __init__(self, H, tlist, options=None):
        self.H = H
        self.tlist = tlist
        if options is None:
            options = SolverOptions(nsteps=10000, normalize_output=False)
        self.options = options
        # Lazily-filled table: propagators[tf][ti] is None until computed.
        self.propagators = {tf: {ti: None for ti in tlist} for tf in tlist}

    def prop(self, tf, ti):
        """Compute U[tf, ti] on demand, or return the cached operator.

        Parameters
        ----------
        tf : float
            Final time of the propagator U[tf, ti].
        ti : float
            Initial time of the propagator U[tf, ti].

        Returns
        -------
        propagator : :class: qutip.Qobj
            The propagation operator.
        """
        # Snap the requested times onto the evaluation grid.
        grid_indices = np.searchsorted(self.tlist, [ti, tf], side='left')
        t1 = self.tlist[grid_indices[0]]
        t2 = self.tlist[grid_indices[1]]
        if self.propagators[t2][t1] is None:
            # Something is still broken about batch unitary mode (see #807),
            # so compute each propagator in 'single' mode.
            self.propagators[t2][t1] = propagator(self.H, [t1, t2],
                                                  options=self.options,
                                                  unitary_mode='single')
        return self.propagators[t2][t1]
def set_partition(collection, num_sets):
    """
    Enumerate all ways of partitioning collection into num_sets different
    tuples, e.g. list(set_partition([1,2], 2)) = [((1, 2), ()), ((1,), (2,)),
    ((2,), (1,)), ((), (1, 2))].

    Parameters
    ----------
    collection : sequence
        Collection to generate a set partition of.
    num_sets : int
        Number of sets to partition collection into.

    Yields
    ------
    partition : tuple of tuples
        One partitioning of collection into num_sets sets per iteration.
    """
    # Each assignment maps element i -> index of the bucket receiving it.
    for assignment in product(range(num_sets), repeat=len(collection)):
        buckets = [[] for _ in range(num_sets)]
        for element, destination in zip(collection, assignment):
            buckets[destination].append(element)
        yield tuple(tuple(bucket) for bucket in buckets)
def photon_scattering_operator(evolver, c_ops, taus_list):
    """
    Compute the scattering operator for a system emitting into multiple
    waveguides.
    Parameters
    ----------
    evolver : :class: qutip.scattering.Evolver
        Evolver-wrapped Hamiltonian describing the system.
    c_ops : list
        list of collapse operators for each waveguide; these are assumed to
        include spontaneous decay rates, e.g.
        :math:`\\sigma = \\sqrt \\gamma \\cdot a`
    taus_list : list-like
        List of (list of emission times) for each waveguide.
    Returns
    -------
    omega : :class: qutip.Qobj
        The temporal scattering operator with dimensionality equal to the
        system state.
    """
    # Merge the per-waveguide emission times into one chronologically sorted
    # list of (time, waveguide index) pairs. The (0.0, -1) entry is the
    # temporal "ground state" for an arbitrary waveguide and anchors the
    # first propagation interval.
    emissions = [(0.0, -1)]
    for wg, wg_taus in enumerate(taus_list):
        for tau in wg_taus:
            emissions.append((tau, wg))
    emissions.sort(key=lambda pair: pair[0])
    # Interleave effective propagation and collapse at each emission:
    # omega <- c_q * U_eff(t_q, t_{q-1}) * omega
    omega = 1
    for previous, current in zip(emissions, emissions[1:]):
        t_prev = previous[0]
        t_cur, wg = current
        omega = c_ops[wg] * evolver.prop(t_cur, t_prev) * omega
    # Append the final free evolution <0|U_eff(T_max, tau_max)|0> out to the
    # end of the evolver's time grid.
    t_end = evolver.tlist[-1]
    t_last = emissions[-1][0]
    omega = evolver.prop(t_end, t_last) * omega
    return omega
def temporal_basis_vector(waveguide_emission_indices, n_time_bins):
    """
    Generate a temporal basis vector for emissions at specified time bins into
    specified waveguides.
    Parameters
    ----------
    waveguide_emission_indices : list or tuple
        List of indices where photon emission occurs for each waveguide,
        e.g. [[t1_wg1], [t1_wg2, t2_wg2], [], [t1_wg4, t2_wg4, t3_wg4]].
    n_time_bins : int
        Number of time bins; the range over which each index can vary.
    Returns
    -------
    temporal_basis_vector : :class: qutip.Qobj
        A basis vector representing photon scattering at the specified indices.
        If there are W waveguides, T times, and N photon emissions, then the
        basis vector has dimensionality (W*T)^N.
    """
    # Work on list copies so the caller's structure is never mutated.
    indices_by_wg = [list(indices) for indices in waveguide_emission_indices]
    n_waveguides = len(indices_by_wg)
    total_emissions = sum(len(indices) for indices in indices_by_wg)
    # With no emissions the "vacuum" basis state of a single (W*T)-dim space
    # is returned rather than a tensor product.
    if total_emissions == 0:
        return basis(n_waveguides * n_time_bins, 0)
    # Flatten (waveguide, time-bin) pairs into single indices by offsetting
    # each waveguide's block by i * n_time_bins.
    flat_indices = [t + wg * n_time_bins
                    for wg, indices in enumerate(indices_by_wg)
                    for t in indices]
    return tensor([basis(n_time_bins * n_waveguides, j) for j in flat_indices])
def temporal_scattered_state(H, psi0, n_emissions, c_ops, tlist,
system_zero_state=None,
construct_effective_hamiltonian=True):
"""
Compute the scattered n-photon state projected onto the temporal basis.
Parameters
----------
H : :class: qutip.Qobj or list
System-waveguide(s) Hamiltonian or effective Hamiltonian in Qobj or
list-callback format. If construct_effective_hamiltonian is not
specified, an effective Hamiltonian is constructed from `H` and
`c_ops`.
psi0 : :class: qutip.Qobj
Initial state density matrix :math:`\\rho(t_0)` or state vector
:math:`\\psi(t_0)`.
n_emissions : int
Number of photon emissions to calculate.
c_ops : list
List of collapse operators for each waveguide; these are assumed to
include spontaneous decay rates, e.g.
:math:`\\sigma = \\sqrt \\gamma \\cdot a`
tlist : array_like
List of times for :math:`\\tau_i`. tlist should contain 0 and exceed
the pulse duration / temporal region of interest.
system_zero_state : :class: qutip.Qobj
State representing zero excitations in the system. Defaults to
:math:`\\psi(t_0)`
construct_effective_hamiltonian : bool
Whether an effective Hamiltonian should be constructed from H and c_ops:
:math:`H_{eff} = H - \\frac{i}{2} \\sum_n \\sigma_n^\\dagger \\sigma_n`
Default: True.
Returns
-------
phi_n : :class: qutip.Qobj
The scattered bath state projected onto the temporal basis given by
tlist. If there are W waveguides, T times, and N photon emissions, then
the state is a tensor product state with dimensionality T^(W*N).
"""
T = len(tlist)
W = len(c_ops)
if n_emissions == 0:
phi_n = zero_ket(W * T)
else:
phi_n = tensor([zero_ket(W * T)] * n_emissions)
if construct_effective_hamiltonian:
# Construct an effective Hamiltonian from system hamiltonian and c_ops
if isinstance(H, Qobj):
Heff = H - 1j / 2 * sum([op.dag() * op for op in c_ops])
elif isinstance(H, list):
Heff = H + [-1j / 2 * sum([op.dag() * op | |
time steps (with interpolation steps and starting point)
nt = xin.shape[1]
# SAVE VERSION OF TRACPY USED
# Save file into a local directory called tracks. Make directory if it
# doesn't exist.
if 'tracks' not in name:
if not os.path.exists('tracks'):
os.makedirs('tracks')
name = 'tracks/' + name
# Open file for writing.
# Using netCDF3-Classic because the ROMS output does and
# MFDataset does not work with NetCDF4
# Can't save variables over 2GB without special format:
# http://www.ncl.ucar.edu/Support/talk_archives/2011/0599.html
# rootgrp = netCDF.Dataset('tracks/' + name + '.nc','w',format='NETCDF3_CLASSIC')
# # Hoping that this format will both allow large variables and aggregation
# rootgrp = netCDF.Dataset('tracks/' + name + '.nc','w',format='NETCDF3_64BIT')
# Really useful netCDF4 resource:
# http://www.unidata.ucar.edu/software/netcdf/workshops/2012/netcdf_python/netcdf4python.pdf
# Looks like I might still be able to use MFDataset (with netCDF4_CLASSIC files)
# Apply compression at the createVariable stage with zlib
# Info about classic: http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/NetCDF_002d4-Classic-Model-Format.html
# Looks like I might be able to use this, still use MFDataset, have large variables, and compress too
# 4-Classic can still only have 1 unlimited dimension
rootgrp = netCDF.Dataset(name + '.nc', 'w', format='NETCDF4_CLASSIC')
# Define dimensions
rootgrp.createDimension('ntrac', ntrac)
rootgrp.createDimension('nt', nt)
if Uin is not None:
xul = Uin.shape[0]
yul = Uin.shape[1]
rootgrp.createDimension('xul', xul)
rootgrp.createDimension('yul', yul)
xvl = Vin.shape[0]
yvl = Vin.shape[1]
rootgrp.createDimension('xvl', xvl)
rootgrp.createDimension('yvl', yvl)
# Do the rest of this by variable so they can be deleted as I go for memory.
if savell: # if saving in latlon
# Create variable
# 64-bit floating point, with lossless compression
lonp = rootgrp.createVariable('lonp', 'f8', ('ntrac', 'nt'),
zlib=True)
# Set some attributes
lonp.long_name = 'longitudinal position of drifter'
lonp.units = 'degrees'
lonp.time = 'tp'
# Write data to netCDF variables
lonp[:] = xin
# Delete to save space
del(xin)
# 64-bit floating point, with lossless compression
latp = rootgrp.createVariable('latp', 'f8', ('ntrac', 'nt'),
zlib=True)
latp.long_name = 'latitudinal position of drifter'
latp.units = 'degrees'
latp.time = 'tp'
latp[:] = yin
del(yin)
else: # then saving in grid coordinates
# Create variable
# 64-bit floating point, with lossless compression
xg = rootgrp.createVariable('xg', 'f8', ('ntrac', 'nt'), zlib=True)
# Set some attributes
xg.long_name = 'x grid position of drifter'
xg.units = 'grid units'
xg.time = 'tp'
# Write data to netCDF variables
xg[:] = xin
# Delete to save space
del(xin)
# 64-bit floating point, with lossless compression
yg = rootgrp.createVariable('yg', 'f8', ('ntrac', 'nt'), zlib=True)
yg.long_name = 'y grid position of drifter'
yg.units = 'grid units'
yg.time = 'tp'
yg[:] = yin
del(yin)
if do3din:
# 64-bit floating point, with lossless compression
zp = rootgrp.createVariable('zp', 'f8', ('ntrac', 'nt'),
zlib=True)
zp.long_name = 'vertical position of drifter (negative is downward from surface)'
zp.units = 'meter'
zp.time = 'tp'
zp[:] = zpin
del(zpin)
else:
del(zpin)
# 64-bit floating point, with lossless compression
tp = rootgrp.createVariable('tp', 'f8', ('ntrac', 'nt'),
zlib=True)
tp.long_name = 'time at drifter locations'
tp.units = time_unitsin
tp[:] = tpin
del(tpin)
if Uin is not None:
# 64-bit floating point, with lossless compression
T0 = rootgrp.createVariable('T0', 'f8', ('ntrac'), zlib=True)
U = rootgrp.createVariable('U', 'f8', ('xul', 'yul'), zlib=True)
V = rootgrp.createVariable('V', 'f8', ('xvl', 'yvl'), zlib=True)
T0.long_name = 'Initial volume transport associated with each drifter'
U.long_name = 'Aggregation of x volume transports of drifters'
V.long_name = 'Aggregation of y volume transports of drifters'
T0.units = 'meter3 second-1'
U.units = 'meter3 second-1'
V.units = 'meter3 second-1'
T0[:] = T0in
U[:] = Uin
V[:] = Vin
del(T0in, Uin, Vin)
# Create variables
# Main track information
# Include other run details
nsteps = rootgrp.createVariable('nsteps', 'i4')
N = rootgrp.createVariable('N', 'i4')
ff = rootgrp.createVariable('ff', 'i4')
tseas = rootgrp.createVariable('tseas', 'f8')
ah = rootgrp.createVariable('ah', 'f8')
av = rootgrp.createVariable('av', 'f8')
do3d = rootgrp.createVariable('do3d', 'i4')
doturb = rootgrp.createVariable('doturb', 'i4')
doperiodic = rootgrp.createVariable('doperiodic', 'i4')
# Set some attributes
nsteps.long_name = 'sets max time steps between time interpolations \
between model outputs'
N.long_name = 'sets number of samplings of drifter track'
ff.long_name = 'forward (1) or backward (-1) in time'
tseas.long_name = 'time between model outputs'
ah.long_name = 'horizontal diffusion'
av.long_name = 'vertical diffusion'
do3d.long_name = 'flag for running in 3d (1) or 2d (0)'
doturb.long_name = 'flag for using no subgrid parameterization (0), \
added turbulent velocities (1), displacement to \
particle position on a circle (2), displacement to \
particle position on an ellipse (3)'
doperiodic.long_name = 'flag for using periodic boundary conditions: \
none (0), in x-direction (1), in y-direction (2)'
tseas.units = 'second'
ah.units = 'meter2 second-1'
av.units = 'meter2 second-1'
# Write data to netCDF variables
nsteps[:] = nstepsin
N[:] = Nin
ff[:] = ffin
tseas[:] = tseasin
ah[:] = ahin
av[:] = avin
do3d[:] = do3din
doturb[:] = doturbin
doperiodic[:] = doperiodicin
rootgrp.close()
def loadtracks(name, loc=None):
    """
    Load in track info from netcdf file.
    Args:
        name (str): Name of tracks file
        loc (Optional): Tracks file is assumed to be in local tracks
         directory. Use this to give location if it is not.
    Returns:
        lonp, latp, zp, tp arrays of drifter longitudes, latitudes,
        vertical positions, and times read from the file.
    """
    if loc is None:
        fname = 'tracks/' + name + '.nc'
    else:
        fname = loc + '/' + name + '.nc'
    nc = netCDF.Dataset(fname)
    # BUGFIX: the dataset was previously never closed (file-handle leak);
    # read everything into memory with [:], then close before returning.
    try:
        lonp = nc.variables['lonp'][:]
        latp = nc.variables['latp'][:]
        zp = nc.variables['zp'][:]
        tp = nc.variables['tp'][:]
    finally:
        nc.close()
    return lonp, latp, zp, tp
def loadtransport(name, fmod=None):
    """
    Load and sum drifter volume transports from project simulation files.
    Args:
        name: Name of project
        fmod: File modifier: a way to choose a subset of the file in the
         project directory instead of all. Should be a string and can include
         asterisks as wildcards. A list of such strings is also accepted.
    Returns:
        * U, V - Transport of drifter volume in x and y directions over all
          used simulation files
        * lon0 - Initial lon location for drifters
        * lat0 - Initial lat location for drifters
        * T0 - Overall
    Raises:
        IOError: If no matching track files are found (previously this
        surfaced as a confusing NameError).
    """
    # Which files to read in.
    if fmod is None:
        Files = glob.glob('tracks/' + name + '/*.nc')
    elif isinstance(fmod, list):
        # BUGFIX: previously only lists with len > 1 were handled here, so a
        # one-element list fell into the string branch and raised a TypeError
        # on concatenation. Any list is handled now.
        Files = []
        for fm in fmod:
            Files += glob.glob('tracks/' + fm)
    else:
        Files = glob.glob('tracks/' + name + '/' + fmod + '.nc')
    Files.sort()
    if not Files:
        raise IOError('loadtransport: no track files found for ' + name)
    # Load in U and V volume transports of drifters and add together for
    # all files
    for i, File in enumerate(Files):
        d = netCDF.Dataset(File)
        try:
            if i == 0:  # initialize U and V transports from first file
                U = d.variables['U'][:]
                V = d.variables['V'][:]
                T0 = np.sum(d.variables['T0'][:])
            else:  # add in transports from subsequent simulations
                U = U + d.variables['U'][:]
                V = V + d.variables['V'][:]
                T0 = T0 + np.sum(d.variables['T0'][:])
            # Add initial drifter location (all drifters start at the same
            # location)
            lon0 = d.variables['lonp'][:, 0]
            lat0 = d.variables['latp'][:, 0]
        finally:
            # Ensure each dataset is closed even if a variable is missing.
            d.close()
    return U, V, lon0, lat0, T0
def save_ll2grid(name, grid, loc=None):
"""
Input drifter tracks from saved file in grid coordinates and save a new
file with drifter tracks in lat/lon instead.
Example:
>>> loc = 'http://barataria.tamu.edu:8080/thredds/dodsC/NcML/txla_nesting6.nc' # TXLA model/grid output location
>>> grid = tracpy.inout.readgrid(loc)
>>> tracpy.inout.save_ll2grid([trackfile], grid, loc=loc)
Note:
[trackfile] should be the name of the drifter tracks files,
including .nc extension, and any location prefix after 'tracks/'
Note:
input a loc value if the drifter files do not have it saved (those
run on hafen, for example)
"""
# load in tracks
d = netCDF.Dataset(name)
lonp = d.variables['lonp'][:]
latp = d.variables['latp'][:]
# Convert to grid coords
x, y, dt = tracpy.tools.interpolate2d(lonp, latp, grid, 'd_ll2ij')
del(lonp, latp, grid)
if 'loc' in d.variables:
loc = d.variables['loc'][:]
else:
print('will use input loc value for saving to file')
# save new file
# transport calculation included
if 'U' in d.variables:
if d.variables['do3d'][:]:
savetracks(x, y, d.variables['zp'][:], d.variables['tp'][:],
name.split('/')[-1][:-3], d.variables['nsteps'][:],
d.variables['N'][:], d.variables['ff'][:],
d.variables['tseas'][:], d.variables['ah'][:],
d.variables['av'][:], d.variables['do3d'][:],
d.variables['doturb'][:], loc, d.variables['T0'][:],
d.variables['U'][:], d.variables['V'][:], savell=False)
else: # have to input something for z but it won't be saved
savetracks(x, y, y, d.variables['tp'][:],
name.split('/')[-1][:-3], d.variables['nsteps'][:],
d.variables['N'][:], d.variables['ff'][:],
d.variables['tseas'][:], d.variables['ah'][:],
d.variables['av'][:], d.variables['do3d'][:],
d.variables['doturb'][:], loc, d.variables['T0'][:],
d.variables['U'][:], d.variables['V'][:], savell=False)
else:
if d.variables['do3d'][:]:
savetracks(x, y, d.variables['zp'][:], d.variables['tp'][:],
name.split('/')[-1][:-3], d.variables['nsteps'][:],
d.variables['N'][:], d.variables['ff'][:],
d.variables['tseas'][:], d.variables['ah'][:],
d.variables['av'][:], d.variables['do3d'][:],
d.variables['doturb'][:], loc, savell=False)
else: # have to input something for z but it won't be saved
| |
roles e.g. 'PI' or 'Co-PI' (note: if the investigator has a Dimensions researcher ID, that is returned as well).",
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'title': {
'type':
'string',
'description':
'Title of the grant in English (if the grant language is not English, this field contains a translation of the title).',
'long_description':
None,
'is_entity':
False,
'is_filter':
False,
'is_facet':
False
},
'funders': {
'type':
'organizations',
'description':
'The organisation funding the grant. This is normally a GRID organisation, but in very few cases a Dimensions funder ID is used.',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'grant_number': {
'type':
'string',
'description':
'Grant identifier, as provided by the source (e.g., funder, aggregator) the grant was derived from.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'funding_usd': {
'type': 'float',
'description': 'Funding amount awarded in USD.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': False
},
'linkout': {
'type': 'string',
'description': 'Original URL for the grant.',
'long_description': None,
'is_entity': False,
'is_filter': False,
'is_facet': False
},
'funding_chf': {
'type': 'float',
'description': 'Funding amount awarded in CHF.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': False
},
'category_icrp_cso': {
'type':
'categories',
'description':
'`ICRP Common Scientific Outline <https://app.dimensions.ai/browse/publication/cso>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'foa_number': {
'type':
'string',
'description':
'The funding opportunity announcement (FOA) number, where available e.g. for grants from the US National Institute of Health (NIH) or from the National Science Foundation (NSF).',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'date_inserted': {
'type':
'date',
'description':
'Date when the record was inserted into Dimensions (note: this field does not support exact match on the data, only range filters e.g. `<=` or `>=`).',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'category_icrp_ct': {
'type':
'categories',
'description':
'`ICRP Cancer Types <https://app.dimensions.ai/browse/publication/cancer_types>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'researchers': {
'type':
'researchers',
'description':
'Dimensions researchers IDs associated to the grant.',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'language_title': {
'type':
'string',
'description':
'ISO 639-1 language code for the original grant title.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
True
}
},
'fieldsets': ['all', 'basics', 'extras', 'categories'],
'metrics': {
'count': {
'name': 'count',
'description': 'Total count'
},
'funding': {
'name': 'funding',
'description': 'Total funding amount, in USD.'
}
},
'search_fields': [
'concepts', 'full_data', 'title_only', 'investigators',
'title_abstract_only'
]
},
'patents': {
'fields': {
'category_rcdc': {
'type':
'categories',
'description':
'`Research, Condition, and Disease Categorization <https://app.dimensions.ai/browse/publication/rcdc>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'category_hrcs_hc': {
'type':
'categories',
'description':
'`HRCS - Health Categories <https://app.dimensions.ai/browse/publication/hrcs_hc>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'additional_filters': {
'type':
'string',
'description':
"Additional filters describing the patents, e.g. whether it's about a 'Research Organisation', or it is part of the 'Orange Book'.",
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'assignee_countries': {
'type':
'countries',
'description':
'Country of the assignees of the patent, expressed as GeoNames code and name.',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'id': {
'type': 'string',
'description': 'Dimensions patent ID',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': False
},
'assignee_state_names': {
'type': 'string',
'description': 'State name of the assignee, as a string.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': True
},
'times_cited': {
'type':
'integer',
'description':
'The number of times the patent has been cited by other patents.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
True
},
'filing_status': {
'type':
'string',
'description':
"Filing Status of the patent e.g. 'Application' or 'Grant'.",
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'ipcr': {
'type':
'string',
'description':
'`International Patent Classification Reform Categorization <https://www.wipo.int/classifications/ipc/en/faq/>`_.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
True
},
'category_hra': {
'type':
'categories',
'description':
'`Health Research Areas <https://app.dimensions.ai/browse/publication/health_research_areas?redirect_path=/discover/publication>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'publication_date': {
'type': 'date',
'description': 'Date of publication of a patent.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': False
},
'category_for': {
'type':
'categories',
'description':
'`ANZSRC Fields of Research classification <https://app.dimensions.ai/browse/publication/for>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'category_hrcs_rac': {
'type':
'categories',
'description':
'`HRCS – Research Activity Codes <https://app.dimensions.ai/browse/publication/hrcs_rac>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'inventor_names': {
'type': 'string',
'description':
'Names of the people who invented the patent.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': False
},
'original_assignees': {
'type':
'organizations',
'description':
'GRID organisations that first owned the patent.',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'assignee_state_codes': {
'type':
'states',
'description':
'State of the assignee, expressed using GeoNames (ISO\u200c-3166-2) codes.',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'date': {
'type': 'date',
'description': 'Date when the patent was filed.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': False
},
'category_bra': {
'type':
'categories',
'description':
'`Broad Research Areas <https://app.dimensions.ai/browse/publication/broad_research_areas?redirect_path=/discover/publication>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'priority_year': {
'type':
'integer',
'description':
'The filing year of the earliest application of which priority is claimed.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
True
},
'abstract': {
'type': 'string',
'description': 'Abstract or description of the patent.',
'long_description': None,
'is_entity': False,
'is_filter': False,
'is_facet': False
},
'year': {
'type': 'integer',
'description': 'The year the patent was filed.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': True
},
'assignee_cities': {
'type':
'cities',
'description':
'City of the assignees of the patent, expressed as GeoNames ID and name.',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'granted_date': {
'type':
'date',
'description':
'The date on which the official body grants the patent.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'current_assignees': {
'type':
'organizations',
'description':
'GRID organisations currenlty owning the patent.',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'legal_status': {
'type':
'string',
'description':
"The legal status of the patent, e.g. 'Granted', 'Active', 'Abandoned' etc..",
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'priority_date': {
'type':
'date',
'description':
'The earliest filing date in a family of patent applications.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'current_assignee_names': {
'type':
'string',
'description':
'Names of the GRID organisations currently holding the patent.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'associated_grant_ids': {
'type':
'string',
'description':
'Dimensions IDs of the grants associated to the patent (see also: :ref:`patents_model` section).',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'category_icrp_ct': {
'type':
'categories',
'description':
'`ICRP Cancer Types <https://app.dimensions.ai/browse/publication/cancer_types>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'publication_ids': {
'type':
'string',
'description':
'Dimensions IDs of the publications related to this patent (see also: :ref:`patents_model` section).',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'assignees': {
'type':
'organizations',
'description':
'GRID organisations who own or have owned the rights of a patent (note: this is a combination of `current_assignees` and `original_assigness` fields).',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'original_assignee_names': {
'type':
'string',
'description':
'Name of the GRID organisation that first owned the patent.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'title': {
'type': 'string',
'description': 'The title of the patent.',
'long_description': None,
'is_entity': False,
'is_filter': False,
'is_facet': False
},
'publication_year': {
'type': 'integer',
'description': 'Year of publication of a patent.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': True
},
'funders': {
'type': 'organizations',
'description': 'GRID organisations funding the patent.',
'long_description': None,
'is_entity': True,
'is_filter': True,
'is_facet': True
},
'category_icrp_cso': {
'type':
'categories',
'description':
'`ICRP Common Scientific Outline <https://app.dimensions.ai/browse/publication/cso>`_',
'long_description':
None,
'is_entity':
True,
'is_filter':
True,
'is_facet':
True
},
'expiration_date': {
'type': 'date',
'description': 'Date when the patent expires.',
'long_description': None,
'is_entity': False,
'is_filter': True,
'is_facet': False
},
'date_inserted': {
'type':
'date',
'description':
'Date when the record was inserted into Dimensions (note: this field does not support exact match on the data, only range filters e.g. `<=` or `>=`).',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
False
},
'cpc': {
'type':
'string',
'description':
'`Cooperative Patent Classification Categorization <https://www.epo.org/searching-for-patents/helpful-resources/first-time-here/classification/cpc.html>`_.',
'long_description':
None,
'is_entity':
False,
'is_filter':
True,
'is_facet':
True
},
'cited_by_ids': {
'type':
'string',
'description':
'Dimensions IDs | |
= "(" + self.__who_am_i() + ") The passed argument " + \
"'data' is a " + str(type(data))
self.dp.v_debug(message)
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'conffile' is a " + str(type(conffile))
self.dp.v_debug(message)
# End of if self.debug :
for item in data:
if "item" in item:
key = "item"
label = "filesystem"
fs = self.__parse_find_file(item, key, label)
data_dict[fs] = {}
if "stdout" in item:
key = "stdout"
label = None
if self.debug:
message = "(" + self.__who_am_i() + ") Found " + \
"dictionary key 'stdout'."
self.dp.v_debug(message)
# End of if self.debug:
lines = self.__parse_find_file(item, key, label)
data_dict[fs]['filename'] = self.__parse_filefolder(
lines,
conffile,
'-rw',
self.__who_am_i())
# End of if "stdout" in item:
# End of if "item" in item:
# End of for item in data:
for k in data_dict:
if data_dict[k]['filename'] is None:
data_list.append(False)
else:
data_list.append(True)
# End of for k in data_dict:
if self.debug:
message = "(" + self.__who_am_i() + ") data_dict: "
self.dp.v_debug(message)
pprint(data_dict)
message = "(" + self.__who_am_i() + ") data_list: "
self.dp.v_debug(message)
pprint(data_list)
self.dp.v_debug("<<< End of " + self.__who_am_i())
# End of if self.debug:
return data_list
''' End of def iosxe_find_conf_file(self, data, conffile, debug=False): '''
def iosxe_find_image_file(self, data, binfile, debug=False):
''' iosxe_find_image_file
Args:
data: contains the output from the running IOS-XE command
binfile: passing string with name of binary file
debug: passing Boolean to enable/disable debug
'''
self.debug = debug
data_dict = {}
if self.debug:
self.dp.v_debug(">>> Start of " + self.__who_am_i())
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'data' is a " + str(type(data))
self.dp.v_debug(message)
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'binfile' is a " + str(type(binfile))
self.dp.v_debug(message)
# End of if self.debug :
for item in data:
if "item" in item:
key = "item"
label = "filesystem"
fs = self.__parse_find_file(item, key, label)
data_dict[fs] = {}
if "stdout" in item:
key = "stdout"
label = None
if self.debug:
message = "(" + self.__who_am_i() + ") Found " + \
"dictionary key 'stdout'."
self.dp.v_debug(message)
# End of if self.debug:
lines = self.__parse_find_file(item, key, label)
data_dict[fs]['filename'] = self.__parse_filefolder(
lines,
binfile,
'-rw',
self.__who_am_i())
# End of if "stdout" in item:
# End of if "item" in item:
# End of for item in data:
for fs in data_dict.copy():
if 'filename' in data_dict[fs]:
if data_dict[fs]['filename'] is None:
del data_dict[fs] # remove key from dictionary
# End of if data_dict[fs]['filename'] is None:
# End of if 'filename' in data_dict[fs]:
# End of for fs in data_dict.copy():
if self.debug:
message = "(" + self.__who_am_i() + ") data_dict: "
self.dp.v_debug(message)
pprint(data_dict)
self.dp.v_debug("<<< End of " + self.__who_am_i())
return data_dict
''' End of def iosxe_find_image_file(self, data, binfile, debug=False): '''
def iosxe_get_build_version(self, data, required_version, debug=False):
''' iosxe_install_cmd_usage function
Args:
data: contains the output from the running IOS-XE command
required_version: passing the IOS-XE version that is required
to have
debug: passing Boolean to enable/disable debug
'''
self.debug = debug
data_list = []
if self.debug:
self.dp.v_debug(">>> Start of " + self.__who_am_i())
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'data' is a " + str(type(data))
self.dp.v_debug(message)
message = "(" + self.__who_am_i() + ") The passed argument " + \
"'required_version' is a " + str(type(required_version))
self.dp.v_debug(message)
# End of if self.debug :
for item in data:
flag = False
if "stdout" in item:
key = "stdout"
label = ""
lines = self.__parse_find_file(item, key, label)
for line in lines:
line = line.strip()
cond = re.match('^(# pkginfo: Build:)\\s+('
+ re.escape(required_version) + ').*', line)
if cond:
flag = True
if self.debug:
message = "(" + self.__who_am_i() + ") Found " + \
"line: " + line
self.dp.v_debug(message)
# End of if self.debug:
# End of if cond:
# End of for line in lines:
data_list.append(flag)
# End of if "stdout" in item:
# End of for item in data:
if self.debug:
message = "(" + self.__who_am_i() + ") data_list: "
self.dp.v_debug(message)
pprint(data_list)
self.dp.v_debug("<<< End of " + self.__who_am_i())
# End of if self.debug:
return data_list
''' End of def iosxe_get_build_version( self, data, required_version,
debug=False): '''
def iosxe_install_cmd_usage(self, use_command, install_flag, request_flag,
software_flag, debug=False):
''' iosxe_install_cmd_usage function
Args:
use_command: empty string
install_flag: Boolean object
request_flag: Boolean object
software_flag: Boolean object
debug: passing Boolean to enable/disable debug
'''
self.debug = debug
use_command = ""
if self.debug:
self.dp.v_debug(">>> Start of " + self.__who_am_i())
cond = (isinstance(install_flag, bool)
and isinstance(request_flag, bool)
and isinstance(software_flag, bool))
if cond:
if install_flag:
use_command = "install"
elif request_flag:
use_command = "request"
elif software_flag:
use_command = "software"
# End of if cond:
if self.debug:
message = "(" + self.__who_am_i() + ") use_command: " + use_command
self.dp.v_debug(message)
self.dp.v_debug("<<< End of " + self.__who_am_i())
return use_command
''' End of def iosxe_install_cmd_usage( self, use_command, install_flag,
request_flag, software_flag,
debug=False): '''
def iosxe_parse_inactive(self, data, required_version, debug=False):
    ''' iosxe_parse_inactive

    Scans the command output for a line containing required_version.

    Args:
        data: contains the output from the used Cisco command
        required_version: passing the string to match with (matched
            literally anywhere in a line)
        debug: passing Boolean to enable/disable debug

    Returns:
        bool: True if any output line contains required_version,
        otherwise False.
    '''
    self.debug = debug
    flag = False
    if self.debug:
        self.dp.v_debug(">>> Start of " + self.__who_am_i())
        for arg_name, arg_val in (("data", data),
                                  ("required_version", required_version)):
            self.dp.v_debug("(" + self.__who_am_i()
                            + ") The passed argument '" + arg_name
                            + "' is a " + str(type(arg_val)))
    raw_list = self.__stdout(data, 'iosxe_parse_inactive')
    # Hoist the patterns out of the loop; the version is escaped so it
    # is matched literally.
    none_re = re.compile('^(No Inactive Packages).*')
    version_re = re.compile('.*(' + re.escape(required_version) + ').*')
    for raw_line in raw_list:
        stripped = raw_line.strip()
        if none_re.match(stripped) and self.debug:
            self.dp.v_debug("(" + self.__who_am_i() + ") Found line: "
                            + stripped)
        if version_re.match(stripped):
            flag = True
            if self.debug:
                self.dp.v_debug("(" + self.__who_am_i() + ") Found line: "
                                + stripped)
    if self.debug:
        self.dp.v_debug("<<< End of " + self.__who_am_i())
    return flag
def parse_filesystem_list(self, data, debug=False):
    ''' parse_filesystem_list

    Builds a mapping of item label -> parsed filesystem space from the
    per-device command output.

    Args:
        data: contains the output from the used Cisco command; a list of
            result dicts that may carry 'item' and 'stdout' keys
        debug: passing Boolean to enable/disable debug

    Returns:
        dict: item label mapped to the value extracted by
        self.__parse_filesystem_space; empty when data isn't a list or
        no entry has both pieces of information.
    '''
    self.debug = debug
    data_dict = {}
    if self.debug:
        self.dp.v_debug(">>> Start of " + self.__who_am_i())
    if not isinstance(data, list):
        self.dp.v_error("(" + self.__who_am_i() + ") Passed argument "
                        + "'data' isn't a list.")
        return data_dict
    for entry in data:
        label = entry['item'] if "item" in entry else None
        space = None
        if "stdout" in entry:
            out_lines = entry['stdout'][0].split('\n')
            space = self.__parse_filesystem_space(out_lines)
        # Record the entry only when both halves were found.
        if label is not None and space is not None:
            data_dict[label] = space
    if self.debug:
        self.dp.v_debug("(" + self.__who_am_i() + ") data_dict = ")
        pprint(data_dict)
        self.dp.v_debug("<<< End of " + self.__who_am_i())
    return data_dict
def parse_data_for_deletion(self, data, running_ios, required_ios,
debug=False):
''' ios_parse_data_for_deletion
Args:
data: contains the output from the used Cisco command
running_ios: running IOS software image name
required_ios: required IOS software image name
debug: passing Boolean to enable/disable debug
'''
self.debug = debug
new_list = []
if self.debug:
self.dp.v_debug("Start of " + self.__who_am_i())
# End of if self.debug
if ":" in running_ios:
running_ios = running_ios.split(":")[-1]
# End of if ":" in running_ios:
if "/" in running_ios:
running_ios = running_ios.split("/")[-1]
# End of if "/" in running_ios:
if ":" in required_ios:
required_ios = required_ios.split(":")[-1]
# End of if ":" in required_ios:
if "/" in required_ios:
required_ios = required_ios.split("/")[-1]
# End of if "/" in required_ios:
# Build a dictionary
kwargs = {}
kwargs['running_ios'] = running_ios
kwargs['running_dir'] = re.sub('(.bin)$', '', running_ios)
kwargs['required_ios'] = required_ios
kwargs['required_dir'] = re.sub('(.bin)$', '', required_ios)
| |
\"-d, --densityMapFile \" to specify density map file name and try again." % OptionsInfo["Infile"])
DensityMapFile = None
MapFileRoot = "emd_%s" % EMDBID
MapFile1 = "%s.map.gz" % MapFileRoot
MapFile2 = "%s.map" % MapFileRoot
if os.path.exists(MapFile1):
DensityMapFile = MapFile1
elif os.path.exists(MapFile2):
DensityMapFile = MapFile2
else:
MiscUtil.PrintError("Density map files %s or %s don't exist. Use option \"-d, --densityMapFile \" to specify density map file name and try again" % (MapFile1, MapFile2))
MiscUtil.PrintInfo("Setting density map file name as %s..." % DensityMapFile)
return DensityMapFile
def RetrieveMeshLevel():
    """Retrieve recommended mesh contour level.

    Looks up the EMDB ID for the input file, then parses the locally
    downloaded metadata header file (emd-<ID>.xml) for the
    map/contourLevel element and its "source" attribute.

    Returns:
        The contour level text from the header file, or None when the
        EMDB ID or the local header file is unavailable or the element
        is missing.
    """
    MeshLevel = None
    EMDBID = RetrieveEMDBID()
    if EMDBID is None:
        MiscUtil.PrintWarning("Failed to retrieve EMDB ID from input file %s to detect local header file already downloaded from EMDB server..." % OptionsInfo["Infile"])
        return MeshLevel
    MetadataHeaderFile = "emd-%s.xml" % (EMDBID)
    if not os.path.exists(MetadataHeaderFile):
        MiscUtil.PrintWarning("Failed to find a local header file, %s, for EMDB ID %s..." % (MetadataHeaderFile, EMDBID))
        return MeshLevel
    MiscUtil.PrintInfo("Retrieving recommeded mesh contour level from header file %s..." % MetadataHeaderFile)
    ContourLevel = None
    Source = None
    # find() on the tree searches from the root element, so the root
    # itself doesn't need to be fetched separately.
    XMLTree = ElementTree.parse(MetadataHeaderFile)
    MapElement = XMLTree.find("map")
    if MapElement is not None:
        ContourLevelElement = MapElement.find("contourLevel")
        if ContourLevelElement is not None:
            ContourLevel = ContourLevelElement.text
            Source = ContourLevelElement.get("source")
    if ContourLevel is not None:
        if Source is None:
            Source = "NA"
        MiscUtil.PrintInfo("Setting mesh level to recommended (Source: %s) mesh contour level value of %s..." % (Source, ContourLevel))
        MeshLevel = ContourLevel
    return MeshLevel
def RetrieveEMDBID():
    """Retrieve EMDB ID from input file, caching the result in OptionsInfo."""
    if "EMDBID" in OptionsInfo:
        # Already looked up once; reuse the cached value (may be None).
        return OptionsInfo["EMDBID"]
    Infile = OptionsInfo["Infile"]
    FileExt = MiscUtil.ParseFileName(Infile)[2]
    # Dispatch on the (case-insensitive) file extension.
    Extractors = {"pdb": RetriveEMDBIDFromPDBFile,
                  "cif": RetriveEMDBIDFromCIFFile}
    Extractor = Extractors.get(FileExt.lower())
    EMDBID = Extractor(Infile) if Extractor is not None else None
    OptionsInfo["EMDBID"] = EMDBID
    return EMDBID
def RetriveEMDBIDFromPDBFile(Infile):
    """Retrieve EMDB ID from PDB file.

    Scans REMARK records for a "DB: EMDB" reference and returns the ID
    portion of the first "EMD-<ID>" token found.

    Arguments:
        Infile (str): PDB file name.

    Returns:
        EMDB ID string, or None when the file contains no reference.
    """
    InfileFH = open(Infile, "r")
    MiscUtil.PrintInfo("\nRetrieving EMDB ID from input file %s..." % Infile)
    EMDBID = None
    # "with" guarantees the handle is closed even if parsing raises.
    # The old "if InfileFH is None" check was dead code: open() raises
    # OSError on failure, it never returns None.
    with InfileFH:
        for Line in InfileFH:
            Line = Line.rstrip()
            if re.match("^REMARK", Line, re.I) and re.search("DB: EMDB", Line, re.I):
                for Word in Line.split(" "):
                    # Retrieve string with EMD- prefix...
                    if re.search("EMD-", Word, re.I):
                        # Strip the prefix case-insensitively, matching
                        # the case-insensitive search above (the old
                        # case-sensitive sub left "emd-" prefixes intact).
                        EMDBID = re.sub("EMD-", "", Word.strip(), flags=re.I)
                        break
                break
    return EMDBID
def RetriveEMDBIDFromCIFFile(Infile):
    """Retrieve EMDB ID from CIF file.

    Scans for a line starting with "EMDB EMD" and returns the ID portion
    of the first "EMD-<ID>" token on it.

    Arguments:
        Infile (str): CIF file name.

    Returns:
        EMDB ID string, or None when the file contains no reference.
    """
    InfileFH = open(Infile, "r")
    MiscUtil.PrintInfo("\nRetrieving EMDB ID from input file %s..." % Infile)
    EMDBID = None
    # "with" guarantees the handle is closed even if parsing raises.
    # The old "if InfileFH is None" check was dead code: open() raises
    # OSError on failure, it never returns None.
    with InfileFH:
        for Line in InfileFH:
            Line = Line.rstrip()
            if re.match("^EMDB EMD", Line, re.I):
                for Word in Line.split(" "):
                    # Retrieve string with EMD- prefix...
                    if re.search("EMD-", Word, re.I):
                        # Case-insensitive strip, matching the search above.
                        EMDBID = re.sub("EMD-", "", Word.strip(), flags=re.I)
                        break
                break
    return EMDBID
def ProcessOptions():
    """Process and validate command line arguments and options.

    Copies the docopt Options dict into the module-level OptionsInfo
    dict, converting yes/no strings to booleans and numeric strings to
    int/float, deriving output file names, and resolving "auto" values
    for the density map file and mesh level.
    """
    MiscUtil.PrintInfo("Processing options...")
    # Validate options...
    ValidateOptions()
    # Yes/No text options become booleans (case-insensitive match).
    OptionsInfo["AllowEmptyObjects"] = True if re.match("^Yes$", Options["--allowEmptyObjects"], re.I) else False
    OptionsInfo["BFactorChainCartoonPutty"] = True if re.match("^Yes$", Options["--BFactorChainCartoonPutty"], re.I) else False
    OptionsInfo["BFactorColorPalette"] = Options["--BFactorColorPalette"]
    OptionsInfo["Infile"] = Options["--infile"]
    FileDir, FileName, FileExt = MiscUtil.ParseFileName(OptionsInfo["Infile"])
    OptionsInfo["InfileRoot"] = FileName
    OptionsInfo["Overwrite"] = Options["--overwrite"]
    OptionsInfo["PMLOut"] = True if re.match("^Yes$", Options["--PMLOut"], re.I) else False
    OptionsInfo["Outfile"] = Options["--outfile"]
    # Derive sibling output file names from the outfile extension.
    FileDir, FileName, FileExt = MiscUtil.ParseFileName(OptionsInfo["Outfile"])
    OptionsInfo["PSEOut"] = False
    if re.match("^pml$", FileExt, re.I):
        OptionsInfo["PMLOutfile"] = OptionsInfo["Outfile"]
        # NOTE(review): "PMEOutfile"/".pme" looks inconsistent with the
        # PSE naming used in the branch below — confirm intended.
        # NOTE(review): the "." in ".pml$" is an unescaped regex dot, so
        # any character before "pml" matches — harmless here but worth
        # confirming.
        OptionsInfo["PMEOutfile"] = re.sub(".pml$", ".pme", OptionsInfo["Outfile"])
    elif re.match("^pse$", FileExt, re.I):
        OptionsInfo["PSEOut"] = True
        OptionsInfo["PSEOutfile"] = OptionsInfo["Outfile"]
        # A PSE session is built by first writing an intermediate PML file.
        OptionsInfo["PMLOutfile"] = re.sub(".pse$", ".pml", OptionsInfo["Outfile"])
        if os.path.exists(OptionsInfo["PMLOutfile"]) and (not OptionsInfo["Overwrite"]):
            MiscUtil.PrintError("The intermediate output file to be generated, %s, already exist. Use option \"--ov\" or \"--overwrite\" and try again." % OptionsInfo["PMLOutfile"] )
    OptionsInfo["LabelFontID"] = int(Options["--labelFontID"])
    # Process mesh parameters...
    OptionsInfo["MeshCarveRadius"] = float(Options["--meshCarveRadius"])
    OptionsInfo["MeshComplex"] = True if re.match("^Yes$", Options["--meshComplex"], re.I) else False
    OptionsInfo["MeshChainComplex"] = Options["--meshChainComplex"]
    OptionsInfo["MeshWidth"] = float(Options["--meshWidth"])
    OptionsInfo["MeshColor"] = Options["--meshColor"]
    OptionsInfo["SurfaceComplex"] = True if re.match("^Yes$", Options["--surfaceComplex"], re.I) else False
    OptionsInfo["SurfaceChainComplex"] = Options["--surfaceChainComplex"]
    OptionsInfo["SurfaceTransparency"] = float(Options["--surfaceTransparency"])
    OptionsInfo["PocketContactsLigandColor"] = Options["--pocketContactsLigandColor"]
    OptionsInfo["PocketContactsSolventColor"] = Options["--pocketContactsSolventColor"]
    OptionsInfo["PocketContactsInorganicColor"] = Options["--pocketContactsInorganicColor"]
    OptionsInfo["PocketDistanceCutoff"] = float(Options["--pocketDistanceCutoff"])
    OptionsInfo["PocketLabelColor"] = Options["--pocketLabelColor"]
    OptionsInfo["PocketSurface"] = True if re.match("^Yes$", Options["--pocketSurface"], re.I) else False
    # "auto" resolves the map file name from files present locally.
    DensityMapFile = Options["--densityMapFile"]
    if re.match("^auto$", DensityMapFile, re.I):
        DensityMapFile = RetrieveDensityMapFileName()
    OptionsInfo["DensityMapFile"] = Options["--densityMapFile"]
    OptionsInfo["DensityMapFileName"] = DensityMapFile
    # "auto" resolves the mesh level from the EMDB header; fall back to
    # 1.0 with a warning when the header is unavailable.
    MeshLevel = Options["--meshLevel"]
    if re.match("^auto$", MeshLevel, re.I):
        MeshLevel = RetrieveMeshLevel()
        if MeshLevel is None:
            MiscUtil.PrintWarning("Failed to retrieve recommended mesh contour level from header file. It's being set to 1.0. Use \"--meshLevel\" option to specify a different contour mesh level.")
            MeshLevel = 1.0
    OptionsInfo["MeshLevel"] = float(MeshLevel)
    # Process specified chains and ligands...
    OptionsInfo["ChainIDs"] = Options["--chainIDs"]
    OptionsInfo["LigandIDs"] = Options["--ligandIDs"]
    ProcessChainAndLigandIDs()
def RetrieveOptions():
    """Parse the command line into the module-level Options dictionary."""
    global Options
    Options = docopt(_docoptUsage_)
    # Switch to the requested working directory before any file access.
    if Options["--workingdir"]:
        os.chdir(Options["--workingdir"])
    # The examples switch prints usage examples and exits immediately.
    if Options.get("--examples"):
        MiscUtil.PrintInfo(MiscUtil.GetExamplesTextFromDocOptText(_docoptUsage_))
        sys.exit(0)
def ValidateOptions():
    """Validate option values.

    Each check aborts the script with an error message (via MiscUtil)
    when the corresponding option value is invalid.
    """
    # Yes/No toggles...
    MiscUtil.ValidateOptionTextValue("--allowEmptyObjects", Options["--allowEmptyObjects"], "yes no")
    MiscUtil.ValidateOptionTextValue("--BFactorChainCartoonPutty", Options["--BFactorChainCartoonPutty"], "yes no")
    # The density map file is only checked when not left to "auto" detection.
    if not re.match("^auto$", Options["--densityMapFile"], re.I):
        MiscUtil.ValidateOptionFilePath("-d, --densityMapFile", Options["--densityMapFile"])
        MiscUtil.ValidateOptionFileExt("-d, --densityMapFile", Options["--densityMapFile"], "map map.gz")
    # Input/output files: existence, extensions, overwrite, distinctness...
    MiscUtil.ValidateOptionFilePath("-i, --infile", Options["--infile"])
    MiscUtil.ValidateOptionFileExt("-i, --infile", Options["--infile"], "pdb cif")
    MiscUtil.ValidateOptionFileExt("-o, --outfile", Options["--outfile"], "pml pse")
    MiscUtil.ValidateOptionsOutputFileOverwrite("-o, --outfile", Options["--outfile"], "--overwrite", Options["--overwrite"])
    MiscUtil.ValidateOptionsDistinctFileNames("-i, --infile", Options["--infile"], "-o, --outfile", Options["--outfile"])
    # Numeric options...
    MiscUtil.ValidateOptionIntegerValue("--labelFontID", Options["--labelFontID"], {})
    MiscUtil.ValidateOptionFloatValue("--meshCarveRadius", Options["--meshCarveRadius"], {">": 0.0})
    MiscUtil.ValidateOptionTextValue("--meshComplex", Options["--meshComplex"], "yes no")
    MiscUtil.ValidateOptionTextValue("--meshChainComplex", Options["--meshChainComplex"], "yes no auto")
    MiscUtil.ValidateOptionFloatValue("--meshWidth", Options["--meshWidth"], {">": 0.0})
    # The mesh level is only checked when not left to "auto" detection.
    if not re.match("^auto$", Options["--meshLevel"], re.I):
        MiscUtil.ValidateOptionFloatValue("--meshLevel", Options["--meshLevel"], {})
    MiscUtil.ValidateOptionTextValue("--PMLOut", Options["--PMLOut"], "yes no")
    MiscUtil.ValidateOptionFloatValue("--pocketDistanceCutoff", Options["--pocketDistanceCutoff"], {">": 0.0})
    MiscUtil.ValidateOptionTextValue("--pocketSurface", Options["--pocketSurface"], "yes no")
    MiscUtil.ValidateOptionTextValue("--surfaceComplex", Options["--surfaceComplex"], "yes no")
    MiscUtil.ValidateOptionTextValue("--surfaceChainComplex", Options["--surfaceChainComplex"], "yes no auto")
    # Transparency is a fraction in [0, 1].
    MiscUtil.ValidateOptionFloatValue("--surfaceTransparency", Options["--surfaceTransparency"], {">=": 0.0, "<=": 1.0})
# Setup a usage string for docopt...
_docoptUsage_ = """
PyMOLVisualizeCryoEMDensity.py - Visualize cryo-EM density
Usage:
PyMOLVisualizeCryoEMDensity.py [--allowEmptyObjects <yes or no>]
[--BFactorChainCartoonPutty <yes or no>] [--BFactorColorPalette <text> ]
[--chainIDs <First, All or ID1,ID2...>] [--densityMapFile <text>]
[--ligandIDs <Largest, All or ID1,ID2...>] [--labelFontID <number>]
[--meshCarveRadius <number>] [--meshComplex <yes or no>]
[--meshChainComplex <yes, no, or auto>] [--meshColor <text>]
[--meshLevel <number>] [--meshWidth <number>] [--PMLOut <yes or no>]
[--pocketContactsLigandColor <text>] [--pocketContactsSolventColor <text>]
[--pocketContactsInorganicColor <text>] [--pocketDistanceCutoff <number>]
[--pocketLabelColor <text>] [--pocketSurface <yes or no>]
[--surfaceComplex <yes or no>] [--surfaceChainComplex <yes, no or auto>]
[--surfaceTransparency <number>] [--overwrite] [-w <dir>] -i <infile> -o <outfile>
PyMOLVisualizeCryoEMDensity.py -h | --help | -e | --examples
Description:
Generate PyMOL visualization files for viewing electron microscopy (EM) or
cryo-EM density around chains, ligands, and ligand binding pockets in
macromolecules including proteins and nucleic acids.
The supported input file formats are: Macromolecule - PDB (.pdb) or CIF(.cif),
Cryo-EM Density - Collaborative Computational Project Number 4 (CCP4) ( .map)
The supported output file formats are: PyMOL script file (.pml), PyMOL session
file (.pse)
The cryo-EM density and header files along with PDB files may be downloaded
from appropriate servers using DownloadPDBFiles.pl script.
A variety of PyMOL groups and objects may be created for visualization of
cryo-EM density present in map files. These groups and objects correspond to
maps, meshes, surfaces,chains, ligands, inorganics, ligand binding pockets,
pocket, polar interactions, and pocket hydrophobic surfaces. A complete
hierarchy of all possible PyMOL groups and objects is shown below:
<PDBFileRoot>
.Complex
.Complex
.CryoEM
.Map
.Mesh
.Surface
.Chain<ID>
.Complex
.Complex
.CryoEM
.Mesh
.Surface
.Chain
.Chain
.BFactor
.Solvent
.Inorganic
.Ligand<ID>
.Ligand
.Ligand
.CryoEM
.Mesh
.Surface
.Pocket
.Pocket
.CryoEM
.Mesh
.Surface
.Polar_Contacts
.Surface
.Pocket_Solvent
.Pocket_Solvent
.CryoEM
.Mesh
.Surface
.Polar_Contacts
.Pocket_Inorganic
.Pocket_Inorganic
.CryoEM
.Mesh
.Surface
.Polar_Contacts
.Ligand<ID>
.Ligand
... ... ...
.Pocket
... ... ...
.Pocket_Solvent
... ... ...
.Pocket_Inorganic
... ... ...
.Chain<ID>
... ... ...
.Ligand<ID>
... ... ...
.Ligand<ID>
... ... ...
.Chain<ID>
... ... ...
The meshes and surfaces are not created for complete complex in input file
by default. A word to the wise: The creation of these surface and mesh objects
may slow down loading of PML file and generation of PSE file, based on the size
of input complex and map files. The generation of PSE file may also fail. In
addition, you may want to interactively manipulate the contour level for meshes
and surfaces. The recommended value for contour level is automatically retrieved
from the header file available from EM density server. | |
means sorted oldest to latest, False opposite
:param max_ticks: Only the most recent max_ticks trades. Default None
:param timeout: Wait upto timeout seconds. Default None
:return: A numpy array of dtype HistoryConn.tick_type
HTT,[Symbol],[BeginDate BeginTime],[EndDate EndTime],[MaxDatapoints],
[BeginFilterTime],[EndFilterTime],[DataDirection],[RequestID],
[DatapointsPerSend]<CR><LF>
"""
req_id = self._get_next_req_id()
self._setup_request_data(req_id)
bp_str = fr.datetime_to_yyyymmdd_hhmmss(bgn_prd)
ep_str = fr.datetime_to_yyyymmdd_hhmmss(end_prd)
bf_str = fr.time_to_hhmmss(bgn_flt)
ef_str = fr.time_to_hhmmss(end_flt)
mt_str = fr.blob_to_str(max_ticks)
pts_per_batch = 100
if max_ticks is not None:
pts_per_batch = min((max_ticks, 100))
req_cmd = ("HTT,%s,%s,%s,%s,%s,%s,%d,%s,%d\r\n" % (
ticker, bp_str, ep_str, mt_str, bf_str, ef_str, ascend, req_id,
pts_per_batch))
self._send_cmd(req_cmd)
self._req_event[req_id].wait(timeout=timeout)
data = self._read_ticks(req_id)
if data.dtype == object:
iqfeed_err = str(data[0])
err_msg = "Request: %s, Error: %s" % (req_cmd, iqfeed_err)
if iqfeed_err == '!NO_DATA!':
raise NoDataError(err_msg)
elif iqfeed_err == "Unauthorized user ID.":
raise UnauthorizedError(err_msg)
else:
raise RuntimeError(err_msg)
else:
return data
def _read_bars(self, req_id: str) -> np.array:
"""Get buffer for req_id and transform to a numpy array of bars."""
res = self._get_data_buf(req_id)
if res.failed:
return np.array([res.err_msg], dtype='object')
else:
data = np.empty(res.num_pts, HistoryConn.bar_type)
line_num = 0
while res.raw_data and (line_num < res.num_pts):
dl = res.raw_data.popleft()
(dt, tm) = fr.read_posix_ts(dl[1])
data[line_num]['date'] = dt
data[line_num]['time'] = tm
data[line_num]['high_p'] = np.float64(dl[2])
data[line_num]['low_p'] = np.float64(dl[3])
data[line_num]['open_p'] = np.float64(dl[4])
data[line_num]['close_p'] = np.float64(dl[5])
data[line_num]['tot_vlm'] = np.int64(dl[6])
data[line_num]['prd_vlm'] = np.int64(dl[7])
data[line_num]['num_trds'] = np.int64(dl[8])
line_num += 1
if line_num >= res.num_pts:
assert len(res.raw_data) == 0
if len(res.raw_data) == 0:
assert line_num >= res.num_pts
return data
def request_bars(self, ticker: str, interval_len: int, interval_type: str,
                 max_bars: int, ascend: bool = False,
                 timeout: int = None) -> np.array:
    """
    Get max_bars number of bars of bar_data from IQFeed.

    :param ticker: Ticker symbol
    :param interval_len: Bar interval length, in interval_type units
    :param interval_type: 's' = secs, 'v' = volume, 't' = ticks
    :param max_bars: Only the most recent max_bars bars
    :param ascend: Sort-direction flag (sent as DataDirection)
    :param timeout: Wait no more than timeout secs. Default None
    :return: A numpy array with dtype HistoryConn.bar_type
    :raises NoDataError, UnauthorizedError, RuntimeError: on feed errors

    Non-second intervals are only available for the same period as
    tick-data; consider building your own bars from ticks instead.

    Wire format:
    HIX,[Symbol],[Interval],[MaxDatapoints],[DataDirection],[RequestID],
    [DatapointsPerSend],[IntervalType]<CR><LF>
    """
    assert interval_type in ('s', 'v', 't')
    req_id = self._get_next_req_id()
    self._setup_request_data(req_id)
    bars_per_batch = min(100, max_bars)
    req_cmd = "HIX,%s,%d,%d,%d,%s,%d,%s\r\n" % (
        ticker, interval_len, max_bars, ascend, req_id, bars_per_batch,
        interval_type)
    self._send_cmd(req_cmd)
    self._req_event[req_id].wait(timeout=timeout)
    data = self._read_bars(req_id)
    if data.dtype != object:
        return data
    # An object-dtype result carries the feed's error message.
    iqfeed_err = str(data[0])
    err_msg = "Request: %s, Error: %s" % (req_cmd, iqfeed_err)
    if iqfeed_err == '!NO_DATA!':
        raise NoDataError(err_msg)
    if iqfeed_err == "Unauthorized user ID.":
        raise UnauthorizedError(err_msg)
    raise RuntimeError(err_msg)
def request_bars_for_days(self, ticker: str, interval_len: int,
                          interval_type: str, days: int,
                          bgn_flt: datetime.time = None,
                          end_flt: datetime.time = None,
                          ascend: bool = False, max_bars: int = None,
                          timeout: int = None) -> np.array:
    """
    Get bars for the previous N days.

    :param ticker: Ticker symbol
    :param interval_len: Bar interval length, in interval_type units
    :param interval_type: 's' = secs, 'v' = volume, 't' = ticks
    :param days: Number of days to get bars for
    :param bgn_flt: Each day's data starting at bgn_flt
    :param end_flt: Each day's data no later than end_flt
    :param ascend: Sort-direction flag (sent as DataDirection)
    :param max_bars: Only the most recent max_bars bars. Default None
    :param timeout: Wait no more than timeout secs. Default None
    :return: A numpy array with dtype HistoryConn.bar_type
    :raises NoDataError, UnauthorizedError, RuntimeError: on feed errors

    Non-second intervals are only available for the same period as
    tick-data; consider building your own bars from ticks instead.

    Wire format:
    HID,[Symbol],[Interval],[Days],[MaxDatapoints],[BeginFilterTime],
    [EndFilterTime],[DataDirection],[RequestID],[DatapointsPerSend],
    [IntervalType]<CR><LF>
    """
    assert interval_type in ('s', 'v', 't')
    req_id = self._get_next_req_id()
    self._setup_request_data(req_id)
    bars_per_batch = 100 if max_bars is None else min(100, max_bars)
    req_cmd = "HID,%s,%d,%d,%s,%s,%s,%d,%s,%d,%s\r\n" % (
        ticker, interval_len, days, fr.blob_to_str(max_bars),
        fr.time_to_hhmmss(bgn_flt), fr.time_to_hhmmss(end_flt),
        ascend, req_id, bars_per_batch, interval_type)
    self._send_cmd(req_cmd)
    self._req_event[req_id].wait(timeout=timeout)
    data = self._read_bars(req_id)
    if data.dtype != object:
        return data
    # An object-dtype result carries the feed's error message.
    iqfeed_err = str(data[0])
    err_msg = "Request: %s, Error: %s" % (req_cmd, iqfeed_err)
    if iqfeed_err == '!NO_DATA!':
        raise NoDataError(err_msg)
    if iqfeed_err == "Unauthorized user ID.":
        raise UnauthorizedError(err_msg)
    raise RuntimeError(err_msg)
def request_bars_in_period(self, ticker: str, interval_len: int,
                           interval_type: str, bgn_prd: datetime.datetime,
                           end_prd: datetime.datetime,
                           bgn_flt: datetime.time = None,
                           end_flt: datetime.time = None,
                           ascend: bool = False, max_bars: int = None,
                           timeout: int = None) -> np.array:
    """
    Get bars for a specific period.

    :param ticker: Ticker symbol
    :param interval_len: Bar interval length, in interval_type units
    :param interval_type: 's' = secs, 'v' = volume, 't' = ticks
    :param bgn_prd: Start of the period
    :param end_prd: End of the period
    :param bgn_flt: Each day's data starting at bgn_flt
    :param end_flt: Each day's data no later than end_flt
    :param ascend: Sort-direction flag (sent as DataDirection)
    :param max_bars: Only the most recent max_bars bars. Default None
    :param timeout: Wait no more than timeout secs. Default None
    :return: A numpy array with dtype HistoryConn.bar_type
    :raises NoDataError, UnauthorizedError, RuntimeError: on feed errors

    Non-second intervals are only available for the same period as
    tick-data; consider building your own bars from ticks instead.

    Wire format:
    HIT,[Symbol],[Interval],[BeginDate BeginTime],[EndDate EndTime],
    [MaxDatapoints],[BeginFilterTime],[EndFilterTime],[DataDirection],
    [RequestID],[DatapointsPerSend],[IntervalType]<CR><LF>
    """
    assert interval_type in ('s', 'v', 't')
    req_id = self._get_next_req_id()
    self._setup_request_data(req_id)
    bars_per_batch = 100 if max_bars is None else min(100, max_bars)
    req_cmd = "HIT,%s,%d,%s,%s,%s,%s,%s,%d,%s,%d,%s\r\n" % (
        ticker, interval_len,
        fr.datetime_to_yyyymmdd_hhmmss(bgn_prd),
        fr.datetime_to_yyyymmdd_hhmmss(end_prd),
        fr.blob_to_str(max_bars),
        fr.time_to_hhmmss(bgn_flt), fr.time_to_hhmmss(end_flt),
        ascend, req_id, bars_per_batch, interval_type)
    self._send_cmd(req_cmd)
    self._req_event[req_id].wait(timeout=timeout)
    data = self._read_bars(req_id)
    if data.dtype != object:
        return data
    # An object-dtype result carries the feed's error message.
    iqfeed_err = str(data[0])
    err_msg = "Request: %s, Error: %s" % (req_cmd, iqfeed_err)
    if iqfeed_err == '!NO_DATA!':
        raise NoDataError(err_msg)
    if iqfeed_err == "Unauthorized user ID.":
        raise UnauthorizedError(err_msg)
    raise RuntimeError(err_msg)
def _read_daily_data(self, req_id: str) -> np.array:
"""Get buffer for req_id and convert to a numpy array of daily data."""
res = self._get_data_buf(req_id)
if res.failed:
return np.array([res.err_msg], dtype='object')
else:
data = np.empty(res.num_pts, HistoryConn.daily_type)
line_num = 0
while res.raw_data and (line_num < res.num_pts):
dl = res.raw_data.popleft()
data[line_num]['date'] = np.datetime64(dl[1], 'D')
data[line_num]['high_p'] = np.float64(dl[2])
data[line_num]['low_p'] = np.float64(dl[3])
data[line_num]['open_p'] = np.float64(dl[4])
data[line_num]['close_p'] = np.float64(dl[5])
data[line_num]['prd_vlm'] = np.uint64(dl[6])
data[line_num]['open_int'] = np.uint64(dl[7])
line_num += 1
if line_num >= res.num_pts:
assert len(res.raw_data) == 0
if len(res.raw_data) == 0:
assert line_num >= res.num_pts
return data
def request_daily_data(self, ticker: str, num_days: int,
                       ascend: bool = False, timeout: int = None):
    """
    Request daily bars for the previous num_days.

    :param ticker: Symbol
    :param num_days: Number of days. 1 means today only.
    :param ascend: Sort-direction flag (sent as DataDirection)
    :param timeout: Wait timeout seconds. Default None
    :return: A numpy array with dtype HistoryConn.daily_type
    :raises NoDataError, UnauthorizedError, RuntimeError: on feed errors

    Wire format:
    HDX,[Symbol],[MaxDatapoints],[DataDirection],[RequestID],
    [DatapointsPerSend]<CR><LF>
    """
    req_id = self._get_next_req_id()
    self._setup_request_data(req_id)
    pts_per_batch = min(100, num_days)
    req_cmd = "HDX,%s,%d,%d,%s,%d\r\n" % (
        ticker, num_days, ascend, req_id, pts_per_batch)
    self._send_cmd(req_cmd)
    self._req_event[req_id].wait(timeout=timeout)
    data = self._read_daily_data(req_id)
    if data.dtype != object:
        return data
    # An object-dtype result carries the feed's error message.
    iqfeed_err = str(data[0])
    err_msg = "Request: %s, Error: %s" % (req_cmd, iqfeed_err)
    if iqfeed_err == '!NO_DATA!':
        raise NoDataError(err_msg)
    if iqfeed_err == "Unauthorized user ID.":
        raise UnauthorizedError(err_msg)
    raise RuntimeError(err_msg)
def request_daily_data_for_dates(self, ticker: str, bgn_dt: datetime.date,
end_dt: datetime.date,
ascend: bool = False, max_days: int =
None,
timeout: | |
0
for line in content:
line = line.split()
if len(line) > 1:
if line[0] == 'Data_store:':
# If another Data Store detected we ensure to keep same
# length of all the elements in the dictionary
self._data_stores = \
self._add_none_to_empty_fields(self._data_stores)
pos = self._how_many_brackets_following(line[2:]) + 2
if line[pos].upper() == 'SELECTIVE':
pos += 1
self._data_stores['Label'].append(' '.join(line[1:pos]))
prev_pos, pos = pos, \
self._how_many_brackets_following(
line[pos + 1:]) + pos + 1
self._data_stores['Memory size'].append(
' '.join(line[prev_pos:pos]))
prev_pos, pos = pos, \
self._how_many_brackets_following(
line[pos + 1:]) + pos + 1
self._data_stores['Packet size'].append(
' '.join(line[prev_pos:pos]))
if len(line) > pos:
if '#' in line[pos]:
self._data_stores['Comment'].append(
' '.join(line[pos:]))
continue
else:
self._data_stores['Priority'].append(line[pos])
if len(line) > pos + 1:
if '#' in line[pos + 1]:
self._data_stores['Comment'].append(
' '.join(line[pos + 1:]))
continue
else:
self._data_stores['Identifier'].append(
line[pos + 1])
if len(line) > pos + 2:
self._data_stores['Comment'].append(
' '.join(line[pos + 2:]))
elif '#' in line[0][0]:
pass
else:
self._data_stores = \
self._add_none_to_empty_fields(self._data_stores)
break
counter += 1
self._data_stores = \
self._add_none_to_empty_fields(self._data_stores)
return counter
def _create_pandas(self):
"""Transforms the dictionary into a pandas DataFrame"""
cols = ['Label', 'Memory size', 'Packet size', 'Priority',
'Identifier', 'Comment']
self.Table = pd.DataFrame(self._data_stores, columns=cols)
class PIDs(EDF):
    """Parser for "PID:" entries of an EDF file.

    Attributes:
        Table (DataFrame): Pandas DataFrame containing the information
    """

    def __init__(self):
        self.Table = None
        # Parallel per-column lists; kept equal-length via
        # _add_none_to_empty_fields (inherited from EDF).
        self._pids = {"PID number": [], "Status": [], "Data Store ID": [],
                      "Comment": []}

    def _read(self, content):
        """Convert PID section lines into the internal dictionary.

        Args:
            content (list): Lines where a object of this type was
                detected at the beginning.

        Returns:
            int: number of lines used from the content
        """
        consumed = 0
        for raw_line in content:
            tokens = raw_line.split()
            if len(tokens) > 1:
                if tokens[0] == 'PID:':
                    # A new PID entry: pad shorter columns first so all
                    # lists stay the same length.
                    self._pids = \
                        self._add_none_to_empty_fields(self._pids)
                    self._pids['PID number'].append(tokens[1])
                    self._pids['Status'].append(tokens[2])
                    self._pids['Data Store ID'].append(tokens[3])
                    if len(tokens) > 4:
                        self._pids['Comment'].append(' '.join(tokens[4:]))
                elif tokens[0].startswith('#'):
                    # Comment line inside the section: ignore it.
                    pass
                else:
                    # Any other keyword terminates the PID section.
                    self._pids = \
                        self._add_none_to_empty_fields(self._pids)
                    break
            consumed += 1
        self._pids = \
            self._add_none_to_empty_fields(self._pids)
        return consumed

    def _create_pandas(self):
        """Transform the collected dictionary into a pandas DataFrame."""
        self.Table = pd.DataFrame(
            self._pids,
            columns=['PID number', 'Status', 'Data Store ID', 'Comment'])
class FTS(EDF):
    """FTS class: parser for "FTS:" entries of an EDF file.

    Attributes:
        Table (DataFrame): Pandas DataFrame containing the information
    """

    def __init__(self):
        self.Table = None
        # Parallel per-column lists; kept equal-length via
        # _add_none_to_empty_fields (inherited from EDF).
        self._fts = {"Data Store ID": [], "Status": [], "Data Volume": [],
                     "Comment": []}

    def _read(self, content):
        """Function that converts the input content into a dictionary

        Args:
            content (list): Lines where a object of this type was detected at
                the beginning.

        Returns:
            int: number of lines used from the content
        """
        counter = 0
        for line in content:
            line = line.split()
            if len(line) > 1:
                if line[0] == 'FTS:':
                    # If another FTS detected we ensure to keep same length
                    # of all the elements in the dictionary
                    self._fts = \
                        self._add_none_to_empty_fields(self._fts)
                    self._fts['Data Store ID'].append(line[1])
                    self._fts['Status'].append(line[2])
                    if len(line) > 4:
                        # BUGFIX: was ' '.join(line[3:4]), which equals
                        # line[3] and made both branches identical while
                        # the comment below starts at token 5 — token 4
                        # (presumably the volume unit) was silently
                        # dropped. Join tokens 3..4 as the data volume.
                        self._fts['Data Volume'].append(' '.join(line[3:5]))
                    else:
                        self._fts['Data Volume'].append(line[3])
                    if len(line) > 5:
                        self._fts['Comment'].append(' '.join(line[5:]))
                elif '#' in line[0][0]:
                    # Comment line inside the section: ignore it.
                    pass
                else:
                    # Any other keyword terminates the FTS section.
                    self._fts = \
                        self._add_none_to_empty_fields(self._fts)
                    break
            counter += 1
        self._fts = \
            self._add_none_to_empty_fields(self._fts)
        return counter

    def _create_pandas(self):
        """Transforms the dictionary into a pandas DataFrame"""
        cols = ['Data Store ID', 'Status', 'Data Volume', 'Comment']
        self.Table = pd.DataFrame(self._fts, columns=cols)
class FOVs(EDF):
    """Parser for field-of-view ("FOV") entries of an EDF file.

    Attributes:
        Table (DataFrame): Pandas DataFrame containing the information
    """

    # Recognized "Key:" names; also the output column order.
    _FOV_KEYS = ["FOV", "FOV_lookat", "FOV_upvector", "FOV_type",
                 "FOV_algorithm", "FOV_geometric_angles",
                 "FOV_geometric_pixels", "FOV_sub_view",
                 "FOV_straylight_angles", "FOV_straylight_duration",
                 "FOV_active", "FOV_image_timing", "FOV_imaging",
                 "FOV_pitch", "FOV_yaw"]

    def __init__(self):
        self.Table = None
        # Parallel per-column lists; kept equal-length via
        # _add_none_to_empty_fields (inherited from EDF).
        self._fov = {key: [] for key in self._FOV_KEYS}

    def _read(self, content):
        """Convert FOV section lines into the internal dictionary.

        Args:
            content (list): Lines where a object of this type was
                detected at the beginning.

        Returns:
            int: number of lines used from the content
        """
        consumed = 0
        for raw_line in content:
            tokens = raw_line.split()
            if len(tokens) > 1:
                keyword = tokens[0][:-1]
                if keyword in self._fov:
                    if tokens[0] == 'FOV:':
                        # A new FOV entry starts: pad shorter columns so
                        # all lists stay the same length.
                        self._fov = \
                            self._add_none_to_empty_fields(self._fov)
                    self._fov[keyword].append(' '.join(tokens[1:]))
                elif tokens[0].startswith('#'):
                    # Comment line inside the section: ignore it.
                    pass
                else:
                    # Unknown keyword terminates the FOV section.
                    self._fov = \
                        self._add_none_to_empty_fields(self._fov)
                    break
            consumed += 1
        self._fov = \
            self._add_none_to_empty_fields(self._fov)
        return consumed

    def _create_pandas(self):
        """Transform the collected dictionary into a pandas DataFrame."""
        self.Table = pd.DataFrame(self._fov, columns=self._FOV_KEYS)
class Areas(EDF):
    """Areas class

    Attributes:
        Table (DataFrame): Pandas DataFrame containing the information
    """
    def __init__(self):
        # Table is populated by _create_pandas(); the private dict collects
        # one list per Area_* keyword while parsing.
        self.Table = None
        self._areas = {"Area": [], "Area_orientation": [],
                       "Area_lighting_angle": [],
                       "Area_lighting_duration": []}

    def _read(self, content):
        """Function that converts the input content into a dictionary

        Args:
            content (list): Lines where a object of this type was detected at
                the beginning.

        Returns:
            int: number of lines used from the content
        """
        consumed = 0
        for raw_line in content:
            tokens = raw_line.split()
            if len(tokens) > 1:
                keyword = tokens[0][:-1]  # strip the trailing ':'
                if keyword in self._areas:
                    if tokens[0] == 'Area:':
                        # A new Area record starts: pad the shorter columns
                        # so every list keeps the same length.
                        self._areas = \
                            self._add_none_to_empty_fields(self._areas)
                    self._areas[keyword].append(' '.join(tokens[1:]))
                elif tokens[0][0] == '#':
                    pass  # comment line — skip
                else:
                    # Unknown keyword: this object's section has ended.
                    self._areas = \
                        self._add_none_to_empty_fields(self._areas)
                    break
            consumed += 1
        self._areas = self._add_none_to_empty_fields(self._areas)
        return consumed

    def _create_pandas(self):
        """Transforms the dictionary into a pandas DataFrame"""
        # Column order follows the insertion order of self._areas.
        self.Table = pd.DataFrame(self._areas, columns=list(self._areas))
class Modes(EDF):
    """Modes class

    Attributes:
        Table (DataFrame): Pandas DataFrame containing the information
    """
    def __init__(self):
        # Table is populated by _create_pandas(); the private dict collects
        # one list per Mode keyword while parsing.
        self.Table = None
        self._modes = {"Mode": [], "Mode_class": [], "Module_states": [],
                       "Internal_clock": [], "PID_enable_flags": [],
                       "Nominal_power": [], "Power_parameter": [],
                       "Nominal_data_rate": [], "Data_rate_parameter": [],
                       "Mode_aux_data_rate": [], "Equivalent_power": [],
                       "Equivalent_data_rate": [], "Mode_transitions": [],
                       "Mode_actions": [], "Mode_constraints": []}

    def _read(self, content):
        """Function that converts the input content into a dictionary

        Args:
            content (list): Lines where a object of this type was detected at
                the beginning.

        Returns:
            int: number of lines used from the content
        """
        consumed = 0
        for raw_line in content:
            tokens = raw_line.split()
            if len(tokens) > 1:
                keyword = tokens[0][:-1]  # strip the trailing ':'
                if keyword in self._modes:
                    if keyword.upper() == 'MODE':
                        # A new Mode record starts: pad the shorter columns
                        # so every list keeps the same length.
                        self._modes = \
                            self._add_none_to_empty_fields(self._modes)
                    self._modes[keyword].append(' '.join(tokens[1:]))
                elif tokens[0][0] == '#':
                    pass  # comment line — skip
                else:
                    # Unknown keyword: this object's section has ended.
                    self._modes = \
                        self._add_none_to_empty_fields(self._modes)
                    break
            consumed += 1
        self._modes = self._add_none_to_empty_fields(self._modes)
        return consumed

    def _create_pandas(self):
        """Transforms the dictionary into a pandas DataFrame"""
        # Column order follows the insertion order of self._modes.
        self.Table = pd.DataFrame(self._modes, columns=list(self._modes))
class Modules(EDF):
"""Modules Class
Attributes:
Module_states_Table (DataFrame): Pandas DataFrame containing the
information of the Module States
Table (DataFrame): Pandas DataFrame containing the information of the
Modules
"""
def __init__(self):
self.Table = None
self._modules = {"Module": [], "Module_level": [],
"Module_dataflow": [], "Module_PID": [],
"Module_aux_PID": [], "Sub_modules": [],
"Nr_of_module_states": []}
self.Module_states_Table = None
self._module_states = {"Module_state": [], "MS_PID": [],
"MS_aux_PID": [], "MS_power": [],
"MS_power_parameter": [], "MS_data_rate": [],
"MS_data_rate_parameter": [],
"MS_aux_data_rate": [], "MS_constraints": [],
"Repeat_action": [], "MS_pitch": [],
"MS_yaw": []}
def _read(self, content):
"""Function that converts the input content into a dictionary
Args:
content (list): Lines where a object of this type was detected at
the beginning.
Returns:
int: number of lines used from the content
"""
counter = 0
for line in content:
line = line.split()
if len(line) > 1:
if line[0][:-1] in self._modules:
# If another MODULE detected we ensure to keep same
# length of all the elements in the dictionary
if line[0][:-1].upper() == 'MODULE':
self._modules = \
self._add_none_to_empty_fields(self._modules)
self._modules[line[0][:-1]].append(' '.join(line[1:]))
elif line[0][:-1] in self._module_states:
# If another MODULE_STATE detected we ensure to keep
# same length of all the elements in the dictionary
if line[0][:-1].upper() == 'MODULE_STATE':
# Adding module name for every module state
if isinstance(self._modules['Module'][-1], list):
line[1] = self._modules['Module'][-1][0] \
+ " - " + line[1]
else:
line[1] = self._modules['Module'][-1] \
+ " - " + line[1]
self._module_states = \
self._add_none_to_empty_fields(self._module_states)
self._module_states[line[0][:-1]].append(
' '.join(line[1:]))
elif '#' in line[0][0]:
pass
else:
self._modules = \
self._add_none_to_empty_fields(self._modules)
self._module_states = \
self._add_none_to_empty_fields(self._module_states)
break
counter += 1
self._modules = \
self._add_none_to_empty_fields(self._modules)
self._module_states = \
| |
search(prima_feats, "name", name)
if result is None:
return await bot.say("Feat not found.")
strict = result[1]
results = result[0]
if strict:
result = results
else:
if len(results) == 1:
result = results[0]
else:
result = await get_selection(ctx, [(r["name"], r) for r in results])
if result is None: return await bot.say("Selection timed out or was cancelled.")
embed = EmbedWithAuthor(ctx)
embed.title = result["name"]
meta = result["meta"]
meta2 = [meta[i:i + 1024] for i in range(2048, len(meta), 1024)]
embed.description = meta[0:2048]
for piece in meta2:
embed.add_field(name="\u200b", value=piece)
await bot.say(embed=embed)
await bot.delete_message(ctx.message)
@bot.group(pass_context=True)
async def item(ctx):
    """Group command: `.item <source>`; bare invocation prints usage help."""
    if ctx.invoked_subcommand is not None:
        return
    await bot.say("You need to enter a valid source. See full supported list with `.sources item`.")
@item.command(pass_context=True)
async def adv(ctx, *, name=""):
    """Look up an ADV item by name and post it as an embed."""
    found = search(adv_items, "name", name)
    if found is None:
        return await bot.say("Item not found.")
    matches, strict = found[0], found[1]
    if strict:
        result = matches
    elif len(matches) == 1:
        result = matches[0]
    else:
        # Ambiguous fuzzy match: let the user pick interactively.
        result = await get_selection(ctx, [(m["name"], m) for m in matches])
        if result is None:
            return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    embed.title = result["name"]
    meta = result["meta"]
    lines = meta.split("\n")
    embed.description = lines[0]
    embed.add_field(name="Description", value=lines[1])
    # Anything past Discord's 2048-char description limit goes into
    # continuation fields of up to 1024 chars each.
    for piece in (meta[i:i + 1024] for i in range(2048, len(meta), 1024)):
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@item.command(pass_context=True)
async def prima(ctx, *, name=""):
    """Look up a PRIMA item by name and post it as an embed."""
    found = search(prima_items, "name", name)
    if found is None:
        return await bot.say("Item not found.")
    matches, strict = found[0], found[1]
    if strict:
        result = matches
    elif len(matches) == 1:
        result = matches[0]
    else:
        # Ambiguous fuzzy match: let the user pick interactively.
        result = await get_selection(ctx, [(m["name"], m) for m in matches])
        if result is None:
            return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    meta = result["meta"]
    embed.title = result["name"]
    embed.description = meta.split("\n")[0]
    # Anything past Discord's 2048-char description limit goes into
    # continuation fields of up to 1024 chars each.
    for piece in (meta[i:i + 1024] for i in range(2048, len(meta), 1024)):
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@bot.group(pass_context=True)
async def monster(ctx):
    """Group command: `.monster <source>`; bare invocation prints usage help."""
    if ctx.invoked_subcommand is not None:
        return
    await bot.say("You need to enter a valid source. See full supported list with `.sources monster`.")
@monster.command(pass_context=True)
async def adv(ctx, *, name=""):
    """Look up an ADV monster by name and post it as an embed.

    Meta layout (per the indexing in `update`): line 0 = description,
    line 1 = info block, line 2 = thumbnail URL.
    """
    result = search(adv_monsters, "name", name)
    if result is None:
        return await bot.say("Monster not found.")
    strict = result[1]
    results = result[0]
    if strict:
        result = results
    else:
        if len(results) == 1:
            result = results[0]
        else:
            result = await get_selection(ctx, [(r["name"], r) for r in results])
            if result is None:
                return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    embed.title = result["name"]
    meta = result["meta"]
    meta2 = [meta[i:i + 1024] for i in range(2048, len(meta), 1024)]
    lines = meta.split("\n")
    embed.description = lines[0]
    embed.add_field(name="Info", value=lines[1])
    # BUG FIX: was meta[2:2], which is always the empty string. The thumbnail
    # URL is the third line of the meta block; guard against short metas.
    if len(lines) > 2 and lines[2]:
        embed.set_thumbnail(url=lines[2])
    for piece in meta2:
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@monster.command(pass_context=True)
async def prima(ctx, *, name=""):
    """Look up a PRIMA monster by name and post it as an embed.

    Mirrors the ADV variant: meta line 0 = description, line 1 = info block.
    """
    result = search(prima_monsters, "name", name)
    if result is None:
        return await bot.say("Monster not found.")
    strict = result[1]
    results = result[0]
    if strict:
        result = results
    else:
        if len(results) == 1:
            result = results[0]
        else:
            result = await get_selection(ctx, [(r["name"], r) for r in results])
            if result is None:
                return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    embed.title = result["name"]
    meta = result["meta"]
    meta2 = [meta[i:i + 1024] for i in range(2048, len(meta), 1024)]
    # BUG FIX: was meta[0:0] / meta[1:1], which are always empty strings
    # (an empty embed description and an empty field value). Use the first
    # two lines of the meta block, matching the ADV monster command.
    lines = meta.split("\n")
    embed.description = lines[0]
    if len(lines) > 1 and lines[1]:
        embed.add_field(name="Info", value=lines[1])
    for piece in meta2:
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@bot.group(pass_context=True)
async def race(ctx):
    """Group command: `.race <source>`; bare invocation prints usage help."""
    if ctx.invoked_subcommand is not None:
        return
    await bot.say("You need to enter a valid source. See full supported list with `.sources race`.")
@race.command(pass_context=True)
async def adv(ctx, *, name=""):
    """Look up an ADV race by name and post it as an embed."""
    found = search(adv_races, "name", name)
    if found is None:
        return await bot.say("Race not found.")
    matches, strict = found[0], found[1]
    if strict:
        result = matches
    elif len(matches) == 1:
        result = matches[0]
    else:
        # Ambiguous fuzzy match: let the user pick interactively.
        result = await get_selection(ctx, [(m["name"], m) for m in matches])
        if result is None:
            return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    embed.title = result["name"]
    meta = result["meta"]
    embed.description = meta[:2048]
    # Anything past Discord's 2048-char description limit goes into
    # continuation fields of up to 1024 chars each.
    for piece in (meta[i:i + 1024] for i in range(2048, len(meta), 1024)):
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@race.command(pass_context=True)
async def prima(ctx, *, name=""):
    """Look up a PRIMA race by name and post it as an embed."""
    found = search(prima_races, "name", name)
    if found is None:
        return await bot.say("Race not found.")
    matches, strict = found[0], found[1]
    if strict:
        result = matches
    elif len(matches) == 1:
        result = matches[0]
    else:
        # Ambiguous fuzzy match: let the user pick interactively.
        result = await get_selection(ctx, [(m["name"], m) for m in matches])
        if result is None:
            return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    embed.title = result["name"]
    meta = result["meta"]
    embed.description = meta[:2048]
    # Anything past Discord's 2048-char description limit goes into
    # continuation fields of up to 1024 chars each.
    for piece in (meta[i:i + 1024] for i in range(2048, len(meta), 1024)):
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@bot.group(pass_context=True)
async def spell(ctx):
    """Group command: `.spell <source>`; bare invocation prints usage help."""
    if ctx.invoked_subcommand is not None:
        return
    await bot.say("You need to enter a valid source. See full supported list with `.sources spell`.")
@spell.command(pass_context=True)
async def adv(ctx, *, name=""):
    """Look up an ADV spell by name and post it as an embed."""
    found = search(adv_spells, "name", name)
    if found is None:
        return await bot.say("Spell not found.")
    matches, strict = found[0], found[1]
    if strict:
        result = matches
    elif len(matches) == 1:
        result = matches[0]
    else:
        # Ambiguous fuzzy match: let the user pick interactively.
        result = await get_selection(ctx, [(m["name"], m) for m in matches])
        if result is None:
            return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    embed.title = result["name"]
    meta = result["meta"]
    embed.description = meta[:2048]
    # Anything past Discord's 2048-char description limit goes into
    # continuation fields of up to 1024 chars each.
    for piece in (meta[i:i + 1024] for i in range(2048, len(meta), 1024)):
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@spell.command(pass_context=True)
async def prima(ctx, *, name=""):
    """Look up a PRIMA spell by name and post it as an embed."""
    found = search(prima_spells, "name", name)
    if found is None:
        return await bot.say("Spell not found.")
    matches, strict = found[0], found[1]
    if strict:
        result = matches
    elif len(matches) == 1:
        result = matches[0]
    else:
        # Ambiguous fuzzy match: let the user pick interactively.
        result = await get_selection(ctx, [(m["name"], m) for m in matches])
        if result is None:
            return await bot.say("Selection timed out or was cancelled.")
    embed = EmbedWithAuthor(ctx)
    embed.title = result["name"]
    meta = result["meta"]
    embed.description = meta.split("\n")[0]
    # Anything past Discord's 2048-char description limit goes into
    # continuation fields of up to 1024 chars each.
    for piece in (meta[i:i + 1024] for i in range(2048, len(meta), 1024)):
        embed.add_field(name="\u200b", value=piece)
    await bot.say(embed=embed)
    await bot.delete_message(ctx.message)
@bot.command(pass_context=True, name="_class", aliases=["class"])  # This is the classes command.
async def _class(ctx, *, name=""):
    """Post the static 'Adventurer' class description embed.

    Unlike the lookup commands, this embed is hard-coded rather than indexed
    from a source document; `name` is accepted but ignored.
    """
    embed = EmbedWithAuthor(ctx)
    embed.title = "Adventurer Version 1.5"
    # NOTE: the backslash-continuations keep the level table inside one
    # string literal; the embedded \n characters produce the line breaks.
    embed.add_field(name="Level-Up Table", value="`1` Feat, Feat, Feat, Feat\n\
`2` Feat, Feat\n\
`3` Feat\n\
`4` Ability Score Improvement, Feat\n\
`5` Feat\n\
`6` Ability Score Improvement, Feat\n\
`7` Feat\n\
`8` Ability Score Improvement, Feat\n\
`9` Feat\n\
`10` Ability Score Improvement, Feat\n\
`11` Feat\n\
`12` Ability Score Improvement, Feat\n\
`13` Feat\n\
`14` Ability Score Improvement, Feat\n\
`15` Feat\n\
`16` Ability Score Improvement, Feat\n\
`17` Feat\n\
`18` Feat\n\
`19` Ability Score Improvement, Feat\n\
`20` Feat, Feat")
    embed.add_field(name="Hit Die", value="1d8", inline=False)
    embed.add_field(name="Saving Throws", value="None.")
    embed.add_field(name="Starting Proficiencies", value="You are proficient with nothing except for any proficiencies provided by your race or background. \n\
You must pick up armor, weapon, tool, skill, and saving throw proficiencies through feats.")
    embed.add_field(name="Starting Equipment", value="You start with the equipment provided by your background, as well as 4d4 x 10 gp to buy your own equipment.")
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/348897378062827520/425406341788467210/68747470733a2f2f6765656b616e6473756e6472792e636f6d2f77702d636f6e74656e742f75706c6f6164732f323031362f.jpg")
    await bot.say(embed=embed)
    # Clean up the invoking message once the embed has been posted.
    await bot.delete_message(ctx.message)
@bot.command(pass_context=True, name="update", aliases=["u"])
async def update(ctx, *, name=""):
adv_classes.clear()
adv_feats.clear()
adv_items.clear()
adv_monsters.clear()
adv_races.clear()
adv_spells.clear()
prima_classes.clear()
prima_feats.clear()
prima_items.clear()
prima_monsters.clear()
prima_races.clear()
prima_spells.clear()
async with aiohttp.ClientSession() as session:
async with session.get(ADV_CLASS_SOURCE) as resp:
text = await resp.text()
if 399 < resp.status < 600:
raise Exception(f"Failed to update ADV classes: {text}")
raw_adv_classes = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]
for adv_class in raw_adv_classes:
lines = adv_class.split("\n")
name = lines[0].strip("# ")
meta = "\n".join(lines[1::])
desc = "\n".join(lines)
for dup in [i for i in adv_classes if name.lower() == i["name"].lower()]:
adv_classes.remove(dup)
adv_classes.append({"name": name, "meta": meta, "desc": desc})
print(f"Indexed class {name} from ADV.")
async with aiohttp.ClientSession() as session:
async with session.get(ADV_FEAT_SOURCE) as resp:
text = await resp.text()
if 399 < resp.status < 600:
raise Exception(f"Failed to update ADV feats: {text}")
raw_adv_feats = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]
for adv_feat in raw_adv_feats:
lines = adv_feat.split("\n")
name = lines[0].strip("# ")
meta = "\n".join(lines[1::])
desc = "\n".join(lines)
for dup in [i for i in adv_feats if name.lower() == i["name"].lower()]:
adv_feats.remove(dup)
adv_feats.append({"name": name, "meta": meta, "desc": desc})
print(f"Indexed feat {name} from ADV.")
async with aiohttp.ClientSession() as session:
async with session.get(ADV_ITEM_SOURCE) as resp:
text = await resp.text()
if 399 < resp.status < 600:
raise Exception(f"Failed to update ADV items: {text}")
raw_adv_items = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]
for adv_item in raw_adv_items:
lines = adv_item.split("\n")
name = lines[0].strip("# ")
meta = "\n".join(lines[1::])
desc = "\n".join(lines)
for dup in [i for i in adv_items if name.lower() == i["name"].lower()]:
adv_items.remove(dup)
adv_items.append({"name": name, "meta": meta, "desc": desc})
print(f"Indexed item {name} from ADV.")
async with aiohttp.ClientSession() as session:
async with session.get(ADV_MONSTER_SOURCE) as resp:
text = await resp.text()
if 399 < resp.status < 600:
raise Exception(f"Failed to update ADV monsters: {text}")
raw_adv_monsters = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]
for adv_monster in raw_adv_monsters:
lines = adv_monster.split("\n")
name = lines[0].strip("# ")
meta = "\n".join(lines[1::])
desc = "\n".join(lines)
for dup in [i for i in adv_monsters if name.lower() == i["name"].lower()]:
adv_monsters.remove(dup)
adv_monsters.append({"name": name, "meta": meta, "desc": desc})
print(f"Indexed monster {name} from ADV.")
async with aiohttp.ClientSession() as session:
async with session.get(ADV_RACE_SOURCE) as resp:
text = await resp.text()
if 399 < resp.status < 600:
raise Exception(f"Failed to update ADV races: {text}")
raw_adv_races = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]
for adv_race in raw_adv_races:
lines = adv_race.split("\n")
name = lines[0].strip("# ")
meta = "\n".join(lines[1::])
desc = "\n".join(lines)
for dup in [i for i in adv_races if name.lower() == i["name"].lower()]:
adv_races.remove(dup)
adv_races.append({"name": name, "meta": meta, "desc": desc})
print(f"Indexed race {name} from ADV.")
async with aiohttp.ClientSession() as session:
async with session.get(ADV_SPELL_SOURCE) as resp:
text = await resp.text()
if 399 < resp.status < 600:
raise Exception(f"Failed to update ADV spells: {text}")
raw_adv_spells = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]
for adv_spell in raw_adv_spells:
lines = adv_spell.split("\n")
name = lines[0].strip("# ")
meta = "\n".join(lines[1::])
desc = "\n".join(lines)
for dup in [i for i in adv_spells if name.lower() == i["name"].lower()]:
adv_spells.remove(dup)
adv_spells.append({"name": name, "meta": meta, "desc": desc})
print(f"Indexed spell {name} from ADV.")
async with aiohttp.ClientSession() as session:
async with session.get(PRIMA_CLASS_SOURCE) as resp:
text = await resp.text()
if 399 < resp.status < 600:
raise Exception(f"Failed to update PRIMA classes: {text}")
raw_prima_classes = [t.strip() for t in text.split(DIVIDER)][IGNORED_ENTRIES:]
for prima_class in raw_prima_classes:
lines = prima_class.split("\n")
name = lines[0].strip("# ")
meta = "\n".join(lines[1::])
desc = "\n".join(lines)
for dup in [i for i in prima_classes if name.lower() == i["name"].lower()]:
prima_classes.remove(dup)
prima_classes.append({"name": name, "meta": meta, "desc": desc})
print(f"Indexed | |
search for Markdown files and build a cache of `Meta`
from metadata in the Markdown.
:param root: str: The path to search for files from.
"""
doc_files = glob.iglob(root + '/**/*.md', recursive=True)
def _meta(path):
with open(path, 'r', encoding='utf-8') as f:
md = markdown.Markdown(extensions=mdextensions)
md.page_root = os.path.dirname(path)
Markup(md.convert(f.read()))
return md.Meta if hasattr(md, 'Meta') else None
doc_files_meta = {os.path.relpath(path, start=root): _meta(path) for path in doc_files}
doc_files_meta = {path: value for path, value in doc_files_meta.items() if value is not None}
# If a nav filter is set, exclude relevant documents.
# This takes the comma separated string supplied to `nav_limit`
# and excludes certain documents if they are NOT in this list.
global CMD_ARGS
if CMD_ARGS.nav_limit:
nav_filters = CMD_ARGS.nav_limit.split(',')
nav_filters = [nav_filter.strip().lower() for nav_filter in nav_filters]
nav_filters = [nav_filter for nav_filter in nav_filters if nav_filter]
def _should_include(doc_meta):
nav_strings = [nav.lower() for nav in doc_meta.get('nav', [])]
return any([y.startswith(x) for x in nav_filters for y in nav_strings])
doc_files_meta = {path: value for path, value in doc_files_meta.items() if _should_include(value)}
return doc_files_meta
def build_nav_menu(meta_cache):
    """ Given a cache of Markdown `Meta` data, compile a structure that can be
    used to generate the NAV menu.

    This uses the `nav: Assembly>Bench>Part` variable at the top of the Markdown file.
    """
    root = NavItem('root', 0)

    def _leaf_name(item):
        # Alphabetical pre-sort on the last segment of each page's nav string.
        # arrange() below overrides it, but a stable pre-sort stops
        # un-arranged items from jumping around between page refreshes
        # (dict iteration order carries no meaning here).
        return item[1].get('nav', [''])[0].split('>')[-1]

    for path, meta in sorted(meta_cache.items(), key=_leaf_name):
        nav_str = meta.get('nav', [None])[0]
        node = root
        # Walk/create the tree one nav segment at a time, then attach the
        # page's meta and link to the leaf node.
        for name, weight in parse_nav_string(nav_str):
            node = node.add(NavItem(name, weight))
        node.bind(meta=meta, link=path)
    root.arrange()
    return root
def build_reload_files_list(extra_dirs):
    """ Given a list of directories, return a list of files to watch for modification
    and subsequent server reload. """
    # The directories themselves are watched too, plus every file below them.
    watched = list(extra_dirs)
    for extra_dir in extra_dirs:
        for dirname, _subdirs, files in os.walk(extra_dir):
            for fname in files:
                full_path = os.path.join(dirname, fname)
                if os.path.isfile(full_path):
                    watched.append(full_path)
    return watched
def read_html_for_injection(path):
    """ Open an HTML file at the given path and return the contents
    as a string. If the file does not exist, we raise an exception.
    """
    # TODO: In the future, consider adding some caching here. However,
    # beware of reloading / refereshing the page UX implications.
    with open(path) as html_file:
        contents = html_file.read()
    return contents
def _render_markdown(file_path, **kwargs):
    """ Given a `file_path` render the Markdown and return the result of `render_template`.
    """
    global NAV_MENU, PROJECT_LOGO, PDF_GENERATION_ENABLED
    # Convert the Markdown file; the converter records page metadata on `md.Meta`.
    with open(file_path, 'r', encoding='utf-8') as f:
        md = markdown.Markdown(extensions=mdextensions)
        md.page_root = os.path.dirname(file_path)
        md.page_file = file_path
        markup = Markup(md.convert(f.read()))
    # The template name comes from the page metadata, falling back to the
    # default 'document' template.
    template_meta = md.Meta.get('template', None)
    template = template_meta[0] if template_meta else 'document'
    if not template:
        raise Exception('no template found for document')
    # Load any HTML fragments requested via the 'inject' metadata key,
    # resolved relative to the page's directory.
    injection_paths = [os.path.join(md.page_root, name)
                       for name in md.Meta.get('inject', [])]
    injections = [read_html_for_injection(p) for p in injection_paths]
    # Render it out with all the prepared data.
    return render_template(f'{template}.html',
                           content=markup,
                           nav_menu=NAV_MENU,
                           project_logo=PROJECT_LOGO,
                           pdf_enabled=PDF_GENERATION_ENABLED,
                           injections=injections,
                           **md.Meta,
                           **kwargs)
def configure_flask(app, root_dir):
    """ Setup the flask application within this scope.

    Registers the navigation-cache builder, template filters, and all HTTP
    routes (favicon, print header/footer, 404, wiki pages, homepage, PDF
    export) on `app`, serving documents from `root_dir`.
    """
    @app.before_first_request
    def build_navigation_cache():
        """ Build an in-memory cache of document meta-data.

        NOTE: The design choice is made to crash the application if any
        of the markdown files cannot be opened and parsed. In the
        future when it becomes more stable, this will probably change.
        """
        # This is called each time the server restarts.
        global NAV_MENU
        meta_cache = build_meta_cache(root_dir)
        # Build the nav menu data-structure.
        NAV_MENU = build_nav_menu(meta_cache)
    # Store the reference to the function that rebuilds the navigation cache,
    # so other code (e.g. file watchers) can trigger a rebuild explicitly.
    app.build_navigation_cache = build_navigation_cache

    @app.template_filter('gravatar')
    def gravatar(email, size=100, rating='g', default='retro', use_ssl=False):
        """ Return a gravatar link for a given email address. """
        url = "https://secure.gravatar.com/avatar/" if use_ssl else "http://www.gravatar.com/avatar/"
        # Gravatar hashes are MD5 of the trimmed, lower-cased address.
        email = email.strip().lower().encode('utf-8')
        hash_email = hashlib.md5(email).hexdigest()
        return f'{url}{hash_email}?s={size}&r={rating}&d={default}'

    @app.template_filter()
    def url_unquote(url):
        """ Removes encoding around a URL. """
        return urllib.parse.unquote(url)

    @app.route('/favicon.ico')
    def favicon():
        # Serve the favicon from the package's static folder.
        return send_from_directory(os.path.join(app.root_path, 'static'),
                                   'favicon.ico', mimetype='image/vnd.microsoft.icon')

    @app.route("/print_header")
    def print_header():
        """ Render the template for the header used when printing with WKPDFTOHTML. """
        global PROJECT_LOGO
        return render_template('print_header.html', project_logo=PROJECT_LOGO)

    @app.route("/print_footer")
    def print_footer():
        """ Render the template for the footer used when printing with WKPDFTOHTML. """
        global PROJECT_LOGO
        return render_template('print_footer.html', project_logo=PROJECT_LOGO)

    @app.errorhandler(404)
    def page_not_found(e):
        # Custom 404 page, keeping the site chrome (nav menu + logo).
        global NAV_MENU, PROJECT_LOGO
        return render_template('404.html', nav_menu=NAV_MENU, project_logo=PROJECT_LOGO), 404

    @app.route("/w/<path:page>")
    def wiki(page):
        """ Render the page. """
        file_path = os.path.abspath(os.path.join(root_dir, page))
        if not os.path.isfile(file_path):
            abort(404)
        # Markdown documents get rendered; anything else (images, assets)
        # is served verbatim.
        if '.md' in [ext.lower() for ext in os.path.splitext(file_path)]:
            return _render_markdown(file_path, current_page=page)
        else:
            return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))

    @app.route("/")
    @app.route("/w/")
    def homepage():
        # The site root is just the rendered home document.
        return wiki('home.md')

    @app.route("/pdf/<path:page>")
    def wiki_pdf(page):
        # Render a wiki page to PDF via wkhtmltopdf and stream it back inline.
        file_path = os.path.abspath(os.path.join(root_dir, page))
        if not os.path.isfile(file_path):
            abort(404)
        # Non-markdown assets are served verbatim, same as the wiki route.
        if '.md' not in [ext.lower() for ext in os.path.splitext(file_path)]:
            return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
        # Configure the different paths: a temp output file plus absolute
        # URLs for the page itself and the print header/footer templates.
        pdf_temp = f'{tempfile.mktemp()}.pdf'
        input_url = url_for('wiki', page=page, _external=True)
        header_url = url_for('print_header', _external=True)
        footer_url = url_for('print_footer', _external=True)
        args = f'{WKHTMLTOPDF_BINARY} --header-html {header_url} --footer-html {footer_url} \
            --print-media-type --header-spacing 2 {input_url} {pdf_temp}'
        # Invoke WkHTMLtoPDF
        result = subprocess.check_output(args, shell=True)
        if not result:
            # NOTE(review): wkhtmltopdf output is deliberately ignored here;
            # failures surface via check_output raising instead.
            pass
        # Write the newly generated temp pdf into a response.
        with open(pdf_temp, 'rb') as f:
            binary_pdf = f.read()
        target_file_name = page.replace("/", "_").replace("\\", "_")
        response = make_response(binary_pdf)
        response.headers['Content-Type'] = 'application/pdf'
        # response.headers['Content-Disposition'] = f'attachment; filename={target_file_name}.pdf'
        response.headers['Content-Disposition'] = f'inline; filename={target_file_name}.pdf'
        # Delete the temp file and return the response.
        os.remove(pdf_temp)
        return response
def generate_static_pdf(app, root_dir, output_dir, nav_filter=None):
    """ Generate a static PDF directory for the documentation in `root_dir`
    into `output_dir`.

    Note: `nav_filter` is currently unused; it is kept for interface
    compatibility with callers.
    """
    global PORT_NUMBER
    # Find all markdown document paths that are in the nav, then fetch each
    # one through the running server's /pdf/ route.
    documents = build_meta_cache(root_dir)
    for doc_path in documents.keys():
        url = 'pdf/' + doc_path.replace('\\', '/')
        source = f'http://localhost:{PORT_NUMBER}/{url}'
        target = f'{os.path.join(output_dir, *os.path.split(url))}.pdf'
        os.makedirs(os.path.dirname(target), exist_ok=True)
        print(f'Source: {source} \n Target: {target}')
        urllib.request.urlretrieve(source, target)
# Helper function to return the domain if present.
def is_absolute(url):
    """ Returns True if the passed url string is an absolute path.
    False if not
    """
    # A URL is treated as absolute exactly when it carries a network location.
    return bool(urlparse(url).netloc)
def generate_static_html(app, root_dir, output_dir):
    """ Generate a static HTML site for the documentation in `root_dir`
    into `output_dir`.
    """
    from flask_frozen import Freezer, MissingURLGeneratorWarning
    import warnings
    warnings.filterwarnings("ignore", category=MissingURLGeneratorWarning)

    # Flask-Frozen configuration: relative links, no MIME warnings, and the
    # requested destination directory.
    app.config['FREEZER_RELATIVE_URLS'] = True
    app.config['FREEZER_IGNORE_MIMETYPE_WARNINGS'] = True
    app.config['FREEZER_DESTINATION'] = output_dir

    # Create the freezer app. Make it use specific URLs.
    freezer = Freezer(app, with_no_argument_rules=False, log_url_for=False)

    @freezer.register_generator
    def wiki():
        # Feed every file under the docs root through the `wiki` flask route.
        for doc_file in glob.iglob(f'{root_dir}/**/*', recursive=True):
            if os.path.isfile(doc_file):
                yield doc_file.replace(f'{root_dir}', '/w').replace(f'{os.path.sep}', '/')

    # Save all the URLs using the correct extension and MIME type.
    freezer.freeze()

    # Post-process: rewrite relative `.md` links to `.html` and rename the
    # frozen markdown files accordingly.
    for markdown_file in glob.iglob(f'{output_dir}/**/*.md', recursive=True):
        with open(markdown_file, 'r', encoding="utf-8") as f:
            html = f.read()

        def _href_replace(match):
            link = match.group()
            # Absolute links are left untouched; 'href="' is 6 chars and the
            # closing quote is the final char.
            if is_absolute(link[6:-1]):
                return link
            return link.replace('.md', '.html')

        rewritten = re.sub('href="(.*md)"', _href_replace, html)
        with open(markdown_file[:-3] + '.html', 'w', encoding="utf-8") as f:
            f.write(rewritten)
        os.remove(markdown_file)
def load_project_logo(logo_file=None):
    """ Attempt to load the project logo from the specified path.
    If this fails, return None. If this succeeds, convert it to a data-uri.
    """
    # No path given, or path missing on disk: no logo.
    if not logo_file or not os.path.exists(logo_file):
        return None
    with open(logo_file, 'rb') as fp:
        encoded = base64.b64encode(fp.read()).decode('utf-8')
    # NOTE: the MIME type is hard-coded to PNG regardless of the actual file.
    return u'data:%s;base64,%s' % ('image/png', encoded)
def check_pdf_generation_cap():
    """ Check to see if we can use PDF generation by attempting to use the binary. """
    global WKHTMLTOPDF_BINARY
    # Probe by asking the binary for its version; a zero exit code means the
    # tool is installed and runnable. All output is discarded.
    command = f'{WKHTMLTOPDF_BINARY} --version'
    exit_code = subprocess.call(command,
                                shell=True,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
    return exit_code == 0
def copy_local_project(force=False):
""" Copy the sample docs and style into the local working directory.
Note: This will overwrite anything currently in those folders.
"""
source_root = os.path.dirname(__file__)
target_root = os.getcwd()
targets = ['docs', 'style', 'logo.png']
| |
# April 12, 2019
# <NAME>
from __future__ import division
import sys, os, argparse, operator, numpy, pandas, fuzzysearch
from collections import Counter

## set up parser for user inputs
parser = argparse.ArgumentParser()
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
## user inputs required
required.add_argument('-p', '--inFile', help='path to .fasta file.', dest='file_inFile')
required.add_argument('-o', '--outPath', help='path to output directory.', dest='path_outPath')
required.add_argument('-d1', '--drOne', help='first partial CRISPR repeat ', dest='dr_sequence1')
required.add_argument('-d2', '--drTwo', help='second partial CRISPR repeat ', dest='dr_sequence2')
# BUGFIX: help text was a copy-paste of -d2's ("second partial CRISPR repeat")
required.add_argument('-df', '--drFull', help='full CRISPR repeat ', dest='dr_sequenceFull')
## optional (defaults provided)
optional.add_argument('-l', '--LBC', help='library identifier ', dest='LBC', default='null')
# BUGFIX: help text wrongly said "path to output directory"
optional.add_argument('-s', '--outName', help='output file base name.', dest='file_outName', default='null')
optional.add_argument('-a', help='Number of allowed mismatches in the first partial CRISPR repeat. Default=2', type=int, dest='m1', default=2)
optional.add_argument('-b', help='Number of allowed mismatches in the second partial CRISPR repeat. Default=3', type=int, dest='m2', default=3)
# BUGFIX: help strings now state the actual defaults (25 and 56)
optional.add_argument('-m', help='Minimum spacer size. Default=25', type=int, dest='min', default=25)
optional.add_argument('-n', help='Maximum spacer size. Default=56', type=int, dest='max', default=56)
optional.add_argument('-sMin', help='Minimum stagger length according to primer design. Default=0', type=int, dest='min_stagger', default=0)
optional.add_argument('-sMax', help='Maximum stagger length according to primer design. Default=8', type=int, dest='max_stagger', default=8)
optional.add_argument('--infoFile', help='generate files with GC content and length info for spacers', dest='infoFile', action='store_true')
optional.add_argument('--no-infoFile', help='suppress files with GC content and length info for spacers', dest='infoFile', action='store_false')
optional.set_defaults(infoFile=False)
parser._action_groups.append(optional)
args = parser.parse_args()

# assign arguments to variables
DR1Mismatch = int(args.m1)
DR2Mismatch = int(args.m2)
minSpacer = int(args.min)
maxSpacer = int(args.max)
inFile = str(args.file_inFile)
outPath = str(args.path_outPath)+'/'
outName = str(args.file_outName)
firstRepeat = str(args.dr_sequence1)
secondRepeat = str(args.dr_sequence2)
fullRepeat = str(args.dr_sequenceFull)
LBC = str(args.LBC)
minStagger = int(args.min_stagger)
maxStagger = int(args.max_stagger)
infoFile = args.infoFile

# BUGFIX: 'is' compared object identity of a string literal (implementation
# dependent and a SyntaxWarning on modern CPython); use equality instead.
if outName == 'null':
    outName = inFile.split("/")[-1]
    if outName.endswith('.fasta'):
        outName = outName[:-6]
# Function that takes in a part of the read and gives back a spacer
def editSpacer(read,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook):
    """Extract the spacer sequence located between two partial CRISPR repeats.

    Parameters:
        read                 -- the read (or read fragment) to search in
        firstRepeat/secondRepeat -- the flanking repeat sequences
        firstExpect/secondExpect -- offsets where each repeat is expected to start
        DR1Mismatch/DR2Mismatch  -- max mismatches tolerated per repeat (fuzzy search)
        minSpacer/maxSpacer      -- accepted spacer length bounds
        firstRangeToLook/secondRangeToLook -- search-window widths past each offset

    Returns the spacer string, or '' when either repeat is missing, the read
    is too short, or the spacer length falls outside [minSpacer, maxSpacer].
    """
    # Find the first repeat with an exact search; fall back to fuzzy matching.
    s = read[firstExpect:(firstExpect + firstRangeToLook + len(firstRepeat))]
    findDR1 = s.find(firstRepeat)
    if findDR1 == -1:
        firstMatch = fuzzysearch.find_near_matches(firstRepeat, s, max_l_dist=DR1Mismatch)
        firstMatch = sorted(firstMatch, key=lambda x: x[2])  # closest match first
    else:
        firstMatch = [[findDR1, findDR1 + len(firstRepeat)]]
    if not firstMatch:
        return ''  # too many mismatches
    # Find the second repeat in its own window.
    s = read[secondExpect:(secondExpect + secondRangeToLook + len(secondRepeat))]
    if len(s) <= len(secondRepeat):
        return ''  # read too short to contain the second repeat
    findDR2 = s.find(secondRepeat)
    if findDR2 == -1:
        secondMatch = fuzzysearch.find_near_matches(secondRepeat, s, max_l_dist=DR2Mismatch)
        secondMatch = sorted(secondMatch, key=lambda x: x[2])
    else:
        secondMatch = [[findDR2, findDR2 + len(secondRepeat)]]
    if not secondMatch:
        return ''  # too many mismatches
    # Both repeats found: the spacer spans from the end of the first repeat
    # to the start of the second.
    spacerStart = firstMatch[0][1]
    spacerEnd = secondMatch[0][0] + secondExpect
    # If the spacer is too short, try later matches of the second repeat.
    # BUGFIX: the original used '&', which binds tighter than comparisons, so
    # the test was parsed as a chained comparison and fired even when only one
    # match existed; it also branched on findDR1 and could access .start on a
    # plain list. With 'and', only the fuzzy branch (Match objects that carry
    # .start) can ever hold more than one candidate.
    if len(secondMatch) > 1 and (spacerEnd - spacerStart) < minSpacer:
        i = 1
        while i < len(secondMatch) and (spacerEnd - spacerStart) < minSpacer:
            spacerEnd = secondMatch[i].start + secondExpect
            i += 1
    spacer = read[spacerStart:spacerEnd]
    # no spacer if out of bounds
    if len(spacer) > maxSpacer:
        return ''
    if len(spacer) < minSpacer:
        return ''
    return spacer
# identify and process files with the terms below
if ('.fasta' in inFile):
# open inFile for reading/writing and report file being processed
F = open(inFile,mode='rU')
G = open(outPath+outName+'.unique.fasta',mode='w') # unique spacers based on spacer sequence only
I = open(outPath+outName+'.doubleAcquisitions.fasta',mode='w')
J = open(outPath+outName+'.doubleAcquisitions.paired.fasta',mode='w') # double acquisitions with both spacers
K = open(outPath+outName+'.all.fasta',mode='w')
MC = open(outPath+outName+'.multipleAcquisitions.complete.fasta',mode='w') # multiple acquisitions with all spacers
M = open(outPath+outName+'.multipleAcquisitions.fasta',mode='w')
if not os.path.exists('outputs'):
os.makedirs('outputs')
SS = open("outputs/summaryStats.txt", mode = 'a')
if infoFile:
GC = open(outPath+outName+'.info.txt',mode='w')
GC.write("Unique_Spacer_Sequence"+'\t'+"Sequence_Length"+'\t'+"GC_content"+'\n')
os.system(str("echo '##################################################'"))
os.system(str('echo '+"'"+inFile+' accepted for processing'+"'"))
readName = ''
D={}
rawReads=0 # total reads in fasta
spacerReads=0 # reads having a spacer
UniqueSingleAcquisitions=0 # number of unique single acquisitions based on spacer sequence
UniqueDoubleAcquisitions=0 # number of unique double acquisitions based on spacer sequence
UniqueMultipleAcquisitions=0 # number of unique multiple acquisitions based on spacer sequence
SinglefullRepeatReads=0 # reads with a double acquisition
MultiplefullRepeatReads=0 # reads with multiple acquisitions
spacerReadsDoubleBoth=0 # double acquisitions with both spacers
spacerReadsDoubleOne=0 # double acquisitions with one spacer
spacerReadsMultiComplete=0 # multiple acquisition with all spacers
spacerReadsMultiSome=0 # multiple acquisiton with one spacer
spacerReadsMultiNoSpacerBetweenFullDRs=0 # multiple DRs without spacers between them (probably PCR artifacts)
minReadLength=minStagger + len(firstRepeat) + minSpacer + len(secondRepeat)
allDistalSpacers=[]
if 'null' not in LBC:
NonLBCReads=0
for L in F: # loop through reads in file
if '>' in L: # defline, skip for processing but save read name
readName = L.strip()
rawReads+=1
continue
L=L.strip()
# ignore reads that are too short to detect adapted spacer
if len(L) < minReadLength:
continue
# Identify LBC within file
if 'null' not in LBC:
findLBC = L.find(LBC)
if findLBC==-1:
numMismatches = int(round(len(LBC)*0.1))
LBCcoord = fuzzysearch.find_near_matches(LBC, L, max_l_dist = numMismatches)
else:
LBCcoord = [[findLBC, findLBC+len(LBC)]]
if not LBCcoord:
NonLBCReads+=1
continue
# identify and store reads with more than one acquisition (ie those that contain a full DR sequence)
numMismatches = int(round(len(fullRepeat)*0.1))
tempFullRepeat = fuzzysearch.find_near_matches(fullRepeat, L, max_l_dist = numMismatches)
if tempFullRepeat: # if full repeat is found
## DOUBLE ACQUISITIONS ##
if len(tempFullRepeat) is 1:
SinglefullRepeatReads += 1
I.write(readName+'\n'+L+'\n')
# split read into (leader) proximal and (leader) distal and independently run the editSpacer code to extract proximal/distal spacers
tempSpacerProximal = L[:tempFullRepeat[0].start+len(secondRepeat)]
firstExpect = minStagger
firstRangeToLook = maxStagger - minStagger + len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxStagger - minStagger + maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacerProximal=editSpacer(tempSpacerProximal,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
tempSpacerDistal = L[tempFullRepeat[0].end-len(firstRepeat):] # the for is to account for the tendency of regex to add up to 3 nucleotides at the end of a spacer
firstExpect = 0
firstRangeToLook = len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacerDistal=editSpacer(tempSpacerDistal,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
# if single reads have distal & proximal spacers, label read and export these to file for looking into distal-proximal pairs
if spacerDistal and spacerProximal:
spacerReads+=1
spacerReadsDoubleBoth+=1
doubleSpacer= spacerProximal+"_"+spacerDistal
# store spacers in dict, this will force uniqueness
if doubleSpacer not in allDistalSpacers:
if doubleSpacer not in D:
D[doubleSpacer]=[readName+'_doubleAcquisitions',0]
D[doubleSpacer][1]+=1
allDistalSpacers.append(spacerDistal)
J.write(readName+'_doubleAcquisitions_both_distal'+'\n'+spacerDistal+'\n')
J.write(readName+'_doubleAcquisitions_both_proximal'+'\n'+spacerProximal+'\n')
K.write(readName+'_doubleAcquisitions_both_distal'+'\n'+spacerDistal+'\n')
K.write(readName+'_doubleAcquisitions_both_proximal'+'\n'+spacerProximal+'\n')
# process reads with only distal spacer
elif spacerDistal:
spacerReads+=1
spacerReadsDoubleOne+=1
if spacerDistal not in allDistalSpacers:
if spacerDistal not in D:
D[spacerDistal]=[readName+'_doubleAcquisitions_distal',0]
D[spacerDistal][1]+=1
K.write(readName+'_doubleAcquisitions_distal'+'\n'+spacerDistal+'\n')
# process reads with only proximal spacer
elif spacerProximal:
spacerReads+=1
spacerReadsDoubleOne+=1
if spacerProximal not in D:
D[spacerProximal]=[readName+'_doubleAcquisitions_proximal',0]
D[spacerProximal][1]+=1
K.write(readName+'_doubleAcquisitions_proximal'+'\n'+spacerProximal+'\n')
else:
continue
## MULTIPLE ACQUISITIONS ##
elif len(tempFullRepeat)>1:
MultiplefullRepeatReads+=1
M.write(readName+'\n'+L+'\n')
tempSpacerProximal = L[:tempFullRepeat[0].start+len(secondRepeat)]
firstExpect = minStagger
firstRangeToLook = maxStagger - minStagger + len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxStagger - minStagger + maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacerProximal=editSpacer(tempSpacerProximal,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
spacersMedial=''
for index, DR in enumerate(tempFullRepeat):
length = tempFullRepeat[index].start - tempFullRepeat[index-1].end
if length > minSpacer and length < maxSpacer:
tempspacersMedial = L[tempFullRepeat[index-1].end:tempFullRepeat[index].start]
spacersMedial=spacersMedial+"_"+tempspacersMedial
if spacersMedial:
spacersMedial=spacersMedial[1:]
tempSpacerDistal = L[tempFullRepeat[-1].end-len(firstRepeat):]
firstExpect = 0
firstRangeToLook = len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be | |
<gh_stars>0
"""
"""
import sqlite3
import numpy as np
import math
class SQL:
def __init__(self):
self.path = None
self.connexion = None
self.cursor = None
self.verbose = False
def to_SQL_type(self, type_, mode = "format"):
"""
Function allowing to convert element type expressed in Python syntax into type
expressed into SQL syntax.
Parameter:
- type_ [str]: Types have to be committed as a string format
Returns:
- [str]: The parameter type converted in the SQL format if the type is considered in the method.
The input variable otherwise.
"""
if type(type_) == list and mode == "list":
sql_list = "("
for element in type_:
sql_list += "'"+str(element)+"'"+","
sql_list = sql_list[:-1]
sql_list += ")"
return sql_list
if mode == "format":
if type_ == "str":
return "text"
elif type_ == "int":
return "integer"
elif type_ == "float":
return "real"
else:
return type_
elif mode == "logic":
if type_ == "all":
return "ALL"
elif type_ == "any":
return "ANY"
elif type_ == "and":
return "AND"
elif type_ == "or":
return "OR"
elif type_ == "not":
return "NOT"
elif type_ == "in":
return "IN"
elif type_ == "is" or type_ == "==":
return "IS"
else:
return type_
elif mode == "operator":
if type_ == "==":
return "="
elif type_ == "!=":
return "<>"
else:
return type_
else:
return type_
def create_database(self, path):
"""
Function allowing to create a database.
Parameter:
- path [str]: Path and name of the database. Note: The folder should exist.
Returns:
None
"""
if not path[-3:] == ".db":
path += ".db"
self.path = path
self.connexion = sqlite3.connect(path)
self.cursor = self.connexion.cursor()
return
def connect_database(self, path):
"""
Function allowing to connect to an existing database
Parameter:
- path [str]: Path and name of the database. Note: The folder should exist.
Returns:
None
"""
self.create_database(path)
def execute(self,
action = None,
object = None,
argument = None):
"""
Function that execute every command following the SQL query
structure.
"""
command = action+" "+object+" "+argument
if self.verbose:
print (command)
iterator = self.cursor.execute(command)
return iterator
#=====================================================================================#
# LISTING FUNCTIONS
#=====================================================================================#
def get_table_list(self):
"""
Function returning the list of tables in the database
Parameters:
None
Returns:
- [list(str)]: ["table_name1", "table_name2", ...]
"""
action = "SELECT"
object = "name FROM sqlite_master"
argument = "WHERE type='table'"
iterator = self.execute(action = action,
object = object,
argument = argument)
table_list = [x[0] for x in iterator.fetchall()]
return table_list
def get_id_list(self, table):
"""
Function that retrieves the list of ids of the elements within
a table. If the tabe doesn't contain any elements, it return
the following list: [0]
Parameters:
- table [str]: Table name
Returns:
- [int]: List of ids of the elements in the table
in the order they have been added
"""
action = "SELECT"
object = "id"
argument = "FROM "+table
iterator = self.execute(action = action,
object = object,
argument = argument)
id_list = [x[0] for x in iterator.fetchall()]
if len(id_list) == 0 :
return [0]
return id_list
#=====================================================================================#
# CREATION & INSERTION FUNCTIONS
#=====================================================================================#
def create_table(self,
name,
structure):
"""
Function allowing to create a table in the already existing database
Parameters:
- name [str]: Name of the table
- structure [dict]: Structure of the table. Keys corresponds to the name of the columns while
associated values corresponds to the anounced type of the data.
Returns:
None
"""
action = "CREATE"
object = "TABLE"+" "+name
argument = "("
argument += "id"+" "+"integer"+", "
for key in structure.keys():
argument += key+" "+self.to_SQL_type(structure[key], mode = "format")+", "
argument = argument[:-2]
argument += ")"
self.execute(action = action,
object = object,
argument = argument)
return
    def insert(self,
               table,
               value):
        """Insert one row into *table* of the connected database.

        Parameters:
            - table [str]  : Name of the table.
            - value [list] : Attribute values of the element to insert, in
              column order (WITHOUT the leading "id", which is generated here).

        Returns:
            None

        Side effects:
            - *value* is mutated in place: non-finite numbers are replaced by
              the sentinel strings "Inf" / "-Inf" / "+-Inf" / "NaN" so SQLite
              can store them as text.
            - The transaction is committed immediately.
        """
        # Replace non-finite numbers (inf/nan) with string sentinels before
        # building the VALUES clause.
        # NOTE(review): np.isinf/math.isinf raise TypeError for values that are
        # neither numeric nor str (e.g. None) -- assumes value holds only
        # numbers and strings; confirm against callers.
        # print (value)
        # print (type(value[-2]))
        for i in range(len(value)):
            val = value[i]
            if not type(val) == str:
                # if type(val) == float:
                #     val = np.float(val)
                # elif type(val) == int:
                #     val = np.int(val)
                # print ("VAL = ",val)
                if np.isinf(val) or math.isinf(val):
                    # print("Cond1")
                    if val > 1e32:
                        # print("Cond1.1")
                        value[i] = "Inf"
                    elif val < -1e32:
                        # print("Cond1.2")
                        value[i] = "-Inf"
                    else:
                        # Defensive catch-all for an infinity that passes
                        # neither magnitude test.
                        value[i] = "+-Inf"
                elif np.isnan(val):
                    value[i] = "NaN"
        # print (value)
        # New id = last existing id + 1 (get_id_list returns [0] for an empty table).
        last_id = self.get_id_list(table)[-1]
        value = [last_id+1]+value
        action = "INSERT INTO"
        object = table
        argument = "VALUES ("
        for element in value:
            if type(element) == str:
                # Single quotes would break the quoting below; swap for double quotes.
                element = element.replace("'", '"')
                element = "'"+element+"'"
            else:
                element = str(element)
            argument += element+","
        argument = argument[:-1]
        argument += ")"
        self.execute(action = action,
                     object = object,
                     argument = argument)
        self.connexion.commit()
        return
    def delete(self,
               table,
               where_ = None):
        """Delete rows from *table*.

        Parameters:
            - table  [str]: Name of the table.
            - where_ [list(dict | str | list)]: Optional sequence of condition
              pieces translated, in order, into the SQL WHERE clause:
                * dict : {"object": <column>, "operator": <python operator>,
                          "value": <value>}  ->  "column OP value"
                * str  : a logical connective ("and", "or", "not", "in", ...)
                         converted via to_SQL_type(mode="logic")
                * list : a value list rendered as "('a','b',...)" via
                         to_SQL_type(mode="list")
              The caller must supply a logically valid sequence; no validation
              is performed here and a malformed sequence will raise at execute().

        Returns:
            None. When *where_* is omitted, EVERY row of the table is deleted.
        """
        action = "DELETE FROM"+" "
        object = table
        argument = ""
        if where_ is not None:
            argument += "WHERE"+" "
            for condition in where_:
                if type(condition) == dict:
                    sub_object = condition["object"]
                    # Translate Python operators, e.g. "==" -> "=", "!=" -> "<>".
                    operator = self.to_SQL_type(condition["operator"], mode = "operator")
                    sub_value = condition["value"]
                    if type(sub_value) == str:
                        sub_value = "'"+sub_value+"'"
                    else:
                        sub_value = str(sub_value)
                    argument += sub_object+operator+sub_value+" "
                if type(condition) == str:
                    argument += self.to_SQL_type(condition, mode = "logic")+" "
                if type(condition) == list:
                    argument += self.to_SQL_type(condition, mode="list")+" "
        self.execute(action = action,
                     object = object,
                     argument = argument)
        self.connexion.commit()
        return
def drop_table(self,
table):
"""
Function allowing to drop a table from the database
Parameters:
- table [str]: Table name
Returns:
None
"""
action = "DROP"
object = "TABLE"
argument = table
self.execute(action = action,
object = object,
argument = argument)
self.connexion.commit()
return
#=====================================================================================#
# QUERY FUNCTIONS
#=====================================================================================#
def select(self, #https://www.w3schools.com/sql/sql_select.asp
distinct = False, #https://www.w3schools.com/sql/sql_distinct.asp
columns = ["*"], #column1, column2 ...
table = None,
where_ = None, #https://www.w3schools.com/sql/sql_where.asp
orderby_ = None, #https://www.w3schools.com/sql/sql_orderby.asp
ordering = "ASC" # "DESC"
):
action = "SELECT"
if distinct:
action += " "+"DISTINCT"
object = ""
for col in columns:
object += col+", "
object = object[:-2]
if "*" in columns:
object = | |
    def OnStylusButtonDown(self,*args):
        """
        OnStylusButtonDown(self: UIElement, e: StylusButtonEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusButtonDown attached event
        reaches an element in its route that is derived from this class. Implement this method
        to add class handling for this event.

        e: The System.Windows.Input.StylusButtonEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusButtonUp(self,*args):
        """
        OnStylusButtonUp(self: UIElement, e: StylusButtonEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusButtonUp attached event
        reaches an element in its route that is derived from this class. Implement this method
        to add class handling for this event.

        e: The System.Windows.Input.StylusButtonEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusDown(self,*args):
        """
        OnStylusDown(self: UIElement, e: StylusDownEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusDown attached event reaches
        an element in its route that is derived from this class. Implement this method to add
        class handling for this event.

        e: The System.Windows.Input.StylusDownEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusEnter(self,*args):
        """
        OnStylusEnter(self: UIElement, e: StylusEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusEnter attached event is
        raised by this element. Implement this method to add class handling for this event.

        e: The System.Windows.Input.StylusEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusInAirMove(self,*args):
        """
        OnStylusInAirMove(self: UIElement, e: StylusEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusInAirMove attached event
        reaches an element in its route that is derived from this class. Implement this method
        to add class handling for this event.

        e: The System.Windows.Input.StylusEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusInRange(self,*args):
        """
        OnStylusInRange(self: UIElement, e: StylusEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusInRange attached event
        reaches an element in its route that is derived from this class. Implement this method
        to add class handling for this event.

        e: The System.Windows.Input.StylusEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusLeave(self,*args):
        """
        OnStylusLeave(self: UIElement, e: StylusEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusLeave attached event is
        raised by this element. Implement this method to add class handling for this event.

        e: The System.Windows.Input.StylusEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusMove(self,*args):
        """
        OnStylusMove(self: UIElement, e: StylusEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusMove attached event reaches
        an element in its route that is derived from this class. Implement this method to add
        class handling for this event.

        e: The System.Windows.Input.StylusEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusOutOfRange(self,*args):
        """
        OnStylusOutOfRange(self: UIElement, e: StylusEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusOutOfRange attached event
        reaches an element in its route that is derived from this class. Implement this method
        to add class handling for this event.

        e: The System.Windows.Input.StylusEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusSystemGesture(self,*args):
        """
        OnStylusSystemGesture(self: UIElement, e: StylusSystemGestureEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusSystemGesture attached event
        reaches an element in its route that is derived from this class. Implement this method
        to add class handling for this event.

        e: The System.Windows.Input.StylusSystemGestureEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnStylusUp(self,*args):
        """
        OnStylusUp(self: UIElement, e: StylusEventArgs)

        Invoked when an unhandled System.Windows.Input.Stylus.StylusUp attached event reaches an
        element in its route that is derived from this class. Implement this method to add class
        handling for this event.

        e: The System.Windows.Input.StylusEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnTextInput(self,*args):
        """
        OnTextInput(self: UIElement, e: TextCompositionEventArgs)

        Invoked when an unhandled System.Windows.Input.TextCompositionManager.TextInput attached
        event reaches an element in its route that is derived from this class. Implement this
        method to add class handling for this event.

        e: The System.Windows.Input.TextCompositionEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnToolTipClosing(self,*args):
        """
        OnToolTipClosing(self: FrameworkElement, e: ToolTipEventArgs)

        Invoked whenever an unhandled System.Windows.FrameworkElement.ToolTipClosing routed event
        reaches this class in its route. Implement this method to add class handling for this event.

        e: Provides data about the event.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnToolTipOpening(self,*args):
        """
        OnToolTipOpening(self: FrameworkElement, e: ToolTipEventArgs)

        Invoked whenever the System.Windows.FrameworkElement.ToolTipOpening routed event reaches
        this class in its route. Implement this method to add class handling for this event.

        e: Provides data about the event.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnTouchDown(self,*args):
        """
        OnTouchDown(self: UIElement, e: TouchEventArgs)

        Provides class handling for the System.Windows.UIElement.TouchDown routed event that
        occurs when a touch presses inside this element.

        e: A System.Windows.Input.TouchEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnTouchEnter(self,*args):
        """
        OnTouchEnter(self: UIElement, e: TouchEventArgs)

        Provides class handling for the System.Windows.UIElement.TouchEnter routed event that
        occurs when a touch moves from outside to inside the bounds of this element.

        e: A System.Windows.Input.TouchEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnTouchLeave(self,*args):
        """
        OnTouchLeave(self: UIElement, e: TouchEventArgs)

        Provides class handling for the System.Windows.UIElement.TouchLeave routed event that
        occurs when a touch moves from inside to outside the bounds of this
        System.Windows.UIElement.

        e: A System.Windows.Input.TouchEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnTouchMove(self,*args):
        """
        OnTouchMove(self: UIElement, e: TouchEventArgs)

        Provides class handling for the System.Windows.UIElement.TouchMove routed event that
        occurs when a touch moves while inside this element.

        e: A System.Windows.Input.TouchEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnTouchUp(self,*args):
        """
        OnTouchUp(self: UIElement, e: TouchEventArgs)

        Provides class handling for the System.Windows.UIElement.TouchUp routed event that occurs
        when a touch is released inside this element.

        e: A System.Windows.Input.TouchEventArgs that contains the event data.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnVisualChildrenChanged(self,*args):
        """
        OnVisualChildrenChanged(self: Visual, visualAdded: DependencyObject, visualRemoved: DependencyObject)

        Called when the System.Windows.Media.VisualCollection of the visual object is modified.

        visualAdded: The System.Windows.Media.Visual that was added to the collection.
        visualRemoved: The System.Windows.Media.Visual that was removed from the collection.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def OnVisualParentChanged(self,*args):
        """
        OnVisualParentChanged(self: FrameworkElement, oldParent: DependencyObject)

        Invoked when the parent of this element in the visual tree is changed. Overrides
        System.Windows.UIElement.OnVisualParentChanged(System.Windows.DependencyObject).

        oldParent: The old parent element. May be null to indicate that the element did not have
        a visual parent previously.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def ParentLayoutInvalidated(self,*args):
        """
        ParentLayoutInvalidated(self: FrameworkElement, child: UIElement)

        Supports incremental layout implementations in specialized subclasses of
        System.Windows.FrameworkElement.
        System.Windows.FrameworkElement.ParentLayoutInvalidated(System.Windows.UIElement) is
        invoked when a child element has invalidated a property that is marked in metadata as
        affecting the parent's measure or arrange passes during layout.

        child: The child element reporting the change.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def RemoveLogicalChild(self,*args):
        """
        RemoveLogicalChild(self: FrameworkElement, child: object)

        Removes the provided object from this element's logical tree. System.Windows.FrameworkElement
        updates the affected logical tree parent pointers to keep in sync with this deletion.

        child: The element to remove.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    def RemoveVisualChild(self,*args):
        """
        RemoveVisualChild(self: Visual, child: Visual)

        Removes the parent-child relationship between two visuals.

        child: The child visual object to remove from the parent visual.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    @staticmethod
    def SetBaselineOffset(element,value):
        """
        SetBaselineOffset(element: DependencyObject, value: float)

        Sets the value of the System.Windows.Controls.TextBlock.BaselineOffset attached property
        on a specified dependency object.

        element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.BaselineOffset property.
        value: The new value to set the property to.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    @staticmethod
    def SetFontFamily(element,value):
        """
        SetFontFamily(element: DependencyObject, value: FontFamily)

        Sets the value of the System.Windows.Controls.TextBlock.FontFamily attached property on a
        specified dependency object.

        element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.FontFamily property.
        value: The new value to set the property to.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    @staticmethod
    def SetFontSize(element,value):
        """
        SetFontSize(element: DependencyObject, value: float)

        Sets the value of the System.Windows.Controls.TextBlock.FontSize attached property on a
        specified dependency object.

        element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.FontSize property.
        value: The new value to set the property to.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
    @staticmethod
    def SetFontStretch(element,value):
        """
        SetFontStretch(element: DependencyObject, value: FontStretch)

        Sets the value of the System.Windows.Controls.TextBlock.FontStretch attached property on
        a specified dependency object.

        element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.FontStretch property.
        value: The new value to set the property to.
        """
        # Generated .NET interop stub -- the real behavior lives in the CLR base class.
        pass
@staticmethod
def SetFontStyle(element,value):
"""
SetFontStyle(element: DependencyObject,value: FontStyle)
Sets the value of the System.Windows.Controls.TextBlock.FontStyle�attached property on a
specified dependency object.
element: The dependency object on which to set the value of the
System.Windows.Controls.TextBlock.FontStyle property.
value: The new value to |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.