hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57b8f635934bfe236a78eac9080d500004569035 | 1,440 | py | Python | project_files/OLD NDlib Composite.py | TheAkashain/NDLib-C19-UNB | b9769b7ce931e1fadba3c78e0f29b37834675e1c | [
"BSD-2-Clause"
] | null | null | null | project_files/OLD NDlib Composite.py | TheAkashain/NDLib-C19-UNB | b9769b7ce931e1fadba3c78e0f29b37834675e1c | [
"BSD-2-Clause"
] | null | null | null | project_files/OLD NDlib Composite.py | TheAkashain/NDLib-C19-UNB | b9769b7ce931e1fadba3c78e0f29b37834675e1c | [
"BSD-2-Clause"
] | 1 | 2020-11-24T14:21:03.000Z | 2020-11-24T14:21:03.000Z | import networkx as nx
# Simulate an SIR-style epidemic with NDlib's CompositeModel on a
# Barabasi-Albert graph and plot the diffusion trends averaged over
# several independent executions.
import ndlib.models.ModelConfig as mc
import ndlib.models.CompositeModel as gc
import ndlib.models.compartments as ns
from ndlib.utils import multi_runs
import time

# Experiment parameters
N = 60000          # number of nodes in the graph
connections = 6    # edges attached from each new node (BA preferential attachment)
iterations = 200   # time steps per simulation
executions = 20    # independent runs averaged by multi_runs

start_time = time.time()
# fixed typo in the original message: "Barbasi" -> "Barabasi"
print('-----Generating Barabasi-Albert graph with {} nodes-----'.format(N))
g = nx.barabasi_albert_graph(N, connections)
# Composite Model instantiation
print('-----Configuring Model-----')
model = gc.CompositeModel(g)
# Model statuses (classic SIR compartments)
model.add_status("Susceptible")
model.add_status("Infected")
model.add_status("Removed")
# Compartment definition: infection triggered by infected neighbours,
# removal happens spontaneously
c1 = ns.NodeStochastic(0.02, triggering_status="Infected")
c2 = ns.NodeStochastic(0.01)
# Rule definition
model.add_rule("Susceptible", "Infected", c1)
model.add_rule("Infected", "Removed", c2)
# Model initial status configuration: 5% of nodes start infected
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.05)
model.set_initial_status(config)
# Simulation multiple execution
print('-----Doing {} simulation(s)-----'.format(executions))
trends = multi_runs(model, execution_number=executions, iteration_number=iterations, infection_sets=None)
stop_time = time.time()
total_time = stop_time - start_time
print('\n----- Total Time: {} seconds ----'.format(total_time))
print('-----Plotting Results-----')
from ndlib.viz.mpl.DiffusionTrend import DiffusionTrend
viz = DiffusionTrend(model, trends)
viz.plot()
| 27.169811 | 105 | 0.759028 |
5eefb9affe4459fa100cd7a267f17ce98c607a2b | 1,832 | py | Python | src/encoded/tests/test_upgrade_quality_metric.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 102 | 2015-05-20T01:17:43.000Z | 2022-03-07T06:03:55.000Z | src/encoded/tests/test_upgrade_quality_metric.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 901 | 2015-01-07T23:11:57.000Z | 2022-03-18T13:56:12.000Z | src/encoded/tests/test_upgrade_quality_metric.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 65 | 2015-02-06T23:00:26.000Z | 2022-01-22T07:58:44.000Z | import pytest
@pytest.mark.parametrize('qc_type, old_version, new_version', [
    ('bismark_quality_metric', '8', '9'),
    ('chipseq_filter_quality_metric', '7', '8'),
    ('complexity_xcorr_quality_metric', '7', '8'),
    ('correlation_quality_metric', '7', '8'),
    ('cpg_correlation_quality_metric', '7', '8'),
    ('duplicates_quality_metric', '6', '7'),
    ('edwbamstats_quality_metric', '7', '8'),
    ('filtering_quality_metric', '7', '8'),
    ('gencode_category_quality_metric', '1', '2'),
    ('generic_quality_metric', '7', '8'),
    ('histone_chipseq_quality_metric', '1', '2'),
    ('hotspot_quality_metric', '7', '8'),
    ('idr_quality_metric', '6', '7'),
    ('idr_summary_quality_metric', '7', '8'),
    ('long_read_rna_mapping_quality_metric', '1', '2'),
    ('long_read_rna_quantification_quality_metric', '1', '2'),
    ('mad_quality_metric', '6', '7'),
    ('micro_rna_mapping_quality_metric', '1', '2'),
    ('micro_rna_quantification_quality_metric', '1', '2'),
    ('samtools_flagstats_quality_metric', '7', '8'),
    ('samtools_stats_quality_metric', '7', '8'),
    ('star_quality_metric', '7', '8'),
    ('trimming_quality_metric', '7', '8')
])
def test_upgrade_snATAC_name(upgrader, quality_metric_1, qc_type, old_version, new_version):
    """For every listed QC schema, the upgrade step renames the assay term
    'single-nuclei ATAC-seq' to 'single-nucleus ATAC-seq' and bumps the
    schema version; any other assay term must be left untouched.
    """
    quality_metric_1['schema_version'] = old_version
    quality_metric_1['assay_term_name'] = 'single-nuclei ATAC-seq'
    value = upgrader.upgrade(qc_type, quality_metric_1, current_version=old_version, target_version=new_version)
    assert value['assay_term_name'] == 'single-nucleus ATAC-seq'
    assert value['schema_version'] == new_version
    # Re-run the upgrade with a non-snATAC term to check it is not renamed.
    # NOTE(review): quality_metric_1 was mutated by the first upgrade call,
    # so its schema_version may already equal new_version here — confirm
    # that the upgrader still applies the step under current_version.
    value['assay_term_name'] = 'HiC'
    value = upgrader.upgrade(qc_type, quality_metric_1, current_version=old_version, target_version=new_version)
    assert value['assay_term_name'] != 'single-nucleus ATAC-seq'
| 48.210526 | 112 | 0.683406 |
c47e9ab7c936740646807a15278ed58692605039 | 20,161 | py | Python | grasp/mt/minrisk.py | wilkeraziz/grasp | 95f5135fd3711eed32cddce2049dd595314fb1f4 | [
"Apache-2.0"
] | 9 | 2015-07-22T18:07:44.000Z | 2021-11-08T11:21:11.000Z | grasp/mt/minrisk.py | wilkeraziz/grasp | 95f5135fd3711eed32cddce2049dd595314fb1f4 | [
"Apache-2.0"
] | null | null | null | grasp/mt/minrisk.py | wilkeraziz/grasp | 95f5135fd3711eed32cddce2049dd595314fb1f4 | [
"Apache-2.0"
] | 1 | 2021-01-12T10:00:22.000Z | 2021-01-12T10:00:22.000Z | """
:Authors: - Wilker Aziz
"""
import logging
import argparse
import sys
"""
:Authors: - Wilker Aziz
"""
from os.path import splitext
import subprocess as sp
import shlex
import argparse
import logging
import sys
import itertools
import os
import numpy as np
import traceback
from multiprocessing import Pool
from functools import partial
from collections import deque
from grasp.loss.fast_bleu import DecodingBLEU
from grasp.loss.fast_bleu import doc_bleu, stream_doc_bleu
import grasp.ptypes as ptypes
from grasp.recipes import smart_ropen, smart_wopen, make_unique_directory, pickle_it, unpickle_it, traceit
from grasp.scoring.scorer import TableLookupScorer, StatelessScorer, StatefulScorer
from grasp.scoring.util import make_models
from grasp.scoring.util import read_weights
from grasp.mt.cdec_format import load_grammar
from grasp.mt.util import GoalRuleMaker
from grasp.mt.util import save_forest, save_ffs, load_ffs, make_dead_srule, make_batches, number_of_batches
from grasp.mt.segment import SegmentMetaData
from grasp.mt.input import make_pass_grammar
import grasp.semiring as semiring
from grasp.semiring.operator import FixedLHS, FixedRHS
from grasp.formal.scfgop import output_projection
from grasp.formal.fsa import make_dfa, make_dfa_set, make_dfa_set2
from grasp.formal.scfgop import make_hypergraph_from_input_view, output_projection
from grasp.formal.scfgop import lookup_components, stateless_components
from grasp.formal.topsort import AcyclicTopSortTable
from grasp.formal.traversal import bracketed_string, yield_string
from grasp.formal.wfunc import TableLookupFunction, ConstantFunction, derivation_weight
from grasp.cfg.model import DummyConstant
from grasp.cfg.symbol import Nonterminal
from grasp.cfg.symbol import Terminal
from grasp.cfg.srule import OutputView
from grasp.alg.deduction import NederhofParser, EarleyParser, EarleyRescorer
from grasp.alg.inference import viterbi_derivation, AncestralSampler
from grasp.alg.value import acyclic_value_recursion, acyclic_reversed_value_recursion, compute_edge_expectation
from grasp.alg.rescoring import weight_edges
from grasp.alg.rescoring import SlicedRescoring
from grasp.alg.rescoring import stateless_rescoring
from grasp.alg.chain import apply_filters, group_by_identity, group_by_projection
from grasp.alg.expectation import expected_components
from grasp.scoring.frepr import FComponents
from grasp.io.results import save_mcmc_yields, save_mcmc_derivations, save_markov_chain
from random import shuffle
from numpy import linalg as LA
from scipy.optimize import minimize
from time import time, strftime
from types import SimpleNamespace
import grasp.mt.pipeline2 as pipeline
def npvec2str(nparray, fnames=None, separator=' '):
    """Render a vector of feature values as a single string.

    Without *fnames* each value is repr()-ed and joined by *separator*;
    with *fnames* every value is rendered as ``name=value`` instead.
    """
    if fnames is None:
        parts = (repr(value) for value in nparray)
    else:
        parts = ('{0}={1}'.format(name, repr(value))
                 for name, value in zip(fnames, nparray))
    return separator.join(parts)
def cmd_optimisation(parser):
    """Register optimisation options on *parser*: batching mode,
    initialisation, resuming, and the inner SGD settings."""
    parser.add_argument('--maxiter', '-M',
                        default=10, type=int,
                        help='Maximum number of iterations')
    parser.add_argument('--mode',
                        default='10', type=str,
                        help="use 'all' for all data, use 'online' for online updates, use 0-100 to specify batch size in percentage")
    parser.add_argument('--shuffle',
                        action='store_true',
                        help='shuffle training instances')
    parser.add_argument('--temperature',
                        default=1.0, type=float,
                        help='scales the initial model')
    # proxy and target share the same initialisation semantics
    for flag in ('--proxy-init', '--target-init'):
        parser.add_argument(flag,
                            default='uniform', type=str,
                            help="use 'uniform' for uniform weights, 'random' for random weights, or choose a default weight")
    parser.add_argument('--resume',
                        default=0, type=int,
                        help='Resume from a certain iteration (requires the config file of the preceding run)')
    parser.add_argument('--merge',
                        default=0, type=int,
                        help='how many iterations should we consider in estimating Z(x) (use 0 or less for all)')
    parser.add_argument('--sgd',
                        default=[10, 10], type=int, nargs=2,
                        help='Number of iterations and function evaluations for target optimisation')
    parser.add_argument('--tol',
                        default=[1e-9, 1e-9], type=float, nargs=2,
                        help='f-tol and g-tol in target optimisation')
    parser.add_argument('--L2',
                        default=0.0, type=float,
                        help='Weight of L2 regulariser in target optimisation')
def cmd_logging(parser):
    """Register output/verbosity options on *parser*."""
    # both flags default to 0 (falsy) and become True when given
    for flag, what in (('--save-d', 'derivations'), ('--save-y', 'translations')):
        parser.add_argument(flag,
                            action='store_true', default=0,
                            help='store sampled {0} (after MCMC filters apply)'.format(what))
    parser.add_argument('--verbose', '-v',
                        action='count', default=0,
                        help='increase the verbosity level')
def cmd_loss(group):
    """Register sentence-level BLEU loss options on the argument *group*."""
    group.add_argument('--bleu-order',
                       default=4, type=int, metavar='N',
                       help='longest n-gram feature for sentence-level IBM-BLEU')
    group.add_argument('--bleu-smoothing',
                       default=1.0, type=float, metavar='F',
                       help='add-p smoothing for sentence-level IBM-BLEU')
def cmd_parser(group):
    """Register parsing/inference options on the argument *group*."""
    group.add_argument('--goal',
                       default='GOAL', type=str, metavar='LABEL',
                       help='default goal symbol (root after parsing/intersection)')
    group.add_argument('--framework',
                       default='exact', type=str, choices=['exact', 'slice'],
                       metavar='FRAMEWORK',
                       help="inference framework: 'exact', 'slice' sampling")
def cmd_grammar(group):
    """Register grammar-related options on the argument *group*."""
    group.add_argument('--start', '-S',
                       default='S', type=str, metavar='LABEL',
                       help='default start symbol')
    group.add_argument('--dev-grammars', type=str,
                       help='grammars for the dev set')
    group.add_argument('--devtest-grammars', type=str,
                       help='grammars for the devtest set')
    # repeatable options accumulate into lists
    group.add_argument('--extra-grammar',
                       default=[], action='append', metavar='PATH',
                       help='path to an additional grammar (multiple allowed)')
    group.add_argument('--glue-grammar',
                       default=[], action='append', metavar='PATH',
                       help='glue rules are only applied to initial states (multiple allowed)')
    group.add_argument('--pass-through',
                       action='store_true',
                       help='add pass-through rules for every input word (and an indicator feature for unknown words)')
    group.add_argument('--default-symbol', '-X',
                       default='X', type=str, metavar='LABEL',
                       help='default nonterminal (used for pass-through rules and automatic glue rules)')
def cmd_sampler(group):
    """Register importance-sampler options on the argument *group*."""
    group.add_argument('--samples',
                       default=100, type=int, metavar='N',
                       help='number of samples from proxy')
def get_argparser():
    """Assemble the full command line interface for this script.

    Positional arguments: workspace, proxy, target, dev.
    Option groups are registered by the cmd_* helpers above.
    :return: a configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(description='Training by MLE',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    #parser.add_argument('config', type=str, help="configuration file")
    parser.add_argument("workspace",
                        type=str, default=None,
                        help="where samples can be found and where decisions are placed")
    parser.add_argument("proxy", type=str,
                        help="proxy model description")
    parser.add_argument("target", type=str,
                        help="target model description")
    parser.add_argument("dev", type=str,
                        help="development set")
    # NOTE(review): the two implicitly-concatenated help fragments below join
    # without a space ("...storedby default...") — consider adding one.
    parser.add_argument('--experiment',
                        type=str,
                        help='folder within the workspace where results are stored'
                             'by default we use a timestamp and a random suffix')
    parser.add_argument("--proxy-weights", '-Q', type=str,
                        help="proxy weights")
    parser.add_argument("--target-weights", '-P', type=str,
                        help="target weights")
    parser.add_argument("--proxy-temperature", '-Tq', type=float, default=1.0,
                        help="scales the model (the bigger the more uniform)")
    parser.add_argument("--target-temperature", '-Tp', type=float, default=1.0,
                        help="scales the model (the bigger the more uniform)")
    parser.add_argument("--jobs", type=int, default=2, help="number of processes")
    parser.add_argument('--dev-alias', type=str, default='dev',
                        help='Change the alias of the dev set')
    parser.add_argument("--devtest", type=str,
                        help="devtest set")
    parser.add_argument('--devtest-alias', type=str, default='devtest',
                        help='Change the alias of the devtest set')
    parser.add_argument('--redo', action='store_true',
                        help='overwrite already computed files (by default we do not repeat computation)')
    cmd_parser(parser.add_argument_group('Parser'))
    cmd_grammar(parser.add_argument_group('Grammar'))
    cmd_optimisation(parser.add_argument_group('Parameter optimisation by SGD'))
    cmd_loss(parser.add_argument_group('Loss'))
    cmd_sampler(parser.add_argument_group('Importance sampler'))
    cmd_logging(parser.add_argument_group('Logging'))
    # General
    return parser
def make_dirs(args, exist_ok=True):
    """
    Create the workspace/experiment directory tree.

    :param args: command line arguments
    :param exist_ok: do not fail when a directory already exists
    :return: tuple (outdir, devdir) — the experiment directory within the
        workspace and its dev-set subdirectory
    """
    # create the workspace if missing
    logging.info('Workspace: %s', args.workspace)
    if not os.path.exists(args.workspace):
        os.makedirs(args.workspace, exist_ok=exist_ok)

    # create a unique experiment area or reuse a given one
    if not args.experiment:
        outdir = make_unique_directory(args.workspace)
    else:
        outdir = '{0}/{1}'.format(args.workspace, args.experiment)
        os.makedirs(outdir, exist_ok=exist_ok)
    logging.info('Writing files to: %s', outdir)

    # subdirectories for dev, (optionally) devtest, and per-iteration output
    devdir = '{0}/{1}'.format(outdir, args.dev_alias)
    os.makedirs(devdir, exist_ok=exist_ok)

    if args.devtest:
        devtestdir = '{0}/{1}'.format(outdir, args.devtest_alias)
        os.makedirs(devtestdir, exist_ok=exist_ok)

    dynamicdir = '{0}/iterations'.format(outdir)
    os.makedirs(dynamicdir, exist_ok=exist_ok)

    return outdir, devdir
@traceit
def pass0_to_pass2(seg, options, workingdir, model, redo, log):
    """Parse one segment up to pass2 (proxy forest + components on disk).

    Results are cached under workingdir; when they already exist (and redo
    is False) parsing is skipped.
    :return: True if the segment produced a non-empty forest
    """
    saving = {'pass2.forest': '{0}/{1}.q-forest'.format(workingdir, seg.id),
              'pass2.components': '{0}/{1}.q-components'.format(workingdir, seg.id)}
    # short-circuit when all outputs are already on disk
    if pipeline.all_steps_complete(saving, redo):
        return True
    forest, components = pipeline.pass0_to_pass2(seg,
                                                 options,
                                                 model.lookup,
                                                 model.stateless,
                                                 model.stateful,
                                                 saving=saving, redo=redo, log=log)
    return forest.n_nodes() > 0
def make_pass0_to_pass2_options(args):
    """Project the command line namespace onto the option names expected by
    pipeline.pass0_to_pass2 (note the singular->plural grammar renames)."""
    return SimpleNamespace(
        extra_grammars=args.extra_grammar,
        glue_grammars=args.glue_grammar,
        pass_through=args.pass_through,
        default_symbol=args.default_symbol,
        goal=args.goal,
        start=args.start,
    )
def parse_training(args, staticdir, model, segments):
    """Parse all training segments in parallel (pass0 to pass2).

    :return: tuple with only the segments that produced a non-empty forest
    """
    logging.info('Parsing %d training instances using %d workers', len(segments), args.jobs)
    # pass0_to_pass2 is a top-level function so the partial can be pickled
    # by multiprocessing
    with Pool(args.jobs) as workers:
        feedback = workers.map(partial(pass0_to_pass2,
                                       options=make_pass0_to_pass2_options(args),
                                       workingdir=staticdir,
                                       model=model,
                                       redo=args.redo,
                                       log=logging.info),
                               segments)
    return tuple([seg for seg, status in zip(segments, feedback) if status])
def make_impsamp_options(args):
    """Extend the pass0-to-pass2 options with importance-sampling settings."""
    opts = make_pass0_to_pass2_options(args)
    for attr in ('samples', 'bleu_order', 'bleu_smoothing'):
        setattr(opts, attr, getattr(args, attr))
    return opts
@traceit
def importance_sample(seg, options, staticdir, workingdir, proxy, target, redo, log):
    """Importance-sample translations for one segment and return the
    minimum-risk (consensus BLEU) decision.

    Derivations are drawn from the proxy q, re-weighted by f/g to estimate
    the target posterior p(y|x), and the candidate minimising the expected
    BLEU loss under that posterior is selected.

    :return: (best translation string, its estimated posterior, its loss)
    """
    saving = {'is.samples': '{0}/samples/{1}.is'.format(workingdir, seg.id),
              'pass2.forest': '{0}/{1}.q-forest'.format(staticdir, seg.id),
              'pass2.components': '{0}/{1}.q-components'.format(staticdir, seg.id)}
    # TODO:
    #   1. normalise q(d) \propto g(d) exactly?
    #   2. use sample frequency for q(d)?
    #   3. use unnormalised g(d)
    samples = pipeline.importance_sample(seg,
                                         options,
                                         proxy,
                                         target,
                                         saving=saving, redo=redo, log=log)
    # support of the empirical distribution (one entry per distinct string)
    Y = [None] * len(samples)
    # proxy posterior q(y) and (unnormalised) importance estimate of p(y)
    Q = np.zeros(len(samples), dtype=ptypes.weight)
    P = np.zeros(len(samples), dtype=ptypes.weight)
    for i, sample in enumerate(samples):
        Y[i] = sample.y.split()
        qy = 0.0
        py = 0.0
        for d in sample.D:  # derivations projecting onto this string
            f = target.score(d.p_comps)
            g = proxy.score(d.q_comps)  # TODO: consider normalising g exactly
            w = semiring.inside.divide(f, g)  # importance weight f(d)/g(d)
            qy += float(d.count) / len(samples)
            py += d.count * semiring.inside.as_real(w)
        Q[i] = qy
        P[i] = py
    # normalise the importance-weighted estimate of p(y|x)
    # (assumes at least one sample with non-zero weight)
    P /= P.sum()
    # consensus (expected) BLEU loss of each candidate under the posterior
    bleu = DecodingBLEU(Y, P, max_order=options.bleu_order, smoothing=options.bleu_smoothing)
    L = [bleu.loss(y) for y in Y]
    # rank candidates by increasing loss, ties broken by decreasing posterior
    ranking = sorted(range(len(Y)), key=lambda i: (L[i], -P[i]))
    with smart_wopen('{0}/samples/{1}.ranking.gz'.format(workingdir, seg.id)) as fo:
        print('# L ||| p(y) ||| q(y) ||| y', file=fo)
        for i in ranking:
            print('{0} ||| {1} ||| {2} ||| {3}'.format(L[i], P[i], Q[i], samples[i].y), file=fo)
    # BUG FIX: the original returned samples[i] where i had leaked from the
    # loop above, i.e. the WORST-ranked (highest-loss) candidate; minimum-risk
    # decoding must return the first (lowest-loss) candidate.
    best = ranking[0]
    return samples[best].y, P[best], L[best]
def sample_and_decode(args, staticdir, workingdir, proxy, target, segments):
    """Importance-sample and decode every segment in parallel.

    :return: list with one decision per segment, in input order
    """
    logging.info('Decoding %d segments using %d workers', len(segments), args.jobs)
    os.makedirs('{0}/samples'.format(workingdir), exist_ok=True)
    # importance_sample is a top-level function so the partial can be
    # pickled by multiprocessing
    with Pool(args.jobs) as workers:
        decisions = workers.map(partial(importance_sample,
                                        options=make_impsamp_options(args),
                                        staticdir=staticdir,
                                        workingdir=workingdir,
                                        proxy=proxy,
                                        target=target,
                                        redo=args.redo,
                                        log=logging.info),
                                segments)
    return decisions
def mteval(args, workspace, iteration, proxy, target, segments, alias):
    """Decode *segments* for the given iteration and score the 1-best
    hypotheses with corpus-level BLEU against the stored references.

    :return: the corpus BLEU score
    """
    decisions = sample_and_decode(args,
                                  '{0}/{1}'.format(workspace, alias),
                                  '{0}/iterations/{1}/{2}'.format(workspace, iteration, alias),
                                  proxy, target, segments)
    evaldir = '{0}/iterations/{1}/{2}'.format(workspace, iteration, alias)
    os.makedirs(evaldir, exist_ok=True)
    # write one best hypothesis per line
    with smart_wopen('{0}/hyps'.format(evaldir)) as fo:
        for y, p, l in decisions:
            print(y, file=fo)
    bleu, pn, bp = stream_doc_bleu(smart_ropen('{0}/hyps'.format(evaldir)),
                                   smart_ropen('{0}/{1}/refs'.format(workspace, alias)),
                                   max_order=args.bleu_order,
                                   smoothing=args.bleu_smoothing)
    logging.info('BLEU %s: %.4f', alias, bleu)
    return bleu
def sanity_checks(args):
    """Verify that all user-supplied input files exist.

    Logs an error for every missing file (optional paths are only checked
    when given) and returns True only if nothing is missing.
    """
    # (path, mandatory?, error message) in the order they are reported
    checks = [
        (args.dev, True, 'Training set not found: %s'),
        (args.devtest, False, 'Validation set not found: %s'),
        (args.proxy, True, 'Proxy model description not found: %s'),
        (args.target, True, 'Target model description not found: %s'),
        (args.proxy_weights, False, 'Proxy model weights not found: %s'),
        (args.target_weights, False, 'Target model weights not found: %s'),
    ]
    all_found = True
    for path, mandatory, message in checks:
        if (mandatory or path) and not os.path.exists(path):
            logging.error(message, path)
            all_found = False
    return all_found
def core(args):
    """Run the pipeline: create directories, load the proxy and target
    models, parse dev/devtest data, and evaluate the initial model."""
    workspace, devdir = make_dirs(args)
    if not sanity_checks(args):
        raise FileNotFoundError('One or more files could not be found')
    # proxy q is the sampling distribution, target p the model being evaluated
    proxy = pipeline.load_model(args.proxy, args.proxy_weights, args.proxy_init, args.proxy_temperature)
    logging.info('Proxy:\n%s', proxy)
    target = pipeline.load_model(args.target, args.target_weights, args.target_init, args.target_temperature)
    logging.info('Target:\n%s', target)

    # 2. Parse data
    dev = pipeline.read_segments_from_file(args.dev, args.dev_grammars)
    dev = parse_training(args, devdir, proxy, dev)
    logging.info(' %d training instances', len(dev))
    # store references for evaluation purposes
    pipeline.save_references('{0}/{1}/refs'.format(workspace, args.dev_alias), dev)

    # Validation set: fall back to the dev set when none is given
    if args.devtest is None:
        args.devtest = args.dev
        args.devtest_alias = args.dev_alias
        args.devtest_grammars = args.dev_grammars
        devtest = dev
    else:
        devtest = pipeline.read_segments_from_file(args.devtest, args.devtest_grammars)
        devtest = parse_training(args, '{0}/{1}'.format(workspace, args.devtest_alias), proxy, devtest)
    logging.info(' %d validation instances', len(devtest))
    pipeline.save_references('{0}/{1}/refs'.format(workspace, args.devtest_alias), devtest)

    # evaluate the initial model (iteration 0)
    mteval(args, workspace, 0, proxy, target, devtest, args.devtest_alias)
    ##print('{0} ||| init ||| {1}={2} ||| {3}'.format(0, args.devtest_alias, bleu, npvec2str(model.weights().densify(), fnames)))
    # 3. Optimise
    #dimensionality = len(fnames)
def main():
    """Script entry point: parse the command line, configure logging
    according to -v/-vv, and run the pipeline."""
    args = get_argparser().parse_args()
    # -v -> INFO, -vv (or more) -> DEBUG; default keeps logging unconfigured
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S')
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S')
    core(args)


if __name__ == '__main__':
    main()
| 42.08977 | 129 | 0.621497 |
ee61c8480304d8b0a9cb464b470ae06fc2b03cd3 | 1,947 | py | Python | vendor/google_riscv-dv/pygen/pygen_src/test/riscv_instr_base_test.py | PaulOKCrevinn/ibex | 00240499bb865e0453ed52fa99f064462c7b053c | [
"Apache-2.0"
] | null | null | null | vendor/google_riscv-dv/pygen/pygen_src/test/riscv_instr_base_test.py | PaulOKCrevinn/ibex | 00240499bb865e0453ed52fa99f064462c7b053c | [
"Apache-2.0"
] | null | null | null | vendor/google_riscv-dv/pygen/pygen_src/test/riscv_instr_base_test.py | PaulOKCrevinn/ibex | 00240499bb865e0453ed52fa99f064462c7b053c | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 Google LLC
Copyright 2020 PerfectVIPs Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import sys
sys.path.append("pygen/")
from pygen_src.riscv_instr_pkg import *
from pygen_src.riscv_instr_gen_config import cfg # NOQA
if cfg.argv.target == "rv32i":
from pygen_src.isa.rv32i_instr import * # NOQA
if cfg.argv.target == "rv32imc":
from pygen_src.isa.rv32i_instr import * # NOQA
from pygen_src.isa.rv32m_instr import * # NOQA
from pygen_src.isa.rv32c_instr import * # NOQA
from pygen_src.isa.riscv_instr import riscv_instr # NOQA
from pygen_src.riscv_asm_program_gen import riscv_asm_program_gen # NOQA
from pygen_src.riscv_utils import gen_config_table
class riscv_instr_base_test:
    """Base test: randomises the shared configuration and generates one
    RISC-V assembly test program per requested test."""

    def __init__(self):
        # index appended to the first generated file name
        self.start_idx = cfg.argv.start_idx
        # base name for generated assembly files
        self.asm_file_name = cfg.argv.asm_file_name

    def run_phase(self):
        """Generate cfg.num_of_tests assembly test programs."""
        for test_num in range(cfg.num_of_tests):
            cfg.randomize()
            gen_config_table()
            asm = riscv_asm_program_gen()
            riscv_instr.create_instr_list(cfg)
            # BUG FIX: the original re-assigned self.asm_file_name inside
            # the loop, so a non-empty suffix was appended once per
            # iteration (name.sfx.sfx...) when num_of_tests > 1; build the
            # name locally instead.
            base_name = self.asm_file_name
            if cfg.asm_test_suffix != "":
                base_name = "{}.{}".format(base_name,
                                           cfg.asm_test_suffix)
            test_name = "{}_{}.S".format(base_name,
                                         test_num + self.start_idx)
            asm.get_directed_instr_stream()
            asm.gen_program()
            asm.gen_test_file(test_name)
# Instantiate and run at import time.
# NOTE(review): re-binding the class name to the instance shadows the class
# riscv_instr_base_test defined above — confirm no other module imports the
# class itself.
riscv_instr_base_test = riscv_instr_base_test()
riscv_instr_base_test.run_phase()
| 37.442308 | 73 | 0.686184 |
117c236ec4ba2c6ecdc73b8299a72595a3d2def7 | 490 | py | Python | BatchTestScripts/Low usage/MUXsandbox.py | jakehyvonen/BTSPython | 9580a04622226a30fea4d5cbd036c7f88a9b732d | [
"MIT"
] | null | null | null | BatchTestScripts/Low usage/MUXsandbox.py | jakehyvonen/BTSPython | 9580a04622226a30fea4d5cbd036c7f88a9b732d | [
"MIT"
] | null | null | null | BatchTestScripts/Low usage/MUXsandbox.py | jakehyvonen/BTSPython | 9580a04622226a30fea4d5cbd036c7f88a9b732d | [
"MIT"
] | null | null | null | from gpiozero import LED
from time import sleep
#low level IC control pins
led1 = LED(13) #A0 pin
led2 = LED(6) #A1 pin
led3 = LED(5) #A2 pin
led4 = LED(27) #led4+5 = device rest plate
led5 = LED(22)
def SwitchMUXtoA():
    """Select MUX channel A: drive all three address pins low.

    led1/led2/led3 are presumably wired to A0/A1/A2 — TODO confirm.
    """
    for pin in (led1, led2, led3):
        pin.off()
def SwitchMUXtoB():
    """Select MUX channel B: address pins led1=high, led2=low, led3=low."""
    for pin, high in ((led1, True), (led2, False), (led3, False)):
        (pin.on if high else pin.off)()
def SwitchMUXtoC():
    """Select MUX channel C: address pins led1=low, led2=high, led3=low."""
    for pin, high in ((led1, False), (led2, True), (led3, False)):
        (pin.on if high else pin.off)()
def SwitchMUXtoD():
    """Select MUX channel D: address pins led1=high, led2=high, led3=low."""
    for pin, high in ((led1, True), (led2, True), (led3, False)):
        (pin.on if high else pin.off)()
SwitchMUXtoD() | 14.848485 | 42 | 0.593878 |
7ba625b4fe2f608acb4d92ea76533c6d856a3c03 | 10,135 | py | Python | tests/platform_tests/test_reboot.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/test_reboot.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/test_reboot.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | null | null | null | """
Check platform status after reboot. Three types of reboot are covered in this script:
* Cold reboot
* Fast reboot
* Warm reboot
This script is to cover the test case 'Reload configuration' in the SONiC platform test plan:
https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md
"""
import logging
import re
import time
from datetime import datetime
import pytest
from tests.common.fixtures.conn_graph_facts import conn_graph_facts
from tests.common.utilities import wait_until
from tests.common.reboot import *
from tests.common.platform.transceiver_utils import check_transceiver_basic
from tests.common.platform.interface_utils import check_all_interface_information, get_port_map
from tests.common.platform.daemon_utils import check_pmon_daemon_status
from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes
# Marks applied to every test in this module: disable the log analyzer and
# allow any testbed topology.
pytestmark = [
    pytest.mark.disable_loganalyzer,
    pytest.mark.topology('any')
]

# Polling upper bounds (seconds) passed to wait_until below.
MAX_WAIT_TIME_FOR_INTERFACES = 300
MAX_WAIT_TIME_FOR_REBOOT_CAUSE = 120
@pytest.fixture(scope="module", autouse=True)
def teardown_module(duthosts, rand_one_dut_hostname, conn_graph_facts, xcvr_skip_list):
    """Module-level autouse fixture: after every test in this module has
    finished, verify the DUT's critical processes, interfaces and
    transceivers are healthy."""
    duthost = duthosts[rand_one_dut_hostname]
    yield
    # everything below runs at module teardown
    logging.info("Tearing down: to make sure all the critical services, interfaces and transceivers are good")
    interfaces = conn_graph_facts["device_conn"][duthost.hostname]
    check_critical_processes(duthost, watch_secs=10)
    check_interfaces_and_services(duthost, interfaces, xcvr_skip_list)
def reboot_and_check(localhost, dut, interfaces, xcvr_skip_list, reboot_type=REBOOT_TYPE_COLD, reboot_helper=None, reboot_kwargs=None):
    """
    Perform the specified type of reboot and check platform status.
    @param localhost: The Localhost object.
    @param dut: The AnsibleHost object of DUT.
    @param interfaces: DUT's interfaces defined by minigraph
    @param xcvr_skip_list: list of DUT's interfaces for which transceiver checks are skipped
    @param reboot_type: The reboot type, pre-defined const that has name convention of REBOOT_TYPE_XXX.
    @param reboot_helper: The helper function used only by power off reboot
    @param reboot_kwargs: The argument used by reboot_helper
    """
    logging.info("Run %s reboot on DUT" % reboot_type)
    # reboot() comes from tests.common.reboot (star import above)
    reboot(dut, localhost, reboot_type=reboot_type, reboot_helper=reboot_helper, reboot_kwargs=reboot_kwargs)
    # verify reboot-cause, interfaces, transceivers and pmon daemons
    check_interfaces_and_services(dut, interfaces, xcvr_skip_list, reboot_type)
def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, reboot_type = None):
    """
    Perform a further check after reboot-cause, including transceiver status, interface status
    @param dut: The AnsibleHost object of DUT.
    @param interfaces: DUT's interfaces defined by minigraph
    @param xcvr_skip_list: list of DUT's interfaces for which transceiver checks are skipped
    @param reboot_type: if given, the reported reboot-cause is verified first
    """
    logging.info("Wait until all critical services are fully started")
    wait_critical_processes(dut)

    if reboot_type is not None:
        logging.info("Check reboot cause")
        assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, check_reboot_cause, dut, reboot_type), \
            "got reboot-cause failed after rebooted by %s" % reboot_type

        # some reboot types only need the reboot-cause verification
        if reboot_ctrl_dict[reboot_type]["test_reboot_cause_only"]:
            logging.info("Further checking skipped for %s test which intends to verify reboot-cause only" % reboot_type)
            return

    logging.info("Wait %d seconds for all the transceivers to be detected" % MAX_WAIT_TIME_FOR_INTERFACES)
    assert wait_until(MAX_WAIT_TIME_FOR_INTERFACES, 20, check_all_interface_information, dut, interfaces, xcvr_skip_list), \
        "Not all transceivers are detected or interfaces are up in %d seconds" % MAX_WAIT_TIME_FOR_INTERFACES

    logging.info("Check transceiver status")
    for asic_index in dut.get_frontend_asic_ids():
        # Get the interfaces pertaining to that asic
        interface_list = get_port_map(dut, asic_index)
        interfaces_per_asic = {k:v for k, v in interface_list.items() if k in interfaces}
        check_transceiver_basic(dut, asic_index, interfaces_per_asic, xcvr_skip_list)

    logging.info("Check pmon daemon status")
    assert check_pmon_daemon_status(dut), "Not all pmon daemons running."

    # vendor-specific checks (imported lazily to avoid loading on other platforms)
    if dut.facts["asic_type"] in ["mellanox"]:
        from .mellanox.check_hw_mgmt_service import check_hw_management_service
        from .mellanox.check_sysfs import check_sysfs
        logging.info("Check the hw-management service")
        check_hw_management_service(dut)
        logging.info("Check sysfs")
        check_sysfs(dut)
def test_cold_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
    """
    @summary: This test case is to perform cold reboot and check platform status
    """
    duthost = duthosts[rand_one_dut_hostname]
    reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_COLD)
def test_fast_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
    """
    @summary: This test case is to perform fast reboot and check platform status
    """
    duthost = duthosts[rand_one_dut_hostname]
    # fast reboot is not available on multi-ASIC platforms
    if duthost.is_multi_asic:
        pytest.skip("Multi-ASIC devices not supporting fast reboot")

    reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_FAST)
def test_warm_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
    """
    @summary: This test case is to perform warm reboot and check platform status
    """
    duthost = duthosts[rand_one_dut_hostname]
    # warm reboot is not available on multi-ASIC platforms
    if duthost.is_multi_asic:
        pytest.skip("Multi-ASIC devices not supporting warm reboot")

    asic_type = duthost.facts["asic_type"]

    # on Mellanox platforms warm reboot additionally requires ISSU support
    if asic_type in ["mellanox"]:
        issu_capability = duthost.command("show platform mlnx issu")["stdout"]
        if "disabled" in issu_capability:
            pytest.skip("ISSU is not supported on this DUT, skip this test case")

    reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_WARM)
def _power_off_reboot_helper(kwargs):
"""
@summary: used to parametrized test cases on power_off_delay
@param kwargs: the delay time between turning off and on the PSU
"""
pdu_ctrl = kwargs["pdu_ctrl"]
all_outlets = kwargs["all_outlets"]
power_on_seq = kwargs["power_on_seq"]
delay_time = kwargs["delay_time"]
for outlet in all_outlets:
logging.debug("turning off {}".format(outlet))
pdu_ctrl.turn_off_outlet(outlet)
time.sleep(delay_time)
logging.info("Power on {}".format(power_on_seq))
for outlet in power_on_seq:
logging.debug("turning on {}".format(outlet))
pdu_ctrl.turn_on_outlet(outlet)
def test_power_off_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list, pdu_controller, power_off_delay):
    """
    @summary: This test case is to perform reboot via powercycle and check platform status
    @param duthosts: Fixture returning the DUT AnsibleHost objects
    @param localhost: Fixture for interacting with localhost through ansible
    @param conn_graph_facts: Fixture parse and return lab connection graph
    @param xcvr_skip_list: list of DUT's interfaces for which transceiver checks are skipped
    @param pdu_controller: The python object of psu controller
    @param power_off_delay: Pytest parameter. The delay between turning off and on the PSU
    """
    duthost = duthosts[rand_one_dut_hostname]
    pdu_ctrl = pdu_controller
    if pdu_ctrl is None:
        pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % duthost.hostname)

    all_outlets = pdu_ctrl.get_outlet_status()
    # Sequence of power-on scenarios. With 2 PSUs this covers:
    # 1. Turn off all PSUs, turn on PSU1, then check.
    # 2. Turn off all PSUs, turn on PSU2, then check.
    # 3. Turn off all PSUs, turn on one PSU, then the other, then check.
    power_on_seq_list = []
    if all_outlets:
        power_on_seq_list = [[outlet] for outlet in all_outlets]
        power_on_seq_list.append(all_outlets)

    logging.info("Got all power on sequences {}".format(power_on_seq_list))
    for power_on_seq in power_on_seq_list:
        helper_kwargs = {
            "dut": duthost,
            "pdu_ctrl": pdu_ctrl,
            "all_outlets": all_outlets,
            "power_on_seq": power_on_seq,
            "delay_time": power_off_delay,
        }
        reboot_and_check(localhost, duthost,
                         conn_graph_facts["device_conn"][duthost.hostname],
                         xcvr_skip_list, REBOOT_TYPE_POWEROFF,
                         _power_off_reboot_helper, helper_kwargs)
def test_watchdog_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
    """
    @summary: This test case is to perform reboot via watchdog and check platform status
    """
    duthost = duthosts[rand_one_dut_hostname]
    # Probe the platform API for a watchdog implementation; any stderr output
    # means the probe failed and the platform has no watchdog support.
    probe_cmd = "python -c \"import sonic_platform.platform as P; P.Platform().get_chassis().get_watchdog(); exit()\""
    probe_stderr = duthost.command(probe_cmd, module_ignore_errors=True)["stderr"]
    if probe_stderr:
        pytest.skip("Watchdog is not supported on this DUT, skip this test case")
    reboot_and_check(localhost, duthost,
                     conn_graph_facts["device_conn"][duthost.hostname],
                     xcvr_skip_list, REBOOT_TYPE_WATCHDOG)
def test_continuous_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
    """
    @summary: This test case is to perform 3 cold reboot in a row
    """
    duthost = duthosts[rand_one_dut_hostname]
    ports = conn_graph_facts["device_conn"][duthost.hostname]
    for _ in range(3):
        reboot_and_check(localhost, duthost, ports, xcvr_skip_list,
                         reboot_type=REBOOT_TYPE_COLD)
| 43.497854 | 141 | 0.753527 |
3199cef8b54ce2e3cdbc1e5b7eee77a4525ccdd9 | 3,695 | py | Python | main.py | ondrasouk/encoders-comparison-tool | 81d257cf42865a9d70b19e8ed8ff7deab76af9b5 | [
"MIT"
] | 1 | 2021-12-30T19:44:13.000Z | 2021-12-30T19:44:13.000Z | main.py | ondrasouk/encoders-comparison-tool | 81d257cf42865a9d70b19e8ed8ff7deab76af9b5 | [
"MIT"
] | null | null | null | main.py | ondrasouk/encoders-comparison-tool | 81d257cf42865a9d70b19e8ed8ff7deab76af9b5 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pathlib
import encoders_comparison_tool as enc
def output_dir(path):
    """Create the output directory (including parents) if it does not exist.

    ``Path.mkdir(parents=True, exist_ok=True)`` is already a no-op for an
    existing directory, so the former ``os.path.isdir`` pre-check was
    redundant. A ``FileExistsError`` is still raised when ``path`` exists
    but is a regular file.
    """
    pathlib.Path(path).mkdir(parents=True, exist_ok=True)
# Options for transcoder
# One line is one option
# enc.sweep_param is class for defining variable in that position
# (the sweep kind is the first argument: "add" = arithmetic step,
#  "lin" = linear range, "list" = explicit value list — as used below).
options = np.array([["-c:v", "libx264"],
                    ["-level", "4.1"],
                    ["-preset", "veryfast"],
                    ["-crf", enc.sweep_param("add", 10, 11, 1)],
                    ["-y"],
                    ["-an"],
                    ["-sn"]], dtype=object)
# SVT-AV1 encode sweeping the preset value.
options1 = np.array([["-c:v", "libsvtav1"],
                     ["-level", "6.3"],
                     ["-preset", enc.sweep_param("add", 9, 10, 1)]], dtype=object)
# x264 encode sweeping both preset (list) and bitrate (linear, "M" suffix).
options2 = np.array([["-c:v", "libx264"],
                     ["-level", "4.1"],
                     ["-preset", enc.sweep_param("list", ["ultrafast", "slower"])],
                     ["-b:v", enc.sweep_param("lin", 0.1, 4, 2, "", "M")],
                     ["-an"],
                     ["-y"],
                     ["-sn"]], dtype=object)
# vvenc (VVC) encode sweeping the quantizer.
options3 = np.array([["-q", enc.sweep_param("lin", 9, 63, 2)],
                     ["--preset", "faster"],
                     ["-qpa", "1"],
                     ["-t", "12"]], dtype=object)
# Make transcode_set
# enc.Transcode_setting is class for making transcode options with what module
# to load and what binary it will use.
transcode_set = {
    "options": enc.Transcode_setting("ffmpeg_transcode.py", "/usr/bin/ffmpeg", options),
    "optionsd": enc.Transcode_setting("dummy_transcode.py", "/usr/bin/ffmpeg", options),
    "options1": enc.Transcode_setting("ffmpeg_transcode.py", "/usr/bin/ffmpeg", options1, concurrent=-1),
    "options2": enc.Transcode_setting("ffmpeg_transcode.py", "/usr/bin/ffmpeg", options2, concurrent=1, two_pass=True),
    "options3": enc.Transcode_setting("vvenc_transcode.py", ("../vvenc/bin/release-static/vvencFFapp", "../vvdec/bin/release-static/vvdecapp"), options3, concurrent=1, two_pass=False),
}
# Dictionary for storing paths for binaries.
binaries = {
    "ffprobe": "/usr/bin/ffprobe",
    "ffmpeg": "/usr/bin/ffmpeg"
}
# Settings for Windows testing.
if os.name == "nt":
    binaries["ffprobe"] = "ffmpeg-n4.4.1-2-gcc33e73618-win64-gpl-4.4/bin/ffprobe.exe"
    binaries["ffmpeg"] = "ffmpeg-n4.4.1-2-gcc33e73618-win64-gpl-4.4/bin/ffmpeg.exe"
    # BUG FIX: iterate the dict *values*. Iterating a dict directly yields its
    # string keys, and a ``str`` has no ``binary`` attribute, so the original
    # ``for settings in transcode_set:`` raised AttributeError on Windows.
    for settings in transcode_set.values():
        settings.binary = "ffmpeg-n4.4.1-2-gcc33e73618-win64-gpl-4.4/bin/ffmpeg.exe"
# Input video files can be stored as strings in iterable object
inputfiles_list = ["Sintel.2010.720p_30s.mkv"]
# Output directory for encoded videosequences
outputpath = "out/test/"
output_dir(outputpath)
# Test configuration for errors before running encode
# NOTE(review): the commented checks index transcode_set with [2], which
# predates the switch to string keys — they would raise KeyError if re-enabled.
#print(enc.transcode_check(binaries, inputfiles_list, transcode_set[2]))
#print(enc.transcode_check(binaries, inputfiles_list, transcode_set[2], "slow"))
# Print used configuration.
print(transcode_set["options"]())
print("\nencoding:\n")
# Start the transcode.
#enc.transcode(binaries, inputfiles_list, transcode_set["options"], outputpath)
enc.transcode(binaries, inputfiles_list, transcode_set["optionsd"], outputpath, check_if_exists=True, append_useage_log=True)
#enc.transcode(binaries, inputfiles_list, transcode_set["options"], outputpath, only_decode=True, append_useage_log=True, decoder="h264")
#enc.transcode(binaries, inputfiles_list, transcode_set["options2"], outputpath)
#enc.transcode(binaries, inputfiles_list, transcode_set["options3"], outputpath)
#enc.transcode(binaries, inputfiles_list, transcode_set["options3"], outputpath, only_decode=True, append_useage_log=True)
| 46.1875 | 184 | 0.644114 |
96940759decf0b4c3e9744117b4f94e722ecba15 | 692 | py | Python | brigadier/accounts/migrations/0003_alter_myuser_username.py | avoevodin/brigadier | c3869e4b526e0eb043146c53b78fda525ecfe5c4 | [
"MIT"
] | 1 | 2021-03-30T05:06:39.000Z | 2021-03-30T05:06:39.000Z | brigadier/accounts/migrations/0003_alter_myuser_username.py | avoevodin/brigadier | c3869e4b526e0eb043146c53b78fda525ecfe5c4 | [
"MIT"
] | 25 | 2021-03-22T12:41:41.000Z | 2021-10-20T10:42:05.000Z | brigadier/accounts/migrations/0003_alter_myuser_username.py | avoevodin/brigadier | c3869e4b526e0eb043146c53b78fda525ecfe5c4 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-10-06 20:16
import django.contrib.auth.validators
from django.db import migrations, models
# Auto-generated migration: re-applies Django's standard ``username`` field
# definition on MyUser (unique, max 150 chars, UnicodeUsernameValidator).
class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0002_create_public_group'),
    ]

    operations = [
        migrations.AlterField(
            model_name='myuser',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
        ),
    ]
| 34.6 | 316 | 0.680636 |
49da4288fffe4ee0f5e8a05a1ed9e807ae9254fd | 1,755 | py | Python | qr_code/src/qr_code.py | soundarzozm/covid-automation-sop | e86b2b760c18aa1cc46486bfc1f2b387889b90e0 | [
"MIT"
] | null | null | null | qr_code/src/qr_code.py | soundarzozm/covid-automation-sop | e86b2b760c18aa1cc46486bfc1f2b387889b90e0 | [
"MIT"
] | null | null | null | qr_code/src/qr_code.py | soundarzozm/covid-automation-sop | e86b2b760c18aa1cc46486bfc1f2b387889b90e0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# license removed for brevity
import rospy
from cv2 import cv2
import time
import datetime
from std_msgs.msg import String
import csv
from datetime import datetime,date
def talker(data):
    """Publish one "Entered Hostel <data>" message on the ``chatter`` topic.

    NOTE(review): the ``while`` loop exits after a single publish because of
    the unconditional ``break``; if ``rospy.is_shutdown()`` is already True,
    nothing is published at all. Presumably intentional — confirm.
    """
    pub = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        hello_str = "Entered Hostel %s" % data
        rospy.loginfo(hello_str)
        pub.publish(hello_str)
        rate.sleep()
        break
# initalize the cam (device index 0 = default webcam)
cap = cv2.VideoCapture(0)
# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()
#csv_file = "data.csv"
#csv_columns = ['name','reg_no', 'College','Hostel Block', 'Date','Time_Punched']
# Main capture loop: grab a frame, look for a QR code, publish on detection.
while True:
    _, img = cap.read()
    # detect and decode
    data, bbox, _ = detector.detectAndDecode(img)
    # check if there is a QRCode in the image
    if bbox is not None:
        # display the image with lines
        for i in range(len(bbox)):
            # draw all lines (connect consecutive corners, wrapping around)
            cv2.line(img, tuple(bbox[i][0]), tuple(bbox[(i+1) % len(bbox)][0]), color=(255, 0, 0), thickness=2)
        if data:
            print("[+] QR Code detected, data:", data)
            # NOTE(review): data_dict/Time_Punched are only referenced by the
            # commented-out csv dict below — dead code unless that is revived.
            data_dict= list(data)
            now = datetime.now()
            Time_Punched= now.strftime("%H:%M:%S")
            #dict = {'name': data_dict[0], 'reg_number': data_dict[1], 'College': data_dict[2], 'Hostel Block': data_dict[3], 'Date': date, 'Time_Punched': Time_Punched }
            talker(data)
            # brief pause so one badge is not registered many times per second
            time.sleep(2)
    # display the result
    cv2.imshow("img", img)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
527b94da6775c22a15f8d919d7973fd790c4aceb | 1,961 | py | Python | yate/tokens.py | mauro-balades/yate | af704006cfd78ec9b93afe632c8741e0582ee354 | [
"MIT"
] | 1 | 2022-02-23T14:48:39.000Z | 2022-02-23T14:48:39.000Z | yate/tokens.py | mauro-balades/yate | af704006cfd78ec9b93afe632c8741e0582ee354 | [
"MIT"
] | null | null | null | yate/tokens.py | mauro-balades/yate | af704006cfd78ec9b93afe632c8741e0582ee354 | [
"MIT"
] | null | null | null | """
| YATE engine
|-------------
| MIT License
|
| Copyright (c) 2022 Mauro Baladés
|
| Permission is hereby granted, free of charge, to any person obtaining a copy
| of this software and associated documentation files (the "Software"), to deal
| in the Software without restriction, including without limitation the rights
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
| copies of the Software, and to permit persons to whom the Software is
| furnished to do so, subject to the following conditions:
|
| The above copyright notice and this permission notice shall be included in all
| copies or substantial portions of the Software.
|
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
| SOFTWARE.
"""
import operator
import re
# We will encapsulate each fragment of text in a Fragment object.
# This object will determine the fragment type and prepare the
# fragment for consumption by the compile function.
VAR_FRAGMENT = 0
OPEN_BLOCK_FRAGMENT = 1
CLOSE_BLOCK_FRAGMENT = 2
TEXT_FRAGMENT = 3

# Variable tokens
VAR_TOKEN_START = "{{"
VAR_TOKEN_END = "}}"

# Code block tokens
BLOCK_TOKEN_START = "{%"
BLOCK_TOKEN_END = "%}"

# Token regex: splits template source into text / "{{ var }}" / "{% block %}"
# pieces (the capturing group keeps the delimiters in re.split output).
TOK_REGEX = re.compile(
    r"(%s.*?%s|%s.*?%s)"
    % (VAR_TOKEN_START, VAR_TOKEN_END, BLOCK_TOKEN_START, BLOCK_TOKEN_END)
)

# Whitespace as a compiled regex object.
# FIX: use a raw string — "\s" is not a valid string escape and triggered a
# DeprecationWarning (SyntaxWarning/error in newer Pythons); r"\s+" compiles
# to the exact same pattern without the warning.
WHITESPACE = re.compile(r"\s+")

# Operators supported in block conditions, mapped to their callables.
operator_lookup_table = {
    "<": operator.lt,
    ">": operator.gt,
    "==": operator.eq,
    "!=": operator.ne,
    "<=": operator.le,
    ">=": operator.ge,
}
| 30.640625 | 80 | 0.72973 |
31109fc17c20343e9c487682e5f9dabb6b073d25 | 1,645 | py | Python | Chapter 03/code/run_grid_search.py | shivampotdar/Artificial-Intelligence-with-Python | 00221c3b1a6d8003765d1ca48b5c95f86da375d9 | [
"MIT"
] | 387 | 2017-02-11T18:28:50.000Z | 2022-03-27T01:16:05.000Z | Chapter 03/code/run_grid_search.py | shivampotdar/Artificial-Intelligence-with-Python | 00221c3b1a6d8003765d1ca48b5c95f86da375d9 | [
"MIT"
] | 18 | 2017-12-15T03:10:25.000Z | 2021-04-20T14:32:43.000Z | Chapter 03/code/run_grid_search.py | shivampotdar/Artificial-Intelligence-with-Python | 00221c3b1a6d8003765d1ca48b5c95f86da375d9 | [
"MIT"
] | 407 | 2017-01-23T15:18:33.000Z | 2022-03-16T05:39:02.000Z | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn import cross_validation, grid_search
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import cross_validation
from sklearn.metrics import classification_report
from utilities import visualize_classifier
# Load input data (comma-separated rows; last column is the class label)
input_file = 'data_random_forests.txt'
data = np.loadtxt(input_file, delimiter=',')
X, y = data[:, :-1], data[:, -1]

# Separate input data into three classes based on labels
# NOTE(review): class_0/1/2 are not used below — presumably leftovers from a
# plotting variant of this example; confirm before removing.
class_0 = np.array(X[y==0])
class_1 = np.array(X[y==1])
class_2 = np.array(X[y==2])

# Split the data into training and testing datasets
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.25, random_state=5)

# Define the parameter grid: sweep max_depth at fixed n_estimators,
# then sweep n_estimators at fixed max_depth.
parameter_grid = [ {'n_estimators': [100], 'max_depth': [2, 4, 7, 12, 16]},
                   {'max_depth': [4], 'n_estimators': [25, 50, 100, 250]}
                 ]

metrics = ['precision_weighted', 'recall_weighted']

# Run one grid search per scoring metric and report the results.
for metric in metrics:
    print("\n##### Searching optimal parameters for", metric)

    classifier = grid_search.GridSearchCV(
            ExtraTreesClassifier(random_state=0),
            parameter_grid, cv=5, scoring=metric)
    classifier.fit(X_train, y_train)

    print("\nGrid scores for the parameter grid:")
    for params, avg_score, _ in classifier.grid_scores_:
        print(params, '-->', round(avg_score, 3))

    print("\nBest parameters:", classifier.best_params_)

    y_pred = classifier.predict(X_test)
    print("\nPerformance report:\n")
    print(classification_report(y_test, y_pred))
| 32.9 | 75 | 0.707599 |
1f1c3bf17a1d64f57e1d6f129b943ee0269987b2 | 4,501 | py | Python | webdispatch/uritemplate.py | aodag/WebDispatch | 55f8658a2b4100498e098a80303a346c3940f1bc | [
"MIT"
] | 1 | 2016-09-03T00:32:47.000Z | 2016-09-03T00:32:47.000Z | webdispatch/uritemplate.py | aodag/WebDispatch | 55f8658a2b4100498e098a80303a346c3940f1bc | [
"MIT"
] | 15 | 2017-07-25T08:00:38.000Z | 2019-02-21T03:23:42.000Z | webdispatch/uritemplate.py | aodag/WebDispatch | 55f8658a2b4100498e098a80303a346c3940f1bc | [
"MIT"
] | 1 | 2018-03-05T17:37:18.000Z | 2018-03-05T17:37:18.000Z | """ uri template
parsing and generating url patterns
"""
from datetime import datetime
import re
import string
from typing import ( # noqa pylint: disable=unused-import
Any,
Dict,
Callable,
Tuple,
)
# Matches "{name}" or "{name:converter}" placeholders in a URL pattern.
VARS_PT = re.compile(r"{(?P<varname>[a-zA-Z0-9_]+)"
                     r"(:(?P<converter>[a-zA-Z0-9_]+))?}",
                     re.X)

# Regex metacharacters escaped when converting a pattern to a regex.
# The backslash comes first so already-inserted escapes are not doubled.
META_CHARS = (
    "\\",
    ".",
    "^",
    "$",
    "*",
    "+",
    "|",
    "?",
    "(",
    ")",
    "[",
    "]")  # type: Tuple[str, ...]

# Built-in converters usable as "{name:converter}"; matched values are parsed
# with these callables (a ValueError during conversion aborts the match).
DEFAULT_CONVERTERS = {
    'int': int,
    'date': lambda s: datetime.strptime(s, '%Y-%m-%d'),
    'date_ym': lambda s: datetime.strptime(s, '%Y-%m'),
}  # type: Dict[str, Callable]
def regex_replacer(matched) -> str:
    """Expand a matched url placeholder into a named regex capture group."""
    groups = matched.groupdict()
    return "(?P<" + groups["varname"] + r">[\w-]+)"
def template_replacer(matched) -> str:
    """Expand a matched url placeholder into a string.Template interpolation."""
    groups = matched.groupdict()
    return "${" + groups["varname"] + "}"
def pattern_to_regex(pattern: str) -> str:
    """Convert a url pattern into an anchored regex source string."""
    # A trailing "*" makes the pattern a prefix match: drop it and omit
    # the "$" end anchor.
    if pattern.endswith("*"):
        pattern = pattern[:-1]
        anchor = ""
    else:
        anchor = "$"
    for metachar in META_CHARS:
        pattern = pattern.replace(metachar, "\\" + metachar)
    return "^" + VARS_PT.sub(regex_replacer, pattern) + anchor
def pattern_to_template(pattern: str) -> str:
    """Convert a url pattern into a ``string.Template`` source string."""
    template_source = VARS_PT.sub(template_replacer, pattern)
    return template_source
def detect_converters(pattern: str,
                      converter_dict: Dict[str, Callable],
                      default: Callable = str):
    """Map each placeholder name in ``pattern`` to its converter callable.

    Unknown (or absent) converter names fall back to ``default``.
    """
    found = {}
    for match in VARS_PT.finditer(pattern):
        info = match.groupdict()
        found[info["varname"]] = converter_dict.get(info["converter"], default)
    return found
class URITemplateFormatException(Exception):
    """Raised when a uri template has an invalid format (e.g. a wildcard
    that does not directly follow a slash)."""
class MatchResult:
    """Result of matching a url path against a :class:`URITemplate`."""

    def __init__(self, matchdict: Dict[str, Any], matchlength: int) -> None:
        self.name = None  # type: str
        self.matchdict = matchdict
        self.matchlength = matchlength

    def new_named_args(self, cur_named_args: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of ``cur_named_args`` updated with the matched vars."""
        merged = dict(cur_named_args)
        merged.update(self.matchdict)
        return merged

    def split_path_info(self, path_info: str) -> Tuple[str, str]:
        """Split path_info into (new script_name, remaining path_info)."""
        consumed = self.matchlength
        return path_info[:consumed], path_info[consumed:]
class URITemplate(object):
    """Parse urls against, and generate urls from, a placeholder pattern."""

    def __init__(self, tmpl_pattern: str,
                 converters=None) -> None:
        # A wildcard is only legal directly after a slash ("/*").
        if tmpl_pattern.endswith('*') and not tmpl_pattern.endswith('/*'):
            raise URITemplateFormatException('wildcard must be after slash.')
        self.pattern = tmpl_pattern
        self.regex = re.compile(pattern_to_regex(tmpl_pattern))
        self.template = string.Template(pattern_to_template(tmpl_pattern))
        self.converters = detect_converters(
            tmpl_pattern,
            DEFAULT_CONVERTERS if converters is None else converters)

    def match(self, path_info: str) -> MatchResult:
        """Match ``path_info``; return a MatchResult or None on failure."""
        found = self.regex.match(path_info)
        if found is None:
            return None
        matchlength = len(found.group(0))
        try:
            converted = self.convert_values(found.groupdict())
        except ValueError:
            # A converter rejected its value: treat as no match.
            return None
        return MatchResult(converted, matchlength)

    def convert_values(self, matchdict: Dict[str, str]) -> Dict[str, Any]:
        """Convert raw matched strings using this template's converters."""
        return {varname: self.converters[varname](raw)
                for varname, raw in matchdict.items()}

    def substitute(self, values: Dict[str, Any]) -> str:
        """Generate a url by interpolating ``values`` into the template."""
        return self.template.substitute(values)
| 30.208054 | 79 | 0.607865 |
e9991e38ecc98716d133689ce6fb4bb261f07b81 | 8,025 | py | Python | openpype/hosts/houdini/api/usd.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | [
"MIT"
] | 1 | 2022-02-08T15:40:41.000Z | 2022-02-08T15:40:41.000Z | openpype/hosts/houdini/api/usd.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | 2 | 2022-03-18T01:46:03.000Z | 2022-03-18T01:46:16.000Z | openpype/hosts/houdini/api/usd.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | null | null | null | """Houdini-specific USD Library functions."""
import contextlib
import logging
from Qt import QtWidgets, QtCore, QtGui
from openpype import style
from openpype.pipeline import legacy_io
from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget
from pxr import Sdf
log = logging.getLogger(__name__)
class SelectAssetDialog(QtWidgets.QWidget):
    """Frameless assets dialog to select asset with double click.

    Args:
        parm: Parameter where selected asset name is set.
    """

    def __init__(self, parm):
        # BUG FIX: the QWidget base class was never initialized; Qt bindings
        # (PySide2/PyQt5) raise "RuntimeError: ... base class not called" on
        # the first Qt call below without this.
        super(SelectAssetDialog, self).__init__()

        self.setWindowTitle("Pick Asset")
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)

        assets_widget = SingleSelectAssetsWidget(legacy_io, parent=self)

        layout = QtWidgets.QHBoxLayout(self)
        layout.addWidget(assets_widget)

        assets_widget.double_clicked.connect(self._set_parameter)
        self._assets_widget = assets_widget
        self._parm = parm

    def _set_parameter(self):
        # Write the chosen asset name into the Houdini parameter and close.
        name = self._assets_widget.get_selected_asset_name()
        self._parm.set(name)
        self.close()

    def _on_show(self):
        pos = QtGui.QCursor.pos()

        # Select the current asset if there is any
        select_id = None
        name = self._parm.eval()
        if name:
            db_asset = legacy_io.find_one(
                {"name": name, "type": "asset"},
                {"_id": True}
            )
            if db_asset:
                select_id = db_asset["_id"]

        # Set stylesheet
        self.setStyleSheet(style.load_stylesheet())
        # Refresh assets (is threaded)
        self._assets_widget.refresh()
        # Select asset - must be done after refresh
        if select_id is not None:
            self._assets_widget.select_asset(select_id)

        # Show cursor (top right of window) near cursor
        self.resize(250, 400)
        self.move(self.mapFromGlobal(pos) - QtCore.QPoint(self.width(), 0))

    def showEvent(self, event):
        super(SelectAssetDialog, self).showEvent(event)
        self._on_show()
def pick_asset(node):
    """Show a user interface to select an Asset in the project

    When double clicking an asset it will set the Asset value in the
    'asset' parameter.

    Args:
        node (hou.Node): Node holding an ``asset_name`` parameter to fill.
    """
    # NOTE(review): the error message says 'asset' but the parameter looked
    # up is 'asset_name' — confirm which name is correct.
    parm = node.parm("asset_name")
    if not parm:
        log.error("Node has no 'asset' parameter: %s", node)
        return

    # Construct a frameless popup so it automatically
    # closes when clicked outside of it.
    # Stored in a module-level global so the dialog is not garbage
    # collected as soon as this function returns.
    global tool
    tool = SelectAssetDialog(parm)
    tool.show()
def add_usd_output_processor(ropnode, processor):
    """Add USD Output Processor to USD Rop node.

    Args:
        ropnode (hou.RopNode): The USD Rop node.
        processor (str): The output processor name. This is the basename of
            the python file that contains the Houdini USD Output Processor.

    """
    import loputils

    add_kwargs = {
        "node": ropnode,
        "parm": ropnode.parm("outputprocessors"),
        "script_value": processor,
    }
    loputils.handleOutputProcessorAdd(add_kwargs)
def remove_usd_output_processor(ropnode, processor):
    """Removes USD Output Processor from USD Rop node.

    Args:
        ropnode (hou.RopNode): The USD Rop node.
        processor (str): The output processor name. This is the basename of
            the python file that contains the Houdini USD Output Processor.

    """
    import loputils

    remove_parm = ropnode.parm(processor + "_remove")
    if not remove_parm:
        raise RuntimeError(
            "Output Processor %s does not exist on %s"
            % (processor, ropnode.name())
        )

    loputils.handleOutputProcessorRemove({"node": ropnode, "parm": remove_parm})
@contextlib.contextmanager
def outputprocessors(ropnode, processors=tuple(), disable_all_others=True):
    """Context manager to temporarily add Output Processors to USD ROP node.

    On exit, processors that were added by this manager are removed again and
    the original enable/disable state of all pre-existing processors is
    restored.

    Args:
        ropnode (hou.RopNode): The USD Rop node.
        processors (tuple or list): The processors to add.
        disable_all_others (bool, Optional): Whether to disable all
            output processors currently on the ROP node that are not in the
            `processors` list passed to this function.

    """
    # TODO: Add support for forcing the correct Order of the processors
    original = []
    prefix = "enableoutputprocessor_"
    # Snapshot the enable state of every processor toggle for restoration.
    processor_parms = ropnode.globParms(prefix + "*")
    for parm in processor_parms:
        original.append((parm, parm.eval()))
    if disable_all_others:
        for parm in processor_parms:
            parm.set(False)
    added = []
    for processor in processors:
        parm = ropnode.parm(prefix + processor)
        if parm:
            # If processor already exists, just enable it
            parm.set(True)
        else:
            # Else add the new processor
            add_usd_output_processor(ropnode, processor)
            added.append(processor)
    try:
        yield
    finally:
        # Remove newly added processors
        for processor in added:
            remove_usd_output_processor(ropnode, processor)
        # Revert to original values
        for parm, value in original:
            if parm:
                parm.set(value)
def get_usd_rop_loppath(node):
    """Return the LOP node a USD (render) ROP operates on, or None."""
    type_name = node.type().name()
    if type_name == "usd":
        return node.parm("loppath").evalAsNode()

    if type_name in {"usd_rop", "usdrender_rop"}:
        # Inside Solaris e.g. /stage (not in ROP context)
        # When incoming connection is present it takes it directly
        connected = node.inputs()
        if connected:
            return connected[0]
        return node.parm("loppath").evalAsNode()
def get_layer_save_path(layer):
    """Get custom HoudiniLayerInfo->HoudiniSavePath from SdfLayer.

    Args:
        layer (pxr.Sdf.Layer): The Layer to retrieve the save path data from.

    Returns:
        str or None: Path to save to when data exists.

    """
    info_prim = layer.rootPrims.get("HoudiniLayerInfo")
    if not info_prim:
        return None

    save_path = info_prim.customData.get("HoudiniSavePath", None)
    if not save_path:
        return None

    # Unfortunately this doesn't actually resolve the full absolute path
    return layer.ComputeAbsolutePath(save_path)
def get_referenced_layers(layer):
    """Return SdfLayers for all external references of the current layer

    Args:
        layer (pxr.Sdf.Layer): The Layer to inspect.

    Returns:
        list: List of pxr.Sdf.Layer that are external references to this layer

    """
    referenced = []
    for identifier in layer.GetExternalReferences():
        found = Sdf.Layer.Find(identifier)
        if not found:
            # A file may not be in memory and is referenced from disk;
            # such layers cannot be found and are ignored.
            continue
        referenced.append(found)
    return referenced
def iter_layer_recursive(layer):
    """Recursively iterate all 'external' referenced layers"""
    # Breadth-first walk: the pending list grows while being iterated.
    pending = get_referenced_layers(layer)
    visited = set(pending)  # avoid recursion to itself (if even possible)
    for current in pending:
        # Queue up any children not seen before (recursion)
        new_children = [
            child for child in get_referenced_layers(current)
            if child not in visited
        ]
        pending.extend(new_children)
        visited.update(new_children)
        yield current
def get_configured_save_layers(usd_rop):
    """Return all layers of the ROP's stage that have a save path set."""
    lop_node = get_usd_rop_loppath(usd_rop)
    stage = lop_node.stage(apply_viewport_overrides=False)
    if not stage:
        raise RuntimeError(
            "No valid USD stage for ROP node: %s" % usd_rop.path()
        )

    return [
        layer for layer in iter_layer_recursive(stage.GetRootLayer())
        if get_layer_save_path(layer) is not None
    ]
| 28.35689 | 78 | 0.645981 |
1c902e9cdb1d0a416258f324cdb1773702b741a0 | 3,898 | py | Python | ampel/log/LogsDumper.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/log/LogsDumper.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/log/LogsDumper.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-core/ampel/log/LogsDumper.py
# License: BSD-3-Clause
# Author: valery brinnel <firstname.lastname@gmail.com>
# Date: 17.03.2021
# Last Modified Date: 17.03.2021
# Last Modified By: valery brinnel <firstname.lastname@gmail.com>
import sys, json
from typing import IO
from collections.abc import Sequence
from ampel.abstract.AbsIdMapper import AbsIdMapper
from ampel.log.LogFlag import LogFlag
from ampel.log.AmpelLogger import AmpelLogger
from ampel.log.LightLogRecord import LightLogRecord
from ampel.base.AmpelFlexModel import AmpelFlexModel
from ampel.util.pretty import prettyjson
class LogsDumper(AmpelFlexModel):
    """Render log entry dicts either as JSON or as plain text lines.

    Entries are dicts using short keys as written by the logging backend:
    '_id' (timestamp), 'f' (flag), 'r' (run id), 's' (stock), 'c' (channel),
    'a' (alert), 'e' (extras), 'n' (new), 'm' (message).
    """

    # Output file path; results go to stdout when None
    out: None | str = None
    # Emit compact JSON (json.dumps)
    to_json: None | bool = False
    # Emit indented JSON (prettyjson)
    to_pretty_json: None | bool = False
    # strftime format applied to the timestamp; raw value used when None
    date_format: None | str = None
    # Optional mapper translating internal stock ids to external ids
    id_mapper: None | AbsIdMapper = None
    # Entry key holding the timestamp
    datetime_key: str = '_id'
    resolve_flag: bool = True
    # Separator between the main fields of a text line
    main_separator: str = ' '
    # Separator between the bracketed suffix items of a text line
    extra_separator: str = ' '

    def __init__(self, **kwargs) -> None:
        # Cache of LogFlag -> human readable string (formatting is repetitive)
        self._flag_strings: dict = {}
        super().__init__(**kwargs)

    def process(self, log_entries: Sequence[dict]) -> None:
        """Write all entries to ``self.out`` (or stdout) in the configured format."""
        fd = open(self.out, "w") if self.out else sys.stdout
        if self.to_json or self.to_pretty_json:
            self.write_json(fd, log_entries)
        else:
            self.write_txt(fd, log_entries)
        fd.flush()
        # Only close file handles we opened ourselves, never stdout
        if self.out:
            fd.close()

    def write_json(self, fd: IO, log_entries: Sequence[dict]) -> None:
        """Write entries as a JSON array.

        NOTE(review): entries are modified in place ('_id' overwritten,
        datetime_key deleted, flags stringified) — callers should not reuse
        the dicts afterwards.
        """
        func = json.dumps if self.to_json else prettyjson
        fd.write("[\n")
        # One-entry lookbehind buffer: each entry is written with a trailing
        # ",\n" only once the next entry is known, so the last entry can be
        # emitted without a trailing comma.
        buf = ""
        overwrite_pkey = self.datetime_key != '_id'
        for el in log_entries:
            # ObjectId is not json serializable
            # (and not interesting when another field contains a timestamp)
            if overwrite_pkey:
                el['_id'] = el[self.datetime_key]
                del el[self.datetime_key]
            if self.date_format:
                el['_id'] = el['_id'].strftime(self.date_format)
            if 's' in el and self.id_mapper:
                el['s'] = self.id_mapper.to_ext_id(el['s'])
            if isinstance(el['f'], LogFlag):
                if el['f'] not in self._flag_strings:
                    self._flag_strings[el['f']] = str(el['f']).replace("LogFlag.", "")
                el['f'] = self._flag_strings[el['f']]
            fd.write(buf)
            buf = func(el) + ",\n" # type: ignore[operator]
        fd.write(buf[:-2] + "\n]\n")

    def write_txt(self, fd: IO, log_entries: Sequence[dict]) -> None:
        """Write entries as plain text lines: <time> <flags> [k=v ...] <msg>."""
        for el in log_entries:
            out = el[self.datetime_key].strftime(self.date_format) if self.date_format else el[self.datetime_key]
            if isinstance(el['f'], LogFlag):
                # Cache the (separator-prefixed) flag string per flag value
                if el['f'] not in self._flag_strings:
                    self._flag_strings[el['f']] = self.main_separator + str(el['f']).replace("LogFlag.", "")
                out += self._flag_strings[el['f']]
            else:
                out += self.main_separator + str(el['f'])
            suffix = [f"run={el['r']}"]
            if 's' in el:
                if self.id_mapper:
                    suffix.append(f"stock={self.id_mapper.to_ext_id(el['s'])}")
                else:
                    suffix.append(f"stock={el['s']}")
            if 'c' in el:
                suffix.append(f"channel={el['c']}")
            if 'a' in el:
                suffix.append(f"alert={el['a']}")
            # NOTE(review): this *replaces* the suffix built so far
            # (run/stock/channel/alert) rather than extending it — confirm
            # this is the intended behavior for entries with extras.
            if (e := el.get('e')):
                suffix = [f'{k}={e[k]}' for k in e]
            if 'n' in el:
                suffix.append("new")
            if suffix:
                out += self.main_separator + f'[{self.extra_separator.join(suffix)}]'
            if el.get('m'):
                out += self.main_separator + el['m']
            fd.write(out + "\n")

    def log_entries(self, log_entries: Sequence[dict], logger: AmpelLogger) -> None:
        """
        Re-emit stored entries through a live AmpelLogger.
        Unsure when this could ever be required but it's there just in case
        """
        for el in log_entries:
            record = LightLogRecord(name=0, levelno=el['f'], msg=el.get('m'))
            record.extra = el['e'] if 'e' in el else {}
            record.extra['run'] = el['r']
            if 'c' in el:
                record.channel = el['c']
            if 's' in el:
                record.stock = el['s']
            if 'a' in el:
                record.extra['alert'] = el['a']
            if 'n' in el:
                record.extra['new'] = True
            logger.handle(record)
| 25.311688 | 104 | 0.639046 |
552bf211727a9856b6bc8e51afaf1133bef1f376 | 1,048 | py | Python | apps/dataporten/client.py | kharann/onlineweb4 | 1130128c6233b623780779a25934ea73ef62c264 | [
"MIT"
] | null | null | null | apps/dataporten/client.py | kharann/onlineweb4 | 1130128c6233b623780779a25934ea73ef62c264 | [
"MIT"
] | null | null | null | apps/dataporten/client.py | kharann/onlineweb4 | 1130128c6233b623780779a25934ea73ef62c264 | [
"MIT"
] | null | null | null | import logging
from oic.oic import Client, RegistrationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
DATAPORTEN_PROVIDER_CONFIG = 'https://auth.dataporten.no/'
def client_setup(client_id, client_secret):
    """Set up an OpenID Connect Relying Party ("client") for Dataporten.

    Fetches the provider configuration from ``DATAPORTEN_PROVIDER_CONFIG``
    and registers the supplied credentials with the client.

    :param client_id: OAuth2/OIDC client id issued by Dataporten
    :param client_secret: matching client secret
    :return: a configured ``oic.oic.Client`` instance
    :raises ValueError: if either credential is missing/empty
    """
    logger = logging.getLogger(__name__)

    # Explicit raises instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently accept empty credentials.
    if not client_id:
        raise ValueError('Missing client id when setting up Dataporten OpenID Connect Relying Party')
    if not client_secret:
        raise ValueError('Missing client secret when setting up Dataporten OpenID Connect Relying Party')

    client = Client(client_authn_method=CLIENT_AUTHN_METHOD)

    logger.debug('Automatically registering Dataporten OpenID Provider.',
                 extra={'config': DATAPORTEN_PROVIDER_CONFIG})
    client.provider_config(DATAPORTEN_PROVIDER_CONFIG)

    client.store_registration_info(RegistrationResponse(
        client_id=client_id,
        client_secret=client_secret,
    ))
    logger.debug('Successfully registered the provider.')
    return client
| 36.137931 | 119 | 0.771947 |
2a593d0ab22cc76523f960d3515f87c71c6cda48 | 27,787 | py | Python | astropy/io/fits/tests/test_connect.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | [
"BSD-3-Clause"
] | null | null | null | astropy/io/fits/tests/test_connect.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | [
"BSD-3-Clause"
] | 2 | 2016-11-30T20:31:42.000Z | 2016-12-02T23:50:10.000Z | astropy/io/fits/tests/test_connect.py | guntbert/astropy | f2d2add09e5b1638b2698f19a4d46fcca19e82be | [
"BSD-3-Clause"
] | null | null | null | import os
import gc
import pathlib
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.io.fits.column import (_parse_tdisp_format, _fortran_to_python_format,
python_to_tdisp)
from astropy.io.fits import HDUList, PrimaryHDU, BinTableHDU
from astropy.io import fits
from astropy import units as u
from astropy.table import Table, QTable, NdarrayMixin, Column
from astropy.table.table_helpers import simple_table
from astropy.tests.helper import catch_warnings
from astropy.units.format.fits import UnitScaleError
from astropy.utils.exceptions import AstropyUserWarning
from astropy.coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation
from astropy.time import Time, TimeDelta
from astropy.units.quantity import QuantityInfo
try:
import yaml # pylint: disable=W0611 # noqa
HAS_YAML = True
except ImportError:
HAS_YAML = False
DATA = os.path.join(os.path.dirname(__file__), 'data')
def equal_data(a, b):
    """Return True when *a* and *b* agree on every field of *a*'s dtype."""
    return all(np.all(a[field] == b[field]) for field in a.dtype.names)
class TestSingleTable:
def setup_class(self):
self.data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
def test_simple(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmpdir):
filename = pathlib.Path(str(tmpdir.join('test_simple.fit')))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['A'] = 1
t1.meta['B'] = 2.3
t1.meta['C'] = 'spam'
t1.meta['comments'] = ['this', 'is', 'a', 'long', 'comment']
t1.meta['HISTORY'] = ['first', 'second', 'third']
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
if isinstance(t1.meta, list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['ttype1'] = 'spam'
with catch_warnings() as l:
t1.write(filename, overwrite=True)
assert len(l) == 1
assert str(l[0].message).startswith(
'Meta-data keyword ttype1 will be ignored since it conflicts with a FITS reserved keyword')
def test_simple_noextension(self, tmpdir):
"""
Test that file type is recognized without extension
"""
filename = str(tmpdir.join('test_simple'))
t1 = Table(self.data)
t1.write(filename, overwrite=True, format='fits')
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_units(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_units.fits'))
t1 = table_type(self.data)
t1['a'].unit = u.m
t1['c'].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].unit == u.m
assert t2['c'].unit == u.km / u.s
@pytest.mark.skipif('not HAS_YAML')
def test_with_custom_units_qtable(self, tmpdir):
# Test only for QTable - for Table's Column, new units are dropped
# (as is checked in test_write_drop_nonstandard_units).
filename = str(tmpdir.join('test_with_units.fits'))
unit = u.def_unit('bandpass_sol_lum')
t = QTable()
t['l'] = np.ones(5) * unit
with catch_warnings(AstropyUserWarning) as w:
t.write(filename, overwrite=True)
assert len(w) == 1
assert 'bandpass_sol_lum' in str(w[0].message)
# Just reading back, the data is fine but the unit is not recognized.
with catch_warnings() as w:
t2 = QTable.read(filename)
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
assert len(w) == 1
assert "'bandpass_sol_lum' did not parse" in str(w[0].message)
assert np.all(t2['l'].value == t['l'].value)
# But if we enable the unit, it should be recognized.
with u.add_enabled_units(unit):
t3 = QTable.read(filename)
assert t3['l'].unit is unit
assert equal_data(t3, t)
# Regression check for #8897; write used to fail when a custom
# unit was enabled.
with catch_warnings(u.UnitsWarning) as w:
t3.write(filename, overwrite=True)
assert len(w) == 0
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_format(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_format.fits'))
t1 = table_type(self.data)
t1['a'].format = '{:5d}'
t1['b'].format = '{:>20}'
t1['c'].format = '{:6.2f}'
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].format == '{:5d}'
assert t2['b'].format == '{:>20}'
assert t2['c'].format == '{:6.2f}'
def test_masked(self, tmpdir):
filename = str(tmpdir.join('test_masked.fits'))
t1 = Table(self.data, masked=True)
t1.mask['a'] = [1, 0, 1, 0]
t1.mask['b'] = [1, 0, 0, 1]
t1.mask['c'] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2.masked
assert equal_data(t1, t2)
assert np.all(t1['a'].mask == t2['a'].mask)
# Disabled for now, as there is no obvious way to handle masking of
# non-integer columns in FITS
# TODO: Re-enable these tests if some workaround for this can be found
# assert np.all(t1['b'].mask == t2['b'].mask)
# assert np.all(t1['c'].mask == t2['c'].mask)
def test_masked_nan(self, tmpdir):
filename = str(tmpdir.join('test_masked_nan.fits'))
data = np.array(list(zip([5.2, 8.4, 3.9, 6.3],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', np.float64), ('b', np.float32)])
t1 = Table(data, masked=True)
t1.mask['a'] = [1, 0, 1, 0]
t1.mask['b'] = [1, 0, 0, 1]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
np.testing.assert_array_almost_equal(t2['a'], [np.nan, 8.4, np.nan, 6.3])
np.testing.assert_array_almost_equal(t2['b'], [np.nan, 4.5, 6.7, np.nan])
# assert t2.masked
# t2.masked = false currently, as the only way to determine whether a table is masked
# while reading is to check whether col.null is present. For float columns, col.null
# is not initialized
def test_read_from_fileobj(self, tmpdir):
filename = str(tmpdir.join('test_read_from_fileobj.fits'))
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, 'rb') as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'RADIANS'
hdu.columns[1].unit = 'spam'
hdu.columns[2].unit = 'millieggs'
t = Table.read(hdu)
assert equal_data(t, self.data)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_write_drop_nonstandard_units(self, table_type, tmpdir):
# While we are generous on input (see above), we are strict on
# output, dropping units not recognized by the fits standard.
filename = str(tmpdir.join('test_nonstandard_units.fits'))
spam = u.def_unit('spam')
t = table_type()
t['a'] = [1., 2., 3.] * spam
with catch_warnings() as w:
t.write(filename)
assert len(w) == 1
assert 'spam' in str(w[0].message)
if table_type is Table or not HAS_YAML:
assert ('cannot be recovered in reading. '
'If pyyaml is installed') in str(w[0].message)
else:
assert 'lost to non-astropy fits readers' in str(w[0].message)
with fits.open(filename) as ff:
hdu = ff[1]
assert 'TUNIT1' not in hdu.header
def test_memmap(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize('memmap', (False, True))
def test_character_as_bytes(self, tmpdir, memmap):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2['b'].dtype.kind == 'U'
assert t3['b'].dtype.kind == 'S'
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
self.data2 = np.array(list(zip([1.4, 2.3, 3.2, 4.7],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('p', float), ('q', float)])
hdu1 = PrimaryHDU()
hdu2 = BinTableHDU(self.data1, name='first')
hdu3 = BinTableHDU(self.data2, name='second')
self.hdus = HDUList([hdu1, hdu2, hdu3])
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings('always')
def test_read(self, tmpdir):
filename = str(tmpdir.join('test_read.fits'))
self.hdus.writeto(filename)
with catch_warnings() as l:
t = Table.read(filename)
assert len(l) == 1
assert str(l[0].message).startswith(
'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)')
assert equal_data(t, self.data1)
def test_read_with_hdu_0(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_0.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_with_hdu_1(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_1.fits'))
self.hdus.writeto(filename)
with catch_warnings() as l:
t = Table.read(filename, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_with_hdu_2(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_2.fits'))
self.hdus.writeto(filename)
with catch_warnings() as l:
t = Table.read(filename, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data2)
def test_read_from_hdulist(self):
with catch_warnings() as l:
t = Table.read(self.hdus)
assert len(l) == 1
assert str(l[0].message).startswith(
'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)')
assert equal_data(t, self.data1)
def test_read_from_hdulist_with_hdu_0(self, tmpdir):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_from_hdulist_with_hdu_1(self, tmpdir, hdu):
with catch_warnings() as l:
t = Table.read(self.hdus, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_from_hdulist_with_hdu_2(self, tmpdir, hdu):
with catch_warnings() as l:
t = Table.read(self.hdus, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data2)
def test_read_from_single_hdu(self):
with catch_warnings() as l:
t = Table.read(self.hdus[1])
assert len(l) == 0
assert equal_data(t, self.data1)
def test_masking_regression_1795():
    """
    Regression test for #1795 - this bug originally caused columns where TNULL
    was not defined to have their first element masked.
    """
    t = Table.read(os.path.join(DATA, 'tb.fits'))

    # With the fix, no element should be masked in any column.
    assert np.all(t['c1'].mask == np.array([False, False]))
    assert np.all(t['c2'].mask == np.array([False, False]))
    assert np.all(t['c3'].mask == np.array([False, False]))
    assert np.all(t['c4'].mask == np.array([False, False]))

    # Values checked against the known contents of data/tb.fits.
    assert np.all(t['c1'].data == np.array([1, 2]))
    assert np.all(t['c2'].data == np.array([b'abc', b'xy ']))
    assert_allclose(t['c3'].data, np.array([3.70000007153, 6.6999997139]))
    assert np.all(t['c4'].data == np.array([False, True]))
def test_scale_error():
    """A column whose unit string carries a numeric scale cannot be written to FITS."""
    a = [1, 4, 5]
    b = [2.0, 5.0, 8.2]
    c = ['x', 'y', 'z']
    t = Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
    # '1.2' is a unit string with a bare numeric scale factor, which the
    # FITS standard has no way to represent.
    t['a'].unit = '1.2'
    with pytest.raises(UnitScaleError) as exc:
        t.write('t.fits', format='fits', overwrite=True)
    # The error message should point at the offending column and scale.
    assert exc.value.args[0] == "The column 'a' could not be stored in FITS format because it has a scale '(1.2)' that is not recognized by the FITS standard. Either scale the data or change the units."
@pytest.mark.parametrize('tdisp_str, format_return',
[('EN10.5', ('EN', '10', '5', None)),
('F6.2', ('F', '6', '2', None)),
('B5.10', ('B', '5', '10', None)),
('E10.5E3', ('E', '10', '5', '3')),
('A21', ('A', '21', None, None))])
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize('tdisp_str, format_str_return',
[('G15.4E2', '{:15.4g}'),
('Z5.10', '{:5x}'),
('I6.5', '{:6d}'),
('L8', '{:>8}'),
('E20.7', '{:20.7e}')])
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize('fmt_str, tdisp_str',
[('{:3d}', 'I3'),
('3d', 'I3'),
('7.3f', 'F7.3'),
('{:>4}', 'A4'),
('{:7.4f}', 'F7.4'),
('%5.3g', 'G5.3'),
('%10s', 'A10'),
('%.4f', 'F13.4')])
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
def test_logical_python_to_tdisp():
assert python_to_tdisp('{:>7}', logical_dtype=True) == 'L7'
def test_bool_column(tmpdir):
    """
    Regression test for https://github.com/astropy/astropy/issues/1953

    Ensures that Table columns of bools are properly written to a FITS table.
    """
    arr = np.ones(5, dtype=bool)
    # BUG FIX: this line previously read `arr[::2] == np.False_`, a no-op
    # comparison whose result was discarded — the array stayed all-True and
    # the round-trip never exercised mixed values. Assign instead so the
    # column actually contains both True and False.
    arr[::2] = False

    t = Table([arr])
    t.write(str(tmpdir.join('test.fits')), overwrite=True)

    with fits.open(str(tmpdir.join('test.fits'))) as hdul:
        assert hdul[1].data['col0'].dtype == np.dtype('bool')
        assert np.all(hdul[1].data['col0'] == arr)
def test_unicode_column(tmpdir):
    """
    Test that a column of unicode strings is still written as one
    byte-per-character in the FITS table (so long as the column can be ASCII
    encoded).

    Regression test for one of the issues fixed in
    https://github.com/astropy/astropy/pull/4228
    """
    t = Table([np.array(['a', 'b', 'cd'])])
    t.write(str(tmpdir.join('test.fits')), overwrite=True)

    with fits.open(str(tmpdir.join('test.fits'))) as hdul:
        assert np.all(hdul[1].data['col0'] == ['a', 'b', 'cd'])
        # TFORM '2A' == two ASCII characters per row, i.e. one byte each.
        assert hdul[1].header['TFORM1'] == '2A'

    # A value that cannot be ASCII-encoded must raise rather than silently
    # corrupt the output.
    t2 = Table([np.array(['\N{SNOWMAN}'])])
    with pytest.raises(UnicodeEncodeError):
        t2.write(str(tmpdir.join('test.fits')), overwrite=True)
def test_unit_warnings_read_write(tmpdir):
    """An unparseable unit warns once on write and is silent on read back."""
    filename = str(tmpdir.join('test_unit.fits'))
    t1 = Table([[1, 2], [3, 4]], names=['a', 'b'])
    t1['a'].unit = 'm/s'
    t1['b'].unit = 'not-a-unit'

    # Only the invalid unit should produce a warning when writing.
    with catch_warnings() as l:
        t1.write(filename, overwrite=True)
    assert len(l) == 1
    assert str(l[0].message).startswith("'not-a-unit' did not parse as fits unit")

    # Reading the file back again should not warn at all.
    with catch_warnings() as l:
        Table.read(filename, hdu=1)
    assert len(l) == 0
def test_convert_comment_convention(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = os.path.join(DATA, 'stddata.fits')
with pytest.warns(AstropyUserWarning, match=r'hdu= was not specified but '
r'multiple tables are present'):
t = Table.read(filename)
assert t.meta['comments'] == [
'',
' *** End of mandatory fields ***',
'',
'',
' *** Column names ***',
'',
'',
' *** Column formats ***',
''
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
    """Assert that *obj1* and *obj2* agree on *attrs* plus the standard
    ``info.*`` attributes (name, format, unit, description, meta).

    Dotted paths are resolved component by component with ``getattr``,
    falling back to item access for dict-like components.
    """
    if compare_class:
        assert obj1.__class__ is obj2.__class__

    info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
    for attr in attrs + info_attrs:
        val1, val2 = obj1, obj2
        for component in attr.split('.'):
            try:
                val1 = getattr(val1, component)
                val2 = getattr(val2, component)
            except AttributeError:
                val1 = val1[component]
                val2 = val2[component]

        # Mixin info.meta can be None instead of an empty OrderedDict();
        # normalize both sides so the two representations compare equal
        # (#6720 would fix this upstream).
        if attr == 'info.meta':
            val1 = {} if val1 is None else val1
            val2 = {} if val2 is None else val2

        assert np.all(val1 == val2)
# Testing FITS table read/write with mixins. This is mostly
# copied from ECSV mixin testing.
el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
obstime='J1990.5')
scc = sc.copy()
scc.representation_type = 'cartesian'
tm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)
mixin_cols = {
'tm': tm,
'dt': TimeDelta([1, 2] * u.day),
'sc': sc,
'scc': scc,
'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5', 'J1991.5']),
'q': [1, 2] * u.m,
'lat': Latitude([1, 2] * u.deg),
'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
'ang': Angle([1, 2] * u.deg),
'el2': el2,
}
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {
'c1': ['data'],
'c2': ['data'],
'tm': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation_type', 'frame.name'],
'scc': ['x', 'y', 'z', 'representation_type', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation_type', 'frame.name'],
'q': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el2': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['x', 'y', 'z'],
}
@pytest.mark.skipif('not HAS_YAML')
def test_fits_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='fits')
t2 = Table.read(filename, format='fits', astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
serialized_names = ['ang',
'dt.jd1', 'dt.jd2',
'el2.x', 'el2.y', 'el2.z',
'lat',
'lon',
'q',
'sc.ra', 'sc.dec',
'scc.x', 'scc.y', 'scc.z',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime.jd1', 'scd.obstime.jd2',
'tm', # serialize_method is formatted_value
]
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['HISTORY'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == serialized_names
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my \n\n\n description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
if isinstance(t[name], NdarrayMixin):
pytest.xfail('NdarrayMixin not supported')
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.skipif('HAS_YAML')
def test_warn_for_dropped_info_attributes(tmpdir):
filename = str(tmpdir.join('test.fits'))
t = Table([[1, 2]])
t['col0'].info.description = 'hello'
with catch_warnings() as warns:
t.write(filename, overwrite=True)
assert len(warns) == 1
assert str(warns[0].message).startswith(
"table contains column(s) with defined 'format'")
@pytest.mark.skipif('HAS_YAML')
def test_error_for_mixins_but_no_yaml(tmpdir):
filename = str(tmpdir.join('test.fits'))
t = Table([mixin_cols['sc']])
with pytest.raises(TypeError) as err:
t.write(filename)
assert "cannot write type SkyCoord column 'col0' to FITS without PyYAML" in str(err.value)
@pytest.mark.skipif('not HAS_YAML')
def test_info_attributes_with_no_mixins(tmpdir):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = str(tmpdir.join('test.fits'))
t = Table([[1.0, 2.0]])
t['col0'].description = 'hello' * 40
t['col0'].format = '{:8.4f}'
t['col0'].meta['a'] = {'b': 'c'}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2['col0'].description == 'hello' * 40
assert t2['col0'].format == '{:8.4f}'
assert t2['col0'].meta['a'] == {'b': 'c'}
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('method', ['set_cols', 'names', 'class'])
def test_round_trip_masked_table_serialize_mask(tmpdir, method):
"""
Same as previous test but set the serialize_method to 'data_mask' so mask is
written out and the behavior is all correct.
"""
filename = str(tmpdir.join('test.fits'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
# MaskedColumn but no masked elements. See table the MaskedColumnInfo class
# _represent_as_dict() method for info about we test a column with no masked elements.
t['d'] = [1, 2, 3]
if method == 'set_cols':
for col in t.itercols():
col.info.serialize_method['fits'] = 'data_mask'
t.write(filename)
elif method == 'names':
t.write(filename, serialize_method={'a': 'data_mask', 'b': 'data_mask',
'c': 'data_mask', 'd': 'data_mask'})
elif method == 'class':
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
| 36.901726 | 202 | 0.580415 |
0e5589c301276db1218df2073eba1559eb52a7c2 | 3,530 | py | Python | tests/mock_tables/mock_multi_asic.py | sg893052/sonic-utilities | fdb79b8d65b8ca22232f4e6b140f593dd01613d5 | [
"Apache-2.0"
] | 91 | 2016-03-23T14:24:41.000Z | 2022-03-18T20:25:37.000Z | tests/mock_tables/mock_multi_asic.py | sg893052/sonic-utilities | fdb79b8d65b8ca22232f4e6b140f593dd01613d5 | [
"Apache-2.0"
] | 1,495 | 2017-02-15T10:49:10.000Z | 2022-03-31T18:49:56.000Z | tests/mock_tables/mock_multi_asic.py | sg893052/sonic-utilities | fdb79b8d65b8ca22232f4e6b140f593dd01613d5 | [
"Apache-2.0"
] | 466 | 2016-04-25T09:31:23.000Z | 2022-03-31T06:54:17.000Z | # MONKEY PATCH!!!
from unittest import mock
from sonic_py_common import multi_asic
from utilities_common import multi_asic as multi_asic_util
mock_intf_table = {
'': {
'eth0': {
2: [{'addr': '10.1.1.1', 'netmask': '255.255.255.0', 'broadcast': '10.1.1.1'}],
10: [{'addr': '3100::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]
},
'lo': {
2: [{'addr': '127.0.0.1', 'netmask': '255.0.0.0', 'broadcast': '127.255.255.255'}],
10: [{'addr': '::1', 'netmask':'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128'}]
}
},
'asic0': {
'Loopback0': {
17: [{'addr': '62:a5:9d:f4:16:96', 'broadcast': 'ff:ff:ff:ff:ff:ff'}],
2: [{'addr': '40.1.1.1', 'netmask': '255.255.255.255', 'broadcast': '40.1.1.1'}],
10: [{'addr': 'fe80::60a5:9dff:fef4:1696%Loopback0', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]
},
'PortChannel0001': {
17: [{'addr': '82:fd:d1:5b:45:2f', 'broadcast': 'ff:ff:ff:ff:ff:ff'}],
2: [{'addr': '20.1.1.1', 'netmask': '255.255.255.0', 'broadcast': '20.1.1.1'}],
10: [{'addr': 'aa00::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}, {'addr': 'fe80::80fd:d1ff:fe5b:452f', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]
},
'Loopback4096': {
2: [{'addr': '1.1.1.1', 'netmask': '255.255.255.0', 'broadcast': '1.1.1.1'}]
},
'veth@eth1': {
2: [{'addr': '192.1.1.1', 'netmask': '255.255.255.0', 'broadcast': '192.1.1.1'}]
}
},
'asic1': {
'Loopback0': {
17: [{'addr': '62:a5:9d:f4:16:96', 'broadcast': 'ff:ff:ff:ff:ff:ff'}],
2: [{'addr': '40.1.1.1', 'netmask': '255.255.255.255', 'broadcast': '40.1.1.1'}],
10: [{'addr': 'fe80::60a5:9dff:fef4:1696%Loopback0', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]
},
'PortChannel0002': {
17: [{'addr': '82:fd:d1:5b:45:2f', 'broadcast': 'ff:ff:ff:ff:ff:ff'}],
2: [{'addr': '30.1.1.1', 'netmask': '255.255.255.0', 'broadcast': '30.1.1.1'}],
10: [{'addr': 'bb00::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}, {'addr': 'fe80::80fd:abff:fe5b:452f', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]
},
'Loopback4096': {
2: [{'addr': '2.1.1.1', 'netmask': '255.255.255.0', 'broadcast': '2.1.1.1'}]
},
'veth@eth2': {
2: [{'addr': '193.1.1.1', 'netmask': '255.255.255.0', 'broadcast': '193.1.1.1'}]
}
}
}
def mock_get_num_asics():
    """Pretend the platform carries exactly two ASICs."""
    return 2
def mock_is_multi_asic():
    """Always report a multi-ASIC platform."""
    return True
def mock_get_namespace_list(namespace=None):
    """Return [namespace] when one is requested, otherwise both mock namespaces."""
    return [namespace] if namespace else ['asic0', 'asic1']
def mock_multi_asic_get_ip_intf_from_ns(namespace):
    """List the interface names for *namespace*; [] for unknown namespaces."""
    # Iterating the (possibly empty) per-namespace dict yields its keys.
    return list(mock_intf_table.get(namespace, {}))
def mock_multi_asic_get_ip_intf_addr_from_ns(namespace, iface):
    """Return the address entries for *iface* in *namespace*; [] when absent.

    When present, the value is the per-family mapping stored in
    mock_intf_table (keys are address-family ints such as 2 and 10).
    """
    return mock_intf_table.get(namespace, {}).get(iface, [])
# Monkey-patch the real helpers on the imported modules so any code using
# sonic_py_common.multi_asic / utilities_common.multi_asic during tests
# sees the fixed two-ASIC mock topology defined above instead of touching
# the host system.
multi_asic.get_num_asics = mock_get_num_asics
multi_asic.is_multi_asic = mock_is_multi_asic
multi_asic.get_namespace_list = mock_get_namespace_list
multi_asic.get_namespaces_from_linux = mock_get_namespace_list
multi_asic_util.multi_asic_get_ip_intf_from_ns = mock_multi_asic_get_ip_intf_from_ns
multi_asic_util.multi_asic_get_ip_intf_addr_from_ns = mock_multi_asic_get_ip_intf_addr_from_ns
| 37.157895 | 154 | 0.55864 |
563b44e1dd04c4f5472712955ec7bc15c3a9fc72 | 547 | py | Python | backend/apps/timing/utils/msg.py | lizhaoliu-Lec/OneApp | 2919c7b191bd01c6783acc2b8baed288d4f60b47 | [
"MIT"
] | 1 | 2019-08-12T02:17:10.000Z | 2019-08-12T02:17:10.000Z | backend/apps/timing/utils/msg.py | lizhaoliu-Lec/BookApp | 2919c7b191bd01c6783acc2b8baed288d4f60b47 | [
"MIT"
] | 4 | 2020-06-05T22:13:15.000Z | 2022-01-13T01:30:08.000Z | backend/apps/timing/utils/msg.py | lizhaoliu-Lec/BookApp | 2919c7b191bd01c6783acc2b8baed288d4f60b47 | [
"MIT"
] | null | null | null | """Message returned to frontend.
"""
# for timing record
TIMING_RECORD_GET_FAIL = '获取用户记录失败'        # "failed to fetch user records"
TIMING_RECORD_GET_SUCCESS = '获取用户记录成功'     # "fetched user records successfully"
TIMING_RECORD_POST_FAIL = '上传用户记录失败'       # "failed to upload user records"
TIMING_RECORD_POST_SUCCESS = '上传用户记录成功'    # "uploaded user records successfully"

# for timing plan
TIMING_PLAN_GET_FAIL = '获取用户计划失败'          # "failed to fetch user plans"
TIMING_PLAN_GET_SUCCESS = '获取用户计划成功'       # "fetched user plans successfully"
TIMING_PLAN_POST_FAIL = '上传用户计划失败'         # "failed to upload user plans"
TIMING_PLAN_POST_SUCCESS = '上传用户计划成功'      # "uploaded user plans successfully"

# for timing group
TIMING_GROUP_GET_FAIL = '获取用户所属组失败'       # "failed to fetch user's groups"
TIMING_GROUP_GET_SUCCESS = '获取用户所属组成功'    # "fetched user's groups successfully"
TIMING_GROUP_POST_FAIL = '上传用户所属组失败'      # "failed to upload user's groups"
TIMING_GROUP_POST_SUCCESS = '上传用户所属组成功'   # "uploaded user's groups successfully"
| 23.782609 | 39 | 0.809872 |
d868f8b56b2205a7621f71ca11fb4ac09239762b | 1,382 | py | Python | tests/first_stage_test.py | schlueter/mitogen | 76102927c81d5dfcc6c91565b2984f24dd3ee678 | [
"BSD-3-Clause"
] | null | null | null | tests/first_stage_test.py | schlueter/mitogen | 76102927c81d5dfcc6c91565b2984f24dd3ee678 | [
"BSD-3-Clause"
] | null | null | null | tests/first_stage_test.py | schlueter/mitogen | 76102927c81d5dfcc6c91565b2984f24dd3ee678 | [
"BSD-3-Clause"
] | null | null | null |
import subprocess
import unittest2
import mitogen.parent
import testlib
class CommandLineTest(testlib.RouterMixin, testlib.TestCase):
    # Ensure this version of Python produces a command line that is sufficient
    # to bootstrap this version of Python.
    #
    # TODO:
    # * 2.7 starting 2.4
    # * 2.7 starting 3.x
    # * 3.x starting 2.7

    def test_valid_syntax(self):
        """The generated boot command must be syntactically valid Python."""
        stream = mitogen.parent.Stream(self.router, 0, max_message_size=123)
        args = stream.get_boot_command()

        # Executing the boot command will print "EC0" and expect to read from
        # stdin, which will fail because it's pointing at /dev/null, causing
        # the forked child to crash with an EOFError and disconnect its write
        # pipe. The forked and freshly execed parent will get a 0-byte read
        # from the pipe, which is a valid script, and therefore exit indicating
        # success.
        fp = open("/dev/null", "r")
        proc = subprocess.Popen(args,
            stdin=fp,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = proc.communicate()
        self.assertEquals(0, proc.returncode)
        self.assertEquals("EC0\n", stdout)
        # NOTE(review): presumably the child's EOFError surfaces as this
        # truncated-zlib-stream message on stderr — confirm against the
        # bootstrap's second-stage decoding path.
        self.assertIn("Error -5 while decompressing data: incomplete or truncated stream", stderr)
if __name__ == '__main__':
unittest2.main()
| 31.409091 | 98 | 0.651954 |
b31f0baadaf613e17eb0bd30cfbf3f65d580d177 | 2,929 | py | Python | dse_simulation/src/Old/aruco_test.py | hfekrmandi/Autonomous-GNC-MAS | b02bc3210d8b136400b1cf2b7950aa77f5831e0b | [
"MIT"
] | 1 | 2021-03-22T01:47:00.000Z | 2021-03-22T01:47:00.000Z | dse_simulation/src/Old/aruco_test.py | hfekrmandi/Self_Localization_Intelligent_Mapping_SLIM | 6e073737c1e8ebd170a828500f825357beb699fe | [
"MIT"
] | 1 | 2022-01-12T02:17:02.000Z | 2022-01-12T21:51:40.000Z | dse_simulation/src/Old/aruco_test.py | hfekrmandi/Self_Localization_Intelligent_Mapping_SLIM | 6e073737c1e8ebd170a828500f825357beb699fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
from __future__ import print_function
import roslib
import sys
import rospy
import csv
import numpy as np
import datetime
import time
from geometry_msgs.msg import Twist
from dse_msgs.msg import PoseMarkers
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from dse_msgs.msg import InfFilterPartials
from dse_msgs.msg import InfFilterResults
from scipy.spatial.transform import Rotation as R
import dse_lib
import dse_constants
roslib.load_manifest('dse_simulation')
class aruco_test:
    """Subscribes to ArUco pose estimates and accumulates them for analysis.

    Each stored measurement is a 10-vector:
    [x, y, z, yaw, pitch, roll, qx, qy, qz, qw].
    """

    # Set up initial variables
    # Pass in the ID of this agent and the state dimension (6 or 12)
    def __init__(self):
        # Parameters are hard-coded here; the launch-file equivalents were
        # rospy.get_param('~prefix' / '~camera_limits' / '~tag_limit' / '~tag_size').
        self.ros_prefix = '/tb3_0'
        # [dist_min, dist_max, horiz_fov, vert_fov]
        self.camera_limits = [0.5, 1, 1.085595, 1.085595*480/640]
        # tag rotation at which the tag no longer detects
        self.tag_limit = 0.785398
        # height of the tag
        self.tag_size = 0.1*1.5

        # Define publishers and subscribers
        # Subscribe to the pose output from the camera
        self.pose_sub = rospy.Subscriber(self.ros_prefix + "/dse/pose_markers", PoseMarkers, self.camera_callback)
        self.num_results = 0
        self.results = []

    def camera_callback(self, data):
        """Convert one marker observation into a 10-element row and store it."""
        self.num_results += 1
        result = np.zeros(10)
        # First 6 entries: translation + Euler angles from the pose array.
        result[0:6] = dse_lib.state_from_pose_array(data.pose_array, 12, 6)[0:6, 0]
        # Last 4 entries: quaternion derived from the Euler angles.
        result[6:10] = dse_lib.eul2quat(result[3:6, None])
        self.results.append(result)

    def collect_data(self):
        """Split buffered measurements into per-quantity columns, then reset."""
        if self.num_results > 0:
            data = np.array(self.results)
            # BUG FIX: the original unpack list was [x, y, z, y, p, r, ...],
            # reusing `y` so the position-y column was silently overwritten
            # by yaw. Also reuse the single np.array() conversion instead of
            # converting self.results twice.
            [x, y, z, yaw, pitch, roll, qx, qy, qz, qw] = np.split(data, 10, axis=1)
            print('Success')
            self.num_results = 0
            self.results = []
        print('data')
def main(args):
    """Entry point: spin a ROS node that flushes collected detections once a
    second, with ``data.csv`` opened for the node to mirror results into."""
    with open('data.csv', mode="w") as csv_file:
        writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        rospy.init_node('aruco_test_node', anonymous=True)
        node = aruco_test()
        node.csv_writer = writer
        rate = rospy.Rate(1)
        node.dt = 1 / 1
        try:
            # Loop forever at 1 Hz until interrupted from the keyboard.
            while True:
                rate.sleep()
                node.collect_data()
        except KeyboardInterrupt:
            print("Shutting down")
if __name__ == '__main__':
main(sys.argv)
| 33.284091 | 114 | 0.645954 |
d80e8f74d8c45a56378ed3890f532896a325d63c | 1,127 | py | Python | terminal-kafka-monitor/widgets.py | praveen97uma/terminal-kafka-monitor | 523a410141bf5a55718d6867e783de4d8210ce8a | [
"Apache-2.0"
] | null | null | null | terminal-kafka-monitor/widgets.py | praveen97uma/terminal-kafka-monitor | 523a410141bf5a55718d6867e783de4d8210ce8a | [
"Apache-2.0"
] | null | null | null | terminal-kafka-monitor/widgets.py | praveen97uma/terminal-kafka-monitor | 523a410141bf5a55718d6867e783de4d8210ce8a | [
"Apache-2.0"
] | null | null | null | import npyscreen
import psutil
import sys
class MultiLineWidget(npyscreen.BoxTitle):
    '''
    A framed (boxed) widget containing read-only multiline text.
    '''
    # Widget type that npyscreen instantiates inside the BoxTitle frame.
    _contained_widget = npyscreen.MultiLineEdit
class CustomMultiLineAction(npyscreen.MultiLineAction):
    '''
    MultiLineAction subclass wired with custom key handlers:
    Ctrl-K terminates the process on the highlighted line, "q" exits.
    '''
    def __init__(self, *args, **kwargs):
        super(CustomMultiLineAction, self).__init__(*args, **kwargs)
        bindings = {
            "^K": self.kill_process,
            "q": self.quit,
        }
        self.add_handlers(bindings)

    def kill_process(self, *args, **kwargs):
        # The second whitespace-separated column of the selected row is the PID.
        columns = self.values[self.cursor_line].split()
        psutil.Process(int(columns[1])).terminate()

    def quit(self, *args, **kwargs):
        sys.exit()
class MultiLineActionWidget(npyscreen.BoxTitle):
    '''
    A framed widget whose multiline content reacts to key presses
    (the handlers are bound by CustomMultiLineAction).
    '''
    # Widget type that npyscreen instantiates inside the BoxTitle frame.
    _contained_widget = CustomMultiLineAction
class MonitorWindowForm(npyscreen.FormBaseNew):
    '''Top-level form hosting the monitor widgets.'''
    def create(self, *args, **kwargs):
        # No extra widgets are laid out here; instances add their own.
        super(MonitorWindowForm, self).create(*args, **kwargs)
    def while_waiting(self):
        # Called by npyscreen between keypresses; no periodic work needed yet.
        pass
| 24.5 | 66 | 0.654836 |
463684a7d80b5f361a181944135e67506a1c4887 | 6,411 | py | Python | pdf2text/pdf2text.py | guilacerda/playground | 109960ffefc611fcda7354d7058d3256971ebfb9 | [
"MIT"
] | null | null | null | pdf2text/pdf2text.py | guilacerda/playground | 109960ffefc611fcda7354d7058d3256971ebfb9 | [
"MIT"
] | null | null | null | pdf2text/pdf2text.py | guilacerda/playground | 109960ffefc611fcda7354d7058d3256971ebfb9 | [
"MIT"
def get_menu():
    """Parse ``cardapio.txt`` (a weekly cafeteria menu export) into a dict:
    day name -> meal ("DESJEJUM" / "ALMOÇO" / "JANTAR") -> menu section
    -> that day's dish description (a single string).
    """
    # --- 1. Tokenise the file ------------------------------------------------
    # Newlines become spaces and empty fragments are dropped.  split(" ") is
    # kept (instead of split()) to preserve the original tokenisation.
    # 'with' fixes the original's leaked file handle.
    stringzona = ""
    with open("cardapio.txt", 'r') as menu_file:
        for line in menu_file:
            if line.endswith('\n'):
                line = line[:-1] + " "
            stringzona = stringzona + line
    foods = []
    for word in stringzona.split(" "):
        if word != '':
            foods.append(word)
    # --- 2. Locate the three meal sections ----------------------------------
    # Each meal header is followed by 8 header tokens (the day-of-week row)
    # before its first section label; "Legenda:" terminates the menu body.
    id_desjejum = foods.index("DESJEJUM") + 8
    id_almoco = foods.index("ALMOÇO") + 8
    id_jantar = foods.index("JANTAR") + 8
    id_end = foods.index("Legenda:")
    menu = {}
    menu["DESJEJUM"] = {
        "Bebidas quentes": [],
        "Vegetariano 1": [],
        "Vegetariano 2": [],
        "Vegetariano 3": [],
        "Achocolatado": [],
        "Pão": [],
        "Complemento 1": [],
        "Complemento 2": [],
        "Comp. Vegetariano": [],
        "Fruta": []
    }
    menu["ALMOÇO"] = {
        "Salada:": [],
        "Molho:": [],
        "Prato Principal:": [],
        "Guarnição:": [],
        "Prato Vegetariano:": [],
        "Acompanhamentos:": [],
        "Sobremesa:": [],
        "Refresco:": []
    }
    menu["JANTAR"] = {
        "Salada:": [],
        "Molho:": [],
        "Sopa:": [],
        "Pão:": [],
        "Prato Principal:": [],
        "Prato Vegetariano:": [],
        "Acompanhamentos:": [],
        "Sobremesa:": [],
        "Refresco:": []
    }
    # Tokens that introduce a new section inside each meal.
    keys_desjejum = ["Bebidas", "Vegetariano", "Achocolatado", "Pão", "Complemento",
                     "Comp.", "Fruta"]
    keys_almoco = ["Salada:", "Molho:", "Principal:", "Guarnição:",
                   "Vegetariano:", "Acompanhamentos:", "Sobremesa:", "Refresco:"]
    keys_jantar = ["Salada:", "Molho:", "Principal:", "Sopa:", "Pão:",
                   "Vegetariano:", "Acompanhamentos:", "Sobremesa:", "Refresco:"]
    # --- 3. DESJEJUM ---------------------------------------------------------
    key = ""
    counter_v = 1
    counter_pao = 1
    counter_achocolatado = 1
    counter_c = 1
    it = iter(range(id_desjejum, id_almoco-8))
    for x in it:
        if foods[x] in keys_desjejum:
            if foods[x] == "Bebidas" or foods[x] == "Comp.":
                # Two-word section names ("Bebidas quentes", "Comp. Vegetariano"):
                # the following token is part of the key, so consume it.
                key = foods[x] + " " + foods[x+1]
                x = next(it)
            elif foods[x] == "Vegetariano":
                # Numbered sections: "Vegetariano 1".."Vegetariano 3".
                key = foods[x] + " " + str(counter_v)
                counter_v = counter_v + 1
            elif foods[x] == "Complemento":
                key = foods[x] + " " + str(counter_c)
                counter_c = counter_c + 1
            elif foods[x] == "Pão":
                # Only the first occurrence starts the section; later ones are
                # dish words (e.g. "Pão de queijo").
                if counter_pao == 1:
                    key = foods[x]
                    counter_pao = counter_pao - 1
                else:
                    menu["DESJEJUM"][key].append(foods[x])
            elif foods[x] == "Achocolatado":
                if counter_achocolatado == 1:
                    key = foods[x]
                    counter_achocolatado = counter_achocolatado - 1
                else:
                    menu["DESJEJUM"][key].append(foods[x])
            else:
                key = foods[x]
        else:
            menu["DESJEJUM"][key].append(foods[x])
    # --- 4. ALMOÇO -----------------------------------------------------------
    for x in range(id_almoco, id_jantar-8):
        if foods[x] in keys_almoco:
            if foods[x] == "Principal:" or foods[x] == "Vegetariano:":
                # Re-attach the "Prato" prefix so the key matches the dict above.
                key = foods[x-1] + " " + foods[x]
            else:
                key = foods[x]
        else:
            if foods[x] != "Prato":
                menu["ALMOÇO"][key].append(foods[x])
    # --- 5. JANTAR -----------------------------------------------------------
    for x in range(id_jantar, id_end):
        if foods[x] in keys_jantar:
            if foods[x] == "Principal:" or foods[x] == "Vegetariano:":
                key = foods[x-1] + " " + foods[x]
            else:
                key = foods[x]
        else:
            if foods[x] != "Prato":
                menu["JANTAR"][key].append(foods[x])
    menu_days = {
        "Segunda-feira": {
            "DESJEJUM": {},
            "ALMOÇO": {},
            "JANTAR": {}
        },
        "Terça-feira": {
            "DESJEJUM": {},
            "ALMOÇO": {},
            "JANTAR": {}
        },
        "Quarta-feira": {
            "DESJEJUM": {},
            "ALMOÇO": {},
            "JANTAR": {}
        },
        "Quinta-feira": {
            "DESJEJUM": {},
            "ALMOÇO": {},
            "JANTAR": {}
        },
        "Sexta-feira": {
            "DESJEJUM": {},
            "ALMOÇO": {},
            "JANTAR": {}
        },
        "Sábado": {
            "DESJEJUM": {},
            "ALMOÇO": {},
            "JANTAR": {}
        },
        "Domingo": {
            "DESJEJUM": {},
            "ALMOÇO": {},
            "JANTAR": {}
        }
    }
    # --- 6. Index where each day's dish starts within every section ----------
    # A capitalised token starts a new day's dish, unless it continues the
    # previous word ('/', 'à', 'de' or 'e' before it, or it begins with '/').
    # NOTE: for i == 0, tokens[i - 1] wraps to the *last* token — behaviour
    # kept identical to the original.
    menu_index = {}
    for element in menu:
        sections = {}
        for e in menu[element]:
            tokens = menu[element][e]
            title_positions = []
            for i in range(len(tokens)):
                if (tokens[i].istitle()
                        and tokens[i - 1] != '/'
                        and tokens[i][0] != '/'
                        and tokens[i - 1] != 'à'
                        and tokens[i - 1] != "de"
                        and tokens[i - 1] != 'e'):
                    title_positions.append(i)
            sections[e] = [title_positions, tokens]
        menu_index[element] = sections
    # --- 7. Assemble per-day strings -----------------------------------------
    days = ["Segunda-feira", "Terça-feira", "Quarta-feira",
            "Quinta-feira", "Sexta-feira", "Sábado", "Domingo"]
    for element in menu_index:
        for e, (title_idx, tokens) in menu_index[element].items():
            # Each title index starts one day's entry; the entry runs until the
            # next title (or the end of the token list for the last day).
            for i, start in enumerate(title_idx):
                end = title_idx[i + 1] if i + 1 < len(title_idx) else len(tokens)
                menu_days[days[i]][element][e] = " ".join(tokens[start:end])
    return menu_days
| 29.543779 | 84 | 0.427858 |
5fff91960180c6f5f534b1f4a5a2dde5e48561f4 | 5,198 | py | Python | src/tests/test_status_flag_change.py | ghandic/6502Emulator | e647f86e5af777a2e0d8bccc50522e6db4e39162 | [
"MIT"
] | null | null | null | src/tests/test_status_flag_change.py | ghandic/6502Emulator | e647f86e5af777a2e0d8bccc50522e6db4e39162 | [
"MIT"
] | null | null | null | src/tests/test_status_flag_change.py | ghandic/6502Emulator | e647f86e5af777a2e0d8bccc50522e6db4e39162 | [
"MIT"
] | null | null | null | import copy
from truth.truth import AssertThat
from ..emulator.const import OpCodes
def test_clc_will_clear_the_carry_flag(cpu):
    """CLC clears the carry flag and leaves every other flag untouched."""
    # Arrange: carry set, CLC planted at the reset target.
    cpu.reset_to(0xFF00)
    cpu.Flag.C = True
    cpu.Memory[0xFF00] = OpCodes.INS_CLC
    cycles = 2
    before = copy.copy(cpu)
    # Act:
    used = cpu.execute(cycles)
    # Assert: cycle count, target flag, and all remaining flags preserved.
    AssertThat(used).IsEqualTo(cycles)
    AssertThat(cpu.Flag.C).IsFalsy()
    for flag in ("Z", "I", "D", "B", "V", "N"):
        AssertThat(getattr(cpu.Flag, flag)).IsEqualTo(getattr(before.Flag, flag))
def test_sec_will_clear_the_carry_flag(cpu):
    """SEC sets the carry flag and leaves every other flag untouched."""
    # NOTE(review): the function name says "clear" but SEC *sets* carry —
    # consider renaming to test_sec_will_set_the_carry_flag.
    # Arrange: carry cleared, SEC planted at the reset target.
    cpu.reset_to(0xFF00)
    cpu.Flag.C = False
    cpu.Memory[0xFF00] = OpCodes.INS_SEC
    cycles = 2
    before = copy.copy(cpu)
    # Act:
    used = cpu.execute(cycles)
    # Assert: cycle count, target flag, and all remaining flags preserved.
    AssertThat(used).IsEqualTo(cycles)
    AssertThat(cpu.Flag.C).IsTruthy()
    for flag in ("Z", "I", "D", "B", "V", "N"):
        AssertThat(getattr(cpu.Flag, flag)).IsEqualTo(getattr(before.Flag, flag))
def test_cld_will_clear_the_decimal_flag(cpu):
    """CLD clears the decimal flag and leaves every other flag untouched."""
    # Arrange: decimal set, CLD planted at the reset target.
    cpu.reset_to(0xFF00)
    cpu.Flag.D = True
    cpu.Memory[0xFF00] = OpCodes.INS_CLD
    cycles = 2
    before = copy.copy(cpu)
    # Act:
    used = cpu.execute(cycles)
    # Assert: cycle count, target flag, and all remaining flags preserved.
    AssertThat(used).IsEqualTo(cycles)
    AssertThat(cpu.Flag.D).IsFalsy()
    for flag in ("Z", "I", "C", "B", "V", "N"):
        AssertThat(getattr(cpu.Flag, flag)).IsEqualTo(getattr(before.Flag, flag))
def test_sed_will_clear_the_decimal_flag(cpu):
    """SED sets the decimal flag and leaves every other flag untouched."""
    # NOTE(review): the function name says "clear" but SED *sets* decimal —
    # consider renaming to test_sed_will_set_the_decimal_flag.
    # Arrange: decimal cleared, SED planted at the reset target.
    cpu.reset_to(0xFF00)
    cpu.Flag.D = False
    cpu.Memory[0xFF00] = OpCodes.INS_SED
    cycles = 2
    before = copy.copy(cpu)
    # Act:
    used = cpu.execute(cycles)
    # Assert: cycle count, target flag, and all remaining flags preserved.
    AssertThat(used).IsEqualTo(cycles)
    AssertThat(cpu.Flag.D).IsTruthy()
    for flag in ("Z", "I", "C", "B", "V", "N"):
        AssertThat(getattr(cpu.Flag, flag)).IsEqualTo(getattr(before.Flag, flag))
def test_cli_will_clear_the_interrupt_flag(cpu):
    """CLI clears the interrupt-disable flag; every other flag is untouched."""
    # Arrange: interrupt-disable set, CLI planted at the reset target.
    cpu.reset_to(0xFF00)
    cpu.Flag.I = True
    cpu.Memory[0xFF00] = OpCodes.INS_CLI
    cycles = 2
    before = copy.copy(cpu)
    # Act:
    used = cpu.execute(cycles)
    # Assert: cycle count, target flag, and all remaining flags preserved.
    AssertThat(used).IsEqualTo(cycles)
    AssertThat(cpu.Flag.I).IsFalsy()
    for flag in ("Z", "D", "C", "B", "V", "N"):
        AssertThat(getattr(cpu.Flag, flag)).IsEqualTo(getattr(before.Flag, flag))
def test_sei_will_clear_the_interrupt_flag(cpu):
    """SEI sets the interrupt-disable flag; every other flag is untouched."""
    # NOTE(review): the function name says "clear" but SEI *sets* the flag —
    # consider renaming to test_sei_will_set_the_interrupt_flag.
    # Arrange: interrupt-disable cleared, SEI planted at the reset target.
    cpu.reset_to(0xFF00)
    cpu.Flag.I = False
    cpu.Memory[0xFF00] = OpCodes.INS_SEI
    cycles = 2
    before = copy.copy(cpu)
    # Act:
    used = cpu.execute(cycles)
    # Assert: cycle count, target flag, and all remaining flags preserved.
    AssertThat(used).IsEqualTo(cycles)
    AssertThat(cpu.Flag.I).IsTruthy()
    for flag in ("Z", "D", "C", "B", "V", "N"):
        AssertThat(getattr(cpu.Flag, flag)).IsEqualTo(getattr(before.Flag, flag))
def test_clv_will_clear_the_overflow_flag(cpu):
    """CLV clears the overflow flag and leaves every other flag untouched."""
    # Arrange: overflow set, CLV planted at the reset target.
    cpu.reset_to(0xFF00)
    cpu.Flag.V = True
    cpu.Memory[0xFF00] = OpCodes.INS_CLV
    cycles = 2
    before = copy.copy(cpu)
    # Act:
    used = cpu.execute(cycles)
    # Assert: cycle count, target flag, and all remaining flags preserved.
    AssertThat(used).IsEqualTo(cycles)
    AssertThat(cpu.Flag.V).IsFalsy()
    for flag in ("Z", "D", "C", "B", "I", "N"):
        AssertThat(getattr(cpu.Flag, flag)).IsEqualTo(getattr(before.Flag, flag))
| 31.125749 | 54 | 0.714121 |
f6e2750fd89bdd7556ebc4562600ff7421edc038 | 1,471 | py | Python | demos/fluid_logo.py | marc-gav/PhiFlow | b6186fd1503d040997b52d49aa18cd875267c27e | [
"MIT"
] | null | null | null | demos/fluid_logo.py | marc-gav/PhiFlow | b6186fd1503d040997b52d49aa18cd875267c27e | [
"MIT"
] | null | null | null | demos/fluid_logo.py | marc-gav/PhiFlow | b6186fd1503d040997b52d49aa18cd875267c27e | [
"MIT"
] | null | null | null | """ Fluid Logo
Incompressible fluid simulation with obstacles and buoyancy.
"""
from phi.flow import *
# from phi.torch.flow import *
# from phi.tf.flow import *
# from phi.jax.flow import *
# Simulation domain: 128x128 grid cells over a 100x100 physical box.
DOMAIN = dict(x=128, y=128, bounds=Box[0:100, 0:100])
# Five vertical bars plus three extra boxes; their union forms the logo obstacle.
OBSTACLE_GEOMETRIES = [Box[15 + x * 7:15 + (x + 1) * 7, 41:83] for x in range(1, 10, 2)] + [Box[43:50, 41:48], Box[15:43, 83:90], Box[50:85, 83:90]]
OBSTACLE = Obstacle(union(OBSTACLE_GEOMETRIES))
# Binary mask of the obstacle sampled on the centered grid (for display only).
OBSTACLE_MASK = HardGeometryMask(OBSTACLE.geometry) >> CenteredGrid(0, extrapolation.BOUNDARY, **DOMAIN)
# Three smoke sources near the bottom with weights 1.0, 0.8 and 0.1.
INFLOW = CenteredGrid(Box[14:21, 6:10], extrapolation.BOUNDARY, **DOMAIN) + \
         CenteredGrid(Box[79:86, 6:10], extrapolation.BOUNDARY, **DOMAIN) * 0.8 + \
         CenteredGrid(Box[44:47, 49:50], extrapolation.BOUNDARY, **DOMAIN) * 0.1
velocity = StaggeredGrid(0, extrapolation.ZERO, **DOMAIN)
smoke = pressure = divergence = remaining_divergence = CenteredGrid(0, extrapolation.BOUNDARY, **DOMAIN)
# Interactive time loop: advect smoke, apply buoyancy, project to divergence-free.
for _ in view(display=['smoke', 'velocity', 'pressure', 'OBSTACLE_MASK'], play=False).range(warmup=1):
    smoke = advect.semi_lagrangian(smoke, velocity, 1) + INFLOW
    buoyancy_force = smoke * (0, 0.1) >> velocity  # resamples density to velocity sample points
    velocity = advect.semi_lagrangian(velocity, velocity, 1) + buoyancy_force
    # Pressure projection; the previous pressure seeds the CG solver (x0).
    velocity, pressure = fluid.make_incompressible(velocity, (OBSTACLE,), Solve('CG-adaptive', 1e-5, 0, x0=pressure))
    remaining_divergence = field.divergence(velocity)  # diagnostic: residual divergence
| 54.481481 | 148 | 0.709041 |
df4412311f52a1be84ee7a2d580b1d0246a2c54d | 3,382 | py | Python | professionals/views.py | aykutgk/GoNaturalistic | f1f5e529eef94ae1f642e056cdfb01711333a929 | [
"MIT"
] | null | null | null | professionals/views.py | aykutgk/GoNaturalistic | f1f5e529eef94ae1f642e056cdfb01711333a929 | [
"MIT"
] | null | null | null | professionals/views.py | aykutgk/GoNaturalistic | f1f5e529eef94ae1f642e056cdfb01711333a929 | [
"MIT"
] | null | null | null | import json
from django.http import StreamingHttpResponse, HttpResponse
from django.views.generic import ListView, DetailView, FormView
from professionals.models import Professional
from articles.models import Article
from consultations.models import Consultation
from django.contrib.auth.forms import AuthenticationForm
class IndexView(ListView, FormView):
    """Paginated directory of professionals with an embedded login form.

    ``?perPage=N`` overrides the default page size; malformed values are now
    ignored instead of raising an unhandled ValueError (HTTP 500).
    """
    template_name = 'professionals/professionalIndexPage.html'
    context_object_name = 'professional_list'
    form_class = AuthenticationForm
    paginate_by = 5

    def get(self, request, *args, **kwargs):
        self.user = request.user
        self.url = request.path
        per_page = request.GET.get('perPage')
        if per_page:
            try:
                # Honour a caller-supplied page size; keep the default on bad input.
                self.paginate_by = int(per_page)
            except ValueError:
                pass
        return super(IndexView, self).get(request, *args, **kwargs)

    def get_queryset(self):
        # Alphabetical listing by full name.
        return Professional.objects.all().order_by('professional_full_name')

    def get_context_data(self, **kwargs):
        context = super(IndexView, self).get_context_data(**kwargs)
        context['form'] = self.get_form(self.form_class)
        context['url'] = self.url
        return context
class ProfessionalPageView(FormView, DetailView):
    """Detail page for one professional, with a login form and (optionally)
    that professional's articles and consultations.

    Superusers see all items; other visitors only see published ones
    (status "p").  Each page view increments the profile's hit counter.
    """
    model = Professional
    template_name = 'professionals/professionalPage.html'
    form_class = AuthenticationForm
    def get(self, request, *args, **kwargs):
        # Stash the requesting user and path for use in get_context_data.
        self.user = request.user
        self.url = request.path
        return super(ProfessionalPageView, self).get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(ProfessionalPageView, self).get_context_data(**kwargs)
        # Count this page view on the professional's profile.
        self.object.hit()
        context['form'] = self.get_form(self.form_class)
        # s_article / s_consultation appear to be "y"/"n" feature toggles on
        # the profile — TODO confirm against the Professional model.
        if self.object.s_article == "y":
            if self.user.is_superuser:
                #context['suggestedArticles']= self.object.professional_suggested_article_set.all()
                context['articles']= Article.objects.filter(article_author=self.object).order_by('article_hits')[:12]
            else:
                # Non-admins only see published articles (article_status "p").
                #context['suggestedArticles']= self.object.professional_suggested_article_set.filter(s_article__article_status="p")
                context['articles']= Article.objects.filter(article_author=self.object,article_status="p").order_by('article_hits')[:12]
        if self.object.s_consultation == "y":
            if self.user.is_superuser:
                #context['suggestedConsultations']= self.object.professional_suggested_consultation_set.all()
                context['consultations']= Consultation.objects.filter(consultation_professional=self.object).order_by('consultation_hits')[:12]
            else:
                # Non-admins only see published consultations (status "p").
                #context['suggestedConsultations']= self.object.professional_suggested_consultation_set.filter(s_consultation__consultation_status="p")
                context['consultations']= Consultation.objects.filter(consultation_professional=self.object,consultation_status="p").order_by('consultation_hits')[:12]
        return context
15f1209a558bc712a08c71269aa333178844f790 | 54 | py | Python | groupdocs/version.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | groupdocs/version.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | groupdocs/version.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | __pkgname__ = "groupdocs-python"
# Release version of the GroupDocs Python SDK (distributed as "groupdocs-python").
__version__ = "1.7.0"
f1b93782731a70a905ee076811ee4781598d1831 | 5,842 | py | Python | staff_manage_sdk/model/ops_automation/jobs_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | staff_manage_sdk/model/ops_automation/jobs_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | staff_manage_sdk/model/ops_automation/jobs_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jobs.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from staff_manage_sdk.model.ops_automation import bind_resource_pb2 as staff__manage__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2
from staff_manage_sdk.model.ops_automation import mail_info_pb2 as staff__manage__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='jobs.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\njobs.proto\x12\x0eops_automation\x1a\x39staff_manage_sdk/model/ops_automation/bind_resource.proto\x1a\x35staff_manage_sdk/model/ops_automation/mail_info.proto\"\xc1\x01\n\x04Jobs\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x02 \x01(\t\x12\x0e\n\x06menuId\x18\x03 \x01(\t\x12\x32\n\x0c\x62indResource\x18\x04 \x01(\x0b\x32\x1c.ops_automation.BindResource\x12\x0c\n\x04\x64\x65sc\x18\x05 \x01(\t\x12\x13\n\x0b\x61llowModify\x18\x06 \x01(\x08\x12&\n\x04mail\x18\x07 \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\n\n\x02id\x18\x08 \x01(\tBJZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[staff__manage__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2.DESCRIPTOR,staff__manage__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBS = _descriptor.Descriptor(
name='Jobs',
full_name='ops_automation.Jobs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ops_automation.Jobs.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='ops_automation.Jobs.category', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuId', full_name='ops_automation.Jobs.menuId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bindResource', full_name='ops_automation.Jobs.bindResource', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='ops_automation.Jobs.desc', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowModify', full_name='ops_automation.Jobs.allowModify', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.Jobs.mail', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.Jobs.id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=145,
serialized_end=338,
)
_JOBS.fields_by_name['bindResource'].message_type = staff__manage__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2._BINDRESOURCE
_JOBS.fields_by_name['mail'].message_type = staff__manage__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['Jobs'] = _JOBS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Jobs = _reflection.GeneratedProtocolMessageType('Jobs', (_message.Message,), {
'DESCRIPTOR' : _JOBS,
'__module__' : 'jobs_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.Jobs)
})
_sym_db.RegisterMessage(Jobs)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.365079 | 669 | 0.763608 |
f8c41105fc836706539f11f4d9931b80fcc84ebd | 3,553 | py | Python | tests/certification/optional/test_sofort_transactions.py | isunnapud/vantiv-sdk-for-python | 85ea6ba160c2436a305b9326cbb7d6c8f127c57c | [
"MIT"
] | 4 | 2017-11-16T16:02:06.000Z | 2021-05-04T14:40:08.000Z | tests/certification/optional/test_sofort_transactions.py | isunnapud/vantiv-sdk-for-python | 85ea6ba160c2436a305b9326cbb7d6c8f127c57c | [
"MIT"
] | 7 | 2017-08-23T15:04:38.000Z | 2020-04-07T20:20:15.000Z | tests/certification/optional/test_sofort_transactions.py | isunnapud/vantiv-sdk-for-python | 85ea6ba160c2436a305b9326cbb7d6c8f127c57c | [
"MIT"
] | 13 | 2017-10-17T22:07:57.000Z | 2022-03-29T17:33:42.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Vantiv eCommerce
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the 'Software'), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import unittest
package_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))))
sys.path.insert(0, package_root)
from vantivsdk import *
package_root = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, package_root)
import certification_test_conf
conf = certification_test_conf.conf
class TestSofortTransactions(unittest.TestCase):
    """Certification tests for SOFORT sale transactions against the Vantiv
    sandbox (positive case P1, negative case N10)."""

    def test_p1_sofortSale(self):
        """P1: a SOFORT sale with a valid NL billing country is accepted."""
        txn = {
            'sale': {
                'orderId': 'p1_sofortSale',
                'amount': 10011,
                'id': 'ids',
                'reportGroup': 'Planets',
                'orderSource': 'ecommerce',
                'billToAddress': {
                    'name': 'David Berman',
                    'addressLine1': '10 Main St.',
                    'city': 'San Jose',
                    'state': 'CA',
                    'zip': '95032',
                    'country': 'NL',
                    'email': 'jdoe@phoenixProcessing.com',
                    'phone': '7812701111'
                },
                'sofort': {
                    'preferredLanguage': 'NL',
                }
            }
        }
        response = online.request(txn, conf)
        # TODO iDEAL Invalid Payment Type — sandbox rejects this payment type,
        # so the success assertion stays disabled until that is resolved.
        # self.assertEqual('000', response['authorizationResponse']['response'])

    def test_n10_sofortSale(self):
        """N10: a SOFORT sale with a non-European billing country is rejected
        with response code 917 (invalid billing country)."""
        txn = {
            'sale': {
                'orderId': 'n10_sofortSale',
                'amount': 20100,
                'id': 'ids',
                'reportGroup': 'Planets',
                'orderSource': 'ecommerce',
                'billToAddress': {
                    'name': 'David Berman',
                    'addressLine1': '10 Main St.',
                    'city': 'San Jose',
                    'state': 'CA',
                    'zip': '95032',
                    'country': 'USA',
                    'email': 'jdoe@phoenixProcessing.com',
                    'phone': '7812701111'
                },
                'sofort': {
                    'preferredLanguage': 'NL',
                }
            }
        }
        response = online.request(txn, conf)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual('917', response['saleResponse']['response'])
        self.assertEqual('Invalid billing country code', response['saleResponse']['message'])
if __name__ == '__main__':
unittest.main()
| 34.833333 | 108 | 0.567689 |
c5cc44393e38b390bc124473e54bef6bfb01dd7d | 4,244 | py | Python | test/test_file_metadata_definition.py | nodeum-io/nodeum-sdk-python | 205536491bff507dea7be44af46202c17e7121d9 | [
"MIT"
] | null | null | null | test/test_file_metadata_definition.py | nodeum-io/nodeum-sdk-python | 205536491bff507dea7be44af46202c17e7121d9 | [
"MIT"
] | null | null | null | test/test_file_metadata_definition.py | nodeum-io/nodeum-sdk-python | 205536491bff507dea7be44af46202c17e7121d9 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Nodeum API
The Nodeum API makes it easy to tap into the digital data mesh that runs across your organisation. Make requests to our API endpoints and we’ll give you everything you need to interconnect your business workflows with your storage. All production API requests are made to: http://nodeumhostname/api/ The current production version of the API is v1. **REST** The Nodeum API is a RESTful API. This means that the API is designed to allow you to get, create, update, & delete objects with the HTTP verbs GET, POST, PUT, PATCH, & DELETE. **JSON** The Nodeum API speaks exclusively in JSON. This means that you should always set the Content-Type header to application/json to ensure that your requests are properly accepted and processed by the API. **Authentication** All API calls require user-password authentication. **Cross-Origin Resource Sharing** The Nodeum API supports CORS for communicating from Javascript for these endpoints. You will need to specify an Origin URI when creating your application to allow for CORS to be whitelisted for your domain. **Pagination** Some endpoints such as File Listing return a potentially lengthy array of objects. In order to keep the response sizes manageable the API will take advantage of pagination. Pagination is a mechanism for returning a subset of the results for a request and allowing for subsequent requests to “page” through the rest of the results until the end is reached. Paginated endpoints follow a standard interface that accepts two query parameters, limit and offset, and return a payload that follows a standard form. These parameters names and their behavior are borrowed from SQL LIMIT and OFFSET keywords. **Versioning** The Nodeum API is constantly being worked on to add features, make improvements, and fix bugs. This means that you should expect changes to be introduced and documented. 
However, there are some changes or additions that are considered backwards-compatible and your applications should be flexible enough to handle them. These include: - Adding new endpoints to the API - Adding new attributes to the response of an existing endpoint - Changing the order of attributes of responses (JSON by definition is an object of unordered key/value pairs) **Filter parameters** When browsing a list of items, multiple filter parameters may be applied. Some operators can be added to the value as a prefix: - `=` value is equal. Default operator, may be omitted - `!=` value is different - `>` greater than - `>=` greater than or equal - `<` lower than - `>=` lower than or equal - `><` included in list, items should be separated by `|` - `!><` not included in list, items should be separated by `|` - `~` pattern matching, may include `%` (any characters) and `_` (one character) - `!~` pattern not matching, may include `%` (any characters) and `_` (one character) # noqa: E501
The version of the OpenAPI document: 2.1.0
Contact: info@nodeum.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import nodeum_sdk
from nodeum_sdk.models.file_metadata_definition import FileMetadataDefinition # noqa: E501
from nodeum_sdk.rest import ApiException
class TestFileMetadataDefinition(unittest.TestCase):
    """FileMetadataDefinition unit test stubs (auto-generated by openapi-generator)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a FileMetadataDefinition test instance.
        include_optional is a boolean; when False only required
        params are included, when True both required and
        optional params are included """
        # model = nodeum_sdk.models.file_metadata_definition.FileMetadataDefinition() # noqa: E501
        # NOTE(review): the model currently has no required params, so both
        # branches construct it with no arguments.
        if include_optional :
            return FileMetadataDefinition(
            )
        else :
            return FileMetadataDefinition(
            )
    def testFileMetadataDefinition(self):
        """Smoke-test construction with and without optional params."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 80.075472 | 2,886 | 0.745994 |
194464d0a7df3d9cd728751a76c0f3de87784917 | 4,169 | py | Python | plot_log_arrays.py | victor-gil-sepulveda/PhD-ANMPythonHelpers | c0e15684cce4aa4da90141b51f043a567a5f8655 | [
"MIT"
] | 1 | 2017-11-01T15:19:10.000Z | 2017-11-01T15:19:10.000Z | plot_log_arrays.py | victor-gil-sepulveda/PhD-ANMPythonHelpers | c0e15684cce4aa4da90141b51f043a567a5f8655 | [
"MIT"
] | null | null | null | plot_log_arrays.py | victor-gil-sepulveda/PhD-ANMPythonHelpers | c0e15684cce4aa4da90141b51f043a567a5f8655 | [
"MIT"
] | null | null | null | '''
Created on 19/03/2015
@author: user
'''
import numpy
from optparse import OptionParser
import matplotlib.pyplot as plt
import anmichelpers.tools.measure as measure
from pyRMSD.RMSDCalculator import RMSDCalculator
def load_file(path):
    """Load a whitespace-delimited numeric file into a 2-D numpy array.

    A file holding a single row is promoted to shape (1, n) so callers can
    always index rows uniformly. On any load failure an error message is
    printed and the process exits (the script cannot proceed without data).
    """
    try:
        v = numpy.loadtxt(path)
        if len(v.shape) == 1:
            return numpy.array([v])
        else:
            return v
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt);
    # print() with parentheses works identically on Python 2 and 3.
    except Exception:
        print("[ERROR] Impossible to load %s" % path)
        exit()
def needs_one_file(i1):
    """Require that one input path was given, then load and return it.

    Aborts via AssertionError when the CLI option was not supplied.
    """
    assert i1 is not None, "An input file is needed"
    return load_file(i1)
def needs_two_files(i1, i2):
    """Require that both input paths were given, then load and return both.

    The previous implementation called needs_one_file() purely for its
    assertion, threw away the loaded arrays, and then read both files again
    with load_file() — each file was parsed twice. Reusing the values from
    needs_one_file() keeps the same asserts and return value with one read.
    """
    return needs_one_file(i1), needs_one_file(i2)
def can_have_different_numberof_rows(v1, v2):
    """If the two 2-D arrays differ in row count, truncate both to the
    shorter one; arrays that already match are returned untouched."""
    rows_a, rows_b = len(v1), len(v2)
    if rows_a != rows_b:
        shortest = min(rows_a, rows_b)
        v1 = v1[:shortest, :]
        v2 = v2[:shortest, :]
    return v1, v2
if __name__ == '__main__':
    # Command line: two input log files, a row range to plot, the plot type,
    # and whether to drop the leading per-row step number.
    parser = OptionParser()
    parser.add_option("--i1", dest="input1")
    parser.add_option("--i2", dest="input2")
    parser.add_option("-f","--from", type= "int", dest="_from")
    parser.add_option("-t","--to", type= "int", dest="to")
    parser.add_option("-p","--plot_type", default="normal", dest="plot_type")
    parser.add_option("-s","--skip_step", action= "store_true", dest="skip_step")
    (options, args) = parser.parse_args()
    plot_types = ["ccdist", "absdiff", "diff", "rmsd", "normal"]
    assert options.to >= options._from, "[ERROR] 'from' value is bigger than 'to'. "
    assert options.plot_type in plot_types, "[ERROR] plot type ('-p','--plot_type') must be one of %s"%str(plot_types)
    result = None
    # diff / absdiff: element-wise (absolute) difference of the two logs.
    if options.plot_type == "diff" or options.plot_type == "absdiff":
        v1,v2 = needs_two_files(options.input1, options.input2)
        assert len(v1[0]) == len(v2[0]),"[ERROR] arrays must have the same length (%s vs %s)."
        v1,v2 = can_have_different_numberof_rows(v1, v2)
        result = v2-v1
        if options.plot_type == "absdiff":
            result = abs(result)
        if options.skip_step:
            # skip first number (the leading step/frame index column)
            result = result[:,1:]
    # ccdist: per-row mode magnitudes of the difference of the two logs.
    if options.plot_type == "ccdist":
        v1,v2 = needs_two_files(options.input1, options.input2)
        assert len(v1[0]) == len(v2[0]),"[ERROR] arrays must have the same length (%s vs %s)."
        # NOTE(review): this duplicates can_have_different_numberof_rows inline.
        if len(v1) != len(v2):
            min_c = min(len(v1),len(v2))
            v1 = v1[:min_c,:]
            v2 = v2[:min_c,:]
        result = v2-v1
        tmp_result = []
        for r in result:
            tmp_result.append(measure.calculate_mode_magnitudes(r))
        result = numpy.array(tmp_result)
        if options.skip_step:
            # skip first number
            result = result[:,1:]
    # rmsd: treat each row pair (minus the step column) as flattened (n, 3)
    # coordinate sets and compute their pairwise RMSD with pyRMSD.
    if options.plot_type == "rmsd":
        v1,v2 = needs_two_files(options.input1, options.input2)
        v1,v2 = v1[:,1:], v2[:,1:]
        v1,v2 = can_have_different_numberof_rows(v1, v2)
        result = []
        for i in range(len(v1)):
            # NOTE(review): len(...)/3 relies on Python 2 integer division;
            # under Python 3 it yields a float and numpy.resize would fail.
            coordset1 = numpy.resize(v1[i], (len(v1[i])/3,3))
            coordset2 = numpy.resize(v2[i], (len(v2[i])/3,3))
            coordsets = numpy.array([coordset1, coordset2])
            calculator = RMSDCalculator("QCP_SERIAL_CALCULATOR", coordsets)
            result.append(calculator.pairwise(0,1))
    elif options.plot_type == "normal":
        result = needs_one_file(options.input1)
        if options.skip_step:
            # skip first number
            result = result[:,1:]
    # Array-valued plot types: one stacked subplot per requested row.
    if options.plot_type in ["ccdist", "absdiff", "diff", "normal"]:
        number_of_plots = options.to-options._from+1
        subplot_shape = (number_of_plots,1)
        plt.title('----')
        for i in range(number_of_plots):
            ax = plt.subplot2grid(subplot_shape, (i,0))
            plt.plot(range(len(result[i])), result[i])
        plt.ylabel("value")
        plt.xlabel("id")
        plt.show()
    elif options.plot_type in ["rmsd"]:
        # rmsd produces a single scalar series, so a single plot suffices.
        plt.plot(result)
        plt.ylabel("value")
        plt.xlabel("id")
        plt.show()
| 33.087302 | 118 | 0.574478 |
9ee7a2a6f06c914e823a42be5ce2a234768220b0 | 924 | py | Python | utils/filter_helper.py | jiangwei221/image-matching-benchmark | 7f47a4acb24130ae49d9be261dd6333fd43e577e | [
"Apache-2.0"
] | 271 | 2020-02-12T01:40:22.000Z | 2022-03-16T03:13:29.000Z | utils/filter_helper.py | QinZiwen/image-matching-benchmark | e3946deb69940a7bcbbbc7aa7dc392f34497d036 | [
"Apache-2.0"
] | 24 | 2020-02-12T11:07:35.000Z | 2020-08-10T09:06:39.000Z | utils/filter_helper.py | QinZiwen/image-matching-benchmark | e3946deb69940a7bcbbbc7aa7dc392f34497d036 | [
"Apache-2.0"
] | 42 | 2020-02-12T08:13:15.000Z | 2021-09-26T07:45:18.000Z | # Copyright 2020 Google LLC, University of Victoria, Czech Technical University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from utils.path_helper import get_filter_match_file
def is_filter_complete(cfg):
    '''Return True when match filtering has already produced its output.

    Completeness is defined solely by the existence of the filtered-match
    file for this configuration. (The previous docstring/comment mentioned
    stereo evaluation and colmap pose files — copy-paste residue from a
    different helper that did not describe this function.)
    '''
    return os.path.exists(get_filter_match_file(cfg))
| 35.538462 | 79 | 0.765152 |
12407cd42b3bc1fbf29788e1cde7796769298b6c | 276 | py | Python | website/migrations/0055_merge_20210119_2125.py | czhu1217/cmimc-online | 5ef49ceec0bb86d8ae120a6ecfd723532e277821 | [
"MIT"
] | null | null | null | website/migrations/0055_merge_20210119_2125.py | czhu1217/cmimc-online | 5ef49ceec0bb86d8ae120a6ecfd723532e277821 | [
"MIT"
] | 1 | 2022-01-23T21:08:12.000Z | 2022-01-23T21:08:12.000Z | website/migrations/0055_merge_20210119_2125.py | czhu1217/cmimc-online | 5ef49ceec0bb86d8ae120a6ecfd723532e277821 | [
"MIT"
] | 1 | 2021-10-17T17:11:42.000Z | 2021-10-17T17:11:42.000Z | # Generated by Django 3.1.5 on 2021-01-20 02:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0052_auto_20210114_2228'),
('website', '0054_task_grader_data_file'),
]
operations = [
]
| 18.4 | 50 | 0.652174 |
545ba2df174810388f06523e5174cbed3c2148ad | 63 | py | Python | riberry_web/__init__.py | srafehi/riberry_web | d4d57a5e54f10b725c55f6518fa74e24bd5b50ac | [
"MIT"
] | 1 | 2020-09-01T15:34:52.000Z | 2020-09-01T15:34:52.000Z | riberry_web/__init__.py | srafehi/riberry_web | d4d57a5e54f10b725c55f6518fa74e24bd5b50ac | [
"MIT"
] | null | null | null | riberry_web/__init__.py | srafehi/riberry_web | d4d57a5e54f10b725c55f6518fa74e24bd5b50ac | [
"MIT"
] | null | null | null | from . import app
from .app import main
__version__ = '0.1.2'
| 12.6 | 21 | 0.698413 |
8b109a02a571623fc7148dfb67e2ff27e45c2d56 | 3,423 | py | Python | recgve/models/tensorflow/train_model/ngcf/eval_ngcf.py | doubleblind148/IGCCF | bc0e90a5322f1ba4927ec89d9181190974f7e1ba | [
"MIT"
] | 1 | 2022-02-14T07:29:18.000Z | 2022-02-14T07:29:18.000Z | recgve/models/tensorflow/train_model/ngcf/eval_ngcf.py | doubleblind148/IGCCF | bc0e90a5322f1ba4927ec89d9181190974f7e1ba | [
"MIT"
] | null | null | null | recgve/models/tensorflow/train_model/ngcf/eval_ngcf.py | doubleblind148/IGCCF | bc0e90a5322f1ba4927ec89d9181190974f7e1ba | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
import argparse
import shutil
import tensorflow as tf
import wandb
from datasets.implemented_datasets import *
from evaluation.topk_evaluator import Evaluator
from igccf_experiments.best_models import get_wandb_project_dict
# Define static variables
from models.tensorflow.ngcf import NGCF
PROJECT = "Amaz_dat"
if __name__ == "__main__":
    # Evaluate a trained NGCF run: download its best checkpoint from W&B,
    # rebuild the model with the run's hyperparameters, and report/validate
    # Recall and NDCG on the validation and test splits.
    parser = argparse.ArgumentParser("Eval NGCF")
    ##########################################
    # identifier of WANDB run
    ##########################################
    parser.add_argument("--wandb_project", type=str, default=PROJECT)
    # NOTE(review): argparse `type=list` splits a string into characters;
    # this only behaves because the default is used — confirm before passing
    # --cutoff on the command line.
    parser.add_argument("--cutoff", type=list, default=[5, 20])
    args = vars(parser.parse_args())
    wandb_project_dict = get_wandb_project_dict(args["wandb_project"])
    ##########################################
    # Retrieve run parameters
    ##########################################
    api = wandb.Api()
    run_identifier = "XXXXXX/{}/{}".format(
        args["wandb_project"], wandb_project_dict["ngcf"]
    )
    run_object = api.run(run_identifier)
    # Download only the checkpoint files ("best_models/...") of the run.
    for f in run_object.files():
        if "best_models" in str(f):
            f.download(replace=True)
    run_parameters_dict = run_object.config
    ##########################################
    # Load dataset
    ##########################################
    # HACK: eval() turns the configured dataset class name into a class.
    # Safe only because the value comes from our own config, never from
    # untrusted input.
    dataset_dict = eval(wandb_project_dict["dataset"])().load_split(
        wandb_project_dict["split_name"]
    )
    train_df = dataset_dict["train"]
    user_data = {"interactions": train_df}
    val_df = dataset_dict["val"]
    test_df = dataset_dict["test"]
    ##########################################
    # Setting up val and test evaluator
    ##########################################
    val_evaluator = Evaluator(
        cutoff_list=run_parameters_dict["cutoff"],
        metrics=["Recall", "NDCG"],
        test_data=val_df,
    )
    test_evaluator = Evaluator(
        cutoff_list=run_parameters_dict["cutoff"],
        metrics=["Recall", "NDCG"],
        test_data=test_df,
    )
    ##########################################
    # Load model
    ##########################################
    # Rebuild the architecture with the run's recorded hyperparameters, then
    # restore the weights from the downloaded checkpoint.
    model = NGCF(
        train_df,
        embeddings_size=run_parameters_dict["embedding_size"],
        convolution_depth=run_parameters_dict["convolution_depth"],
        mess_dropout=run_parameters_dict["mess_dropout"],
        node_dropout=run_parameters_dict["node_dropout"],
    )
    weights_path = "best_models"
    latest = tf.train.latest_checkpoint(weights_path)
    model.load_weights(latest)
    # delete the downloaded weights files
    print("Deleting restored files from wandb")
    shutil.rmtree("best_models")
    ##########################################
    # Evaluate model
    ##########################################
    val_evaluator.evaluate_recommender(model, user_data=user_data)
    val_evaluator.print_evaluation_results()
    test_evaluator.evaluate_recommender(model, user_data=user_data)
    test_evaluator.print_evaluation_results()
    ##########################################
    # Log results on WANDB
    ##########################################
    # Re-key test metrics with a "test_" prefix and push them back to the run.
    test_result_dict = {}
    for k, v in test_evaluator.result_dict.items():
        new_key = "test_{}".format(k)
        test_result_dict[new_key] = test_evaluator.result_dict[k]
    run_object.summary.update(test_result_dict)
| 31.694444 | 70 | 0.568215 |
f8c85f279e0951e6a51763a8513c3e2a356c4544 | 1,543 | py | Python | civis/tests/test_civis.py | mcoirad-gmmb/civis-python | 4286da93496cc7d15d876c9001ea3aa1ad359972 | [
"BSD-3-Clause"
] | 1 | 2019-10-07T21:36:09.000Z | 2019-10-07T21:36:09.000Z | civis/tests/test_civis.py | mcoirad-gmmb/civis-python | 4286da93496cc7d15d876c9001ea3aa1ad359972 | [
"BSD-3-Clause"
] | 4 | 2019-09-25T21:05:09.000Z | 2019-10-22T15:09:42.000Z | civis/tests/test_civis.py | mcoirad-gmmb/civis-python | 4286da93496cc7d15d876c9001ea3aa1ad359972 | [
"BSD-3-Clause"
] | null | null | null | from unittest import mock
import civis
from civis.tests.mocks import TEST_SPEC
import pytest
@pytest.mark.parametrize('schema_tablename', [
    'foo.bar', '"foo".bar', 'foo."bar"', '"foo"."bar"'
])
def test_get_table_id(schema_tablename):
    """get_table_id must strip optional quoting from schema.tablename."""
    api_client = civis.APIClient(local_api_spec=TEST_SPEC, api_key='none')
    api_client.get_database_id = mock.Mock(return_value=123)
    tables_response = mock.MagicMock()
    tables_response.__getitem__.side_effect = {0: mock.Mock()}.__getitem__
    api_client.tables.list = mock.Mock(return_value=tables_response)
    api_client.get_table_id(table=schema_tablename, database=123)
    # Regardless of quoting style, the lookup must use the bare names.
    api_client.tables.list.assert_called_once_with(
        database_id=123, schema='foo', name='bar')
def test_get_storage_host_id():
    """Names resolve to ids, numeric ids pass through, unknown names raise."""
    api_client = civis.APIClient(local_api_spec=TEST_SPEC, api_key='none')

    class FakeHost:
        def __init__(self, id, name):
            self.id = id
            self.name = name

        def __getitem__(self, key):
            return getattr(self, key)

    hosts = [FakeHost(1234, 'test'), FakeHost(5678, 'othertest')]
    api_client.storage_hosts.list = mock.Mock(return_value=hosts)
    assert api_client.get_storage_host_id('test') == 1234
    api_client.storage_hosts.list.assert_called_once_with()
    assert api_client.get_storage_host_id(4732) == 4732
    with pytest.raises(ValueError, match="Storage Host invalidname not found"):
        api_client.get_storage_host_id('invalidname')
a8eb322abe1bebd8923ff5bd66277690dd77ff11 | 9,769 | py | Python | Numerico/EP2/Testes.py | victorathanasio/Personal-projects | 94c870179cec32aa733a612a6faeb047df16d977 | [
"MIT"
] | null | null | null | Numerico/EP2/Testes.py | victorathanasio/Personal-projects | 94c870179cec32aa733a612a6faeb047df16d977 | [
"MIT"
] | null | null | null | Numerico/EP2/Testes.py | victorathanasio/Personal-projects | 94c870179cec32aa733a612a6faeb047df16d977 | [
"MIT"
] | null | null | null | #Lui Damianci Ferreira - 10770579
#Victor A. C. Athanasio - 9784401
# %%
from Ep2_functions import *
import matplotlib.pyplot as plt
import pandas as pd
from itertools import cycle, islice
string = 'Resultados \n \n' #referente a criacao do apendice contendo resultados
counter = 0 #referente a criacao do apendice contendo resultados
gerar_text = False #referente a criacao do apendice contendo resultados
a = time.time()
# %% Teste A
def TesteA():
    """Run test A: one source at p=0.35 with exact intensity 7, N=128."""
    label = 'Teste A'
    mesh_size = 128
    sources = [0.35]
    basis = create_us(sources, mesh_size)
    target = 7 * basis[0]
    intensities, basis = resolveMMQ(sources, mesh_size, target)
    expected = np.array([7])
    error = finalize(label, intensities, target, basis, mesh_size, expected, sources)
    return intensities, error, expected, sources
# %% Teste B
def TesteB():
    """Run test B: four sources with known intensities 2.3/3.7/0.3/4.2, N=128."""
    label = 'Teste B'
    mesh_size = 128
    sources = [0.15, 0.3, 0.7, 0.8]
    basis = create_us(sources, mesh_size)
    target = 2.3 * basis[0] + 3.7 * basis[1] + 0.3 * basis[2] + 4.2 * basis[3]
    intensities, basis = resolveMMQ(sources, mesh_size, target)
    expected = np.array([2.3, 3.7, 0.3, 4.2])
    error = finalize(label, intensities, target, basis, mesh_size, expected, sources)
    return intensities, error, expected, sources
# %% TesteC
def TesteC(N):
    """Run test C: ten known sources read from the N-point reference file."""
    label = 'Teste C, N = {}'.format(N)
    sources, target = read_text(N)
    intensities, basis = resolveMMQ(sources, N, target)
    expected = np.array([1, 5, 2, 1.5, 2.2, 3.1, 0.6, 1.3, 3.9, 0.5])
    error = finalize(label, intensities, target, basis, N, expected, sources)
    return intensities, error, expected, sources
# %%
def TesteD(N):
    """Run test D: test C's setup with ~1% multiplicative noise on the target."""
    label = 'Teste D, N = {}'.format(N)
    sources, target = read_text(N)
    # Per-point factors uniformly distributed in [0.99, 1.01], as a column.
    perturbation = np.random.random(N - 1)
    perturbation = ((perturbation - 0.5) * 2 * 0.01 + 1).reshape(N - 1, 1)
    target = target * perturbation
    intensities, basis = resolveMMQ(sources, N, target)
    expected = np.array([1, 5, 2, 1.5, 2.2, 3.1, 0.6, 1.3, 3.9, 0.5])
    error = finalize(label, intensities, target, basis, N, expected, sources)
    return intensities, error, expected, sources
# %%
def TodosTestes():
    """Run every test (A, B, then C and D over all mesh sizes) and plot summaries."""
    TesteA()
    TesteB()
    mesh_sizes = [128, 256, 512, 1024, 2048]
    results_c = []
    errors_c = []
    for size in mesh_sizes:
        intensities, error, exact_c, sources_c = TesteC(size)
        results_c.append(intensities)
        errors_c.append(error)
    plot_serie_barra('TesteC', results_c, exact_c, sources_c)
    results_d = []
    errors_d = []
    for size in mesh_sizes:
        intensities, error, exact_d, sources_d = TesteD(size)
        results_d.append(intensities)
        errors_d.append(error)
    plot_serie_barra('TesteD', results_d, exact_d, sources_d)
    plot_serie_erro(errors_d, errors_c)
# %% Graficos
def finalize(Name, resp, uT, uarray, N, exata, plist):
    """Print the fitted intensities, compute the quadratic error, and draw
    every per-test plot; returns the error."""
    print_resp(Name, resp)
    approximation = our_sol(resp, uarray)
    quad_error = Erro_quadratico(N, approximation, uT)
    plot_exataXsol(Name, uT, approximation, quad_error)
    plot_barra(Name, resp, exata, plist)
    return quad_error
def print_resp(Name, resp):
    '''Pretty-print the fitted intensities and append them to the LaTeX
    results appendix accumulated in the module-level `string`.'''
    global string
    global counter
    print('---------------------------------------------------------------------------------------')
    print(Name, ':')
    # Every other test starts a new two-column LaTeX environment.
    if counter % 2 == 0:
        string += r'\begin{multicols}{2}' + '\n' #appendix-building (report)
    string += r'\noindent\rule{\linewidth}{0.4pt}' + '\n' #appendix-building (report)
    string += Name + ':' + '\n \n' #appendix-building (report)
    df = pd.DataFrame(columns=['', 'Ak'])
    for i in range(resp.shape[0]):
        df.loc[i, ''] = 'a{} = '.format(i + 1)
        df.loc[i, 'Ak'] = resp[i]
        print('a{} = {}'.format(i + 1, resp[i]))
        string += 'a{} = {}'.format(i + 1, resp[i]) + '\n \n' #appendix-building (report)
    # Drop the trailing blank line before forcing a LaTeX line break.
    string = string[:-2] #appendix-building (report)
    string += r'\\' #appendix-building (report)
    df = df.set_index('').dropna()
    print()
    string += '\n'#appendix-building (report)
def our_sol(resp, uarray):
    """Combine the basis solutions into one vector, weighted by the fitted
    least-squares intensities."""
    combination = np.zeros((uarray.shape[1], 1))
    for weight, basis_vector in zip(resp, uarray):
        combination += weight * basis_vector
    return combination
def Erro_quadratico(N, sol, uT):
    '''Compute the quadratic (L2, mesh-weighted) error between the computed
    and the exact solution, print it, and append it to the LaTeX appendix.'''
    global string
    global counter
    DeltaX = 1 / N
    Erro_ponto_a_ponto = uT - sol
    # vector *= vector
    # sum = np.sum(vector)
    # sum *= DeltaX
    # Erro = np.sqrt(sum)  -- the steps above can be replaced by:
    # prod_interno comes from Ep2_functions; presumably the Euclidean inner
    # product of the two vectors — TODO confirm against its definition.
    Erro = prod_interno(Erro_ponto_a_ponto, Erro_ponto_a_ponto)
    Erro = np.sqrt(DeltaX*Erro)
    print('Erro quadrático: {}'.format(Erro))
    string += 'Erro quadrático: {}'.format(Erro) #appendix-building (report)
    print()
    string += '\n \n' #appendix-building (report)
    # Close the two-column LaTeX environment opened by print_resp every
    # second test.
    if counter % 2 == 1: #appendix-building (report)
        string = string[:-2] #appendix-building (report)
        string += r'\end{multicols}' + '\n \n' #appendix-building (report)
    counter += 1 #appendix-building (report)
    return Erro
def plot_exataXsol(Name, vector, sol, Erro):
    '''Plot the exact vs. computed solution at the final instant, then their
    pointwise difference; both figures are saved under plots/.'''
    # Interior mesh points only; the fixed boundary points are excluded.
    N = vector.shape[0] + 1
    xspace = np.linspace(0, 1, N + 1)[1:-1]
    plt.clf()
    plt.plot(xspace, vector)
    plt.plot(xspace, sol)
    plt.ylim(0, np.max(vector) + 0.15*np.max(vector))
    plt.xlim(0, 1)
    plt.legend(['Solução exata', 'Solução calculada'])
    plt.ylabel('Temperature')
    plt.xlabel('Position')
    plt.suptitle('Solução exata e calculada, ' + Name)
    plt.text(0.35, 0.5, 'Erro quadrático = {}'.format(np.format_float_scientific(Erro, 2)), dict(size=12))
    plt.savefig('{}.png'.format('plots/exataXcalculada' + Name))
    plt.show()
    # Second figure: the pointwise error (exact - computed).
    plt.clf()
    plt.plot(xspace, vector - sol)
    plt.axhline(0, color='black', lw=1)
    plt.xlim(0, 1)
    plt.legend(['Erro'])
    plt.ylabel('Diference in temperature')
    plt.xlabel('Position')
    plt.suptitle('Diferença entre solução exata e calculada (erro ponto a ponto), ' + Name)
    plt.savefig('{}.png'.format('plots/erro' + Name))
    plt.show()
def plot_barra(Name, resp, exata, plist):
    '''Bar chart of computed vs. exact source intensities, one bar pair per
    source position; saved under plots/.'''
    plt.clf()
    fig = plt.figure(frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])
    X = np.array(plist)
    # Bar width; the pair is offset by half a width on each side of p.
    espessura = 0.014
    ax.set_xlim(0, 1)
    ax.bar(X - espessura / 2, resp, width=espessura)
    fig.add_axes(ax)
    ax.bar(X + espessura / 2, exata, width=espessura)
    ax.legend(labels=['Intensidade calculada', 'Intensidade exata'])
    ax.set_title('Intensidade calculada e exata, {}'.format(Name))
    ax.set_xlabel('Posição na barra')
    ax.set_ylabel('')
    fig.add_axes(ax)
    fig.savefig('{}.png'.format('plots/barras' + Name), bbox_inches='tight', pad_inches=0)
    fig.show()
def plot_serie_barra(Name, resps, exata, plist):
    '''For each source, plot how the fitted intensity evolves with mesh
    refinement (bars) against a line with the exact answer; one figure per
    source, saved under plots/.'''
    width = 0.35
    # Stack the per-N intensity vectors into a (num_N, num_sources) matrix.
    matrix = resps[0]
    for i in range(1, len(resps)):
        matrix = np.vstack((matrix, resps[i]))
    for i in range(len(plist)):
        plt.clf()
        # NOTE(review): reshape(5) hard-codes the five mesh sizes used by
        # TodosTestes, as do the tick labels below.
        data = matrix[:, i:i + 1].reshape(5)
        data = pd.DataFrame({
            'Intensidade calculada': data,
            'Intensidade exata': np.ones(len(data)) * exata[i]
        })
        my_colors = list(
            islice(cycle(['darkturquoise', 'deepskyblue', 'darkcyan', 'lightseagreen', 'c']), None, len(data)))
        data['Intensidade calculada'].plot(kind='bar', width=width, color=my_colors,
                                           title='Evolução da intensidade com N, {}, P = {}'.format(Name, plist[i]), legend=True)
        data['Intensidade exata'].plot()
        ax = plt.gca()
        ax.set_xticklabels(('128', '256', '512', '1024', '2048'))
        # ax.set_xticklabels(('128', '256', '512'))
        ax.set_xlabel("N", fontsize=12)
        ax.set_ylabel("Intensidade da fonte", fontsize=12)
        ax.legend(labels=['Intensidade exata', 'Intensidade calculada'], loc='lower left')
        # plt.show()
        plt.savefig('{}.png'.format('plots/barras_pp' + Name + 'P=' + str(plist[i])))
def plot_serie_erro(erroD, erroC):
    '''Plot how the quadratic error of tests C and D evolves with the mesh
    size N; saved as plots/erroXn.png.'''
    xspace = np.array([128, 256, 512, 1024, 2048])
    plt.clf()
    plt.plot(xspace, erroD)
    plt.plot(xspace, erroC)
    plt.ylim(0, 0.12)
    plt.legend(['Erro Teste D', 'Erro Teste C'])
    plt.ylabel('Erro quadrático')
    plt.xlabel('N')
    plt.suptitle('Evolução do erro em função de N')
    plt.savefig('{}.png'.format('plots/erroXn'))
    plt.show()
if gerar_text:
    # Only used to generate the LaTeX results appendix for the report:
    # run every test so `string` accumulates all results, then dump it.
    TodosTestes()
    # Context manager guarantees the file is closed even if the write fails
    # (the original open()/write()/close() sequence had no such guarantee).
    with open('Resultados.txt', 'w') as f:
        f.write(string)
print('Tempo total de execução:',time.time() - a) | 35.014337 | 130 | 0.598935 |
d21b1464cc28148885fa841d352e7aa2876d128c | 1,877 | py | Python | src/TileCoder.py | dquail/CycleWorld | 2bc1833ce4b787b03cdbfaf27d0ed99af269f555 | [
"MIT"
] | 2 | 2017-09-18T21:55:20.000Z | 2018-03-16T20:20:19.000Z | src/TileCoder.py | dquail/CycleWorld | 2bc1833ce4b787b03cdbfaf27d0ed99af269f555 | [
"MIT"
] | 6 | 2017-02-08T07:27:19.000Z | 2017-02-22T22:38:30.000Z | src/TileCoder.py | dquail/CycleWorld | 2bc1833ce4b787b03cdbfaf27d0ed99af269f555 | [
"MIT"
] | 2 | 2017-05-19T21:06:38.000Z | 2019-03-05T01:46:58.000Z | import numpy
import random
random.seed(0)
from tiles import *
def tileCode(numTilings, vectorLength, value):
    """Return a binary feature vector with ones at the tile indexes that are
    active for `value` under the given tiling scheme."""
    active_indexes = tiles(numTilings, vectorLength, value)
    feature_vector = numpy.zeros(vectorLength)
    # Set every active index in one vectorized assignment.
    feature_vector[active_indexes] = 1
    return feature_vector
class TileCoder(object):
    """Static helpers that turn continuous values into binary tile-coded
    feature vectors via the external `tiles` function."""
    # Default tiling configuration shared by getFeatureVectorFromValues.
    numberOfTiles = 8
    numberOfTilings = 8
    numberOfActions = 2
    @staticmethod
    def getIndexes(numTilings, vectorLength, value):
        # Thin wrapper over tiles(); also echoes the vector length so callers
        # can build the vector without recomputing it.
        indexes = tiles(numTilings, vectorLength, value)
        return indexes, vectorLength
    @staticmethod
    def getVectorFromIndexes(indexes, vectorLength):
        # Expand a list of active indexes into a dense 0/1 vector.
        featureVector = numpy.zeros(vectorLength)
        for idx in indexes:
            featureVector[idx] = 1
        return featureVector
    @staticmethod
    def getFeatureVectorFromValues(value, numTilings = numberOfTilings, numTiles = numberOfTiles):
        # Vector length grows as numTilings * numTiles ** len(value).
        vectorLength = numTilings * numpy.power(numTiles, len(value))
        indexes, l = TileCoder.getIndexes(numTilings, vectorLength, value)
        featureVector = TileCoder.getVectorFromIndexes(indexes, vectorLength)
        return featureVector
"""
@staticmethod
def getFeatureActionVectorFromValuesAndAction(value, action, numTilings = numberOfTilings, numTiles = numberOfTiles, numActions = numberOfActions):
vectorLength = numTilings * numpy.power(numTiles, len(value)) * numActions
featureVector = TileCoder.getFeatureVectorFromValues(value, numTilings, numTiles)
featureActionVector = numpy.zeros(vectorLength)
if (action == 1):
#move left
featureActionVector[0:len(featureVector)] = featureVector
if (action == 2):
#move right
featureActionVector[len(featureVector):] = featureVector
return featureVector
""" | 34.759259 | 151 | 0.693127 |
b735ff0785cb9f592f1e4e2a14308a6b0c427029 | 873 | py | Python | qiskit/ignis/mitigation/expval/__init__.py | paulineollitrault/qiskit-ignis | 99f24ea6533cd284be4c44a48d43e54f62f05674 | [
"Apache-2.0"
] | 182 | 2019-02-19T22:52:42.000Z | 2022-02-28T05:48:07.000Z | qiskit/ignis/mitigation/expval/__init__.py | paulineollitrault/qiskit-ignis | 99f24ea6533cd284be4c44a48d43e54f62f05674 | [
"Apache-2.0"
] | 384 | 2019-02-19T21:30:18.000Z | 2021-12-02T21:13:34.000Z | qiskit/ignis/mitigation/expval/__init__.py | paulineollitrault/qiskit-ignis | 99f24ea6533cd284be4c44a48d43e54f62f05674 | [
"Apache-2.0"
] | 203 | 2019-02-19T21:06:27.000Z | 2022-03-02T14:16:50.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Expectation value measurement error mitigation module
"""
from .utils import expectation_value
from .circuits import expval_meas_mitigator_circuits
from .fitter import ExpvalMeasMitigatorFitter
from .complete_mitigator import CompleteExpvalMeasMitigator
from .tensored_mitigator import TensoredExpvalMeasMitigator
from .ctmp_mitigator import CTMPExpvalMeasMitigator
| 36.375 | 77 | 0.799542 |
d296b24eaeccbb9d310b9cdbd26d8de59e8e4417 | 400 | py | Python | book_tools/format/mimetype.py | Aladex/sopds-fb2sax-sqlalchemy | d483b1591d44a1c08170d4e8e1d797cc32e2899a | [
"MIT"
] | 10 | 2015-02-09T14:23:42.000Z | 2022-02-17T16:26:25.000Z | book_tools/format/mimetype.py | Aladex/sopds-fb2sax-sqlalchemy | d483b1591d44a1c08170d4e8e1d797cc32e2899a | [
"MIT"
] | 2 | 2021-03-31T19:50:29.000Z | 2021-12-13T20:38:48.000Z | book_tools/format/mimetype.py | Aladex/sopds-fb2sax-sqlalchemy | d483b1591d44a1c08170d4e8e1d797cc32e2899a | [
"MIT"
] | 5 | 2016-11-27T12:06:59.000Z | 2021-07-27T08:10:03.000Z | class Mimetype:
OCTET_STREAM = 'application/octet-stream'
XML = 'application/xml'
ZIP = 'application/zip'
EPUB = 'application/epub+zip'
FB2 = 'application/fb2+xml'
FB2_ZIP = 'application/fb2+zip'
PDF = 'application/pdf'
MSWORD = 'application/msword'
MOBI = 'application/x-mobipocket-ebook'
DJVU = 'image/vnd.djvu'
TEXT = 'text/plain'
RTF = 'text/rtf'
| 26.666667 | 45 | 0.6425 |
16ddbe9068e60b8ac5bcb60e2664b940075237ba | 578 | py | Python | screengrab.py | tillmanp/GTAV_OpenCV_Automation | a74ead4e1f8e90c1675d4859ba3a6578fb2e064d | [
"MIT"
] | null | null | null | screengrab.py | tillmanp/GTAV_OpenCV_Automation | a74ead4e1f8e90c1675d4859ba3a6578fb2e064d | [
"MIT"
] | null | null | null | screengrab.py | tillmanp/GTAV_OpenCV_Automation | a74ead4e1f8e90c1675d4859ba3a6578fb2e064d | [
"MIT"
] | null | null | null | from PIL import ImageGrab
import numpy as np
import cv2
import time
import pyautogui ##workaround for window scaling problem
last_time = time.time()
while(True):
img = ImageGrab.grab(bbox=(10,40,1920,1120)) #bbox specifies specific region (bbox= x,y,width,height)
img_np = np.array(img)
img = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
print('loop took {} seconds'.format(time.time()-last_time))
last_time = time.time()
cv2.imshow('GTAV_OpenCV_Output', np.array(img))
cv2.waitKey(25) & 0xFF == ord('q')
cv2.destroyAllWindows() | 30.421053 | 106 | 0.683391 |
cefc88d377e1bfa5f293c7602653afc2f5ce381a | 54 | py | Python | carla/models/catalog/Linear_TORCH/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 140 | 2021-08-03T21:53:32.000Z | 2022-03-20T08:52:02.000Z | carla/models/catalog/Linear_TORCH/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 54 | 2021-03-07T18:22:16.000Z | 2021-08-03T12:06:31.000Z | carla/models/catalog/Linear_TORCH/__init__.py | jayanthyetukuri/CARLA | c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950 | [
"MIT"
] | 16 | 2021-08-23T12:14:58.000Z | 2022-03-01T00:52:58.000Z | # flake8: noqa
from .model_linear import LinearModel
| 13.5 | 37 | 0.796296 |
a8cc7c5a2829b63a0cf3134b4ce569f103d10164 | 1,384 | py | Python | lib/csv2dict.py | alfa871212/shor_paper | 09d3bf9d601d16cd369809e020c093de7889c3d4 | [
"MIT"
] | 3 | 2020-09-04T15:53:56.000Z | 2021-11-15T11:22:21.000Z | lib/csv2dict.py | alfa871212/shor_paper | 09d3bf9d601d16cd369809e020c093de7889c3d4 | [
"MIT"
] | null | null | null | lib/csv2dict.py | alfa871212/shor_paper | 09d3bf9d601d16cd369809e020c093de7889c3d4 | [
"MIT"
] | null | null | null | import argparse
import csv
import math
from qiskit.visualization import plot_histogram
def process_command():
    """Parse the CLI: a required run mode (--normal/--sequential), a required
    -N value, and either a specific -a or --all."""
    parser = argparse.ArgumentParser()
    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument('--normal', '--nor', action='store_true')
    mode_group.add_argument('--sequential', '--seq', action='store_true')
    parser.add_argument('-N', metavar='N', type=str, required=True)
    target_group = parser.add_mutually_exclusive_group(required=True)
    target_group.add_argument('-a', metavar='a', type=str)
    target_group.add_argument('--all', action='store_true')
    return parser.parse_args()
def plot_from_csv(path, N, a):
    """Read the measurement counts saved for (N, a) from `<path><N>_<a>.csv`
    and save them as a qiskit histogram image next to it."""
    filename = path + N + '_' + str(a) + '.csv'
    with open(filename, newline='') as f:
        reader = csv.reader(f)
        # Rows are (bitstring, count) pairs; dict() turns them into the
        # mapping plot_histogram expects.
        data = dict(list(reader))
    # print(data)
    plot_histogram(data, figsize=(10, 10), title=f'N={N} a={a} result(Nor)').savefig(
        path + f'/{N}_{a}_res.png')
if __name__ == "__main__":
    args = process_command()
    # Pick the result directory for the chosen factoring method.
    if args.normal:
        path = './normal/result/'
    elif args.sequential:
        path = './sequential/result/'
    else:
        raise Exception("Type not defined")
    N = args.N
    if args.all:
        # Plot every base a in [2, N) that is coprime with N (the valid
        # bases for Shor's order finding).
        for i in range(2, int(N)):
            if math.gcd(i, int(N)) == 1:
                plot_from_csv(path, N, i)
    else:
        plot_from_csv(path, N, args.a)
| 29.446809 | 85 | 0.626445 |
344665cf6113a0a1c43decd003273672e0c31305 | 4,505 | py | Python | backend/project/accounts/api/views.py | Kamil732/DK-team | d99482111761f8c12b486246c539542f4f41c6c3 | [
"CC0-1.0"
] | 4 | 2021-07-05T14:26:52.000Z | 2021-08-25T15:54:39.000Z | backend/project/accounts/api/views.py | Kamil732/beauty-salon | d99482111761f8c12b486246c539542f4f41c6c3 | [
"CC0-1.0"
] | null | null | null | backend/project/accounts/api/views.py | Kamil732/beauty-salon | d99482111761f8c12b486246c539542f4f41c6c3 | [
"CC0-1.0"
] | null | null | null | from django.db.models import Q
from django.db.models import Value as V
from django.db.models.functions import Concat
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
from django.contrib import auth
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions, generics, status, mixins
from rest_framework.exceptions import ValidationError
from . import serializers
from . import pagination
from server.permissions import IsAdminOrReadOnly, IsAdmin
from accounts.models import CustomerImage, Customer, Barber
class CurrentAccountAPIView(generics.RetrieveAPIView):
    # Returns the authenticated user's own account data.
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = serializers.AccountSerializer
    def get_object(self):
        # Serialize the requesting user instead of looking up by URL kwarg.
        return self.request.user
@method_decorator(csrf_protect, name='dispatch')
class RegisterAPIView(generics.CreateAPIView):
    # CSRF-protected endpoint that creates a new user account.
    serializer_class = serializers.RegisterSerializer
@method_decorator(csrf_protect, name='dispatch')
class LoginAPIView(APIView):
    """Session-based login endpoint (CSRF protected)."""

    def post(self, request, format=None):
        credentials = request.data
        user = auth.authenticate(
            email=credentials['email'],
            password=credentials['password'],
        )
        # Guard clause: bad credentials -> 400 with a Polish detail message.
        if user is None:
            raise ValidationError({'detail': 'Email lub hasło jest niepoprawne'})
        auth.login(request, user)
        payload = {
            'message': 'Pomyślnie zalogowano',
            'user': serializers.AccountSerializer(user).data,
        }
        return Response(payload, status=status.HTTP_200_OK)
@method_decorator(csrf_protect, name='dispatch')
class UpdateBarberAPIView(generics.UpdateAPIView):
    """Admin-only PUT/PATCH of a barber, addressed by its slug."""
    permission_classes = (IsAdmin,)
    serializer_class = serializers.BarberSerializer
    queryset = Barber.objects.all()
    # URL kwarg 'barber_slug' is matched against the model's 'slug' field.
    lookup_field = 'slug'
    lookup_url_kwarg = 'barber_slug'
@method_decorator(csrf_protect, name='dispatch')
class LogoutAPIView(APIView):
    """Terminate the authenticated user's session."""
    permission_classes = (permissions.IsAuthenticated,)

    def post(self, request, format=None):
        auth.logout(request)
        # Polish: "Successfully logged out".
        return Response({'message': 'Pomyślnie wylogowano'})
@method_decorator(csrf_protect, name='create')
class CustomerImageListAPIView(generics.ListCreateAPIView):
    """List customer images (paginated, newest first) and bulk-create them.

    The client submits flat form keys of the shape ``<field>-<index>``
    (e.g. ``image-0``, ``title-0``, ``image-1``); ``create`` regroups them
    into one dict per index before handing the list to the serializer.
    """
    permission_classes = (IsAdminOrReadOnly,)
    queryset = CustomerImage.objects.order_by('-id')
    serializer_class = serializers.CustomerImageSerializer
    pagination_class = pagination.CustomerImagesPagination

    def create(self, request, *args, **kwargs):
        # Group values by their numeric suffix.  The previous version used
        # the suffix directly as a LIST index (data[field_id]), which raised
        # IndexError (or grouped wrongly) whenever the suffixes were not a
        # dense 0..n-1 sequence in first-seen order.
        grouped = {}  # index -> {field: value}; dicts keep insertion order
        for key, value in request.data.items():
            field, _, index_part = key.partition('-')
            grouped.setdefault(int(index_part), {})[field] = value
        data = list(grouped.values())

        serializer = self.get_serializer(data=data, many=True)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@method_decorator(csrf_protect, name='dispatch')
class CustomerImageDetailAPIView(mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):
    """Admin-only partial update / delete of a single customer image."""
    permission_classes = (IsAdmin,)
    queryset = CustomerImage.objects.all()
    lookup_field = 'id'
    lookup_url_kwarg = 'customer_image_id'

    def patch(self, request, *args, **kwargs):
        # Only PATCH is exposed, so every update is a partial update.
        return self.partial_update(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        return self.destroy(request, *args, **kwargs)
class CustomerListAPIView(generics.ListCreateAPIView):
    """Prefix search over customers by full/first/last name (max 10 rows)."""
    # permission_classes = (IsAdmin,)
    serializer_class = serializers.CustomerSerializer

    def get_queryset(self):
        term = self.request.query_params.get('search', '')
        # Case-insensitive prefix match on any of the three name forms.
        name_match = (
            Q(full_name__istartswith=term)
            | Q(first_name__istartswith=term)
            | Q(last_name__istartswith=term)
        )
        customers = Customer.objects.annotate(
            full_name=Concat('first_name', V(' '), 'last_name')
        )
        return customers.filter(name_match)[:10]
class BarberListAPIView(generics.ListCreateAPIView):
    """List/create barbers; related service data is prefetched to avoid N+1 queries."""
    # permission_classes = (IsAdminOrReadOnly,)
    serializer_class = serializers.BarberSerializer
    queryset = Barber.objects.prefetch_related('service_barber_data')
| 34.653846 | 144 | 0.717869 |
cb931ffc49115dab170170a22b7f95b00fffaa80 | 1,696 | py | Python | losses/_3dmm.py | human-analysis/3dfacefill | 1f85d72731690730eef03871d3afecf9d4d307b4 | [
"MIT"
] | 4 | 2022-01-01T08:53:20.000Z | 2022-01-26T18:43:36.000Z | losses/_3dmm.py | human-analysis/3dfacefill | 1f85d72731690730eef03871d3afecf9d4d307b4 | [
"MIT"
] | 1 | 2022-03-07T11:34:59.000Z | 2022-03-08T02:41:11.000Z | losses/_3dmm.py | human-analysis/3dfacefill | 1f85d72731690730eef03871d3afecf9d4d307b4 | [
"MIT"
] | 3 | 2022-01-01T08:53:22.000Z | 2022-03-16T12:40:40.000Z | # regression.py
import torch
from torch import nn
import numpy as np
__all__ = ['_3DMM']
class _3DMM(object):
    """Reconstruction losses for 3DMM fitting.

    Provides a masked / confidence-weighted norm loss (L1, L2 or L2,1) and
    the matching weighting for adversarial loss maps; every public method
    reduces to a scalar mean.
    """

    def __init__(self):
        super(_3DMM, self).__init__()
        # Element-wise criteria; all reduction is done explicitly below.
        self.l1_loss = nn.L1Loss(reduction='none')
        self.mse_loss = nn.MSELoss(reduction='none')
        self.eps = 1e-8

    def norm_loss(self, predictions, labels, mask=None, conf=None, loss_type='l1', reduce_mean=True, p=1, viz=None):
        """Return the scalar norm loss between predictions and labels.

        loss_type selects element-wise L1/L2 or the channel-wise L2,1 norm;
        p optionally raises the loss to a power; conf applies the
        confidence weighting (loss * exp(-conf) + conf) / 2; mask zeroes
        ignored positions and renormalizes by the mask density.
        (reduce_mean and viz are accepted for API compatibility but unused.)
        """
        assert (loss_type in ['l1', 'l2', 'l2,1']), "Suporting loss type is ['l1', 'l2', 'l2,1']"

        if loss_type == 'l2,1':
            residual = predictions - labels
            loss = torch.sqrt(torch.sum((residual ** 2) + 1e-16, dim=1, keepdim=True))
        elif loss_type == 'l2':
            loss = self.mse_loss(predictions, labels)
        else:
            loss = self.l1_loss(predictions, labels)

        if p != 1:
            loss = torch.pow(loss, p)

        return self._weight(loss, mask, conf).mean()

    def gan_loss(self, loss_map, mask=None, conf=None):
        """Apply the same confidence/mask weighting to a GAN loss map."""
        return self._weight(loss_map, mask, conf).mean()

    def _weight(self, loss, mask, conf):
        # Shared confidence + mask weighting used by both public losses.
        if conf is not None:
            loss = loss.mean(dim=1, keepdim=True)
            loss = (loss * torch.exp(-conf) + conf) / 2
        if mask is not None:
            density = np.prod([*mask.shape]) / (mask.sum() + self.eps)
            loss = loss * mask * density
        return loss
| 32.615385 | 116 | 0.552476 |
b4b4d17a71f9ecf2bfc76e1af49fcdb79fdb2c64 | 322 | py | Python | h2a2b.py | EranGoldman/h2a2b | 87a6d7f52d5431f47c1696751836ada667e6a325 | [
"MIT"
] | null | null | null | h2a2b.py | EranGoldman/h2a2b | 87a6d7f52d5431f47c1696751836ada667e6a325 | [
"MIT"
] | null | null | null | h2a2b.py | EranGoldman/h2a2b | 87a6d7f52d5431f47c1696751836ada667e6a325 | [
"MIT"
] | null | null | null | import sys, binascii,base64,re
# Decode a hex-encoded base64 payload: argv[1] (hex) -> unhexlify -> b64decode.
if len(sys.argv) == 2:
    hex_str = sys.argv[1]
    # Validate up front.  The original per-character check wrongly rejected
    # the upper-case digits A-F that unhexlify accepts, and let odd-length
    # input crash inside unhexlify with an unhandled binascii.Error.
    if re.fullmatch(r'[0-9a-fA-F]+', hex_str) is None or len(hex_str) % 2 != 0:
        print(hex_str + " is not hex")
        sys.exit()
    print(base64.b64decode(binascii.unhexlify(hex_str)))
else:
    print("Please enter enigma string")
| 29.272727 | 52 | 0.602484 |
450c4e3eb9ecb6528fa8f9ae0640a138b18d069d | 67,602 | py | Python | IntelFsp2Pkg/Tools/GenCfgOpt.py | tvbroughton/ModernFW | 83f997e58dcab4238dc537f38730d98bf5dad0df | [
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | 122 | 2019-05-14T02:30:24.000Z | 2022-03-16T08:45:15.000Z | IntelFsp2Pkg/Tools/GenCfgOpt.py | tvbroughton/ModernFW | 83f997e58dcab4238dc537f38730d98bf5dad0df | [
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | 6 | 2019-05-15T03:46:22.000Z | 2019-06-10T00:21:48.000Z | IntelFsp2Pkg/Tools/GenCfgOpt.py | tvbroughton/ModernFW | 83f997e58dcab4238dc537f38730d98bf5dad0df | [
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | 20 | 2019-05-14T01:27:02.000Z | 2021-08-31T08:39:01.000Z | ## @ GenCfgOpt.py
#
# Copyright (c) 2014 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import struct
from datetime import date
# Generated file copyright header
__copyright_txt__ = """## @file
#
# THIS IS AUTO-GENERATED FILE BY BUILD TOOLS AND PLEASE DO NOT MAKE MODIFICATION.
#
# This file lists all VPD informations for a platform collected by build.exe.
#
# Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
"""
__copyright_bsf__ = """/** @file
Boot Setting File for Platform Configuration.
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
This file is automatically generated. Please do NOT modify !!!
**/
"""
__copyright_h__ = """/** @file
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
This file is automatically generated. Please do NOT modify !!!
**/
"""
BuildOptionPcd = []
class CLogicalExpression:
    """Recursive-descent parser/evaluator for DSC '!if' expressions.

    Grammar, loosest binding first: OR/XOR -> AND -> comparisons
    (<, <=, >, >=, ==, !=) -> parentheses / NOT / values.  Intermediate
    results are carried as decimal strings normalized to "0"/"1"; operands
    may be hex, signed decimal, or bare words (bare words are only
    meaningful for ==/!= string comparison).
    """
    def __init__(self):
        # Parse cursor into self.string.
        self.index = 0
        # Expression under evaluation; set by evaluateExpress().
        self.string = ''

    def errExit(self, err = ''):
        # Print the expression with a caret under the failing position,
        # then abort the whole tool run.
        print "ERROR: Express parsing for:"
        print "       %s" % self.string
        print "       %s^" % (' ' * self.index)
        if err:
            print "INFO : %s" % err
        raise SystemExit

    def getNonNumber (self, n1, n2):
        # Return the first operand that is not a plain number, or None when
        # both are numeric (i.e. a numeric comparison is legal).
        if not n1.isdigit():
            return n1
        if not n2.isdigit():
            return n2
        return None

    def getCurr(self, lens = 1):
        # Peek 'lens' characters at the cursor without consuming them;
        # lens == -1 means "the rest of the string".
        try:
            if lens == -1:
                return self.string[self.index :]
            else:
                if self.index + lens > len(self.string):
                    lens = len(self.string) - self.index
                return self.string[self.index : self.index + lens]
        except Exception:
            return ''

    def isLast(self):
        # True when the cursor has consumed the whole expression.
        return self.index == len(self.string)

    def moveNext(self, len = 1):
        # Advance the cursor ('len' shadows the builtin; kept for interface).
        self.index += len

    def skipSpace(self):
        # Consume any run of spaces/tabs at the cursor.
        while not self.isLast():
            if self.getCurr() in ' \t':
                self.moveNext()
            else:
                return

    def normNumber (self, val):
        # Collapse any value to boolean 0/1 semantics.
        return True if val else False

    def getNumber(self, var):
        # Parse hex (0x...) or signed decimal; returns None when 'var'
        # is not a number at all.
        var = var.strip()
        if re.match('^0x[a-fA-F0-9]+$', var):
            value = int(var, 16)
        elif re.match('^[+-]?\d+$', var):
            value = int(var, 10)
        else:
            value = None
        return value

    def parseValue(self):
        # Read one operand: a run of word characters and dots.  Numbers are
        # canonicalized to decimal strings; other tokens are kept verbatim.
        self.skipSpace()
        var = ''
        while not self.isLast():
            char = self.getCurr()
            if re.match('^[\w.]', char):
                var += char
                self.moveNext()
            else:
                break
        val = self.getNumber(var)
        if val is None:
            value = var
        else:
            value = "%d" % val
        return value

    def parseSingleOp(self):
        # Unary 'NOT <braced-expr>' or a plain value.
        self.skipSpace()
        if re.match('^NOT\W', self.getCurr(-1)):
            self.moveNext(3)
            op = self.parseBrace()
            val = self.getNumber (op)
            if val is None:
                self.errExit ("'%s' is not a number" % op)
            return "%d" % (not self.normNumber(int(op)))
        else:
            return self.parseValue()

    def parseBrace(self):
        # Parenthesized sub-expression, else a single operand.
        self.skipSpace()
        char = self.getCurr()
        if char == '(':
            self.moveNext()
            value = self.parseExpr()
            self.skipSpace()
            if self.getCurr() != ')':
                self.errExit ("Expecting closing brace or operator")
            self.moveNext()
            return value
        else:
            value = self.parseSingleOp()
            return value

    def parseCompare(self):
        # Handle chained comparison operators.  <, <=, >, >= require numeric
        # operands; == and != fall back to quoted string comparison when
        # either side is non-numeric.
        value = self.parseBrace()
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char in ['<', '>']:
                self.moveNext()
                next = self.getCurr()
                if next == '=':
                    op = char + next
                    self.moveNext()
                else:
                    op = char
                result = self.parseBrace()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(eval (value + op + result))
                else:
                    self.errExit ("'%s' is not a valid number for comparision" % test)
            elif char in ['=', '!']:
                op = self.getCurr(2)
                if op in ['==', '!=']:
                    self.moveNext(2)
                    result = self.parseBrace()
                    test = self.getNonNumber(result, value)
                    if test is None:
                        value = "%d" % self.normNumber((eval (value + op + result)))
                    else:
                        # String equality: wrap both sides in quotes for eval.
                        value = "%d" % self.normNumber(eval ("'" + value + "'" + op + "'" + result + "'"))
                else:
                    break
            else:
                break
        return value

    def parseAnd(self):
        # Left-associative chain of AND; operands must be numeric.
        value = self.parseCompare()
        while True:
            self.skipSpace()
            if re.match('^AND\W', self.getCurr(-1)):
                self.moveNext(3)
                result = self.parseCompare()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(int(value) & int(result))
                else:
                    self.errExit ("'%s' is not a valid op number for AND" % test)
            else:
                break
        return value

    def parseOrXor(self):
        # Left-associative chain of OR/XOR (same precedence level).
        value = self.parseAnd()
        op = None
        while True:
            self.skipSpace()
            op = None
            if re.match('^XOR\W', self.getCurr(-1)):
                self.moveNext(3)
                op = '^'
            elif re.match('^OR\W', self.getCurr(-1)):
                self.moveNext(2)
                op = '|'
            else:
                break
            if op:
                result = self.parseAnd()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(eval (value + op + result))
                else:
                    self.errExit ("'%s' is not a valid op number for XOR/OR" % test)
        return value

    def parseExpr(self):
        # Entry point of the grammar proper.
        return self.parseOrXor()

    def getResult(self):
        # Parse the whole string; any leftover character is an error, and
        # the final value must be numeric.
        value = self.parseExpr()
        self.skipSpace()
        if not self.isLast():
            self.errExit ("Unexpected character found '%s'" % self.getCurr())
        test = self.getNumber(value)
        if test is None:
            self.errExit ("Result '%s' is not a number" % value)
        return int(value)

    def evaluateExpress (self, Expr):
        # Public entry point: evaluate Expr and return True/False.
        self.index = 0
        self.string = Expr
        if self.getResult():
            Result = True
        else:
            Result = False
        return Result
class CGenCfgOpt:
    def __init__(self):
        # Verbose tracing of macro/PCD/expression evaluation when True.
        self.Debug = False
        # Human-readable description of the last failure ('' when OK).
        self.Error = ''

        # BSF boilerplate emitted into every generated Boot Setting File.
        # NOTE(review): internal whitespace restored to the upstream layout;
        # the dataset dump collapsed it — verify against the original file.
        self._GlobalDataDef = """
GlobalDataDef
    SKUID = 0, "DEFAULT"
EndGlobalData

"""
        self._BuidinOptionTxt = """
List &EN_DIS
    Selection 0x1 , "Enabled"
    Selection 0x0 , "Disabled"
EndList

"""
        # Recognized keys of '!BSF'/'@Bsf' and '!HDR' DSC annotations.
        self._BsfKeyList = ['FIND','NAME','HELP','TYPE','PAGE','OPTION','ORDER']
        self._HdrKeyList = ['HEADER','STRUCT', 'EMBED', 'COMMENT']
        # Built-in option list names usable directly in '!BSF OPTION'.
        self._BuidinOption = {'$EN_DIS' : 'EN_DIS'}

        # State populated by ParseDscFile().
        self._MacroDict = {}
        self._PcdsDict = {}
        self._CfgBlkDict = {}
        self._CfgPageDict = {}
        self._CfgItemList = []
        self._DscFile = ''
        self._FvDir = ''
        self._MapVer = 0
    def ParseMacros (self, MacroDefStr):
        """Build self._MacroDict from '-D' command-line fragments.

        Accepts both fused and split forms, e.g.
        ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build'].
        Returns 1 when no macro was parsed at all, else 0.
        """
        self._MacroDict = {}
        IsExpression = False
        for Macro in MacroDefStr:
            if Macro.startswith('-D'):
                IsExpression = True
                if len(Macro) > 2:
                    # '-DNAME=VALUE' fused form: strip the '-D' prefix.
                    Macro = Macro[2:]
                else :
                    # Bare '-D': the definition is the next list element.
                    continue
            if IsExpression:
                IsExpression = False
                Match = re.match("(\w+)=(.+)", Macro)
                if Match:
                    self._MacroDict[Match.group(1)] = Match.group(2)
                else:
                    # Value-less macro, e.g. '-DDEBUG' -> defined as ''.
                    Match = re.match("(\w+)", Macro)
                    if Match:
                        self._MacroDict[Match.group(1)] = ''
        if len(self._MacroDict) == 0:
            Error = 1
        else:
            Error = 0
            if self.Debug:
                print "INFO : Macro dictionary:"
                for Each in self._MacroDict:
                    print "       $(%s) = [ %s ]" % (Each , self._MacroDict[Each])
        return Error
    def EvaulateIfdef (self, Macro):
        # True when Macro is defined (supports '!ifdef'/'!ifndef').
        # NOTE(review): method name misspells 'Evaluate' but is the call
        # interface used by ParseDscFile, so it is kept as-is.
        Result = Macro in self._MacroDict
        if self.Debug:
            print "INFO : Eval Ifdef [%s] : %s" % (Macro, Result)
        return Result
    def ExpandMacros (self, Input):
        """Replace every '$(NAME)' in Input with its macro value.

        Undefined macros are replaced by their bare name (the '$()' wrapper
        is stripped) and a warning is printed in debug mode.
        """
        Line = Input
        Match = re.findall("\$\(\w+\)", Input)
        if Match:
            for Each in Match:
                Variable = Each[2:-1]
                if Variable in self._MacroDict:
                    Line = Line.replace(Each, self._MacroDict[Variable])
                else:
                    if self.Debug:
                        print "WARN : %s is not defined" % Each
                    Line = Line.replace(Each, Each[2:-1])
        return Line
    def ExpandPcds (self, Input):
        """Replace every 'Space.PcdName' token in Input with its PCD value.

        Unknown PCD names are left untouched (a warning is printed in
        debug mode).
        """
        Line = Input
        Match = re.findall("(\w+\.\w+)", Input)
        if Match:
            for PcdName in Match:
                if PcdName in self._PcdsDict:
                    Line = Line.replace(PcdName, self._PcdsDict[PcdName])
                else:
                    if self.Debug:
                        print "WARN : %s is not defined" % PcdName
        return Line
    def EvaluateExpress (self, Expr):
        # Evaluate a DSC conditional: substitute PCDs, then macros, then
        # hand the textual expression to the CLogicalExpression parser.
        ExpExpr = self.ExpandPcds(Expr)
        ExpExpr = self.ExpandMacros(ExpExpr)
        LogExpr = CLogicalExpression()
        Result = LogExpr.evaluateExpress (ExpExpr)
        if self.Debug:
            print "INFO : Eval Express [%s] : %s" % (Expr, Result)
        return Result
def FormatListValue(self, ConfigDict):
Struct = ConfigDict['struct']
if Struct not in ['UINT8','UINT16','UINT32','UINT64']:
return
dataarray = []
binlist = ConfigDict['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
dataarray.append(value)
unit = int(Struct[4:]) / 8
if int(ConfigDict['length']) != unit * len(dataarray):
raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname'])
bytearray = []
for each in dataarray:
value = each
for loop in xrange(unit):
bytearray.append("0x%02X" % (value & 0xFF))
value = value >> 8
newvalue = '{' + ','.join(bytearray) + '}'
ConfigDict['value'] = newvalue
return ""
    def ParseDscFile (self, DscFile, FvDir):
        """Parse a platform DSC file and collect UPD/VPD configuration items.

        Populates self._CfgItemList / _CfgPageDict / _CfgBlkDict (plus the
        macro and PCD dictionaries) from the [Defines], PCD and
        [PcdsDynamicVpd.Upd] sections.  Honors !if/!ifdef/!ifndef/!elseif/
        !else/!endif conditionals and splices !include files in place.
        Offsets are either hard-coded ('0x...') or auto-aligned ('*'), but
        the two modes cannot be mixed.  Returns 0; hard errors abort via
        SystemExit.
        """
        Hardcode = False
        AutoAlign = False
        self._CfgItemList = []
        self._CfgPageDict = {}
        self._CfgBlkDict = {}
        self._DscFile = DscFile
        self._FvDir = FvDir

        IsDefSect = False
        IsPcdSect = False
        IsUpdSect = False
        IsVpdSect = False

        # Conditional-directive state: IfStack holds active truth values,
        # ElifStack counts extra frames pushed by '!elseif' chains.
        IfStack = []
        ElifStack = []
        Error = 0
        ConfigDict = {}

        DscFd = open(DscFile, "r")
        DscLines = DscFd.readlines()
        DscFd.close()

        MaxAlign = 32 #Default align to 32, but if there are 64 bit unit, align to 64
        SizeAlign = 0 #record the struct max align
        while len(DscLines):
            DscLine = DscLines.pop(0).strip()
            Handle = False
            Match = re.match("^\[(.+)\]", DscLine)
            if Match is not None:
                # Section header: reset section flags and select the new one.
                IsDefSect = False
                IsPcdSect = False
                IsVpdSect = False
                IsUpdSect = False
                if Match.group(1).lower() == "Defines".lower():
                    IsDefSect = True
                if (Match.group(1).lower() == "PcdsFeatureFlag".lower() or Match.group(1).lower() == "PcdsFixedAtBuild".lower()):
                    IsPcdSect = True
                elif Match.group(1).lower() == "PcdsDynamicVpd.Upd".lower():
                    # Entering the UPD section: start a fresh item template.
                    ConfigDict = {}
                    ConfigDict['header'] = 'ON'
                    ConfigDict['region'] = 'UPD'
                    ConfigDict['order'] = -1
                    ConfigDict['page'] = ''
                    ConfigDict['name'] = ''
                    ConfigDict['find'] = ''
                    ConfigDict['struct'] = ''
                    ConfigDict['embed'] = ''
                    ConfigDict['comment'] = ''
                    ConfigDict['subreg'] = []
                    IsUpdSect = True
                    Offset = 0
            else:
                # Inside a recognized section: process conditional and
                # include directives before normal content.
                if IsDefSect or IsPcdSect or IsUpdSect or IsVpdSect:
                    if re.match("^!else($|\s+#.+)", DscLine):
                        if IfStack:
                            IfStack[-1] = not IfStack[-1]
                        else:
                            print("ERROR: No paired '!if' found for '!else' for line '%s'" % DscLine)
                            raise SystemExit
                    elif re.match("^!endif($|\s+#.+)", DscLine):
                        if IfStack:
                            IfStack.pop()
                            # Pop the extra frames pushed by '!elseif's.
                            Level = ElifStack.pop()
                            if Level > 0:
                                del IfStack[-Level:]
                        else:
                            print("ERROR: No paired '!if' found for '!endif' for line '%s'" % DscLine)
                            raise SystemExit
                    else:
                        Result = False
                        Match = re.match("!(ifdef|ifndef)\s+(.+)", DscLine)
                        if Match:
                            Result = self.EvaulateIfdef (Match.group(2))
                            if Match.group(1) == 'ifndef':
                                Result = not Result
                            IfStack.append(Result)
                            ElifStack.append(0)
                        else:
                            # Strip trailing comment before expression eval.
                            Match = re.match("!(if|elseif)\s+(.+)", DscLine.split("#")[0])
                            if Match:
                                Result = self.EvaluateExpress(Match.group(2))
                                if Match.group(1) == "if":
                                    ElifStack.append(0)
                                    IfStack.append(Result)
                                else:   #elseif
                                    if IfStack:
                                        IfStack[-1] = not IfStack[-1]
                                        IfStack.append(Result)
                                        ElifStack[-1] = ElifStack[-1] + 1
                                    else:
                                        print("ERROR: No paired '!if' found for '!elif' for line '%s'" % DscLine)
                                        raise SystemExit
                            else:
                                # Plain content line: handle it only when all
                                # enclosing conditionals are true.
                                if IfStack:
                                    Handle = reduce(lambda x,y: x and y, IfStack)
                                else:
                                    Handle = True
                                if Handle:
                                    Match = re.match("!include\s+(.+)", DscLine)
                                    if Match:
                                        IncludeFilePath = Match.group(1)
                                        IncludeFilePath = self.ExpandMacros(IncludeFilePath)
                                        # Resolve against PACKAGES_PATH first.
                                        PackagesPath = os.getenv("PACKAGES_PATH")
                                        if PackagesPath:
                                            for PackagePath in PackagesPath.split(os.pathsep):
                                                IncludeFilePathAbs = os.path.join(os.path.normpath(PackagePath), os.path.normpath(IncludeFilePath))
                                                if os.path.exists(IncludeFilePathAbs):
                                                    IncludeDsc = open(IncludeFilePathAbs, "r")
                                                    break
                                        else:
                                            IncludeDsc = open(IncludeFilePath, "r")
                                        if IncludeDsc == None:
                                            print("ERROR: Cannot open file '%s'" % IncludeFilePath)
                                            raise SystemExit
                                        # Splice the included lines in place.
                                        NewDscLines = IncludeDsc.readlines()
                                        IncludeDsc.close()
                                        DscLines = NewDscLines + DscLines
                                        Offset = 0
                                    else:
                                        if DscLine.startswith('!'):
                                            print("ERROR: Unrecoginized directive for line '%s'" % DscLine)
                                            raise SystemExit
            if not Handle:
                continue

            if IsDefSect:
                #DEFINE UPD_TOOL_GUID = 8C3D856A-9BE6-468E-850A-24F7A8D38E09
                #DEFINE FSP_T_UPD_TOOL_GUID = 34686CA3-34F9-4901-B82A-BA630F0714C6
                #DEFINE FSP_M_UPD_TOOL_GUID = 39A250DB-E465-4DD1-A2AC-E2BD3C0E2385
                #DEFINE FSP_S_UPD_TOOL_GUID = CAE3605B-5B34-4C85-B3D7-27D54273C40F
                Match = re.match("^\s*(?:DEFINE\s+)*(\w+)\s*=\s*([-.\w]+)", DscLine)
                if Match:
                    self._MacroDict[Match.group(1)] = Match.group(2)
                    if self.Debug:
                        print "INFO : DEFINE %s = [ %s ]" % (Match.group(1), Match.group(2))
            elif IsPcdSect:
                #gSiPkgTokenSpaceGuid.PcdTxtEnable|FALSE
                #gSiPkgTokenSpaceGuid.PcdOverclockEnable|TRUE
                Match = re.match("^\s*([\w\.]+)\s*\|\s*(\w+)", DscLine)
                if Match:
                    self._PcdsDict[Match.group(1)] = Match.group(2)
                    if self.Debug:
                        print "INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2))
                # Command-line --pcd overrides win over DSC values.
                i = 0
                while i < len(BuildOptionPcd):
                    Match = re.match("\s*([\w\.]+)\s*\=\s*(\w+)", BuildOptionPcd[i])
                    if Match:
                        self._PcdsDict[Match.group(1)] = Match.group(2)
                    i += 1
            else:
                # UPD/VPD section: first pick up annotation comments that
                # describe the NEXT item, then try to match the item itself.
                Match = re.match("^\s*#\s+(!BSF|@Bsf|!HDR)\s+(.+)", DscLine)
                if Match:
                    Remaining = Match.group(2)
                    if Match.group(1) == '!BSF' or Match.group(1) == '@Bsf':
                        Match = re.match("(?:^|.+\s+)PAGES:{(.+?)}", Remaining)
                        if Match:
                            # !BSF PAGES:{HSW:"Haswell System Agent", LPT:"Lynx Point PCH"}
                            PageList = Match.group(1).split(',')
                            for Page in PageList:
                                Page = Page.strip()
                                Match = re.match("(\w+):\"(.+)\"", Page)
                                self._CfgPageDict[Match.group(1)] = Match.group(2)

                        Match = re.match("(?:^|.+\s+)BLOCK:{NAME:\"(.+)\"\s*,\s*VER:\"(.+)\"\s*}", Remaining)
                        if Match:
                            self._CfgBlkDict['name'] = Match.group(1)
                            self._CfgBlkDict['ver'] = Match.group(2)

                        for Key in self._BsfKeyList:
                            Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
                            if Match:
                                # A leading '+' appends to the previous value.
                                if Key in ['NAME', 'HELP', 'OPTION'] and Match.group(1).startswith('+'):
                                    ConfigDict[Key.lower()] += Match.group(1)[1:]
                                else:
                                    ConfigDict[Key.lower()] = Match.group(1)
                    else:
                        for Key in self._HdrKeyList:
                            Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
                            if Match:
                                ConfigDict[Key.lower()] = Match.group(1)

                Match = re.match("^\s*#\s+@Prompt\s+(.+)", DscLine)
                if Match:
                    ConfigDict['name'] = Match.group(1)

                Match = re.match("^\s*#\s*@ValidList\s*(.+)\s*\|\s*(.+)\s*\|\s*(.+)\s*", DscLine)
                if Match:
                    if Match.group(2).strip() in self._BuidinOption:
                        ConfigDict['option'] = Match.group(2).strip()
                    else:
                        # Build "value:text, value:text" option string.
                        OptionValueList = Match.group(2).split(',')
                        OptionStringList = Match.group(3).split(',')
                        Index = 0
                        for Option in OptionValueList:
                            Option = Option.strip()
                            ConfigDict['option'] = ConfigDict['option'] + str(Option) + ':' + OptionStringList[Index].strip()
                            Index += 1
                            if Index in range(len(OptionValueList)):
                                ConfigDict['option'] += ', '
                    ConfigDict['type'] = "Combo"

                Match = re.match("^\s*#\s*@ValidRange\s*(.+)\s*\|\s*(.+)\s*-\s*(.+)\s*", DscLine)
                if Match:
                    if "0x" in Match.group(2) or "0x" in Match.group(3):
                        ConfigDict['type'] = "EditNum, HEX, (%s,%s)" % (Match.group(2), Match.group(3))
                    else:
                        ConfigDict['type'] = "EditNum, DEC, (%s,%s)" % (Match.group(2), Match.group(3))

                Match = re.match("^\s*##\s+(.+)", DscLine)
                if Match:
                    ConfigDict['help'] = Match.group(1)

                # Check VPD/UPD
                if IsUpdSect:
                    # Space.Name | Offset('*' = auto) | Length | Value
                    Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)",DscLine)
                else:
                    Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+)(?:\s*\|\s*(.+))?", DscLine)
                if Match:
                    ConfigDict['space'] = Match.group(1)
                    ConfigDict['cname'] = Match.group(2)
                    if Match.group(3) != '*':
                        Hardcode = True
                        Offset = int (Match.group(3), 16)
                    else:
                        AutoAlign = True

                    if Hardcode and AutoAlign:
                        print("Hardcode and auto-align mixed mode is not supported by GenCfgOpt")
                        raise SystemExit
                    ConfigDict['offset'] = Offset
                    if ConfigDict['order'] == -1:
                        ConfigDict['order'] = ConfigDict['offset'] << 8
                    else:
                        # Explicit ORDER annotation 'Major.Minor' (both hex).
                        (Major, Minor) = ConfigDict['order'].split('.')
                        ConfigDict['order'] = (int (Major, 16) << 8 ) + int (Minor, 16)
                    if IsUpdSect:
                        Value = Match.group(5).strip()
                        if Match.group(4).startswith("0x"):
                            Length = int (Match.group(4), 16)
                        else :
                            Length = int (Match.group(4))
                        Offset += Length
                    else:
                        Value = Match.group(4)
                        if Value is None:
                            Value = ''
                        Value = Value.strip()
                        if '|' in Value:
                            Match = re.match("^.+\s*\|\s*(.+)", Value)
                            if Match:
                                Value = Match.group(1)
                        Length = -1

                    ConfigDict['length'] = Length
                    # Resolve a '$(MACRO)' default through the macro table.
                    Match = re.match("\$\((\w+)\)", Value)
                    if Match:
                        if Match.group(1) in self._MacroDict:
                            Value = self._MacroDict[Match.group(1)]

                    ConfigDict['value'] = Value
                    if (len(Value) > 0) and (Value[0] == '{'):
                        Value = self.FormatListValue(ConfigDict)

                    if ConfigDict['name'] == '':
                        # Clear BSF specific items
                        ConfigDict['bsfname'] = ''
                        ConfigDict['help'] = ''
                        ConfigDict['type'] = ''
                        ConfigDict['option'] = ''

                    if IsUpdSect and AutoAlign:
                        # Auto-align mode: pad offsets to the item's natural
                        # unit and to the enclosing struct's alignment.
                        ItemLength = int(ConfigDict['length'])
                        ItemOffset = int(ConfigDict['offset'])
                        ItemStruct = ConfigDict['struct']
                        Unit = 1
                        if ItemLength in [1, 2, 4, 8] and not ConfigDict['value'].startswith('{'):
                            Unit = ItemLength
                            # If there are 64 bit unit, align to 64
                            if Unit == 8:
                                MaxAlign = 64
                                SizeAlign = 8
                        if ItemStruct != '':
                            UnitDict = {'UINT8':1, 'UINT16':2, 'UINT32':4, 'UINT64':8}
                            if ItemStruct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
                                Unit = UnitDict[ItemStruct]
                                # If there are 64 bit unit, align to 64
                                if Unit == 8:
                                    MaxAlign = 64
                            SizeAlign = max(SizeAlign, Unit)
                        if (ConfigDict['embed'].find(':START') != -1):
                            # Remember the struct base for relative alignment.
                            Base = ItemOffset
                        SubOffset = ItemOffset - Base
                        SubRemainder = SubOffset % Unit
                        if SubRemainder:
                            Diff = Unit - SubRemainder
                            Offset = Offset + Diff
                            ItemOffset = ItemOffset + Diff

                        if (ConfigDict['embed'].find(':END') != -1):
                            Remainder = Offset % (MaxAlign/8) # MaxAlign is either 32 or 64
                            if Remainder:
                                Diff = (MaxAlign/8) - Remainder
                                Offset = Offset + Diff
                                ItemOffset = ItemOffset + Diff
                            MaxAlign = 32 # Reset to default 32 align when struct end
                        if (ConfigDict['cname'] == 'UpdTerminator'):
                            # ItemLength is the size of UpdTerminator
                            # Itemlength might be 16, 32, or 64
                            # Struct align to 64 if UpdTerminator
                            # or struct size is 64 bit, else align to 32
                            Remainder = Offset % max(ItemLength/8, 4, SizeAlign)
                            Offset = Offset + ItemLength
                            if Remainder:
                                Diff = max(ItemLength/8, 4, SizeAlign) - Remainder
                                ItemOffset = ItemOffset + Diff
                        ConfigDict['offset'] = ItemOffset

                    # Commit the item and reset the per-item template fields.
                    self._CfgItemList.append(ConfigDict.copy())
                    ConfigDict['name'] = ''
                    ConfigDict['find'] = ''
                    ConfigDict['struct'] = ''
                    ConfigDict['embed'] = ''
                    ConfigDict['comment'] = ''
                    ConfigDict['order'] = -1
                    ConfigDict['subreg'] = []
                    ConfigDict['option'] = ''
                else:
                    # It could be a virtual item as below
                    # !BSF FIELD:{SerialDebugPortAddress0:1}
                    # or
                    # @Bsf FIELD:{SerialDebugPortAddress0:1b}
                    Match = re.match("^\s*#\s+(!BSF|@Bsf)\s+FIELD:{(.+):(\d+)([Bb])?}", DscLine)
                    if Match:
                        SubCfgDict = ConfigDict.copy()
                        # Suffix selects the unit: 'B'/absent = bytes, 'b' = bits.
                        if (Match.group(4) == None) or (Match.group(4) == 'B'):
                            UnitBitLen = 8
                        elif Match.group(4) == 'b':
                            UnitBitLen = 1
                        else:
                            print("ERROR: Invalide BSF FIELD length for line '%s'" % DscLine)
                            raise SystemExit
                        SubCfgDict['cname'] = Match.group(2)
                        SubCfgDict['bitlength'] = int (Match.group(3)) * UnitBitLen
                        if SubCfgDict['bitlength'] > 0:
                            # Append the bitfield right after the previous one
                            # inside the most recently committed item.
                            LastItem = self._CfgItemList[-1]
                            if len(LastItem['subreg']) == 0:
                                SubOffset = 0
                            else:
                                SubOffset = LastItem['subreg'][-1]['bitoffset'] + LastItem['subreg'][-1]['bitlength']
                            SubCfgDict['bitoffset'] = SubOffset
                            LastItem['subreg'].append (SubCfgDict.copy())
                        ConfigDict['name'] = ''
        return Error
    def GetBsfBitFields (self, subitem, bytes):
        """Extract one bitfield's value from a little-endian byte list.

        subitem supplies 'bitoffset'/'bitlength'; 'bytes' is the parent
        item's default expanded to a list of byte values.  Returns the
        field value as a hex string.
        NOTE(review): the 'bytes' parameter shadows the builtin of the same
        name; kept because renaming would change the method interface.
        """
        start = subitem['bitoffset']
        end = start + subitem['bitlength']
        # Render all bytes as one LSB-first bit string so a simple slice
        # selects the field, then reverse the slice back for int().
        bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1])
        bitsvalue = bitsvalue[::-1]
        bitslen = len(bitsvalue)
        if start > bitslen or end > bitslen:
            print "Invalid bits offset [%d,%d] for %s" % (start, end, subitem['name'])
            raise SystemExit
        return hex(int(bitsvalue[start:end][::-1], 2))
def UpdateSubRegionDefaultValue (self):
Error = 0
for Item in self._CfgItemList:
if len(Item['subreg']) == 0:
continue
bytearray = []
if Item['value'][0] == '{':
binlist = Item['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
bytearray.append(value)
else:
if Item['value'].startswith('0x'):
value = int(Item['value'], 16)
else:
value = int(Item['value'])
idx = 0
while idx < Item['length']:
bytearray.append(value & 0xFF)
value = value >> 8
idx = idx + 1
for SubItem in Item['subreg']:
valuestr = self.GetBsfBitFields(SubItem, bytearray)
SubItem['value'] = valuestr
return Error
    def CreateSplitUpdTxt (self, UpdTxtFile):
        """Write one UPD .txt file per FSP component (FSP-T/M/S).

        Each output file (<FvDir>/<component GUID>.txt) lists the UPD items
        between that component's 'Signature' item and its 'UpdTerminator',
        inserting UnusedUpdSpace filler lines for offset gaps.  Returns 0
        on success, 1 when a GUID macro is missing, and 256 when the DSC is
        older than an existing output (nothing to regenerate).
        NOTE(review): the 'UpdTxtFile' parameter is overwritten at the top
        of every loop iteration, so the passed-in value is never used.
        """
        GuidList = ['FSP_T_UPD_TOOL_GUID','FSP_M_UPD_TOOL_GUID','FSP_S_UPD_TOOL_GUID']
        SignatureList = ['0x545F', '0x4D5F','0x535F'] # _T, _M, and _S signature for FSPT, FSPM, FSPS
        for Index in range(len(GuidList)):
            UpdTxtFile = ''
            FvDir = self._FvDir
            if GuidList[Index] not in self._MacroDict:
                self.Error = "%s definition is missing in DSC file" % (GuidList[Index])
                return 1

            if UpdTxtFile == '':
                UpdTxtFile = os.path.join(FvDir, self._MacroDict[GuidList[Index]] + '.txt')

            ReCreate = False
            if not os.path.exists(UpdTxtFile):
                ReCreate = True
            else:
                # Regenerate only when the DSC is newer than the output.
                DscTime = os.path.getmtime(self._DscFile)
                TxtTime = os.path.getmtime(UpdTxtFile)
                if DscTime > TxtTime:
                    ReCreate = True

            if not ReCreate:
                # DSC has not been modified yet
                # So don't have to re-generate other files
                self.Error = 'No DSC file change, skip to create UPD TXT file'
                return 256

            TxtFd = open(UpdTxtFile, "w")
            TxtFd.write("%s\n" % (__copyright_txt__ % date.today().year))

            NextOffset = 0
            SpaceIdx = 0
            StartAddr = 0
            EndAddr = 0
            Default = 'DEFAULT|'
            InRange = False
            # First pass: find this component's [Signature, UpdTerminator]
            # offset window.
            for Item in self._CfgItemList:
                if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
                    StartAddr = Item['offset']
                    NextOffset = StartAddr
                    InRange = True
                if Item['cname'] == 'UpdTerminator' and InRange == True:
                    EndAddr = Item['offset']
                    InRange = False
            InRange = False
            # Second pass: emit every UPD item inside the window, padding
            # gaps with UnusedUpdSpace entries.
            for Item in self._CfgItemList:
                if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
                    InRange = True
                if InRange != True:
                    continue
                if Item['cname'] == 'UpdTerminator':
                    InRange = False
                if Item['region'] != 'UPD':
                    continue
                Offset = Item['offset']
                if StartAddr > Offset or EndAddr < Offset:
                    continue
                if NextOffset < Offset:
                    # insert one line
                    TxtFd.write("%s.UnusedUpdSpace%d|%s0x%04X|0x%04X|{0}\n" % (Item['space'], SpaceIdx, Default, NextOffset - StartAddr, Offset - NextOffset))
                    SpaceIdx = SpaceIdx + 1
                NextOffset = Offset + Item['length']
                TxtFd.write("%s.%s|%s0x%04X|%s|%s\n" % (Item['space'],Item['cname'],Default,Item['offset'] - StartAddr,Item['length'],Item['value']))
            TxtFd.close()
        return 0
    def ProcessMultilines (self, String, MaxCharLength):
        """Wrap String into indented comment lines for the generated header.

        Explicit '\\n' escapes always force a break; when the text exceeds
        MaxCharLength, additional breaks are inserted at spaces near the
        limit.  Each emitted line is prefixed with two spaces and
        terminated with a newline.
        """
        Multilines = ''
        StringLength = len(String)
        CurrentStringStart = 0
        StringOffset = 0
        BreakLineDict = []
        if len(String) <= MaxCharLength:
            # Short string: only honor explicit '\n' escape sequences.
            while (StringOffset < StringLength):
                if StringOffset >= 1:
                    if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
                        BreakLineDict.append (StringOffset + 1)
                StringOffset += 1
            if BreakLineDict != []:
                for Each in BreakLineDict:
                    Multilines += "  %s\n" % String[CurrentStringStart:Each].lstrip()
                    CurrentStringStart = Each
                if StringLength - CurrentStringStart > 0:
                    Multilines += "  %s\n" % String[CurrentStringStart:].lstrip()
            else:
                Multilines = "  %s\n" % String
        else:
            # Long string: break at spaces once a line reaches the limit,
            # still honoring explicit '\n' escapes.
            NewLineStart = 0
            NewLineCount = 0
            FoundSpaceChar = False
            while (StringOffset < StringLength):
                if StringOffset >= 1:
                    if NewLineCount >= MaxCharLength - 1:
                        # Only break at a space if enough text remains.
                        if String[StringOffset] == ' ' and StringLength - StringOffset > 10:
                            BreakLineDict.append (NewLineStart + NewLineCount)
                            NewLineStart = NewLineStart + NewLineCount
                            NewLineCount = 0
                            FoundSpaceChar = True
                        elif StringOffset == StringLength - 1 and FoundSpaceChar == False:
                            # No space found anywhere: emit as a single line.
                            BreakLineDict.append (0)
                    if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
                        BreakLineDict.append (StringOffset + 1)
                        NewLineStart = StringOffset + 1
                        NewLineCount = 0
                StringOffset += 1
                NewLineCount += 1
            if BreakLineDict != []:
                BreakLineDict.sort ()
                for Each in BreakLineDict:
                    if Each > 0:
                        Multilines += "  %s\n" % String[CurrentStringStart:Each].lstrip()
                    CurrentStringStart = Each
                if StringLength - CurrentStringStart > 0:
                    Multilines += "  %s\n" % String[CurrentStringStart:].lstrip()
        return Multilines
def CreateField (self, Item, Name, Length, Offset, Struct, BsfName, Help, Option):
PosName = 28
PosComment = 30
NameLine=''
HelpLine=''
OptionLine=''
IsArray = False
if Length in [1,2,4,8]:
Type = "UINT%d" % (Length * 8)
if Name.startswith("UnusedUpdSpace") and Length != 1:
IsArray = True
Type = "UINT8"
else:
IsArray = True
Type = "UINT8"
if Item and Item['value'].startswith('{'):
Type = "UINT8"
IsArray = True
if Struct != '':
Type = Struct
if Struct in ['UINT8','UINT16','UINT32','UINT64']:
IsArray = True
Unit = int(Type[4:]) / 8
Length = Length / Unit
else:
IsArray = False
if IsArray:
Name = Name + '[%d]' % Length
if len(Type) < PosName:
Space1 = PosName - len(Type)
else:
Space1 = 1
if BsfName != '':
NameLine=" - %s\n" % BsfName
else:
NameLine="\n"
if Help != '':
HelpLine = self.ProcessMultilines (Help, 80)
if Option != '':
OptionLine = self.ProcessMultilines (Option, 80)
if Offset is None:
OffsetStr = '????'
else:
OffsetStr = '0x%04X' % Offset
return "\n/** Offset %s%s%s%s**/\n %s%s%s;\n" % (OffsetStr, NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name,)
def PostProcessBody (self, TextBody):
NewTextBody = []
OldTextBody = []
IncludeLine = False
StructName = ''
VariableName = ''
IsUpdHdrDefined = False
IsUpdHeader = False
for Line in TextBody:
SplitToLines = Line.splitlines()
MatchComment = re.match("^/\*\sCOMMENT:(\w+):([\w|\W|\s]+)\s\*/\s([\s\S]*)", SplitToLines[0])
if MatchComment:
if MatchComment.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if IsUpdHdrDefined != True or IsUpdHeader != True:
CommentLine = " " + MatchComment.group(2) + "\n"
NewTextBody.append("/**" + CommentLine + "**/\n")
Line = Line[(len(SplitToLines[0]) + 1):]
Match = re.match("^/\*\sEMBED_STRUCT:(\w+):(\w+):(START|END)\s\*/\s([\s\S]*)", Line)
if Match:
Line = Match.group(4)
if Match.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if Match and Match.group(3) == 'START':
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('typedef struct {\n')
StructName = Match.group(1)
VariableName = Match.group(2)
MatchOffset = re.search('/\*\*\sOffset\s0x([a-fA-F0-9]+)', Line)
if MatchOffset:
Offset = int(MatchOffset.group(1), 16)
else:
Offset = None
Line
IncludeLine = True
OldTextBody.append (self.CreateField (None, VariableName, 0, Offset, StructName, '', '', ''))
if IncludeLine:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append (Line)
else:
OldTextBody.append (Line)
if Match and Match.group(3) == 'END':
if (StructName != Match.group(1)) or (VariableName != Match.group(2)):
print "Unmatched struct name '%s' and '%s' !" % (StructName, Match.group(1))
else:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('} %s;\n\n' % StructName)
IsUpdHdrDefined = True
IncludeLine = False
NewTextBody.extend(OldTextBody)
return NewTextBody
def WriteLinesWithoutTailingSpace (self, HeaderFd, Line):
TxtBody2 = Line.splitlines(True)
for Line2 in TxtBody2:
Line2 = Line2.rstrip()
Line2 += '\n'
HeaderFd.write (Line2)
return 0
    def CreateHeaderFile (self, InputHeaderFile):
        """Generate FspUpd.h plus the per-phase FsptUpd.h/FspmUpd.h/FspsUpd.h.

        Walks self._CfgItemList (filled by ParseDscFile), emits the UPD
        signature #defines and the FSPT/FSPM/FSPS_UPD typedefs, then splits
        the generated text into per-phase header files.  Lines exported from
        *InputHeaderFile* (between !EXPORT ... EXTERNAL_BOOTLOADER_STRUCT
        BEGIN/END markers) are copied into the matching per-phase header.

        Returns 0 on success, 4 for a malformed embed marker, 6 when
        *InputHeaderFile* does not exist.
        """
        FvDir = self._FvDir
        HeaderFileName = 'FspUpd.h'
        HeaderFile = os.path.join(FvDir, HeaderFileName)
        # Check if header needs to be recreated
        ReCreate = False
        TxtBody = []
        # Pass 1: turn every 8-byte 'Signature' item into a #define whose
        # comment shows the decoded ASCII signature string.
        for Item in self._CfgItemList:
            if str(Item['cname']) == 'Signature' and Item['length'] == 8:
                Value = int(Item['value'], 16)
                Chars = []
                # Decode the little-endian 64-bit value into its ASCII chars.
                while Value != 0x0:
                    Chars.append(chr(Value & 0xFF))
                    Value = Value >> 8
                SignatureStr = ''.join(Chars)
                # Signature will be _T / _M / _S for FSPT / FSPM / FSPS accordingly
                if '_T' in SignatureStr[6:6+2]:
                    TxtBody.append("#define FSPT_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
                elif '_M' in SignatureStr[6:6+2]:
                    TxtBody.append("#define FSPM_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
                elif '_S' in SignatureStr[6:6+2]:
                    TxtBody.append("#define FSPS_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
        TxtBody.append("\n")
        # Pass 2: emit one "typedef struct { ... } FSPx_UPD;" per UPD region.
        for Region in ['UPD']:
            UpdOffsetTable = []
            UpdSignature = ['0x545F', '0x4D5F', '0x535F'] #['_T', '_M', '_S'] signature for FSPT, FSPM, FSPS
            UpdStructure = ['FSPT_UPD', 'FSPM_UPD', 'FSPS_UPD']
            # Locate the starting offset of each UPD region by its signature.
            for Item in self._CfgItemList:
                if Item["cname"] == 'Signature' and Item["value"][0:6] in UpdSignature:
                    UpdOffsetTable.append (Item["offset"])
            for UpdIdx in range(len(UpdOffsetTable)):
                CommentLine = ""
                # Emit the region banner comment (from the item comment, or a
                # generic one derived from the struct name).
                for Item in self._CfgItemList:
                    if Item["comment"] != '' and Item["offset"] >= UpdOffsetTable[UpdIdx]:
                        MatchComment = re.match("^(U|V)PD_DATA_REGION:([\w|\W|\s]+)", Item["comment"])
                        if MatchComment and MatchComment.group(1) == Region[0]:
                            CommentLine = " " + MatchComment.group(2) + "\n"
                            TxtBody.append("/**" + CommentLine + "**/\n")
                    elif Item["offset"] >= UpdOffsetTable[UpdIdx] and Item["comment"] == '':
                        Match = re.match("^FSP([\w|\W|\s])_UPD", UpdStructure[UpdIdx])
                        if Match:
                            TxtBody.append("/** Fsp " + Match.group(1) + " UPD Configuration\n**/\n")
                TxtBody.append("typedef struct {\n")
                NextOffset = 0
                SpaceIdx = 0
                Offset = 0
                LastVisible = True      # current item is visible (header != OFF)
                ResvOffset = 0          # start of the current hidden run
                ResvIdx = 0             # index for ReservedUpdSpaceN names
                LineBuffer = []         # fields buffered while hidden
                InRange = False         # inside the UPD region being emitted
                for Item in self._CfgItemList:
                    if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == UpdSignature[UpdIdx] or Region[0] == 'V':
                        InRange = True
                    if InRange != True:
                        continue
                    if Item['cname'] == 'UpdTerminator':
                        InRange = False
                    if Item['region'] != Region:
                        continue
                    if Item["offset"] < UpdOffsetTable[UpdIdx]:
                        continue
                    # Track header ON/OFF transitions; hidden ranges collapse
                    # into a single ReservedUpdSpaceN byte array.
                    NextVisible = LastVisible
                    if LastVisible and (Item['header'] == 'OFF'):
                        NextVisible = False
                        ResvOffset = Item['offset']
                    elif (not LastVisible) and Item['header'] == 'ON':
                        NextVisible = True
                        Name = "Reserved" + Region[0] + "pdSpace%d" % ResvIdx
                        ResvIdx = ResvIdx + 1
                        TxtBody.append(self.CreateField (Item, Name, Item["offset"] - ResvOffset, ResvOffset, '', '', '', ''))
                    # Fill any gap before this item with an UnusedUpdSpaceN field.
                    if Offset < Item["offset"]:
                        if LastVisible:
                            Name = "Unused" + Region[0] + "pdSpace%d" % SpaceIdx
                            LineBuffer.append(self.CreateField (Item, Name, Item["offset"] - Offset, Offset, '', '', '', ''))
                            SpaceIdx = SpaceIdx + 1
                        Offset = Item["offset"]
                    LastVisible = NextVisible
                    Offset = Offset + Item["length"]
                    if LastVisible:
                        # Flush fields that were buffered while hidden.
                        for Each in LineBuffer:
                            TxtBody.append (Each)
                        LineBuffer = []
                        Comment = Item["comment"]
                        Embed = Item["embed"].upper()
                        if Embed.endswith(':START') or Embed.endswith(':END'):
                            # Markers are consumed later by PostProcessBody.
                            if not Comment == '' and Embed.endswith(':START'):
                                Marker = '/* COMMENT:%s */ \n' % Item["comment"]
                                Marker = Marker + '/* EMBED_STRUCT:%s */ ' % Item["embed"]
                            else:
                                Marker = '/* EMBED_STRUCT:%s */ ' % Item["embed"]
                        else:
                            if Embed == '':
                                Marker = ''
                            else:
                                self.Error = "Invalid embedded structure format '%s'!\n" % Item["embed"]
                                return 4
                        Line = Marker + self.CreateField (Item, Item["cname"], Item["length"], Item["offset"], Item['struct'], Item['name'], Item['help'], Item['option'])
                        TxtBody.append(Line)
                    if Item['cname'] == 'UpdTerminator':
                        break
                TxtBody.append("} " + UpdStructure[UpdIdx] + ";\n\n")
        # Handle the embedded data structure
        TxtBody = self.PostProcessBody (TxtBody)
        HeaderTFileName = 'FsptUpd.h'
        HeaderMFileName = 'FspmUpd.h'
        HeaderSFileName = 'FspsUpd.h'
        UpdRegionCheck = ['FSPT', 'FSPM', 'FSPS'] # FSPX_UPD_REGION
        UpdConfigCheck = ['FSP_T', 'FSP_M', 'FSP_S'] # FSP_X_CONFIG, FSP_X_TEST_CONFIG, FSP_X_RESTRICTED_CONFIG
        UpdSignatureCheck = ['FSPT_UPD_SIGNATURE', 'FSPM_UPD_SIGNATURE', 'FSPS_UPD_SIGNATURE']
        ExcludedSpecificUpd = 'FSPM_ARCH_UPD'
        if InputHeaderFile != '':
            if not os.path.exists(InputHeaderFile):
                self.Error = "Input header file '%s' does not exist" % InputHeaderFile
                return 6
            InFd = open(InputHeaderFile, "r")
            IncLines = InFd.readlines()
            InFd.close()
        # Write one per-phase header (FsptUpd.h / FspmUpd.h / FspsUpd.h),
        # each containing only the structs that belong to that phase.
        for item in range(len(UpdRegionCheck)):
            if UpdRegionCheck[item] == 'FSPT':
                HeaderFd = open(os.path.join(FvDir, HeaderTFileName), "w")
                FileBase = os.path.basename(os.path.join(FvDir, HeaderTFileName))
            elif UpdRegionCheck[item] == 'FSPM':
                HeaderFd = open(os.path.join(FvDir, HeaderMFileName), "w")
                FileBase = os.path.basename(os.path.join(FvDir, HeaderMFileName))
            elif UpdRegionCheck[item] == 'FSPS':
                HeaderFd = open(os.path.join(FvDir, HeaderSFileName), "w")
                FileBase = os.path.basename(os.path.join(FvDir, HeaderSFileName))
            FileName = FileBase.replace(".", "_").upper()
            HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year))
            HeaderFd.write("#ifndef __%s__\n" % FileName)
            HeaderFd.write("#define __%s__\n\n" % FileName)
            HeaderFd.write("#include <%s>\n\n" % HeaderFileName)
            HeaderFd.write("#pragma pack(1)\n\n")
            # Copy lines exported for this phase from the input header.
            Export = False
            for Line in IncLines:
                Match = re.search ("!EXPORT\s+([A-Z]+)\s+EXTERNAL_BOOTLOADER_STRUCT_(BEGIN|END)\s+", Line)
                if Match:
                    if Match.group(2) == "BEGIN" and Match.group(1) == UpdRegionCheck[item]:
                        Export = True
                        continue
                    else:
                        Export = False
                        continue
                if Export:
                    HeaderFd.write(Line)
            HeaderFd.write("\n")
            # Locate the struct definitions belonging to this phase.
            Index = 0
            StartIndex = 0
            EndIndex = 0
            StructStart = []
            StructStartWithComment = []
            StructEnd = []
            for Line in TxtBody:
                Index += 1
                Match = re.match("(typedef struct {)", Line)
                if Match:
                    StartIndex = Index - 1
                Match = re.match("}\s([_A-Z0-9]+);", Line)
                if Match and (UpdRegionCheck[item] in Match.group(1) or UpdConfigCheck[item] in Match.group(1)) and (ExcludedSpecificUpd not in Match.group(1)):
                    EndIndex = Index
                    StructStart.append(StartIndex)
                    StructEnd.append(EndIndex)
            # Extend each struct range upward to include its leading comment.
            Index = 0
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index == StructStart[Item]:
                        Match = re.match("^(/\*\*\s*)", Line)
                        if Match:
                            StructStartWithComment.append(StructStart[Item])
                        else:
                            StructStartWithComment.append(StructStart[Item] + 1)
            # Emit the selected ranges.
            Index = 0
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
                        self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
            HeaderFd.write("#pragma pack()\n\n")
            HeaderFd.write("#endif\n")
            HeaderFd.close()
        # Finally write the shared FspUpd.h containing the signature defines.
        HeaderFd = open(HeaderFile, "w")
        FileBase = os.path.basename(HeaderFile)
        FileName = FileBase.replace(".", "_").upper()
        HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year))
        HeaderFd.write("#ifndef __%s__\n" % FileName)
        HeaderFd.write("#define __%s__\n\n" % FileName)
        HeaderFd.write("#include <FspEas.h>\n\n")
        HeaderFd.write("#pragma pack(1)\n\n")
        for item in range(len(UpdRegionCheck)):
            Index = 0
            StartIndex = 0
            EndIndex = 0
            StructStart = []
            StructStartWithComment = []
            StructEnd = []
            for Line in TxtBody:
                Index += 1
                Match = re.match("(typedef struct {)", Line)
                if Match:
                    StartIndex = Index - 1
                Match = re.match("#define\s([_A-Z0-9]+)\s*", Line)
                # NOTE(review): both sides of this 'or' are identical — the
                # second operand looks like it was meant to be a different
                # check list; confirm against upstream GenCfgOpt.
                if Match and (UpdSignatureCheck[item] in Match.group(1) or UpdSignatureCheck[item] in Match.group(1)):
                    StructStart.append(Index - 1)
                    StructEnd.append(Index)
            Index = 0
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index == StructStart[Item]:
                        Match = re.match("^(/\*\*\s*)", Line)
                        if Match:
                            StructStartWithComment.append(StructStart[Item])
                        else:
                            StructStartWithComment.append(StructStart[Item] + 1)
            Index = 0
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
                        self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
        HeaderFd.write("#pragma pack()\n\n")
        HeaderFd.write("#endif\n")
        HeaderFd.close()
        return 0
def WriteBsfStruct (self, BsfFd, Item):
LogExpr = CLogicalExpression()
if Item['type'] == "None":
Space = "gPlatformFspPkgTokenSpaceGuid"
else:
Space = Item['space']
Line = " $%s_%s" % (Space, Item['cname'])
Match = re.match("\s*\{([x0-9a-fA-F,\s]+)\}\s*", Item['value'])
if Match:
DefaultValue = Match.group(1).strip()
else:
DefaultValue = Item['value'].strip()
if 'bitlength' in Item:
BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['bitlength'], DefaultValue))
else:
BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['length'], DefaultValue))
TmpList = []
if Item['type'] == "Combo":
if not Item['option'] in self._BuidinOption:
OptList = Item['option'].split(',')
for Option in OptList:
Option = Option.strip()
(OpVal, OpStr) = Option.split(':')
test = LogExpr.getNumber (OpVal)
if test is None:
raise Exception("Selection Index '%s' is not a number" % OpVal)
TmpList.append((OpVal, OpStr))
return TmpList
    def WriteBsfOption (self, BsfFd, Item):
        """Emit the BSF page widget definition for one config item.

        Supported widget types: Combo (drop-down list), EditNum (numeric edit
        with HEX/DEC format and a valid range), EditText and Table.  After
        the widget line, the item help text is written; for EditNum the
        parsed valid range is appended to the help.

        Raises:
            Exception: for a malformed Table column specification.
        """
        PcdName = Item['space'] + '_' + Item['cname']
        WriteHelp = 0                  # 0: no help, 1: help only, 2: help + range
        if Item['type'] == "Combo":
            # Builtin option lists are referenced by name; custom ones use
            # the list generated from this item's PCD name.
            if Item['option'] in self._BuidinOption:
                Options = self._BuidinOption[Item['option']]
            else:
                Options = PcdName
            BsfFd.write(' %s $%s, "%s", &%s,\n' % (Item['type'], PcdName, Item['name'], Options))
            WriteHelp = 1
        elif Item['type'].startswith("EditNum"):
            # "EditNum, HEX|DEC, (min, max)" — capture format and range.
            Match = re.match("EditNum\s*,\s*(HEX|DEC)\s*,\s*\((\d+|0x[0-9A-Fa-f]+)\s*,\s*(\d+|0x[0-9A-Fa-f]+)\)", Item['type'])
            if Match:
                BsfFd.write(' EditNum $%s, "%s", %s,\n' % (PcdName, Item['name'], Match.group(1)))
                WriteHelp = 2
        elif Item['type'].startswith("EditText"):
            BsfFd.write(' %s $%s, "%s",\n' % (Item['type'], PcdName, Item['name']))
            WriteHelp = 1
        elif Item['type'] == "Table":
            # Option string holds "name:bytes:format" column specs.
            Columns = Item['option'].split(',')
            if len(Columns) != 0:
                BsfFd.write(' %s $%s "%s",' % (Item['type'], PcdName, Item['name']))
                for Col in Columns:
                    Fmt = Col.split(':')
                    if len(Fmt) != 3:
                        raise Exception("Column format '%s' is invalid !" % Fmt)
                    try:
                        Dtype = int(Fmt[1].strip())
                    except:
                        raise Exception("Column size '%s' is invalid !" % Fmt[1])
                    BsfFd.write('\n Column "%s", %d bytes, %s' % (Fmt[0].strip(), Dtype, Fmt[2].strip()))
                BsfFd.write(',\n')
                WriteHelp = 1
        if WriteHelp > 0:
            # Help text uses literal "\n\r" as the line separator.
            HelpLines = Item['help'].split('\\n\\r')
            FirstLine = True
            for HelpLine in HelpLines:
                if FirstLine:
                    FirstLine = False
                    BsfFd.write(' Help "%s"\n' % (HelpLine))
                else:
                    BsfFd.write(' "%s"\n' % (HelpLine))
            if WriteHelp == 2:
                # Match is guaranteed bound here: WriteHelp == 2 only in the
                # EditNum branch where the regex matched.
                BsfFd.write(' "Valid range: %s ~ %s"\n' % (Match.group(2), Match.group(3)))
    def GenerateBsfFile (self, BsfFile):
        """Write the complete BSF file: StructDef, option lists, info block
        and the per-page widget definitions.

        Args:
            BsfFile: output path; an empty string is rejected.

        Returns:
            0 on success, 1 for an invalid output path.
        """
        if BsfFile == '':
            self.Error = "BSF output file '%s' is invalid" % BsfFile
            return 1
        Error = 0
        OptionDict = {}               # "<space>_<cname>" -> [(value, label), ...]
        BsfFd = open(BsfFile, "w")
        BsfFd.write("%s\n" % (__copyright_bsf__ % date.today().year))
        BsfFd.write("%s\n" % self._GlobalDataDef)
        BsfFd.write("StructDef\n")
        NextOffset = -1               # next expected byte offset; -1 until first Find
        for Item in self._CfgItemList:
            if Item['find'] != '':
                # A "Find" anchor resets the offset base.
                BsfFd.write('\n Find "%s"\n' % Item['find'])
                NextOffset = Item['offset'] + Item['length']
            if Item['name'] != '':
                # Skip any gap between the previous item and this one.
                if NextOffset != Item['offset']:
                    BsfFd.write(" Skip %d bytes\n" % (Item['offset'] - NextOffset))
                if len(Item['subreg']) > 0:
                    # Bit-field item: walk its sub-registers in bit units.
                    NextOffset = Item['offset']
                    BitsOffset = NextOffset * 8
                    for SubItem in Item['subreg']:
                        BitsOffset += SubItem['bitlength']
                        if SubItem['name'] == '':
                            # Anonymous sub-field: just skip its bits/bytes.
                            if 'bitlength' in SubItem:
                                BsfFd.write(" Skip %d bits\n" % (SubItem['bitlength']))
                            else:
                                BsfFd.write(" Skip %d bytes\n" % (SubItem['length']))
                        else:
                            Options = self.WriteBsfStruct(BsfFd, SubItem)
                            if len(Options) > 0:
                                OptionDict[SubItem['space']+'_'+SubItem['cname']] = Options
                    # Pad out any remaining bits, then whole bytes, so the
                    # next item starts on the expected boundary.
                    NextBitsOffset = (Item['offset'] + Item['length']) * 8
                    if NextBitsOffset > BitsOffset:
                        BitsGap = NextBitsOffset - BitsOffset
                        BitsRemain = BitsGap % 8
                        if BitsRemain:
                            BsfFd.write(" Skip %d bits\n" % BitsRemain)
                            BitsGap -= BitsRemain
                        BytesRemain = BitsGap / 8
                        if BytesRemain:
                            BsfFd.write(" Skip %d bytes\n" % BytesRemain)
                    NextOffset = Item['offset'] + Item['length']
                else:
                    NextOffset = Item['offset'] + Item['length']
                    Options = self.WriteBsfStruct(BsfFd, Item)
                    if len(Options) > 0:
                        OptionDict[Item['space']+'_'+Item['cname']] = Options
        BsfFd.write("\nEndStruct\n\n")
        BsfFd.write("%s" % self._BuidinOptionTxt)
        # Emit the custom combo selection lists collected above.
        for Each in OptionDict:
            BsfFd.write("List &%s\n" % Each)
            for Item in OptionDict[Each]:
                BsfFd.write(' Selection %s , "%s"\n' % (Item[0], Item[1]))
            BsfFd.write("EndList\n\n")
        BsfFd.write("BeginInfoBlock\n")
        BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver']))
        BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name']))
        BsfFd.write("EndInfoBlock\n\n")
        # One BSF page per configuration page, widgets sorted by 'order'.
        for Each in self._CfgPageDict:
            BsfFd.write('Page "%s"\n' % self._CfgPageDict[Each])
            BsfItems = []
            for Item in self._CfgItemList:
                if Item['name'] != '':
                    if Item['page'] != Each:
                        continue
                    if len(Item['subreg']) > 0:
                        for SubItem in Item['subreg']:
                            if SubItem['name'] != '':
                                BsfItems.append(SubItem)
                    else:
                        BsfItems.append(Item)
            BsfItems.sort(key=lambda x: x['order'])
            for Item in BsfItems:
                self.WriteBsfOption (BsfFd, Item)
            BsfFd.write("EndPage\n\n")
        BsfFd.close()
        return Error
def Usage():
    """Print command line usage for the supported GenCfgOpt subcommands."""
    # Parenthesized single-argument prints produce identical output under
    # both Python 2 (parenthesized expression) and Python 3 (function call).
    print("GenCfgOpt Version 0.53")
    print("Usage:")
    print(" GenCfgOpt UPDTXT PlatformDscFile BuildFvDir [-D Macros]")
    print(" GenCfgOpt HEADER PlatformDscFile BuildFvDir InputHFile [-D Macros]")
    print(" GenCfgOpt GENBSF PlatformDscFile BuildFvDir BsfOutFile [-D Macros]")
def Main():
    """Command line entry point.

    Dispatches on sys.argv[1] (UPDTXT / HEADER / GENBSF) after parsing the
    common arguments: DSC file, build FV directory, optional output file,
    optional -D macros and --pcd overrides.

    Returns a process exit code (0 on success, non-zero error codes
    otherwise; the UPDTXT path returns 256 when no change is detected).
    """
    #
    # Parse the options and args
    #
    i = 1
    GenCfgOpt = CGenCfgOpt()
    # Collect "--pcd <value>" overrides before positional parsing.
    while i < len(sys.argv):
        if sys.argv[i].strip().lower() == "--pcd":
            BuildOptionPcd.append(sys.argv[i+1])
            i += 1
        i += 1
    argc = len(sys.argv)
    if argc < 4:
        Usage()
        return 1
    else:
        DscFile = sys.argv[2]
        if not os.path.exists(DscFile):
            print "ERROR: Cannot open DSC file '%s' !" % DscFile
            return 2
        OutFile = ''
        if argc > 4:
            # argv[4] is either the output file or the first "-D" macro.
            if sys.argv[4][0] == '-':
                Start = 4
            else:
                OutFile = sys.argv[4]
                Start = 5
            if argc > Start:
                if GenCfgOpt.ParseMacros(sys.argv[Start:]) != 0:
                    print "ERROR: Macro parsing failed !"
                    return 3
        FvDir = sys.argv[3]
        if not os.path.exists(FvDir):
            os.makedirs(FvDir)
        if GenCfgOpt.ParseDscFile(DscFile, FvDir) != 0:
            print "ERROR: %s !" % GenCfgOpt.Error
            return 5
        if GenCfgOpt.UpdateSubRegionDefaultValue() != 0:
            print "ERROR: %s !" % GenCfgOpt.Error
            return 7
        if sys.argv[1] == "UPDTXT":
            Ret = GenCfgOpt.CreateSplitUpdTxt(OutFile)
            if Ret != 0:
                # No change is detected
                if Ret == 256:
                    print "INFO: %s !" % (GenCfgOpt.Error)
                else :
                    print "ERROR: %s !" % (GenCfgOpt.Error)
            return Ret
        elif sys.argv[1] == "HEADER":
            if GenCfgOpt.CreateHeaderFile(OutFile) != 0:
                print "ERROR: %s !" % GenCfgOpt.Error
                return 8
        elif sys.argv[1] == "GENBSF":
            if GenCfgOpt.GenerateBsfFile(OutFile) != 0:
                print "ERROR: %s !" % GenCfgOpt.Error
                return 9
        else:
            if argc < 5:
                Usage()
                return 1
            print "ERROR: Unknown command '%s' !" % sys.argv[1]
            Usage()
            return 1
        return 0
    return 0
# Script entry point: the process exit status is the GenCfgOpt return code.
if __name__ == '__main__':
    sys.exit(Main())
| 43.64235 | 171 | 0.443271 |
22cacb66410792fce58e16fad40d1fc761a815ba | 6,140 | py | Python | craft_detector/test.py | vinhtq115/CRAFT-pytorch | 2be0399fde1ba9bbe7af73e29054fbb678a1d01b | [
"MIT"
] | 6 | 2020-08-23T22:47:11.000Z | 2021-06-17T14:32:49.000Z | craft_detector/test.py | vinhtq115/CRAFT-pytorch | 2be0399fde1ba9bbe7af73e29054fbb678a1d01b | [
"MIT"
] | null | null | null | craft_detector/test.py | vinhtq115/CRAFT-pytorch | 2be0399fde1ba9bbe7af73e29054fbb678a1d01b | [
"MIT"
] | 7 | 2020-08-06T12:39:39.000Z | 2021-07-03T01:00:55.000Z | """
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
# -*- coding: utf-8 -*-
import sys
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from PIL import Image
import cv2
from skimage import io
import numpy as np
import craft_utils
import imgproc
import file_utils
import json
import zipfile
from craft import CRAFT
from collections import OrderedDict
def copyStateDict(state_dict):
    """Strip the "module." prefix that DataParallel adds to checkpoint keys.

    If the first key starts with "module", the first dot-separated component
    of every key is dropped so the weights load into a non-parallel model;
    otherwise keys are kept unchanged.  Insertion order is preserved.
    """
    first_key = list(state_dict.keys())[0]
    start_idx = 1 if first_key.startswith("module") else 0
    return OrderedDict(
        (".".join(key.split(".")[start_idx:]), value)
        for key, value in state_dict.items()
    )
def str2bool(v):
    """Interpret common affirmative strings as True (case-insensitive).

    Accepted true values: "yes", "y", "true", "t", "1"; anything else is False.
    Used as an argparse ``type`` so "--cuda False" parses as a boolean.
    """
    return v.lower() in {"yes", "y", "true", "t", "1"}
# Command-line options for the CRAFT text detection demo.
parser = argparse.ArgumentParser(description='CRAFT Text Detection')
parser.add_argument('--trained_model', default='weights/craft_mlt_25k.pth', type=str, help='pretrained model')
parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser.add_argument('--cuda', default=False, type=str2bool, help='Use cuda for inference')
parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')
parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
parser.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model')
args = parser.parse_args()
""" For test images in a folder """
# Collect the image paths to run detection on.
image_list, _, _ = file_utils.get_files(args.test_folder)
# All detection results (masks + annotated images) go under ./result/.
result_folder = './result/'
if not os.path.isdir(result_folder):
    os.mkdir(result_folder)
def test_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
    """Run CRAFT inference on one image and post-process the score maps.

    Args:
        net: the CRAFT model.
        image: input image array (RGB, as loaded by imgproc.loadImage).
        text_threshold / link_threshold / low_text: detection thresholds
            forwarded to craft_utils.getDetBoxes.
        cuda: move the input batch to GPU when True.
        poly: return polygons instead of quadrilateral boxes when possible.
        refine_net: optional RefineNet that replaces the link score map.

    Returns:
        (boxes, polys, ret_score_text): detected boxes, polygons (box is
        substituted when no polygon was produced) and a heat-map rendering
        of the text/link scores for debugging output.
    """
    t0 = time.time()
    # resize
    img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio)
    ratio_h = ratio_w = 1 / target_ratio
    # preprocessing
    x = imgproc.normalizeMeanVariance(img_resized)
    x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
    x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
    if cuda:
        x = x.cuda()
    # forward pass
    with torch.no_grad():
        y, feature = net(x)
    # make score and link map
    score_text = y[0,:,:,0].cpu().data.numpy()
    score_link = y[0,:,:,1].cpu().data.numpy()
    # refine link
    if refine_net is not None:
        with torch.no_grad():
            y_refiner = refine_net(y, feature)
        score_link = y_refiner[0,:,:,0].cpu().data.numpy()
    t0 = time.time() - t0
    t1 = time.time()
    # Post-processing
    boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)
    # coordinate adjustment
    boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
    polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)
    # Fall back to the quadrilateral box when no polygon was produced.
    for k in range(len(polys)):
        if polys[k] is None: polys[k] = boxes[k]
    t1 = time.time() - t1
    # render results (optional)
    render_img = score_text.copy()
    render_img = np.hstack((render_img, score_link))
    ret_score_text = imgproc.cvt2HeatmapImg(render_img)
    if args.show_time : print("\ninfer/postproc time : {:.3f}/{:.3f}".format(t0, t1))
    return boxes, polys, ret_score_text
# Script entry point: load the detector (and optional refiner), then run
# detection over every image in --test_folder, writing the score-map mask
# and the annotated result image for each one.
if __name__ == '__main__':
    # load net
    net = CRAFT()     # initialize
    print('Loading weights from checkpoint (' + args.trained_model + ')')
    if args.cuda:
        net.load_state_dict(copyStateDict(torch.load(args.trained_model)))
    else:
        net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))
    if args.cuda:
        net = net.cuda()
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = False
    net.eval()
    # LinkRefiner
    refine_net = None
    if args.refine:
        from refinenet import RefineNet
        refine_net = RefineNet()
        print('Loading weights of refiner from checkpoint (' + args.refiner_model + ')')
        if args.cuda:
            refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model)))
            refine_net = refine_net.cuda()
            refine_net = torch.nn.DataParallel(refine_net)
        else:
            refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model, map_location='cpu')))
        refine_net.eval()
        # The refiner produces polygon-friendly link scores.
        args.poly = True
    t = time.time()
    # load data
    for k, image_path in enumerate(image_list):
        print("Test image {:d}/{:d}: {:s}".format(k+1, len(image_list), image_path), end='\r')
        image = imgproc.loadImage(image_path)
        #
        # (H, W) = image.shape[:2]
        # (newW, newH) = (2400, 1800)
        # # (newW, newH) = (W, H)
        # rW = W / float(newW)
        # rH = H / float(newH)
        # image = cv2.resize(image, (newW, newH))
        bboxes, polys, score_text = test_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly, refine_net)
        # save score text
        filename, file_ext = os.path.splitext(os.path.basename(image_path))
        mask_file = result_folder + "/res_" + filename + '_mask.jpg'
        cv2.imwrite(mask_file, score_text)
        # BGR order for OpenCV-based drawing in file_utils.saveResult.
        file_utils.saveResult(image_path, image[:,:,::-1], polys, dirname=result_folder)
    print("elapsed time : {}s".format(time.time() - t))
1db579d8c0e92ac19ca95e88aa7aae19979a915a | 917 | py | Python | example/home/mutations.py | mazurbeam/wagtail-grapple | 8ca975b71553c4454a619d51d66d61fcd696faeb | [
"BSD-3-Clause"
] | null | null | null | example/home/mutations.py | mazurbeam/wagtail-grapple | 8ca975b71553c4454a619d51d66d61fcd696faeb | [
"BSD-3-Clause"
] | null | null | null | example/home/mutations.py | mazurbeam/wagtail-grapple | 8ca975b71553c4454a619d51d66d61fcd696faeb | [
"BSD-3-Clause"
] | null | null | null | from wagtail.core import hooks
from django.utils.crypto import get_random_string
from wagtail.core.models import Page
import graphene
from grapple.types.pages import PageInterface
from home.models import AuthorPage
# GraphQL mutation that creates and publishes an AuthorPage under a given
# parent page.  (Deliberately commented rather than docstring'd: graphene
# uses class docstrings as the schema description.)
class CreateAuthor(graphene.Mutation):
    class Arguments:
        # Author name; also used as the page title.
        name = graphene.String()
        # Database id of the parent Page to attach the new author under.
        parent = graphene.Int()
        slug = graphene.String()
    # Mutation payload: success flag plus the created page.
    ok = graphene.Boolean()
    author = graphene.Field(
        PageInterface,
    )
    def mutate(root, info, name, parent, slug):
        # We use uuid here in order to ensure the slug will always be unique across tests
        author = AuthorPage(name=name, title=name, slug=slug)
        ok = True
        # add_child saves the page inside the parent's tree.
        Page.objects.get(id=parent).add_child(instance=author)
        # Create a revision and publish it so the page is live.
        author.save_revision().publish()
        return CreateAuthor(author=author, ok=ok)
# Root mutation type exposing the CreateAuthor mutation to the schema.
# (Comment instead of docstring: graphene surfaces docstrings as the
# GraphQL schema description.)
class Mutations(graphene.ObjectType):
    create_author = CreateAuthor.Field()
| 28.65625 | 89 | 0.7012 |
a77eb82f0494762260f40bd5f6bcef42715bd451 | 30,761 | py | Python | opennmt/training.py | feze23/OpenNMT-tf | 27d8de46021ded39b6ffc9f09ec15b141af6cf5e | [
"MIT"
] | null | null | null | opennmt/training.py | feze23/OpenNMT-tf | 27d8de46021ded39b6ffc9f09ec15b141af6cf5e | [
"MIT"
] | null | null | null | opennmt/training.py | feze23/OpenNMT-tf | 27d8de46021ded39b6ffc9f09ec15b141af6cf5e | [
"MIT"
] | null | null | null | """Training related classes and functions."""
import collections
import contextlib
import itertools
import time
import tensorflow as tf
from opennmt.inputters import text_inputter
from opennmt.optimizers import utils as optimizer_util
from opennmt.utils import compat
from opennmt.utils import misc
def _add_mixed_precision_wrapper(optimizer):
    """Wrap *optimizer* in a Keras loss-scale optimizer for mixed precision.

    Picks the stable ``tf.keras.mixed_precision.LossScaleOptimizer`` when the
    installed TensorFlow provides it, otherwise falls back to the
    ``experimental`` class with a dynamic loss scale.  An optimizer that is
    already wrapped is returned unchanged.
    """
    # TODO: clean mixed precision API when TensorFlow requirement is updated to >=2.4.
    wrapper_class = None
    wrapper_kwargs = {}
    if compat.tf_supports("keras.mixed_precision.LossScaleOptimizer"):
        wrapper_class = tf.keras.mixed_precision.LossScaleOptimizer
    else:
        wrapper_class = tf.keras.mixed_precision.experimental.LossScaleOptimizer
        wrapper_kwargs = dict(loss_scale="dynamic")
    # Avoid double-wrapping when the caller already applied loss scaling.
    if not isinstance(optimizer, wrapper_class):
        optimizer = wrapper_class(optimizer, **wrapper_kwargs)
    return optimizer
class Trainer:
"""Base class for model trainer, implementing single-GPU training."""
    def __init__(self, model, optimizer, checkpoint=None):
        """Initializes the trainer.

        Args:
          model: A :class:`opennmt.models.Model` instance to train.
          optimizer: A ``tf.keras.optimizers.Optimizer`` instance.
          checkpoint: A :class:`opennmt.utils.checkpoint.Checkpoint` instance. If
            not set, no checkpoints will be saved.

        Raises:
          ValueError: if ``optimizer`` is ``None``.
        """
        self._checkpoint = checkpoint
        self._model = model
        # Summaries go next to the checkpoints when available, otherwise to a
        # no-op writer so the training code can log unconditionally.
        if checkpoint is not None:
            self._summary_writer = tf.summary.create_file_writer(checkpoint.model_dir)
        else:
            self._summary_writer = tf.summary.create_noop_writer()
        self._training_stats = None
        self._gradient_accumulator = optimizer_util.GradientAccumulator()
        self._mixed_precision = misc.mixed_precision_enabled()
        if optimizer is None:
            raise ValueError("No optimizer is defined")
        # Mixed precision requires loss scaling to avoid gradient underflow.
        if self._mixed_precision:
            optimizer = _add_mixed_precision_wrapper(optimizer)
        self._optimizer = optimizer
    @property
    def is_master(self):
        """Whether this process is the master replica (always ``True`` for
        single-GPU training; distributed subclasses may override)."""
        return True
    @property
    def num_replicas(self):
        """Number of synchronous training replicas (1 for this base,
        single-device trainer)."""
        return 1
    def __call__(
        self,
        dataset,
        max_step=None,
        accum_steps=1,
        report_steps=100,
        save_steps=5000,
        evaluator=None,
        eval_steps=5000,
        moving_average_decay=None,
    ):
        """Runs the training.

        Args:
          dataset: A ``tf.data.Dataset`` or a function taking a ``tf.distribute.InputContext``
            instance and returning a ``tf.data.Dataset``.
          max_step: The final training step.
          accum_steps: The number of gradient accumulation steps.
          report_steps: Report status every this many steps.
          save_steps: Save a checkpoint every this many steps.
          evaluator: A :class:`opennmt.evaluation.Evaluator` instance to call for
            evaluation.
          eval_steps: Evaluate every this many steps.
          moving_average_decay: If set, maintain an exponential moving average of the model
            variables using this decay value (usually close to 1, e.g. 0.9999). See
            https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.

        Returns:
          A dictionary with various training statistics.

        Raises:
          RuntimeError: if training cannot start (max_step already reached or
            early stopping conditions already met), or if no training step was
            executed (e.g. empty/filtered-out dataset).
        """
        # Refuse to start when there is nothing left to train.
        if max_step is not None and self._optimizer.iterations.numpy() >= max_step:
            raise RuntimeError(
                "The training already reached max_step (%d). If you "
                "want to continue the training, you should increase the "
                "max_step value in the training parameters." % max_step
            )
        if evaluator is not None and evaluator.should_stop():
            raise RuntimeError(
                "The early stopping conditions are already met. If you "
                "want to continue the training, you should update your "
                "early stopping parameters."
            )
        self._gradient_accumulator.reset()
        with self._summary_writer.as_default():
            self._training_stats = TrainingStats(
                self._model, self._optimizer, reduce_fn=self._all_reduce_sum
            )
            iterations = self._optimizer.iterations
            tf.summary.experimental.set_step(iterations)

            step = None
            moving_average = None
            for loss in self._steps(
                dataset, accum_steps=accum_steps, report_steps=report_steps
            ):
                # The moving average is created lazily on the first step and
                # only maintained on the master replica.
                if moving_average_decay is not None and self.is_master:
                    if moving_average is None:
                        moving_average = MovingAverage(
                            self._model.trainable_variables,
                            iterations,
                            decay=moving_average_decay,
                        )
                    self._update_moving_average(moving_average)

                step = iterations.numpy()

                # Throughput is reset after any report/save/eval so that slow
                # bookkeeping does not pollute the words-per-second figures.
                reset_throughput = False
                self._training_stats.update_on_step(step, loss)
                if step % report_steps == 0:
                    self._training_stats.log(self.is_master)
                    reset_throughput = True
                if step == 1 or (save_steps is not None and step % save_steps == 0):
                    self._save_checkpoint(step, moving_average=moving_average)
                    reset_throughput = True
                if eval_steps is not None and step % eval_steps == 0:
                    early_stop = self._evaluate(
                        evaluator, step, moving_average=moving_average
                    )
                    reset_throughput = True
                    if early_stop:
                        tf.get_logger().warning(
                            "Early stopping conditions are met. Exiting."
                        )
                        break
                if step == max_step:
                    break
                if reset_throughput:
                    self._training_stats.reset_throughput()

            if step is None:
                raise RuntimeError(
                    "No training steps were executed. This usually means the "
                    "training file is empty or all examples were filtered out. "
                    "For the latter, verify that the maximum_*_length values are "
                    "consistent with your data."
                )

            self._training_stats.log_final(self.is_master)
            summary = self._training_stats.get_global_summary()
            # Always save and evaluate the final state before returning.
            self._save_checkpoint(step, moving_average=moving_average)
            self._evaluate(evaluator, step, moving_average=moving_average)
            return summary
    def _save_checkpoint(self, step, moving_average=None):
        """Saves a checkpoint for step.

        Only the master replica saves, and only if a checkpoint manager is
        configured and this step was not already saved.  When a moving
        average is provided, the averaged weights are temporarily swapped in
        so they are what gets checkpointed.
        """
        if (
            not self.is_master
            or self._checkpoint is None
            or step == self._checkpoint.last_saved_step
        ):
            return
        # Context manager that shadows variables with their moving average,
        # or a no-op when no averaging is enabled.
        shadow_variables = (
            moving_average.shadow_variables()
            if moving_average is not None
            else contextlib.suppress()
        )
        with shadow_variables:
            self._checkpoint.save(step)
    def _evaluate(self, evaluator, step, moving_average=None):
        """Runs evaluation for step. Returns ``True`` if early conditions are met.

        Evaluation only runs on the master replica, when an evaluator is
        configured and this step was not already evaluated.  When a moving
        average is provided, the averaged weights are temporarily swapped in
        for the evaluation.
        """
        if (
            not self.is_master
            or evaluator is None
            or step == evaluator.last_evaluated_step
        ):
            return False
        shadow_variables = (
            moving_average.shadow_variables()
            if moving_average is not None
            else contextlib.suppress()
        )
        with shadow_variables:
            evaluator(step)
        return evaluator.should_stop()
def _finalize_dataset(self, dataset):
"""Returns the final dataset instance to be used for training.
Args:
dataset: A ``tf.data.Dataset`` or a function taking a ``tf.distribute.InputContext``
instance and returning a ``tf.data.Dataset``.
Returns:
A ``tf.data.Dataset``.
"""
if callable(dataset):
dataset = dataset(tf.distribute.InputContext())
return dataset
def _steps(self, dataset, accum_steps=1, report_steps=None):
    """Returns a generator over training steps (i.e. parameters update).

    Args:
      dataset: The training dataset.
      accum_steps: Accumulate the gradients of this many steps/batches.
      report_steps: Report summary statistics every this many steps. This should
        typically be used in a ``tf.summary.record_if`` context.

    Returns:
      A generator that yields a loss value to report for this step.
    """
    dataset = self._finalize_dataset(dataset)
    iterator = iter(dataset)

    # We define 2 separate functions to support gradient accumulation:
    # * forward: compute and accumulate the gradients
    # * step: apply the gradients
    # When gradient accumulation is disabled, the forward function also applies the gradients.

    def _forward():
        # We get the next dataset element within the function for increased efficiency
        # and avoid dealing with tf.function input signatures.
        source, target = next(iterator)
        return self._forward(
            source,
            target,
            accum_steps=accum_steps,
            report_steps=report_steps,
        )

    def _step():
        return self._step()

    # Wrap forward and step with tf.function to run in graph mode.
    forward_fn = tf.function(_forward)
    # Without accumulation the forward pass already applied the gradients,
    # so the step function is a no-op.
    step_fn = tf.function(_step) if accum_steps > 1 else lambda: None

    step_loss = 0
    for i in itertools.count():
        try:
            loss = forward_fn()
        except (
            StopIteration,
            tf.errors.OutOfRangeError,
        ):  # Dataset iterator exhausted.
            break
        if tf.math.is_nan(loss):
            raise RuntimeError("Model diverged with loss = NaN.")
        # Accumulate the reported loss over the accum_steps micro-batches
        # that make up one parameter update.
        step_loss += float(loss)
        if (i + 1) % accum_steps == 0:
            step_fn()
            if i + 1 == accum_steps:
                # After the first parameter update, synchronize variables
                # across replicas (a no-op for single-replica training;
                # overridden by distributed trainers).
                self._broadcast_variables()
            yield step_loss
            step_loss = 0
def _run_model(self, source, target, accum_steps=1):
    """Computes the loss of the given source and target pair.

    Args:
      source: A nested structure of tensors.
      target: A nested structure of tensors.
      accum_steps: The number of gradient accumulation steps.

    Returns:
      A tuple containing,

      - The loss to compute the gradients.
      - The loss to report.
    """
    # True only before the first forward pass builds the model variables.
    first_call = not self._model.built
    outputs, _ = self._model(
        source, labels=target, training=True, step=self._optimizer.iterations
    )
    loss = self._model.compute_loss(outputs, target, training=True)
    if isinstance(loss, tuple):
        # Assumes a (loss, normalization[, reported normalization]) tuple
        # from compute_loss — TODO confirm against the model's contract.
        training_loss = loss[0] / loss[1]
        reported_loss = loss[0] / loss[2] if len(loss) > 2 else training_loss
    else:
        training_loss, reported_loss = loss, loss
    training_loss = self._model.regularize_loss(
        training_loss, variables=self._model.trainable_variables
    )
    # Scale by the number of accumulation steps and replicas so that the
    # summed gradients match a single larger-batch update.
    loss_scale = accum_steps * self.num_replicas
    training_loss /= loss_scale
    reported_loss /= loss_scale
    self._training_stats.update_on_example(source, target)
    if first_call and self.is_master:
        # Log model information once, on the very first call.
        if self._checkpoint is not None:
            self._model.visualize(self._checkpoint.model_dir)
        tf.get_logger().info(
            "Number of model parameters: %d", self._model.count_params()
        )
        tf.get_logger().info(
            "Number of model weights: %d (trainable = %d, non trainable = %d)",
            len(self._model.weights),
            len(self._model.trainable_weights),
            len(self._model.non_trainable_weights),
        )
    return training_loss, reported_loss
def _should_record_summaries(self, accum_steps, report_steps):
    """Return the predicate passed to ``tf.summary.record_if``.

    The result is either the Python value ``False`` (summaries disabled)
    or a boolean tensor that is true every ``report_steps`` parameter
    updates on the master worker.
    """
    if report_steps is None:
        return False
    if not self.is_master:
        return False
    should_record = tf.equal(self._optimizer.iterations % report_steps, 0)
    if accum_steps > 1:
        # Only record on the first accumulation step so that each parameter
        # update logs at most once.
        at_accum_start = tf.equal(self._gradient_accumulator.step, 0)
        should_record = tf.logical_and(should_record, at_accum_start)
    return should_record
def _compute_gradients(self, source, target, accum_steps, report_steps):
    """Computes the gradient of a training example."""
    record_summaries = self._should_record_summaries(accum_steps, report_steps)
    with tf.summary.record_if(record_summaries):
        if tf.executing_eagerly():
            # Eager path: use a gradient tape and handle mixed precision
            # loss scaling explicitly.
            with tf.GradientTape() as tape:
                training_loss, reported_loss = self._run_model(
                    source, target, accum_steps=accum_steps
                )
                if self._mixed_precision:
                    training_loss = self._optimizer.get_scaled_loss(training_loss)
            gradients = tape.gradient(
                training_loss, self._model.trainable_variables
            )
            if self._mixed_precision:
                gradients = self._optimizer.get_unscaled_gradients(gradients)
        else:
            # Graph path: delegate differentiation to the optimizer.
            training_loss, reported_loss = self._run_model(
                source, target, accum_steps=accum_steps
            )
            # In mixed precision training, LossScaleOptimizer.get_gradients takes care
            # of loss scaling.
            gradients = self._optimizer.get_gradients(
                training_loss, self._model.trainable_variables
            )
        _summarize_gradients(gradients, record_summaries)
    return reported_loss, gradients
def _apply_gradients(self, gradients):
    """Apply the given gradients to the model's trainable variables."""
    grads_and_vars = [
        (gradient, variable)
        for gradient, variable in zip(gradients, self._model.trainable_variables)
    ]
    self._optimizer.apply_gradients(grads_and_vars)
def _forward(self, source, target, accum_steps=1, report_steps=None):
    """Forward one training example and accumulate or apply its gradients.

    Returns the loss value to report for this example.
    """
    loss, gradients = self._compute_gradients(
        source, target, accum_steps, report_steps
    )
    if accum_steps <= 1:
        # No accumulation: update the weights immediately.
        self._apply_gradients(gradients)
    else:
        self._gradient_accumulator(gradients)
    return loss
def _step(self):
    """Applies gradients and resets accumulation."""
    # Apply the gradients accumulated over the last accum_steps batches,
    # then clear the accumulator for the next parameter update.
    self._apply_gradients(self._gradient_accumulator.gradients)
    self._gradient_accumulator.reset()

def _update_moving_average(self, moving_average):
    """Updates the moving average of variables."""
    moving_average.update()

def _broadcast_variables(self):
    """Broadcasts variables to other replicas, if required."""
    # Base trainer is single-replica: nothing to broadcast. Distributed
    # subclasses (e.g. HorovodTrainer) override this.
    return

def _all_reduce_sum(self, value):
    """Reduces the value across all replicas."""
    # Base trainer is single-replica: the reduction is the identity.
    return value
class HorovodTrainer(Trainer):
    """Trainer compatible with Horovod distributed training."""

    def __init__(self, model, optimizer, hvd, checkpoint=None):
        """Initializes the Horovod trainer.

        Args:
          model: A :class:`opennmt.models.Model` instance to train.
          optimizer: A ``tf.keras.optimizers.Optimizer`` instance.
          hvd: The global Horovod object.
          checkpoint: A :class:`opennmt.utils.checkpoint.Checkpoint` instance. If
            not set, no checkpoints will be saved.
        """
        super().__init__(model, optimizer, checkpoint=checkpoint)
        self._hvd = hvd

    @property
    def is_master(self):
        # Rank 0 acts as the master worker.
        return self._hvd.rank() == 0

    @property
    def num_replicas(self):
        return self._hvd.size()

    def _finalize_dataset(self, dataset):
        if not callable(dataset):
            return dataset
        # Shard the input pipeline across the Horovod workers.
        input_context = tf.distribute.InputContext(
            num_input_pipelines=self._hvd.size(),
            input_pipeline_id=self._hvd.rank(),
            num_replicas_in_sync=self._hvd.size(),
        )
        return dataset(input_context)

    def _apply_gradients(self, gradients):
        # Sum-reduce each gradient across workers before applying.
        reduced = [self._all_reduce_sum(gradient) for gradient in gradients]
        return super()._apply_gradients(reduced)

    def _broadcast_variables(self):
        # Ensure all workers start from identical weights and optimizer slots.
        self._hvd.broadcast_variables(self._model.variables, root_rank=0)
        self._hvd.broadcast_variables(self._optimizer.variables(), root_rank=0)

    def _all_reduce_sum(self, value):
        return self._hvd.allreduce(value, op=self._hvd.Sum)
class MirroredStrategyTrainer(Trainer):
    """Trainer based on ``tf.distribute.MirroredStrategy`` for local multi-GPU training."""

    def __init__(self, model, optimizer, checkpoint=None, devices=None):
        """Initializes the MirroredStrategy trainer.

        Args:
          model: A :class:`opennmt.models.Model` instance to train.
          optimizer: A ``tf.keras.optimizers.Optimizer`` instance.
          checkpoint: A :class:`opennmt.utils.checkpoint.Checkpoint` instance. If
            not set, no checkpoints will be saved.
          devices: List of device strings to use for training. If not set, all
            visible GPUs are used.
        """
        super().__init__(model, optimizer, checkpoint=checkpoint)
        self._strategy = tf.distribute.MirroredStrategy(devices=devices)
        with self._strategy.scope():
            # Create some variables under the strategy scope.
            _ = self._optimizer.iterations

    @property
    def num_replicas(self):
        return self._strategy.num_replicas_in_sync

    def _finalize_dataset(self, dataset):
        # We prefer not to use experimental_distribute_dataset here because it
        # sometimes fails to split the batches (noticed with tokens batch type).
        dataset_fn = dataset if callable(dataset) else lambda _: dataset
        # TODO: clean this API usage when TensorFlow requirement is updated to >=2.4.
        # The method was renamed in TF 2.4; fall back to the experimental name.
        distribute_fn = getattr(
            self._strategy, "distribute_datasets_from_function", None
        )
        if distribute_fn is None:
            distribute_fn = (
                self._strategy.experimental_distribute_datasets_from_function
            )
        return distribute_fn(dataset_fn)

    def _forward(self, source, target, accum_steps=1, report_steps=None):
        # Run the forward pass on every replica, then sum the per-replica losses.
        per_replica_loss = self._strategy.run(
            super()._forward,
            args=(source, target),
            kwargs=dict(accum_steps=accum_steps, report_steps=report_steps),
        )
        # TODO: this reduction could be delayed until _step is called.
        return self._strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_loss, None)

    def _step(self):
        self._strategy.run(super()._step)

    def _update_moving_average(self, moving_average):
        with self._strategy.scope():
            super()._update_moving_average(moving_average)
def _summarize_gradients(gradients, should_record):
    """Record the global norm of the gradients as a summary.

    ``should_record`` is either a Python bool or a boolean tensor. The
    (potentially expensive) global norm is only computed when the summary
    is actually going to be recorded.
    """
    if should_record is False:
        # Statically disabled: skip building the summary op entirely.
        return
    norm_or_zero = tf.cond(
        should_record,
        true_fn=lambda: tf.linalg.global_norm(gradients),
        false_fn=lambda: tf.constant(0, dtype=gradients[0].dtype),
    )
    tf.summary.scalar("gradients/global_norm", norm_or_zero)
class MovingAverage(object):
    """Object holding an exponential moving average of variables."""

    def __init__(self, variables, step, decay=0.9999):
        """Initializes the moving average object.

        Args:
          variables: The list of variable for which to maintain a moving average.
          step: The training step counter as a ``tf.Variable``.
          decay: The decay rate of the exponential moving average. Usually close to
            1, e.g. 0.9999, see the complete formula on
            https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.

        Raises:
          TypeError: if :obj:`step` is not a ``tf.Variable``.
        """
        if not isinstance(step, tf.Variable):
            raise TypeError("step should be a tf.Variable")
        if decay < 0.9 or decay > 1:
            # Bug fix: the "%f" placeholder previously had no matching
            # argument, so the message was logged with a literal "%f".
            # Pass the decay value as a lazy logging argument.
            tf.get_logger().warning(
                "Moving average decay should be close to 1 (e.g. 0.9999) but you "
                "passed %f, is it correct? See https://www.tensorflow.org/api_docs"
                "/python/tf/train/ExponentialMovingAverage for details about the "
                "formula and recommended decay values.",
                decay,
            )
        self._ema = tf.train.ExponentialMovingAverage(decay, num_updates=step)
        self._variables = variables

    @tf.function
    def update(self):
        """Updates the moving average of the variables."""
        self._ema.apply(self._variables)

    @contextlib.contextmanager
    def shadow_variables(self):
        """Returns a context manager that assigns the variables to their moving
        average value on enter and restores the previous value on exit.

        Returns:
          A context manager.
        """
        # Snapshot the current values before swapping in the averaged ones.
        previous_values = []
        for variable in self._variables:
            previous_values.append(variable.value())
            variable.assign(self._ema.average(variable))
        try:
            yield
        finally:
            # Robustness: restore the original values even if the body raised,
            # so an evaluation error cannot leave averaged weights in place.
            for previous_value, variable in zip(previous_values, self._variables):
                variable.assign(previous_value)
class TrainingStats:
    """Aggregate and summarize training statistics."""

    def __init__(self, model, optimizer, reduce_fn=None, warmup_steps=2):
        """Initializes the statistics.

        Args:
          model: The model.
          optimizer: The optimizer.
          reduce_fn: In case of distributed training, a function to sum reduce
            distributed values.
          warmup_steps: Throughput values are ignored for this many steps at the
            start of the training.
        """
        self._model = model
        self._optimizer = optimizer
        self._reduce_fn = reduce_fn
        self._warmup_steps = warmup_steps
        # Per-name tf.Variable counters of processed words (for throughput).
        self._words_counters = {}
        self._num_updates = 0
        self._average_loss = 0
        self._last_loss = None
        self._last_step = optimizer.iterations.numpy()
        self._last_logged_step = self._last_step
        self._last_logged_time = time.time()
        # Token counts and out-of-vocabulary token frequencies per side
        # ("source"/"target"), used for the final OOV report.
        self._num_tokens = collections.defaultdict(int)
        self._oov_tokens = collections.defaultdict(lambda: collections.defaultdict(int))

    def update_on_example(self, source, target):
        """Updates the training statistics on a new training example.

        This may be called within a tf.function.

        Args:
          source: A dictionary of source features.
          target: A dictionary of target features.
        """
        self._update_words_counter("source", source)
        self._record_oov_tokens("source", source, self._model.features_inputter)
        if not self._model.unsupervised:
            self._update_words_counter("target", target)
            self._record_oov_tokens("target", target, self._model.labels_inputter)

    def update_on_step(self, step, loss):
        """Updates the training statistics on a new training step.

        Args:
          step: The current training step.
          loss: The loss for this step.
        """
        # Convert Numpy or Tensor values to Python.
        step = int(step)
        loss = float(loss)
        self._last_step = step
        self._last_loss = loss
        # Incremental running mean of the loss over all updates so far.
        self._average_loss = (self._average_loss * self._num_updates + loss) / (
            self._num_updates + 1
        )
        if self._num_updates < self._warmup_steps:
            # Ignore throughput measured during the warmup steps.
            self.reset_throughput()
        self._num_updates += 1

    def get_last_summary(self):
        """Returns a summary of the training since the last log.

        Returns:
          A dictionary containing various statistics.
        """
        elapsed_time = time.time() - self._last_logged_time
        return {
            "learning_rate": self._get_learning_rate(),
            "loss": self._last_loss,
            "step": self._last_step,
            "steps_per_sec": (self._last_step - self._last_logged_step) / elapsed_time,
            "words_per_sec": {
                name: int(value.numpy() / elapsed_time)
                for name, value in self._get_words_counters().items()
            },
        }

    def get_global_summary(self):
        """Returns a summary of the training since the beginning.

        Returns:
          A dictionary containing various statistics.
        """
        return {
            "average_loss": self._average_loss,
            "last_learning_rate": self._get_learning_rate(),
            "last_loss": self._last_loss,
            "last_step": self._last_step,
            "num_steps": self._num_updates,
        }

    def log(self, is_master=True):
        """Logs the last training statistics.

        Args:
          is_master: Whether this process is the master worker or not.
        """
        # Only the master should log the training statistics but we build the
        # summary on all workers since it may reduce distributed values.
        summary = self.get_last_summary()
        if not is_master:
            return
        tf.summary.scalar(
            "steps_per_sec",
            summary["steps_per_sec"],
            description="Training steps per second",
        )
        steps_per_sec_fmt = "steps/s = %0.2f" % summary["steps_per_sec"]
        words_per_sec_fmt = []
        for name, avg in summary["words_per_sec"].items():
            tf.summary.scalar(
                "words_per_sec/%s" % name,
                avg,
                description="%s words per second" % name.capitalize(),
            )
            words_per_sec_fmt.append("%s words/s = %d" % (name, avg))
        tf.get_logger().info(
            "Step = %d ; %s ; Learning rate = %f ; Loss = %f",
            summary["step"],
            ", ".join([steps_per_sec_fmt] + list(sorted(words_per_sec_fmt))),
            summary["learning_rate"],
            summary["loss"],
        )
        tf.summary.scalar("loss", summary["loss"], description="Training loss")
        tf.summary.scalar(
            "optim/learning_rate", summary["learning_rate"], description="Learning rate"
        )

    def log_final(self, is_master=True):
        """Outputs the final log."""
        if not is_master:
            return
        # Report the out-of-vocabulary rate and most frequent OOV tokens
        # for each side that recorded any.
        for name, oov_tokens in self._oov_tokens.items():
            num_oov_tokens = sum(oov_tokens.values())
            if num_oov_tokens > 0:
                num_tokens = self._num_tokens[name]
                tf.get_logger().warning(
                    "%.3f%% of %s tokens are out of vocabulary (%d out of %d tokens)",
                    (num_oov_tokens / num_tokens) * 100,
                    name,
                    num_oov_tokens,
                    num_tokens,
                )
                most_frequent_oov_tokens = (
                    "%s (%.1f%%)" % (oov_token, (count / num_oov_tokens) * 100)
                    for oov_token, count in sorted(
                        oov_tokens.items(),
                        key=lambda x: x[1],
                        reverse=True,
                    )
                )
                most_frequent_oov_tokens = list(most_frequent_oov_tokens)[:10]
                tf.get_logger().info(
                    "The %d most frequent out of vocabulary %s tokens are: %s",
                    len(most_frequent_oov_tokens),
                    name,
                    "; ".join(most_frequent_oov_tokens),
                )

    def reset_throughput(self):
        """Resets the accumulated values since the last log."""
        self._reset_words_counters()
        self._last_logged_step = self._last_step
        self._last_logged_time = time.time()

    def _get_learning_rate(self):
        # Schedules must be evaluated at the current step to get a value.
        learning_rate = self._optimizer.learning_rate
        if isinstance(learning_rate, tf.optimizers.schedules.LearningRateSchedule):
            learning_rate = learning_rate(self._last_step)
        return float(learning_rate)

    def _record_oov_tokens(self, name, features, inputter):
        # OOV tracking is only meaningful for word-level embeddings.
        if not isinstance(inputter, text_inputter.WordEmbedder):
            return

        def _record(num_tokens, oov_tokens):
            # Runs as a numpy_function: values are Numpy scalars/arrays.
            self._num_tokens[name] += int(num_tokens)
            all_oov_tokens = self._oov_tokens[name]
            for oov_token in oov_tokens.flatten():
                all_oov_tokens[oov_token.decode("utf-8")] += 1

        num_tokens = tf.reduce_sum(
            inputter.get_length(features, ignore_special_tokens=True)
        )
        oov_tokens = inputter.get_oov_tokens(features)
        tf.numpy_function(_record, [num_tokens, oov_tokens], [])

    def _update_words_counter(self, name, features):
        """Accumulates the number of source and target tokens to report throughput."""
        length = features.get("length")
        if length is None:
            return
        num_words = tf.reduce_sum(length)
        counter = self._words_counters.get(name)
        if counter is None:
            # Lazily create one counter per name; SUM aggregation makes the
            # value consistent under distribution strategies.
            counter = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.SUM,
            )
            self._words_counters[name] = counter
        counter.assign_add(tf.cast(num_words, tf.int64))

    @tf.function
    def _get_words_counters(self):
        """Returns the accumulated words counters.

        Returns:
          A dictionary mapping a counter name to a value.
        """
        counters = {}
        for name, counter in self._words_counters.items():
            counter = counter.read_value()
            if self._reduce_fn is not None:
                counter = self._reduce_fn(counter)
            counters[name] = counter
        return counters

    @tf.function
    def _reset_words_counters(self):
        """Resets the accumulated words counters."""
        for counter in self._words_counters.values():
            counter.assign(tf.constant(0, dtype=tf.int64))
| 38.403246 | 98 | 0.604304 |
c53bff30b8c633b9cacb67f37bbe6bbcbcdfee66 | 3,928 | py | Python | src/gamesbyexample/powerballlottery.py | Hacker-Boy9999/PythonStdioGames | be96635a3a54e0ca32f2658f9e7089b173db5fa2 | [
"Python-2.0"
] | null | null | null | src/gamesbyexample/powerballlottery.py | Hacker-Boy9999/PythonStdioGames | be96635a3a54e0ca32f2658f9e7089b173db5fa2 | [
"Python-2.0"
] | null | null | null | src/gamesbyexample/powerballlottery.py | Hacker-Boy9999/PythonStdioGames | be96635a3a54e0ca32f2658f9e7089b173db5fa2 | [
"Python-2.0"
] | null | null | null | """Powerball Lottery, by Al Sweigart al@inventwithpython.com
A simulation of the lottery so you can experience the thrill of
losing the lottery without wasting your money.
This and other games are available at https://nostarch.com/XX
Tags: short, humor, simulation"""
__version__ = 0

import random

print('''Powerball Lottery, by Al Sweigart al@inventwithpython.com
Each powerball lottery ticket costs $2. The jackpot for this game
is $1.586 billion! It doesn't matter what the jackpot is though,
because the odds are 1 in 292,201,338, so you won't win.
This simulation gives you the thrill of playing without wasting money.
''')

# Let the player enter the first five numbers, 1 to 69:
while True:
    print('Enter 5 different numbers from 1 to 69, with spaces between')
    print('each number. (For example: 5 17 23 42 50)')
    response = input('> ')

    # Check that the player entered 5 things:
    numbers = response.split()
    if len(numbers) != 5:
        print('Please enter 5 numbers, separated by spaces.')
        continue

    # Convert the strings into integers:
    try:
        for i in range(5):
            numbers[i] = int(numbers[i])
    except ValueError:
        print('Please enter numbers, like 27, 35, or 62.')
        continue

    # Check that the numbers are between 1 and 69.
    # BUG FIX: the original `continue` was inside the for loop, so it only
    # continued the *for* loop and out-of-range numbers were accepted.
    # Validate all five numbers first, then restart the input loop.
    if any(not (1 <= number <= 69) for number in numbers):
        print('The numbers must all be between 1 and 69.')
        continue

    # Check that the numbers are unique:
    # (Create a set from numbers to remove duplicates.)
    if len(set(numbers)) != 5:
        print('You must enter 5 different numbers.')
        continue

    break

# Let the player select the powerball, 1 to 26:
while True:
    print('Enter the powerball number from 1 to 26.')
    response = input('> ')

    # Convert the string into an integer:
    try:
        powerball = int(response)
    except ValueError:
        print('Please enter a number, like 3, 15, or 22.')
        continue

    # Check that the number is between 1 and 26:
    if not (1 <= powerball <= 26):
        print('The powerball number must be between 1 and 26.')
        continue

    break

# Enter the number of times you want to play:
while True:
    print('How many times do you want to play? (Max: 1000000)')
    response = input('> ')

    # Convert the string into an integer:
    try:
        numPlays = int(response)
    except ValueError:
        print('Please enter a number, like 3, 15, or 22000.')
        continue

    # Check that the number is between 1 and 1000000:
    if not (1 <= numPlays <= 1000000):
        print('You can play between 1 and 1000000 times.')
        continue

    break

# Run the simulation:
price = '$' + str(2 * numPlays)
print('It costs', price, 'to play', numPlays, 'times, but don\'t')
print('worry. I\'m sure you\'ll win it all back.')
input('Press Enter to start...')

possibleNumbers = list(range(1, 70))
# Renamed the outer loop variable (was `i`) so it no longer shadows the
# inner display loop's index.
for play in range(numPlays):
    # Come up with lottery numbers:
    random.shuffle(possibleNumbers)
    winningNumbers = possibleNumbers[0:5]
    winningPowerball = random.randint(1, 26)

    # Display winning numbers:
    print('The winning numbers are: ', end='')
    allWinningNums = ''
    for i in range(5):
        allWinningNums += str(winningNumbers[i]) + ' '
    allWinningNums += 'and ' + str(winningPowerball)
    print(allWinningNums.ljust(21), end='')

    # NOTE: Sets are not ordered, so it doesn't matter what order the
    # integers in set(numbers) and set(winningNumbers) are.
    if (set(numbers) == set(winningNumbers)
            and powerball == winningPowerball):
        print()
        print('You have won the Powerball Lottery! Congratulations,')
        print('you would be a billionaire if this was real!')
        break
    else:
        print(' You lost.')  # The leading space is required here.

print('You have wasted', price)
print('Thanks for playing!')
3f96b0aff0cac543ee50f8adfd7fd692e3ef8b52 | 1,223 | py | Python | ejemplo02/consulta_datos2.py | PlataformasWeb-P-AA2021/clase07-1bim-vysery98 | 50e48beb294efa4eb7b0ecda4c6145ac74581fea | [
"Unlicense",
"MIT"
] | null | null | null | ejemplo02/consulta_datos2.py | PlataformasWeb-P-AA2021/clase07-1bim-vysery98 | 50e48beb294efa4eb7b0ecda4c6145ac74581fea | [
"Unlicense",
"MIT"
] | null | null | null | ejemplo02/consulta_datos2.py | PlataformasWeb-P-AA2021/clase07-1bim-vysery98 | 50e48beb294efa4eb7b0ecda4c6145ac74581fea | [
"Unlicense",
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import and_ # se importa el operador and
# se importa la clase(s) del
# archivo genera_tablas
from genera_tablas import Club, Jugador
# se importa información del archivo configuracion
from configuracion import cadena_base_datos
# Create the engine and open a session against the configured database.
engine = create_engine(cadena_base_datos)
Session = sessionmaker(bind=engine)
session = Session()

# Fetch every record of the Jugador (player) entity.
jugadores = session.query(Jugador).all()

# Iterate over the result list with a for loop.
print("Presentación de Jugadores")
for s in jugadores:
    print("%s" % (s))
    # From each Jugador object we can reach its Club through the
    # relationship defined when the Jugador class was created.
    print("El Jugador pertenece a: %s " % (s.club))
    print("---------")

print("Presentación de Jugadores - op2")
for s in jugadores:
    print("%s" % (s))
    # Same as above, but printing only the club's `nombre` attribute.
    print("El Jugador pertenece a: %s " % (s.club.nombre))
    print("---------")
d19bf8587714026758f1d389f9dcba5b2385782e | 24,860 | py | Python | pandas/conftest.py | rkbk2000/pandas | e95ac5c4fd49acf63bb671f81e97ddeb4a1460f0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/conftest.py | rkbk2000/pandas | e95ac5c4fd49acf63bb671f81e97ddeb4a1460f0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/conftest.py | rkbk2000/pandas | e95ac5c4fd49acf63bb671f81e97ddeb4a1460f0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from collections import abc
from datetime import date, time, timedelta, timezone
from decimal import Decimal
import operator
import os
from dateutil.tz import tzlocal, tzutc
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pytz import FixedOffset, utc
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.core import ops
from pandas.core.indexes.api import Index, MultiIndex
# Register and activate a hypothesis profile for the whole test session.
hypothesis.settings.register_profile(
    "ci",
    # Hypothesis timing checks are tuned for scalars by default, so we bump
    # them from 200ms to 500ms per test case as the global default. If this
    # is too short for a specific test, (a) try to make it faster, and (b)
    # if it really is slow add `@settings(deadline=...)` with a working value,
    # or `deadline=None` to entirely disable timeouts for that test.
    deadline=500,
    suppress_health_check=(hypothesis.HealthCheck.too_slow,),
)
hypothesis.settings.load_profile("ci")
def pytest_addoption(parser):
    """Register pandas' custom command line options with pytest."""
    options = [
        ("--skip-slow", "skip slow tests"),
        ("--skip-network", "skip network tests"),
        ("--skip-db", "skip db tests"),
        ("--run-high-memory", "run high memory tests"),
        ("--only-slow", "run only slow tests"),
        ("--strict-data-files", "Fail if a test is skipped for missing data file."),
    ]
    # All options are simple boolean flags.
    for name, help_text in options:
        parser.addoption(name, action="store_true", help=help_text)
def pytest_runtest_setup(item):
    """Skip tests based on their markers and the custom command line options."""

    def skip_if(condition, reason):
        if condition:
            pytest.skip(reason)

    opt = item.config.getoption
    keywords = item.keywords
    skip_if("slow" in keywords and opt("--skip-slow"), "skipping due to --skip-slow")
    # --only-slow inverts the logic: everything NOT marked slow is skipped.
    skip_if("slow" not in keywords and opt("--only-slow"), "skipping due to --only-slow")
    skip_if(
        "network" in keywords and opt("--skip-network"), "skipping due to --skip-network"
    )
    skip_if("db" in keywords and opt("--skip-db"), "skipping due to --skip-db")
    # High memory tests are opt-in.
    skip_if(
        "high_memory" in keywords and not opt("--run-high-memory"),
        "skipping high memory test since --run-high-memory was not set",
    )
@pytest.fixture(autouse=True)
def configure_tests():
    """
    Configure settings for all tests and test modules.
    """
    # Raise on chained assignment so such bugs surface in every test.
    pd.set_option("chained_assignment", "raise")


@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
    """
    Make `np` and `pd` names available for doctests.
    """
    doctest_namespace["np"] = np
    doctest_namespace["pd"] = pd
@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
    """
    Yields scipy sparse matrix classes.
    """
    # Imported lazily so test collection does not require scipy.
    from scipy import sparse

    return getattr(sparse, request.param + "_matrix")


@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis {repr(x)}")
def axis(request):
    """
    Fixture for returning the axis numbers of a DataFrame.
    """
    return request.param


# Alias for tests that want the DataFrame axis fixture under a clearer name.
axis_frame = axis


@pytest.fixture(params=[0, "index"], ids=lambda x: f"axis {repr(x)}")
def axis_series(request):
    """
    Fixture for returning the axis numbers of a Series.
    """
    return request.param


@pytest.fixture
def ip():
    """
    Get an instance of IPython.InteractiveShell.

    Will raise a skip if IPython is not installed.
    """
    pytest.importorskip("IPython", minversion="6.0.0")
    from IPython.core.interactiveshell import InteractiveShell

    return InteractiveShell()


@pytest.fixture(params=[True, False, None])
def observed(request):
    """
    Pass in the observed keyword to groupby for [True, False]
    This indicates whether categoricals should return values for
    values which are not in the grouper [False / None], or only values which
    appear in the grouper [True]. [None] is supported for future compatibility
    if we decide to change the default (and would need to warn if this
    parameter is not passed).
    """
    return request.param


@pytest.fixture(params=[True, False, None])
def ordered_fixture(request):
    """
    Boolean 'ordered' parameter for Categorical.
    """
    return request.param
# Dunder names of the arithmetic operators parametrized below.
_all_arithmetic_operators = [
    "__add__",
    "__radd__",
    "__sub__",
    "__rsub__",
    "__mul__",
    "__rmul__",
    "__floordiv__",
    "__rfloordiv__",
    "__truediv__",
    "__rtruediv__",
    "__pow__",
    "__rpow__",
    "__mod__",
    "__rmod__",
]


@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
    """
    Fixture for dunder names for common arithmetic operations.
    """
    return request.param


@pytest.fixture(
    params=[
        operator.add,
        ops.radd,
        operator.sub,
        ops.rsub,
        operator.mul,
        ops.rmul,
        operator.truediv,
        ops.rtruediv,
        operator.floordiv,
        ops.rfloordiv,
        operator.mod,
        ops.rmod,
        operator.pow,
        ops.rpow,
    ]
)
def all_arithmetic_functions(request):
    """
    Fixture for operator and roperator arithmetic functions.

    Notes
    -----
    Unlike ``all_arithmetic_operators``, the parameters here are callables,
    not dunder names. ``divmod``/``rdivmod`` are not included in the
    parameter list above.
    """
    return request.param


# Names of the numeric reduction methods parametrized below.
_all_numeric_reductions = [
    "sum",
    "max",
    "min",
    "mean",
    "prod",
    "std",
    "var",
    "median",
    "kurt",
    "skew",
]


@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
    """
    Fixture for numeric reduction names.
    """
    return request.param


_all_boolean_reductions = ["all", "any"]


@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
    """
    Fixture for boolean reduction names.
    """
    return request.param


# (function, name) pairs of the Cython aggregation table.
_cython_table = pd.core.base.SelectionMixin._cython_table.items()


@pytest.fixture(params=list(_cython_table))
def cython_table_items(request):
    """
    Yields a tuple of a function and its corresponding name. Correspond to
    the list of aggregator "Cython functions" used on selected table items.
    """
    return request.param
def _get_cython_table_params(ndframe, func_names_and_expected):
    """
    Combine frame, functions from SelectionMixin._cython_table
    keys and expected result.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is a name of a NDFrame method ('sum', 'prod') etc.
        The second item is the expected return value.

    Returns
    -------
    list
        List of three items (DataFrame, function, expected result)
    """
    results = []
    for func_name, expected in func_names_and_expected:
        # The method name itself, followed by every Cython-table callable
        # registered under that name, each paired with the expected result.
        results.append((ndframe, func_name, expected))
        for func, name in _cython_table:
            if name == func_name:
                results.append((ndframe, func, expected))
    return results
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
    """
    Fixture for dunder names for common compare operations

    * >=
    * >
    * ==
    * !=
    * <
    * <=
    """
    return request.param


@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"])
def compare_operators_no_eq_ne(request):
    """
    Fixture for dunder names for compare operations except == and !=

    * >=
    * >
    * <
    * <=
    """
    return request.param


@pytest.fixture(
    params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"]
)
def all_logical_operators(request):
    """
    Fixture for dunder names for common logical operations

    * |
    * &
    * ^
    """
    return request.param


@pytest.fixture(params=[None, "gzip", "bz2", "zip", "xz"])
def compression(request):
    """
    Fixture for trying common compression types in compression tests.
    """
    return request.param


@pytest.fixture(params=["gzip", "bz2", "zip", "xz"])
def compression_only(request):
    """
    Fixture for trying common compression types in compression tests excluding
    uncompressed case.
    """
    return request.param


@pytest.fixture(params=[True, False])
def writable(request):
    """
    Fixture that an array is writable.
    """
    return request.param


@pytest.fixture(scope="module")
def datetime_tz_utc():
    """
    Yields the UTC timezone object from the datetime module.
    """
    return timezone.utc


@pytest.fixture(params=["utc", "dateutil/UTC", utc, tzutc(), timezone.utc])
def utc_fixture(request):
    """
    Fixture to provide variants of UTC timezone strings and tzinfo objects.
    """
    return request.param


@pytest.fixture(params=["inner", "outer", "left", "right"])
def join_type(request):
    """
    Fixture for trying all types of join operations.
    """
    return request.param


@pytest.fixture
def strict_data_files(pytestconfig):
    """
    Returns the configuration for the test setting `--strict-data-files`.
    """
    return pytestconfig.getoption("--strict-data-files")
@pytest.fixture
def datapath(strict_data_files):
    """
    Get the path to a data file.

    Parameters
    ----------
    path : str
        Path to the file, relative to ``pandas/tests/``

    Returns
    -------
    path including ``pandas/tests``.

    Raises
    ------
    ValueError
        If the path doesn't exist and the --strict-data-files option is set.
    """
    base_path = os.path.join(os.path.dirname(__file__), "tests")

    def resolver(*parts):
        path = os.path.join(base_path, *parts)
        if os.path.exists(path):
            return path
        # Missing data file: hard error under --strict-data-files,
        # otherwise just skip the test.
        if strict_data_files:
            raise ValueError(
                f"Could not find file {path} and --strict-data-files is set."
            )
        pytest.skip(f"Could not find {path}.")

    return resolver
@pytest.fixture
def iris(datapath):
    """
    The iris dataset as a DataFrame.
    """
    return pd.read_csv(datapath("data", "iris.csv"))
@pytest.fixture(params=["nlargest", "nsmallest"])
def nselect_method(request):
    """
    Fixture for trying all nselect methods.
    """
    return request.param
# Interval closed-ness options; ``other_closed`` mirrors ``closed`` so tests
# can parametrize over all pairs.
@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
    """
    Fixture for trying all interval closed parameters.
    """
    return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def other_closed(request):
    """
    Secondary closed fixture to allow parametrizing over all pairs of closed.
    """
    return request.param
# NOTE: ``np.float`` was only an alias of the builtin ``float`` and has been
# removed from NumPy (raises AttributeError on NumPy >= 1.24); use the real
# numpy scalar type ``np.float64`` to keep a numpy-flavored NaN in the params.
@pytest.fixture(params=[None, np.nan, pd.NaT, float("nan"), np.float64("NaN"), pd.NA])
def nulls_fixture(request):
    """
    Fixture for each null type in pandas.
    """
    return request.param
nulls_fixture2 = nulls_fixture  # Generate cartesian product of nulls_fixture
# Subset of nulls_fixture whose params are mutually distinct null objects.
@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
    """
    Fixture for each null type in pandas, each null type exactly once.
    """
    return request.param
# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture
# Timezones in several representations: None (naive), IANA / dateutil strings,
# and concrete tzinfo instances (dateutil, fixed offsets, stdlib ``timezone``).
TIMEZONES = [
    None,
    "UTC",
    "US/Eastern",
    "Asia/Tokyo",
    "dateutil/US/Pacific",
    "dateutil/Asia/Singapore",
    tzutc(),
    tzlocal(),
    FixedOffset(300),
    FixedOffset(0),
    FixedOffset(-300),
    timezone.utc,
    timezone(timedelta(hours=1)),
    timezone(timedelta(hours=-1), name="foo"),
]
TIMEZONE_IDS = [repr(i) for i in TIMEZONES]
@td.parametrize_fixture_doc(str(TIMEZONE_IDS))
@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)
def tz_naive_fixture(request):
    """
    Fixture for trying timezones including default (None): {0}
    """
    return request.param
# Same as above minus the leading ``None``, so every param is a real timezone.
@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))
@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])
def tz_aware_fixture(request):
    """
    Fixture for trying explicit timezones: {0}
    """
    return request.param
# Generate cartesian product of tz_aware_fixture:
tz_aware_fixture2 = tz_aware_fixture
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"]
# "EA" lists hold the nullable extension-array dtype names (capitalized).
UNSIGNED_EA_INT_DTYPES = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES = [float, "float32", "float64"]
COMPLEX_DTYPES = [complex, "complex64", "complex128"]
STRING_DTYPES = [str, "str", "U"]
DATETIME64_DTYPES = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
# Every numpy-native dtype spelling exercised by the fixtures below.
ALL_NUMPY_DTYPES = (
    ALL_REAL_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)
# Each fixture below yields one dtype spelling from the matching *_DTYPES list.
@pytest.fixture(params=STRING_DTYPES)
def string_dtype(request):
    """
    Parametrized fixture for string dtypes.
    * str
    * 'str'
    * 'U'
    """
    return request.param
@pytest.fixture(params=BYTES_DTYPES)
def bytes_dtype(request):
    """
    Parametrized fixture for bytes dtypes.
    * bytes
    * 'bytes'
    """
    return request.param
@pytest.fixture(params=OBJECT_DTYPES)
def object_dtype(request):
    """
    Parametrized fixture for object dtypes.
    * object
    * 'object'
    """
    return request.param
@pytest.fixture(params=DATETIME64_DTYPES)
def datetime64_dtype(request):
    """
    Parametrized fixture for datetime64 dtypes.
    * 'datetime64[ns]'
    * 'M8[ns]'
    """
    return request.param
@pytest.fixture(params=TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
    """
    Parametrized fixture for timedelta64 dtypes.
    * 'timedelta64[ns]'
    * 'm8[ns]'
    """
    return request.param
# Numeric dtype fixtures; params come from the *_DTYPES constants above.
@pytest.fixture(params=FLOAT_DTYPES)
def float_dtype(request):
    """
    Parameterized fixture for float dtypes.
    * float
    * 'float32'
    * 'float64'
    """
    return request.param
@pytest.fixture(params=COMPLEX_DTYPES)
def complex_dtype(request):
    """
    Parameterized fixture for complex dtypes.
    * complex
    * 'complex64'
    * 'complex128'
    """
    return request.param
@pytest.fixture(params=SIGNED_INT_DTYPES)
def sint_dtype(request):
    """
    Parameterized fixture for signed integer dtypes.
    * int
    * 'int8'
    * 'int16'
    * 'int32'
    * 'int64'
    """
    return request.param
@pytest.fixture(params=UNSIGNED_INT_DTYPES)
def uint_dtype(request):
    """
    Parameterized fixture for unsigned integer dtypes.
    * 'uint8'
    * 'uint16'
    * 'uint32'
    * 'uint64'
    """
    return request.param
# Combined integer / real dtype fixtures (unions of the lists above).
@pytest.fixture(params=ALL_INT_DTYPES)
def any_int_dtype(request):
    """
    Parameterized fixture for any integer dtype.
    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    """
    return request.param
@pytest.fixture(params=ALL_EA_INT_DTYPES)
def any_nullable_int_dtype(request):
    """
    Parameterized fixture for any nullable integer dtype.
    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    """
    return request.param
@pytest.fixture(params=ALL_REAL_DTYPES)
def any_real_dtype(request):
    """
    Parameterized fixture for any (purely) real numeric dtype.
    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * float
    * 'float32'
    * 'float64'
    """
    return request.param
# The broadest dtype fixture: every entry of ALL_NUMPY_DTYPES.
@pytest.fixture(params=ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
    """
    Parameterized fixture for all numpy dtypes.
    * bool
    * 'bool'
    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * float
    * 'float32'
    * 'float64'
    * complex
    * 'complex64'
    * 'complex128'
    * str
    * 'str'
    * 'U'
    * bytes
    * 'bytes'
    * 'datetime64[ns]'
    * 'M8[ns]'
    * 'timedelta64[ns]'
    * 'm8[ns]'
    * object
    * 'object'
    """
    return request.param
# categoricals are handled separately
# (inferred-dtype-name, example-values) pairs; most value lists include a
# missing value so that skipna-style inference is exercised.
_any_skipna_inferred_dtype = [
    ("string", ["a", np.nan, "c"]),
    ("string", ["a", pd.NA, "c"]),
    ("bytes", [b"a", np.nan, b"c"]),
    ("empty", [np.nan, np.nan, np.nan]),
    ("empty", []),
    ("mixed-integer", ["a", np.nan, 2]),
    ("mixed", ["a", np.nan, 2.0]),
    ("floating", [1.0, np.nan, 2.0]),
    ("integer", [1, np.nan, 2]),
    ("mixed-integer-float", [1, np.nan, 2.0]),
    ("decimal", [Decimal(1), np.nan, Decimal(2)]),
    ("boolean", [True, np.nan, False]),
    ("boolean", [True, pd.NA, False]),
    ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]),
    ("datetime", [pd.Timestamp("20130101"), np.nan, pd.Timestamp("20180101")]),
    ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),
    # The following two dtypes are commented out due to GH 23554
    # ('complex', [1 + 1j, np.nan, 2 + 2j]),
    # ('timedelta64', [np.timedelta64(1, 'D'),
    #                  np.nan, np.timedelta64(2, 'D')]),
    ("timedelta", [timedelta(1), np.nan, timedelta(2)]),
    ("time", [time(1), np.nan, time(2)]),
    ("period", [pd.Period(2013), pd.NaT, pd.Period(2018)]),
    ("interval", [pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]),
]
ids, _ = zip(*_any_skipna_inferred_dtype)  # use inferred type as fixture-id
@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
def any_skipna_inferred_dtype(request):
    """
    Fixture for all inferred dtypes from _libs.lib.infer_dtype
    The covered (inferred) types are:
    * 'string'
    * 'empty'
    * 'bytes'
    * 'mixed'
    * 'mixed-integer'
    * 'mixed-integer-float'
    * 'floating'
    * 'integer'
    * 'decimal'
    * 'boolean'
    * 'datetime64'
    * 'datetime'
    * 'date'
    * 'timedelta'
    * 'time'
    * 'period'
    * 'interval'
    Returns
    -------
    inferred_dtype : str
        The string for the inferred dtype from _libs.lib.infer_dtype
    values : np.ndarray
        An array of object dtype that will be inferred to have
        `inferred_dtype`
    Examples
    --------
    >>> import pandas._libs.lib as lib
    >>>
    >>> def test_something(any_skipna_inferred_dtype):
    ...     inferred_dtype, values = any_skipna_inferred_dtype
    ...     # will pass
    ...     assert lib.infer_dtype(values, skipna=True) == inferred_dtype
    """
    # each param is a (dtype-name, example-values) pair from the list above
    inferred_dtype, values = request.param
    values = np.array(values, dtype=object)  # object dtype to avoid casting
    # correctness of inference tested in tests/dtypes/test_inference.py
    return inferred_dtype, values
# Every offset class exported by ``pd.offsets`` that subclasses Tick.
@pytest.fixture(
    params=[
        getattr(pd.offsets, o)
        for o in pd.offsets.__all__
        if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)
    ]
)
def tick_classes(request):
    """
    Fixture for Tick based datetime offsets available for a time series.
    """
    return request.param
# ----------------------------------------------------------------
# Global setup for tests using Hypothesis
# Registering these strategies makes them globally available via st.from_type,
# which is use for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())
    )
# Year offsets additionally take a month anchor (1-12).
for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls,
        st.builds(
            cls,
            n=st.integers(-5, 5),
            normalize=st.booleans(),
            month=st.integers(min_value=1, max_value=12),
        ),
    )
# Quarter offsets anchor on ``startingMonth`` instead of ``month``.
for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls,
        st.builds(
            cls,
            n=st.integers(-24, 24),
            normalize=st.booleans(),
            startingMonth=st.integers(min_value=1, max_value=12),
        ),
    )
@pytest.fixture
def datetime_series():
    """
    Fixture for Series of floats with DatetimeIndex
    """
    s = tm.makeTimeSeries()
    s.name = "ts"
    return s
# The 30x4 sample frame shown in the docstring is regenerated on every use.
@pytest.fixture
def float_frame():
    """
    Fixture for DataFrame of floats with index of unique strings
    Columns are ['A', 'B', 'C', 'D'].
                       A         B         C         D
    P7GACiRnxd -0.465578 -0.361863  0.886172 -0.053465
    qZKh6afn8n -0.466693 -0.373773  0.266873  1.673901
    tkp0r6Qble  0.148691 -0.059051  0.174817  1.598433
    wP70WOCtv8  0.133045 -0.581994 -0.992240  0.261651
    M2AeYQMnCz -1.207959 -0.185775  0.588206  0.563938
    QEPzyGDYDo -0.381843 -0.758281  0.502575 -0.565053
    r78Jwns6dn -0.653707  0.883127  0.682199  0.206159
    ...              ...       ...       ...       ...
    IHEGx9NO0T -0.277360  0.113021 -1.018314  0.196316
    lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999
    qa66YMWQa5  1.110525  0.475310 -0.747865  0.032121
    yOa0ATsmcE -0.431457  0.067094  0.096567 -0.264962
    65znX3uRNG  1.528446  0.160416 -0.109635 -0.032987
    eCOBvKqf3e  0.235281  1.622222  0.781255  0.392871
    xSucinXxuV -1.263557  0.252799 -0.552247  0.400426
    [30 rows x 4 columns]
    """
    return DataFrame(tm.getSeriesData())
# Yields the pd.Index / pd.Series *classes*, not instances.
@pytest.fixture(params=[pd.Index, pd.Series], ids=["index", "series"])
def index_or_series(request):
    """
    Fixture to parametrize over Index and Series, made necessary by a mypy
    bug, giving an error:
    List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
    See GH#29725
    """
    return request.param
@pytest.fixture
def dict_subclass():
    """
    Fixture for a dictionary subclass.

    Returns the subclass itself (not an instance); the subclass behaves
    exactly like ``dict``.
    """

    class TestSubDict(dict):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

    return TestSubDict
@pytest.fixture
def non_mapping_dict_subclass():
    """
    Fixture for a non-mapping dictionary subclass.

    Returns a class implementing ``abc.Mapping`` by delegating item access,
    iteration and length to a wrapped plain dict.
    """

    class TestNonDictMapping(abc.Mapping):
        def __init__(self, underlying_dict):
            self._data = underlying_dict

        def __getitem__(self, key):
            return self._data[key]

        def __iter__(self):
            return iter(self._data)

        def __len__(self):
            return len(self._data)

    return TestNonDictMapping
# One representative Index of each flavor, keyed by a short id.  The fixtures
# below hand out *copies* so tests can mutate them freely.
indices_dict = {
    "unicode": tm.makeUnicodeIndex(100),
    "string": tm.makeStringIndex(100),
    "datetime": tm.makeDateIndex(100),
    "datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
    "period": tm.makePeriodIndex(100),
    "timedelta": tm.makeTimedeltaIndex(100),
    "int": tm.makeIntIndex(100),
    "uint": tm.makeUIntIndex(100),
    "range": tm.makeRangeIndex(100),
    "float": tm.makeFloatIndex(100),
    "bool": tm.makeBoolIndex(2),
    "categorical": tm.makeCategoricalIndex(100),
    "interval": tm.makeIntervalIndex(100),
    "empty": Index([]),
    "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
    "repeats": Index([0, 0, 1, 1, 2, 2]),
}
@pytest.fixture(params=indices_dict.keys())
def indices(request):
    """
    Fixture parametrized over every index flavor in ``indices_dict``.
    """
    # copy to avoid mutation, e.g. setting .name
    return indices_dict[request.param].copy()
def _create_series(index):
""" Helper for the _series dict """
size = len(index)
data = np.random.randn(size)
return pd.Series(data, index=index, name="a")
# One Series per index flavor, named after the index id.
_series = {
    f"series-with-{index_id}-index": _create_series(index)
    for index_id, index in indices_dict.items()
}
_narrow_dtypes = [
    np.float16,
    np.float32,
    np.int8,
    np.int16,
    np.int32,
    np.uint8,
    np.uint16,
    np.uint32,
]
# A float series downcast to each narrow dtype above.
_narrow_series = {
    f"{dtype.__name__}-series": tm.makeFloatSeries(name="a").astype(dtype)
    for dtype in _narrow_dtypes
}
_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}
@pytest.fixture(params=_index_or_series_objs.keys())
def index_or_series_obj(request):
    """
    Fixture for tests on indexes, series and series with a narrow dtype
    copy to avoid mutation, e.g. setting .name
    """
    return _index_or_series_objs[request.param].copy(deep=True)
| 24.301075 | 87 | 0.633387 |
7bf688371ac95dbda217466b198c46e165223a3c | 71 | py | Python | tensortest.py | mcculzac/CSE891_Bio | 3c22a40100490cecd412ac582cf5ff09e0f325b8 | [
"MIT"
] | 21 | 2020-03-07T06:27:20.000Z | 2021-06-18T23:58:06.000Z | utils/test_gpu.py | pranjalchanda08/Self-Driving-Car | 1bbe4fbe01d9d49803f21025aa8304b0174ea5e9 | [
"MIT"
] | 1 | 2021-01-27T11:54:22.000Z | 2021-01-30T14:29:33.000Z | utils/test_gpu.py | pranjalchanda08/Self-Driving-Car | 1bbe4fbe01d9d49803f21025aa8304b0174ea5e9 | [
"MIT"
] | 5 | 2020-05-21T00:41:00.000Z | 2021-06-18T23:58:09.000Z | import tensorflow as tf
print(tf.config.list_physical_devices('GPU'))
| 17.75 | 45 | 0.802817 |
f19e824096565970f613c258c72628d606fda078 | 348 | py | Python | packages/core/minos-microservice-aggregate/minos/aggregate/transactions/contextvars.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 3 | 2021-11-05T08:47:45.000Z | 2021-11-17T09:37:26.000Z | packages/core/minos-microservice-aggregate/minos/aggregate/transactions/contextvars.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 30 | 2021-11-05T08:49:28.000Z | 2022-01-28T12:00:56.000Z | minos/aggregate/transactions/contextvars.py | Clariteia/minos_microservice_aggregate | 517db2f1abb11f5a8f2d77da76613061590ee5a7 | [
"MIT"
] | null | null | null | from __future__ import (
annotations,
)
from contextvars import (
ContextVar,
)
from typing import (
TYPE_CHECKING,
Final,
Optional,
)
if TYPE_CHECKING:
from .entries import (
TransactionEntry,
)
# Context-local holder for the transaction active on the current execution
# context; ``None`` (the default) means no transaction is in effect.
TRANSACTION_CONTEXT_VAR: Final[ContextVar[Optional[TransactionEntry]]] = ContextVar("transaction", default=None)
| 17.4 | 112 | 0.706897 |
2fd6cccdcd1d2f788b8e6244839e848a91fffe29 | 1,073 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/prime-arrangements.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/prime-arrangements.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/prime-arrangements.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n/2 + n/3 + ... + n/p) = O(nlog(logn)), see https://mathoverflow.net/questions/4596/on-the-series-1-2-1-3-1-5-1-7-1-11
# Space: O(n)
class Solution(object):
def numPrimeArrangements(self, n):
"""
:type n: int
:rtype: int
"""
def count_primes(n):
if n <= 1:
return 0
is_prime = [True]*((n+1)//2)
cnt = len(is_prime)
for i in xrange(3, n+1, 2):
if i*i > n:
break
if not is_prime[i//2]:
continue
for j in xrange(i*i, n+1, 2*i):
if not is_prime[j//2]:
continue
cnt -= 1
is_prime[j//2] = False
return cnt
def factorial(n):
result = 1
for i in xrange(2, n+1):
result = (result*i)%MOD
return result
MOD = 10**9+7
cnt = count_primes(n)
return factorial(cnt) * factorial(n-cnt) % MOD
| 29.805556 | 129 | 0.411929 |
ddf3d7689f44aefc2b70850a459318fd6f8001df | 262 | py | Python | django/tests/templates/form_view.html.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | django/tests/templates/form_view.html.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | django/tests/templates/form_view.html.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | BBBBBBB BBBBBBBBBBB
BBBBB BBBBBXXXXXX XXXXBBBBBBBB
BBBBB BBBBBBB
XXXXXXXXX
XXXXX XXXXXXXXXXXXXX
BB BBBBBBBBBBB
XX XXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXX XXXXXX XXXXXXXXXX
BBBBB
XXX XXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXXX
BBBBBBBB
| 16.375 | 55 | 0.889313 |
7c7179ac98f7fafd10bb0ed8aad8e3beba7807e7 | 1,483 | py | Python | tests/t_spnego_negotiate_once.py | devurandom/mod_auth_gssapi | 06a069abddb97d735c45de58feac8ed18b65be35 | [
"MIT"
] | null | null | null | tests/t_spnego_negotiate_once.py | devurandom/mod_auth_gssapi | 06a069abddb97d735c45de58feac8ed18b65be35 | [
"MIT"
] | null | null | null | tests/t_spnego_negotiate_once.py | devurandom/mod_auth_gssapi | 06a069abddb97d735c45de58feac8ed18b65be35 | [
"MIT"
] | 1 | 2022-01-17T18:38:03.000Z | 2022-01-17T18:38:03.000Z | #!/usr/bin/python
# Copyright (C) 2015 - mod_auth_gssapi contributors, see COPYING for license.
import os
import requests
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
if __name__ == '__main__':
    session = requests.Session()
    url = 'http://%s/spnego_negotiate_once/' % (
        os.environ['NSS_WRAPPER_HOSTNAME'])

    # An unauthenticated request must be rejected with 401 and must carry a
    # "WWW-Authenticate: Negotiate" challenge header.
    resp = session.get(url)
    if resp.status_code != 401:
        raise ValueError('Spnego Negotiate Once failed - 401 expected')
    challenge = resp.headers.get("WWW-Authenticate")
    if not (challenge and challenge.startswith("Negotiate")):
        raise ValueError('Spnego Negotiate Once failed - WWW-Authenticate '
                         'Negotiate header missing')

    # With GssapiNegotiateOnce enabled, a bad Authorization header yields a
    # 401 *without* a fresh WWW-Authenticate challenge.
    resp = session.get(url, headers={"Authorization": "Negotiate badvalue"})
    if resp.status_code != 401:
        raise ValueError('Spnego Negotiate Once failed - 401 expected')
    if resp.headers.get("WWW-Authenticate"):
        raise ValueError('Spnego Negotiate Once failed - WWW-Authenticate '
                         'Negotiate present but GssapiNegotiateOnce is '
                         'enabled')

    # Valid Kerberos credentials must be accepted.
    resp = session.get(url, auth=HTTPKerberosAuth())
    if resp.status_code != 200:
        raise ValueError('Spnego Negotiate Once failed')
74ca5a50110b46369a2e2df6b48e9b9c83cb05ee | 1,106 | py | Python | repo/examples/subscriber.py | justincpresley/nr-archway | 327dd6f013a6b6f35b6e7ca8a9cf1ca3acc26a9e | [
"Apache-2.0"
] | 4 | 2021-08-18T00:25:11.000Z | 2021-11-03T02:14:17.000Z | repo/examples/subscriber.py | justincpresley/ndn-archway | 54d704ad964690061ddc613ed58a0ea28629be3d | [
"Apache-2.0"
] | 2 | 2021-09-29T21:43:45.000Z | 2021-11-17T23:02:27.000Z | repo/examples/subscriber.py | justincpresley/ndn-archway | 54d704ad964690061ddc613ed58a0ea28629be3d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Example subscriber for `util/pubsub.py`.
@Author jonnykong@cs.ucla.edu
@Date 2020-05-10
"""
import asyncio as aio
import logging
from ndn.app import NDNApp
from ndn.encoding import Name, NonStrictName
import sys
sys.path.insert(0,'..')
from archway_repo.utils import PubSub
async def run_subscriber(app: NDNApp, subscriber_prefix: NonStrictName):
    """
    Start a PubSub instance under ``subscriber_prefix`` and subscribe
    ``foo_cb`` to the topic ``/topic_foo``.
    """
    pubsub = PubSub(app, subscriber_prefix)
    await pubsub.wait_for_ready()
    pubsub.subscribe(Name.from_str('/topic_foo'), foo_cb)
def foo_cb(msg: bytes):
    """Print a message received on the /topic_foo topic."""
    text = msg.decode()
    print(f'topic /topic_foo received msg: {text}')
def main():
    """Configure logging, create the NDN app and run the subscriber until stopped."""
    logging.basicConfig(format='[%(asctime)s]%(levelname)s:%(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO)

    app = NDNApp()
    try:
        # run_subscriber is scheduled once the app's face is up
        app.run_forever(
            after_start=run_subscriber(app, Name.from_str('/test_subscriber')))
    except FileNotFoundError:
        logging.warning('Error: could not connect to NFD')


if __name__ == '__main__':
    main()
cefa74770e220b96ab1cf8f1a2919cd403161912 | 2,874 | py | Python | users/views.py | Tipuch/editdojo | 3efe6fa97a06d770ead7f4973a53dbb9b92445bf | [
"MIT"
] | 402 | 2018-10-18T05:49:04.000Z | 2022-03-31T08:02:50.000Z | users/views.py | Tipuch/editdojo | 3efe6fa97a06d770ead7f4973a53dbb9b92445bf | [
"MIT"
] | 52 | 2018-10-18T11:13:00.000Z | 2021-06-10T21:26:18.000Z | users/views.py | Tipuch/editdojo | 3efe6fa97a06d770ead7f4973a53dbb9b92445bf | [
"MIT"
] | 138 | 2018-10-18T05:04:42.000Z | 2022-01-10T13:07:51.000Z | from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import Language
# returns True if the user has already finished the signup flow.
def finished_signup_flow(user):
    """
    Return True when *user* has selected at least one learning language and
    at least one fluent language.
    """
    # The assumption of this function is that the user is already signed in.
    assert user.is_authenticated
    # .exists() asks the database a cheap EXISTS question instead of
    # fetching every related row just to count them (len(qs.all())).
    return (user.learning_languages.exists()
            and user.fluent_languages.exists())
# Handle the signup flow.
def signup_flow(request):
    """Render the language-selection page, redirecting users who should not see it."""
    user = request.user
    if not user.is_authenticated:
        return HttpResponseRedirect('/')
    # Users who already completed the flow skip straight past it.
    # TODO: If the user has already finished the signup flow,
    # then just redirect to root.
    if finished_signup_flow(user):
        return HttpResponseRedirect('/languagesSelected/')  # TODO: change this to root
    return render(request, 'language_selection.html')
# Handle the POST request for selecting languages.
def select_languages(request):
    """
    Record the learning/fluent languages a signed-in user picked on the
    signup form.

    The form posts its choices as numbered keys (``learning1``,
    ``learning2``, ..., ``fluent1``, ...).  Unknown language names are
    silently dropped; if either category ends up empty the user is sent
    back to the signup page.
    """
    # First, make sure that the user is logged in.
    current_user = request.user
    if not current_user.is_authenticated:
        return HttpResponseRedirect('/signup/')

    # Then, retrieve the lists of learning/fluent languages.
    learning_languages = _collect_posted_languages(request.POST, 'learning')
    fluent_languages = _collect_posted_languages(request.POST, 'fluent')

    # Create the set of valid languages from each list.
    learning_language_set = _valid_language_set(learning_languages)
    fluent_language_set = _valid_language_set(fluent_languages)

    # Make sure that there's at least one valid language in each category.
    if not fluent_language_set or not learning_language_set:
        return HttpResponseRedirect('/signup/')

    # Then, finally, add selected languages to the user's info.
    _add_missing_languages(current_user.learning_languages, learning_language_set)
    _add_missing_languages(current_user.fluent_languages, fluent_language_set)
    current_user.save()
    return HttpResponseRedirect('/languagesSelected/')


def _collect_posted_languages(post, prefix):
    """Gather values posted under prefix1, prefix2, ... until a key is missing (max 99)."""
    values = []
    i = 1
    while i < 100:
        key = prefix + str(i)
        if key not in post:
            break
        values.append(post[key])
        i += 1
    return values


def _valid_language_set(names):
    """Map posted names to Language objects, silently dropping unknown names."""
    result = set()
    for name in set(names):
        try:
            result.add(Language.objects.get(english_representation=name))
        except Language.DoesNotExist:
            pass
    return result


def _add_missing_languages(relation, languages):
    """Add each language to the m2m relation unless it is already present."""
    for language in languages:
        if language not in relation.all():
            relation.add(language)
1ac91a10c867bd5efab1dc271b0e03e3503ada57 | 3,846 | py | Python | xinshuo_io/pts_io.py | xinshuoweng/cv_ml_tool | 1918b9e37ec5fb8148b8a089f226a4864d67b153 | [
"MIT"
] | 31 | 2020-03-05T12:27:21.000Z | 2022-03-07T04:00:18.000Z | xinshuo_io/pts_io.py | xinshuoweng/cv_ml_tool | 1918b9e37ec5fb8148b8a089f226a4864d67b153 | [
"MIT"
] | null | null | null | xinshuo_io/pts_io.py | xinshuoweng/cv_ml_tool | 1918b9e37ec5fb8148b8a089f226a4864d67b153 | [
"MIT"
] | 12 | 2020-07-06T05:06:58.000Z | 2021-11-18T14:43:20.000Z | # Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
# this file includes functions about point file I/O stream
import numpy as np
from .file_io import load_txt_file
from xinshuo_miscellaneous import is_path_exists_or_creatable, is2dptsarray_occlusion, is2dptsarray_confidence, is2dptsarray, remove_empty_item_from_list, str2num
# note that, the top left point is (1, 1) in 300-W instead of zero-indexed
def anno_writer(pts_array, pts_savepath, num_pts=68, anno_version=1, debug=True):
    '''
    write the point array to a .pts file

    parameter:
        pts_array:     2 x num_pts (x, y) or 3 x num_pts (x, y, visibility)
                       numpy array; when no third row is given every point is
                       written as visible (flag 1.0)
        pts_savepath:  destination path of the .pts file
        num_pts:       number of points expected in pts_array
        anno_version:  version number written in the file header
        debug:         when True, validate the path and the point array
    '''
    if debug:
        assert is_path_exists_or_creatable(pts_savepath), 'the save path is not correct'
        assert (is2dptsarray(pts_array) or is2dptsarray_occlusion(pts_array) or is2dptsarray_confidence(pts_array)) and pts_array.shape[1] == num_pts, 'the input point is not correct'

    # hoisted out of the loop: whether a visibility row was provided
    # (the original re-validated the whole array on every iteration)
    has_visibility = pts_array.shape[0] > 2
    # avoid shadowing the name ``file``; the with-block also closes the
    # handle, so no explicit close() is needed afterwards
    with open(pts_savepath, 'w') as pts_file:
        pts_file.write('version: %d\n' % anno_version)
        pts_file.write('n_points: %d\n' % num_pts)
        pts_file.write('{\n')

        # main content: one "x y visibility" line per point
        for pts_index in range(num_pts):  # range (was Python-2 xrange)
            if has_visibility:
                pts_file.write('%.3f %.3f %f\n' % (pts_array[0, pts_index], pts_array[1, pts_index], pts_array[2, pts_index]))
            else:
                pts_file.write('%.3f %.3f %f\n' % (pts_array[0, pts_index], pts_array[1, pts_index], 1.0))  # all visible
        pts_file.write('}')
def anno_parser(anno_path, num_pts=None, anno_version=None, warning=True, debug=True):
    '''
    parse the annotation for Keypoint file

    parameter:
        anno_path:     path to a .pts annotation file
        num_pts:       if given, assert the file contains exactly this many points
        anno_version:  if given, assert the file header declares this version
        warning:       unused here; kept for interface compatibility
        debug:         when True, validate the file structure

    return:
        pts_array: 3 x num_pts (x, y, oculusion)
    '''
    data, num_lines = load_txt_file(anno_path, debug=debug)
    if debug:
        assert data[0].find('version: ') == 0, 'version is not correct: %s' % anno_path
        assert data[1].find('n_points: ') == 0, 'number of points in second line is not correct'
        assert data[2] == '{' and data[-1] == '}', 'starting and end symbol is not correct'
    version = str2num(data[0][len('version: '):])
    n_points = int(data[1][len('n_points: '):])
    if debug:
        # 4 lines carry general information: version, n_points, start and end symbol
        assert num_lines == n_points + 4, 'number of lines is not correct'
        if anno_version is not None:
            assert version == anno_version, 'version of annotation is not correct: %d vs %d' % (version, anno_version)
        if num_pts is not None:
            assert num_pts == n_points, 'number of points is not correct: %d vs %d' % (num_pts, n_points)

    # read points coordinate; first point starts at the fourth line
    pts_array = np.zeros((3, n_points), dtype='float32')
    line_offset = 3
    for point_index in range(n_points):  # range (was Python-2 xrange)
        try:
            pts_list = data[point_index + line_offset].split(' ')  # x y format
            if len(pts_list) > 2 and pts_list[2] == '':  # handle edge case where additional whitespace exists after point coordinates
                pts_list = remove_empty_item_from_list(pts_list)
            pts_array[0, point_index] = float(pts_list[0])
            pts_array[1, point_index] = float(pts_list[1])
            if len(pts_list) == 3:
                pts_array[2, point_index] = float(pts_list[2])
            else:
                pts_array[2, point_index] = float(1)  # oculusion flag, 0: oculuded, 1: visible. We use 1 for all points since no visibility is provided by LS3D-W
        except ValueError:
            print('error in loading points in %s' % anno_path)
    return pts_array
4f8aaef7d59c5184654de3481f3b39c50e952518 | 11,689 | py | Python | storops_test/vnx/test_parsers.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 60 | 2016-04-18T23:42:10.000Z | 2022-03-23T02:26:03.000Z | storops_test/vnx/test_parsers.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 317 | 2016-05-25T06:45:37.000Z | 2022-03-25T13:22:38.000Z | storops_test/vnx/test_parsers.py | tunaruraul/storops | 7092c516c55b4c2f00c7c22383e1ad46ecfec091 | [
"Apache-2.0"
] | 34 | 2016-03-18T02:39:12.000Z | 2022-01-07T12:54:14.000Z | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
from unittest import TestCase
from hamcrest import equal_to, assert_that, not_none, none, raises
from storops.vnx.enums import VNXSPEnum
from storops.vnx.parsers import VNXCliParser, VNXPropDescriptor, \
VNXParserConfigFactory
from storops.vnx.resource import get_vnx_parser
from storops_test.vnx.cli_mock import MockCli
from storops_test.vnx.resource.fakes import STORAGE_GROUP_HBA
log = logging.getLogger(__name__)
# Shared property descriptors used by the Demo* parsers below.
A = VNXPropDescriptor('-a', 'Prop A (name):', 'prop_a')
B = VNXPropDescriptor('-b', 'Prop B:')
C = VNXPropDescriptor('-c', 'Prop C:')
ID = VNXPropDescriptor(None, 'ID:', is_index=True)
class DemoParser(VNXCliParser):
    # Parser with three optioned properties and one index property.
    def __init__(self):
        super(DemoParser, self).__init__()
        self.add_property(A, B, C, ID)
class DemoParserNonIndex(VNXCliParser):
    # Parser with a single property and no index property at all.
    def __init__(self):
        super(DemoParserNonIndex, self).__init__()
        self.add_property(VNXPropDescriptor('-b', 'Prop B:'))
class DemoParserRegexIndex(VNXCliParser):
    # Parser whose labels are regular expressions; the index value is
    # converted to int.
    def __init__(self):
        super(DemoParserRegexIndex, self).__init__()
        self.add_property(
            VNXPropDescriptor(None,
                              r'\s*\w+:(\d+)',
                              'id',
                              is_index=True,
                              is_regex=True,
                              converter=int),
            VNXPropDescriptor(None,
                              r'\s*value:\s*(\w+)',
                              'value',
                              is_regex=True))
class DemoParserMultiIndices(VNXCliParser):
    # Parser with two index properties (A and B) and two plain ones.
    def __init__(self):
        super(DemoParserMultiIndices, self).__init__()
        self.add_property(
            VNXPropDescriptor(None, 'A:', is_index=True),
            VNXPropDescriptor(None, 'B:', is_index=True),
            VNXPropDescriptor(None, 'C:'),
            VNXPropDescriptor(None, 'D:'))
class VNXCliParserTest(TestCase):
    """Tests for VNXCliParser option collection, parsing and index handling."""
    def test_get_property_options(self):
        options = DemoParser().property_options
        assert_that(' '.join(options), equal_to('-a -b -c'))
    def test_get_index_descriptor(self):
        assert_that(DemoParser().index_property.label, equal_to('ID:'))
    def test_get_index_descriptor_none(self):
        assert_that(DemoParserNonIndex().index_property, none())
    def test_parse(self):
        output = """
        ID: test
        Prop A (Name): ab (c)
        Prop B: d ef
        """
        parser = DemoParser()
        parsed = parser.parse(output, [A, ID, C])
        assert_that(parsed.prop_a, equal_to('ab (c)'))
        assert_that(parsed.prop_c, none())
        assert_that(parsed.id, equal_to('test'))
        # a property not requested in the parse call is absent entirely
        def f():
            log.debug(parsed.prop_b)
        assert_that(f, raises(AttributeError))
    def test_parse_empty_prop(self):
        output = """
        ID: test
        Prop A (Name): ab (c)
        Prop B:
        Prop C: abc
        """
        parser = DemoParser()
        parsed = parser.parse(output, [A, ID, B, C])
        assert_that(parsed.id, equal_to('test'))
        assert_that(parsed.prop_a, equal_to('ab (c)'))
        # a label with no value parses to an empty string, not None
        assert_that(parsed.prop_b, equal_to(''))
    def test_parse_regex_label(self):
        output = """
        id:123
        value:abcde
        id:456
        value:ghijk
        """
        parsed = DemoParserRegexIndex().parse_all(output)
        assert_that(len(parsed), equal_to(2))
        for i in parsed:
            if i.id == 123:
                assert_that(i.value, equal_to('abcde'))
            elif i.id == 456:
                assert_that(i.value, equal_to('ghijk'))
            else:
                self.fail('id not recognized.')
    def test_all_options(self):
        options = DemoParser().all_options
        assert_that(options, equal_to(['-a', '-b', '-c']))
    def test_parse_multi_index(self):
        output = """
        A: a0
        B: b0
        C: c0
        A: a0
        B: b0
        D: d0
        A: a0
        B: b1
        C: c1
        """
        parsed = DemoParserMultiIndices().parse_all(output)
        assert_that(len(parsed), equal_to(2))
        # the two records sharing index (a0, b0) are merged into one
        a0b0 = next(i for i in parsed if i.b == 'b0')
        assert_that(a0b0, not_none())
        assert_that(a0b0.a, equal_to('a0'))
        assert_that(a0b0.b, equal_to('b0'))
        assert_that(a0b0.c, equal_to('c0'))
        assert_that(a0b0.d, equal_to('d0'))
        a0b1 = next(i for i in parsed if i.b == 'b1')
        assert_that(a0b1, not_none())
        assert_that(a0b1.a, equal_to('a0'))
        assert_that(a0b1.b, equal_to('b1'))
        assert_that(a0b1.c, equal_to('c1'))
class VNXStorageGroupHBAParserTest(TestCase):
    """Tests for the VNXStorageGroupHBA output parser."""

    def test_parse(self):
        # STORAGE_GROUP_HBA is a shared fixture defined elsewhere in this file.
        data = get_vnx_parser("VNXStorageGroupHBA").parse(STORAGE_GROUP_HBA)
        assert_that(data.host_name, equal_to('abc.def.dev'))
        assert_that(data.sp_port, equal_to('A-3v1'))
        assert_that(data.initiator_ip, equal_to('10.244.209.72'))
        assert_that(data.tpgt, equal_to('1'))
        assert_that(data.isid, equal_to('10000000000'))
        # hba is the (initiator iqn, SP name, port id) triple.
        assert_that(
            data.hba,
            equal_to(('iqn.1991-05.com.microsoft:abc.def.dev',
                      'SP A', '3')))

    def test_parse_no_header(self):
        # Same record but without the usual header line before the HBA triple.
        # NOTE(review): in-string indentation lost in extraction.
        output = """
iqn.1991-05.com.microsoft:abc.def.dev SP A 1
Host name: abc.def.dev
SPPort: A-1v0
Initiator IP: 10.244.209.72
TPGT: 1
ISID: 10000000000
"""
        data = get_vnx_parser("VNXStorageGroupHBA").parse(output)
        assert_that(data.host_name, equal_to('abc.def.dev'))
        assert_that(data.sp_port, equal_to('A-1v0'))
        assert_that(data.initiator_ip, equal_to('10.244.209.72'))
        assert_that(data.tpgt, equal_to('1'))
        assert_that(data.isid, equal_to('10000000000'))
        assert_that(data.hba,
                    equal_to(('iqn.1991-05.com.microsoft:abc.def.dev',
                              'SP A',
                              '1')))
class VNXStorageGroupParserTest(TestCase):
    """Tests for the VNXStorageGroup output parser."""

    def test_parse(self):
        raw = MockCli.read_file(
            'storagegroup_-messner_-list_-host_-iscsiAttributes_-gname_microsoft.txt')
        group = get_vnx_parser('VNXStorageGroup').parse(raw)
        assert_that(group.shareable, equal_to(True))
        assert_that(group.name, equal_to('microsoft'))
        expected_wwn = '12:34:56:78:9A:BC:DE:F1:23:45:67:89:AB:CD:EF:01'
        assert_that(group.wwn, equal_to(expected_wwn))
        assert_that(group.alu_hlu_map[4], equal_to(0))
        assert_that(group.alu_hlu_map[456], equal_to(123))
        assert_that(group.alu_hlu_map.get(3, None), none())
        # assert for hba members
        assert_that(len(group.hba_sp_pairs), equal_to(2))
        first_pair = group.hba_sp_pairs[0]
        assert_that(first_pair.host_name, equal_to('abc.def.dev'))
class VNXConsistencyGroupParserTest(TestCase):
    """Tests for the VNXConsistencyGroup output parser."""

    def test_parse(self):
        parser = get_vnx_parser('VNXConsistencyGroup')
        groups = parser.parse_all(MockCli.read_file('snap_-group_-list_-detail.txt'))
        # Both consistency groups from the fixture must be present,
        # each with its expected state.
        for expected_name, expected_state in (('test cg name', 'Ready'),
                                              ('another cg', 'Offline')):
            cg = next(c for c in groups if c.name == expected_name)
            assert_that(cg, not_none())
            assert_that(cg.state, equal_to(expected_state))
class VNXPoolPropertiesTest(TestCase):
    """Tests for the VNXPool output parser."""

    def test_parse(self):
        raw = MockCli.read_file('storagepool_-list_-all_-id_1.txt')
        pool = get_vnx_parser('VNXPool').parse(raw)
        assert_that(pool.state, equal_to('Ready'))
        assert_that(pool.pool_id, equal_to(1))
        assert_that(pool.user_capacity_gbs, equal_to(2329.792))
        assert_that(pool.available_capacity_gbs, equal_to(1473.623))
        # FAST cache information is absent from this fixture.
        assert_that(pool.fast_cache, none())
        assert_that(pool.name, equal_to('Pool_daq'))
        assert_that(pool.total_subscribed_capacity_gbs, equal_to(2701.767))
        assert_that(pool.percent_full_threshold, equal_to(70))
class VNXPoolFeatureParserTest(TestCase):
    """Tests for the VNXPoolFeature output parser."""
    # command: storagepool -feature -info
    # NOTE(review): in-string indentation of this fixture was lost in
    # extraction; the parser presumably tolerates it.
    output = """
Is Virtual Provisioning Supported: true
Max. Pools: 60
Max. Disks Per Pool: 1496
Max. Disks for all Pools: 1496
Max. Disks Per Operation: 180
Max. Pool LUNs: 4000
Min. Pool LUN Size(Blocks): 1
Max. Pool LUN Size(Blocks): 549755813888
Max. Pool LUN Size(GBs): 262144.000
Total Number of Pools: 2
Total Number of Pool LUNs: 4
Total Number of all Pool LUNs that are thin: 3
Total Number of all Pool LUNs that are non-thin: 1
Number of Disks used in Pools: 5
Available Disks:
Bus 0 Enclosure 0 Disk 24
Bus 0 Enclosure 0 Disk 16
Bus 0 Enclosure 0 Disk 5
Bus 0 Enclosure 0 Disk 4
"""

    def test_parse(self):
        parser = get_vnx_parser('VNXPoolFeature')
        parsed = parser.parse(self.output)
        # Only the LUN-count limits are asserted here.
        assert_that(parsed.max_pool_luns, equal_to(4000))
        assert_that(parsed.total_pool_luns, equal_to(4))
class VNXLunPropertiesTest(TestCase):
    """Tests for the VNXLun output parser."""

    def test_parse(self):
        raw = MockCli.read_file('lun_-list_-all_-l_19.txt')
        lun = get_vnx_parser('VNXLun').parse(raw)
        assert_that(
            lun.wwn,
            equal_to('60:06:01:60:1A:50:35:00:CC:22:61:D6:76:B1:E4:11'))
        assert_that(lun.name, equal_to('test_lun'))
        assert_that(lun.lun_id, equal_to(19))
        assert_that(lun.total_capacity_gb, equal_to(1.0))
        assert_that(lun.is_thin_lun, equal_to(True))
        assert_that(lun.is_compressed, equal_to(False))
        assert_that(lun.deduplication_state, equal_to('Off'))
        assert_that(lun.tiering_policy, equal_to('No Movement'))
        assert_that(lun.initial_tier, equal_to('Optimize Pool'))
        assert_that(lun.state, equal_to('Ready'))
        assert_that(lun.status, equal_to('OK(0x0)'))
        assert_that(lun.operation, equal_to('None'))
        assert_that(lun.current_owner, equal_to(VNXSPEnum.SP_A))
        # This fixture LUN has no snapshot attached.
        assert_that(lun.attached_snapshot, none())
class VNXParserConfigFactoryTest(TestCase):
    """Tests for parser configuration metadata."""

    def test_read_properties(self):
        prop = get_vnx_parser('VNXConsistencyGroup')
        assert_that(prop.resource_class_name, equal_to('VNXConsistencyGroup'))
        assert_that(prop.data_src, equal_to('cli'))

    def test_properties_sequence_should_align_with_file(self):
        props = get_vnx_parser('VNXSystem')
        assert_that(props.MODEL.sequence, equal_to(0))
        assert_that(props.NAME.sequence, equal_to(5))

    def test_get_rsc_pkg_name(self):
        pkg = VNXParserConfigFactory.get_rsc_pkg_name()
        assert_that(pkg, equal_to('storops.vnx.resource'))
| 36.301242 | 78 | 0.612627 |
6a926c4419ad82a57d70e3e69ae812f485e9e747 | 1,540 | py | Python | post2notion.py | iayanpahwa/sensor2notion | f85ec09cbce0e626def9fadeef6793b3ffb9e236 | [
"Apache-2.0"
] | 5 | 2022-01-22T18:27:53.000Z | 2022-02-25T13:38:32.000Z | post2notion.py | iayanpahwa/sensor2notion | f85ec09cbce0e626def9fadeef6793b3ffb9e236 | [
"Apache-2.0"
] | null | null | null | post2notion.py | iayanpahwa/sensor2notion | f85ec09cbce0e626def9fadeef6793b3ffb9e236 | [
"Apache-2.0"
] | null | null | null | import requests, json
def createPage(databaseId, headers, data):
    '''
    Create a new row in the given Notion database.

    ``data`` is expected to provide these keys (mapped to database columns;
    TODO confirm against the caller):
        date               -> "Date" title column
        outsideTemperature -> "Outside Temperature" number column
        outsideAQI         -> "Air Quality" rich-text column
        homeTemperature    -> "Home Temperature" number column
        homeHumidity       -> "Home Humidity" number column

    Returns the HTTP status code of the Notion API response; raises
    SystemExit if the request itself fails (connection/auth problems).
    '''
    createUrl = 'https://api.notion.com/v1/pages'

    newPageData = {
        "parent": { "database_id": databaseId },
        "properties": {
            "Date": {
                "title": [
                    {
                        "text": {
                            "content": data["date"]
                        }
                    }
                ]
            },
            "Outside Temperature": {
                "number": data["outsideTemperature"]
            },
            "Air Quality": {
                "rich_text": [
                    {
                        "text": {
                            "content": data["outsideAQI"]
                        }
                    }
                ]
            },
            "Home Temperature": {
                "number": data["homeTemperature"]
            },
            "Home Humidity": {
                "number": data["homeHumidity"]
            },
        }
    }

    # NOTE: rebinds the ``data`` parameter to the serialized JSON payload.
    data = json.dumps(newPageData)
    try:
        res = requests.request("POST", createUrl, headers=headers, data=data)
    except requests.exceptions.RequestException as e:
        print("Failed to post to Notion DB, check connection or API Key !!")
        raise SystemExit(e)
    else:
        #print(res)
        return(res.status_code)
| 27.017544 | 77 | 0.428571 |
75ca941b01cd09e7ab902b16ef2cb62d17673059 | 618 | py | Python | microservice/examples/echo.py | MartinHowarth/microservice | d20adef043f0f8265e75c9ab518567ade21255bd | [
"MIT"
] | null | null | null | microservice/examples/echo.py | MartinHowarth/microservice | d20adef043f0f8265e75c9ab518567ade21255bd | [
"MIT"
] | null | null | null | microservice/examples/echo.py | MartinHowarth/microservice | d20adef043f0f8265e75c9ab518567ade21255bd | [
"MIT"
] | null | null | null | from microservice.core.decorator import microservice
@microservice
def echo_as_dict(*args, **kwargs):
    """Echo positional and keyword arguments back as a single dict."""
    print(args)
    print(kwargs)
    # Keyword arguments win over the '_args' key, matching dict.update order.
    return {'_args': args, **kwargs}
@microservice
def echo_as_dict2(*args, **kwargs):
    """Echo own arguments and fan out to echo_as_dict3."""
    print(args)
    print(kwargs)
    own = {'_args': args, **kwargs}
    nested = echo_as_dict3(5, 2, 5, asdf="asdrf")
    return own, nested
@microservice
def echo_as_dict3(*args, **kwargs):
    """Echo own arguments and fan out to echo_as_dict."""
    print(args)
    print(kwargs)
    own = {'_args': args, **kwargs}
    nested = echo_as_dict(234, 456, 345, ty="no problem")
    return own, nested
| 19.935484 | 55 | 0.648867 |
6c3fd94f54ddcea68f3b10d7e4af822b08280083 | 9,079 | py | Python | leetcode_python/Heap/least-number-of-unique-integers-after-k-removals.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Heap/least-number-of-unique-integers-after-k-removals.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Heap/least-number-of-unique-integers-after-k-removals.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
1481. Least Number of Unique Integers after K Removals
Medium
Given an array of integers arr and an integer k. Find the least number of unique integers after removing exactly k elements.
Example 1:
Input: arr = [5,5,4], k = 1
Output: 1
Explanation: Remove the single 4, only 5 is left.
Example 2:
Input: arr = [4,3,1,1,3,3,2], k = 3
Output: 2
Explanation: Remove 4, 2 and either one of the two 1s or three 3s. 1 and 3 will be left.
Constraints:
1 <= arr.length <= 10^5
1 <= arr[i] <= 10^9
0 <= k <= arr.length
"""
# V0
# IDEA : Counter
from collections import Counter
class Solution:
    """Counting approach: drop whole value-groups, rarest first."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        # Nothing to count -> nothing unique remains.
        if not arr:
            return 0
        freq = Counter(arr)
        # Occurrence counts ordered ascending: cheapest groups first.
        ascending = sorted(freq.values())
        unique = len(ascending)
        for occurrences in ascending:
            # A value only disappears if ALL of its occurrences fit
            # inside the remaining removal budget.
            if occurrences > k:
                break
            k -= occurrences
            unique -= 1
        return unique
# V0
# IDEA : Counter
class Solution:
    """Greedy removal driven by a sorted (value, count) list."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        counts = Counter(arr)
        # Rarest values first: removing them costs the fewest deletions.
        by_rarity = sorted(counts.items(), key=lambda item: item[1])
        fully_removed = 0
        for value, occurrences in by_rarity:
            # Only remove a value when its whole group fits in the budget.
            if occurrences <= k:
                k -= occurrences
                fully_removed += 1
        return len(counts) - fully_removed
# V0'
# IDEA : Counter
class Solution:
    """Counter-based greedy using an explicit running removal budget."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        counts = Counter(arr)
        budget = k
        kept = len(counts)
        for _, occurrences in sorted(counts.items(), key=lambda kv: kv[1]):
            if occurrences > budget:
                # Group too large for the remaining budget: it survives.
                continue
            budget -= occurrences
            kept -= 1
        return kept
# V0''
# IDEA : Counter + heapq
class Solution(object):
    """Min-heap of (count, value); peel off one occurrence per removal."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        from collections import Counter
        import heapq

        heap = [(count, value) for value, count in Counter(arr).items()]
        heapq.heapify(heap)
        while k > 0:
            count, value = heapq.heappop(heap)
            if count > 1:
                # Occurrences remain: the value stays in play.
                heapq.heappush(heap, (count - 1, value))
            k -= 1
        # Whatever is left on the heap is a surviving unique value.
        return len(heap)
# V1
# IDEA : Counter
# https://leetcode.com/problems/least-number-of-unique-integers-after-k-removals/discuss/979896/Python-Solution
class Solution:
    """Sort the frequency table and strip the cheapest groups first."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        frequency = Counter(arr)
        removable = 0
        remaining_budget = k
        for _, times in sorted(frequency.items(), key=lambda pair: pair[1]):
            if times <= remaining_budget:
                remaining_budget -= times
                removable += 1
        return len(frequency) - removable
# V1'
# IDEA : Counter
# https://leetcode.com/problems/least-number-of-unique-integers-after-k-removals/discuss/686293/Python-solution-with-Counter
class Solution:
    """Greedy removal that stops at the first unaffordable group."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        from collections import Counter

        ordered = sorted(Counter(arr).items(), key=lambda kv: kv[1])
        survivors = len(ordered)
        for _, occurrences in ordered:
            if k < occurrences:
                # Counts are ascending, so no later group fits either.
                break
            k -= occurrences
            survivors -= 1
        return survivors
# V1''
# IDEA : Counter
from collections import Counter
class Solution:
    """Walk Counter.most_common() from its rare tail toward the head."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """
        Return the least number of unique integers left in ``arr`` after
        removing exactly ``k`` elements.

        :param arr: array of integers
        :type arr: list[int]
        :param k: number of integer removals from arr
        :type k: int
        :rtype: int
        """
        ranked = Counter(arr).most_common()  # most frequent first
        remaining = len(ranked)
        budget = k
        while budget > 0 and remaining > 0:
            # Spend budget on the currently-rarest surviving value (tail).
            budget -= ranked[remaining - 1][1]
            if budget >= 0:
                remaining -= 1
        return remaining
# V1'''
# IDEA : Counter + heapq
# https://leetcode.com/problems/least-number-of-unique-integers-after-k-removals/discuss/686429/Python-heaps
from collections import Counter
from heapq import *
class Solution(object):
    """Heap variant: each removal strips one occurrence of the rarest value."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        remaining = [(cnt, num) for num, cnt in Counter(arr).items()]
        heapify(remaining)
        while remaining and k > 0:
            cnt, num = heappop(remaining)
            if cnt > 1:
                heappush(remaining, (cnt - 1, num))
            k -= 1
        return len(remaining)
# V1''''
# IDEA : Counter + heapq
# https://leetcode.com/problems/least-number-of-unique-integers-after-k-removals/discuss/688829/Python-heap-solution
class Solution(object):
    """Heap of (count, value): discard whole groups while the budget allows."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """
        Return the least number of unique integers left after removing
        exactly ``k`` elements from ``arr``.
        """
        # BUG FIX: this file only does ``from heapq import *`` and
        # ``from collections import Counter``, so the qualified names
        # ``heapq`` and ``collections`` used below were undefined and
        # raised NameError at call time.  Import the modules locally so
        # this variant is self-contained.
        import collections
        import heapq

        heap = []
        count = collections.Counter(arr)
        for key in count:
            heapq.heappush(heap, (count[key], key))
        while heap and k:
            count, key = heapq.heappop(heap)
            if k >= count:
                # The whole group fits in the budget: value fully removed.
                k -= count
            else:
                # Budget exhausted mid-group; the popped group survives too.
                return len(heap) + 1
        return len(heap)
# V1'''''
# IDEA : Counter + heapq
# https://leetcode.com/problems/least-number-of-unique-integers-after-k-removals/discuss/704179/python-solution%3A-Counter-and-Priority-Queue
# IDEA
# -> Count the occurence of each number.
# -> We want to delete the number with lowest occurence thus we can use minimum steps to reduce the total unique numbers in the list. For example,[4,3,1,1,3,3,2]. The Counter of this array will be: {3:3, 1:2, 4:1, 2:1}. Given k = 3, the greedy approach is to delete 2 and 4 first because both of them are appearing once. We need an ordering data structure to give us the lowest occurence of number each time. As you may know, Priority Queue comes to play
# -> Use heap to build PQ for the counter. We store each member as a tuple: (count, number) Python heap module will sort it based on the first member of the tuple.
# -> loop through k times to pop member out of heap and check if we need to push it back
class Solution(object):
    """One-at-a-time removal from a min-heap of (count, value) pairs."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """Least number of unique ints left after removing exactly k elements."""
        from collections import Counter
        import heapq

        pairs = [(count, value) for value, count in Counter(arr).items()]
        heapq.heapify(pairs)
        for _ in range(k):
            count, value = heapq.heappop(pairs)
            if count != 1:
                # Still occurrences left: push the decremented group back.
                heapq.heappush(pairs, (count - 1, value))
        return len(pairs)
# V1'''''''
# IDEA : Counter + heapq
# https://leetcode.com/problems/least-number-of-unique-integers-after-k-removals/discuss/1542356/Python-MinHeap-Solution
class Solution:
    """Min-heap of bare counts; decrement the smallest count per removal."""

    def findLeastNumOfUniqueInts(self, arr, k):
        """
        Return the least number of unique integers left after removing
        exactly ``k`` elements from ``arr``.
        """
        # BUG FIX: the file never executes ``import collections`` or
        # ``import heapq`` (only ``from collections import Counter`` /
        # ``from heapq import *``), so the qualified names used by this
        # variant raised NameError.  Import the modules locally.
        import collections
        import heapq

        counts = list(collections.Counter(arr).values())
        heapq.heapify(counts)
        for _ in range(k):
            # Decreasing the root keeps the min-heap invariant intact.
            counts[0] -= 1
            if counts[0] == 0:
                heapq.heappop(counts)
        return len(counts)
# V1'''''''
# IDEA : GREEDY
# https://zxi.mytechroad.com/blog/hashtable/leetcode-1481-least-number-of-unique-integers-after-k-removals/
# C++
# class Solution {
# public:
# int findLeastNumOfUniqueInts(vector<int>& arr, int k) {
# unordered_map<int, int> c;
# for (int x : arr) ++c[x];
# vector<int> m; // freq
# for (const auto [x, f] : c)
# m.push_back(f);
# sort(begin(m), end(m));
# int ans = m.size();
# int i = 0;
# while (k--) {
# if (--m[i] == 0) {
# ++i;
# --ans;
# }
# }
# return ans;
# }
# };
# V2 | 33.625926 | 454 | 0.573191 |
d4b666f8cfe67763cdc84d1761aae643352fa9ee | 7,132 | py | Python | sdk/python/pulumi_azure_nextgen/keyvault/v20190901/get_key.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/keyvault/v20190901/get_key.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/keyvault/v20190901/get_key.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public names exported by this generated module.
__all__ = [
    'GetKeyResult',
    'AwaitableGetKeyResult',
    'get_key',
]
# Generated by the Pulumi SDK generator (see file header).  Values are
# stored/read through pulumi.set/pulumi.get so that the @pulumi.output_type
# decorator can manage (de)serialization via the @pulumi.getter names.
@pulumi.output_type
class GetKeyResult:
    """
    The key resource.
    """
    def __init__(__self__, attributes=None, curve_name=None, id=None, key_ops=None, key_size=None, key_uri=None, key_uri_with_version=None, kty=None, location=None, name=None, tags=None, type=None):
        # Each argument is type-checked only when truthy, then stashed via
        # pulumi.set; None values pass through unchecked by design.
        if attributes and not isinstance(attributes, dict):
            raise TypeError("Expected argument 'attributes' to be a dict")
        pulumi.set(__self__, "attributes", attributes)
        if curve_name and not isinstance(curve_name, str):
            raise TypeError("Expected argument 'curve_name' to be a str")
        pulumi.set(__self__, "curve_name", curve_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if key_ops and not isinstance(key_ops, list):
            raise TypeError("Expected argument 'key_ops' to be a list")
        pulumi.set(__self__, "key_ops", key_ops)
        if key_size and not isinstance(key_size, int):
            raise TypeError("Expected argument 'key_size' to be a int")
        pulumi.set(__self__, "key_size", key_size)
        if key_uri and not isinstance(key_uri, str):
            raise TypeError("Expected argument 'key_uri' to be a str")
        pulumi.set(__self__, "key_uri", key_uri)
        if key_uri_with_version and not isinstance(key_uri_with_version, str):
            raise TypeError("Expected argument 'key_uri_with_version' to be a str")
        pulumi.set(__self__, "key_uri_with_version", key_uri_with_version)
        if kty and not isinstance(kty, str):
            raise TypeError("Expected argument 'kty' to be a str")
        pulumi.set(__self__, "kty", kty)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def attributes(self) -> Optional['outputs.KeyAttributesResponse']:
        """
        The attributes of the key.
        """
        return pulumi.get(self, "attributes")

    @property
    @pulumi.getter(name="curveName")
    def curve_name(self) -> Optional[str]:
        """
        The elliptic curve name. For valid values, see JsonWebKeyCurveName.
        """
        return pulumi.get(self, "curve_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified identifier of the key vault resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="keyOps")
    def key_ops(self) -> Optional[Sequence[str]]:
        # Permitted key operations; presumably JsonWebKeyOperation values --
        # the generator emitted no docstring here.
        return pulumi.get(self, "key_ops")

    @property
    @pulumi.getter(name="keySize")
    def key_size(self) -> Optional[int]:
        """
        The key size in bits. For example: 2048, 3072, or 4096 for RSA.
        """
        return pulumi.get(self, "key_size")

    @property
    @pulumi.getter(name="keyUri")
    def key_uri(self) -> str:
        """
        The URI to retrieve the current version of the key.
        """
        return pulumi.get(self, "key_uri")

    @property
    @pulumi.getter(name="keyUriWithVersion")
    def key_uri_with_version(self) -> str:
        """
        The URI to retrieve the specific version of the key.
        """
        return pulumi.get(self, "key_uri_with_version")

    @property
    @pulumi.getter
    def kty(self) -> Optional[str]:
        """
        The type of the key. For valid values, see JsonWebKeyType.
        """
        return pulumi.get(self, "kty")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Azure location of the key vault resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the key vault resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        Tags assigned to the key vault resource.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type of the key vault resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetKeyResult(GetKeyResult):
    # Awaitable wrapper: the dead ``yield`` below makes __await__ a
    # generator function, which is what makes instances awaitable;
    # awaiting simply produces a plain GetKeyResult copy of this object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetKeyResult(
            attributes=self.attributes,
            curve_name=self.curve_name,
            id=self.id,
            key_ops=self.key_ops,
            key_size=self.key_size,
            key_uri=self.key_uri,
            key_uri_with_version=self.key_uri_with_version,
            kty=self.kty,
            location=self.location,
            name=self.name,
            tags=self.tags,
            type=self.type)
def get_key(key_name: Optional[str] = None,
            resource_group_name: Optional[str] = None,
            vault_name: Optional[str] = None,
            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKeyResult:
    """
    Fetch a key vault key resource.

    :param str key_name: The name of the key to be retrieved.
    :param str resource_group_name: The name of the resource group which contains the specified key vault.
    :param str vault_name: The name of the vault which contains the key to be retrieved.
    """
    invoke_args = {
        'keyName': key_name,
        'resourceGroupName': resource_group_name,
        'vaultName': vault_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke(
        'azure-nextgen:keyvault/v20190901:getKey',
        invoke_args, opts=opts, typ=GetKeyResult).value
    # Re-wrap the plain result in its awaitable counterpart.
    return AwaitableGetKeyResult(
        attributes=result.attributes,
        curve_name=result.curve_name,
        id=result.id,
        key_ops=result.key_ops,
        key_size=result.key_size,
        key_uri=result.key_uri,
        key_uri_with_version=result.key_uri_with_version,
        kty=result.kty,
        location=result.location,
        name=result.name,
        tags=result.tags,
        type=result.type)
| 33.961905 | 198 | 0.62493 |
b0834edf2893555d97e93126d285a8f6ddf68524 | 439 | py | Python | tests/bdd/common/csi.py | xin3liang/mayastor-control-plane | 05650c5cf673cf1433d3a68b4991674b87cfc214 | [
"Apache-2.0"
] | 13 | 2021-03-29T15:10:15.000Z | 2022-03-23T04:01:59.000Z | tests/bdd/common/csi.py | xin3liang/mayastor-control-plane | 05650c5cf673cf1433d3a68b4991674b87cfc214 | [
"Apache-2.0"
] | 146 | 2021-03-30T13:14:56.000Z | 2022-03-31T15:54:38.000Z | tests/bdd/common/csi.py | xin3liang/mayastor-control-plane | 05650c5cf673cf1433d3a68b4991674b87cfc214 | [
"Apache-2.0"
] | 12 | 2021-06-22T13:53:01.000Z | 2022-03-31T12:07:36.000Z | """
Wrapper arount gRPC handle to communicate with CSI controller.
"""
import grpc
import csi_pb2_grpc as rpc
class CsiHandle(object):
    """gRPC handle to a CSI controller listening on ``csi_socket``.

    Exposes ``controller`` and ``identity`` stubs that share one insecure
    channel.  Call :meth:`close` (or let the object be garbage collected)
    to release the channel.
    """

    def __init__(self, csi_socket):
        self.channel = grpc.insecure_channel(csi_socket)
        self.controller = rpc.ControllerStub(self.channel)
        self.identity = rpc.IdentityStub(self.channel)

    def close(self):
        # BUG FIX: the old implementation only ``del``-eted the attribute,
        # which neither closes the underlying gRPC channel (resource leak)
        # nor survives being called twice (AttributeError).  Close the
        # channel explicitly and make the call idempotent.
        channel = getattr(self, 'channel', None)
        if channel is not None:
            channel.close()
            self.channel = None

    def __del__(self):
        # Best-effort cleanup on garbage collection; delegates to close().
        self.close()
| 21.95 | 62 | 0.690205 |
8a41e9afb4fa44cc254a8f3b85e90fec958e2b8c | 613 | py | Python | ZhipinSpider/ZhipinSpider/items.py | seeincen1/spider | ae468ecc143bc72f8c526d22c9850956d9456184 | [
"Apache-2.0"
] | null | null | null | ZhipinSpider/ZhipinSpider/items.py | seeincen1/spider | ae468ecc143bc72f8c526d22c9850956d9456184 | [
"Apache-2.0"
] | null | null | null | ZhipinSpider/ZhipinSpider/items.py | seeincen1/spider | ae468ecc143bc72f8c526d22c9850956d9456184 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhipinspiderItem(scrapy.Item):
    """Item holding one job posting scraped from the Zhipin job board."""
    # define the fields for your item here like:
    # Job title
    title = scrapy.Field()
    # Salary
    salary = scrapy.Field()
    # Hiring company
    company = scrapy.Field()
    # Link to the job detail page
    url = scrapy.Field()
    # Work location
    work_addr = scrapy.Field()
    # Industry
    industry = scrapy.Field()
    # Company size
    company_size = scrapy.Field()
    # Recruiter
    recruiter = scrapy.Field()
    # Publish date
    publish_date = scrapy.Field()
| 19.15625 | 53 | 0.631321 |
233d21f217d60c600daf9c3db0fb5a7f6b6e9f76 | 189 | py | Python | gherkin_to_markdown/expressions/example_expression.py | LeandreArseneault/gherkin_to_markdown | 157a6a7ba5b7f1f3a159bc163bf1b1187401243a | [
"MIT"
] | 6 | 2022-02-14T22:10:50.000Z | 2022-03-10T20:42:29.000Z | gherkin_to_markdown/expressions/example_expression.py | LeandreArseneault/gherkin_to_markdown | 157a6a7ba5b7f1f3a159bc163bf1b1187401243a | [
"MIT"
] | null | null | null | gherkin_to_markdown/expressions/example_expression.py | LeandreArseneault/gherkin_to_markdown | 157a6a7ba5b7f1f3a159bc163bf1b1187401243a | [
"MIT"
] | null | null | null | from gherkin_to_markdown.expressions.expression import Expression
class ExampleExpression(Expression):
    """Render the Gherkin keyword of this expression as a markdown heading."""

    def to_markdown(self, statement: str):
        # Level-3 heading followed by a blank line.
        heading = f"### {self.keyword}"
        return heading + "\n\n"
| 27 | 65 | 0.751323 |
6582d1a76b32cd3335812ff253975ccf188db89b | 2,211 | py | Python | app.py | Ideneal/IVUShiftsSync | 106d7835c6987b95b35275bc3217051fc886b033 | [
"MIT"
] | null | null | null | app.py | Ideneal/IVUShiftsSync | 106d7835c6987b95b35275bc3217051fc886b033 | [
"MIT"
] | null | null | null | app.py | Ideneal/IVUShiftsSync | 106d7835c6987b95b35275bc3217051fc886b033 | [
"MIT"
] | null | null | null | import logging
from datetime import datetime
from ivu import IVU
from cal import Calendar
from event import EventAdapter
from config import config
from utils import progress_bar, str2date, add_days, date2datetime
# Add logger
# NOTE: filemode='w+' truncates app.log on every run, so only the most
# recent execution's messages are kept.
logging.basicConfig(filename='app.log', filemode='w+', datefmt='%d-%m-%Y %H:%M:%S')
def sync(events, calendar):
    """Synchronize shift ``events`` into ``calendar``, one day at a time.

    For every day between the first and the last event, create missing
    calendar entries, delete stale ones, and update the rest.  ``events``
    is assumed to be sorted chronologically (first element = earliest day).
    """
    # BUG FIX: the old guard only protected events[0] (falling back to
    # datetime.today()), but events[-1] below still raised IndexError on an
    # empty list.  With nothing to sync there is nothing to do.
    if not events:
        return
    init_date = str2date(events[0]['start']['dateTime'])
    last_date = str2date(events[-1]['start']['dateTime'])
    current_date = init_date
    days = (last_date - current_date).days

    # Retrieve all existent calendar events
    calendar_events = calendar.get_events(date2datetime(init_date), max_result=max(days, 10))

    # Map events by dates
    mapped_calendar_events = {str2date(event['start']['dateTime']): event for event in calendar_events}
    mapped_events = {str2date(event['start']['dateTime']): event for event in events}

    progress_bar(0, days, prefix='Progress:', suffix='Complete', length=50)
    while current_date < last_date:
        event = mapped_events.get(current_date)
        calendar_event = mapped_calendar_events.get(current_date)

        # The three cases are mutually exclusive: new, stale, or changed.
        if event and calendar_event is None:
            calendar.create_event(event)
        elif event is None and calendar_event:
            calendar.delete_event(calendar_event)
        elif event and calendar_event:
            calendar.update_event(calendar_event, event)

        current_date = add_days(current_date, 1)
        progress = days - (last_date - current_date).days
        progress_bar(progress, days, prefix='Progress:', suffix='Complete', length=50)
def main():
    """Fetch shifts from IVU and synchronize them into the Google calendar."""
    # Retrieve shifts
    client = IVU()
    client.login(config['ivu']['username'], config['ivu']['password'])
    shifts = client.shifts()

    # Adapt shifts to events
    adapter = EventAdapter()
    events = adapter.get_events(shifts)

    # Switch calendar
    calendar = Calendar()
    calendar.switch_calendar(config['google']['calendar'])

    # Sync shifts
    sync(events, calendar)
    print('Event synchronized!')
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        # Any failure is recorded (with traceback) in app.log; note the
        # process still exits with status 0, so callers cannot detect it.
        logging.exception('Exception occurred')
| 31.585714 | 103 | 0.684306 |
d77e8aacb1742334eb4b5506fbdcebe8963894ea | 13,195 | py | Python | vivisect/impemu/platarch/arm.py | pombredanne/vivisect | 0274ff1dee066ccb9c9f5072fb7ac7b98655f854 | [
"ECL-2.0",
"Apache-2.0"
] | 16 | 2015-12-10T06:18:16.000Z | 2021-09-11T21:42:16.000Z | vivisect/impemu/platarch/arm.py | pombredanne/vivisect | 0274ff1dee066ccb9c9f5072fb7ac7b98655f854 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2018-10-12T21:07:24.000Z | 2018-10-12T21:08:49.000Z | vivisect/impemu/platarch/arm.py | pombredanne/vivisect | 0274ff1dee066ccb9c9f5072fb7ac7b98655f854 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2016-03-20T11:15:51.000Z | 2021-08-06T07:32:42.000Z | import logging
import envi
import envi.archs.arm as e_arm
from envi.archs.arm.regs import *
import vivisect.exc as v_exc
import vivisect.impemu.emulator as v_i_emulator
import visgraph.pathcore as vg_path
logger = logging.getLogger(__name__)
class ArmWorkspaceEmulator(v_i_emulator.WorkspaceEmulator, e_arm.ArmEmulator):
# Track taint on r0-r12 only; sp/lr/pc (r13-r15) are excluded.
taintregs = [x for x in range(13)]
def __init__(self, vw, **kwargs):
    '''
    Please see the base emulator class in vivisect/impemu/emulator.py for the parameters
    that can be passed through kwargs
    '''
    # Initialize the arch emulator first, then the workspace emulator.
    e_arm.ArmEmulator.__init__(self)
    v_i_emulator.WorkspaceEmulator.__init__(self, vw, **kwargs)
    # Default the memory architecture to ARMv7; thumb-ness is tracked via
    # the PSR T flag (see parseOpcode), not a separate architecture object.
    self.setMemArchitecture(envi.ARCH_ARMV7)
def setThumbMode(self, thumb=1):
    # Thin passthrough to the envi ARM emulator implementation.
    e_arm.ArmEmulator.setThumbMode(self, thumb)
def setArmMode(self, arm=1):
    # Thin passthrough to the envi ARM emulator implementation.
    e_arm.ArmEmulator.setArmMode(self, arm)
def parseOpcode(self, va, arch=envi.ARCH_DEFAULT):
    """
    Workspace-backed (caching) opcode parser.

    Decoding through the workspace (getByteDef etc.) is faster than the
    generic emulator path, and unlike envi.Emulator this honours the PSR
    thumb flag when no explicit architecture is requested.
    """
    if arch == envi.ARCH_DEFAULT:
        # Pick ARM vs THUMB from the current PSR T flag.
        if self.getFlag(PSR_T_bit):
            arch = envi.ARCH_THUMB
        else:
            arch = envi.ARCH_ARMV7
    return self.vw.parseOpcode(va, arch=arch)
def stepi(self):
    """Single-step: decode at PC, run monitor hooks, execute, and classify
    the resulting transition as a call or a branch."""
    # NOTE: when we step, we *always* want to be stepping over calls
    # (and possibly import emulate them)
    starteip = self.getProgramCounter()

    # parse out an opcode; the thumb flag is OR'd into the address so
    # parseOpcode decodes in the correct mode.
    tmode = self.getFlag(PSR_T_bit)
    # logger.debug("tmode: %x", tmode)
    op = self.parseOpcode(starteip | tmode)

    if self.emumon:
        self.emumon.prehook(self, op, starteip)

    # Execute the opcode
    self.executeOpcode(op)
    # Record the executed address on the current path node.
    vg_path.getNodeProp(self.curpath, 'valist').append(starteip)

    endeip = self.getProgramCounter()

    if self.emumon:
        self.emumon.posthook(self, op, endeip)

    # checkCall returns truthy if the transition was a call; otherwise
    # see whether it was a conditional/indirect branch worth following.
    if not self.checkCall(starteip, endeip, op):
        self.checkBranches(starteip, endeip, op)
def _prep(self, funcva, tmode=None):
    """Determine ARM vs THUMB mode for ``funcva``, set the PSR T flag
    accordingly, and return the (possibly alignment-masked) address.

    Mode is chosen by, in order: an explicit ``tmode`` override, the
    address's low bits, the workspace location's arch flags, and finally
    a decode heuristic that scores both modes.
    """
    if tmode is not None:
        # we're forcing thumb or arm mode... update the flag
        self.setFlag(PSR_T_bit, tmode)
        logger.debug("funcva thumb==%d (forced): 0x%x", tmode, funcva)

    elif funcva & 3:
        # if the va isn't 4-byte aligned, it's gotta be thumb
        self.setFlag(PSR_T_bit, 1)
        # Clear bit 0 (the thumb marker) to get the real address.
        funcva &= -2
        logger.debug("funcva is THUMB(addr): 0x%x", funcva)

    else:
        loc = self.vw.getLocation(funcva)
        if loc is not None:
            # if we have a opcode location, use it's iflags to determine mode
            lva, lsz, lt, lti = loc
            if (lti & envi.ARCH_MASK) == envi.ARCH_THUMB:
                self.setFlag(PSR_T_bit, 1)
                logger.debug("funcva is THUMB(loc): 0x%x", funcva)
            else:
                logger.debug("funcva is ARM(loc): 0x%x", funcva)
        else:
            # otherwise, let's use some heuristics to guess.
            # armthumb is a score: negative favors THUMB, positive ARM.
            # A successful decode counts 1; 'push' (typical prologue)
            # counts 5 more; 'ldr' counts 2 more.
            armthumb = 0
            armop = None
            thumbop = None
            # NOTE(review): InvalidInstruction is presumably pulled in via
            # the star import from envi.archs.arm.regs -- confirm.
            try:
                thumbop = self.parseOpcode(funcva | 1)
                armthumb -= 1
                if thumbop.mnem == 'push':
                    armthumb -= 5
                elif thumbop.mnem == 'ldr':
                    armthumb -= 2
            except InvalidInstruction as e:
                # NOTE(review): this branch handles the THUMB decode, but
                # the message says ARM (and vice versa below) -- the two
                # debug labels appear swapped.
                logger.debug(" heuristics: decoding ARM: %r", e)

            try:
                armop = self.parseOpcode(funcva)
                armthumb += 1
                if armop.mnem == 'push':
                    armthumb += 5
                elif armop.mnem == 'ldr':
                    armthumb += 2
            except InvalidInstruction as e:
                logger.debug(" heuristics: decoding THUMB: %r", e)

            if armop is None and thumbop is None:
                # we didn't have a single push in either direction
                logger.warning("TOTAL FAILURE TO DETERMINE THUMB MODE")
                raise Exception("Neither architecture parsed the first opcode")

            elif armthumb < 0:
                self.setFlag(PSR_T_bit, 1)
                logger.debug("ArmWorkspaceEmulator: Heuristically Determined funcva is THUMB: 0x%x", funcva)

            else:
                # Ties (armthumb == 0) fall through to ARM.
                self.setFlag(PSR_T_bit, 0)
                logger.debug("ArmWorkspaceEmulator: Heuristically Determined funcva is ARM: 0x%x", funcva)

    self.funcva = funcva
    return funcva
def runFunction(self, funcva, stopva=None, maxhit=None, maxloop=None, tmode=None):
    """
    This is a utility function specific to WorkspaceEmulation (and impemu) that
    will emulate, but only inside the given function. You may specify a stopva
    to return once that location is hit.

    Parameters:
        funcva  - entry point of the function to emulate
        stopva  - optional va; emulation returns when the pc reaches it
        maxhit  - optional cap on how many times any single va may execute
        maxloop - optional cap on how many times a path may revisit the entry
        tmode   - optional forced Thumb flag passed through to _prep()

    Emulation proceeds as a worklist of (va, emu-snapshot, path) entries so
    that each discovered branch target is explored from a consistent state.
    """
    logger.debug('=== emu.runFunction(0x%x, stopva=%r, maxhit=%r, maxloop=%r, tmode=%r)', funcva, stopva, maxhit, maxloop, tmode)
    # resolve ARM/Thumb mode and normalize the entry address
    funcva = self._prep(funcva, tmode)

    # Let the current (should be base also) path know where we are starting
    vg_path.setNodeProp(self.curpath, 'bva', funcva)

    hits = {}
    # worklist of (va, emulator snapshot, code path) still to explore
    todo = [(funcva, self.getEmuSnap(), self.path)]
    vw = self.vw  # Save a dereference many many times

    while len(todo):
        va, esnap, self.curpath = todo.pop()

        # restore emulator state captured when this branch was queued
        self.setEmuSnap(esnap)
        self.setProgramCounter(va)
        tmode = self.getFlag(PSR_T_bit)

        # Check if we are beyond our loop max...
        if maxloop is not None:
            lcount = vg_path.getPathLoopCount(self.curpath, 'bva', va)
            if lcount > maxloop:
                continue

        while True:
            starteip = self.getProgramCounter()

            if not vw.isValidPointer(starteip):
                break

            if starteip == stopva:
                return

            # Check straight hit count...
            if maxhit is not None:
                h = hits.get(starteip, 0)
                h += 1
                if h > maxhit:
                    break
                hits[starteip] = h

            # If we ran out of path (branches that went
            # somewhere that we couldn't follow?
            if self.curpath is None:
                break

            try:
                # FIXME unify with stepi code...
                # OR-ing in tmode requests a Thumb decode when the flag is set
                op = self.parseOpcode(starteip | tmode)
                self.op = op
                if self.emumon:
                    try:
                        self.emumon.prehook(self, op, starteip)
                    except v_exc.BadOpBytes as e:
                        logger.debug(repr(e))
                        break
                    except Exception as e:
                        logger.log(self._log_level, "funcva: 0x%x opva: 0x%x: %r (%r) (in emumon prehook: %r)", funcva, starteip, op, e, self.emumon)

                    if self.emustop:
                        return

                # Execute the opcode
                self.executeOpcode(op)
                vg_path.getNodeProp(self.curpath, 'valist').append(starteip)
                endeip = self.getProgramCounter()

                if self.emumon:
                    try:
                        self.emumon.posthook(self, op, endeip)
                    except Exception as e:
                        logger.log(self._log_level, "funcva: 0x%x opva: 0x%x: %r (%r) (in emumon posthook: %r)", funcva, starteip, op, e, self.emumon)

                    if self.emustop:
                        return

                iscall = self.checkCall(starteip, endeip, op)
                if self.emustop:
                    return

                # If it wasn't a call, check for branches, if so, add them to
                # the todo list and go around again...
                if not iscall:
                    blist = self.checkBranches(starteip, endeip, op)
                    if len(blist):
                        # pc in the snap will be wrong, but over-ridden at restore
                        esnap = self.getEmuSnap()
                        for bva, bpath in blist:
                            todo.append((bva, esnap, bpath))
                        break
                else:
                    # check if we've blx'd to a different thumb state. if so,
                    # be sure to return to the original tmode before continuing emulation pass
                    newtmode = self.getFlag(PSR_T_bit)
                    if newtmode != tmode:
                        self.setFlag(PSR_T_bit, tmode)

                # If we enounter a procedure exit, it doesn't
                # matter what EIP is, we're done here.
                if op.iflags & envi.IF_RET:
                    vg_path.setNodeProp(self.curpath, 'cleanret', True)
                    break

            except envi.UnsupportedInstruction as e:
                if self.strictops:
                    logger.debug('runFunction breaking after unsupported instruction: 0x%08x %s', e.op.va, e.op.mnem)
                    raise e
                else:
                    # skip over the unsupported instruction and keep going
                    logger.debug('runFunction continuing after unsupported instruction: 0x%08x %s', e.op.va, e.op.mnem)
                    self.setProgramCounter(e.op.va+ e.op.size)
            except Exception as e:
                if self.emumon is not None:
                    self.emumon.logAnomaly(self, starteip, str(e))
                logger.debug('runFunction breaking after exception (fva: 0x%x): %s', funcva, e)
                break  # If we exc during execution, this branch is dead.
class ThumbWorkspaceEmulator(ArmWorkspaceEmulator):
    """ArmWorkspaceEmulator variant which always emulates in Thumb mode."""

    def __init__(self, vw, **kwargs):
        ArmWorkspaceEmulator.__init__(self, vw, **kwargs)
        self.setThumbMode()
        self.setMemArchitecture(envi.ARCH_THUMB)

    def runFunction(self, funcva, stopva=None, maxhit=None, maxloop=None, tmode=None):
        # Thumb-only emulator: any caller-supplied tmode is ignored and
        # Thumb mode (tmode=1) is forced.
        return ArmWorkspaceEmulator.runFunction(
            self, funcva, stopva=stopva, maxhit=maxhit, maxloop=maxloop,
            tmode=1)
class Thumb16WorkspaceEmulator(ArmWorkspaceEmulator):
    """ArmWorkspaceEmulator variant locked to the Thumb16 architecture."""

    def __init__(self, vw, **kwargs):
        ArmWorkspaceEmulator.__init__(self, vw, **kwargs)
        self.setThumbMode()
        self.setMemArchitecture(envi.ARCH_THUMB16)

    def runFunction(self, funcva, stopva=None, maxhit=None, maxloop=None, tmode=None):
        # Thumb-only emulator: any caller-supplied tmode is ignored and
        # Thumb mode (tmode=1) is forced.
        return ArmWorkspaceEmulator.runFunction(
            self, funcva, stopva=stopva, maxhit=maxhit, maxloop=maxloop,
            tmode=1)
'''
st0len gratuitously from wikipedia:
ARM[edit]
The standard ARM calling convention allocates the 16 ARM registers as:
r15 is the program counter.
r14 is the link register. (The BL instruction, used in a subroutine call, stores the return address in this register).
r13 is the stack pointer. (The Push/Pop instructions in "Thumb" operating mode use this register only).
r12 is the Intra-Procedure-call scratch register.
r4 to r11: used to hold local variables.
r0 to r3: used to hold argument values passed to a subroutine, and also hold results returned from a subroutine.
If the type of value returned is too large to fit in r0 to r3, or whose size cannot be determined statically at compile time, then the caller must allocate space for that value at run time, and pass a pointer to that space in r0.
Subroutines must preserve the contents of r4 to r11 and the stack pointer. (Perhaps by saving them to the stack in the function prologue, then using them as scratch space, then restoring them from the stack in the function epilogue). In particular, subroutines that call other subroutines *must* save the return address in the link register r14 to the stack before calling those other subroutines. However, such subroutines do not need to return that value to r14-they merely need to load that value into r15, the program counter, to return.
The ARM stack is full-descending.[3]
This calling convention causes a "typical" ARM subroutine to
* In the prolog, push r4 to r11 to the stack, and push the return address in r14, to the stack. (This can be done with a single STM instruction).
* copy any passed arguments (in r0 to r3) to the local scratch registers (r4 to r11).
* allocate other local variables to the remaining local scratch registers (r4 to r11).
* do calculations and call other subroutines as necessary using BL, assuming r0 to r3, r12 and r14 will not be preserved.
* put the result in r0
* In the epilog, pull r4 to r11 from the stack, and pulls the return address to the program counter r15. (This can be done with a single LDM instruction).
'''
| 42.291667 | 545 | 0.564532 |
4842aac0dd1661a0021046559077e21666cc20a1 | 3,388 | py | Python | todo_list/settings.py | IgnacioBarroso/todo-challenge | d73046972f523648d5aed88e32dfbe380ba5effc | [
"MIT"
] | null | null | null | todo_list/settings.py | IgnacioBarroso/todo-challenge | d73046972f523648d5aed88e32dfbe380ba5effc | [
"MIT"
] | null | null | null | todo_list/settings.py | IgnacioBarroso/todo-challenge | d73046972f523648d5aed88e32dfbe380ba5effc | [
"MIT"
] | null | null | null | """
Django settings for todo_list project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# NOTE(review): 'logout' does not appear to be used anywhere in this settings
# module — confirm before removing.
from django.contrib.auth import logout

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ah4u_0y#s#7@k@&imz+wr4o)s#wk37ej76puts-wqh0#lfvlxc'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Hosts this app may serve (development defaults only).
ALLOWED_HOSTS = ['0.0.0.0','127.0.0.1','localhost']

# Where django.contrib.auth sends the user after login/logout.
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project-local app
    'app',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'todo_list.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'todo_list.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
a24e3c5ef885d3af97ca587f7c14624baa866534 | 9,141 | py | Python | Bio/Data/IUPACData.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 3 | 2016-11-21T09:55:56.000Z | 2019-04-09T17:39:43.000Z | Bio/Data/IUPACData.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 32 | 2016-11-21T07:38:21.000Z | 2017-08-16T13:00:03.000Z | Bio/Data/IUPACData.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 8 | 2016-11-24T18:57:35.000Z | 2022-01-16T08:15:25.000Z | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Information about the IUPAC alphabets."""
protein_letters = "ACDEFGHIKLMNPQRSTVWY"
extended_protein_letters = "ACDEFGHIKLMNPQRSTVWYBXZJUO"
# B = "Asx"; aspartic acid or asparagine (D or N)
# X = "Xxx"; unknown or 'other' amino acid
# Z = "Glx"; glutamic acid or glutamine (E or Q)
# http://www.chem.qmul.ac.uk/iupac/AminoAcid/A2021.html#AA212
#
# J = "Xle"; leucine or isoleucine (L or I, used in NMR)
# Mentioned in http://www.chem.qmul.ac.uk/iubmb/newsletter/1999/item3.html
# Also the International Nucleotide Sequence Database Collaboration (INSDC)
# (i.e. GenBank, EMBL, DDBJ) adopted this in 2006
# http://www.ddbj.nig.ac.jp/insdc/icm2006-e.html
#
# Xle (J); Leucine or Isoleucine
# The residue abbreviations, Xle (the three-letter abbreviation) and J
# (the one-letter abbreviation) are reserved for the case that cannot
# experimentally distinguish leucine from isoleucine.
#
# U = "Sec"; selenocysteine
# http://www.chem.qmul.ac.uk/iubmb/newsletter/1999/item3.html
#
# O = "Pyl"; pyrrolysine
# http://www.chem.qmul.ac.uk/iubmb/newsletter/2009.html#item35
protein_letters_1to3 = {
'A': 'Ala', 'C': 'Cys', 'D': 'Asp',
'E': 'Glu', 'F': 'Phe', 'G': 'Gly', 'H': 'His',
'I': 'Ile', 'K': 'Lys', 'L': 'Leu', 'M': 'Met',
'N': 'Asn', 'P': 'Pro', 'Q': 'Gln', 'R': 'Arg',
'S': 'Ser', 'T': 'Thr', 'V': 'Val', 'W': 'Trp',
'Y': 'Tyr',
}
protein_letters_1to3_extended = dict(list(protein_letters_1to3.items()) + list({
'B': 'Asx', 'X': 'Xaa', 'Z': 'Glx', 'J': 'Xle',
'U': 'Sel', 'O': 'Pyl',
}.items()))
protein_letters_3to1 = dict((x[1], x[0]) for x in
protein_letters_1to3.items())
protein_letters_3to1_extended = dict((x[1], x[0]) for x in
protein_letters_1to3_extended.items())
ambiguous_dna_letters = "GATCRYWSMKHBVDN"
unambiguous_dna_letters = "GATC"
ambiguous_rna_letters = "GAUCRYWSMKHBVDN"
unambiguous_rna_letters = "GAUC"
# B == 5-bromouridine
# D == 5,6-dihydrouridine
# S == thiouridine
# W == wyosine
extended_dna_letters = "GATCBDSW"
# are there extended forms?
# extended_rna_letters = "GAUCBDSW"
ambiguous_dna_values = {
"A": "A",
"C": "C",
"G": "G",
"T": "T",
"M": "AC",
"R": "AG",
"W": "AT",
"S": "CG",
"Y": "CT",
"K": "GT",
"V": "ACG",
"H": "ACT",
"D": "AGT",
"B": "CGT",
"X": "GATC",
"N": "GATC",
}
ambiguous_rna_values = {
"A": "A",
"C": "C",
"G": "G",
"U": "U",
"M": "AC",
"R": "AG",
"W": "AU",
"S": "CG",
"Y": "CU",
"K": "GU",
"V": "ACG",
"H": "ACU",
"D": "AGU",
"B": "CGU",
"X": "GAUC",
"N": "GAUC",
}
ambiguous_dna_complement = {
"A": "T",
"C": "G",
"G": "C",
"T": "A",
"M": "K",
"R": "Y",
"W": "W",
"S": "S",
"Y": "R",
"K": "M",
"V": "B",
"H": "D",
"D": "H",
"B": "V",
"X": "X",
"N": "N",
}
ambiguous_rna_complement = {
"A": "U",
"C": "G",
"G": "C",
"U": "A",
"M": "K",
"R": "Y",
"W": "W",
"S": "S",
"Y": "R",
"K": "M",
"V": "B",
"H": "D",
"D": "H",
"B": "V",
"X": "X",
"N": "N",
}
def _make_ranges(mydict):
d = {}
for key, value in mydict.items():
d[key] = (value, value)
return d
# Mass data taken from PubChem
# Average masses of monophosphate deoxy nucleotides
unambiguous_dna_weights = {
"A": 331.2218,
"C": 307.1971,
"G": 347.2212,
"T": 322.2085
}
# Monoisotopic masses of monophospate deoxy nucleotides
monoisotopic_unambiguous_dna_weights = {
"A": 331.06817,
"C": 307.056936,
"G": 347.063084,
"T": 322.056602
}
unambiguous_dna_weight_ranges = _make_ranges(unambiguous_dna_weights)
unambiguous_rna_weights = {
"A": 347.2212,
"C": 323.1965,
"G": 363.2206,
"U": 324.1813
}
monoisotopic_unambiguous_rna_weights = {
"A": 347.063084,
"C": 323.051851,
"G": 363.057999,
"U": 324.035867
}
unambiguous_rna_weight_ranges = _make_ranges(unambiguous_rna_weights)
def _make_ambiguous_ranges(mydict, weight_table):
range_d = {}
avg_d = {}
for letter, values in mydict.items():
# Following line is a quick hack to skip undefined weights for U and O
if len(values) == 1 and values[0] not in weight_table:
continue
weights = [weight_table.get(x) for x in values]
range_d[letter] = (min(weights), max(weights))
total_w = 0.0
for w in weights:
total_w = total_w + w
avg_d[letter] = total_w / len(weights)
return range_d, avg_d
ambiguous_dna_weight_ranges, avg_ambiguous_dna_weights = \
_make_ambiguous_ranges(ambiguous_dna_values,
unambiguous_dna_weights)
ambiguous_rna_weight_ranges, avg_ambiguous_rna_weights = \
_make_ambiguous_ranges(ambiguous_rna_values,
unambiguous_rna_weights)
protein_weights = {
"A": 89.0932,
"C": 121.1582,
"D": 133.1027,
"E": 147.1293,
"F": 165.1891,
"G": 75.0666,
"H": 155.1546,
"I": 131.1729,
"K": 146.1876,
"L": 131.1729,
"M": 149.2113,
"N": 132.1179,
"O": 255.3134,
"P": 115.1305,
"Q": 146.1445,
"R": 174.201,
"S": 105.0926,
"T": 119.1192,
"U": 168.0532,
"V": 117.1463,
"W": 204.2252,
"Y": 181.1885
}
monoisotopic_protein_weights = {
"A": 89.047678,
"C": 121.019749,
"D": 133.037508,
"E": 147.053158,
"F": 165.078979,
"G": 75.032028,
"H": 155.069477,
"I": 131.094629,
"K": 146.105528,
"L": 131.094629,
"M": 149.051049,
"N": 132.053492,
"O": 255.158292,
"P": 115.063329,
"Q": 146.069142,
"R": 174.111676,
"S": 105.042593,
"T": 119.058243,
"U": 168.964203,
"V": 117.078979,
"W": 204.089878,
"Y": 181.073893,
}
extended_protein_values = {
"A": "A",
"B": "ND",
"C": "C",
"D": "D",
"E": "E",
"F": "F",
"G": "G",
"H": "H",
"I": "I",
"J": "IL",
"K": "K",
"L": "L",
"M": "M",
"N": "N",
"O": "O",
"P": "P",
"Q": "Q",
"R": "R",
"S": "S",
"T": "T",
"U": "U",
"V": "V",
"W": "W",
"X": "ACDEFGHIKLMNPQRSTVWY",
# TODO - Include U and O in the possible values of X?
# This could alter the extended_protein_weight_ranges ...
# by MP: Won't do this, because they are so rare.
"Y": "Y",
"Z": "QE",
}
protein_weight_ranges = _make_ranges(protein_weights)
extended_protein_weight_ranges, avg_extended_protein_weights = \
_make_ambiguous_ranges(extended_protein_values,
protein_weights)
# For Center of Mass Calculation.
# Taken from http://www.chem.qmul.ac.uk/iupac/AtWt/ & PyMol
atom_weights = {
'H': 1.00794,
'D': 2.01410,
'He': 4.002602,
'Li': 6.941,
'Be': 9.012182,
'B': 10.811,
'C': 12.0107,
'N': 14.0067,
'O': 15.9994,
'F': 18.9984032,
'Ne': 20.1797,
'Na': 22.989770,
'Mg': 24.3050,
'Al': 26.981538,
'Si': 28.0855,
'P': 30.973761,
'S': 32.065,
'Cl': 35.453,
'Ar': 39.948,
'K': 39.0983,
'Ca': 40.078,
'Sc': 44.955910,
'Ti': 47.867,
'V': 50.9415,
'Cr': 51.9961,
'Mn': 54.938049,
'Fe': 55.845,
'Co': 58.933200,
'Ni': 58.6934,
'Cu': 63.546,
'Zn': 65.39,
'Ga': 69.723,
'Ge': 72.64,
'As': 74.92160,
'Se': 78.96,
'Br': 79.904,
'Kr': 83.80,
'Rb': 85.4678,
'Sr': 87.62,
'Y': 88.90585,
'Zr': 91.224,
'Nb': 92.90638,
'Mo': 95.94,
'Tc': 98.0,
'Ru': 101.07,
'Rh': 102.90550,
'Pd': 106.42,
'Ag': 107.8682,
'Cd': 112.411,
'In': 114.818,
'Sn': 118.710,
'Sb': 121.760,
'Te': 127.60,
'I': 126.90447,
'Xe': 131.293,
'Cs': 132.90545,
'Ba': 137.327,
'La': 138.9055,
'Ce': 140.116,
'Pr': 140.90765,
'Nd': 144.24,
'Pm': 145.0,
'Sm': 150.36,
'Eu': 151.964,
'Gd': 157.25,
'Tb': 158.92534,
'Dy': 162.50,
'Ho': 164.93032,
'Er': 167.259,
'Tm': 168.93421,
'Yb': 173.04,
'Lu': 174.967,
'Hf': 178.49,
'Ta': 180.9479,
'W': 183.84,
'Re': 186.207,
'Os': 190.23,
'Ir': 192.217,
'Pt': 195.078,
'Au': 196.96655,
'Hg': 200.59,
'Tl': 204.3833,
'Pb': 207.2,
'Bi': 208.98038,
'Po': 208.98,
'At': 209.99,
'Rn': 222.02,
'Fr': 223.02,
'Ra': 226.03,
'Ac': 227.03,
'Th': 232.0381,
'Pa': 231.03588,
'U': 238.02891,
'Np': 237.05,
'Pu': 244.06,
'Am': 243.06,
'Cm': 247.07,
'Bk': 247.07,
'Cf': 251.08,
'Es': 252.08,
'Fm': 257.10,
'Md': 258.10,
'No': 259.10,
'Lr': 262.11,
'Rf': 261.11,
'Db': 262.11,
'Sg': 266.12,
'Bh': 264.12,
'Hs': 269.13,
'Mt': 268.14,
}
| 22.186893 | 80 | 0.50815 |
819438227c18cf1318563b916e435e4027b1b9c4 | 591 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractBluebunnytranslationsWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractBluebunnytranslationsWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractBluebunnytranslationsWordpressCom.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractBluebunnytranslationsWordpressCom(item):
'''
Parser for 'bluebunnytranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('IHM', 'I Have Medicine', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 29.55 | 104 | 0.659898 |
f53657f60e835a1cbda82c04cfd2252d03aaa786 | 331 | py | Python | public/app.py | ayushmankumar7/Tre-Tak | 781e9c4b72c46706bf62b704ace25a6180b3b620 | [
"MIT"
] | null | null | null | public/app.py | ayushmankumar7/Tre-Tak | 781e9c4b72c46706bf62b704ace25a6180b3b620 | [
"MIT"
] | null | null | null | public/app.py | ayushmankumar7/Tre-Tak | 781e9c4b72c46706bf62b704ace25a6180b3b620 | [
"MIT"
] | null | null | null | from flask import Flask, send_from_directory, render_template
import os
app = Flask(__name__)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'images/icon.ico')
@app.route("/")
def index():
return render_template("index.html")
| 20.6875 | 69 | 0.65861 |
64c7aec74eb8ed3b629ea4077e78b8f3b6e00265 | 1,629 | py | Python | iotronic_ui/project/map/views.py | MDSLab/iotronic-ops-dashboard | ef12ec244e9ef156322749593c8d17de726f5586 | [
"Apache-2.0"
] | null | null | null | iotronic_ui/project/map/views.py | MDSLab/iotronic-ops-dashboard | ef12ec244e9ef156322749593c8d17de726f5586 | [
"Apache-2.0"
] | 1 | 2018-10-17T10:59:55.000Z | 2018-10-30T11:58:40.000Z | iotronic_ui/project/map/views.py | MDSLab/iotronic-ops-dashboard | ef12ec244e9ef156322749593c8d17de726f5586 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from horizon import exceptions
from horizon import views
from openstack_dashboard.api import iotronic
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
class IndexView(views.APIView):
# A very simple class-based view...
template_name = 'project/map/index.html'
def get_data(self, request, context, *args, **kwargs):
boards = []
result = {'list': []}
# Admin_iot_project
if policy.check((("iot", "iot:list_project_boards"),), self.request):
try:
boards = iotronic.board_list(self.request, None, None)
# LOG.debug('MAP data INFO: %s', boards)
for i in range(len(boards)):
result["list"].append(boards[i]._info)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project boards list.'))
# LOG.debug('MAP board list: %s', json.dumps(result))
context["boards"] = json.dumps(result)
return context
| 33.244898 | 79 | 0.655003 |
bafa85e1b393b19efad399ec3700a3b4c7b9f07c | 2,939 | py | Python | Xana/Xfit/fitgaussian.py | ClLov/Xana | 83d880432a457cff0f1fab2801e2530ddecb4019 | [
"MIT"
] | null | null | null | Xana/Xfit/fitgaussian.py | ClLov/Xana | 83d880432a457cff0f1fab2801e2530ddecb4019 | [
"MIT"
] | null | null | null | Xana/Xfit/fitgaussian.py | ClLov/Xana | 83d880432a457cff0f1fab2801e2530ddecb4019 | [
"MIT"
] | null | null | null | import numpy as np
from numpy import exp, sqrt, pi
import lmfit
def gaussian(x, **p):
    """Sum of p['nm'] area-normalized Gaussian modes plus a constant
    background p['bg'].

    Mode i is described by p['cen<i>'] (center), p['sig<i>'] (width) and
    p['a<i>'] (area).
    """
    norm = sqrt(2 * pi)
    total = p['bg']
    for i in range(p['nm']):
        amp = p['a%d' % i]
        cen = p['cen%d' % i]
        sig = p['sig%d' % i]
        total += amp / (norm * sig) * exp(-(x - cen) ** 2 / (2 * sig ** 2))
    return total
def fitgaussian( x, y, err=None, mode='standard', nmodes=1, start=None, lb=None, ub=None, doplot=0, h_plot=None, ax=None,
                 output='pars', xl=None, ylim=None, color=None):
    """ Fit data with Gaussian peak.

    Fits `nmodes` Gaussian modes (optionally plus a constant background) to
    the data (x, y) using lmfit.

    Parameters:
        x, y   : data to fit.
        err    : optional y uncertainties; used as 1/err**2 fit weights.
        mode   : flag string; may contain 'bg' (fit the background level)
                 and 'logx' (weight points by 1/log10(x)).
        nmodes : number of Gaussian modes.
        start, lb, ub : start values and lower/upper bounds for
                 (center, sigma, area[, bg]).
        doplot : falsy to skip plotting; a string may contain 'legend',
                 'data' and/or 'report'.
        output : 'pars' returns an (3*nmodes+1, 2) array of best values and
                 stderrs; 'fit' returns the lmfit result object.

    NOTE(review): `plt` (matplotlib.pyplot) is used below but is not among
    this module's visible imports — confirm it is available. `ylim` appears
    unused.
    """
    # initialize parameters
    pars = lmfit.Parameters()
    for i in range(nmodes):
        mst = str(i)
        if i > 0:
            # offset start values slightly for each additional mode
            start = np.array(start) *1.1
        pars.add('cen'+mst, value=start[0], min=lb[0], max=ub[0], vary=1)
        pars.add('sig'+mst, value=start[1], min=lb[1], max=ub[1], vary=1)
        pars.add('a'+mst, value=start[2], min=lb[2], max=ub[2], vary=1)

    pars.add('bg', value=1., vary=0)
    pars.add('nm', value=nmodes, vary=0)

    if 'bg' in mode:
        # background is fitted: either free (3 start values) or with an
        # explicit start value and bounds (4 start values)
        if len(start) == 3:
            pars['bg'].set(vary=1)
        elif len(start) == 4:
            pars['bg'].set(value=start[3], min=lb[3], max=ub[3], vary=1)

    if err is not None:
        wgt = 1./err**2
    else:
        wgt = np.ones_like(y)

    if 'logx' in mode:
        # NOTE(review): this overrides any err-based weights — confirm intent.
        wgt = 1/np.log10(x)

    mod = lmfit.Model(gaussian)
    out = mod.fit(y, pars, x=x, weights=wgt)

    # plot results
    if doplot:
        if xl is None:
            if ax is None:
                ax = plt.gca()
            xl = ax.get_xlim()
        if xl[0] == 0:
            xl = (np.min(x)*0.9,np.max(x)*1.1)
        # evaluate the best-fit model on a log-spaced grid for plotting
        xf = np.logspace(np.log10(xl[0]),np.log10(xl[1]),100)
        v = out.best_values
        gf = gaussian(xf, **v)
        if 'legend' in doplot:
            labstr = (r'$\mu: {0[0]:.2g},\, \sigma: {0[1]:.2g},\,'+
                      r' \mathrm{{a}}: {0[2]:.2g},\, \mathrm{{bg}}: {0[3]:.2g}$')
            labstr = labstr.format([v[name] for name in out.var_names])
        else:
            labstr = ''
        pl = []
        pl.append(ax.plot(xf, gf, '-', label=labstr, linewidth=1))
        if 'data' in doplot:
            pl.append(ax.plot(x, y, 'o', markersize=2.5))
        if color is None:
            # inherit color from an existing plot handle when available
            if h_plot is not None:
                color = h_plot.get_color()
            elif 'data' in doplot:
                color = pl[0][0].get_color()
            else:
                color = 'gray'
        for p in pl:
            p[0].set_color(color)
        ax.legend()

        if 'report' in doplot:
            print(out.fit_report())

    if output == 'pars':
        pars = np.zeros((3*nmodes+1,2))
        # NOTE(review): relies on out.var_names and out.params sharing the
        # same ordering — confirm against the lmfit version in use.
        for i,vn in enumerate(out.var_names):
            pars[i,0] = out.best_values[vn]
            param = list(out.params.values())
            pars[i,1] = 1.*param[i].stderr
        return pars
    elif output == 'fit':
        return out
d2f42ce2e1ed044de1af1b69b22c445011d00d4c | 3,260 | py | Python | scripts/artifacts/healthCadence.py | rathbuna/iLEAPP | 391ddfab2257875fdf8181c84eb29a4992b60af7 | [
"MIT"
] | 2 | 2021-02-09T21:46:14.000Z | 2021-06-14T12:26:55.000Z | scripts/artifacts/healthCadence.py | rathbuna/iLEAPP | 391ddfab2257875fdf8181c84eb29a4992b60af7 | [
"MIT"
] | null | null | null | scripts/artifacts/healthCadence.py | rathbuna/iLEAPP | 391ddfab2257875fdf8181c84eb29a4992b60af7 | [
"MIT"
] | 1 | 2020-12-11T10:07:28.000Z | 2020-12-11T10:07:28.000Z | import glob
import os
import pathlib
import plistlib
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
from scripts.ccl import ccl_bplist
def get_healthCadence(files_found, report_folder, seeker):
    """Extract workout cadence samples from the iOS Health SQLite database.

    Joins SAMPLES/METADATA_VALUES/METADATA_KEYS/WORKOUTS on the private
    average-cadence metadata key and, when rows exist, writes an HTML
    artifact report, a TSV export and a timeline entry into report_folder.
    """
    file_found = str(files_found[0])
    db = sqlite3.connect(file_found)
    cursor = db.cursor()
    # Timestamps are stored relative to the Apple epoch (2001-01-01), hence
    # the +978307200 offset before converting from UNIXEPOCH.
    cursor.execute('''
    SELECT
    DATETIME(SAMPLES.START_DATE + 978307200, 'UNIXEPOCH') AS "START DATE",
    DATETIME(SAMPLES.END_DATE + 978307200, 'UNIXEPOCH') AS "END DATE",
    METADATA_VALUES.NUMERICAL_VALUE AS "SPM (strides/min)",
    CASE WORKOUTS.ACTIVITY_TYPE
    WHEN 63 THEN "HIGH INTENSITY INTERVAL TRAINING (HIIT)"
    WHEN 37 THEN "INDOOR / OUTDOOR RUN"
    WHEN 3000 THEN "OTHER"
    WHEN 52 THEN "INDOOR / OUTDOOR WALK"
    WHEN 20 THEN "FUNCTIONAL TRAINING"
    WHEN 13 THEN "INDOOR CYCLE"
    WHEN 16 THEN "ELLIPTICAL"
    WHEN 35 THEN "ROWER"
    ELSE "UNKNOWN" || "-" || WORKOUTS.ACTIVITY_TYPE
    END "WORKOUT TYPE",
    WORKOUTS.DURATION / 60.00 AS "DURATION (IN MINUTES)",
    WORKOUTS.TOTAL_ENERGY_BURNED AS "CALORIES BURNED",
    WORKOUTS.TOTAL_DISTANCE AS "DISTANCE IN KILOMETERS",
    WORKOUTS.TOTAL_DISTANCE*0.621371 AS "DISTANCE IN MILES",
    WORKOUTS.TOTAL_BASAL_ENERGY_BURNED AS "TOTAL BASEL ENERGY BURNED",
    CASE WORKOUTS.GOAL_TYPE
    WHEN 2 THEN "MINUTES"
    WHEN 0 THEN "OPEN"
    END "GOAL TYPE",
    WORKOUTS.GOAL AS "GOAL",
    WORKOUTS.TOTAL_FLIGHTS_CLIMBED AS "FLIGHTS CLIMBED",
    WORKOUTS.TOTAL_W_STEPS AS "STEPS"
    FROM
    SAMPLES
    LEFT OUTER JOIN
    METADATA_VALUES
    ON METADATA_VALUES.OBJECT_ID = SAMPLES.DATA_ID
    LEFT OUTER JOIN
    METADATA_KEYS
    ON METADATA_KEYS.ROWID = METADATA_VALUES.KEY_ID
    LEFT OUTER JOIN
    WORKOUTS
    ON WORKOUTS.DATA_ID = SAMPLES.DATA_ID
    WHERE
    WORKOUTS.ACTIVITY_TYPE NOT NULL AND KEY IS "_HKPrivateWorkoutAverageCadence"
    ''')

    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    if usageentries > 0:
        data_list = []
        for row in all_rows:
            # one tuple per workout row, in SELECT column order
            data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12]))

        report = ArtifactHtmlReport('Health Workout Cadence')
        report.start_artifact_report(report_folder, 'Workout Cadence')
        report.add_script()
        data_headers = ('Start Date','End Date','Strides per Min.','Workout Type','Duration in Mins.','Calories Burned','Distance in KM','Distance in Miles','Total Base Energy','Goal Type','Goal','Flights Climbed','Steps' )
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()

        tsvname = 'Health Cadence'
        tsv(report_folder, data_headers, data_list, tsvname)

        tlactivity = 'Health Cadence'
        timeline(report_folder, tlactivity, data_list)
    else:
        logfunc('No data available in table')

    db.close()
    return
| 38.809524 | 226 | 0.639571 |
67d3b4ba26fb78582ff2e2d2ea5c5cc4988e4e53 | 4,506 | py | Python | networkx/algorithms/assortativity/connectivity.py | jmmcd/networkx | 207ff7d1e9bfaff013ac77c8d6bb79619892c994 | [
"BSD-3-Clause"
] | 1 | 2020-08-08T21:52:34.000Z | 2020-08-08T21:52:34.000Z | networkx/algorithms/assortativity/connectivity.py | jmmcd/networkx | 207ff7d1e9bfaff013ac77c8d6bb79619892c994 | [
"BSD-3-Clause"
] | 2 | 2019-11-13T03:48:53.000Z | 2021-02-15T16:52:09.000Z | networkx/algorithms/assortativity/connectivity.py | jmmcd/networkx | 207ff7d1e9bfaff013ac77c8d6bb79619892c994 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
#
#
# Authors: Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
from collections import defaultdict
__all__ = ['average_degree_connectivity',
'k_nearest_neighbors']
def average_degree_connectivity(G, source="in+out", target="in+out",
                                nodes=None, weight=None):
    r"""Compute the average degree connectivity of graph.

    The average degree connectivity is the average nearest neighbor degree of
    nodes with degree k. For weighted graphs, an analogous measure can
    be computed using the weighted average neighbors degree defined in
    [1]_, for a node `i`, as

    .. math::

        k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j

    where `s_i` is the weighted degree of node `i`,
    `w_{ij}` is the weight of the edge that links `i` and `j`,
    and `N(i)` are the neighbors of node `i`.

    Parameters
    ----------
    G : NetworkX graph

    source : "in"|"out"|"in+out" (default:"in+out")
       Directed graphs only. Use "in"- or "out"-degree for source node.

    target : "in"|"out"|"in+out" (default:"in+out"
       Directed graphs only. Use "in"- or "out"-degree for target node.

    nodes : list or iterable (optional)
        Compute neighbor connectivity for these nodes. The default is all
        nodes.

    weight : string or None, optional (default=None)
       The edge attribute that holds the numerical value used as a weight.
       If None, then each edge has weight 1.

    Returns
    -------
    d : dict
       A dictionary keyed by degree k with the value of average connectivity.

    Raises
    ------
    ValueError
        If either `source` or `target` are not one of 'in',
        'out', or 'in+out'.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> G.edges[1, 2]['weight'] = 3
    >>> nx.k_nearest_neighbors(G)
    {1: 2.0, 2: 1.5}
    >>> nx.k_nearest_neighbors(G, weight='weight')
    {1: 2.0, 2: 1.75}

    See also
    --------
    neighbors_average_degree

    Notes
    -----
    This algorithm is sometimes called "k nearest neighbors" and is also
    available as `k_nearest_neighbors`.

    References
    ----------
    .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
       "The architecture of complex weighted networks".
       PNAS 101 (11): 3747–3752 (2004).
    """
    # First, determine the type of neighbors and the type of degree to use.
    if G.is_directed():
        if source not in ('in', 'out', 'in+out'):
            raise ValueError('source must be one of "in", "out", or "in+out"')
        if target not in ('in', 'out', 'in+out'):
            raise ValueError('target must be one of "in", "out", or "in+out"')
        direction = {'out': G.out_degree,
                     'in': G.in_degree,
                     'in+out': G.degree}
        neighbor_funcs = {'out': G.successors,
                          'in': G.predecessors,
                          'in+out': G.neighbors}
        source_degree = direction[source]
        target_degree = direction[target]
        neighbors = neighbor_funcs[source]
        # `reverse` indicates whether to look at the in-edge when
        # computing the weight of an edge.
        reverse = (source == 'in')
    else:
        source_degree = G.degree
        target_degree = G.degree
        neighbors = G.neighbors
        reverse = False
    # dsum[k]: summed (weighted) neighbor degree over all degree-k nodes
    # dnorm[k]: summed (weighted) source degree over all degree-k nodes
    dsum = defaultdict(int)
    dnorm = defaultdict(int)
    # Check if `source_nodes` is actually a single node in the graph.
    source_nodes = source_degree(nodes)
    if nodes in G:
        source_nodes = [(nodes, source_degree(nodes))]
    for n, k in source_nodes:
        nbrdeg = target_degree(neighbors(n))
        if weight is None:
            s = sum(d for n, d in nbrdeg)
        else:  # weight nbr degree by weight of (n,nbr) edge
            if reverse:
                s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg)
            else:
                s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg)
        dnorm[k] += source_degree(n, weight=weight)
        dsum[k] += s
    # normalize
    dc = {}
    for k, avg in dsum.items():
        dc[k] = avg
        norm = dnorm[k]
        if avg > 0 and norm > 0:
            dc[k] /= norm
    return dc


# Backwards-compatible alias (see the Notes section of the docstring).
k_nearest_neighbors = average_degree_connectivity
| 31.957447 | 78 | 0.582335 |
5494b49a9993bf66a03a0f97c0b2dc8d9a062c60 | 57,098 | py | Python | exl_env/lib/python3.6/site-packages/xgboost/core.py | verma-varsha/fraud-detection | 13c5b0c274dfa2b68e82a4ee317e09223b5b663f | [
"MIT"
] | 1 | 2019-12-26T03:43:28.000Z | 2019-12-26T03:43:28.000Z | exl_env/lib/python3.6/site-packages/xgboost/core.py | verma-varsha/fraud-detection | 13c5b0c274dfa2b68e82a4ee317e09223b5b663f | [
"MIT"
] | 4 | 2021-03-18T22:30:03.000Z | 2022-02-12T06:12:28.000Z | exl_env/lib/python3.6/site-packages/xgboost/core.py | verma-varsha/fraud-detection | 13c5b0c274dfa2b68e82a4ee317e09223b5b663f | [
"MIT"
] | 1 | 2022-01-27T15:35:59.000Z | 2022-01-27T15:35:59.000Z | # coding: utf-8
# pylint: disable=too-many-arguments, too-many-branches, invalid-name
# pylint: disable=too-many-branches, too-many-lines, W0141
"""Core XGBoost Library."""
from __future__ import absolute_import
import collections
import ctypes
import os
import re
import sys
import numpy as np
import scipy.sparse
from .compat import STRING_TYPES, PY3, DataFrame, MultiIndex, py_str, PANDAS_INSTALLED, DataTable
from .libpath import find_lib_path
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64
class XGBoostError(Exception):
    """Base error raised by the xgboost trainer and native-library wrappers."""
class EarlyStopException(Exception):
    """Raised by callbacks to signal that training should stop early.

    Parameters
    ----------
    best_iteration : int
        The best iteration at which training was stopped.
    """

    def __init__(self, best_iteration):
        super(EarlyStopException, self).__init__()
        # remember where the best score was seen so callers can truncate
        self.best_iteration = best_iteration
# Callback environment used by callbacks.
# Immutable snapshot of training state handed to callback functions once per
# boosting round.  NOTE(review): field semantics inferred from names only —
# confirm against the training-loop call sites before relying on them.
CallbackEnv = collections.namedtuple(
    "XGBoostCallbackEnv",
    ["model",
     "cvfolds",
     "iteration",
     "begin_iteration",
     "end_iteration",
     "rank",
     "evaluation_result_list"])
def from_pystr_to_cstr(data):
    """Convert a list of Python str into a C array of ``char*``.

    Parameters
    ----------
    data : list
        list of str

    Raises
    ------
    NotImplementedError
        If *data* is not a list.
    """
    if not isinstance(data, list):
        # copy from above when we actually use it
        raise NotImplementedError
    pointers = (ctypes.c_char_p * len(data))()
    if PY3:
        encoded = [bytes(item, 'utf-8') for item in data]
    else:
        encoded = [item.encode('utf-8') if isinstance(item, unicode) else item
                   for item in data]
    pointers[:] = encoded
    return pointers
def from_cstr_to_pystr(data, length):
    """Convert a C array of ``char*`` back into a list of Python str.

    Parameters
    ----------
    data : ctypes pointer
        pointer to the string array
    length : ctypes pointer
        pointer holding the number of entries
    """
    res = []
    for i in range(length.value):
        raw = data[i]
        try:
            # fast path: plain ASCII
            res.append(str(raw.decode('ascii')))
        except UnicodeDecodeError:
            # fall back to UTF-8; Python 2 needs an explicit unicode object
            if PY3:
                res.append(str(raw.decode('utf-8')))
            else:
                res.append(unicode(raw.decode('utf-8')))
    return res
def _log_callback(msg):
    """Redirect logs from native library into Python console"""
    # msg arrives as a C char*; py_str decodes it to a Python str.
    print("{0:s}".format(py_str(msg)))
def _get_log_callback_func():
    """Wrap log_callback() method in ctypes callback type"""
    # pylint: disable=invalid-name
    CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
    # NOTE: the returned callback object must stay referenced for the lifetime
    # of the library (see _load_lib, which stores it as lib.callback) or
    # ctypes may garbage-collect it while the native side still uses it.
    return CALLBACK(_log_callback)
def _load_lib():
    """Load the xgboost shared library.

    Returns
    -------
    lib : ctypes.CDLL or None
        The loaded library handle, or None when no candidate path exists.

    Raises
    ------
    XGBoostError
        When candidate paths exist but none of them could be loaded, or when
        registering the log callback fails.
    """
    lib_paths = find_lib_path()
    if len(lib_paths) == 0:
        return None
    pathBackup = os.environ['PATH']
    lib_success = False
    os_error_list = []
    try:
        for lib_path in lib_paths:
            try:
                # needed when the lib is linked with non-system-available
                # dependencies: let the loader see the lib's own directory
                os.environ['PATH'] = pathBackup + os.pathsep + os.path.dirname(lib_path)
                lib = ctypes.cdll.LoadLibrary(lib_path)
                lib_success = True
            except OSError as e:
                os_error_list.append(str(e))
                continue
    finally:
        # BUG FIX: the original permanently extended PATH with the candidate
        # directory; restore the caller's environment once loading is done
        # (dependent DLLs are resolved at LoadLibrary time).
        os.environ['PATH'] = pathBackup
    if not lib_success:
        libname = os.path.basename(lib_paths[0])
        raise XGBoostError(
            'XGBoost Library ({}) could not be loaded.\n'.format(libname) +
            'Likely causes:\n' +
            '  * OpenMP runtime is not installed ' +
            '(vcomp140.dll or libgomp-1.dll for Windows, ' +
            'libgomp.so for UNIX-like OSes)\n' +
            '  * You are running 32-bit Python on a 64-bit OS\n' +
            'Error message(s): {}\n'.format(os_error_list))
    lib.XGBGetLastError.restype = ctypes.c_char_p
    # keep the callback object alive on the library handle (ctypes requirement)
    lib.callback = _get_log_callback_func()
    if lib.XGBRegisterLogCallback(lib.callback) != 0:
        raise XGBoostError(lib.XGBGetLastError())
    return lib
# load the XGBoost library globally
_LIB = _load_lib()
def _check_call(ret):
    """Check the return value of a C API call.

    Every native call returns 0 on success; any other value means the native
    side recorded an error, which is re-raised here as XGBoostError.

    Parameters
    ----------
    ret : int
        return value from API calls
    """
    if ret == 0:
        return
    raise XGBoostError(_LIB.XGBGetLastError())
def ctypes2numpy(cptr, length, dtype):
    """Copy a ctypes pointer array into a freshly allocated 1-D numpy array."""
    NUMPY_TO_CTYPES_MAPPING = {
        np.float32: ctypes.c_float,
        np.uint32: ctypes.c_uint,
    }
    if dtype not in NUMPY_TO_CTYPES_MAPPING:
        raise RuntimeError('Supported types: {}'.format(NUMPY_TO_CTYPES_MAPPING.keys()))
    ctype = NUMPY_TO_CTYPES_MAPPING[dtype]
    if not isinstance(cptr, ctypes.POINTER(ctype)):
        raise RuntimeError('expected {} pointer'.format(ctype))
    out = np.zeros(length, dtype=dtype)
    # raw byte copy from the C buffer into the numpy-owned memory
    nbytes = length * out.strides[0]
    if not ctypes.memmove(out.ctypes.data, cptr, nbytes):
        raise RuntimeError('memmove failed')
    return out
def ctypes2buffer(cptr, length):
    """Copy *length* bytes from a ctypes char pointer into a bytearray."""
    if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
        raise RuntimeError('expected char pointer')
    out = bytearray(length)
    # create a writable ctypes view over the bytearray, then copy into it
    view = (ctypes.c_char * length).from_buffer(out)
    if not ctypes.memmove(view, cptr, length):
        raise RuntimeError('memmove failed')
    return out
def c_str(string):
    """Encode a Python string as UTF-8 and wrap it in a ctypes ``c_char_p``."""
    encoded = string.encode('utf-8')
    return ctypes.c_char_p(encoded)
def c_array(ctype, values):
    """Build a ctypes array of *ctype* from a Python sequence.

    A numpy array whose item size matches *ctype* is copied directly from its
    raw buffer; anything else goes through element-wise conversion.
    """
    if isinstance(values, np.ndarray) and values.dtype.itemsize == ctypes.sizeof(ctype):
        return (ctype * len(values)).from_buffer_copy(values)
    arr_type = ctype * len(values)
    return arr_type(*values)
# Map pandas dtype names to the DMatrix feature-type strings used by xgboost
# ('int'/'float'/'i'); also serves as the whitelist of accepted dtypes.
PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int', 'int64': 'int',
                       'uint8': 'int', 'uint16': 'int', 'uint32': 'int', 'uint64': 'int',
                       'float16': 'float', 'float32': 'float', 'float64': 'float',
                       'bool': 'i'}
def _maybe_pandas_data(data, feature_names, feature_types):
    """Extract internal data from pd.DataFrame for DMatrix data.

    Non-DataFrame inputs pass through unchanged; DataFrames are validated
    against PANDAS_DTYPE_MAPPER and converted to a float ndarray.
    """
    if not isinstance(data, DataFrame):
        return data, feature_names, feature_types
    data_dtypes = data.dtypes
    # every column must have a whitelisted dtype
    if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
        bad_fields = [data.columns[i] for i, dtype in
                      enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER]
        msg = """DataFrame.dtypes for data must be int, float or bool.
Did not expect the data types in fields """
        raise ValueError(msg + ', '.join(bad_fields))
    if feature_names is None:
        if isinstance(data.columns, MultiIndex):
            # flatten MultiIndex column labels into space-joined strings
            feature_names = [
                ' '.join(map(str, i))
                for i in data.columns
            ]
        else:
            feature_names = data.columns.format()
    if feature_types is None:
        feature_types = [PANDAS_DTYPE_MAPPER[dtype.name] for dtype in data_dtypes]
    data = data.values.astype('float')
    return data, feature_names, feature_types
def _maybe_pandas_label(label):
    """Extract a float ndarray from a single-column pd.DataFrame label.

    Anything other than a DataFrame (e.g. a pd.Series) is returned as-is,
    since xgboost can consume it directly.
    """
    if not isinstance(label, DataFrame):
        return label
    if len(label.columns) > 1:
        raise ValueError('DataFrame for label cannot have multiple columns')
    if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in label.dtypes):
        raise ValueError('DataFrame.dtypes for label must be int, float or bool')
    return label.values.astype('float')
# Map datatable ltype names to feature-type strings.  The first variant is
# used for validation; the second maps 'bool' to 'i' to match
# PANDAS_DTYPE_MAPPER's convention for booleans.
DT_TYPE_MAPPER = {'bool': 'bool', 'int': 'int', 'real': 'float'}
DT_TYPE_MAPPER2 = {'bool': 'i', 'int': 'int', 'real': 'float'}
def _maybe_dt_data(data, feature_names, feature_types):
    """
    Validate feature names and types if data table

    Non-DataTable inputs pass through unchanged.
    """
    if not isinstance(data, DataTable):
        return data, feature_names, feature_types
    data_types_names = tuple(lt.name for lt in data.ltypes)
    # every column's ltype must be a whitelisted one
    if not all(type_name in DT_TYPE_MAPPER for type_name in data_types_names):
        bad_fields = [data.names[i] for i, type_name in
                      enumerate(data_types_names) if type_name not in DT_TYPE_MAPPER]
        msg = """DataFrame.types for data must be int, float or bool.
Did not expect the data types in fields """
        raise ValueError(msg + ', '.join(bad_fields))
    if feature_names is None:
        feature_names = data.names
        # always return stypes for dt ingestion
        if feature_types is not None:
            raise ValueError('DataTable has own feature types, cannot pass them in')
        else:
            feature_types = np.vectorize(DT_TYPE_MAPPER2.get)(data_types_names)
    return data, feature_names, feature_types
def _maybe_dt_array(array):
    """Extract a float numpy column from a single-column DataTable.

    Anything that is not a DataTable (including None) is returned unchanged.
    """
    if array is None or not isinstance(array, DataTable):
        return array
    if array.shape[1] > 1:
        raise ValueError('DataTable for label or weight cannot have multiple columns')
    # below requires new dt version
    # extract first column
    return array.tonumpy()[:, 0].astype('float')
class DMatrix(object):
    """Data Matrix used in XGBoost.

    DMatrix is an internal data structure used by XGBoost which is optimized
    for both memory efficiency and training speed.  You can construct a
    DMatrix from numpy arrays, scipy sparse matrices, pandas DataFrames,
    datatable Frames, or libsvm/binary files.
    """
    # class-level defaults so instances unpickled from previous versions
    # still expose these attributes
    _feature_names = None  # for previous version's pickle
    _feature_types = None
def __init__(self, data, label=None, missing=None,
weight=None, silent=False,
feature_names=None, feature_types=None,
nthread=None):
"""
Parameters
----------
data : string/numpy array/scipy.sparse/pd.DataFrame/DataTable
Data source of DMatrix.
When data is string type, it represents the path libsvm format txt file,
or binary file that xgboost can read from.
label : list or numpy 1-D array, optional
Label of the training data.
missing : float, optional
Value in the data which needs to be present as a missing value. If
None, defaults to np.nan.
weight : list or numpy 1-D array , optional
Weight for each instance.
silent : boolean, optional
Whether print messages during construction
feature_names : list, optional
Set names for features.
feature_types : list, optional
Set types for features.
nthread : integer, optional
Number of threads to use for loading data from numpy array. If -1,
uses maximum threads available on the system.
"""
# force into void_p, mac need to pass things in as void_p
if data is None:
self.handle = None
if feature_names is not None:
self._feature_names = feature_names
if feature_types is not None:
self._feature_types = feature_types
return
data, feature_names, feature_types = _maybe_pandas_data(data,
feature_names,
feature_types)
data, feature_names, feature_types = _maybe_dt_data(data,
feature_names,
feature_types)
label = _maybe_pandas_label(label)
label = _maybe_dt_array(label)
weight = _maybe_dt_array(weight)
if isinstance(data, STRING_TYPES):
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromFile(c_str(data),
ctypes.c_int(silent),
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self._init_from_csr(data)
elif isinstance(data, scipy.sparse.csc_matrix):
self._init_from_csc(data)
elif isinstance(data, np.ndarray):
self._init_from_npy2d(data, missing, nthread)
elif isinstance(data, DataTable):
self._init_from_dt(data, nthread)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self._init_from_csr(csr)
except:
raise TypeError('can not initialize DMatrix from'
' {}'.format(type(data).__name__))
if label is not None:
if isinstance(label, np.ndarray):
self.set_label_npy2d(label)
else:
self.set_label(label)
if weight is not None:
if isinstance(weight, np.ndarray):
self.set_weight_npy2d(weight)
else:
self.set_weight(weight)
self.feature_names = feature_names
self.feature_types = feature_types
def _init_from_csr(self, csr):
"""
Initialize data from a CSR matrix.
"""
if len(csr.indices) != len(csr.data):
raise ValueError('length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSREx(c_array(ctypes.c_size_t, csr.indptr),
c_array(ctypes.c_uint, csr.indices),
c_array(ctypes.c_float, csr.data),
ctypes.c_size_t(len(csr.indptr)),
ctypes.c_size_t(len(csr.data)),
ctypes.c_size_t(csr.shape[1]),
ctypes.byref(self.handle)))
def _init_from_csc(self, csc):
"""
Initialize data from a CSC matrix.
"""
if len(csc.indices) != len(csc.data):
raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSCEx(c_array(ctypes.c_size_t, csc.indptr),
c_array(ctypes.c_uint, csc.indices),
c_array(ctypes.c_float, csc.data),
ctypes.c_size_t(len(csc.indptr)),
ctypes.c_size_t(len(csc.data)),
ctypes.c_size_t(csc.shape[0]),
ctypes.byref(self.handle)))
def _init_from_npy2d(self, mat, missing, nthread):
"""
Initialize data from a 2-D numpy matrix.
If ``mat`` does not have ``order='C'`` (aka row-major) or is not contiguous,
a temporary copy will be made.
If ``mat`` does not have ``dtype=numpy.float32``, a temporary copy will be made.
So there could be as many as two temporary data copies; be mindful of input layout
and type if memory use is a concern.
"""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
# flatten the array by rows and ensure it is float32.
# we try to avoid data copies if possible (reshape returns a view when possible
# and we explicitly tell np.array to try and avoid copying)
data = np.array(mat.reshape(mat.size), copy=False, dtype=np.float32)
self.handle = ctypes.c_void_p()
missing = missing if missing is not None else np.nan
if nthread is None:
_check_call(_LIB.XGDMatrixCreateFromMat(
data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
c_bst_ulong(mat.shape[0]),
c_bst_ulong(mat.shape[1]),
ctypes.c_float(missing),
ctypes.byref(self.handle)))
else:
_check_call(_LIB.XGDMatrixCreateFromMat_omp(
data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
c_bst_ulong(mat.shape[0]),
c_bst_ulong(mat.shape[1]),
ctypes.c_float(missing),
ctypes.byref(self.handle),
nthread))
    def _init_from_dt(self, data, nthread):
        """
        Initialize data from a DataTable
        """
        cols = []
        ptrs = (ctypes.c_void_p * data.ncols)()
        for icol in range(data.ncols):
            col = data.internal.column(icol)
            # keep a Python reference to each column so its buffer stays
            # alive while the native side reads from the raw pointers
            cols.append(col)
            # int64_t (void*)
            ptr = col.data_pointer
            ptrs[icol] = ctypes.c_void_p(ptr)
        # always return stypes for dt ingestion
        feature_type_strings = (ctypes.c_char_p * data.ncols)()
        for icol in range(data.ncols):
            feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8'))
        self.handle = ctypes.c_void_p()
        _check_call(_LIB.XGDMatrixCreateFromDT(
            ptrs, feature_type_strings,
            c_bst_ulong(data.shape[0]),
            c_bst_ulong(data.shape[1]),
            ctypes.byref(self.handle),
            nthread))
def __del__(self):
if hasattr(self, "handle") and self.handle is not None:
_check_call(_LIB.XGDMatrixFree(self.handle))
self.handle = None
def get_float_info(self, field):
"""Get float property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of float information of the data
"""
length = c_bst_ulong()
ret = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XGDMatrixGetFloatInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.float32)
def get_uint_info(self, field):
"""Get unsigned integer property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of unsigned integer information of the data
"""
length = c_bst_ulong()
ret = ctypes.POINTER(ctypes.c_uint)()
_check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.uint32)
    def set_float_info(self, field, data):
        """Set float type property into the DMatrix.

        Parameters
        ----------
        field: str
            The field name of the information
        data: numpy array
            The array of data to be set
        """
        # c_array converts/copies into a native float buffer for the call
        c_data = c_array(ctypes.c_float, data)
        _check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
                                               c_str(field),
                                               c_data,
                                               c_bst_ulong(len(data))))
    def set_float_info_npy2d(self, field, data):
        """Set float type property into the DMatrix
        for numpy 2d array input

        Parameters
        ----------
        field: str
            The field name of the information
        data: numpy array
            The array of data to be set
        """
        # ensure float32 (copy only if needed), then pass the raw buffer
        data = np.array(data, copy=False, dtype=np.float32)
        c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
        _check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
                                               c_str(field),
                                               c_data,
                                               c_bst_ulong(len(data))))
    def set_uint_info(self, field, data):
        """Set uint type property into the DMatrix.

        Parameters
        ----------
        field: str
            The field name of the information
        data: numpy array
            The array of data to be set
        """
        _check_call(_LIB.XGDMatrixSetUIntInfo(self.handle,
                                              c_str(field),
                                              c_array(ctypes.c_uint, data),
                                              c_bst_ulong(len(data))))
    def save_binary(self, fname, silent=True):
        """Save DMatrix to an XGBoost buffer file on disk.

        Parameters
        ----------
        fname : string
            Name of the output buffer file.
        silent : bool (optional; default: True)
            If set, the output is suppressed.
        """
        _check_call(_LIB.XGDMatrixSaveBinary(self.handle,
                                             c_str(fname),
                                             ctypes.c_int(silent)))
    def set_label(self, label):
        """Set label of dmatrix.

        Parameters
        ----------
        label: array like
            The label information to be set into DMatrix
        """
        self.set_float_info('label', label)
    def set_label_npy2d(self, label):
        """Set label of dmatrix from a numpy array (zero-copy fast path).

        Parameters
        ----------
        label: array like
            The label information to be set into DMatrix
            from numpy 2D array
        """
        self.set_float_info_npy2d('label', label)
    def set_weight(self, weight):
        """Set weight of each instance.

        Parameters
        ----------
        weight : array like
            Weight for each data point
        """
        self.set_float_info('weight', weight)
    def set_weight_npy2d(self, weight):
        """Set weight of each instance from a numpy array (zero-copy fast path).

        Parameters
        ----------
        weight : array like
            Weight for each data point in numpy 2D array
        """
        self.set_float_info_npy2d('weight', weight)
    def set_base_margin(self, margin):
        """Set base margin of booster to start from.

        This can be used to specify a prediction value of an existing model
        to be the base_margin.  Note that the *margin* (pre-transformation
        score) is needed, not the transformed prediction — e.g. for logistic
        regression, supply the value before the logistic transformation.
        See also example/demo.py.

        Parameters
        ----------
        margin: array like
            Prediction margin of each datapoint
        """
        self.set_float_info('base_margin', margin)
    def set_group(self, group):
        """Set group size of DMatrix (used for ranking).

        Parameters
        ----------
        group : array like
            Group size of each group
        """
        _check_call(_LIB.XGDMatrixSetGroup(self.handle,
                                           c_array(ctypes.c_uint, group),
                                           c_bst_ulong(len(group))))
    def get_label(self):
        """Get the label of the DMatrix.

        Returns
        -------
        label : array
            a numpy array of labels
        """
        return self.get_float_info('label')
    def get_weight(self):
        """Get the weight of the DMatrix.

        Returns
        -------
        weight : array
            a numpy array of instance weights
        """
        return self.get_float_info('weight')
    def get_base_margin(self):
        """Get the base margin of the DMatrix.

        Returns
        -------
        base_margin : array
            a numpy array of per-instance base margins (the original
            docstring said ``float``, but get_float_info returns an array)
        """
        return self.get_float_info('base_margin')
    def num_row(self):
        """Get the number of rows in the DMatrix.

        Returns
        -------
        number of rows : int
        """
        ret = c_bst_ulong()
        _check_call(_LIB.XGDMatrixNumRow(self.handle,
                                         ctypes.byref(ret)))
        return ret.value
    def num_col(self):
        """Get the number of columns (features) in the DMatrix.

        Returns
        -------
        number of columns : int
        """
        ret = c_bst_ulong()
        _check_call(_LIB.XGDMatrixNumCol(self.handle,
                                         ctypes.byref(ret)))
        return ret.value
def slice(self, rindex):
"""Slice the DMatrix and return a new DMatrix that only contains `rindex`.
Parameters
----------
rindex : list
List of indices to be selected.
Returns
-------
res : DMatrix
A new DMatrix containing only selected indices.
"""
res = DMatrix(None, feature_names=self.feature_names,
feature_types=self.feature_types)
res.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixSliceDMatrix(self.handle,
c_array(ctypes.c_int, rindex),
c_bst_ulong(len(rindex)),
ctypes.byref(res.handle)))
return res
    @property
    def feature_names(self):
        """Get feature names (column labels).

        Returns
        -------
        feature_names : list or None
        """
        # lazily generate default names f0..fN-1 on first access and cache
        # them (note: this getter mutates state)
        if self._feature_names is None:
            self._feature_names = ['f{0}'.format(i) for i in range(self.num_col())]
        return self._feature_names
    @property
    def feature_types(self):
        """Get feature types (column types).

        Returns
        -------
        feature_types : list or None
        """
        return self._feature_types
@feature_names.setter
def feature_names(self, feature_names):
"""Set feature names (column labels).
Parameters
----------
feature_names : list or None
Labels for features. None will reset existing feature names
"""
if feature_names is not None:
# validate feature name
try:
if not isinstance(feature_names, str):
feature_names = [n for n in iter(feature_names)]
else:
feature_names = [feature_names]
except TypeError:
feature_names = [feature_names]
if len(feature_names) != len(set(feature_names)):
raise ValueError('feature_names must be unique')
if len(feature_names) != self.num_col():
msg = 'feature_names must have the same length as data'
raise ValueError(msg)
# prohibit to use symbols may affect to parse. e.g. []<
if not all(isinstance(f, STRING_TYPES) and
not any(x in f for x in set(('[', ']', '<')))
for f in feature_names):
raise ValueError('feature_names may not contain [, ] or <')
else:
# reset feature_types also
self.feature_types = None
self._feature_names = feature_names
@feature_types.setter
def feature_types(self, feature_types):
"""Set feature types (column types).
This is for displaying the results and unrelated
to the learning process.
Parameters
----------
feature_types : list or None
Labels for features. None will reset existing feature names
"""
if feature_types is not None:
if self._feature_names is None:
msg = 'Unable to set feature types before setting names'
raise ValueError(msg)
if isinstance(feature_types, STRING_TYPES):
# single string will be applied to all columns
feature_types = [feature_types] * self.num_col()
try:
if not isinstance(feature_types, str):
feature_types = [n for n in iter(feature_types)]
else:
feature_types = [feature_types]
except TypeError:
feature_types = [feature_types]
if len(feature_types) != self.num_col():
msg = 'feature_types must have the same length as data'
raise ValueError(msg)
valid = ('int', 'float', 'i', 'q')
if not all(isinstance(f, STRING_TYPES) and f in valid
for f in feature_types):
raise ValueError('All feature_names must be {int, float, i, q}')
self._feature_types = feature_types
class Booster(object):
    """A Booster of XGBoost.

    Booster is the model of xgboost, that contains low level routines for
    training, prediction and evaluation.
    """
    # class-level default so legacy/unpickled instances still have the attribute
    feature_names = None
    def __init__(self, params=None, cache=(), model_file=None):
        # pylint: disable=invalid-name
        """
        Parameters
        ----------
        params : dict
            Parameters for boosters.
        cache : list
            List of cache items (DMatrix instances whose features must be
            consistent with each other).
        model_file : string
            Path to the model file to load.
        """
        # every cached DMatrix must agree on feature names/types
        for d in cache:
            if not isinstance(d, DMatrix):
                raise TypeError('invalid cache item: {}'.format(type(d).__name__))
            self._validate_features(d)
        dmats = c_array(ctypes.c_void_p, [d.handle for d in cache])
        self.handle = ctypes.c_void_p()
        _check_call(_LIB.XGBoosterCreate(dmats, c_bst_ulong(len(cache)),
                                         ctypes.byref(self.handle)))
        # fixed seed by default; user params may override it below
        self.set_param({'seed': 0})
        self.set_param(params or {})
        if (params is not None) and ('booster' in params):
            self.booster = params['booster']
        else:
            self.booster = 'gbtree'
        if model_file is not None:
            self.load_model(model_file)
def __del__(self):
if self.handle is not None:
_check_call(_LIB.XGBoosterFree(self.handle))
self.handle = None
    def __getstate__(self):
        # can't pickle ctypes pointers,
        # so serialize the model content into a bytearray instead
        this = self.__dict__.copy()
        handle = this['handle']
        if handle is not None:
            raw = self.save_raw()
            this["handle"] = raw
        return this
    def __setstate__(self, state):
        # reconstruct the native handle from the raw model bytes stored
        # by __getstate__
        handle = state['handle']
        if handle is not None:
            buf = handle
            dmats = c_array(ctypes.c_void_p, [])
            handle = ctypes.c_void_p()
            _check_call(_LIB.XGBoosterCreate(dmats, c_bst_ulong(0), ctypes.byref(handle)))
            length = c_bst_ulong(len(buf))
            ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
            _check_call(_LIB.XGBoosterLoadModelFromBuffer(handle, ptr, length))
            state['handle'] = handle
        self.__dict__.update(state)
        # restore the default seed parameter lost during serialization
        self.set_param({'seed': 0})
    def __copy__(self):
        # a shallow copy of a Booster is indistinguishable from a deep one:
        # the model content is always round-tripped through save_raw()
        return self.__deepcopy__(None)
    def __deepcopy__(self, _):
        # rebuild a fresh Booster from the serialized model bytes
        return Booster(model_file=self.save_raw())
    def copy(self):
        """Copy the booster object.

        Returns
        -------
        booster: `Booster`
            a copied booster model
        """
        return self.__copy__()
    def load_rabit_checkpoint(self):
        """Initialize the model by loading from a rabit checkpoint.

        Returns
        -------
        version: integer
            The version number of the model.
        """
        version = ctypes.c_int()
        _check_call(_LIB.XGBoosterLoadRabitCheckpoint(
            self.handle, ctypes.byref(version)))
        return version.value
    def save_rabit_checkpoint(self):
        """Save the current booster to rabit checkpoint."""
        _check_call(_LIB.XGBoosterSaveRabitCheckpoint(self.handle))
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
The attribute value of the key, returns None if attribute do not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
_check_call(_LIB.XGBoosterGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
else:
return None
def attributes(self):
"""Get attributes stored in the Booster as a dictionary.
Returns
-------
result : dictionary of attribute_name: attribute_value pairs of strings.
Returns an empty dict if there's no attributes.
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(_LIB.XGBoosterGetAttrNames(self.handle,
ctypes.byref(length),
ctypes.byref(sarr)))
attr_names = from_cstr_to_pystr(sarr, length)
res = dict([(n, self.attr(n)) for n in attr_names])
return res
    def set_attr(self, **kwargs):
        """Set the attribute of the Booster.

        Parameters
        ----------
        **kwargs
            The attributes to set. Setting a value to None deletes an attribute.
        """
        for key, value in kwargs.items():
            if value is not None:
                if not isinstance(value, STRING_TYPES):
                    raise ValueError("Set Attr only accepts string values")
                value = c_str(str(value))
            # value stays None here to request deletion on the native side
            _check_call(_LIB.XGBoosterSetAttr(
                self.handle, c_str(key), value))
def set_param(self, params, value=None):
"""Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of key,value pairs, dict of key to value or simply str key
value: optional
value of the specified parameter, when params is str key
"""
if isinstance(params, collections.Mapping):
params = params.items()
elif isinstance(params, STRING_TYPES) and value is not None:
params = [(params, value)]
for key, val in params:
_check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
    def update(self, dtrain, iteration, fobj=None):
        """
        Update for one iteration, with objective function calculated internally.

        Parameters
        ----------
        dtrain : DMatrix
            Training data.
        iteration : int
            Current iteration number.
        fobj : function
            Customized objective function; called as ``fobj(pred, dtrain)``
            and expected to return ``(grad, hess)``.
        """
        if not isinstance(dtrain, DMatrix):
            raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
        self._validate_features(dtrain)
        if fobj is None:
            _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, ctypes.c_int(iteration),
                                                    dtrain.handle))
        else:
            # custom objective: compute gradients in Python, then boost
            pred = self.predict(dtrain)
            grad, hess = fobj(pred, dtrain)
            self.boost(dtrain, grad, hess)
    def boost(self, dtrain, grad, hess):
        """
        Boost the booster for one iteration, with customized gradient statistics.

        Parameters
        ----------
        dtrain : DMatrix
            The training DMatrix.
        grad : list
            The first order of gradient.
        hess : list
            The second order of gradient.
        """
        if len(grad) != len(hess):
            raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
        if not isinstance(dtrain, DMatrix):
            raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
        self._validate_features(dtrain)
        _check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
                                               c_array(ctypes.c_float, grad),
                                               c_array(ctypes.c_float, hess),
                                               c_bst_ulong(len(grad))))
    def eval_set(self, evals, iteration=0, feval=None):
        # pylint: disable=invalid-name
        """Evaluate a set of data.

        Parameters
        ----------
        evals : list of tuples (DMatrix, string)
            List of items to be evaluated.
        iteration : int
            Current iteration.
        feval : function
            Custom evaluation function; called as ``feval(pred, dmat)`` and
            expected to return ``(name, value)`` or a list of such pairs.

        Returns
        -------
        result: str
            Evaluation result string.
        """
        for d in evals:
            if not isinstance(d[0], DMatrix):
                raise TypeError('expected DMatrix, got {}'.format(type(d[0]).__name__))
            if not isinstance(d[1], STRING_TYPES):
                raise TypeError('expected string, got {}'.format(type(d[1]).__name__))
            self._validate_features(d[0])
        dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])
        evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])
        msg = ctypes.c_char_p()
        _check_call(_LIB.XGBoosterEvalOneIter(self.handle, ctypes.c_int(iteration),
                                              dmats, evnames,
                                              c_bst_ulong(len(evals)),
                                              ctypes.byref(msg)))
        res = msg.value.decode()
        # append custom-metric results in the same tab-separated format
        if feval is not None:
            for dmat, evname in evals:
                feval_ret = feval(self.predict(dmat), dmat)
                if isinstance(feval_ret, list):
                    for name, val in feval_ret:
                        res += '\t%s-%s:%f' % (evname, name, val)
                else:
                    name, val = feval_ret
                    res += '\t%s-%s:%f' % (evname, name, val)
        return res
    def eval(self, data, name='eval', iteration=0):
        """Evaluate the model on mat.

        Parameters
        ----------
        data : DMatrix
            The dmatrix storing the input.
        name : str, optional
            The name of the dataset.
        iteration : int, optional
            The current iteration number.

        Returns
        -------
        result: str
            Evaluation result string.
        """
        # features are validated here and again inside eval_set; harmless
        self._validate_features(data)
        return self.eval_set([(data, name)], iteration)
    def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False,
                pred_contribs=False, approx_contribs=False, pred_interactions=False,
                validate_features=True):
        """
        Predict with data.

        .. note:: This function is not thread safe.
          For each booster object, predict can only be called from one thread.
          If you want to run prediction using multiple thread, call ``bst.copy()`` to make copies
          of model object and then call ``predict()``.

        .. note:: Using ``predict()`` with DART booster
          If the booster object is DART type, ``predict()`` will perform dropouts, i.e. only
          some of the trees will be evaluated. This will produce incorrect results if ``data`` is
          not the training data. To obtain correct results on test sets, set ``ntree_limit`` to
          a nonzero value, e.g.

          .. code-block:: python

            preds = bst.predict(dtest, ntree_limit=num_round)

        Parameters
        ----------
        data : DMatrix
            The dmatrix storing the input.
        output_margin : bool
            Whether to output the raw untransformed margin value.
        ntree_limit : int
            Limit number of trees in the prediction; defaults to 0 (use all trees).
        pred_leaf : bool
            When this option is on, the output will be a matrix of (nsample, ntrees)
            with each record indicating the predicted leaf index of each sample in each tree.
            Note that the leaf index of a tree is unique per tree, so you may find leaf 1
            in both tree 1 and tree 0.
        pred_contribs : bool
            When this is True the output will be a matrix of size (nsample, nfeats + 1)
            with each record indicating the feature contributions (SHAP values) for that
            prediction. The sum of all feature contributions is equal to the raw untransformed
            margin value of the prediction. Note the final column is the bias term.
        approx_contribs : bool
            Approximate the contributions of each feature
        pred_interactions : bool
            When this is True the output will be a matrix of size (nsample, nfeats + 1, nfeats + 1)
            indicating the SHAP interaction values for each pair of features. The sum of each
            row (or column) of the interaction values equals the corresponding SHAP value (from
            pred_contribs), and the sum of the entire matrix equals the raw untransformed margin
            value of the prediction. Note the last row and column correspond to the bias term.
        validate_features : bool
            When this is True, validate that the Booster's and data's feature_names are identical.
            Otherwise, it is assumed that the feature_names are the same.

        Returns
        -------
        prediction : numpy array
        """
        # the prediction mode is passed to the native side as a bitmask
        option_mask = 0x00
        if output_margin:
            option_mask |= 0x01
        if pred_leaf:
            option_mask |= 0x02
        if pred_contribs:
            option_mask |= 0x04
        if approx_contribs:
            option_mask |= 0x08
        if pred_interactions:
            option_mask |= 0x10
        if validate_features:
            self._validate_features(data)
        length = c_bst_ulong()
        preds = ctypes.POINTER(ctypes.c_float)()
        _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
                                          ctypes.c_int(option_mask),
                                          ctypes.c_uint(ntree_limit),
                                          ctypes.byref(length),
                                          ctypes.byref(preds)))
        preds = ctypes2numpy(preds, length.value, np.float32)
        if pred_leaf:
            preds = preds.astype(np.int32)
        # the native side returns a flat buffer; reshape according to the
        # requested prediction mode (chunk_size = values per sample)
        nrow = data.num_row()
        if preds.size != nrow and preds.size % nrow == 0:
            chunk_size = int(preds.size / nrow)
            if pred_interactions:
                ngroup = int(chunk_size / ((data.num_col() + 1) * (data.num_col() + 1)))
                if ngroup == 1:
                    preds = preds.reshape(nrow, data.num_col() + 1, data.num_col() + 1)
                else:
                    preds = preds.reshape(nrow, ngroup, data.num_col() + 1, data.num_col() + 1)
            elif pred_contribs:
                ngroup = int(chunk_size / (data.num_col() + 1))
                if ngroup == 1:
                    preds = preds.reshape(nrow, data.num_col() + 1)
                else:
                    preds = preds.reshape(nrow, ngroup, data.num_col() + 1)
            else:
                preds = preds.reshape(nrow, chunk_size)
        return preds
def save_model(self, fname):
"""
Save the model to a file.
The model is saved in an XGBoost internal binary format which is
universal among the various XGBoost interfaces. Auxiliary attributes of
the Python Booster object (such as feature_names) will not be saved.
To preserve all attributes, pickle the Booster object.
Parameters
----------
fname : string
Output file name
"""
if isinstance(fname, STRING_TYPES): # assume file name
_check_call(_LIB.XGBoosterSaveModel(self.handle, c_str(fname)))
else:
raise TypeError("fname must be a string")
def save_raw(self):
"""
Save the model to a in memory buffer representation
Returns
-------
a in memory buffer representation of the model
"""
length = c_bst_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterGetModelRaw(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
return ctypes2buffer(cptr, length.value)
def load_model(self, fname):
"""
Load the model from a file.
The model is loaded from an XGBoost internal binary format which is
universal among the various XGBoost interfaces. Auxiliary attributes of
the Python Booster object (such as feature_names) will not be loaded.
To preserve all attributes, pickle the Booster object.
Parameters
----------
fname : string or a memory buffer
Input file name or memory buffer(see also save_raw)
"""
if isinstance(fname, STRING_TYPES):
# assume file name, cannot use os.path.exist to check, file can be from URL.
_check_call(_LIB.XGBoosterLoadModel(self.handle, c_str(fname)))
else:
buf = fname
length = c_bst_ulong(len(buf))
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
_check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length))
def dump_model(self, fout, fmap='', with_stats=False, dump_format="text"):
"""
Dump model into a text or JSON file.
Parameters
----------
fout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump file. Can be 'text' or 'json'.
"""
if isinstance(fout, STRING_TYPES):
fout = open(fout, 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats, dump_format)
if dump_format == 'json':
fout.write('[\n')
for i in range(len(ret)):
fout.write(ret[i])
if i < len(ret) - 1:
fout.write(",\n")
fout.write('\n]')
else:
for i in range(len(ret)):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close()
    def get_dump(self, fmap='', with_stats=False, dump_format="text"):
        """
        Returns the model dump as a list of strings.

        One string is returned per boosted tree (or per booster component).

        Parameters
        ----------
        fmap : string, optional
            Name of the file containing feature map names.
        with_stats : bool, optional
            Controls whether the split statistics are output.
        dump_format : string, optional
            Format of model dump. Can be 'text' or 'json'.
        """
        # Output holders filled by the native library: number of strings and
        # an array of C strings.
        length = c_bst_ulong()
        sarr = ctypes.POINTER(ctypes.c_char_p)()
        if self.feature_names is not None and fmap == '':
            # No explicit feature-map file: pass the in-memory feature names
            # (and types) straight to the native dumper.
            flen = len(self.feature_names)
            fname = from_pystr_to_cstr(self.feature_names)
            if self.feature_types is None:
                # use quantitative as default
                # {'q': quantitative, 'i': indicator}
                ftype = from_pystr_to_cstr(['q'] * flen)
            else:
                ftype = from_pystr_to_cstr(self.feature_types)
            _check_call(_LIB.XGBoosterDumpModelExWithFeatures(
                self.handle,
                ctypes.c_int(flen),
                fname,
                ftype,
                ctypes.c_int(with_stats),
                c_str(dump_format),
                ctypes.byref(length),
                ctypes.byref(sarr)))
        else:
            # Feature names come from the fmap file (if given); validate it
            # exists before handing the path to the native library.
            if fmap != '' and not os.path.exists(fmap):
                raise ValueError("No such file: {0}".format(fmap))
            _check_call(_LIB.XGBoosterDumpModelEx(self.handle,
                                                  c_str(fmap),
                                                  ctypes.c_int(with_stats),
                                                  c_str(dump_format),
                                                  ctypes.byref(length),
                                                  ctypes.byref(sarr)))
        # Convert the returned C string array into a list of Python strings.
        res = from_cstr_to_pystr(sarr, length)
        return res
def get_fscore(self, fmap=''):
"""Get feature importance of each feature.
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
.. note:: Zero-importance features will not be included
Keep in mind that this function does not include zero-importance feature, i.e.
those features that have not been used in any split conditions.
Parameters
----------
fmap: str (optional)
The name of feature map file
"""
return self.get_score(fmap, importance_type='weight')
    def get_score(self, fmap='', importance_type='weight'):
        """Get feature importance of each feature.
        Importance type can be defined as:
        * 'weight': the number of times a feature is used to split the data across all trees.
        * 'gain': the average gain across all splits the feature is used in.
        * 'cover': the average coverage across all splits the feature is used in.
        * 'total_gain': the total gain across all splits the feature is used in.
        * 'total_cover': the total coverage across all splits the feature is used in.
        .. note:: Feature importance is defined only for tree boosters
        Feature importance is only defined when the decision tree model is chosen as base
        learner (`booster=gbtree`). It is not defined for other base learner types, such
        as linear learners (`booster=gblinear`).
        Parameters
        ----------
        fmap: str (optional)
            The name of feature map file.
        importance_type: str, default 'weight'
            One of the importance types defined above.

        Returns
        -------
        dict mapping feature name -> importance value; features never used
        in a split are absent from the dict.
        """
        # Importance is parsed from the textual tree dump, so only tree
        # boosters are supported.
        if self.booster != 'gbtree':
            raise ValueError('Feature importance is not defined for Booster type {}'
                             .format(self.booster))
        allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']
        if importance_type not in allowed_importance_types:
            msg = ("importance_type mismatch, got '{}', expected one of " +
                   repr(allowed_importance_types))
            raise ValueError(msg.format(importance_type))
        # if it's weight, then omap stores the number of missing values
        if importance_type == 'weight':
            # do a simpler tree dump to save time
            trees = self.get_dump(fmap, with_stats=False)
            # NOTE: `fmap` is reused here as the result dict, shadowing the
            # feature-map-file parameter (no longer needed at this point).
            fmap = {}
            for tree in trees:
                for line in tree.split('\n'):
                    # look for the opening square bracket
                    arr = line.split('[')
                    # if no opening bracket (leaf node), ignore this line
                    if len(arr) == 1:
                        continue
                    # extract feature name from string between []
                    fid = arr[1].split(']')[0].split('<')[0]
                    if fid not in fmap:
                        # if the feature hasn't been seen yet
                        fmap[fid] = 1
                    else:
                        fmap[fid] += 1
            return fmap
        else:
            # 'total_*' variants use the same parsing as 'gain'/'cover' but
            # skip the averaging step at the end.
            average_over_splits = True
            if importance_type == 'total_gain':
                importance_type = 'gain'
                average_over_splits = False
            elif importance_type == 'total_cover':
                importance_type = 'cover'
                average_over_splits = False
            trees = self.get_dump(fmap, with_stats=True)
            # The dump annotates each split like `...] gain=1.5,cover=3`;
            # appending '=' lets us split on e.g. 'gain='.
            importance_type += '='
            fmap = {}
            gmap = {}
            for tree in trees:
                for line in tree.split('\n'):
                    # look for the opening square bracket
                    arr = line.split('[')
                    # if no opening bracket (leaf node), ignore this line
                    if len(arr) == 1:
                        continue
                    # look for the closing bracket, extract only info within that bracket
                    fid = arr[1].split(']')
                    # extract gain or cover from string after closing bracket
                    g = float(fid[1].split(importance_type)[1].split(',')[0])
                    # extract feature name from string before closing bracket
                    fid = fid[0].split('<')[0]
                    if fid not in fmap:
                        # if the feature hasn't been seen yet
                        fmap[fid] = 1
                        gmap[fid] = g
                    else:
                        fmap[fid] += 1
                        gmap[fid] += g
            # calculate average value (gain/cover) for each feature
            if average_over_splits:
                for fid in gmap:
                    gmap[fid] = gmap[fid] / fmap[fid]
            return gmap
def _validate_features(self, data):
"""
Validate Booster and data's feature_names are identical.
Set feature_names and feature_types from DMatrix
"""
if self.feature_names is None:
self.feature_names = data.feature_names
self.feature_types = data.feature_types
else:
# Booster can't accept data with different feature names
if self.feature_names != data.feature_names:
dat_missing = set(self.feature_names) - set(data.feature_names)
my_missing = set(data.feature_names) - set(self.feature_names)
msg = 'feature_names mismatch: {0} {1}'
if dat_missing:
msg += ('\nexpected ' + ', '.join(str(s) for s in dat_missing) +
' in input data')
if my_missing:
msg += ('\ntraining data did not have the following fields: ' +
', '.join(str(s) for s in my_missing))
raise ValueError(msg.format(self.feature_names,
data.feature_names))
def get_split_value_histogram(self, feature, fmap='', bins=None, as_pandas=True):
"""Get split value histogram of a feature
Parameters
----------
feature: str
The name of the feature.
fmap: str (optional)
The name of feature map file.
bin: int, default None
The maximum number of bins.
Number of bins equals number of unique split values n_unique,
if bins == None or bins > n_unique.
as_pandas: bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return numpy ndarray.
Returns
-------
a histogram of used splitting values for the specified feature
either as numpy array or pandas DataFrame.
"""
xgdump = self.get_dump(fmap=fmap)
values = []
regexp = re.compile(r"\[{0}<([\d.Ee+-]+)\]".format(feature))
for i in range(len(xgdump)):
m = re.findall(regexp, xgdump[i])
values.extend(map(float, m))
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
nph = np.histogram(values, bins=bins)
nph = np.column_stack((nph[1][1:], nph[0]))
nph = nph[nph[:, 1] > 0]
if as_pandas and PANDAS_INSTALLED:
return DataFrame(nph, columns=['SplitValue', 'Count'])
elif as_pandas and not PANDAS_INSTALLED:
sys.stderr.write(
"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).")
return nph
else:
return nph
| 35.978576 | 99 | 0.552927 |
ca80b29ddd7241b5c7f235e840080a9d59237b7a | 1,336 | py | Python | config/settings.py | wilsonpe66/server-backend | 16665d810fe1829f5dacc67f396b7cecf5af042f | [
"BSD-3-Clause"
] | 1 | 2019-09-26T04:00:55.000Z | 2019-09-26T04:00:55.000Z | config/settings.py | wilsonpe66/server-backend | 16665d810fe1829f5dacc67f396b7cecf5af042f | [
"BSD-3-Clause"
] | 2 | 2020-06-05T21:58:55.000Z | 2021-06-10T21:45:08.000Z | config/settings.py | wilsonpe66/server-backend | 16665d810fe1829f5dacc67f396b7cecf5af042f | [
"BSD-3-Clause"
] | 1 | 2019-09-26T03:55:06.000Z | 2019-09-26T03:55:06.000Z | from os import getenv
"""
To customize any of these values, append a line to config/remote_db_env.py such as:
os.environ['S3_BUCKET'] = 'bucket_name'
"""
# This is the secret key for the website. Mostly it is used to sign cookies. You should provide a
# cryptographically secure string to this value.
FLASK_SECRET_KEY = getenv("FLASK_SECRET_KEY")
# the name of the s3 bucket that will be used to store user generated data, and backups of local
# database information.
S3_BUCKET = getenv("S3_BUCKET")
# Domain name for the server
DOMAIN_NAME = getenv("DOMAIN_NAME")
# A list of email addresses that will receive error emails. This value must be a
# comma separated list; whitespace before and after addresses will be stripped.
SYSADMIN_EMAILS = getenv("SYSADMIN_EMAILS")
# Sentry DSNs - one error-reporting endpoint per application component.
SENTRY_ANDROID_DSN = getenv("SENTRY_ANDROID_DSN")
SENTRY_DATA_PROCESSING_DSN = getenv("SENTRY_DATA_PROCESSING_DSN")
SENTRY_ELASTIC_BEANSTALK_DSN = getenv("SENTRY_ELASTIC_BEANSTALK_DSN")
SENTRY_JAVASCRIPT_DSN = getenv("SENTRY_JAVASCRIPT_DSN")
# Production/Staging: set to "TRUE" if staging
IS_STAGING = getenv("IS_STAGING") or "PRODUCTION"
# S3 bucket access credentials (IAM user/key pair for the bucket above).
S3_ACCESS_CREDENTIALS_USER = getenv("S3_ACCESS_CREDENTIALS_USER")
S3_ACCESS_CREDENTIALS_KEY = getenv("S3_ACCESS_CREDENTIALS_KEY")
S3_REGION_NAME = getenv("S3_REGION_NAME", "us-east-1") | 37.111111 | 97 | 0.794162 |
ce2911195a0ecf9654bdcba434cebbfd2ae82746 | 750 | py | Python | openslidertfpy/__init__.py | OtaYuji/openslider-tfpy | 19891babb1d2ad85e0a3866939f86e0fdbcce3d8 | [
"Apache-2.0"
] | 4 | 2020-05-10T11:36:10.000Z | 2021-12-12T14:22:12.000Z | openslidertfpy/__init__.py | OtaYuji/openslider-tfpy | 19891babb1d2ad85e0a3866939f86e0fdbcce3d8 | [
"Apache-2.0"
] | 1 | 2021-03-20T07:20:34.000Z | 2021-03-20T07:20:34.000Z | openslidertfpy/__init__.py | yujota/openslider-tfpy | 19891babb1d2ad85e0a3866939f86e0fdbcce3d8 | [
"Apache-2.0"
] | null | null | null | # Copyright
# 2019 Department of Dermatology, School of Medicine, Tohoku University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Package version string (PEP 440 pre-release identifier).
__version__ = "0.0.1-alpha"
# Re-export the public API at package level.
from openslidertfpy.openslidertfpy import MicroPatchReader, is_mock
| 44.117647 | 76 | 0.752 |
7175d59264195992e9d1f9799379318b7a4fd217 | 7,179 | py | Python | resource/compute_dynamic_params.py | dataiku/dss-plugin-snowflake-stages | 866f4adca07fb24a7ed8feb90fb0758b28c82086 | [
"Apache-2.0"
] | null | null | null | resource/compute_dynamic_params.py | dataiku/dss-plugin-snowflake-stages | 866f4adca07fb24a7ed8feb90fb0758b28c82086 | [
"Apache-2.0"
] | null | null | null | resource/compute_dynamic_params.py | dataiku/dss-plugin-snowflake-stages | 866f4adca07fb24a7ed8feb90fb0758b28c82086 | [
"Apache-2.0"
] | null | null | null | import logging
from dataiku import SQLExecutor2, api_client, default_project_key
def do(payload, config, plugin_config, inputs):
    """Compute the dynamic choices for one macro parameter.

    DSS cannot currently recompute the choices of a parameter B when another
    parameter A changes (see https://app.clubhouse.io/dataiku/story/53713).
    Macros launched from a scenario therefore list all Snowflake objects,
    grouped by connection, while macros launched from a dataset (the
    `input_dataset` config key is then set by DSS) restrict the choices to
    that dataset's connection.
    """
    parameter_name = payload['parameterName']
    dataset_name = config.get('input_dataset')
    if dataset_name:
        # "Flow" style: the macro was created from a dataset.
        return macro_from_dataset_params(parameter_name, dataset_name)
    # "Scenario" style: no input dataset available.
    return macro_from_scenario_params(parameter_name)
def macro_from_scenario_params(parameter_name):
    """
    Params of type `SELECT` don't let us group the choices by connection. This is why we use this hack of displaying the connections as selectable
    choices and indenting to the right the valid choices. If the user was to select a connection rather than a valid choice, he would then get an error.

    Returns a DSS dynamic-param payload: ``{"choices": [...]}``.
    """
    # We start by fetching all snowflake datasets in the current project and group them per connection
    datasets_per_connection = dict()
    for dataset in get_snowflake_datasets():
        connection = dataset['params']['connection']
        if connection not in datasets_per_connection:
            datasets_per_connection[connection] = []
        datasets_per_connection[connection] += [dataset.name]
    # Only if there are multiple Snowflake connections, we need to display the different inputs grouped by connections
    multiple_connections = len(datasets_per_connection) > 1
    # indent only if we have multiple Snowflake connections
    def do_indentation(choice):
        return {'value': choice['value'], 'label': f"    {choice['label']}"}
    indent = do_indentation if multiple_connections else (lambda choice: choice)
    if parameter_name == 'dataset':
        choices = []
        for connection in datasets_per_connection:
            if multiple_connections:
                # Pseudo-choice acting as a (non-selectable) group header.
                choices += [connection_choice(connection)]
            choices += [indent(dataset_choice(dataset)) for dataset in datasets_per_connection[connection]]
        return {'choices': choices}
    if parameter_name == 'stage':
        choices = []
        for connection in datasets_per_connection:
            if multiple_connections:
                choices += [connection_choice(connection)]
            try:
                choices += [indent(stage_choice(row)) for row in get_stages(connection=connection)]
            except Exception as e:
                # One broken connection must not hide the others' stages.
                logging.exception('Error while fetching Snowflake stages on connection `%s`', connection, exc_info=e)
                choices += [indent(failed_connection("stages"))]
        return {"choices": choices}
    if parameter_name == 'file_format':
        choices = [indent(default_format_choice)]
        for connection in datasets_per_connection:
            if multiple_connections:
                choices += [connection_choice(connection)]
            try:
                choices += [indent(file_format_choice(row)) for row in get_file_formats(connection=connection)]
            except Exception as e:
                logging.exception('Error while fetching Snowflake file formats on connection `%s`', connection, exc_info=e)
                choices += [indent(failed_connection("file formats"))]
        return {"choices": choices}
def macro_from_dataset_params(parameter_name, dataset_name):
    """Compute parameter choices scoped to one input dataset's connection.

    Used when the macro is launched from a dataset: only the stages and file
    formats reachable from that dataset's Snowflake connection are listed.
    Returns a DSS dynamic-param payload: ``{"choices": [...]}``.
    """
    if parameter_name == 'dataset':  # this param is only used to display the dataset name to the user in the macro modal
        return {"choices": [{"value": "default", "label": dataset_name}]}
    if parameter_name == 'stage':
        if not is_dataset_valid(dataset_name):
            return {"choices": [invalid_dataset_choice]}
        try:
            choices = [stage_choice(row) for row in get_stages(dataset=dataset_name)]
        except Exception as e:
            # Surface the failure as a visible (non-selectable) choice.
            logging.exception('Error while fetching Snowflake stages for dataset `%s`', dataset_name, exc_info=e)
            choices = [failed_connection("stages")]
        return {"choices": choices}
    if parameter_name == 'file_format':
        if not is_dataset_valid(dataset_name):
            return {"choices": [invalid_dataset_choice]}
        try:
            choices = [default_format_choice] + \
                [file_format_choice(row) for row in get_file_formats(dataset=dataset_name)]
        except Exception as e:
            logging.exception('Error while fetching Snowflake file formats for dataset `%s`', dataset_name, exc_info=e)
            choices = [failed_connection("file formats")]
        return {"choices": choices}
def get_snowflake_datasets():
    """List the datasets of the current project whose type is Snowflake."""
    project = api_client().get_project(default_project_key())
    return [ds for ds in project.list_datasets() if ds.type == 'Snowflake']
def is_dataset_valid(dataset_name):
    """Return True when *dataset_name* exists in the current project as a Snowflake dataset."""
    project = api_client().get_project(default_project_key())
    settings = project.get_dataset(dataset_name).get_settings()
    return settings.type == 'Snowflake'
def get_stages(**kwargs):
    """Iterate over the rows of ``SHOW STAGES``.

    ``kwargs`` are forwarded to ``SQLExecutor2`` (typically ``connection=``
    or ``dataset=``) to pick the Snowflake connection to query.
    """
    sql = "SHOW STAGES"
    logging.info("Fetching Snowflake stages with %s: `%s`", kwargs, sql)
    executor = SQLExecutor2(**kwargs)
    return executor.query_to_iter(sql).iter_tuples()
def get_file_formats(**kwargs):
    """Iterate over the rows of ``SHOW FILE FORMATS IN ACCOUNT``.

    ``kwargs`` are forwarded to ``SQLExecutor2`` (typically ``connection=``
    or ``dataset=``) to pick the Snowflake connection to query.
    """
    sql = "SHOW FILE FORMATS IN ACCOUNT"
    logging.info("Fetching Snowflake file formats with %s: `%s`", kwargs, sql)
    executor = SQLExecutor2(**kwargs)
    return executor.query_to_iter(sql).iter_tuples()
def connection_choice(connection):
    """Pseudo-choice used as a group header for one connection (value None)."""
    header = f"From connection {connection}:"
    return {"value": None, "label": header}
def failed_connection(what):
    """Pseudo-choice (value None) shown when listing *what* failed."""
    message = f"⚠️ Failed getting {what}"
    return {"value": None, "label": message}
# Given that we fully qualify all the file formats, our "default" option won't override an actual file format
default_format_choice = {
    "value": "default",
    "label": "DEFAULT"
}
# Shown (non-selectable) when the macro's input dataset is not a Snowflake dataset.
invalid_dataset_choice = {
    "value": None,
    "label": "⚠️ Invalid input dataset"
}
def dataset_choice(dataset_name):
    """Selectable choice whose value and label are both the dataset name."""
    return {"value": dataset_name, "label": dataset_name}
def stage_choice(row):
    """Build a select choice from one ``SHOW STAGES`` result row.

    Columns used (positions taken from how this row is indexed here):
    row[1]=name, row[2]=catalog, row[3]=schema, row[8]=comment.
    """
    name, catalog, schema, comment = row[1], row[2], row[3], row[8]
    suffix = '(' + comment + ')' if comment else ''
    return {
        "value": f"\"{catalog}\".\"{schema}\".\"{name}\"",
        "label": f"{catalog}.{schema}.{name} {suffix}",
    }
def file_format_choice(row):
    """Build a select choice from one ``SHOW FILE FORMATS`` result row.

    Columns used (positions taken from how this row is indexed here):
    row[1]=name, row[2]=catalog, row[3]=schema, row[6]=comment.
    """
    name, catalog, schema, comment = row[1], row[2], row[3], row[6]
    suffix = '(' + comment + ')' if comment else ''
    return {
        "value": f"\"{catalog}\".\"{schema}\".\"{name}\"",
        "label": f"{catalog}.{schema}.{name} {suffix}",
    }
| 38.805405 | 158 | 0.667363 |
e496e6fc11040043a2d18404d04d72f8b93399dd | 172 | py | Python | src/staticunderscorei18n/apps.py | chinsky/django-static-underscore-i18n | fb0ebd5b3848ed20e7e18e137b3722b3b8540c66 | [
"MIT"
] | null | null | null | src/staticunderscorei18n/apps.py | chinsky/django-static-underscore-i18n | fb0ebd5b3848ed20e7e18e137b3722b3b8540c66 | [
"MIT"
] | 1 | 2019-02-27T16:22:40.000Z | 2019-02-27T17:07:46.000Z | src/staticunderscorei18n/apps.py | chinsky/django-static-underscore-i18n | fb0ebd5b3848ed20e7e18e137b3722b3b8540c66 | [
"MIT"
] | 1 | 2019-02-28T10:08:55.000Z | 2019-02-28T10:08:55.000Z | from django.apps import AppConfig
class StaticUnderscoreI18NConfig(AppConfig):
    """Django application configuration for the staticunderscorei18n app."""

    # Dotted module path of the app this config applies to.
    name = 'staticunderscorei18n'

    def ready(self):
        # Imported for its side effects at app startup (presumably registers
        # the app's default settings — confirm against the conf module).
        from . import conf  # noqa
| 19.111111 | 44 | 0.715116 |
bc5b3647ceec4b0897fd0c0fb15e6d2f2358b0bb | 1,178 | py | Python | migrations/versions/9f2495bd66de_.py | Rdbaker/Mealbound | 37cec6b45a632ac26a5341a0c9556279b6229ea8 | [
"BSD-3-Clause"
] | 1 | 2018-11-03T17:48:50.000Z | 2018-11-03T17:48:50.000Z | migrations/versions/9f2495bd66de_.py | Rdbaker/Mealbound | 37cec6b45a632ac26a5341a0c9556279b6229ea8 | [
"BSD-3-Clause"
] | 3 | 2021-03-09T09:47:04.000Z | 2022-02-12T13:04:41.000Z | migrations/versions/9f2495bd66de_.py | Rdbaker/Mealbound | 37cec6b45a632ac26a5341a0c9556279b6229ea8 | [
"BSD-3-Clause"
] | null | null | null | """Adds the review table
Revision ID: 9f2495bd66de
Revises: 3e4b230c5582
Create Date: 2017-07-09 11:02:10.683542
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9f2495bd66de'  # this migration's id
down_revision = '3e4b230c5582'  # the migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``review`` table (rating + description, FKs to meal/users)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('review',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
    sa.Column('rating', sa.Float(), nullable=False),
    sa.Column('description', sa.Text(), nullable=False),
    sa.Column('meal_id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['meal_id'], ['meal.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``review`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('review')
    # ### end Alembic commands ###
| 30.205128 | 72 | 0.689304 |
7e40219045d7e993b75cca7674d6ec5da064241a | 38 | py | Python | dataloader/__init__.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
] | 1 | 2020-03-11T12:19:13.000Z | 2020-03-11T12:19:13.000Z | dataloader/__init__.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
] | null | null | null | dataloader/__init__.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
] | null | null | null | from .DataLoader import getDataLoader
| 19 | 37 | 0.868421 |
a3d5894c5feadeef271b6d94859a78639c244841 | 12,828 | py | Python | modules/nxos_snmp_user.py | nestor-xentaurs/nxos-programmability | bf43e833e66a1ab80039ca242226bca7de15c3c5 | [
"Apache-2.0"
] | null | null | null | modules/nxos_snmp_user.py | nestor-xentaurs/nxos-programmability | bf43e833e66a1ab80039ca242226bca7de15c3c5 | [
"Apache-2.0"
] | null | null | null | modules/nxos_snmp_user.py | nestor-xentaurs/nxos-programmability | bf43e833e66a1ab80039ca242226bca7de15c3c5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_user
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP users for monitoring.
description:
- Manages SNMP user configuration.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Authentication parameters not idempotent.
options:
user:
description:
- Name of the user.
required: true
group:
description:
- Group to which the user will belong to.
If state = present, and the user is existing,
the group is added to the user. If the user
is not existing, user entry is created with this
group argument.
If state = absent, only the group is removed from the
user entry. However, to maintain backward compatibility,
if the existing user belongs to only one group, and if
group argument is same as the existing user's group,
then the user entry also is deleted.
authentication:
description:
- Authentication parameters for the user.
choices: ['md5', 'sha']
pwd:
description:
- Authentication password when using md5 or sha.
This is not idempotent
privacy:
description:
- Privacy password for the user.
This is not idempotent
encrypt:
description:
- Enables AES-128 bit encryption when using privacy password.
type: bool
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_snmp_user:
user: ntc
group: network-operator
authentication: md5
pwd: test_password
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server user ntc network-operator auth md5 test_password"]
'''
import re
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, text=False):
    """Run one show command on the device.

    Requests JSON (structured) output by default; pass ``text=True`` for the
    raw CLI text. Returns whatever ``run_commands`` returns (a list with one
    entry per command).
    """
    output_format = 'text' if text else 'json'
    cmd = {'command': command, 'output': output_format}
    return run_commands(module, cmd)
def flatten_list(command_lists):
    """Flatten a mixed list one level deep: list items are spliced in,
    scalar items are appended as-is."""
    flattened = []
    for entry in command_lists:
        extra = entry if isinstance(entry, list) else [entry]
        flattened.extend(extra)
    return flattened
def get_snmp_groups(module):
    """Return the list of SNMP group (role) names configured on the device.

    Returns whatever was collected so far (possibly an empty list) when the
    structured output does not have the expected TABLE_role/ROW_role shape.
    """
    body = execute_show_command('show snmp group', module)[0]
    names = []
    try:
        for row in body['TABLE_role']['ROW_role']:
            names.append(row['role_name'])
    except (KeyError, AttributeError):
        pass
    return names
def get_snmp_user(user, module):
    """Return the device's current config for SNMP user *user* as a dict.

    Keys on success: 'user', 'authentication', 'encrypt' ('aes-128' or
    'none') and 'group' (list of group names). Returns {} when the user does
    not exist or the output cannot be parsed. Platforms without structured
    output fall back to text parsing via get_non_structured_snmp_user().
    """
    command = 'show snmp user {0}'.format(user)
    # First probe with text output so a non-existing user is detected.
    body = execute_show_command(command, module, text=True)
    body_text = body[0]
    if 'No such entry' not in body[0]:
        # User exists: re-run for the structured (JSON) output.
        body = execute_show_command(command, module)
    resource = {}
    try:
        # The TABLE and ROW keys differ between NXOS platforms.
        if body[0].get('TABLE_snmp_user'):
            tablekey = 'TABLE_snmp_user'
            rowkey = 'ROW_snmp_user'
            tablegrpkey = 'TABLE_snmp_group_names'
            rowgrpkey = 'ROW_snmp_group_names'
            authkey = 'auth_protocol'
            privkey = 'priv_protocol'
            grpkey = 'group_names'
        elif body[0].get('TABLE_snmp_users'):
            tablekey = 'TABLE_snmp_users'
            rowkey = 'ROW_snmp_users'
            tablegrpkey = 'TABLE_groups'
            rowgrpkey = 'ROW_groups'
            authkey = 'auth'
            privkey = 'priv'
            grpkey = 'group'
        rt = body[0][tablekey][rowkey]
        # on some older platforms, all groups except the 1st one
        # are in list elements by themselves and they are
        # indexed by 'user'. This is due to a platform bug.
        # Get first element if rt is a list due to the bug
        # or if there is no bug, parse rt directly
        if isinstance(rt, list):
            resource_table = rt[0]
        else:
            resource_table = rt
        resource['user'] = user
        resource['authentication'] = str(resource_table[authkey]).strip()
        encrypt = str(resource_table[privkey]).strip()
        if encrypt.startswith('aes'):
            resource['encrypt'] = 'aes-128'
        else:
            resource['encrypt'] = 'none'
        groups = []
        if tablegrpkey in resource_table:
            group_table = resource_table[tablegrpkey][rowgrpkey]
            try:
                # Multiple groups: a list of row dicts.
                for group in group_table:
                    groups.append(str(group[grpkey]).strip())
            except TypeError:
                # Single group: a bare row dict, not a list.
                groups.append(str(group_table[grpkey]).strip())
            # Now for the platform bug case, get the groups
            if isinstance(rt, list):
                # remove 1st element from the list as this is parsed already
                rt.pop(0)
                # iterate through other elements indexed by
                # 'user' and add it to groups.
                for each in rt:
                    groups.append(each['user'].strip())
        # Some 'F' platforms use 'group' key instead
        elif 'group' in resource_table:
            # single group is a string, multiple groups in a list
            groups = resource_table['group']
            if isinstance(groups, str):
                groups = [groups]
        resource['group'] = groups
    except (KeyError, AttributeError, IndexError, TypeError):
        if not resource and body_text and 'No such entry' not in body_text:
            # 6K and other platforms may not return structured output;
            # attempt to get state from text output
            resource = get_non_structured_snmp_user(body_text)
    return resource
def get_non_structured_snmp_user(body_text):
    """Parse `show snmp user <name>` *text* output as a fallback.

    This method is a workaround for platforms that don't support structured
    output for 'show snmp user <foo>'. This workaround may not work on all
    platforms. Sample non-struct output::

        User   Auth  Priv(enforce)  Groups         acl_filter
        ____   ____  _____________  ______         __________
        sample1  no  no             network-admin  ipv4:my_acl
                                    network-operator
                                    priv-11
        -OR-
        sample2  md5  des(no)       priv-15
        -OR-
        sample3  md5  aes-128(no)   network-admin

    Returns a dict shaped like the structured path in get_snmp_user()
    ('user', 'authentication', 'encrypt', 'group'), or {} when the text
    cannot be parsed.
    """
    resource = {}
    # Drop everything up to the last column-header rule so the regex only
    # sees data rows.
    output = body_text.rsplit('__________')[-1]
    pat = re.compile(r'^(?P<user>\S+)\s+'
                     r'(?P<auth>\S+)\s+'
                     r'(?P<priv>[\w\d-]+)(?P<enforce>\([\w\d-]+\))*\s+'
                     r'(?P<group>\S+)',
                     re.M)
    m = re.search(pat, output)
    if not m:
        return resource
    resource['user'] = m.group('user')
    # Use the same key as the structured path ('authentication', not 'auth')
    # so the idempotence diff in main() compares like with like; the old
    # 'auth' key always showed authentication as changed on these platforms.
    resource['authentication'] = m.group('auth')
    resource['encrypt'] = 'aes-128' if 'aes' in str(m.group('priv')) else 'none'
    resource['group'] = [m.group('group')]
    # Continuation lines (indented, a single bare word) list extra groups.
    more_groups = re.findall(r'^\s+([\w\d-]+)\s*$', output, re.M)
    if more_groups:
        resource['group'] += more_groups
    return resource
def remove_snmp_user(user, group=None):
    """Return the CLI command (as a one-element list) that removes *user*,
    or — when *group* is given — only the user's membership in that group."""
    cmd = 'no snmp-server user {0}'.format(user)
    if group:
        cmd += ' {0}'.format(group)
    return [cmd]
def config_snmp_user(proposed, user, reset):
    """Build the command list configuring *user* from the *proposed* dict.

    *proposed* may carry 'group', 'authentication'+'pwd', and
    'encrypt'/'privacy'. When *reset* is true the user is removed first so
    the auth/priv settings can be re-applied from scratch.
    """
    commands = remove_snmp_user(user) if reset else []
    cmd = 'snmp-server user {0}'.format(user)
    group = proposed.get('group')
    if group:
        cmd += ' {0}'.format(group)
    auth = proposed.get('authentication')
    pwd = proposed.get('pwd')
    if auth and pwd:
        cmd += ' auth {0} {1}'.format(auth, pwd)
    encrypt = proposed.get('encrypt')
    privacy = proposed.get('privacy')
    if encrypt and privacy:
        cmd += ' priv {0} {1}'.format(encrypt, privacy)
    elif privacy:
        cmd += ' priv {0}'.format(privacy)
    if cmd:
        commands.append(cmd)
    return commands
def main():
    """Ansible module entry point: manage one SNMP user on an NX-OS switch.

    Computes the delta between the desired parameters and the user that
    already exists on the device, pushes the resulting CLI commands (unless
    running in check mode) and exits through module.exit_json/fail_json.
    """
    argument_spec = dict(
        user=dict(required=True, type='str'),
        group=dict(type='str'),
        pwd=dict(type='str', no_log=True),
        privacy=dict(type='str'),
        authentication=dict(choices=['md5', 'sha']),
        encrypt=dict(type='bool'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           required_together=[['authentication', 'pwd'],
                                              ['encrypt', 'privacy']],
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    results = {'changed': False, 'commands': [], 'warnings': warnings}
    user = module.params['user']
    group = module.params['group']
    pwd = module.params['pwd']
    privacy = module.params['privacy']
    encrypt = module.params['encrypt']
    authentication = module.params['authentication']
    state = module.params['state']
    if privacy and encrypt:
        # BUGFIX: the original condition was `not pwd and authentication`,
        # which by operator precedence means `(not pwd) and authentication`
        # and never enforced what the error message states.  Both pwd and
        # authentication are required whenever privacy/encrypt are used.
        if not (pwd and authentication):
            module.fail_json(msg='pwd and authentication must be provided '
                                 'when using privacy and encrypt')
    if group and group not in get_snmp_groups(module):
        module.fail_json(msg='group not configured yet on switch.')
    existing = get_snmp_user(user, module)
    if state == 'present' and existing:
        # Normalize 'group' on the existing user so the delta computation
        # below compares a scalar, not the device's list of groups.
        if group:
            if group not in existing['group']:
                existing['group'] = None
            else:
                existing['group'] = group
        else:
            existing['group'] = None
    commands = []
    if state == 'absent' and existing:
        if group:
            if group in existing['group']:
                # Removing the last group removes the user entirely.
                if len(existing['group']) == 1:
                    commands.append(remove_snmp_user(user))
                else:
                    commands.append(remove_snmp_user(user, group))
        else:
            commands.append(remove_snmp_user(user))
    elif state == 'present':
        reset = False
        args = dict(user=user, pwd=pwd, group=group, privacy=privacy,
                    encrypt=encrypt, authentication=authentication)
        proposed = dict((k, v) for k, v in args.items() if v is not None)
        if not existing:
            if encrypt:
                proposed['encrypt'] = 'aes-128'
            commands.append(config_snmp_user(proposed, user, reset))
        elif existing:
            # Switching a non-AES user to AES requires recreating the user.
            if encrypt and not existing['encrypt'].startswith('aes'):
                reset = True
                proposed['encrypt'] = 'aes-128'
            delta = dict(set(proposed.items()).difference(existing.items()))
            if delta.get('pwd'):
                # A password change must be sent together with its algorithm.
                delta['authentication'] = authentication
            if delta and encrypt:
                delta['encrypt'] = 'aes-128'
            if delta:
                command = config_snmp_user(delta, user, reset)
                commands.append(command)
    cmds = flatten_list(commands)
    if cmds:
        results['changed'] = True
        if not module.check_mode:
            load_config(module, cmds)
        # 'configure' is an artifact of entering config mode; hide it from
        # the reported command list.
        if 'configure' in cmds:
            cmds.pop(0)
    results['commands'] = cmds
    module.exit_json(**results)
if __name__ == '__main__':
    main()
| 32.312343 | 81 | 0.584191 |
ac1589fdf2e0540302ba512fa40c6db000e71fbc | 2,157 | py | Python | src/gripit/core/point_cloud_model.py | yor1001/GripIt | a06b300df56473f692cbb9154d60525d35137ee3 | [
"MIT"
] | null | null | null | src/gripit/core/point_cloud_model.py | yor1001/GripIt | a06b300df56473f692cbb9154d60525d35137ee3 | [
"MIT"
] | null | null | null | src/gripit/core/point_cloud_model.py | yor1001/GripIt | a06b300df56473f692cbb9154d60525d35137ee3 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import int
from builtins import str
from future import standard_library
standard_library.install_aliases()
from builtins import object
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
from enum import Enum
class PointCloudFilter(Enum):
    """Filter modes accepted by PointCloudModel.getFilteredCloudData."""
    ALL = 0     # no filtering
    NEGATE = 1  # exclude the given indices
    ONLY = 2    # keep only the given indices
class PointCloudModel(object):
    """Point cloud representation.

    Holds the raw point data together with an optional image model and a
    dict of render properties.  Constructor keyword arguments are routed
    through ``_setValues``.
    """

    def __init__(self, **kwds):
        """All keyword arguments are forwarded to ``_setValues``."""
        self.pointCloudData = None
        self.imageModel = None
        self.renderProperties = {
            'pointCloud': {
                'color': (0.1, 0.1, 1, 0.4),
                'size': 2.0
            }
        }
        self._setValues(**kwds)

    def _setValues(self, **kwds):
        """Assign recognised keyword arguments as attributes, then update."""
        args = ('pointCloudData', 'imageModel', 'renderProperties', 'context')
        for key in kwds:
            if key not in args:
                raise Exception('Invalid keyword argument: %s (allowed arguments are %s)' % (key, str(args)))
        for name in args:
            if name in kwds:
                setattr(self, name, kwds[name])
        self.update()

    def update(self):
        # Hook for subclasses; the base model has nothing to recompute.
        pass

    def getPointCloudData(self):
        return self.pointCloudData

    def getFilteredCloudData(self, indexList, filterType=None):
        # Only the unfiltered case is implemented; any other filterType
        # yields None, matching the historical behavior.
        if filterType is not None:
            return None
        return self.getPointCloudData()

    def getPoint(self, index):
        return self.pointCloudData[index]

    def getPointfromXYCoordinate(self, x, y):
        return self.getPoint(self.getIndexfromXYCoordinate(x, y))

    def getIndexfromXYCoordinate(self, x, y):
        # Flatten the (row, col) coordinate using the cropped depth image's
        # width as the row stride.
        shape = self.imageModel.getCroppedDepthImage().shape
        return int(y * shape[1] + x % shape[1])

    def saveToFile(self, outName):
        """Write each point as a '[x, y, z]' line via the context's writer."""
        handle = self.context.getFileWriter(outName)
        for point in self.pointCloudData:
            handle.write("[{}, {}, {}]\n".format(point[0], point[1], point[2]))
        handle.close()
| 28.381579 | 107 | 0.628187 |
773a5aa65266063a8d5faf39b250a0db570570c9 | 148 | py | Python | saas/portal/models.py | ParthS28/MLfly | f87494cd55c33af096d7bcd6f778c6fe06a41174 | [
"MIT"
] | 1 | 2021-05-23T12:07:32.000Z | 2021-05-23T12:07:32.000Z | saas/portal/models.py | ParthS28/MLfly | f87494cd55c33af096d7bcd6f778c6fe06a41174 | [
"MIT"
] | null | null | null | saas/portal/models.py | ParthS28/MLfly | f87494cd55c33af096d7bcd6f778c6fe06a41174 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.db.models import Count, CharField, Model
from django.urls import reverse
| 29.6 | 52 | 0.831081 |
bec9946664df93f12feef5c6b2800a8b66fdd4df | 990 | py | Python | frequencia/calendario/migrations/0001_initial.py | andersonqueiroz/frequencia | 7dae9bb6115759edb8e8297565d0dd1b638ac94a | [
"MIT"
] | 1 | 2021-11-22T17:17:03.000Z | 2021-11-22T17:17:03.000Z | frequencia/calendario/migrations/0001_initial.py | andersonqueiroz/frequencia | 7dae9bb6115759edb8e8297565d0dd1b638ac94a | [
"MIT"
] | 11 | 2019-06-18T11:19:23.000Z | 2021-08-23T12:04:54.000Z | frequencia/calendario/migrations/0001_initial.py | andersonqueiroz/frequencia | 7dae9bb6115759edb8e8297565d0dd1b638ac94a | [
"MIT"
] | 2 | 2019-04-09T16:23:22.000Z | 2022-01-27T19:13:19.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-24 15:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header: Django 1.11.3).  Creates
    # the FeriadoCalendarioAcademico table; do not hand-edit applied
    # migrations — add a new one instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='FeriadoCalendarioAcademico',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Registrado em')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atualizado em')),
                ('nome', models.CharField(max_length=100, verbose_name='Nome')),
                ('data', models.DateField(verbose_name='Data')),
            ],
            options={
                'verbose_name': 'Feriado',
                'verbose_name_plural': 'Feriados',
            },
        ),
    ]
50154cc696961324575ede3eed9b20ac6cd657da | 1,416 | py | Python | scraping/get-reviews/amazon_reviews/amazon_reviews_scrapy/spiders/amazon_reviews.py | hvarS/AmazonPrivacy | d3c975e428d8ac80dbe4b4e7a2e33082eec89524 | [
"MIT"
] | null | null | null | scraping/get-reviews/amazon_reviews/amazon_reviews_scrapy/spiders/amazon_reviews.py | hvarS/AmazonPrivacy | d3c975e428d8ac80dbe4b4e7a2e33082eec89524 | [
"MIT"
] | null | null | null | scraping/get-reviews/amazon_reviews/amazon_reviews_scrapy/spiders/amazon_reviews.py | hvarS/AmazonPrivacy | d3c975e428d8ac80dbe4b4e7a2e33082eec89524 | [
"MIT"
] | null | null | null | from pandas.core.algorithms import mode
import scrapy
import pandas as pd
import time
import os
import json
class QuotesSpider(scrapy.Spider):
    """Scrapy spider that follows Amazon review pages and collects the
    reviewer profile links into per-page CSV files under ./main."""
    name = "leapoffaith"
    def start_requests(self):
        # Seed URLs come from the first column of ./User_IDs.csv.
        list_of_urls = []
        link_file = "./User_IDs.csv"
        df1 = pd.read_csv(link_file)
        length = df1.shape[0]
        for i in range(length):
            list_of_urls.append(df1.iat[i, 0])
        urls = list_of_urls
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        # Collect reviewer profile links from the review list on this page.
        profile_links = []
        main_dir = "./main"
        # NOTE(review): `filecount` is not defined anywhere in this file, so
        # this line raises NameError at runtime — confirm where the counter
        # was meant to come from (a module global or spider attribute?).
        file_name = main_dir + "/p_" + str(filecount) + ".csv"
        for reviews in response.css("div#cm_cr-review_list div.aok-relative"):
            profile_link = "http://www.amazon.in" + reviews.css("a.a-profile").attrib['href']
            profile_links.append(profile_link)
        dict2 = {"Profile links": profile_links}
        df2 = pd.DataFrame(dict2)
        # Append when the CSV already exists so pagination accumulates rows.
        isExist2 = os.path.exists(file_name)
        if isExist2 == False:
            df2.to_csv(file_name, mode='w', index=False)
        else:
            df2.to_csv(file_name, mode='a', index=False, header=False)
        # Crude rate limiting between pages; blocks the reactor thread.
        time.sleep(10)
        next_page = response.css("ul.a-pagination li.a-last a::attr(href)").get()
        if next_page is not None:
            yield response.follow(next_page, self.parse)
dde865ec07132929cfa26715f8a96f22a786e608 | 1,162 | py | Python | Curso_de_Python_ Curso_em_Video/PythonExercicios/ex115/lib/arquivo/__init__.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_de_Python_ Curso_em_Video/PythonExercicios/ex115/lib/arquivo/__init__.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_de_Python_ Curso_em_Video/PythonExercicios/ex115/lib/arquivo/__init__.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | from ex115.lib.interface import *
def arquivoExiste(nome):
    """Return True when the file *nome* can be opened for reading."""
    try:
        handle = open(nome, 'rt')
    except FileNotFoundError:
        return False
    handle.close()
    return True
def criarArquivo(nome):
    """Create (or truncate) the file *nome*, printing a pt-BR status line."""
    try:
        open(nome, 'wt+').close()
    except:
        print('Houve um ERRO na criação do arquivo!')
    else:
        print(f'Arquivo {nome} criado com sucesso')
def lerArquivo(nome):
    """Print every 'name;age' record stored in *nome* under a header.

    BUGFIX: the original closed ``a`` in a ``finally`` clause even when
    ``open`` itself had failed, so the graceful 'Erro ao ler o arquivo'
    path immediately crashed with UnboundLocalError.  The file handle is
    now closed only when it was successfully opened.
    """
    try:
        a = open(nome, 'rt')
    except:
        # Deliberate best-effort: report and bail out without raising.
        print('Erro ao ler o arquivo')
        return
    try:
        cabecalho('PESSOAS CADASTRADAS')
        for linha in a:
            # Records are 'name;age' lines; strip the trailing newline.
            dado = linha.split(';')
            dado[1] = dado[1].replace('\n', '')
            print(f'{dado[0]:<30}{dado[1]:>3} anos')
    finally:
        a.close()
def cadastrar(arq, nome='desconhecido', idade=0):
    """Append one ``nome;idade`` record to *arq*, printing pt-BR status."""
    try:
        a = open(arq, 'at')
    except:
        print('Houve um Erro na abertura do arquivo!')
        return
    try:
        a.write(f'{nome};{idade}\n')
    except:
        print('Houve um ERRO na hora de escrever os dados')
    else:
        print(f'Novo registro de {nome} adicionado.')
    a.close()
65956df00eb457b0ef7d48cc64986cfb5138e59a | 1,157 | py | Python | cloudmesh/docker/shell.py | cloudmesh/cloudmesh-docker | 1bfa8b02fae9108930100b3e9bf152398257a1dd | [
"Apache-2.0"
] | null | null | null | cloudmesh/docker/shell.py | cloudmesh/cloudmesh-docker | 1bfa8b02fae9108930100b3e9bf152398257a1dd | [
"Apache-2.0"
] | 1 | 2020-10-03T19:22:23.000Z | 2020-10-03T19:26:18.000Z | cloudmesh/docker/shell.py | cloudmesh/cloudmesh-docker | 1bfa8b02fae9108930100b3e9bf152398257a1dd | [
"Apache-2.0"
] | null | null | null | from pprint import pprint
from cloudmesh.common.dotdict import dotdict
from cloudmesh.shell.command import map_parameters
from docopt import docopt
def main():
    """cms.

    Usage:
      cm install
      cm init
      cm --help
      cm [--echo] [--debug] [--nosplash] [-i] [COMMAND ...]

    Arguments:
      COMMAND A command to be executed

    Options:
      --file=SCRIPT -f SCRIPT Executes the script
      -i After start keep the shell interactive,
         otherwise quit [default: False]
      --nosplash do not show the banner [default: False]
    """
    # NOTE: the docstring above IS the docopt grammar — docopt parses it at
    # runtime, so its text must not be edited casually.
    arguments = dotdict(docopt(str(main.__doc__)))
    pprint(arguments)
    map_parameters(arguments,
                   'debug',
                   'echo',
                   'help',
                   'nosplash'
                   )
    # Re-assemble the recognised flags into a CLI option string.
    options = ""
    for option in ['debug', 'echo', 'help', 'nosplash']:
        if arguments[option]:
            options += f" --{option}"
    command = ' '.join(arguments.COMMAND)
    # Build the docker invocation; note it is only printed, not executed.
    # NOTE(review): usage says 'cm' but the container command is 'cms' —
    # confirm which name is intended.
    execute = f"docker run cms {options} {command}"
    print(execute)
if __name__ == '__main__':
    main()
| 24.104167 | 64 | 0.54019 |
3498c34efb086d10286df77af015ae769581b8c7 | 2,614 | py | Python | clippy.py | zenador/clips-web-app | 71a0e0a66d947a5f718e83ffa063343b84518de5 | [
"MIT"
] | null | null | null | clippy.py | zenador/clips-web-app | 71a0e0a66d947a5f718e83ffa063343b84518de5 | [
"MIT"
] | null | null | null | clippy.py | zenador/clips-web-app | 71a0e0a66d947a5f718e83ffa063343b84518de5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import eventlet as eventlib
eventlib.monkey_patch()
import clips
# Socket.IO namespace used for every emit in this module.
namespace = "/"
def AddSpecificFunction(e, func, funcname=None):
    # Register a Python callable with a CLIPS environment under an optional
    # CLIPS-side name (thin wrapper over Environment.define_function).
    e.define_function(func, funcname)
class TempAns:
    """Single-slot holder for an answer delivered asynchronously.

    Cleanups over the original: ``__init__`` previously assigned the
    (None) return value of ``clearAns()`` — it now simply calls it — and
    ``hasAns`` used ``!= None`` instead of the idiomatic identity test.
    External behavior is unchanged.
    """

    def __init__(self):
        # clearAns() already resets the slot; no need to store its return.
        self.clearAns()

    def clearAns(self):
        """Empty the slot so hasAns() reports False."""
        self.temp_ans = None

    def setAns(self, ans):
        """Store *ans* in the slot."""
        self.temp_ans = ans

    def getAns(self):
        """Return the stored answer (None when empty)."""
        return self.temp_ans

    def hasAns(self):
        # Identity check is the correct None test (robust to odd __eq__).
        return self.temp_ans is not None
class Clippy:
    """Bridges a CLIPS expert system to a Socket.IO client.

    CLIPS user functions (debug/alert/prompt/prompt2/final) are bound to
    methods of this object; prompt methods block (via eventlet sleeps)
    until the client answers through setAns().
    """
    def __init__(self, socket, sid, source):
        # Build a CLIPS environment and expose our callbacks to it, then
        # load the rule file "<source>.clp".
        clipsEnv = clips.Environment()
        AddSpecificFunction(clipsEnv, self.clips_debug, "debug")
        AddSpecificFunction(clipsEnv, self.clips_alert, "alert")
        AddSpecificFunction(clipsEnv, self.clips_prompt, "prompt")
        AddSpecificFunction(clipsEnv, self.clips_prompt2, "prompt2")
        AddSpecificFunction(clipsEnv, self.clips_final, "final")
        clipsEnv.load("{}.clp".format(source))
        self.ta = TempAns()
        self.socket = socket
        self.sid = sid
        self.clips = clips
        self.clipsEnv = clipsEnv
        self.final = []
    def clips_debug(self, message):
        # Forward a debug message to the client's room.
        print(message)
        self.socket.emit('debug', {'data': message}, namespace=namespace, room=self.sid)
        eventlib.sleep(.01)
    def clips_alert(self, message):
        # Forward an alert message to the client's room.
        print(message)
        self.socket.emit('alert', {'data': message}, namespace=namespace, room=self.sid)
        eventlib.sleep(.01)
    def clips_prompt(self, message, *options):
        # Ask the client a question and block (cooperatively) until an
        # answer is pushed in via setAns().
        print(message)
        print(options)
        self.socket.emit('prompt', {'data': message, 'options': [str(i) for i in options]}, namespace=namespace, room=self.sid)
        self.ta.clearAns()
        while not self.ta.hasAns():
            eventlib.sleep(1)
        user_input = self.ta.getAns()
        # int() is used as a probe: numeric answers become CLIPS Integers,
        # anything else becomes a Symbol.
        try:
            int(user_input)
            return self.clips.Integer(user_input)
        except:
            return self.clips.Symbol(user_input)
    def clips_prompt2(self, message, display, *options):
        # Variant of clips_prompt that pairs each option value with a
        # display line.
        print(message)
        zipped = zip([str(i) for i in options], display.split("\n"))
        # NOTE(review): on Python 3 `zip` returns an iterator, so emitting
        # `zipped` below would not serialize as a list — confirm this module
        # runs under Python 2 or wrap it in list(...).
        print(zipped)
        self.socket.emit('prompt2', {'data': message, 'options': zipped}, namespace=namespace, room=self.sid)
        self.ta.clearAns()
        while not self.ta.hasAns():
            eventlib.sleep(1)
        user_input = self.ta.getAns()
        try:
            int(user_input)
            return self.clips.Integer(user_input)
        except:
            return self.clips.Symbol(user_input)
    def clips_final(self, message):
        # Record a final result in addition to echoing it to the client.
        print(message)
        self.socket.emit('debug', {'data': message}, namespace=namespace, room=self.sid)
        self.final.append(message)
    def run(self):
        """Reset and run the CLIPS engine; return the collected finals."""
        eventlib.sleep(.01) # necessary with eventlet or first question won't appear (too soon after connect)
        self.clipsEnv.reset()
        self.clipsEnv.run()
        return self.final
    def setAns(self, ans):
        # Called from the socket layer to unblock a pending prompt.
        self.ta.setAns(ans)
| 27.515789 | 121 | 0.717674 |
2851b6fba21992385d3e520df1b577382e9cfda9 | 1,295 | py | Python | alien.py | ysmintor/alien_invasion | 5b7c70b2d18ea57eff2b57a26b664b3c5e12a54c | [
"Apache-2.0"
] | null | null | null | alien.py | ysmintor/alien_invasion | 5b7c70b2d18ea57eff2b57a26b664b3c5e12a54c | [
"Apache-2.0"
] | null | null | null | alien.py | ysmintor/alien_invasion | 5b7c70b2d18ea57eff2b57a26b664b3c5e12a54c | [
"Apache-2.0"
] | null | null | null | import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
    """A single alien in the fleet."""

    def __init__(self, ai_settings, screen):
        """Initialize the alien and place it near the top-left corner."""
        super(Alien, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings

        # Load the alien image and grab its bounding rect.
        self.image = pygame.image.load('images/alien.bmp')
        self.rect = self.image.get_rect()

        # Offset from the corner by one sprite width/height.
        self.rect.x = self.rect.width
        self.rect.y = self.rect.height

        # Track the horizontal position as a float for smooth movement.
        self.x = float(self.rect.x)

    def check_edges(self):
        """Return True if the alien touches either screen edge."""
        bounds = self.screen.get_rect()
        if self.rect.right >= bounds.right or self.rect.left <= 0:
            return True

    def update(self):
        """Move the alien horizontally according to fleet direction."""
        step = self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction
        self.x += step
        self.rect.x = self.x

    def blitme(self):
        """Draw the alien at its current location."""
        self.screen.blit(self.image, self.rect)
| 31.585366 | 90 | 0.620849 |
fe925012032839c4bee7c0cab6fd1df0dfb9a6b8 | 1,173 | py | Python | handlers/utils.py | AbhishekRana23/TeleTor | 864d528b3afbb12ac03d371e866475801ee5e0ab | [
"Apache-2.0"
] | 69 | 2018-11-30T11:22:50.000Z | 2021-12-05T20:30:53.000Z | handlers/utils.py | AbhishekRana23/TeleTor | 864d528b3afbb12ac03d371e866475801ee5e0ab | [
"Apache-2.0"
] | 2 | 2019-02-24T09:33:25.000Z | 2020-03-23T12:52:17.000Z | handlers/utils.py | AbhishekRana23/TeleTor | 864d528b3afbb12ac03d371e866475801ee5e0ab | [
"Apache-2.0"
] | 28 | 2019-03-28T16:32:00.000Z | 2022-02-07T13:14:24.000Z | import os
import urllib3
import shutil
from config import auth_cfg
def convert_bytes(num):
    """Format *num* bytes as a human-readable decimal (1000-based) string.

    Returns e.g. '1.5 KB'; values of 1000 TB or more fall off the end and
    yield None (historical behavior).
    """
    step_unit = 1000.0
    for unit in ('bytes', 'KB', 'MB', 'GB', 'TB'):
        if num < step_unit:
            return "%3.1f %s" % (num, unit)
        num /= step_unit
def auth(user=None):
    """Return True only when every field of *user* matches auth_cfg.

    A missing/empty user is rejected outright.
    """
    if not user:
        return False
    supplied = (user['id'], user['username'], user['first_name'], user['last_name'])
    expected = (auth_cfg['id'], auth_cfg['username'], auth_cfg['first_name'], auth_cfg['last_name'])
    return all(got == want for got, want in zip(supplied, expected))
def is_path_writable(path):
    """Return True if the current process may write to *path* (os.access check)."""
    return os.access(path, os.W_OK)
def download(url=None, path=None):
    """Stream the body of *url* into the file *path*.

    Uses a fresh urllib3 pool per call; preload_content=False keeps the
    response streaming so copyfileobj copies in chunks rather than
    loading the whole body into memory.
    """
    http = urllib3.PoolManager()
    with http.request('GET', url, preload_content=False) as r, open(path, 'wb') as out_file:
        shutil.copyfileobj(r, out_file)
def check_magnet_link(link=None):
    """Return True when *link* looks like a BitTorrent magnet URI.

    Returns None (not False) for missing or non-matching links —
    historical behavior preserved.
    """
    if link is None:
        return None
    import re
    matched = re.match('magnet:\?xt=urn:btih:[a-zA-Z0-9]{32,40}.+', link)
    return True if matched else None
| 25.5 | 108 | 0.622336 |
d45a5349050be3fbc7182cf918f71bfe2c1f66a0 | 6,670 | py | Python | examples/seismic/viscoelastic/operators.py | CavalcanteLucas/devito | f52cfd7d55b91f83245f33af4424adbdb03075d8 | [
"MIT"
] | 1 | 2020-01-30T17:49:12.000Z | 2020-01-30T17:49:12.000Z | examples/seismic/viscoelastic/operators.py | CavalcanteLucas/devito | f52cfd7d55b91f83245f33af4424adbdb03075d8 | [
"MIT"
] | 1 | 2019-11-06T18:01:25.000Z | 2019-11-06T18:01:25.000Z | examples/seismic/viscoelastic/operators.py | CavalcanteLucas/devito | f52cfd7d55b91f83245f33af4424adbdb03075d8 | [
"MIT"
] | 2 | 2018-11-15T12:03:48.000Z | 2018-11-15T13:16:19.000Z | import sympy as sp
from devito import Eq, Operator
from examples.seismic.elastic import tensor_function, vector_function, src_rec
def viscoelastic_2d(model, space_order, save, geometry):
    """
    2D viscoelastic wave equation FD kernel.

    Builds the velocity (vx, vz), stress (txx, tzz, txz) and memory-variable
    (rxx, rzz, rxz) update equations plus source/receiver terms, and returns
    them as a list of devito Eq objects.
    """
    vp, qp, vs, qs, rho, damp = \
        model.vp, model.qp, model.vs, model.qs, model.rho, model.damp
    s = model.grid.stepping_dim.spacing
    # Derived elastic moduli: mu (shear), l (first Lame), pi = l + 2*mu.
    cp2 = vp*vp
    cs2 = vs*vs
    ro = 1/rho
    mu = cs2*rho
    l = rho*(cp2 - 2*cs2)
    pi = l + 2*mu
    # Relaxation times derived from the quality factors qp/qs and the
    # source peak frequency f0 (standard-linear-solid attenuation model —
    # presumably; confirm against the accompanying paper/notebook).
    f0 = geometry._f0
    t_s = (sp.sqrt(1.+1./qp**2)-1./qp)/f0
    t_ep = 1./(f0**2*t_s)
    t_es = (1.+f0*qs*t_s)/(f0*qs-f0**2*t_s)
    # Create symbols for forward wavefield, source and receivers
    vx, vy, vz = vector_function('v', model, save, space_order)
    txx, tyy, tzz, _, txz, _ = tensor_function('t', model, save, space_order)
    rxx, ryy, rzz, _, rxz, _ = tensor_function('r', model, save, space_order)
    # Stencils: velocities first, then stresses using the *updated*
    # velocities (.forward), then the attenuation memory variables.
    u_vx = Eq(vx.forward, damp * vx + damp * s * ro * (txx.dx + txz.dy))
    u_vz = Eq(vz.forward, damp * vz + damp * ro * s * (txz.dx + tzz.dy))
    u_txx = Eq(txx.forward, damp*txx + damp*s*pi*t_ep/t_s*(vx.forward.dx+vz.forward.dy)
               - damp*2.*s*mu*t_es/t_s*(vz.forward.dy) + damp*s*rxx.forward)
    u_tzz = Eq(tzz.forward, damp*tzz + damp*s*pi*t_ep/t_s*(vx.forward.dx+vz.forward.dy)
               - damp*2.*s*mu*t_es/t_s*(vx.forward.dx) + damp*s*rzz.forward)
    u_txz = Eq(txz.forward, damp*txz + damp*s*mu*t_es/t_s*(vx.forward.dy+vz.forward.dx)
               + damp*s*rxz.forward)
    u_rxx = Eq(rxx.forward, damp*rxx
               - damp*s*1./t_s*(rxx+pi*(t_ep/t_s-1)*(vx.forward.dx+vz.forward.dy)
                                - 2*mu*(t_es/t_s-1)*vz.forward.dy))
    u_rzz = Eq(rzz.forward, damp*rzz
               - damp*s*1./t_s*(rzz+pi*(t_ep/t_s-1)*(vx.forward.dx+vz.forward.dy)
                                - 2*mu*(t_es/t_s-1)*vx.forward.dx))
    u_rxz = Eq(rxz.forward, damp*rxz
               - damp*s*1/t_s*(rxz+mu*(t_es/t_s-1)*(vx.forward.dy+vz.forward.dx)))
    src_rec_expr = src_rec(vx, vy, vz, txx, tyy, tzz, model, geometry)
    return [u_vx, u_vz, u_rxx, u_rzz, u_rxz, u_txx, u_tzz, u_txz] + src_rec_expr
def viscoelastic_3d(model, space_order, save, geometry):
    """
    3D viscoelastic wave equation FD kernel.

    Builds the velocity (vx, vy, vz), stress (txx..tyz) and memory-variable
    (rxx..ryz) update equations plus source/receiver terms, and returns
    them as a list of devito Eq objects.

    BUGFIX: the u_ryz update previously read ``Eq(ryz.forward, ryz - ...)``
    while every other memory-variable update here (u_rxx..u_rxy) and the
    2D kernel apply the absorbing-boundary factor to the previous value,
    i.e. ``damp*r...``.  The missing factor is restored for consistency.
    """
    vp, qp, vs, qs, rho, damp = \
        model.vp, model.qp, model.vs, model.qs, model.rho, model.damp
    s = model.grid.stepping_dim.spacing
    # Derived elastic moduli: mu (shear), l (first Lame), pi = l + 2*mu.
    cp2 = vp*vp
    cs2 = vs*vs
    ro = 1/rho
    mu = cs2*rho
    l = rho*(cp2 - 2*cs2)
    pi = l + 2*mu
    # Relaxation times from quality factors and the source peak frequency.
    f0 = geometry._f0
    t_s = (sp.sqrt(1.+1./qp**2)-1./qp)/f0
    t_ep = 1./(f0**2*t_s)
    t_es = (1.+f0*qs*t_s)/(f0*qs-f0**2*t_s)
    # Create symbols for forward wavefield, source and receivers
    vx, vy, vz = vector_function('v', model, save, space_order)
    txx, tyy, tzz, txy, txz, tyz = tensor_function('t', model, save, space_order)
    rxx, ryy, rzz, rxy, rxz, ryz = tensor_function('r', model, save, space_order)
    # Stencils: velocities, then stresses from updated velocities, then
    # the attenuation memory variables.
    u_vx = Eq(vx.forward, damp * vx + damp * s * ro * (txx.dx + txy.dy + txz.dz))
    u_vy = Eq(vy.forward, damp * vy + damp * s * ro * (txy.dx + tyy.dy + tyz.dz))
    u_vz = Eq(vz.forward, damp * vz + damp * s * ro * (txz.dx + tyz.dy + tzz.dz))
    u_txx = Eq(txx.forward, damp*txx
               + damp*s*pi*t_ep/t_s*(vx.forward.dx+vy.forward.dy+vz.forward.dz)
               - damp*2.*s*mu*t_es/t_s*(vy.forward.dy+vz.forward.dz)
               + damp*s*rxx.forward)
    u_tyy = Eq(tyy.forward, damp*tyy
               + damp*s*pi*t_ep/t_s*(vx.forward.dx+vy.forward.dy+vz.forward.dz)
               - damp*2.*s*mu*t_es/t_s*(vx.forward.dx+vz.forward.dz)
               + damp*s*ryy.forward)
    u_tzz = Eq(tzz.forward, damp*tzz
               + damp*s*pi*t_ep/t_s*(vx.forward.dx+vy.forward.dy+vz.forward.dz)
               - damp*2.*s*mu*t_es/t_s*(vx.forward.dx+vy.forward.dy)
               + damp*s*rzz.forward)
    u_txy = Eq(txy.forward, damp*txy
               + damp*s*mu*t_es/t_s*(vx.forward.dy+vy.forward.dx) + damp*s*rxy.forward)
    u_txz = Eq(txz.forward, damp*txz
               + damp*s*mu*t_es/t_s*(vx.forward.dz+vz.forward.dx) + damp*s*rxz.forward)
    u_tyz = Eq(tyz.forward, damp*tyz
               + damp*s*mu*t_es/t_s*(vy.forward.dz+vz.forward.dy) + damp*s*ryz.forward)
    u_rxx = Eq(rxx.forward, damp*rxx
               - damp*s*1./t_s*(rxx+pi*(t_ep/t_s-1)*(vx.forward.dx+vy.forward.dy
                                                     + vz.forward.dz)
                                - 2*mu*(t_es/t_s-1)*(vz.forward.dz+vy.forward.dy)))
    u_ryy = Eq(ryy.forward, damp*ryy
               - damp*s*1./t_s*(ryy+pi*(t_ep/t_s-1)*(vx.forward.dx+vy.forward.dy
                                                     + vz.forward.dz)
                                - 2*mu*(t_es/t_s-1)*(vx.forward.dx+vy.forward.dy)))
    u_rzz = Eq(rzz.forward, damp*rzz
               - damp*s*1./t_s*(rzz+pi*(t_ep/t_s-1)*(vx.forward.dx+vy.forward.dy
                                                     + vz.forward.dz)
                                - 2*mu*(t_es/t_s-1)*(vx.forward.dx+vy.forward.dy)))
    u_rxy = Eq(rxy.forward, damp*rxy
               - damp*s*1/t_s*(rxy+mu*(t_es/t_s-1)*(vx.forward.dy+vy.forward.dx)))
    u_rxz = Eq(rxz.forward, damp*rxz
               - damp*s*1/t_s*(rxz+mu*(t_es/t_s-1)*(vx.forward.dz+vz.forward.dx)))
    # Fixed: damping factor on the previous ryz value (was bare `ryz`).
    u_ryz = Eq(ryz.forward, damp*ryz
               - damp*s*1/t_s*(ryz+mu*(t_es/t_s-1)*(vy.forward.dz+vz.forward.dy)))
    src_rec_expr = src_rec(vx, vy, vz, txx, tyy, tzz, model, geometry)
    return [u_vx, u_vy, u_vz, u_rxx, u_ryy, u_rzz, u_rxz, u_rxy, u_ryz,
            u_txx, u_tyy, u_tzz, u_txz, u_txy, u_tyz] + src_rec_expr
def ForwardOperator(model, geometry, space_order=4, save=False, **kwargs):
    """
    Construct method for the forward modelling operator in a viscoelastic
    media.

    Selects the 2D or 3D kernel from ``kernels`` based on the grid
    dimensionality and wraps the resulting equations in a devito Operator.

    Parameters
    ----------
    model : Model
        Object containing the physical parameters.
    geometry : AcquisitionGeometry
        Geometry object that contains the source (SparseTimeFunction) and
        receivers (SparseTimeFunction) and their position.
    space_order : int, optional
        Space discretization order.
    save : int or Buffer
        Saving flag, True saves all time steps, False saves three buffered
        indices (last three time steps). Defaults to False.
    """
    wave = kernels[model.grid.dim]
    pde = wave(model, space_order, geometry.nt if save else None, geometry)
    # Substitute spacing terms to reduce flops
    return Operator(pde, subs=model.spacing_map,
                    name='Forward', **kwargs)
# Kernel dispatch table keyed by grid dimensionality.
kernels = {3: viscoelastic_3d, 2: viscoelastic_2d}
| 39.467456 | 87 | 0.575562 |
0697fe21007d68cecae2fcceea4af17ff6635eff | 1,457 | py | Python | tinkoff/invest/strategies/base/account_manager.py | Tinkoff/invest-python | 26ef8cc1e70d144707246e64f29466b4491d4f8c | [
"Apache-2.0"
] | 41 | 2022-01-21T05:38:57.000Z | 2022-03-30T03:54:41.000Z | tinkoff/invest/strategies/base/account_manager.py | Tinkoff/invest-python | 26ef8cc1e70d144707246e64f29466b4491d4f8c | [
"Apache-2.0"
] | 20 | 2022-01-24T05:46:02.000Z | 2022-03-31T16:54:04.000Z | tinkoff/invest/strategies/base/account_manager.py | Tinkoff/invest-python | 26ef8cc1e70d144707246e64f29466b4491d4f8c | [
"Apache-2.0"
] | 15 | 2022-01-25T06:53:27.000Z | 2022-03-30T03:49:07.000Z | import logging
from decimal import Decimal
from tinkoff.invest import Quotation
from tinkoff.invest.services import Services
from tinkoff.invest.strategies.base.errors import (
InsufficientMarginalTradeFunds,
MarginalTradeIsNotActive,
)
from tinkoff.invest.strategies.base.strategy_settings_base import StrategySettings
from tinkoff.invest.utils import quotation_to_decimal
logger = logging.getLogger(__name__)
class AccountManager:
    """Account-level queries for a trading strategy: balance lookup and
    marginal-trade preconditions, resolved via the Tinkoff Invest services."""
    def __init__(self, services: Services, strategy_settings: StrategySettings):
        self._services = services
        self._strategy_settings = strategy_settings
    def get_current_balance(self) -> Decimal:
        """Return the account's total currency balance as a Decimal."""
        account_id = self._strategy_settings.account_id
        portfolio_response = self._services.operations.get_portfolio(
            account_id=account_id
        )
        balance = portfolio_response.total_amount_currencies
        return quotation_to_decimal(Quotation(units=balance.units, nano=balance.nano))
    def ensure_marginal_trade(self) -> None:
        """Raise unless marginal trading is active with sufficient funds.

        Raises MarginalTradeIsNotActive when margin attributes cannot be
        fetched, and InsufficientMarginalTradeFunds when the funds
        sufficiency level is <= 1.
        """
        account_id = self._strategy_settings.account_id
        try:
            response = self._services.users.get_margin_attributes(account_id=account_id)
        except Exception as e:
            # Any failure here is interpreted as "margin trade not active";
            # the original cause is chained for debugging.
            raise MarginalTradeIsNotActive() from e
        value = quotation_to_decimal(response.funds_sufficiency_level)
        if value <= 1:
            raise InsufficientMarginalTradeFunds()
        logger.info("Marginal trade is active")
| 37.358974 | 88 | 0.748799 |
d29c6c6a0ce335ea56da25c8c20efe16b624d8c2 | 4,934 | py | Python | torch_geometric/data/lightning_data_module.py | Riyer01/pytorch_geometric | e8915ad1cb5831c33c77f0fa69ee8a2267074647 | [
"MIT"
] | 1 | 2022-03-22T10:08:23.000Z | 2022-03-22T10:08:23.000Z | torch_geometric/data/lightning_data_module.py | marcmk6/pytorch_geometric | e8915ad1cb5831c33c77f0fa69ee8a2267074647 | [
"MIT"
] | null | null | null | torch_geometric/data/lightning_data_module.py | marcmk6/pytorch_geometric | e8915ad1cb5831c33c77f0fa69ee8a2267074647 | [
"MIT"
] | null | null | null | from typing import Optional
import warnings
from torch_geometric.data import Dataset
from torch_geometric.loader import DataLoader
# Optional dependency: fall back to a plain `object` base class when
# pytorch_lightning is absent, and record the fact so __init__ can raise
# a helpful error only when the class is actually instantiated.
try:
    from pytorch_lightning import LightningDataModule
    no_pytorch_lightning = False
except (ImportError, ModuleNotFoundError):
    LightningDataModule = object
    no_pytorch_lightning = True
class LightningDataset(LightningDataModule):
    r"""Converts a :class:`torch_geometric.data.Dataset` into a
    :class:`pytorch_lightning.LightningDataModule` variant, which can be
    automatically used as a :obj:`data_module` for multi-GPU training via
    `PyTorch Lightning <https://www.pytorchlightning.ai>`_.
    :class:`LightningDataset` will take care of providing mini-batches via
    :class:`torch_geometric.loader.DataLoader`.
    .. note::
        Currently only supports the :obj:`"ddp_spawn"` training strategy of
        PyTorch Lightning:
        .. code-block::
            import pytorch_lightning as pl
            trainer = pl.Trainer(strategy="ddp_spawn")
    Args:
        train_dataset: (Dataset) The training dataset.
        val_dataset: (Dataset, optional) The validation dataset.
            (default: :obj:`None`)
        test_dataset: (Dataset, optional) The test dataset.
            (default: :obj:`None`)
        batch_size (int, optional): How many samples per batch to load.
            (default: :obj:`1`)
        num_workers: How many subprocesses to use for data loading.
            :obj:`0` means that the data will be loaded in the main process.
            (default: :obj:`0`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.loader.DataLoader`.
    """
    def __init__(
        self,
        train_dataset: Dataset,
        val_dataset: Optional[Dataset] = None,
        test_dataset: Optional[Dataset] = None,
        batch_size: int = 1,
        num_workers: int = 0,
        **kwargs,
    ):
        super().__init__()
        if no_pytorch_lightning:
            raise ModuleNotFoundError(
                "No module named 'pytorch_lightning'. Please install it "
                "via 'pip install pytorch_lightning'")
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.test_dataset = test_dataset
        self.batch_size = batch_size
        self.num_workers = num_workers
        # NOTE: self.kwargs aliases `kwargs`, so the `del` statements below
        # also strip these keys from what gets forwarded to DataLoader.
        self.kwargs = kwargs
        # 'shuffle' is decided per-split by the *_dataloader methods.
        if 'shuffle' in kwargs:
            shuffle = kwargs['shuffle']
            warnings.warn(f"The 'shuffle={shuffle}' option is ignored in "
                          f"'{self.__class__.__name__}'. Remove it from the "
                          f"argument list to disable this warning")
            del kwargs['shuffle']
        # pin_memory / persistent_workers are pulled out so dataloader()
        # can pass them explicitly with sensible defaults.
        if 'pin_memory' in kwargs:
            self.pin_memory = kwargs['pin_memory']
            del kwargs['pin_memory']
        else:
            self.pin_memory = True
        if 'persistent_workers' in kwargs:
            self.persistent_workers = kwargs['persistent_workers']
            del kwargs['persistent_workers']
        else:
            self.persistent_workers = num_workers > 0
        # Shadow the bound *_dataloader methods with None so Lightning
        # skips splits that have no dataset.
        if self.val_dataset is None:
            self.val_dataloader = None
        if self.test_dataset is None:
            self.test_dataloader = None
    def setup(self, stage: Optional[str] = None):
        # Only the ddp_spawn strategy is supported; fail fast otherwise.
        from pytorch_lightning.plugins import DDPSpawnPlugin
        if not isinstance(self.trainer.training_type_plugin, DDPSpawnPlugin):
            raise NotImplementedError(
                f"'{self.__class__.__name__}' currently only supports the "
                f"'ddp_spawn' training strategy of 'pytorch_lightning'")
    def dataloader(self, dataset_name: str, shuffle: bool) -> DataLoader:
        # Shared DataLoader factory for all three splits.
        return DataLoader(
            dataset=getattr(self, dataset_name),
            batch_size=self.batch_size,
            shuffle=shuffle,
            pin_memory=self.pin_memory,
            num_workers=self.num_workers,
            persistent_workers=self.persistent_workers,
            **self.kwargs,
        )
    def train_dataloader(self) -> DataLoader:
        # IterableDataset defines its own order; shuffling is unsupported.
        from torch.utils.data import IterableDataset
        shuffle = not isinstance(self.train_dataset, IterableDataset)
        return self.dataloader('train_dataset', shuffle=shuffle)
    def val_dataloader(self) -> DataLoader:
        return self.dataloader('val_dataset', shuffle=False)
    def test_dataloader(self) -> DataLoader:
        return self.dataloader('test_dataset', shuffle=False)
    def __repr__(self) -> str:
        # Render only the pieces that were actually supplied.
        args_repr = [f'train_dataset={self.train_dataset}']
        if self.val_dataset is not None:
            args_repr += [f'val_dataset={self.val_dataset}']
        if self.test_dataset is not None:
            args_repr += [f'test_dataset={self.test_dataset}']
        args_repr += [f'batch_size={self.batch_size}']
        args_repr = ', '.join(args_repr)
        return f'{self.__class__.__name__}({args_repr})'
76c3e8e9ee17edd26eac574c0258e1e2426b06bd | 171,093 | py | Python | src/config/vnc_openstack/vnc_openstack/neutron_plugin_db.py | jsmith00/contrail-controller | 95cda68a50f8f23d08c3ed08805f7af5781f7fdc | [
"Apache-2.0"
] | null | null | null | src/config/vnc_openstack/vnc_openstack/neutron_plugin_db.py | jsmith00/contrail-controller | 95cda68a50f8f23d08c3ed08805f7af5781f7fdc | [
"Apache-2.0"
] | null | null | null | src/config/vnc_openstack/vnc_openstack/neutron_plugin_db.py | jsmith00/contrail-controller | 95cda68a50f8f23d08c3ed08805f7af5781f7fdc | [
"Apache-2.0"
] | null | null | null | # Copyright 2012, Contrail Systems, Inc.
#
"""
.. attention:: Fix the license string
"""
import requests
import re
import uuid
import json
import time
import socket
import netaddr
from netaddr import IPNetwork, IPSet, IPAddress
import gevent
import bottle
from neutron.common import constants
from neutron.common import exceptions
from neutron.api.v2 import attributes as attr
from cfgm_common import exceptions as vnc_exc
from vnc_api.vnc_api import *
from vnc_api.common import SG_NO_RULE_FQ_NAME, SG_NO_RULE_NAME
import vnc_openstack
# Headers used when relaying JSON requests to the VNC API server.
_DEFAULT_HEADERS = {
    'Content-type': 'application/json; charset="UTF-8"', }
# TODO find if there is a common definition
# CRUD operation codes used internally by this plugin.
CREATE = 1
READ = 2
UPDATE = 3
DELETE = 4
# Map numeric IP protocol values to the names Neutron uses.
IP_PROTOCOL_MAP = {constants.PROTO_NUM_TCP: constants.PROTO_NAME_TCP,
                   constants.PROTO_NUM_UDP: constants.PROTO_NAME_UDP,
                   constants.PROTO_NUM_ICMP: constants.PROTO_NAME_ICMP}
# SNAT defines
SNAT_SERVICE_TEMPLATE_FQ_NAME = ['default-domain', 'netns-snat-template']
# Prefix for interface route tables created on behalf of Neutron ports.
_IFACE_ROUTE_TABLE_NAME_PREFIX = 'NEUTRON_IFACE_RT'
class DBInterface(object):
"""
An instance of this class forwards requests to vnc cfg api (web)server
"""
Q_URL_PREFIX = '/extensions/ct'
    def __init__(self, admin_name, admin_password, admin_tenant_name,
                 api_srvr_ip, api_srvr_port, user_info=None,
                 contrail_extensions_enabled=True,
                 list_optimization_enabled=False,
                 apply_subnet_host_routes=False):
        """Connect to the contrail api-server, retrying until it is up.

        Blocks forever (3s backoff) until a VncApi session is created.
        """
        self._api_srvr_ip = api_srvr_ip
        self._api_srvr_port = api_srvr_port
        self._apply_subnet_host_routes = apply_subnet_host_routes
        self._contrail_extensions_enabled = contrail_extensions_enabled
        self._list_optimization_enabled = list_optimization_enabled
        # Retry till an api-server is up
        connected = False
        while not connected:
            try:
                # TODO remove hardcode
                self._vnc_lib = VncApi(admin_name, admin_password,
                                       admin_tenant_name, api_srvr_ip,
                                       api_srvr_port, '/', user_info=user_info)
                connected = True
            except requests.exceptions.RequestException as e:
                gevent.sleep(3)
    #end __init__
# Helper routines
def _request_api_server(self, url, method, data=None, headers=None):
if method == 'GET':
return requests.get(url)
if method == 'POST':
return requests.post(url, data=data, headers=headers)
if method == 'DELETE':
return requests.delete(url)
#end _request_api_server
    def _relay_request(self, request):
        """
        Send received request to api server

        Rewrites the neutron extension URL prefix into the api-server's
        address and forwards method, body and content-type unchanged.
        """
        # chop neutron parts of url and add api server address
        url_path = re.sub(self.Q_URL_PREFIX, '', request.environ['PATH_INFO'])
        url = "http://%s:%s%s" % (self._api_srvr_ip, self._api_srvr_port,
                                  url_path)

        return self._request_api_server(
            url, request.environ['REQUEST_METHOD'],
            request.body, {'Content-type': request.environ['CONTENT_TYPE']})
    #end _relay_request
def _validate_project_ids(self, context, project_ids):
if context and not context['is_admin']:
return [context['tenant']]
return_project_ids = []
for project_id in project_ids:
try:
return_project_ids.append(str(uuid.UUID(project_id)))
except ValueError:
continue
return return_project_ids
    def _obj_to_dict(self, obj):
        """Serialize a vnc api object into a plain dict."""
        return self._vnc_lib.obj_to_dict(obj)
    #end _obj_to_dict

    def _get_plugin_property(self, property_in):
        """Look up a plugin-tuning property value from the global system
        config; returns None when the property is not set."""
        fq_name=['default-global-system-config'];
        gsc_obj = self._vnc_lib.global_system_config_read(fq_name);
        plugin_settings = gsc_obj.plugin_tuning.plugin_property
        for each_setting in plugin_settings:
            if each_setting.property == property_in:
                return each_setting.value
        return None
    #end _get_plugin_property
    def _ensure_instance_exists(self, instance_id):
        """Read the VirtualMachine for instance_id, creating it if absent.

        When instance_id is a valid uuid it becomes the VM's uuid;
        otherwise the backend generates one.
        """
        instance_name = instance_id
        instance_obj = VirtualMachine(instance_name)
        try:
            id = self._vnc_lib.obj_to_id(instance_obj)
            instance_obj = self._vnc_lib.virtual_machine_read(id=id)
        except NoIdError: # instance doesn't exist, create it
            # check if instance_id is a uuid value or not
            try:
                uuid.UUID(instance_id)
                instance_obj.uuid = instance_id
            except ValueError:
                # if instance_id is not a valid uuid, let
                # virtual_machine_create generate uuid for the vm
                pass
            self._vnc_lib.virtual_machine_create(instance_obj)
        return instance_obj
    #end _ensure_instance_exists

    def _ensure_default_security_group_exists(self, proj_id):
        """Create the project's 'default' security group if missing."""
        proj_id = str(uuid.UUID(proj_id))
        proj_obj = self._vnc_lib.project_read(id=proj_id)
        vnc_openstack.ensure_default_security_group(self._vnc_lib, proj_obj)
    #end _ensure_default_security_group_exists
    def _get_obj_tenant_id(self, q_type, obj_uuid):
        """Resolve the owning tenant id (dashless uuid) for a port or
        network; returns None for any other q_type."""
        # Seed the cache and return
        if q_type == 'port':
            port_obj = self._virtual_machine_interface_read(obj_uuid)
            if port_obj.parent_type != "project":
                net_id = port_obj.get_virtual_network_refs()[0]['uuid']
                # recurse up type-hierarchy
                tenant_id = self._get_obj_tenant_id('network', net_id)
            else:
                tenant_id = port_obj.parent_uuid.replace('-', '')
            return tenant_id
        if q_type == 'network':
            net_obj = self._virtual_network_read(net_id=obj_uuid)
            tenant_id = net_obj.parent_uuid.replace('-', '')
            return tenant_id
        return None
    #end _get_obj_tenant_id
    def _project_read(self, proj_id=None, fq_name=None):
        """Read a Project object by uuid or fq_name."""
        proj_obj = self._vnc_lib.project_read(id=proj_id, fq_name=fq_name)
        return proj_obj
    #end _project_read
def _get_tenant_id_for_create(self, context, resource):
if context['is_admin'] and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context['tenant_id']):
reason = _('Cannot create resource for another tenant')
self._raise_contrail_exception('AdminRequired', reason=reason)
else:
tenant_id = context['tenant_id']
return tenant_id
    # Encode and send an exception information to neutron. exc must be a
    # valid exception class name in neutron, kwargs must contain all
    # necessary arguments to create that exception
    def _raise_contrail_exception(self, exc, **kwargs):
        """Abort the bottle request with a JSON-encoded neutron exception."""
        exc_info = {'exception': exc}
        exc_info.update(kwargs)
        # bottle.abort raises, so this method never returns normally
        bottle.abort(400, json.dumps(exc_info))
    #end _raise_contrail_exception
    def _security_group_rule_create(self, sg_id, sg_rule):
        """Append a PolicyRuleType to an existing security group.

        Raises SecurityGroupNotFound for an unknown sg_id and BadRequest
        when the backend rejects the update.
        """
        try:
            sg_vnc = self._vnc_lib.security_group_read(id=sg_id)
        except NoIdError:
            self._raise_contrail_exception('SecurityGroupNotFound', id=sg_id)
        rules = sg_vnc.get_security_group_entries()
        if rules is None:
            rules = PolicyEntriesType([sg_rule])
        else:
            rules.add_policy_rule(sg_rule)
        sg_vnc.set_security_group_entries(rules)
        try:
            self._vnc_lib.security_group_update(sg_vnc)
        except PermissionDenied as e:
            self._raise_contrail_exception('BadRequest',
                                           resource='security_group_rule', msg=str(e))
        return
    #end _security_group_rule_create
def _security_group_rule_find(self, sgr_id, project_uuid=None):
# Get all security group for a project if project uuid is specified
# else get all security groups in the system(admin context)
project_sgs = self._security_group_list_project(project_uuid)
for sg_obj in project_sgs:
sgr_entries = sg_obj.get_security_group_entries()
if sgr_entries is None:
continue
for sg_rule in sgr_entries.get_policy_rule():
if sg_rule.get_rule_uuid() == sgr_id:
return sg_obj, sg_rule
return None, None
#end _security_group_rule_find
    def _security_group_rule_delete(self, sg_obj, sg_rule):
        """Remove a rule from a security group and persist the change."""
        rules = sg_obj.get_security_group_entries()
        rules.get_policy_rule().remove(sg_rule)
        sg_obj.set_security_group_entries(rules)
        self._vnc_lib.security_group_update(sg_obj)
        return
    #end _security_group_rule_delete

    def _security_group_delete(self, sg_id):
        """Delete a security group by uuid."""
        self._vnc_lib.security_group_delete(id=sg_id)
    #end _security_group_delete
    def _svc_instance_create(self, si_obj):
        """Create a service instance and bind it to the nat-template."""
        try:
            si_uuid = self._vnc_lib.service_instance_create(si_obj)
        except RefsExistError as e:
            self._raise_contrail_exception('BadRequest',
                                           resource='svc_instance', msg=str(e))
        st_fq_name = ['default-domain', 'nat-template']
        st_obj = self._vnc_lib.service_template_read(fq_name=st_fq_name)
        si_obj.set_service_template(st_obj)
        self._vnc_lib.service_instance_update(si_obj)
        return si_uuid
    #end _svc_instance_create

    def _svc_instance_delete(self, si_id):
        """Delete a service instance by uuid."""
        self._vnc_lib.service_instance_delete(id=si_id)
    #end _svc_instance_delete

    def _route_table_create(self, rt_obj):
        """Create a route table; returns its uuid."""
        rt_uuid = self._vnc_lib.route_table_create(rt_obj)
        return rt_uuid
    #end _route_table_create

    def _route_table_delete(self, rt_id):
        """Delete a route table by uuid."""
        self._vnc_lib.route_table_delete(id=rt_id)
    #end _route_table_delete
    def _resource_create(self, resource_type, obj):
        """Generic create with name-collision retry.

        On RefsExistError (duplicate name) the object is renamed by
        appending a fresh uuid and created once more.
        """
        create_method = getattr(self._vnc_lib, resource_type + '_create')
        try:
            obj_uuid = create_method(obj)
        except RefsExistError:
            obj.uuid = str(uuid.uuid4())
            obj.name += '-' + obj.uuid
            obj.fq_name[-1] += '-' + obj.uuid
            obj_uuid = create_method(obj)
        except (PermissionDenied, BadRequest) as e:
            self._raise_contrail_exception('BadRequest',
                                           resource=resource_type, msg=str(e))
        return obj_uuid
    #end _resource_create
    def _virtual_network_read(self, net_id=None, fq_name=None, fields=None):
        """Read a VirtualNetwork by uuid or fq_name."""
        net_obj = self._vnc_lib.virtual_network_read(id=net_id,
                                                     fq_name=fq_name,
                                                     fields=fields)
        return net_obj
    #end _virtual_network_read

    def _virtual_network_update(self, net_obj):
        """Persist changes to a VirtualNetwork; maps backend refusals to
        a neutron BadRequest."""
        try:
            self._vnc_lib.virtual_network_update(net_obj)
        except (PermissionDenied, RefsExistError) as e:
            self._raise_contrail_exception('BadRequest',
                                           resource='network', msg=str(e))
        # read back to get subnet gw allocated by api-server
        # NOTE(review): fq_name_str is computed but never used here —
        # looks like a leftover; confirm before removing.
        fq_name_str = json.dumps(net_obj.get_fq_name())
    #end _virtual_network_update

    def _virtual_network_delete(self, net_id):
        """Delete a network after removing its floating-ip pools.

        Silently returns when the network no longer exists; raises
        NetworkInUse when other references remain.
        """
        fq_name_str = None
        try:
            net_obj = self._vnc_lib.virtual_network_read(id=net_id)
            fq_name_str = json.dumps(net_obj.get_fq_name())
        except NoIdError:
            return
        try:
            if net_obj.get_floating_ip_pools():
                fip_pools = net_obj.get_floating_ip_pools()
                for fip_pool in fip_pools:
                    self._floating_ip_pool_delete(fip_pool_id=fip_pool['uuid'])
            self._vnc_lib.virtual_network_delete(id=net_id)
        except RefsExistError:
            self._raise_contrail_exception('NetworkInUse', net_id=net_id)
    #end _virtual_network_delete

    def _virtual_network_list(self, parent_id=None, obj_uuids=None,
                              fields=None, detail=False, count=False):
        """List VirtualNetworks with optional filters/count."""
        return self._vnc_lib.virtual_networks_list(
                                              parent_id=parent_id,
                                              obj_uuids=obj_uuids,
                                              fields=fields,
                                              detail=detail,
                                              count=count)
    #end _virtual_network_list
    def _virtual_machine_interface_read(self, port_id=None, fq_name=None,
                                        fields=None):
        """Read a VMI (port), always pulling the back-refs neutron needs
        (router/instance-ip/floating-ip) in addition to caller fields."""
        back_ref_fields = ['logical_router_back_refs', 'instance_ip_back_refs', 'floating_ip_back_refs']
        if fields:
            n_extra_fields = list(set(fields + back_ref_fields))
        else:
            n_extra_fields = back_ref_fields
        port_obj = self._vnc_lib.virtual_machine_interface_read(
            id=port_id, fq_name=fq_name, fields=n_extra_fields)
        return port_obj
    #end _virtual_machine_interface_read

    def _virtual_machine_interface_update(self, port_obj):
        """Persist changes to a VMI (port)."""
        self._vnc_lib.virtual_machine_interface_update(port_obj)
    #end _virtual_machine_interface_update

    def _virtual_machine_interface_delete(self, port_id):
        """Delete a VMI (port) by uuid."""
        self._vnc_lib.virtual_machine_interface_delete(id=port_id)
    #end _virtual_machine_interface_delete

    def _virtual_machine_interface_list(self, parent_id=None, back_ref_id=None,
                                        obj_uuids=None, fields=None):
        """List VMIs in detail, always including the same back-ref fields
        as _virtual_machine_interface_read."""
        back_ref_fields = ['logical_router_back_refs', 'instance_ip_back_refs', 'floating_ip_back_refs']
        if fields:
            n_extra_fields = list(set(fields + back_ref_fields))
        else:
            n_extra_fields = back_ref_fields
        vmi_objs = self._vnc_lib.virtual_machine_interfaces_list(
                                                     parent_id=parent_id,
                                                     back_ref_id=back_ref_id,
                                                     obj_uuids=obj_uuids,
                                                     detail=True,
                                                     fields=n_extra_fields)
        return vmi_objs
    #end _virtual_machine_interface_list
    def _instance_ip_create(self, iip_obj):
        """Create an instance-ip; returns its uuid."""
        iip_uuid = self._vnc_lib.instance_ip_create(iip_obj)
        return iip_uuid
    #end _instance_ip_create

    def _instance_ip_read(self, instance_ip_id=None, fq_name=None):
        """Read an instance-ip by uuid or fq_name."""
        iip_obj = self._vnc_lib.instance_ip_read(id=instance_ip_id,
                                                 fq_name=fq_name)
        return iip_obj
    #end _instance_ip_read

    def _instance_ip_update(self, iip_obj):
        """Persist changes to an instance-ip."""
        self._vnc_lib.instance_ip_update(iip_obj)
    #end _instance_ip_update

    def _instance_ip_delete(self, instance_ip_id):
        """Delete an instance-ip by uuid."""
        self._vnc_lib.instance_ip_delete(id=instance_ip_id)
    #end _instance_ip_delete

    def _virtual_machine_list(self, back_ref_id=None, obj_uuids=None, fields=None):
        """List VirtualMachines in detail with optional filters."""
        vm_objs = self._vnc_lib.virtual_machines_list(detail=True,
                                                      back_ref_id=back_ref_id,
                                                      obj_uuids=obj_uuids,
                                                      fields=fields)
        return vm_objs
    #end _virtual_machine_list

    def _instance_ip_list(self, back_ref_id=None, obj_uuids=None, fields=None):
        """List instance-ips in detail with optional filters."""
        iip_objs = self._vnc_lib.instance_ips_list(detail=True,
                                                   back_ref_id=back_ref_id,
                                                   obj_uuids=obj_uuids,
                                                   fields=fields)
        return iip_objs
    #end _instance_ip_list

    def _floating_ip_pool_create(self, fip_pool_obj):
        """Create a floating-ip pool; returns its uuid."""
        fip_pool_uuid = self._vnc_lib.floating_ip_pool_create(fip_pool_obj)
        return fip_pool_uuid
    # end _floating_ip_pool_create

    def _floating_ip_pool_delete(self, fip_pool_id):
        # return value of the backend delete is discarded
        fip_pool_uuid = self._vnc_lib.floating_ip_pool_delete(id=fip_pool_id)
    # end _floating_ip_pool_delete
    # find projects on a given domain
    def _project_list_domain(self, domain_id):
        """List projects; domain_id is ignored for now (see TODO)."""
        # TODO till domain concept is not present in keystone
        fq_name = ['default-domain']
        resp_dict = self._vnc_lib.projects_list(parent_fq_name=fq_name)
        return resp_dict['projects']
    #end _project_list_domain
# find network ids on a given project
def _network_list_project(self, project_id, count=False):
if project_id:
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
else:
project_uuid = None
if count:
ret_val = self._virtual_network_list(parent_id=project_uuid,
count=True)
else:
ret_val = self._virtual_network_list(parent_id=project_uuid,
detail=True)
return ret_val
#end _network_list_project
# find router ids on a given project
def _router_list_project(self, project_id=None, detail=False):
if project_id:
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
return []
else:
project_uuid = None
resp = self._vnc_lib.logical_routers_list(parent_id=project_uuid,
detail=detail)
if detail:
return resp
return resp['logical-routers']
#end _router_list_project
def _ipam_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
resp_dict = self._vnc_lib.network_ipams_list(parent_id=project_uuid)
return resp_dict['network-ipams']
#end _ipam_list_project
def _security_group_list_project(self, project_id):
if project_id:
try:
project_uuid = str(uuid.UUID(project_id))
# Trigger a project read to ensure project sync
project_obj = self._project_read(proj_id=project_uuid)
except Exception:
raise
else:
project_uuid = None
sg_objs = self._vnc_lib.security_groups_list(parent_id=project_uuid,
detail=True)
return sg_objs
#end _security_group_list_project
def _security_group_entries_list_sg(self, sg_id):
try:
sg_uuid = str(uuid.UUID(sg_id))
except Exception:
print "Error in converting SG uuid %s" % (sg_id)
resp_dict = self._vnc_lib.security_groups_list(obj_uuids=[sg_uuid])
return resp_dict['security-groups']
#end _security_group_entries_list_sg
def _route_table_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
resp_dict = self._vnc_lib.route_tables_list(parent_id=project_uuid)
return resp_dict['route-tables']
#end _route_table_list_project
def _svc_instance_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
resp_dict = self._vnc_lib.service_instances_list(parent_id=project_id)
return resp_dict['service-instances']
#end _svc_instance_list_project
def _policy_list_project(self, project_id):
try:
project_uuid = str(uuid.UUID(project_id))
except Exception:
print "Error in converting uuid %s" % (project_id)
resp_dict = self._vnc_lib.network_policys_list(parent_id=project_uuid)
return resp_dict['network-policys']
#end _policy_list_project
    def _logical_router_list(self, parent_id=None, back_ref_id=None,
                             obj_uuids=None, fields=None):
        """List LogicalRouters in detail with optional filters."""
        rtr_obj = self._vnc_lib.logical_routers_list(parent_id=parent_id,
                                                     back_ref_id=back_ref_id,
                                                     obj_uuids=obj_uuids,
                                                     detail=True,
                                                     fields=fields)
        return rtr_obj
    #end _logical_router_list

    def _logical_router_read(self, rtr_id=None, fq_name=None):
        """Read a LogicalRouter by uuid or fq_name."""
        rtr_obj = self._vnc_lib.logical_router_read(id=rtr_id, fq_name=fq_name)
        return rtr_obj
    #end _logical_router_read

    def _logical_router_update(self, rtr_obj):
        """Persist changes to a LogicalRouter."""
        self._vnc_lib.logical_router_update(rtr_obj)
        # NOTE(review): fq_name_str is unused — looks like a leftover.
        fq_name_str = json.dumps(rtr_obj.get_fq_name())
    #end _logical_router_update

    def _logical_router_delete(self, rtr_id):
        """Delete a router; maps dangling refs to RouterInUse."""
        try:
            self._vnc_lib.logical_router_delete(id=rtr_id)
        except RefsExistError:
            self._raise_contrail_exception('RouterInUse', router_id=rtr_id)
    #end _logical_router_delete

    def _floatingip_list(self, back_ref_id=None):
        """List FloatingIps in detail, optionally by back-ref."""
        return self._vnc_lib.floating_ips_list(back_ref_id=back_ref_id,
                                               detail=True)
    #end _floatingip_list

    # find floating ip pools a project has access to
    def _fip_pool_refs_project(self, project_id):
        """Return the project's floating-ip-pool refs (may be None)."""
        project_obj = self._project_read(proj_id=project_id)
        return project_obj.get_floating_ip_pool_refs()
    #end _fip_pool_refs_project
def _network_list_shared_and_ext(self):
ret_list = []
nets = self._network_list_project(project_id=None)
for net in nets:
if net.get_router_external() and net.get_is_shared():
ret_list.append(net)
return ret_list
# end _network_list_router_external
def _network_list_router_external(self):
ret_list = []
nets = self._network_list_project(project_id=None)
for net in nets:
if not net.get_router_external():
continue
ret_list.append(net)
return ret_list
# end _network_list_router_external
def _network_list_shared(self):
ret_list = []
nets = self._network_list_project(project_id=None)
for net in nets:
if not net.get_is_shared():
continue
ret_list.append(net)
return ret_list
# end _network_list_shared
    # find networks of floating ip pools project has access to
    def _fip_pool_ref_networks(self, project_id):
        """Return shared networks plus networks backing the fip pools the
        project references."""
        ret_net_objs = self._network_list_shared()
        proj_fip_pool_refs = self._fip_pool_refs_project(project_id)
        if not proj_fip_pool_refs:
            return ret_net_objs
        for fip_pool_ref in proj_fip_pool_refs:
            fip_uuid = fip_pool_ref['uuid']
            fip_pool_obj = self._vnc_lib.floating_ip_pool_read(id=fip_uuid)
            net_uuid = fip_pool_obj.parent_uuid
            net_obj = self._virtual_network_read(net_id=net_uuid)
            ret_net_objs.append(net_obj)
        return ret_net_objs
    #end _fip_pool_ref_networks

    # find floating ip pools defined by network
    def _fip_pool_list_network(self, net_id):
        """List the floating-ip pools parented to a network."""
        resp_dict = self._vnc_lib.floating_ip_pools_list(parent_id=net_id)
        return resp_dict['floating-ip-pools']
    #end _fip_pool_list_network
    def _port_list(self, port_objs):
        """Convert VMI objects into neutron port dicts.

        Batches the reads of all referenced networks, subnets,
        instance-ips and VMs into a memo dict first, so the per-port
        conversion avoids round trips to the backend.
        """
        ret_q_ports = []
        if not port_objs:
            return ret_q_ports
        memo_req = {'networks': {},
                    'subnets': {},
                    'virtual-machines': {},
                    'instance-ips': {}}
        # Read only the nets associated to port_objs
        net_refs = [port_obj.get_virtual_network_refs() for port_obj in port_objs]
        net_ids = [ref[0]['uuid'] for ref in net_refs if ref]
        net_objs = self._virtual_network_list(obj_uuids=net_ids,
                                              detail=True)
        for net_obj in net_objs:
            # dictionary of net_uuid to net_obj
            memo_req['networks'][net_obj.uuid] = net_obj
            subnets_info = self._virtual_network_to_subnets(net_obj)
            memo_req['subnets'][net_obj.uuid] = subnets_info
        # Read only the instance-ips associated to port_objs
        iip_objs = self._instance_ip_list(back_ref_id=
                                  [port_obj.uuid for port_obj in port_objs])
        for iip_obj in iip_objs:
            # dictionary of iip_uuid to iip_obj
            memo_req['instance-ips'][iip_obj.uuid] = iip_obj
        # Read only the VMs associated to port_objs
        vm_ids = []
        for port_obj in port_objs:
            if port_obj.parent_type == 'virtual-machine':
                # created in <1.06 schema with VM as port parent
                vm_id = self._vnc_lib.fq_name_to_id('virtual-machine',
                                                    port_obj.get_fq_name()[:-1])
                vm_ids.append(vm_id)
            else:
                vm_refs = port_obj.get_virtual_machine_refs() or []
                vm_ids.extend([ref['uuid'] for ref in vm_refs if ref])
        vm_objs = self._virtual_machine_list(obj_uuids=vm_ids)
        for vm_obj in vm_objs:
            # dictionary of vm_uuid to vm_obj
            memo_req['virtual-machines'][vm_obj.uuid] = vm_obj
        # Convert port from contrail to neutron repr with the memo cache
        for port_obj in port_objs:
            try:
                port_info = self._port_vnc_to_neutron(port_obj, memo_req)
            except NoIdError:
                # port's referents vanished mid-flight; skip it
                continue
            ret_q_ports.append(port_info)
        return ret_q_ports
    #end _port_list
def _port_list_network(self, network_ids, count=False):
ret_list = []
if not network_ids:
return ret_list
all_port_objs = self._virtual_machine_interface_list(
back_ref_id=network_ids)
return self._port_list(all_port_objs)
#end _port_list_network
    # find port ids on a given project
    def _port_list_project(self, project_id, count=False, is_admin=False):
        """List (or count) all ports owned by a project.

        With list-optimization enabled, VMIs are queried directly by
        parent project; otherwise ports are collected by walking the
        project's networks and their VMI back-refs.
        """
        if self._list_optimization_enabled:
            if count:
                port_objs = self._virtual_machine_interface_list(parent_id=project_id)
                return len(port_objs)
            # it is a list operation, not count
            all_port_objs = self._virtual_machine_interface_list(parent_id=project_id)
            return self._port_list(all_port_objs)
        else:
            if count:
                ret_val = 0
            else:
                ret_val = []
            net_objs = self._virtual_network_list(project_id,
                                                  fields=['virtual_machine_interface_back_refs'],
                                                  detail=True)
            if not net_objs:
                return ret_val
            if count:
                for net_obj in net_objs:
                    port_back_refs = (
                        net_obj.get_virtual_machine_interface_back_refs() or [])
                    ret_val = ret_val + len(port_back_refs)
                return ret_val
            net_ids = [net_obj.uuid for net_obj in net_objs]
            port_objs = self._virtual_machine_interface_list(back_ref_id=net_ids)
            return self._port_list(port_objs)
    #end _port_list_project
# Returns True if
# * no filter is specified
# OR
# * search-param is not present in filters
# OR
# * 1. search-param is present in filters AND
# 2. resource matches param-list AND
# 3. shared parameter in filters is False
def _filters_is_present(self, filters, key_name, match_value):
if filters:
if key_name in filters:
try:
if key_name == 'tenant_id':
filter_value = [str(uuid.UUID(t_id)) \
for t_id in filters[key_name]]
else:
filter_value = filters[key_name]
idx = filter_value.index(match_value)
except ValueError: # not in requested list
return False
return True
#end _filters_is_present
    def _network_read(self, net_uuid):
        """Read a VirtualNetwork by uuid."""
        net_obj = self._virtual_network_read(net_id=net_uuid)
        return net_obj
    #end _network_read

    def _subnet_vnc_create_mapping(self, subnet_id, subnet_key):
        """Persist the bidirectional subnet-id <-> subnet-key mapping in
        the api-server's kv store."""
        self._vnc_lib.kv_store(subnet_id, subnet_key)
        self._vnc_lib.kv_store(subnet_key, subnet_id)
    #end _subnet_vnc_create_mapping
    def _subnet_vnc_read_mapping(self, id=None, key=None):
        """Translate between neutron subnet id and vnc subnet key.

        Exactly one of id/key should be given.  Misses in the kv store
        (subnets created outside neutron) are lazily reconstructed and
        persisted; an id that matches no subnet raises SubnetNotFound.
        """
        def _subnet_id_to_key():
            # Brute-force scan of every network's ipam subnets for 'id'.
            all_net_objs = self._virtual_network_list(detail=True)
            for net_obj in all_net_objs:
                ipam_refs = net_obj.get_network_ipam_refs()
                net_uuid = net_obj.uuid
                for ipam_ref in ipam_refs or []:
                    subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
                    for subnet_vnc in subnet_vncs:
                        if subnet_vnc.subnet_uuid == id:
                            return self._subnet_vnc_get_key(subnet_vnc,
                                                            net_uuid)
            return None
        # _subnet_id_to_key

        if id:
            try:
                subnet_key = self._vnc_lib.kv_retrieve(id)
            except NoIdError:
                # contrail UI/api might have been used to create the subnet,
                # create id to key mapping now/here.
                subnet_key = _subnet_id_to_key()
                if not subnet_key:
                    self._raise_contrail_exception('SubnetNotFound',
                                                   subnet_id=id)
                # persist to avoid this calculation later
                self._subnet_vnc_create_mapping(id, subnet_key)
            return subnet_key
        if key:
            try:
                subnet_id = self._vnc_lib.kv_retrieve(key)
            except NoIdError:
                # contrail UI/api might have been used to create the subnet,
                # create key to id mapping now/here.
                subnet_vnc = self._subnet_read(key)
                subnet_id = subnet_vnc.uuid
                # persist to avoid this calculation later
                self._subnet_vnc_create_mapping(subnet_id, key)
            return subnet_id
    #end _subnet_vnc_read_mapping
    def _subnet_vnc_read_or_create_mapping(self, id, key):
        # if subnet was created outside of neutron handle it and create
        # neutron representation now (lazily)
        try:
            return self._subnet_vnc_read_mapping(key=key)
        except NoIdError:
            self._subnet_vnc_create_mapping(id, key)
            return self._subnet_vnc_read_mapping(key=key)
    #end _subnet_vnc_read_or_create_mapping

    def _subnet_vnc_delete_mapping(self, subnet_id, subnet_key):
        """Drop both directions of the subnet id/key kv mapping."""
        self._vnc_lib.kv_delete(subnet_id)
        self._vnc_lib.kv_delete(subnet_key)
    #end _subnet_vnc_delete_mapping
    def _subnet_vnc_get_key(self, subnet_vnc, net_id):
        """Build the canonical kv key '<net-uuid> <network-addr>/<len>'
        for a vnc ipam subnet."""
        pfx = subnet_vnc.subnet.get_ip_prefix()
        pfx_len = subnet_vnc.subnet.get_ip_prefix_len()
        network = IPNetwork('%s/%s' % (pfx, pfx_len))
        return '%s %s/%s' % (net_id, str(network.ip), pfx_len)
    #end _subnet_vnc_get_key
    def _subnet_read(self, subnet_key):
        """Find the vnc ipam subnet matching a '<net-uuid> <cidr>' key.

        Returns None when the network is gone or no subnet matches.
        """
        net_uuid = subnet_key.split(' ')[0]
        try:
            net_obj = self._virtual_network_read(net_id=net_uuid)
        except NoIdError:
            return None
        ipam_refs = net_obj.get_network_ipam_refs()
        if not ipam_refs:
            return None
        # TODO scope for optimization
        for ipam_ref in ipam_refs:
            subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
            for subnet_vnc in subnet_vncs:
                if self._subnet_vnc_get_key(subnet_vnc,
                                            net_uuid) == subnet_key:
                    return subnet_vnc
        return None
    #end _subnet_read
    def _ip_address_to_subnet_id(self, ip_addr, net_obj, memo_req=None):
        # find subnet-id for ip-addr, called when instance-ip created
        # first try if memo created during req can help avoid trips to
        # backend
        try:
            # memo_req may be None or missing keys; the broad except
            # below deliberately catches that and falls back to net_obj
            subnets_info = memo_req['subnets'][net_obj.uuid]
            for subnet_info in subnets_info:
                if IPAddress(ip_addr) in IPSet([subnet_info['cidr']]):
                    subnet_id = subnet_info['id']
                    return subnet_id
        except Exception:
            # memo didnt help, need to reach backend for info
            ipam_refs = net_obj.get_network_ipam_refs()
            for ipam_ref in ipam_refs or []:
                subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
                for subnet_vnc in subnet_vncs:
                    cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
                                      subnet_vnc.subnet.get_ip_prefix_len())
                    if IPAddress(ip_addr) in IPSet([cidr]):
                        subnet_id = subnet_vnc.subnet_uuid
                        return subnet_id
        return None
    #end _ip_address_to_subnet_id
# Returns a list of dicts of subnet-id:cidr for a VN
def _virtual_network_to_subnets(self, net_obj):
ret_subnets = []
ipam_refs = net_obj.get_network_ipam_refs()
if ipam_refs:
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
subnet_id = subnet_vnc.subnet_uuid
cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
ret_subnets.append({'id': subnet_id, 'cidr': cidr})
return ret_subnets
# end _virtual_network_to_subnets
    # Conversion routines between VNC and Quantum objects
    def _svc_instance_neutron_to_vnc(self, si_q, oper):
        """Build a vnc ServiceInstance from a neutron svc-instance dict.

        NOTE(review): only CREATE is handled; for any other oper the
        final return hits an unbound si_vnc (NameError) — confirm callers
        only pass CREATE.
        """
        if oper == CREATE:
            project_id = str(uuid.UUID(si_q['tenant_id']))
            project_obj = self._project_read(proj_id=project_id)
            net_id = si_q['external_net']
            ext_vn = self._vnc_lib.virtual_network_read(id=net_id)
            scale_out = ServiceScaleOutType(max_instances=1, auto_scale=False)
            si_prop = ServiceInstanceType(
                auto_policy=True,
                left_virtual_network="",
                right_virtual_network=ext_vn.get_fq_name_str(),
                scale_out=scale_out)
            si_prop.set_scale_out(scale_out)
            si_vnc = ServiceInstance(name=si_q['name'],
                                     parent_obj=project_obj,
                                     service_instance_properties=si_prop)
        return si_vnc
    #end _svc_instance_neutron_to_vnc
    def _svc_instance_vnc_to_neutron(self, si_obj):
        """Convert a vnc ServiceInstance to its neutron dict form."""
        si_q_dict = self._obj_to_dict(si_obj)
        # replace field names
        si_q_dict['id'] = si_obj.uuid
        si_q_dict['tenant_id'] = si_obj.parent_uuid.replace('-', '')
        si_q_dict['name'] = si_obj.name
        si_props = si_obj.get_service_instance_properties()
        if si_props:
            vn_fq_name = si_props.get_right_virtual_network()
            vn_obj = self._vnc_lib.virtual_network_read(fq_name_str=vn_fq_name)
            si_q_dict['external_net'] = str(vn_obj.uuid) + ' ' + vn_obj.name
            si_q_dict['internal_net'] = ''
        return si_q_dict
    #end _svc_instance_vnc_to_neutron
def _route_table_neutron_to_vnc(self, rt_q, oper):
if oper == CREATE:
project_id = str(uuid.UUID(rt_q['tenant_id']))
project_obj = self._project_read(proj_id=project_id)
rt_vnc = RouteTable(name=rt_q['name'],
parent_obj=project_obj)
if not rt_q['routes']:
return rt_vnc
for route in rt_q['routes']['route']:
try:
vm_obj = self._vnc_lib.virtual_machine_read(
id=route['next_hop'])
si_list = vm_obj.get_service_instance_refs()
if si_list:
fq_name = si_list[0]['to']
si_obj = self._vnc_lib.service_instance_read(
fq_name=fq_name)
route['next_hop'] = si_obj.get_fq_name_str()
except Exception as e:
pass
rt_vnc.set_routes(RouteTableType.factory(**rt_q['routes']))
else:
rt_vnc = self._vnc_lib.route_table_read(id=rt_q['id'])
for route in rt_q['routes']['route']:
try:
vm_obj = self._vnc_lib.virtual_machine_read(
id=route['next_hop'])
si_list = vm_obj.get_service_instance_refs()
if si_list:
fq_name = si_list[0]['to']
si_obj = self._vnc_lib.service_instance_read(
fq_name=fq_name)
route['next_hop'] = si_obj.get_fq_name_str()
except Exception as e:
pass
rt_vnc.set_routes(RouteTableType.factory(**rt_q['routes']))
return rt_vnc
#end _route_table_neutron_to_vnc
def _route_table_vnc_to_neutron(self, rt_obj):
rt_q_dict = self._obj_to_dict(rt_obj)
# replace field names
rt_q_dict['id'] = rt_obj.uuid
rt_q_dict['tenant_id'] = rt_obj.parent_uuid.replace('-', '')
rt_q_dict['name'] = rt_obj.name
rt_q_dict['fq_name'] = rt_obj.fq_name
# get route table routes
rt_q_dict['routes'] = rt_q_dict.pop('routes', None)
if rt_q_dict['routes']:
for route in rt_q_dict['routes']['route']:
if route['next_hop_type']:
route['next_hop'] = route['next_hop_type']
return rt_q_dict
#end _route_table_vnc_to_neutron
    def _security_group_vnc_to_neutron(self, sg_obj):
        """Convert a vnc SecurityGroup to its neutron dict form,
        including its rules and (optionally) contrail extension fields."""
        sg_q_dict = {}
        extra_dict = {}
        extra_dict['contrail:fq_name'] = sg_obj.get_fq_name()

        # replace field names
        sg_q_dict['id'] = sg_obj.uuid
        sg_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '')
        if not sg_obj.display_name:
            # for security groups created directly via vnc_api
            sg_q_dict['name'] = sg_obj.get_fq_name()[-1]
        else:
            sg_q_dict['name'] = sg_obj.display_name
        sg_q_dict['description'] = sg_obj.get_id_perms().get_description()

        # get security group rules
        sg_q_dict['security_group_rules'] = []
        rule_list = self.security_group_rules_read(sg_obj.uuid, sg_obj)
        if rule_list:
            for rule in rule_list:
                sg_q_dict['security_group_rules'].append(rule)

        if self._contrail_extensions_enabled:
            sg_q_dict.update(extra_dict)
        return sg_q_dict
    #end _security_group_vnc_to_neutron
    def _security_group_neutron_to_vnc(self, sg_q, oper):
        """Build (CREATE) or update a vnc SecurityGroup from its neutron
        dict; name and description are applied in both cases."""
        if oper == CREATE:
            project_id = str(uuid.UUID(sg_q['tenant_id']))
            project_obj = self._project_read(proj_id=project_id)
            id_perms = IdPermsType(enable=True,
                                   description=sg_q.get('description'))
            sg_vnc = SecurityGroup(name=sg_q['name'],
                                   parent_obj=project_obj,
                                   id_perms=id_perms)
        else:
            sg_vnc = self._vnc_lib.security_group_read(id=sg_q['id'])

        if 'name' in sg_q and sg_q['name']:
            sg_vnc.display_name = sg_q['name']
        if 'description' in sg_q:
            id_perms = sg_vnc.get_id_perms()
            id_perms.set_description(sg_q['description'])
            sg_vnc.set_id_perms(id_perms)
        return sg_vnc
    #end _security_group_neutron_to_vnc
    def _security_group_rule_vnc_to_neutron(self, sg_id, sg_rule, sg_obj=None):
        """Convert a vnc PolicyRuleType into a neutron SG-rule dict.

        Direction is derived from which endpoint is 'local': a local
        source means egress, a local destination means ingress; anything
        else is reported as SecurityGroupRuleNotFound.  The remote side
        becomes either remote_ip_prefix or remote_group_id.
        """
        sgr_q_dict = {}
        if sg_id is None:
            return sgr_q_dict
        if not sg_obj:
            try:
                sg_obj = self._vnc_lib.security_group_read(id=sg_id)
            except NoIdError:
                self._raise_contrail_exception('SecurityGroupNotFound',
                                               id=sg_id)

        remote_cidr = None
        remote_sg_uuid = None
        # only the first address of each endpoint list is considered
        saddr = sg_rule.get_src_addresses()[0]
        daddr = sg_rule.get_dst_addresses()[0]
        if saddr.get_security_group() == 'local':
            direction = 'egress'
            addr = daddr
        elif daddr.get_security_group() == 'local':
            direction = 'ingress'
            addr = saddr
        else:
            self._raise_contrail_exception('SecurityGroupRuleNotFound',
                                           id=sg_rule.get_rule_uuid())

        if addr.get_subnet():
            remote_cidr = '%s/%s' % (addr.get_subnet().get_ip_prefix(),
                                     addr.get_subnet().get_ip_prefix_len())
        elif addr.get_security_group():
            if addr.get_security_group() != 'any' and \
                addr.get_security_group() != 'local':
                remote_sg = addr.get_security_group()
                try:
                    # avoid a backend read when the rule refers to itself
                    if remote_sg != ':'.join(sg_obj.get_fq_name()):
                        remote_sg_obj = self._vnc_lib.security_group_read(fq_name_str=remote_sg)
                    else:
                        remote_sg_obj = sg_obj
                    remote_sg_uuid = remote_sg_obj.uuid
                except NoIdError:
                    pass

        sgr_q_dict['id'] = sg_rule.get_rule_uuid()
        sgr_q_dict['tenant_id'] = sg_obj.parent_uuid.replace('-', '')
        sgr_q_dict['security_group_id'] = sg_obj.uuid
        sgr_q_dict['ethertype'] = sg_rule.get_ethertype()
        sgr_q_dict['direction'] = direction
        sgr_q_dict['protocol'] = sg_rule.get_protocol()
        sgr_q_dict['port_range_min'] = sg_rule.get_dst_ports()[0].\
            get_start_port()
        sgr_q_dict['port_range_max'] = sg_rule.get_dst_ports()[0].\
            get_end_port()
        sgr_q_dict['remote_ip_prefix'] = remote_cidr
        sgr_q_dict['remote_group_id'] = remote_sg_uuid

        return sgr_q_dict
    #end _security_group_rule_vnc_to_neutron
    def _security_group_rule_neutron_to_vnc(self, sgr_q, oper):
        """Build a vnc PolicyRuleType from a neutron SG-rule dict.

        Only CREATE is handled; any other oper returns None.  The rule's
        direction is expressed by placing the 'local' endpoint on the
        src (ingress) or dst (egress) side — both branches deliberately
        use dir '>' (presumably vnc rules are always one-way; confirm).
        """
        if oper == CREATE:
            port_min = 0
            port_max = 65535
            if sgr_q['port_range_min'] is not None:
                port_min = sgr_q['port_range_min']
            if sgr_q['port_range_max'] is not None:
                port_max = sgr_q['port_range_max']

            # remote endpoint: cidr, referenced SG, or 'any'
            endpt = [AddressType(security_group='any')]
            if sgr_q['remote_ip_prefix']:
                cidr = sgr_q['remote_ip_prefix'].split('/')
                pfx = cidr[0]
                pfx_len = int(cidr[1])
                endpt = [AddressType(subnet=SubnetType(pfx, pfx_len))]
            elif sgr_q['remote_group_id']:
                try:
                    sg_obj = self._vnc_lib.security_group_read(
                        id=sgr_q['remote_group_id'])
                except NoIdError:
                    self._raise_contrail_exception('SecurityGroupNotFound',
                                                   id=sgr_q['remote_group_id'])
                endpt = [AddressType(security_group=sg_obj.get_fq_name_str())]

            if sgr_q['direction'] == 'ingress':
                dir = '>'
                local = endpt
                remote = [AddressType(security_group='local')]
            else:
                dir = '>'
                remote = endpt
                local = [AddressType(security_group='local')]

            if not sgr_q['protocol']:
                sgr_q['protocol'] = 'any'

            # default ethertype only when no remote was specified
            if not sgr_q['remote_ip_prefix'] and not sgr_q['remote_group_id']:
                if not sgr_q['ethertype']:
                    sgr_q['ethertype'] = 'IPv4'

            sgr_uuid = str(uuid.uuid4())

            rule = PolicyRuleType(rule_uuid=sgr_uuid, direction=dir,
                                  protocol=sgr_q['protocol'],
                                  src_addresses=local,
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=remote,
                                  dst_ports=[PortType(port_min, port_max)],
                                  ethertype=sgr_q['ethertype'])
            return rule
    #end _security_group_rule_neutron_to_vnc
def _network_neutron_to_vnc(self, network_q, oper):
    """Build/refresh a vnc VirtualNetwork from a neutron network dict.

    For CREATE a new VirtualNetwork is constructed under the request's
    tenant project; otherwise the existing network is read by id and,
    for UPDATE, mutated in place. Maps the neutron attributes (shared,
    router:external, name, admin_state_up) plus the contrail extension
    keys 'contrail:policys' and 'contrail:route_table' onto the vnc
    object, and returns the (not yet persisted) object.
    """
    net_name = network_q.get('name', None)
    try:
        external_attr = network_q['router:external']
    except KeyError:
        # Sentinel distinguishes "attribute not supplied" from an
        # explicit False.
        external_attr = attr.ATTR_NOT_SPECIFIED
    if oper == CREATE:
        project_id = str(uuid.UUID(network_q['tenant_id']))
        project_obj = self._project_read(proj_id=project_id)
        id_perms = IdPermsType(enable=True)
        net_obj = VirtualNetwork(net_name, project_obj, id_perms=id_perms)
        if external_attr == attr.ATTR_NOT_SPECIFIED:
            net_obj.router_external = False
        else:
            net_obj.router_external = external_attr
        if 'shared' in network_q:
            net_obj.is_shared = network_q['shared']
        else:
            net_obj.is_shared = False
    else:  # READ/UPDATE/DELETE
        net_obj = self._virtual_network_read(net_id=network_q['id'])
        if oper == UPDATE:
            if 'shared' in network_q:
                net_obj.is_shared = network_q['shared']
            if external_attr is not attr.ATTR_NOT_SPECIFIED:
                net_obj.router_external = external_attr

    if 'name' in network_q and network_q['name']:
        net_obj.display_name = network_q['name']

    id_perms = net_obj.get_id_perms()
    if 'admin_state_up' in network_q:
        id_perms.enable = network_q['admin_state_up']
        net_obj.set_id_perms(id_perms)

    if 'contrail:policys' in network_q:
        policy_fq_names = network_q['contrail:policys']
        # reset and add with newly specified list
        net_obj.set_network_policy_list([], [])
        seq = 0
        for p_fq_name in policy_fq_names:
            # fq_name is expected to be [domain, project, policy]
            domain_name, project_name, policy_name = p_fq_name
            domain_obj = Domain(domain_name)
            project_obj = Project(project_name, domain_obj)
            policy_obj = NetworkPolicy(policy_name, project_obj)
            net_obj.add_network_policy(policy_obj,
                                       VirtualNetworkPolicyType(
                                           sequence=SequenceType(seq, 0)))
            seq = seq + 1

    if 'contrail:route_table' in network_q:
        rt_fq_name = network_q['contrail:route_table']
        if rt_fq_name:
            try:
                rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
                net_obj.set_route_table(rt_obj)
            except NoIdError:
                # TODO add route table specific exception
                self._raise_contrail_exception('NetworkNotFound',
                                               net_id=net_obj.uuid)
    return net_obj
#end _network_neutron_to_vnc
def _network_vnc_to_neutron(self, net_obj, net_repr='SHOW'):
    """Convert a vnc VirtualNetwork into a neutron network dict.

    Contrail-specific attributes (fq_name, attached policies, route
    table, per-subnet ipam info) are collected in extra_dict and merged
    into the result only when contrail extensions are enabled.
    """
    net_q_dict = {}
    extra_dict = {}

    id_perms = net_obj.get_id_perms()
    # NOTE(review): perms is read but never used in this method.
    perms = id_perms.permissions
    net_q_dict['id'] = net_obj.uuid

    if not net_obj.display_name:
        # for nets created directly via vnc_api
        net_q_dict['name'] = net_obj.get_fq_name()[-1]
    else:
        net_q_dict['name'] = net_obj.display_name

    extra_dict['contrail:fq_name'] = net_obj.get_fq_name()
    net_q_dict['tenant_id'] = net_obj.parent_uuid.replace('-', '')
    net_q_dict['admin_state_up'] = id_perms.enable
    if net_obj.is_shared:
        net_q_dict['shared'] = True
    else:
        net_q_dict['shared'] = False
    net_q_dict['status'] = (constants.NET_STATUS_ACTIVE if id_perms.enable
                            else constants.NET_STATUS_DOWN)
    if net_obj.router_external:
        net_q_dict['router:external'] = True
    else:
        net_q_dict['router:external'] = False

    if net_repr == 'SHOW' or net_repr == 'LIST':
        # Instance count is not tracked here; always reported as 0.
        extra_dict['contrail:instance_count'] = 0

    net_policy_refs = net_obj.get_network_policy_refs()
    if net_policy_refs:
        # Policies ordered by their (major, minor) sequence numbers.
        sorted_refs = sorted(
            net_policy_refs,
            key=lambda t: (t['attr'].sequence.major,
                           t['attr'].sequence.minor))
        extra_dict['contrail:policys'] = \
            [np_ref['to'] for np_ref in sorted_refs]

    rt_refs = net_obj.get_route_table_refs()
    if rt_refs:
        extra_dict['contrail:route_table'] = \
            [rt_ref['to'] for rt_ref in rt_refs]

    ipam_refs = net_obj.get_network_ipam_refs()
    net_q_dict['subnets'] = []
    if ipam_refs:
        extra_dict['contrail:subnet_ipam'] = []
        for ipam_ref in ipam_refs:
            subnets = ipam_ref['attr'].get_ipam_subnets()
            for subnet in subnets:
                sn_dict = self._subnet_vnc_to_neutron(subnet, net_obj,
                                                      ipam_ref['to'])
                net_q_dict['subnets'].append(sn_dict['id'])
                sn_ipam = {}
                sn_ipam['subnet_cidr'] = sn_dict['cidr']
                sn_ipam['ipam_fq_name'] = ipam_ref['to']
                extra_dict['contrail:subnet_ipam'].append(sn_ipam)

    if self._contrail_extensions_enabled:
        net_q_dict.update(extra_dict)
    return net_q_dict
#end _network_vnc_to_neutron
def _subnet_neutron_to_vnc(self, subnet_q):
    """Convert a neutron subnet dict into a vnc IpamSubnetType.

    Validates that the cidr is IPv4/IPv6 and matches the requested
    ip_version, then maps gateway, allocation pools, DNS nameservers
    (carried as DHCP option '6'), host routes and the dhcp flag.
    A fresh uuid is always generated for the subnet.
    """
    cidr = IPNetwork(subnet_q['cidr'])
    pfx = str(cidr.network)
    pfx_len = int(cidr.prefixlen)
    if cidr.version != 4 and cidr.version != 6:
        self._raise_contrail_exception('BadRequest',
                                       resource='subnet',
                                       msg='Unknown IP family')
    elif cidr.version != int(subnet_q['ip_version']):
        msg = _("cidr '%s' does not match the ip_version '%s'") \
            % (subnet_q['cidr'], subnet_q['ip_version'])
        self._raise_contrail_exception('InvalidInput', error_message=msg)

    if 'gateway_ip' in subnet_q:
        default_gw = subnet_q['gateway_ip']
    else:
        # Assigned first+1 from cidr
        default_gw = str(IPAddress(cidr.first + 1))

    if 'allocation_pools' in subnet_q:
        alloc_pools = subnet_q['allocation_pools']
    else:
        # Assigned by address manager
        alloc_pools = None

    # DNS nameservers are encoded as DHCP option 6, space-separated.
    dhcp_option_list = None
    if 'dns_nameservers' in subnet_q and subnet_q['dns_nameservers']:
        dhcp_options = []
        dns_servers = " ".join(subnet_q['dns_nameservers'])
        if dns_servers:
            dhcp_options.append(DhcpOptionType(dhcp_option_name='6',
                                               dhcp_option_value=dns_servers))
        if dhcp_options:
            dhcp_option_list = DhcpOptionsListType(dhcp_options)

    host_route_list = None
    if 'host_routes' in subnet_q and subnet_q['host_routes']:
        host_routes = []
        for host_route in subnet_q['host_routes']:
            host_routes.append(RouteType(prefix=host_route['destination'],
                                         next_hop=host_route['nexthop']))
        if host_routes:
            host_route_list = RouteTableType(host_routes)

    if 'enable_dhcp' in subnet_q:
        dhcp_config = subnet_q['enable_dhcp']
    else:
        dhcp_config = None
    sn_name = subnet_q.get('name')
    subnet_vnc = IpamSubnetType(subnet=SubnetType(pfx, pfx_len),
                                default_gateway=default_gw,
                                enable_dhcp=dhcp_config,
                                dns_nameservers=None,
                                allocation_pools=alloc_pools,
                                addr_from_start=True,
                                dhcp_option_list=dhcp_option_list,
                                host_routes=host_route_list,
                                subnet_name=sn_name,
                                subnet_uuid=str(uuid.uuid4()))
    return subnet_vnc
#end _subnet_neutron_to_vnc
def _subnet_vnc_to_neutron(self, subnet_vnc, net_obj, ipam_fq_name):
    """Convert a vnc IpamSubnetType into a neutron subnet dict."""
    sn_q_dict = {}
    sn_name = subnet_vnc.get_subnet_name()
    if sn_name is not None:
        sn_q_dict['name'] = sn_name
    else:
        sn_q_dict['name'] = ''
    sn_q_dict['tenant_id'] = net_obj.parent_uuid.replace('-', '')
    sn_q_dict['network_id'] = net_obj.uuid
    sn_q_dict['ipv6_ra_mode'] = None
    sn_q_dict['ipv6_address_mode'] = None

    cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
                      subnet_vnc.subnet.get_ip_prefix_len())
    sn_q_dict['cidr'] = cidr
    sn_q_dict['ip_version'] = IPNetwork(cidr).version  # 4 or 6

    # read from useragent kv only for old subnets created
    # before schema had uuid in subnet
    sn_id = subnet_vnc.subnet_uuid
    if not sn_id:
        subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_obj.uuid)
        # NOTE(review): id=subnet_vnc.subnet_uuid is always falsy on this
        # branch, so the mapping is effectively resolved by key alone.
        sn_id = self._subnet_vnc_read_or_create_mapping(id=subnet_vnc.subnet_uuid,
                                                        key=subnet_key)
    sn_q_dict['id'] = sn_id

    sn_q_dict['gateway_ip'] = subnet_vnc.default_gateway

    # NOTE(review): assumes get_allocation_pools() returns an iterable
    # (possibly empty) — confirm against the vnc_api schema.
    alloc_obj_list = subnet_vnc.get_allocation_pools()
    allocation_pools = []
    for alloc_obj in alloc_obj_list:
        first_ip = alloc_obj.get_start()
        last_ip = alloc_obj.get_end()
        alloc_dict = {'first_ip': first_ip, 'last_ip': last_ip}
        allocation_pools.append(alloc_dict)
    if allocation_pools is None or not allocation_pools:
        # No explicit pools: synthesize one spanning the cidr, skipping
        # the gateway when it sits on the first usable address.
        if (int(IPNetwork(sn_q_dict['gateway_ip']).network) ==
                int(IPNetwork(cidr).network + 1)):
            first_ip = str(IPNetwork(cidr).network + 2)
        else:
            first_ip = str(IPNetwork(cidr).network + 1)
        last_ip = str(IPNetwork(cidr).broadcast - 1)
        cidr_pool = {'first_ip': first_ip, 'last_ip': last_ip}
        allocation_pools.append(cidr_pool)
    sn_q_dict['allocation_pools'] = allocation_pools

    sn_q_dict['enable_dhcp'] = subnet_vnc.get_enable_dhcp()

    # DNS servers live in DHCP option 6 as a space-separated string.
    nameserver_dict_list = list()
    dhcp_option_list = subnet_vnc.get_dhcp_option_list()
    if dhcp_option_list:
        for dhcp_option in dhcp_option_list.dhcp_option:
            if dhcp_option.get_dhcp_option_name() == '6':
                dns_servers = dhcp_option.get_dhcp_option_value().split()
                for dns_server in dns_servers:
                    nameserver_entry = {'address': dns_server,
                                        'subnet_id': sn_id}
                    nameserver_dict_list.append(nameserver_entry)
    sn_q_dict['dns_nameservers'] = nameserver_dict_list

    host_route_dict_list = list()
    host_routes = subnet_vnc.get_host_routes()
    if host_routes:
        for host_route in host_routes.route:
            host_route_entry = {'destination': host_route.get_prefix(),
                                'nexthop': host_route.get_next_hop(),
                                'subnet_id': sn_id}
            host_route_dict_list.append(host_route_entry)
    sn_q_dict['routes'] = host_route_dict_list

    if net_obj.is_shared:
        sn_q_dict['shared'] = True
    else:
        sn_q_dict['shared'] = False
    return sn_q_dict
#end _subnet_vnc_to_neutron
def _ipam_neutron_to_vnc(self, ipam_q, oper):
    """Build/refresh a vnc NetworkIpam from a neutron ipam dict.

    For CREATE a new NetworkIpam is constructed under the request's
    tenant project; otherwise the existing ipam is read by id. When a
    'mgmt' section is supplied it is converted wholesale into an
    IpamType and set on the object.
    """
    ipam_name = ipam_q.get('name', None)
    if oper == CREATE:
        project_id = str(uuid.UUID(ipam_q['tenant_id']))
        project_obj = self._project_read(proj_id=project_id)
        ipam_obj = NetworkIpam(ipam_name, project_obj)
    else:  # READ/UPDATE/DELETE
        ipam_obj = self._vnc_lib.network_ipam_read(id=ipam_q['id'])

    # BUG FIX: the original indexed ipam_q['mgmt'] directly, raising
    # KeyError for requests that omit the mgmt section entirely.
    if ipam_q.get('mgmt'):
        ipam_obj.set_network_ipam_mgmt(IpamType.factory(**ipam_q['mgmt']))
    return ipam_obj
#end _ipam_neutron_to_vnc
def _ipam_vnc_to_neutron(self, ipam_obj):
ipam_q_dict = self._obj_to_dict(ipam_obj)
# replace field names
ipam_q_dict['id'] = ipam_q_dict.pop('uuid')
ipam_q_dict['name'] = ipam_obj.name
ipam_q_dict['tenant_id'] = ipam_obj.parent_uuid.replace('-', '')
ipam_q_dict['mgmt'] = ipam_q_dict.pop('network_ipam_mgmt', None)
net_back_refs = ipam_q_dict.pop('virtual_network_back_refs', None)
if net_back_refs:
ipam_q_dict['nets_using'] = []
for net_back_ref in net_back_refs:
net_fq_name = net_back_ref['to']
ipam_q_dict['nets_using'].append(net_fq_name)
return ipam_q_dict
#end _ipam_vnc_to_neutron
def _policy_neutron_to_vnc(self, policy_q, oper):
    """Build/refresh a vnc NetworkPolicy from a neutron policy dict.

    CREATE constructs a new policy under the request's tenant project;
    otherwise the existing policy is read by id. The policy entries are
    replaced from the request in both cases.
    """
    policy_name = policy_q.get('name', None)
    if oper == CREATE:
        project_id = str(uuid.UUID(policy_q['tenant_id']))
        project_obj = self._project_read(proj_id=project_id)
        policy_obj = NetworkPolicy(policy_name, project_obj)
    else:  # READ/UPDATE/DELETE
        policy_obj = self._vnc_lib.network_policy_read(id=policy_q['id'])

    # NOTE(review): 'entries' is accessed unconditionally and raises
    # KeyError if the caller omits it — confirm all callers supply it.
    policy_obj.set_network_policy_entries(
        PolicyEntriesType.factory(**policy_q['entries']))
    return policy_obj
#end _policy_neutron_to_vnc
def _policy_vnc_to_neutron(self, policy_obj):
policy_q_dict = self._obj_to_dict(policy_obj)
# replace field names
policy_q_dict['id'] = policy_q_dict.pop('uuid')
policy_q_dict['name'] = policy_obj.name
policy_q_dict['tenant_id'] = policy_obj.parent_uuid.replace('-', '')
policy_q_dict['entries'] = policy_q_dict.pop('network_policy_entries',
None)
net_back_refs = policy_obj.get_virtual_network_back_refs()
if net_back_refs:
policy_q_dict['nets_using'] = []
for net_back_ref in net_back_refs:
net_fq_name = net_back_ref['to']
policy_q_dict['nets_using'].append(net_fq_name)
return policy_q_dict
#end _policy_vnc_to_neutron
def _router_neutron_to_vnc(self, router_q, oper):
    """Build/refresh a vnc LogicalRouter from a neutron router dict.

    CREATE constructs a new router (enabled) under the request's tenant
    project; otherwise the existing router is read by id. admin_state_up
    and name are mapped onto id_perms.enable and display_name.
    """
    rtr_name = router_q.get('name', None)
    if oper == CREATE:
        project_id = str(uuid.UUID(router_q['tenant_id']))
        project_obj = self._project_read(proj_id=project_id)
        id_perms = IdPermsType(enable=True)
        rtr_obj = LogicalRouter(rtr_name, project_obj, id_perms=id_perms)
    else:  # READ/UPDATE/DELETE
        rtr_obj = self._logical_router_read(rtr_id=router_q['id'])

    id_perms = rtr_obj.get_id_perms()
    if 'admin_state_up' in router_q:
        id_perms.enable = router_q['admin_state_up']
        rtr_obj.set_id_perms(id_perms)

    if 'name' in router_q and router_q['name']:
        rtr_obj.display_name = router_q['name']
    return rtr_obj
#end _router_neutron_to_vnc
def _get_external_gateway_info(self, rtr_obj):
vn_refs = rtr_obj.get_virtual_network_refs()
if not vn_refs:
return None
return vn_refs[0]['uuid']
def _router_vnc_to_neutron(self, rtr_obj, rtr_repr='SHOW'):
    """Convert a vnc LogicalRouter into a neutron router dict."""
    rtr_q_dict = {}
    extra_dict = {}
    extra_dict['contrail:fq_name'] = rtr_obj.get_fq_name()

    rtr_q_dict['id'] = rtr_obj.uuid
    if not rtr_obj.display_name:
        # for routers created directly via vnc_api
        rtr_q_dict['name'] = rtr_obj.get_fq_name()[-1]
    else:
        rtr_q_dict['name'] = rtr_obj.display_name
    rtr_q_dict['tenant_id'] = rtr_obj.parent_uuid.replace('-', '')
    rtr_q_dict['admin_state_up'] = rtr_obj.get_id_perms().enable
    rtr_q_dict['shared'] = False
    rtr_q_dict['status'] = constants.NET_STATUS_ACTIVE
    rtr_q_dict['gw_port_id'] = None

    ext_net_uuid = self._get_external_gateway_info(rtr_obj)
    if not ext_net_uuid:
        rtr_q_dict['external_gateway_info'] = None
    else:
        # enable_snat is reported True unconditionally here.
        rtr_q_dict['external_gateway_info'] = {'network_id': ext_net_uuid,
                                               'enable_snat': True}

    if self._contrail_extensions_enabled:
        rtr_q_dict.update(extra_dict)
    return rtr_q_dict
#end _router_vnc_to_neutron
def _floatingip_neutron_to_vnc(self, context, fip_q, oper):
    """Build/refresh a vnc FloatingIp from a neutron floatingip dict.

    CREATE allocates the fip from the network's first floating-ip pool;
    otherwise the existing fip is read by id. Port association, tenant
    access check (for non-admin contexts), and fixed_ip_address
    selection/reset are then applied.
    """
    if oper == CREATE:
        # TODO for now create from default pool, later
        # use first available pool on net
        net_id = fip_q['floating_network_id']
        try:
            fq_name = self._fip_pool_list_network(net_id)[0]['fq_name']
        except IndexError:
            # IndexError could happens when an attempt to
            # retrieve a floating ip pool from a private network.
            msg = "Network %s doesn't provide a floatingip pool" % net_id
            self._raise_contrail_exception('BadRequest',
                                           resource="floatingip", msg=msg)
        fip_pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=fq_name)
        # Name doubles as the uuid for the new fip object.
        fip_name = str(uuid.uuid4())
        fip_obj = FloatingIp(fip_name, fip_pool_obj)
        fip_obj.uuid = fip_name

        proj_id = str(uuid.UUID(fip_q['tenant_id']))
        proj_obj = self._project_read(proj_id=proj_id)
        fip_obj.set_project(proj_obj)
    else:  # READ/UPDATE/DELETE
        fip_obj = self._vnc_lib.floating_ip_read(id=fip_q['id'])

    port_id = fip_q.get('port_id')
    if port_id:
        try:
            port_obj = self._virtual_machine_interface_read(port_id=port_id)
            if context and not context['is_admin']:
                # Non-admin callers may only associate ports owned by
                # their own tenant; pretend the port doesn't exist
                # otherwise.
                port_tenant_id = self._get_obj_tenant_id('port', port_id)
                if port_tenant_id.replace('-', '') != context['tenant']:
                    raise NoIdError(port_id)
        except NoIdError:
            self._raise_contrail_exception('PortNotFound',
                                           resource='floatingip',
                                           port_id=port_id)
        fip_obj.set_virtual_machine_interface(port_obj)
    else:
        # No port given: disassociate the fip from any port.
        fip_obj.set_virtual_machine_interface_list([])

    if fip_q.get('fixed_ip_address'):
        fip_obj.set_floating_ip_fixed_ip_address(fip_q['fixed_ip_address'])
    else:
        # fixed_ip_address not specified, pick from port_obj in create,
        # reset in case of disassociate
        port_refs = fip_obj.get_virtual_machine_interface_refs()
        if not port_refs:
            fip_obj.set_floating_ip_fixed_ip_address(None)
        else:
            port_obj = self._virtual_machine_interface_read(
                port_id=port_refs[0]['uuid'],
                fields=['instance_ip_back_refs'])
            iip_refs = port_obj.get_instance_ip_back_refs()
            if iip_refs:
                iip_obj = self._instance_ip_read(instance_ip_id=iip_refs[0]['uuid'])
                fip_obj.set_floating_ip_fixed_ip_address(iip_obj.get_instance_ip_address())
    return fip_obj
#end _floatingip_neutron_to_vnc
def _floatingip_vnc_to_neutron(self, fip_obj):
    """Convert a vnc FloatingIp into a neutron floatingip dict.

    Derives the floating network from the fip's fq_name and finds the
    associated port/router (if any) by scanning the tenant's routers
    for an interface on the same network as the associated port.
    """
    fip_q_dict = {}
    # fq_name is [..., network, fip-pool, fip]; dropping the last two
    # elements yields the floating network's fq_name.
    floating_net_id = self._vnc_lib.fq_name_to_id('virtual-network',
                                                  fip_obj.get_fq_name()[:-2])
    tenant_id = fip_obj.get_project_refs()[0]['uuid'].replace('-', '')

    port_id = None
    router_id = None
    port_obj = None
    port_refs = fip_obj.get_virtual_machine_interface_refs()
    if port_refs:
        # Use the first referenced port that can still be read; stale
        # refs are skipped silently.
        for port_ref in port_refs:
            try:
                port_obj = self._virtual_machine_interface_read(
                    port_id=port_ref['uuid'])
                port_id = port_ref['uuid']
                break
            except NoIdError:
                pass

    if port_obj:
        port_net_id = port_obj.get_virtual_network_refs()[0]['uuid']
        # find router_id from port
        router_list = self._router_list_project(tenant_id, detail=True)
        for router_obj in router_list or []:
            for vmi in (router_obj.get_virtual_machine_interface_refs()
                        or []):
                vmi_obj = self._virtual_machine_interface_read(
                    port_id=vmi['uuid'])
                if (vmi_obj.get_virtual_network_refs()[0]['uuid'] ==
                        port_net_id):
                    router_id = router_obj.uuid
                    break
            if router_id:
                break

    fip_q_dict['id'] = fip_obj.uuid
    fip_q_dict['tenant_id'] = tenant_id
    fip_q_dict['floating_ip_address'] = fip_obj.get_floating_ip_address()
    fip_q_dict['floating_network_id'] = floating_net_id
    fip_q_dict['router_id'] = router_id
    fip_q_dict['port_id'] = port_id
    fip_q_dict['fixed_ip_address'] = fip_obj.get_floating_ip_fixed_ip_address()
    fip_q_dict['status'] = constants.PORT_STATUS_ACTIVE
    return fip_q_dict
#end _floatingip_vnc_to_neutron
def _port_set_vm_instance(self, port_obj, instance_name):
""" This function also deletes the old virtual_machine object
associated with the port (if any) after the new virtual_machine
object is associated with it.
"""
vm_refs = port_obj.get_virtual_machine_refs()
delete_vm_list = []
for vm_ref in vm_refs or []:
if vm_ref['to'] != [instance_name]:
delete_vm_list.append(vm_ref)
if instance_name:
try:
instance_obj = self._ensure_instance_exists(instance_name)
port_obj.set_virtual_machine(instance_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='port', msg=str(e))
else:
port_obj.set_virtual_machine_list([])
if delete_vm_list:
self._virtual_machine_interface_update(port_obj)
for vm_ref in delete_vm_list:
try:
self._vnc_lib.virtual_machine_delete(id=vm_ref['uuid'])
except RefsExistError:
pass
def _create_no_rule_sg(self):
    """Create and return the special empty ('no rule') security group.

    The group lives under the well-known SG_NO_RULE_FQ_NAME project and
    carries an empty rule set; it is attached to ports that explicitly
    request no security groups.
    """
    domain_obj = Domain(SG_NO_RULE_FQ_NAME[0])
    proj_obj = Project(SG_NO_RULE_FQ_NAME[1], domain_obj)
    sg_rules = PolicyEntriesType()
    id_perms = IdPermsType(enable=True,
                           description="Security group with no rules",
                           user_visible=False)
    sg_obj = SecurityGroup(name=SG_NO_RULE_NAME,
                           parent_obj=proj_obj,
                           security_group_entries=sg_rules,
                           id_perms=id_perms)
    # The returned uuid is not needed (it was previously bound to an
    # unused local); create purely for the side effect.
    self._vnc_lib.security_group_create(sg_obj)
    return sg_obj
# end _create_no_rule_sg
def _get_no_rule_security_group(self):
    """Return the shared 'no rule' security group, creating it on demand."""
    try:
        return self._vnc_lib.security_group_read(fq_name=SG_NO_RULE_FQ_NAME)
    except NoIdError:
        # First use on this deployment: create the group lazily.
        return self._create_no_rule_sg()
# end _get_no_rule_security_group
def _port_neutron_to_vnc(self, port_q, net_obj, oper):
    """Build/refresh a vnc VirtualMachineInterface from a neutron port dict.

    For CREATE a new VMI is constructed under the request's tenant
    project and attached to net_obj; otherwise the existing VMI is read
    by id. Device owner/id, security groups, admin state, extra DHCP
    options, allowed address pairs and fixed-ip conflicts are then
    mapped/validated. Returns the (not yet persisted) object.
    """
    if oper == CREATE:
        project_id = str(uuid.UUID(port_q['tenant_id']))
        proj_obj = self._project_read(proj_id=project_id)
        id_perms = IdPermsType(enable=True)
        port_uuid = str(uuid.uuid4())
        if port_q.get('name'):
            port_name = port_q['name']
        else:
            # Unnamed ports use their uuid as the name.
            port_name = port_uuid
        port_obj = VirtualMachineInterface(port_name, proj_obj,
                                           id_perms=id_perms)
        port_obj.uuid = port_uuid
        port_obj.set_virtual_network(net_obj)
        if ('mac_address' in port_q and port_q['mac_address']):
            mac_addrs_obj = MacAddressesType()
            mac_addrs_obj.set_mac_address([port_q['mac_address']])
            port_obj.set_virtual_machine_interface_mac_addresses(mac_addrs_obj)
        port_obj.set_security_group_list([])
        # NOTE(review): the __class__ is object test appears to detect
        # neutron's ATTR_NOT_SPECIFIED sentinel (a bare object()) —
        # confirm against the neutron attributes module.
        if ('security_groups' not in port_q or
                port_q['security_groups'].__class__ is object):
            sg_obj = SecurityGroup("default", proj_obj)
            port_obj.add_security_group(sg_obj)
    else:  # READ/UPDATE/DELETE
        port_obj = self._virtual_machine_interface_read(port_id=port_q['id'])

    if 'name' in port_q and port_q['name']:
        port_obj.display_name = port_q['name']

    # Router interface/gateway ports are not backed by a VM instance.
    if (port_q.get('device_owner') != constants.DEVICE_OWNER_ROUTER_INTF
            and port_q.get('device_owner') != constants.DEVICE_OWNER_ROUTER_GW
            and 'device_id' in port_q):
        self._port_set_vm_instance(port_obj, port_q.get('device_id'))

    if 'device_owner' in port_q:
        port_obj.set_virtual_machine_interface_device_owner(port_q.get('device_owner'))

    if 'security_groups' in port_q:
        # Replace the whole security-group list from the request.
        port_obj.set_security_group_list([])
        for sg_id in port_q.get('security_groups') or []:
            # TODO optimize to not read sg (only uuid/fqn needed)
            sg_obj = self._vnc_lib.security_group_read(id=sg_id)
            port_obj.add_security_group(sg_obj)
        # When there is no-security-group for a port,the internal
        # no_rule group should be used.
        if not port_q['security_groups']:
            sg_obj = self._get_no_rule_security_group()
            port_obj.add_security_group(sg_obj)

    id_perms = port_obj.get_id_perms()
    if 'admin_state_up' in port_q:
        id_perms.enable = port_q['admin_state_up']
        port_obj.set_id_perms(id_perms)

    if ('extra_dhcp_opts' in port_q):
        dhcp_options = []
        if port_q['extra_dhcp_opts']:
            for option_pair in port_q['extra_dhcp_opts']:
                option = \
                    DhcpOptionType(dhcp_option_name=option_pair['opt_name'],
                                   dhcp_option_value=option_pair['opt_value'])
                dhcp_options.append(option)
        if dhcp_options:
            olist = DhcpOptionsListType(dhcp_options)
            port_obj.set_virtual_machine_interface_dhcp_option_list(olist)
        else:
            # Explicit empty list clears any previous options.
            port_obj.set_virtual_machine_interface_dhcp_option_list(None)

    if ('allowed_address_pairs' in port_q):
        aap_array = []
        if port_q['allowed_address_pairs']:
            for address_pair in port_q['allowed_address_pairs']:
                mode = u'active-standby';
                if 'mac_address' not in address_pair:
                    address_pair['mac_address'] = ""
                # Bare addresses are treated as /32 host routes.
                cidr = address_pair['ip_address'].split('/')
                if len(cidr) == 1:
                    subnet = SubnetType(cidr[0], 32);
                elif len(cidr) == 2:
                    subnet = SubnetType(cidr[0], int(cidr[1]));
                else:
                    self._raise_contrail_exception(
                        'BadRequest', resource='port',
                        msg='Invalid address pair argument')
                aap = AllowedAddressPair(subnet,
                                         address_pair['mac_address'], mode)
                aap_array.append(aap)
        aaps = AllowedAddressPairs()
        if aap_array:
            aaps.set_allowed_address_pair(aap_array)
        port_obj.set_virtual_machine_interface_allowed_address_pairs(aaps)

    if 'fixed_ips' in port_q:
        net_id = (port_q.get('network_id') or
                  port_obj.get_virtual_network_refs()[0]['uuid'])
        port_obj_ips = None
        for fixed_ip in port_q.get('fixed_ips', []):
            if 'ip_address' in fixed_ip:
                # read instance ip addrs on port only once
                if port_obj_ips is None:
                    port_obj_ips = []
                    ip_back_refs = getattr(port_obj, 'instance_ip_back_refs', None)
                    if ip_back_refs:
                        for ip_back_ref in ip_back_refs:
                            try:
                                ip_obj = self._instance_ip_read(
                                    instance_ip_id=ip_back_ref['uuid'])
                            except NoIdError:
                                continue
                            port_obj_ips.append(ip_obj.get_instance_ip_address())
                ip_addr = fixed_ip['ip_address']
                if ip_addr in port_obj_ips:
                    # Address already belongs to this port: nothing to do.
                    continue
                if self._ip_addr_in_net_id(ip_addr, net_id):
                    self._raise_contrail_exception(
                        'IpAddressInUse', net_id=net_id,
                        ip_address=ip_addr)
    return port_obj
#end _port_neutron_to_vnc
def _gw_port_vnc_to_neutron(self, port_obj, port_req_memo):
vm_refs = port_obj.get_virtual_machine_refs()
vm_uuid = vm_refs[0]['uuid']
vm_obj = None
try:
vm_obj = port_req_memo['virtual-machines'][vm_uuid]
except KeyError:
pass
if vm_obj is None:
try:
vm_obj = self._vnc_lib.virtual_machine_read(id=vm_uuid)
except NoIdError:
return None
port_req_memo['virtual-machines'][vm_uuid] = vm_obj
si_refs = vm_obj.get_service_instance_refs()
if not si_refs:
return None
try:
si_obj = self._vnc_lib.service_instance_read(id=si_refs[0]['uuid'],
fields=["logical_router_back_refs"])
except NoIdError:
return None
rtr_back_refs = getattr(si_obj, "logical_router_back_refs", None)
if not rtr_back_refs:
return None
return rtr_back_refs[0]['uuid']
#end _gw_port_vnc_to_neutron
def _port_vnc_to_neutron(self, port_obj, port_req_memo=None):
    """Convert a vnc VirtualMachineInterface into a neutron port dict.

    port_req_memo caches networks/subnets/VMs across repeated calls in
    one request; a private memo is created when none is supplied.
    Derives device_id/device_owner from logical-router back-refs, the
    parent VM, or a service-instance gateway VM, in that order.
    """
    port_q_dict = {}
    extra_dict = {}
    extra_dict['contrail:fq_name'] = port_obj.get_fq_name()
    if not port_obj.display_name:
        # for ports created directly via vnc_api
        port_q_dict['name'] = port_obj.get_fq_name()[-1]
    else:
        port_q_dict['name'] = port_obj.display_name
    port_q_dict['id'] = port_obj.uuid

    net_refs = port_obj.get_virtual_network_refs()
    if net_refs:
        net_id = net_refs[0]['uuid']
    else:
        # TODO hack to force network_id on default port
        # as neutron needs it
        net_id = self._vnc_lib.obj_to_id(VirtualNetwork())

    if port_req_memo is None:
        # create a memo only for this port's conversion in this method
        port_req_memo = {}
    if 'networks' not in port_req_memo:
        port_req_memo['networks'] = {}
    if 'subnets' not in port_req_memo:
        port_req_memo['subnets'] = {}
    if 'virtual-machines' not in port_req_memo:
        port_req_memo['virtual-machines'] = {}

    try:
        net_obj = port_req_memo['networks'][net_id]
    except KeyError:
        net_obj = self._virtual_network_read(net_id=net_id)
        port_req_memo['networks'][net_id] = net_obj
        subnets_info = self._virtual_network_to_subnets(net_obj)
        port_req_memo['subnets'][net_id] = subnets_info

    # Ports parented by a VM report the network's tenant instead.
    if port_obj.parent_type != "project":
        proj_id = net_obj.parent_uuid.replace('-', '')
    else:
        proj_id = port_obj.parent_uuid.replace('-', '')

    port_q_dict['tenant_id'] = proj_id
    port_q_dict['network_id'] = net_id

    # TODO RHS below may need fixing
    port_q_dict['mac_address'] = ''
    mac_refs = port_obj.get_virtual_machine_interface_mac_addresses()
    if mac_refs:
        port_q_dict['mac_address'] = mac_refs.mac_address[0]

    dhcp_options_list = port_obj.get_virtual_machine_interface_dhcp_option_list()
    if dhcp_options_list and dhcp_options_list.dhcp_option:
        dhcp_options = []
        for dhcp_option in dhcp_options_list.dhcp_option:
            pair = {"opt_value": dhcp_option.dhcp_option_value,
                    "opt_name": dhcp_option.dhcp_option_name}
            dhcp_options.append(pair)
        port_q_dict['extra_dhcp_opts'] = dhcp_options

    allowed_address_pairs = port_obj.get_virtual_machine_interface_allowed_address_pairs()
    if allowed_address_pairs and allowed_address_pairs.allowed_address_pair:
        address_pairs = []
        for aap in allowed_address_pairs.allowed_address_pair:
            pair = {}
            pair["mac_address"] = aap.mac
            if aap.ip.get_ip_prefix_len() == 32:
                pair["ip_address"] = '%s' % (aap.ip.get_ip_prefix())
            else:
                # BUG FIX: the original had a trailing comma here which
                # made the value a 1-tuple instead of a string.
                pair["ip_address"] = '%s/%s' % (aap.ip.get_ip_prefix(),
                                                aap.ip.get_ip_prefix_len())
            address_pairs.append(pair)
        port_q_dict['allowed_address_pairs'] = address_pairs

    port_q_dict['fixed_ips'] = []
    ip_back_refs = getattr(port_obj, 'instance_ip_back_refs', None)
    if ip_back_refs:
        for ip_back_ref in ip_back_refs:
            iip_uuid = ip_back_ref['uuid']
            # fetch it from request context cache/memo if there
            try:
                ip_obj = port_req_memo['instance-ips'][iip_uuid]
            except KeyError:
                try:
                    ip_obj = self._instance_ip_read(
                        instance_ip_id=ip_back_ref['uuid'])
                except NoIdError:
                    continue
            ip_addr = ip_obj.get_instance_ip_address()
            ip_q_dict = {}
            ip_q_dict['ip_address'] = ip_addr
            ip_q_dict['subnet_id'] = self._ip_address_to_subnet_id(
                ip_addr, net_obj, port_req_memo)
            port_q_dict['fixed_ips'].append(ip_q_dict)

    port_q_dict['security_groups'] = []
    sg_refs = port_obj.get_security_group_refs()
    for sg_ref in sg_refs or []:
        port_q_dict['security_groups'].append(sg_ref['uuid'])

    port_q_dict['admin_state_up'] = port_obj.get_id_perms().enable

    # port can be router interface or vm interface
    # for perf read logical_router_back_ref only when we have to
    router_refs = getattr(port_obj, 'logical_router_back_refs', None)
    if router_refs is not None:
        port_q_dict['device_id'] = router_refs[0]['uuid']
    elif port_obj.parent_type == 'virtual-machine':
        port_q_dict['device_id'] = port_obj.parent_name
    elif port_obj.get_virtual_machine_refs() is not None:
        rtr_uuid = self._gw_port_vnc_to_neutron(port_obj, port_req_memo)
        if rtr_uuid:
            port_q_dict['device_id'] = rtr_uuid
            port_q_dict['device_owner'] = constants.DEVICE_OWNER_ROUTER_GW
        else:
            port_q_dict['device_id'] = \
                port_obj.get_virtual_machine_refs()[0]['to'][-1]
            port_q_dict['device_owner'] = ''
    else:
        port_q_dict['device_id'] = ''

    if not port_q_dict.get('device_owner'):
        port_q_dict['device_owner'] = \
            port_obj.get_virtual_machine_interface_device_owner() or ''

    # A port bound to a device is ACTIVE; otherwise it is DOWN.
    if port_q_dict['device_id']:
        port_q_dict['status'] = constants.PORT_STATUS_ACTIVE
    else:
        port_q_dict['status'] = constants.PORT_STATUS_DOWN

    if self._contrail_extensions_enabled:
        port_q_dict.update(extra_dict)
    return port_q_dict
#end _port_vnc_to_neutron
def _port_get_host_prefixes(self, host_routes, subnet_cidr):
    """This function returns the host prefixes
    Eg. If host_routes have the below routes
    ---------------------------
    |destination | next hop  |
    ---------------------------
    |  10.0.0.0/24  | 8.0.0.2  |
    |  12.0.0.0/24  | 10.0.0.4 |
    |  14.0.0.0/24  | 12.0.0.23|
    |  16.0.0.0/24  | 8.0.0.4  |
    |  15.0.0.0/24  | 16.0.0.2 |
    |  20.0.0.0/24  | 8.0.0.12 |
    ---------------------------
    subnet_cidr is 8.0.0.0/24

    This function returns the dictionary
    '8.0.0.2' : ['10.0.0.0/24', '12.0.0.0/24', '14.0.0.0/24']
    '8.0.0.4' : ['16.0.0.0/24', '15.0.0.0/24']
    '8.0.0.12': ['20.0.0.0/24']
    """
    temp_host_routes = list(host_routes)
    cidr_ip_set = IPSet([subnet_cidr])
    host_route_dict = {}
    # First pass: group routes whose next hop lies directly inside the
    # subnet; iterate over a copy since matched routes are removed.
    for route in temp_host_routes[:]:
        next_hop = route.get_next_hop()
        if IPAddress(next_hop) in cidr_ip_set:
            if next_hop in host_route_dict:
                host_route_dict[next_hop].append(route.get_prefix())
            else:
                host_route_dict[next_hop] = [route.get_prefix()]
            temp_host_routes.remove(route)

    # look for indirect routes
    # (routes whose next hop is reachable via an already-matched prefix
    # are folded in transitively by _port_update_prefixes)
    if temp_host_routes:
        for ipaddr in host_route_dict:
            self._port_update_prefixes(host_route_dict[ipaddr],
                                       temp_host_routes)
    return host_route_dict
def _port_update_prefixes(self, matched_route_list, unmatched_host_routes):
    """Fold transitively-reachable host routes into matched_route_list.

    Any route in unmatched_host_routes whose next hop falls inside the
    prefixes already in matched_route_list is moved over; passes repeat
    until a fixpoint is reached. Both lists are mutated in place.
    """
    process_host_routes = True
    while process_host_routes:
        process_host_routes = False
        # BUG FIX: iterate over a snapshot — the original iterated the
        # very list it was removing from, which skips the element after
        # each removal (the outer while loop only partially masked this).
        for route in list(unmatched_host_routes):
            ip_addr = IPAddress(route.get_next_hop())
            if ip_addr in IPSet(matched_route_list):
                matched_route_list.append(route.get_prefix())
                unmatched_host_routes.remove(route)
                process_host_routes = True
def _port_check_and_add_iface_route_table(self, fixed_ips, net_obj,
                                          port_obj):
    """Attach interface route tables for host routes via this port.

    For every subnet of the network that defines host routes, if one of
    the port's fixed IPs on that subnet is a next hop, the prefixes
    reachable through that IP are installed on the port as an interface
    route table.
    """
    ipam_refs = net_obj.get_network_ipam_refs()
    if not ipam_refs:
        return

    for ipam_ref in ipam_refs:
        subnets = ipam_ref['attr'].get_ipam_subnets()
        for subnet in subnets:
            host_routes = subnet.get_host_routes()
            if host_routes is None:
                continue
            subnet_key = self._subnet_vnc_get_key(subnet, net_obj.uuid)
            sn_id = self._subnet_vnc_read_mapping(key=subnet_key)
            subnet_cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
                                     subnet.subnet.get_ip_prefix_len())

            # Only the port's fixed IPs that live on this subnet matter.
            for ip_addr in [fixed_ip['ip_address'] for fixed_ip in
                            fixed_ips if fixed_ip['subnet_id'] == sn_id]:
                host_prefixes = self._port_get_host_prefixes(host_routes.route,
                                                             subnet_cidr)
                if ip_addr in host_prefixes:
                    self._port_add_iface_route_table(host_prefixes[ip_addr],
                                                     port_obj, sn_id)
def _port_add_iface_route_table(self, route_prefix_list, port_obj,
                                subnet_id):
    """Create/refresh the per-port interface route table for subnet_id.

    The table is named deterministically from the subnet and port uuids
    so repeated calls find and update the same object. Its routes are
    replaced wholesale with route_prefix_list, and the table is linked
    to the port.
    """
    project_obj = self._project_read(proj_id=port_obj.parent_uuid)
    intf_rt_name = '%s_%s_%s' % (_IFACE_ROUTE_TABLE_NAME_PREFIX,
                                 subnet_id, port_obj.uuid)
    intf_rt_fq_name = list(project_obj.get_fq_name())
    intf_rt_fq_name.append(intf_rt_name)
    try:
        intf_route_table_obj = self._vnc_lib.interface_route_table_read(
            fq_name=intf_rt_fq_name)
    except vnc_exc.NoIdError:
        # First time for this (subnet, port): create an empty table.
        route_table = RouteTableType(intf_rt_name)
        route_table.set_route([])
        intf_route_table = InterfaceRouteTable(
            interface_route_table_routes=route_table,
            parent_obj=project_obj,
            name=intf_rt_name)
        intf_route_table_id = self._vnc_lib.interface_route_table_create(
            intf_route_table)
        intf_route_table_obj = self._vnc_lib.interface_route_table_read(
            id=intf_route_table_id)

    rt_routes = intf_route_table_obj.get_interface_route_table_routes()
    # Replace any existing routes wholesale (the original also read the
    # current routes into a local it then immediately discarded).
    routes = [RouteType(prefix=prefix) for prefix in route_prefix_list]
    rt_routes.set_route(routes)
    intf_route_table_obj.set_interface_route_table_routes(rt_routes)
    self._vnc_lib.interface_route_table_update(intf_route_table_obj)
    port_obj.add_interface_route_table(intf_route_table_obj)
    self._vnc_lib.virtual_machine_interface_update(port_obj)
def _port_update_iface_route_table(self, net_obj, subnet_cidr, subnet_id,
new_host_routes, old_host_routes=None):
old_host_prefixes = {}
if old_host_routes:
old_host_prefixes = self._port_get_host_prefixes(old_host_routes.route,
subnet_cidr)
new_host_prefixes = self._port_get_host_prefixes(new_host_routes,
subnet_cidr)
for ipaddr, prefixes in old_host_prefixes.items():
if ipaddr in new_host_prefixes:
need_update = False
if len(prefixes) == len(new_host_prefixes[ipaddr]):
for prefix in prefixes:
if prefix not in new_host_prefixes[ipaddr]:
need_update = True
break
else:
need_update= True
if need_update:
old_host_prefixes.pop(ipaddr)
else:
# both the old and new are same. No need to do
# anything
old_host_prefixes.pop(ipaddr)
new_host_prefixes.pop(ipaddr)
if not new_host_prefixes and not old_host_prefixes:
# nothing to be done as old_host_routes and
# new_host_routes match exactly
return
# get the list of all the ip objs for this network
ipobjs = self._instance_ip_list(back_ref_id=[net_obj.uuid])
for ipobj in ipobjs:
ipaddr = ipobj.get_instance_ip_address()
if ipaddr in old_host_prefixes:
self._port_remove_iface_route_table(ipobj, subnet_id)
continue
if ipaddr in new_host_prefixes:
port_back_refs = ipobj.get_virtual_machine_interface_refs()
for port_ref in port_back_refs:
port_obj = self._virtual_machine_interface_read(
port_id=port_ref['uuid'])
self._port_add_iface_route_table(new_host_prefixes[ipaddr],
port_obj, subnet_id)
    def _port_remove_iface_route_table(self, ipobj, subnet_id):
        """Detach and delete the per-subnet interface route table of each port.

        For every VM interface referenced by instance-ip *ipobj*, locate the
        interface route table named for this (subnet, port) pair, unlink it
        from the port and delete it from the api-server. Missing objects are
        ignored (best-effort cleanup).
        """
        port_refs = ipobj.get_virtual_machine_interface_refs()
        for port_ref in port_refs or []:
            port_obj = self._virtual_machine_interface_read(port_id=port_ref['uuid'])
            # route-table name convention: <prefix>_<subnet-id>_<port-uuid>
            intf_rt_name = '%s_%s_%s' % (_IFACE_ROUTE_TABLE_NAME_PREFIX,
                                         subnet_id, port_obj.uuid)
            for rt_ref in port_obj.get_interface_route_table_refs() or []:
                # fq_name's last element is the route-table name
                if rt_ref['to'][2] != intf_rt_name:
                    continue
                try:
                    intf_route_table_obj = self._vnc_lib.interface_route_table_read(
                        id=rt_ref['uuid'])
                    port_obj.del_interface_route_table(intf_route_table_obj)
                    self._vnc_lib.virtual_machine_interface_update(port_obj)
                    self._vnc_lib.interface_route_table_delete(id=rt_ref['uuid'])
                except vnc_exc.NoIdError:
                    # already deleted concurrently; nothing to clean up
                    pass
# public methods
# network api handlers
def network_create(self, network_q):
net_obj = self._network_neutron_to_vnc(network_q, CREATE)
try:
net_uuid = self._resource_create('virtual_network', net_obj)
except RefsExistError:
self._raise_contrail_exception('BadRequest',
resource='network', msg='Network Already exists')
if net_obj.router_external:
fip_pool_obj = FloatingIpPool('floating-ip-pool', net_obj)
self._floating_ip_pool_create(fip_pool_obj)
ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW')
return ret_network_q
#end network_create
def network_read(self, net_uuid, fields=None):
# see if we can return fast...
#if fields and (len(fields) == 1) and fields[0] == 'tenant_id':
# tenant_id = self._get_obj_tenant_id('network', net_uuid)
# return {'id': net_uuid, 'tenant_id': tenant_id}
try:
net_obj = self._network_read(net_uuid)
except NoIdError:
self._raise_contrail_exception('NetworkNotFound', net_id=net_uuid)
return self._network_vnc_to_neutron(net_obj, net_repr='SHOW')
#end network_read
    def network_update(self, net_id, network_q):
        """Update network *net_id* from neutron dict *network_q*.

        Handles the router:external and shared flag transitions:
        - becoming external creates a floating-ip pool;
        - losing external deletes existing fip pools (NetworkInUse if busy);
        - losing shared is rejected if ports from other projects exist.
        Returns the updated neutron 'SHOW' representation.
        """
        net_obj = self._virtual_network_read(net_id=net_id)
        # snapshot flags before the update to detect transitions
        router_external = net_obj.get_router_external()
        shared = net_obj.get_is_shared()
        network_q['id'] = net_id
        net_obj = self._network_neutron_to_vnc(network_q, UPDATE)
        if net_obj.router_external and not router_external:
            # network became external: give it a default fip pool
            fip_pools = net_obj.get_floating_ip_pools()
            fip_pool_obj = FloatingIpPool('floating-ip-pool', net_obj)
            self._floating_ip_pool_create(fip_pool_obj)
        if router_external and not net_obj.router_external:
            # network is no longer external: remove its fip pools
            fip_pools = net_obj.get_floating_ip_pools()
            if fip_pools:
                for fip_pool in fip_pools:
                    try:
                        pool_id = fip_pool['uuid']
                        self._floating_ip_pool_delete(fip_pool_id=pool_id)
                    except RefsExistError:
                        # floating IPs still allocated from the pool
                        self._raise_contrail_exception('NetworkInUse',
                                                       net_id=net_id)
        if shared and not net_obj.is_shared:
            # un-sharing is only allowed if no foreign-project ports use it
            for vmi in net_obj.get_virtual_machine_interface_back_refs() or []:
                vmi_obj = self._virtual_machine_interface_read(port_id=vmi['uuid'])
                if (vmi_obj.parent_type == 'project' and
                    vmi_obj.parent_uuid != net_obj.parent_uuid):
                    self._raise_contrail_exception(
                        'InvalidSharedSetting',
                        network=net_obj.display_name)
        self._virtual_network_update(net_obj)
        ret_network_q = self._network_vnc_to_neutron(net_obj, net_repr='SHOW')
        return ret_network_q
    #end network_update
    def network_delete(self, net_id):
        """Delete virtual network *net_id* via the api-server."""
        self._virtual_network_delete(net_id=net_id)
    #end network_delete
# TODO request based on filter contents
    def network_list(self, context=None, filters=None):
        """List networks visible to *context*, honouring neutron *filters*.

        Two phases:
        - collect: gather candidate network objects depending on admin/
          non-admin context and on which filters are present ('id' filters
          are resolved directly into the result and skip the prune phase);
        - prune: drop collected networks that fail the remaining filters
          (fq_name, name, shared).
        Returns a list of neutron 'LIST' network dicts.
        """
        ret_dict = {}
        def _collect_without_prune(net_ids):
            # resolve explicit ids straight into ret_dict; unknown ids are
            # silently skipped
            for net_id in net_ids:
                try:
                    net_obj = self._network_read(net_id)
                    net_info = self._network_vnc_to_neutron(net_obj,
                                                           net_repr='LIST')
                    ret_dict[net_id] = net_info
                except NoIdError:
                    pass
        #end _collect_without_prune
        # collect phase
        all_net_objs = []  # all n/ws in all projects
        if context and not context['is_admin']:
            # non-admin: restrict to own project plus shared/external nets
            if filters and 'id' in filters:
                _collect_without_prune(filters['id'])
            elif filters and 'name' in filters:
                net_objs = self._network_list_project(context['tenant'])
                all_net_objs.extend(net_objs)
                all_net_objs.extend(self._network_list_shared())
                all_net_objs.extend(self._network_list_router_external())
            elif (filters and 'shared' in filters and filters['shared'][0] and
                  'router:external' not in filters):
                all_net_objs.extend(self._network_list_shared())
            elif (filters and 'router:external' in filters and
                  'shared' not in filters):
                all_net_objs.extend(self._network_list_router_external())
            elif (filters and 'router:external' in filters and
                  'shared' in filters):
                all_net_objs.extend(self._network_list_shared_and_ext())
            else:
                project_uuid = str(uuid.UUID(context['tenant']))
                if not filters:
                    all_net_objs.extend(self._network_list_router_external())
                    all_net_objs.extend(self._network_list_shared())
                all_net_objs.extend(self._network_list_project(project_uuid))
        # admin role from here on
        elif filters and 'tenant_id' in filters:
            # project-id is present
            if 'id' in filters:
                # required networks are also specified,
                # just read and populate ret_dict
                # prune is skipped because all_net_objs is empty
                _collect_without_prune(filters['id'])
            else:
                # read all networks in project, and prune below
                proj_ids = self._validate_project_ids(context, filters['tenant_id'])
                for p_id in proj_ids:
                    all_net_objs.extend(self._network_list_project(p_id))
                if 'router:external' in filters:
                    all_net_objs.extend(self._network_list_router_external())
        elif filters and 'id' in filters:
            # required networks are specified, just read and populate ret_dict
            # prune is skipped because all_net_objs is empty
            _collect_without_prune(filters['id'])
        elif filters and 'name' in filters:
            net_objs = self._network_list_project(None)
            all_net_objs.extend(net_objs)
        elif filters and 'shared' in filters:
            if filters['shared'][0] == True:
                nets = self._network_list_shared()
                for net in nets:
                    net_info = self._network_vnc_to_neutron(net,
                                                           net_repr='LIST')
                    ret_dict[net.uuid] = net_info
        elif filters and 'router:external' in filters:
            nets = self._network_list_router_external()
            if filters['router:external'][0] == True:
                for net in nets:
                    net_info = self._network_vnc_to_neutron(net, net_repr='LIST')
                    ret_dict[net.uuid] = net_info
        else:
            # read all networks in all projects
            all_net_objs.extend(self._virtual_network_list(detail=True))
        # prune phase
        for net_obj in all_net_objs:
            if net_obj.uuid in ret_dict:
                continue
            net_fq_name = unicode(net_obj.get_fq_name())
            if not self._filters_is_present(filters, 'contrail:fq_name',
                                            net_fq_name):
                continue
            if not self._filters_is_present(
                filters, 'name', net_obj.get_display_name() or net_obj.name):
                continue
            # normalize is_shared (None means "not shared")
            if net_obj.is_shared is None:
                is_shared = False
            else:
                is_shared = net_obj.is_shared
            if not self._filters_is_present(filters, 'shared',
                                            is_shared):
                continue
            try:
                net_info = self._network_vnc_to_neutron(net_obj,
                                                        net_repr='LIST')
            except NoIdError:
                # network vanished while listing; skip it
                continue
            ret_dict[net_obj.uuid] = net_info
        ret_list = []
        for net in ret_dict.values():
            ret_list.append(net)
        return ret_list
    #end network_list
def _resource_count_optimized(self, resource, filters=None):
if filters and ('tenant_id' not in filters or len(filters.keys()) > 1):
return None
project_ids = filters.get('tenant_id') if filters else None
if not isinstance(project_ids, list):
project_ids = [project_ids]
json_resource = resource.replace("_", "-")
if resource == "floating_ips":
count = lambda pid: self._vnc_lib. \
floating_ips_list(back_ref_id=pid,
count=True)[json_resource]['count']
else:
method = getattr(self._vnc_lib, resource + "_list")
count = lambda pid: method(parent_id=pid,
count=True)[json_resource]['count']
ret = [count(pid) for pid in project_ids] if project_ids \
else [count(None)]
return sum(ret)
# end _resource_count_optimized
def network_count(self, filters=None):
count = self._resource_count_optimized("virtual_networks", filters)
if count is not None:
return count
nets_info = self.network_list(filters=filters)
return len(nets_info)
#end network_count
# subnet api handlers
    def subnet_create(self, subnet_q):
        """Create a subnet on its network's IPAM link.

        Resolves the IPAM (explicit 'contrail:ipam_fq_name', the network's
        project default, or the global default), appends the subnet to the
        network→IPAM ref, rejects duplicate subnet keys, records the
        subnet-id↔key mapping, and returns the neutron subnet dict.
        """
        net_id = subnet_q['network_id']
        net_obj = self._virtual_network_read(net_id=net_id)
        ipam_fq_name = subnet_q.get('contrail:ipam_fq_name')
        if ipam_fq_name:
            domain_name, project_name, ipam_name = ipam_fq_name
            domain_obj = Domain(domain_name)
            project_obj = Project(project_name, domain_obj)
            netipam_obj = NetworkIpam(ipam_name, project_obj)
        else: # link with project's default ipam or global default ipam
            try:
                # look for 'default-network-ipam' in the network's project
                ipam_fq_name = net_obj.get_fq_name()[:-1]
                ipam_fq_name.append('default-network-ipam')
                netipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            except NoIdError:
                # fall back to the global default ipam
                netipam_obj = NetworkIpam()
            ipam_fq_name = netipam_obj.get_fq_name()
        subnet_vnc = self._subnet_neutron_to_vnc(subnet_q)
        subnet_key = self._subnet_vnc_get_key(subnet_vnc, net_id)
        # Locate list of subnets to which this subnet has to be appended
        net_ipam_ref = None
        ipam_refs = net_obj.get_network_ipam_refs()
        if ipam_refs:
            for ipam_ref in ipam_refs:
                if ipam_ref['to'] == ipam_fq_name:
                    net_ipam_ref = ipam_ref
                    break
        if not net_ipam_ref:
            # First link from net to this ipam
            vnsn_data = VnSubnetsType([subnet_vnc])
            net_obj.add_network_ipam(netipam_obj, vnsn_data)
        else: # virtual-network already linked to this ipam
            for subnet in net_ipam_ref['attr'].get_ipam_subnets():
                if subnet_key == self._subnet_vnc_get_key(subnet, net_id):
                    existing_sn_id = self._subnet_vnc_read_mapping(key=subnet_key)
                    # duplicate !!
                    msg = _("Cidr %s overlaps with another subnet of subnet %s"
                            ) % (subnet_q['cidr'], existing_sn_id)
                    self._raise_contrail_exception('BadRequest',
                                                   resource='subnet', msg=msg)
            vnsn_data = net_ipam_ref['attr']
            vnsn_data.ipam_subnets.append(subnet_vnc)
            # TODO: Add 'ref_update' API that will set this field
            net_obj._pending_field_updates.add('network_ipam_refs')
        self._virtual_network_update(net_obj)
        # allocate an id to the subnet and store mapping with
        # api-server
        subnet_id = subnet_vnc.subnet_uuid
        self._subnet_vnc_create_mapping(subnet_id, subnet_key)
        # Read in subnet from server to get updated values for gw etc.
        subnet_vnc = self._subnet_read(subnet_key)
        subnet_info = self._subnet_vnc_to_neutron(subnet_vnc, net_obj,
                                                  ipam_fq_name)
        return subnet_info
    #end subnet_create
    def subnet_read(self, subnet_id):
        """Return the neutron subnet dict for *subnet_id*.

        Resolves the subnet's (network, key) mapping, reads the network and
        scans its IPAM refs for the matching subnet. Returns {} when the
        subnet key no longer exists on the network.
        """
        subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
        # subnet key format: '<net-id> <cidr>' — first token is the net id
        net_id = subnet_key.split()[0]
        try:
            net_obj = self._network_read(net_id)
        except NoIdError:
            self._raise_contrail_exception('SubnetNotFound',
                                           subnet_id=subnet_id)
        ipam_refs = net_obj.get_network_ipam_refs()
        if ipam_refs:
            for ipam_ref in ipam_refs:
                subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
                for subnet_vnc in subnet_vncs:
                    if self._subnet_vnc_get_key(subnet_vnc, net_id) == \
                        subnet_key:
                        ret_subnet_q = self._subnet_vnc_to_neutron(
                            subnet_vnc, net_obj, ipam_ref['to'])
                        return ret_subnet_q
        return {}
    #end subnet_read
    def subnet_update(self, subnet_id, subnet_q):
        """Update mutable subnet attributes (name, dhcp, dns, host routes).

        gateway_ip and allocation_pools updates are rejected up front.
        Returns the updated neutron subnet dict, or {} if the subnet is not
        found on its network's IPAM refs.
        """
        if 'gateway_ip' in subnet_q:
            if subnet_q['gateway_ip'] != None:
                self._raise_contrail_exception(
                    'BadRequest', resource='subnet',
                    msg="update of gateway is not supported")
        if 'allocation_pools' in subnet_q:
            if subnet_q['allocation_pools'] != None:
                self._raise_contrail_exception(
                    'BadRequest', resource='subnet',
                    msg="update of allocation_pools is not allowed")
        subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
        net_id = subnet_key.split()[0]
        net_obj = self._network_read(net_id)
        ipam_refs = net_obj.get_network_ipam_refs()
        subnet_found = False
        if ipam_refs:
            for ipam_ref in ipam_refs:
                subnets = ipam_ref['attr'].get_ipam_subnets()
                for subnet_vnc in subnets:
                    if self._subnet_vnc_get_key(subnet_vnc,
                                                net_id) == subnet_key:
                        subnet_found = True
                        break
                if subnet_found:
                    if 'name' in subnet_q:
                        if subnet_q['name'] != None:
                            subnet_vnc.set_subnet_name(subnet_q['name'])
                    # NOTE(review): this branch is unreachable — a non-None
                    # gateway_ip already raised BadRequest above
                    if 'gateway_ip' in subnet_q:
                        if subnet_q['gateway_ip'] != None:
                            subnet_vnc.set_default_gateway(subnet_q['gateway_ip'])
                    if 'enable_dhcp' in subnet_q:
                        if subnet_q['enable_dhcp'] != None:
                            subnet_vnc.set_enable_dhcp(subnet_q['enable_dhcp'])
                    if 'dns_nameservers' in subnet_q:
                        if subnet_q['dns_nameservers'] != None:
                            # encode DNS servers as DHCP option 6
                            dhcp_options=[]
                            dns_servers=" ".join(subnet_q['dns_nameservers'])
                            if dns_servers:
                                dhcp_options.append(DhcpOptionType(dhcp_option_name='6',
                                                    dhcp_option_value=dns_servers))
                            if dhcp_options:
                                subnet_vnc.set_dhcp_option_list(DhcpOptionsListType(dhcp_options))
                            else:
                                subnet_vnc.set_dhcp_option_list(None)
                    if 'host_routes' in subnet_q:
                        if subnet_q['host_routes'] != None:
                            host_routes=[]
                            for host_route in subnet_q['host_routes']:
                                host_routes.append(RouteType(prefix=host_route['destination'],
                                                             next_hop=host_route['nexthop']))
                            if self._apply_subnet_host_routes:
                                # propagate route changes to port-level
                                # interface route tables
                                old_host_routes = subnet_vnc.get_host_routes()
                                subnet_cidr = '%s/%s' % (subnet_vnc.subnet.get_ip_prefix(),
                                                         subnet_vnc.subnet.get_ip_prefix_len())
                                self._port_update_iface_route_table(net_obj,
                                                                    subnet_cidr,
                                                                    subnet_id,
                                                                    host_routes,
                                                                    old_host_routes)
                            if host_routes:
                                subnet_vnc.set_host_routes(RouteTableType(host_routes))
                            else:
                                subnet_vnc.set_host_routes(None)
                    net_obj._pending_field_updates.add('network_ipam_refs')
                    self._virtual_network_update(net_obj)
                    ret_subnet_q = self._subnet_vnc_to_neutron(
                        subnet_vnc, net_obj, ipam_ref['to'])
                    return ret_subnet_q
        return {}
    # end subnet_update
    def subnet_delete(self, subnet_id):
        """Delete subnet *subnet_id* from its network's IPAM refs.

        Filters the subnet out of the matching IPAM ref, updates the
        network (SubnetInUse if refs prevent it), and removes the
        subnet-id↔key mapping.
        """
        subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
        # subnet key format: '<net-id> <cidr>'
        net_id = subnet_key.split()[0]
        net_obj = self._network_read(net_id)
        ipam_refs = net_obj.get_network_ipam_refs()
        if ipam_refs:
            for ipam_ref in ipam_refs:
                orig_subnets = ipam_ref['attr'].get_ipam_subnets()
                # keep every subnet except the one being deleted
                new_subnets = [subnet_vnc for subnet_vnc in orig_subnets
                               if self._subnet_vnc_get_key(subnet_vnc,
                                   net_id) != subnet_key]
                if len(orig_subnets) != len(new_subnets):
                    # matched subnet to be deleted
                    ipam_ref['attr'].set_ipam_subnets(new_subnets)
                    net_obj._pending_field_updates.add('network_ipam_refs')
                    try:
                        self._virtual_network_update(net_obj)
                    except RefsExistError:
                        self._raise_contrail_exception('SubnetInUse',
                                                       subnet_id=subnet_id)
                    self._subnet_vnc_delete_mapping(subnet_id, subnet_key)
                    return
    #end subnet_delete
    def subnets_list(self, context, filters=None):
        """List subnets visible to *context*, honouring neutron *filters*.

        Collects candidate networks (by explicit subnet ids, or the
        caller's project plus shared networks), then walks each network's
        IPAM subnets and filters by shared/id/tenant_id/network_id/name.
        """
        ret_subnets = []
        all_net_objs = []
        if filters and 'id' in filters:
            # required subnets are specified,
            # just read in corresponding net_ids
            net_ids = []
            for subnet_id in filters['id']:
                subnet_key = self._subnet_vnc_read_mapping(id=subnet_id)
                net_id = subnet_key.split()[0]
                net_ids.append(net_id)
            all_net_objs.extend(self._virtual_network_list(obj_uuids=net_ids,
                                                           detail=True))
        else:
            # non-admins see only their own project (plus shared networks)
            if not context['is_admin']:
                proj_id = context['tenant']
            else:
                proj_id = None
            net_objs = self._network_list_project(proj_id)
            all_net_objs.extend(net_objs)
            net_objs = self._network_list_shared()
            all_net_objs.extend(net_objs)
        ret_dict = {}
        for net_obj in all_net_objs:
            # skip networks already processed (lists may overlap)
            if net_obj.uuid in ret_dict:
                continue
            ret_dict[net_obj.uuid] = 1
            ipam_refs = net_obj.get_network_ipam_refs()
            if ipam_refs:
                for ipam_ref in ipam_refs:
                    subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
                    for subnet_vnc in subnet_vncs:
                        sn_info = self._subnet_vnc_to_neutron(subnet_vnc,
                                                              net_obj,
                                                              ipam_ref['to'])
                        sn_id = sn_info['id']
                        sn_proj_id = sn_info['tenant_id']
                        sn_net_id = sn_info['network_id']
                        sn_name = sn_info['name']
                        if (filters and 'shared' in filters and
                                        filters['shared'][0] == True):
                            if not net_obj.is_shared:
                                continue
                        elif filters:
                            if not self._filters_is_present(filters, 'id',
                                                            sn_id):
                                continue
                            if not self._filters_is_present(filters,
                                                            'tenant_id',
                                                            sn_proj_id):
                                continue
                            if not self._filters_is_present(filters,
                                                            'network_id',
                                                            sn_net_id):
                                continue
                            if not self._filters_is_present(filters,
                                                            'name',
                                                            sn_name):
                                continue
                        ret_subnets.append(sn_info)
        return ret_subnets
    #end subnets_list
    def subnets_count(self, context, filters=None):
        """Count subnets visible to *context* matching *filters*."""
        subnets_info = self.subnets_list(context, filters)
        return len(subnets_info)
    #end subnets_count
# ipam api handlers
def ipam_create(self, ipam_q):
# TODO remove below once api-server can read and create projects
# from keystone on startup
#self._ensure_project_exists(ipam_q['tenant_id'])
ipam_obj = self._ipam_neutron_to_vnc(ipam_q, CREATE)
try:
ipam_uuid = self._vnc_lib.network_ipam_create(ipam_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='ipam', msg=str(e))
return self._ipam_vnc_to_neutron(ipam_obj)
#end ipam_create
    def ipam_read(self, ipam_id):
        """Return the neutron view of network IPAM *ipam_id*."""
        try:
            ipam_obj = self._vnc_lib.network_ipam_read(id=ipam_id)
        except NoIdError:
            # TODO add ipam specific exception
            self._raise_contrail_exception('NetworkNotFound',
                                           net_id=ipam_id)
        return self._ipam_vnc_to_neutron(ipam_obj)
    #end ipam_read
    def ipam_update(self, ipam_id, ipam_q):
        """Update IPAM *ipam_id* from neutron dict *ipam_q*; return new view."""
        ipam_q['id'] = ipam_id
        ipam_obj = self._ipam_neutron_to_vnc(ipam_q, UPDATE)
        self._vnc_lib.network_ipam_update(ipam_obj)
        return self._ipam_vnc_to_neutron(ipam_obj)
    #end ipam_update
    def ipam_delete(self, ipam_id):
        """Delete network IPAM *ipam_id* via the api-server."""
        self._vnc_lib.network_ipam_delete(id=ipam_id)
    #end ipam_delete
# TODO request based on filter contents
def ipam_list(self, context=None, filters=None):
ret_list = []
# collect phase
all_ipams = [] # all ipams in all projects
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_ipams = self._ipam_list_project(p_id)
all_ipams.append(project_ipams)
else: # no filters
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_ipams = self._ipam_list_project(proj_id)
all_ipams.append(project_ipams)
# prune phase
for project_ipams in all_ipams:
for proj_ipam in project_ipams:
# TODO implement same for name specified in filter
proj_ipam_id = proj_ipam['uuid']
if not self._filters_is_present(filters, 'id', proj_ipam_id):
continue
ipam_info = self.ipam_read(proj_ipam['uuid'])
ret_list.append(ipam_info)
return ret_list
#end ipam_list
def ipam_count(self, filters=None):
count = self._resource_count_optimized("network_ipams", filters)
if count is not None:
return count
ipam_info = self.ipam_list(filters=filters)
return len(ipam_info)
#end ipam_count
# policy api handlers
def policy_create(self, policy_q):
# TODO remove below once api-server can read and create projects
# from keystone on startup
#self._ensure_project_exists(policy_q['tenant_id'])
policy_obj = self._policy_neutron_to_vnc(policy_q, CREATE)
try:
policy_uuid = self._vnc_lib.network_policy_create(policy_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='policy', msg=str(e))
return self._policy_vnc_to_neutron(policy_obj)
#end policy_create
    def policy_read(self, policy_id):
        """Return the neutron view of network policy *policy_id*."""
        try:
            policy_obj = self._vnc_lib.network_policy_read(id=policy_id)
        except NoIdError:
            raise policy.PolicyNotFound(id=policy_id)
        return self._policy_vnc_to_neutron(policy_obj)
    #end policy_read
    def policy_update(self, policy_id, policy):
        """Update network policy *policy_id* from neutron dict *policy*."""
        # NOTE: the 'policy' parameter shadows the module-level 'policy'
        # name used elsewhere (e.g. policy_read); harmless here
        policy_q = policy
        policy_q['id'] = policy_id
        policy_obj = self._policy_neutron_to_vnc(policy_q, UPDATE)
        self._vnc_lib.network_policy_update(policy_obj)
        return self._policy_vnc_to_neutron(policy_obj)
    #end policy_update
    def policy_delete(self, policy_id):
        """Delete network policy *policy_id* via the api-server."""
        self._vnc_lib.network_policy_delete(id=policy_id)
    #end policy_delete
# TODO request based on filter contents
def policy_list(self, context=None, filters=None):
ret_list = []
# collect phase
all_policys = [] # all policys in all projects
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in project_ids:
project_policys = self._policy_list_project(p_id)
all_policys.append(project_policys)
else: # no filters
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_policys = self._policy_list_project(proj_id)
all_policys.append(project_policys)
# prune phase
for project_policys in all_policys:
for proj_policy in project_policys:
# TODO implement same for name specified in filter
proj_policy_id = proj_policy['uuid']
if not self._filters_is_present(filters, 'id', proj_policy_id):
continue
policy_info = self.policy_read(proj_policy['uuid'])
ret_list.append(policy_info)
return ret_list
#end policy_list
def policy_count(self, filters=None):
count = self._resource_count_optimized("network_policys", filters)
if count is not None:
return count
policy_info = self.policy_list(filters=filters)
return len(policy_info)
#end policy_count
    def _router_add_gateway(self, router_q, rtr_obj):
        """Apply the external_gateway_info of *router_q* to router *rtr_obj*.

        Sets the gateway when a (new) external network id is given, clears
        it when the request carries no network id but a gateway currently
        exists, and does nothing when neither is set.
        """
        ext_gateway = router_q.get('external_gateway_info', None)
        old_ext_gateway = self._get_external_gateway_info(rtr_obj)
        if ext_gateway or old_ext_gateway:
            network_id = None
            if ext_gateway:
                network_id = ext_gateway.get('network_id', None)
            if network_id:
                # presumably old_ext_gateway is the current gateway net id,
                # so an identical id means nothing changed — TODO confirm
                if old_ext_gateway and network_id == old_ext_gateway:
                    return
                try:
                    net_obj = self._virtual_network_read(net_id=network_id)
                    if not net_obj.get_router_external():
                        self._raise_contrail_exception(
                            'BadRequest', resource='router',
                            msg="Network %s is not a valid external network" % network_id)
                except NoIdError:
                    self._raise_contrail_exception('NetworkNotFound',
                                                   net_id=network_id)
                self._router_set_external_gateway(rtr_obj, net_obj)
            else:
                self._router_clear_external_gateway(rtr_obj)
    def _router_set_external_gateway(self, router_obj, ext_net_obj):
        """Wire router *router_obj* to external network *ext_net_obj*.

        Creates/updates the netns SNAT service instance ('si_<router-uuid>')
        and the default-route table ('rt_<router-uuid>') in the router's
        project, attaches the route table to every private network on the
        router, and links the router to the service instance and external
        network.
        """
        project_obj = self._project_read(proj_id=router_obj.parent_uuid)
        # Get netns SNAT service template
        try:
            st_obj = self._vnc_lib.service_template_read(
                fq_name=SNAT_SERVICE_TEMPLATE_FQ_NAME)
        except NoIdError:
            self._raise_contrail_exception('BadRequest', resouce='router',
                msg="Unable to set or clear the default gateway")
        # Get the service instance if it exists
        si_name = 'si_' + router_obj.uuid
        si_fq_name = project_obj.get_fq_name() + [si_name]
        try:
            si_obj = self._vnc_lib.service_instance_read(fq_name=si_fq_name)
            si_uuid = si_obj.uuid
        except NoIdError:
            si_obj = None
        # Get route table for default route it it exists
        rt_name = 'rt_' + router_obj.uuid
        rt_fq_name = project_obj.get_fq_name() + [rt_name]
        try:
            rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
            rt_uuid = rt_obj.uuid
        except NoIdError:
            rt_obj = None
        # Set the service instance
        si_created = False
        if not si_obj:
            si_obj = ServiceInstance(si_name, parent_obj=project_obj)
            si_created = True
        # two SNAT instances in active-standby for availability
        si_prop_obj = ServiceInstanceType(
            scale_out=ServiceScaleOutType(max_instances=2,
                                          auto_scale=True),
            auto_policy=True)
        # set right interface in order of [right, left] to match template
        left_if = ServiceInstanceInterfaceType()
        right_if = ServiceInstanceInterfaceType(
            virtual_network=ext_net_obj.get_fq_name_str())
        si_prop_obj.set_interface_list([right_if, left_if])
        si_prop_obj.set_ha_mode('active-standby')
        si_obj.set_service_instance_properties(si_prop_obj)
        si_obj.set_service_template(st_obj)
        if si_created:
            si_uuid = self._vnc_lib.service_instance_create(si_obj)
        else:
            self._vnc_lib.service_instance_update(si_obj)
        # Set the route table: default route pointing at the SNAT instance
        route_obj = RouteType(prefix="0.0.0.0/0",
                              next_hop=si_obj.get_fq_name_str())
        rt_created = False
        if not rt_obj:
            rt_obj = RouteTable(name=rt_name, parent_obj=project_obj)
            rt_created = True
        rt_obj.set_routes(RouteTableType.factory([route_obj]))
        if rt_created:
            rt_uuid = self._vnc_lib.route_table_create(rt_obj)
        else:
            self._vnc_lib.route_table_update(rt_obj)
        # Associate route table to all private networks connected onto
        # that router
        for intf in router_obj.get_virtual_machine_interface_refs() or []:
            port_id = intf['uuid']
            net_id = self.port_read(port_id)['network_id']
            try:
                net_obj = self._vnc_lib.virtual_network_read(id=net_id)
            except NoIdError:
                self._raise_contrail_exception(
                    'NetworkNotFound', net_id=net_id)
            net_obj.set_route_table(rt_obj)
            self._vnc_lib.virtual_network_update(net_obj)
        # Add logical gateway virtual network
        router_obj.set_service_instance(si_obj)
        router_obj.set_virtual_network(ext_net_obj)
        self._vnc_lib.logical_router_update(router_obj)
    def _router_clear_external_gateway(self, router_obj):
        """Tear down the external gateway of router *router_obj*.

        Detaches and deletes the 'rt_<router-uuid>' route table from all
        connected networks, unlinks the router from its gateway network and
        service instance, and deletes the 'si_<router-uuid>' SNAT service
        instance if present.
        """
        project_obj = self._project_read(proj_id=router_obj.parent_uuid)
        # Get the service instance if it exists
        si_name = 'si_' + router_obj.uuid
        si_fq_name = project_obj.get_fq_name() + [si_name]
        try:
            si_obj = self._vnc_lib.service_instance_read(fq_name=si_fq_name)
            si_uuid = si_obj.uuid
        except NoIdError:
            si_obj = None
        # Get route table for default route it it exists
        rt_name = 'rt_' + router_obj.uuid
        rt_fq_name = project_obj.get_fq_name() + [rt_name]
        try:
            rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
            rt_uuid = rt_obj.uuid
        except NoIdError:
            rt_obj = None
        # Delete route table
        if rt_obj:
            # Disassociate route table to all private networks connected
            # onto that router
            for net_ref in rt_obj.get_virtual_network_back_refs() or []:
                try:
                    net_obj = self._vnc_lib.virtual_network_read(
                        id=net_ref['uuid'])
                except NoIdError:
                    # network already gone; skip it
                    continue
                net_obj.del_route_table(rt_obj)
                self._vnc_lib.virtual_network_update(net_obj)
            self._vnc_lib.route_table_delete(id=rt_obj.uuid)
        # Clear logical gateway virtual network
        router_obj.set_virtual_network_list([])
        router_obj.set_service_instance_list([])
        self._vnc_lib.logical_router_update(router_obj)
        # Delete service instance (must come after the router unlinks it)
        if si_obj:
            self._vnc_lib.service_instance_delete(id=si_uuid)
def _set_snat_routing_table(self, router_obj, network_id):
project_obj = self._project_read(proj_id=router_obj.parent_uuid)
rt_name = 'rt_' + router_obj.uuid
rt_fq_name = project_obj.get_fq_name() + [rt_name]
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
rt_uuid = rt_obj.uuid
except NoIdError:
# No route table set with that router ID, the gateway is not set
return
try:
net_obj = self._vnc_lib.virtual_network_read(id=network_id)
except NoIdError:
raise exceptions.NetworkNotFound(net_id=ext_net_id)
net_obj.set_route_table(rt_obj)
self._vnc_lib.virtual_network_update(net_obj)
def _clear_snat_routing_table(self, router_obj, network_id):
project_obj = self._project_read(proj_id=router_obj.parent_uuid)
rt_name = 'rt_' + router_obj.uuid
rt_fq_name = project_obj.get_fq_name() + [rt_name]
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
rt_uuid = rt_obj.uuid
except NoIdError:
# No route table set with that router ID, the gateway is not set
return
try:
net_obj = self._vnc_lib.virtual_network_read(id=network_id)
except NoIdError:
raise exceptions.NetworkNotFound(net_id=ext_net_id)
net_obj.del_route_table(rt_obj)
self._vnc_lib.virtual_network_update(net_obj)
# router api handlers
def router_create(self, router_q):
#self._ensure_project_exists(router_q['tenant_id'])
rtr_obj = self._router_neutron_to_vnc(router_q, CREATE)
rtr_uuid = self._resource_create('logical_router', rtr_obj)
# read it back to update id perms
rtr_obj = self._logical_router_read(rtr_uuid)
self._router_add_gateway(router_q, rtr_obj)
ret_router_q = self._router_vnc_to_neutron(rtr_obj, rtr_repr='SHOW')
return ret_router_q
#end router_create
def router_read(self, rtr_uuid, fields=None):
# see if we can return fast...
if fields and (len(fields) == 1) and fields[0] == 'tenant_id':
tenant_id = self._get_obj_tenant_id('router', rtr_uuid)
return {'id': rtr_uuid, 'tenant_id': tenant_id}
try:
rtr_obj = self._logical_router_read(rtr_uuid)
except NoIdError:
self._raise_contrail_exception('RouterNotFound',
router_id=rtr_uuid)
return self._router_vnc_to_neutron(rtr_obj, rtr_repr='SHOW')
#end router_read
def router_update(self, rtr_id, router_q):
router_q['id'] = rtr_id
rtr_obj = self._router_neutron_to_vnc(router_q, UPDATE)
self._logical_router_update(rtr_obj)
self._router_add_gateway(router_q, rtr_obj)
ret_router_q = self._router_vnc_to_neutron(rtr_obj, rtr_repr='SHOW')
return ret_router_q
#end router_update
    def router_delete(self, rtr_id):
        """Delete router *rtr_id*.

        Rejects deletion with RouterInUse while interfaces are still
        attached; raises RouterNotFound for an unknown id. Clears any
        external gateway before deleting.
        """
        try:
            rtr_obj = self._logical_router_read(rtr_id)
            if rtr_obj.get_virtual_machine_interface_refs():
                # interfaces still attached; refuse to delete
                self._raise_contrail_exception('RouterInUse',
                                               router_id=rtr_id)
        except NoIdError:
            self._raise_contrail_exception('RouterNotFound',
                                           router_id=rtr_id)
        self._router_clear_external_gateway(rtr_obj)
        self._logical_router_delete(rtr_id=rtr_id)
    #end router_delete
# TODO request based on filter contents
    def router_list(self, context=None, filters=None):
        """List logical routers, honouring neutron *filters*.

        Two phases: collect candidate routers (explicit ids go straight
        into the result and skip pruning; otherwise per-project listings),
        then prune by id/fq_name/name filters. Shared routers are not
        supported, so a shared=True filter returns [].
        """
        ret_list = []
        if filters and 'shared' in filters:
            if filters['shared'][0] == True:
                # no support for shared routers
                return ret_list
        # collect phase
        all_rtrs = []  # all n/ws in all projects
        if filters and 'tenant_id' in filters:
            # project-id is present
            if 'id' in filters:
                # required routers are also specified,
                # just read and populate ret_list
                # prune is skipped because all_rtrs is empty
                for rtr_id in filters['id']:
                    try:
                        rtr_obj = self._logical_router_read(rtr_id)
                        rtr_info = self._router_vnc_to_neutron(rtr_obj,
                                                               rtr_repr='LIST')
                        ret_list.append(rtr_info)
                    except NoIdError:
                        pass
            else:
                # read all routers in project, and prune below
                project_ids = self._validate_project_ids(context,
                                                         filters['tenant_id'])
                for p_id in project_ids:
                    if 'router:external' in filters:
                        all_rtrs.append(self._fip_pool_ref_routers(p_id))
                    else:
                        project_rtrs = self._router_list_project(p_id)
                        all_rtrs.append(project_rtrs)
        elif filters and 'id' in filters:
            # required routers are specified, just read and populate ret_list
            # prune is skipped because all_rtrs is empty
            for rtr_id in filters['id']:
                try:
                    rtr_obj = self._logical_router_read(rtr_id)
                    rtr_info = self._router_vnc_to_neutron(rtr_obj,
                                                           rtr_repr='LIST')
                    ret_list.append(rtr_info)
                except NoIdError:
                    pass
        else:
            # read all routers in all projects
            project_rtrs = self._router_list_project()
            all_rtrs.append(project_rtrs)
        # prune phase
        for project_rtrs in all_rtrs:
            for proj_rtr in project_rtrs:
                proj_rtr_id = proj_rtr['uuid']
                if not self._filters_is_present(filters, 'id', proj_rtr_id):
                    continue
                proj_rtr_fq_name = unicode(proj_rtr['fq_name'])
                if not self._filters_is_present(filters, 'contrail:fq_name',
                                                proj_rtr_fq_name):
                    continue
                try:
                    rtr_obj = self._logical_router_read(proj_rtr['uuid'])
                    if not self._filters_is_present(
                        filters, 'name',
                        rtr_obj.get_display_name() or rtr_obj.name):
                        continue
                    rtr_info = self._router_vnc_to_neutron(rtr_obj,
                                                           rtr_repr='LIST')
                except NoIdError:
                    # router vanished while listing; skip it
                    continue
                ret_list.append(rtr_info)
        return ret_list
    #end router_list
def router_count(self, filters=None):
count = self._resource_count_optimized("logical_routers", filters)
if count is not None:
return count
rtrs_info = self.router_list(filters=filters)
return len(rtrs_info)
#end router_count
    def _check_for_dup_router_subnet(self, router_id,
                                     network_id, subnet_id, subnet_cidr):
        """Reject attaching a subnet that duplicates or overlaps existing
        router ports.

        Raises BadRequest when the router already has a port on
        *subnet_id* or when *subnet_cidr* overlaps the CIDR of any subnet
        already attached. NoIdError during the scan is deliberately
        swallowed (best-effort check). *network_id* is currently unused.
        """
        try:
            rports = self.port_list(filters={'device_id': [router_id]})
            # It's possible these ports are on the same network, but
            # different subnets.
            new_ipnet = netaddr.IPNetwork(subnet_cidr)
            for p in rports:
                for ip in p['fixed_ips']:
                    if ip['subnet_id'] == subnet_id:
                        msg = (_("Router %s already has a port "
                                 "on subnet %s") % (router_id, subnet_id))
                        self._raise_contrail_exception(
                            'BadRequest', resource='router', msg=msg)
                    sub_id = ip['subnet_id']
                    subnet = self.subnet_read(sub_id)
                    cidr = subnet['cidr']
                    ipnet = netaddr.IPNetwork(cidr)
                    # overlap in either direction is a conflict
                    match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr])
                    match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr])
                    if match1 or match2:
                        data = {'subnet_cidr': subnet_cidr,
                                'subnet_id': subnet_id,
                                'cidr': cidr,
                                'sub_id': sub_id}
                        msg = (_("Cidr %(subnet_cidr)s of subnet "
                                 "%(subnet_id)s overlaps with cidr %(cidr)s "
                                 "of subnet %(sub_id)s") % data)
                        self._raise_contrail_exception(
                            'BadRequest', resource='router', msg=msg)
        except NoIdError:
            pass
    def add_router_interface(self, context, router_id, port_id=None, subnet_id=None):
        """Attach an interface (existing port or new port on a subnet) to a
        router.

        Exactly one of *port_id*/*subnet_id* must be given. Validates
        single fixed IP, gateway presence and subnet overlap, wires SNAT
        routing, tags the VMI as a router interface and links it to the
        router. Returns {'id', 'tenant_id', 'port_id', 'subnet_id'}.
        """
        router_obj = self._logical_router_read(router_id)
        if port_id:
            port = self.port_read(port_id)
            # the port must not already belong to another router
            if (port['device_owner'] == constants.DEVICE_OWNER_ROUTER_INTF and
                    port['device_id']):
                self._raise_contrail_exception('PortInUse',
                                               net_id=port['network_id'],
                                               port_id=port['id'],
                                               device_id=port['device_id'])
            fixed_ips = [ip for ip in port['fixed_ips']]
            if len(fixed_ips) != 1:
                self._raise_contrail_exception(
                    'BadRequest', resource='router',
                    msg='Router port must have exactly one fixed IP')
            subnet_id = fixed_ips[0]['subnet_id']
            subnet = self.subnet_read(subnet_id)
            self._check_for_dup_router_subnet(router_id,
                                              port['network_id'],
                                              subnet['id'],
                                              subnet['cidr'])
        elif subnet_id:
            subnet = self.subnet_read(subnet_id)
            if not subnet['gateway_ip']:
                self._raise_contrail_exception(
                    'BadRequest', resource='router',
                    msg='Subnet for router interface must have a gateway IP')
            self._check_for_dup_router_subnet(router_id,
                                              subnet['network_id'],
                                              subnet_id,
                                              subnet['cidr'])
            # create a router port holding the subnet's gateway IP
            fixed_ip = {'ip_address': subnet['gateway_ip'],
                        'subnet_id': subnet['id']}
            port = self.port_create(context, {'tenant_id': subnet['tenant_id'],
                'network_id': subnet['network_id'],
                'fixed_ips': [fixed_ip],
                'admin_state_up': True,
                'device_id': router_id,
                'device_owner': constants.DEVICE_OWNER_ROUTER_INTF,
                'name': ''})
            port_id = port['id']
        else:
            self._raise_contrail_exception(
                'BadRequest', resource='router',
                msg='Either port or subnet must be specified')
        # attach SNAT default route table if the router has a gateway
        self._set_snat_routing_table(router_obj, subnet['network_id'])
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=port_id)
        vmi_obj.set_virtual_machine_interface_device_owner(
            constants.DEVICE_OWNER_ROUTER_INTF)
        self._vnc_lib.virtual_machine_interface_update(vmi_obj)
        router_obj.add_virtual_machine_interface(vmi_obj)
        self._logical_router_update(router_obj)
        info = {'id': router_id,
                'tenant_id': subnet['tenant_id'],
                'port_id': port_id,
                'subnet_id': subnet_id}
        return info
    # end add_router_interface
    def remove_router_interface(self, router_id, port_id=None, subnet_id=None):
        """Detach an interface from logical router *router_id*.

        Accepts either the interface's port id or its subnet id; validates
        the port/subnet really is attached to this router before clearing
        SNAT state, unlinking the VMI and deleting the port.  Returns the
        usual {'id', 'tenant_id', 'port_id', 'subnet_id'} dict.
        """
        router_obj = self._logical_router_read(router_id)
        subnet = None
        if port_id:
            # Port given: it must be a router interface of *this* router,
            # and (when both args are given) sit on the requested subnet.
            port_db = self.port_read(port_id)
            if (port_db['device_owner'] != constants.DEVICE_OWNER_ROUTER_INTF
                or port_db['device_id'] != router_id):
                self._raise_contrail_exception('RouterInterfaceNotFound',
                                               router_id=router_id,
                                               port_id=port_id)
            port_subnet_id = port_db['fixed_ips'][0]['subnet_id']
            if subnet_id and (port_subnet_id != subnet_id):
                self._raise_contrail_exception('SubnetMismatchForPort',
                                               port_id=port_id,
                                               subnet_id=subnet_id)
            subnet_id = port_subnet_id
            subnet = self.subnet_read(subnet_id)
            network_id = subnet['network_id']
        elif subnet_id:
            subnet = self.subnet_read(subnet_id)
            network_id = subnet['network_id']
            # Find the router port living on this subnet; the for/else
            # raises BadRequest when no interface matches.
            for intf in router_obj.get_virtual_machine_interface_refs() or []:
                port_id = intf['uuid']
                port_db = self.port_read(port_id)
                if subnet_id == port_db['fixed_ips'][0]['subnet_id']:
                    break
            else:
                msg = _('Subnet %s not connected to router %s') % (subnet_id,
                                                                   router_id)
                self._raise_contrail_exception('BadRequest',
                                               resource='router', msg=msg)
        self._clear_snat_routing_table(router_obj, subnet['network_id'])
        port_obj = self._virtual_machine_interface_read(port_id)
        router_obj.del_virtual_machine_interface(port_obj)
        self._vnc_lib.logical_router_update(router_obj)
        self.port_delete(port_id)
        info = {'id': router_id,
                'tenant_id': subnet['tenant_id'],
                'port_id': port_id,
                'subnet_id': subnet_id}
        return info
    # end remove_router_interface
# floatingip api handlers
def floatingip_create(self, context, fip_q):
try:
fip_obj = self._floatingip_neutron_to_vnc(context, fip_q, CREATE)
except Exception, e:
#logging.exception(e)
msg = _('Internal error when trying to create floating ip. '
'Please be sure the network %s is an external '
'network.') % (fip_q['floating_network_id'])
self._raise_contrail_exception('BadRequest',
resource='floatingip', msg=msg)
try:
fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
except Exception as e:
self._raise_contrail_exception('IpAddressGenerationFailure',
net_id=fip_q['floating_network_id'])
fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid)
return self._floatingip_vnc_to_neutron(fip_obj)
#end floatingip_create
def floatingip_read(self, fip_uuid):
try:
fip_obj = self._vnc_lib.floating_ip_read(id=fip_uuid)
except NoIdError:
self._raise_contrail_exception('FloatingIPNotFound',
floatingip_id=fip_uuid)
return self._floatingip_vnc_to_neutron(fip_obj)
#end floatingip_read
def floatingip_update(self, context, fip_id, fip_q):
fip_q['id'] = fip_id
fip_obj = self._floatingip_neutron_to_vnc(context, fip_q, UPDATE)
self._vnc_lib.floating_ip_update(fip_obj)
return self._floatingip_vnc_to_neutron(fip_obj)
#end floatingip_update
def floatingip_delete(self, fip_id):
self._vnc_lib.floating_ip_delete(id=fip_id)
#end floatingip_delete
def floatingip_list(self, context, filters=None):
# Read in floating ips with either
# - port(s) as anchor
# - project(s) as anchor
# - none as anchor (floating-ip collection)
ret_list = []
proj_ids = None
port_ids = None
if filters:
if 'tenant_id' in filters:
proj_ids = self._validate_project_ids(context,
filters['tenant_id'])
elif 'port_id' in filters:
port_ids = filters['port_id']
else: # no filters
if not context['is_admin']:
proj_ids = [str(uuid.UUID(context['tenant']))]
if port_ids:
fip_objs = self._floatingip_list(back_ref_id=port_ids)
elif proj_ids:
fip_objs = self._floatingip_list(back_ref_id=proj_ids)
else:
fip_objs = self._floatingip_list()
for fip_obj in fip_objs:
if 'floating_ip_address' in filters:
if (fip_obj.get_floating_ip_address() not in
filters['floating_ip_address']):
continue
ret_list.append(self._floatingip_vnc_to_neutron(fip_obj))
return ret_list
#end floatingip_list
def floatingip_count(self, context, filters=None):
count = self._resource_count_optimized("floating_ips", filters)
if count is not None:
return count
floatingip_info = self.floatingip_list(context, filters)
return len(floatingip_info)
#end floatingip_count
def _ip_addr_in_net_id(self, ip_addr, net_id):
"""Checks if ip address is present in net-id."""
net_ip_list = [ipobj.get_instance_ip_address() for ipobj in
self._instance_ip_list(back_ref_id=[net_id])]
return ip_addr in net_ip_list
def _create_instance_ip(self, net_obj, port_obj, ip_addr=None,
subnet_uuid=None, ip_family="v4"):
ip_name = str(uuid.uuid4())
ip_obj = InstanceIp(name=ip_name)
ip_obj.uuid = ip_name
if subnet_uuid:
ip_obj.set_subnet_uuid(subnet_uuid)
ip_obj.set_virtual_machine_interface(port_obj)
ip_obj.set_virtual_network(net_obj)
ip_obj.set_instance_ip_family(ip_family)
if ip_addr:
ip_obj.set_instance_ip_address(ip_addr)
ip_id = self._instance_ip_create(ip_obj)
return ip_id
# end _create_instance_ip
def _port_create_instance_ip(self, net_obj, port_obj, port_q, ip_family="v4"):
created_iip_ids = []
fixed_ips = port_q.get('fixed_ips')
if fixed_ips is None:
return
for fixed_ip in fixed_ips:
try:
ip_addr = fixed_ip.get('ip_address')
if ip_addr is not None:
if (IPAddress(fixed_ip['ip_address']).version == 4):
ip_family="v4"
elif (IPAddress(fixed_ip['ip_address']).version == 6):
ip_family="v6"
subnet_id = fixed_ip.get('subnet_id')
ip_id = self._create_instance_ip(net_obj, port_obj, ip_addr,
subnet_id, ip_family)
created_iip_ids.append(ip_id)
except vnc_exc.HttpError as e:
# Resources are not available
for iip_id in created_iip_ids:
self._instance_ip_delete(instance_ip_id=iip_id)
raise e
for iip in getattr(port_obj, 'instance_ip_back_refs', []):
if iip['uuid'] not in created_iip_ids:
iip_obj = self._instance_ip_delete(instance_ip_id=iip['uuid'])
# end _port_create_instance_ip
# port api handlers
def port_create(self, context, port_q):
net_id = port_q['network_id']
net_obj = self._network_read(net_id)
tenant_id = self._get_tenant_id_for_create(context, port_q);
proj_id = str(uuid.UUID(tenant_id))
# if mac-address is specified, check against the exisitng ports
# to see if there exists a port with the same mac-address
if 'mac_address' in port_q:
ports = self._vnc_lib.virtual_machine_interfaces_list(
parent_id=proj_id, back_ref_id=net_id, detail=True)
for port in ports:
macs = port.get_virtual_machine_interface_mac_addresses()
for mac in macs.get_mac_address():
if mac == port_q['mac_address']:
raise self._raise_contrail_exception("MacAddressInUse",
net_id=net_id, mac=port_q['mac_address'])
# initialize port object
port_obj = self._port_neutron_to_vnc(port_q, net_obj, CREATE)
# determine creation of v4 and v6 ip object
ip_obj_v4_create = False
ip_obj_v6_create = False
ipam_refs = net_obj.get_network_ipam_refs() or []
for ipam_ref in ipam_refs:
subnet_vncs = ipam_ref['attr'].get_ipam_subnets()
for subnet_vnc in subnet_vncs:
cidr = '%s/%s' %(subnet_vnc.subnet.get_ip_prefix(),
subnet_vnc.subnet.get_ip_prefix_len())
if (IPNetwork(cidr).version == 4):
ip_obj_v4_create = True
if (IPNetwork(cidr).version == 6):
ip_obj_v6_create = True
# create the object
port_id = self._resource_create('virtual_machine_interface', port_obj)
try:
if 'fixed_ips' in port_q:
self._port_create_instance_ip(net_obj, port_obj, port_q)
elif net_obj.get_network_ipam_refs():
if (ip_obj_v4_create is True):
self._port_create_instance_ip(net_obj, port_obj,
{'fixed_ips':[{'ip_address': None}]}, ip_family="v4")
if (ip_obj_v6_create is True):
self._port_create_instance_ip(net_obj, port_obj,
{'fixed_ips':[{'ip_address': None}]}, ip_family="v6")
except vnc_exc.HttpError:
# failure in creating the instance ip. Roll back
self._virtual_machine_interface_delete(port_id=port_id)
self._raise_contrail_exception('IpAddressGenerationFailure',
net_id=net_obj.uuid)
# TODO below reads back default parent name, fix it
port_obj = self._virtual_machine_interface_read(port_id=port_id)
ret_port_q = self._port_vnc_to_neutron(port_obj)
# create interface route table for the port if
# subnet has a host route for this port ip.
if self._apply_subnet_host_routes:
self._port_check_and_add_iface_route_table(ret_port_q['fixed_ips'],
net_obj, port_obj)
return ret_port_q
#end port_create
# TODO add obj param and let caller use below only as a converter
def port_read(self, port_id):
try:
port_obj = self._virtual_machine_interface_read(port_id=port_id)
except NoIdError:
self._raise_contrail_exception('PortNotFound', port_id=port_id)
ret_port_q = self._port_vnc_to_neutron(port_obj)
return ret_port_q
#end port_read
def port_update(self, port_id, port_q):
# if ip address passed then use it
req_ip_addrs = []
req_ip_subnets = []
port_q['id'] = port_id
port_obj = self._port_neutron_to_vnc(port_q, None, UPDATE)
net_id = port_obj.get_virtual_network_refs()[0]['uuid']
net_obj = self._network_read(net_id)
self._virtual_machine_interface_update(port_obj)
try:
self._port_create_instance_ip(net_obj, port_obj, port_q)
except vnc_exc.HttpError:
self._raise_contrail_exception('IpAddressGenerationFailure',
net_id=net_obj.uuid)
port_obj = self._virtual_machine_interface_read(port_id=port_id)
ret_port_q = self._port_vnc_to_neutron(port_obj)
return ret_port_q
#end port_update
    def port_delete(self, port_id):
        """Delete a port and clean up everything hanging off it.

        Order matters: refuse if the port is a router interface, release
        instance IPs (only unlink shared ones), disassociate floating IPs,
        delete the VMI, drop its interface route tables, and finally
        delete the owning virtual-machine object if this was its last port.
        """
        port_obj = self._port_neutron_to_vnc({'id': port_id}, None, DELETE)
        if port_obj.parent_type == 'virtual-machine':
            # Older schema: the VMI is a child of the virtual-machine.
            instance_id = port_obj.parent_uuid
        else:
            vm_refs = port_obj.get_virtual_machine_refs()
            if vm_refs:
                instance_id = vm_refs[0]['uuid']
            else:
                instance_id = None
        if port_obj.get_logical_router_back_refs():
            self._raise_contrail_exception('L3PortInUse', port_id=port_id,
                device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
        # release instance IP address
        iip_back_refs = getattr(port_obj, 'instance_ip_back_refs', None)
        if iip_back_refs:
            for iip_back_ref in iip_back_refs:
                # if name contains IP address then this is shared ip
                iip_obj = self._vnc_lib.instance_ip_read(
                    id=iip_back_ref['uuid'])
                # in case of shared ip only delete the link to the VMI
                iip_obj.del_virtual_machine_interface(port_obj)
                if not iip_obj.get_virtual_machine_interface_refs():
                    self._instance_ip_delete(
                        instance_ip_id=iip_back_ref['uuid'])
                else:
                    self._instance_ip_update(iip_obj)
        # disassociate any floating IP used by instance
        fip_back_refs = getattr(port_obj, 'floating_ip_back_refs', None)
        if fip_back_refs:
            for fip_back_ref in fip_back_refs:
                self.floatingip_update(None, fip_back_ref['uuid'],
                                       {'port_id': None})
        tenant_id = self._get_obj_tenant_id('port', port_id)
        self._virtual_machine_interface_delete(port_id=port_id)
        # delete any interface route table associated with the port
        for rt_ref in port_obj.get_interface_route_table_refs() or []:
            try:
                self._vnc_lib.interface_route_table_delete(id=rt_ref['uuid'])
            except vnc_exc.NoIdError:
                pass
        # delete instance if this was the last port
        try:
            if instance_id:
                self._vnc_lib.virtual_machine_delete(id=instance_id)
        except RefsExistError:
            # Other ports still reference the VM; leave it alone.
            pass
    #end port_delete
def _port_fixed_ips_is_present(self, check, against):
for addr in check['ip_address']:
for item in against:
if item['ip_address'] == addr:
return True
return False
# end _port_fixed_ips_is_present
def port_list(self, context=None, filters=None):
project_obj = None
ret_q_ports = []
all_project_ids = []
if not context:
context = {'is_admin': True}
# TODO used to find dhcp server field. support later...
if (filters.get('device_owner') == 'network:dhcp' or
'network:dhcp' in filters.get('device_owner', [])):
return ret_q_ports
if not context['is_admin']:
project_id = str(uuid.UUID(context['tenant']))
else:
project_id = None
if not 'device_id' in filters:
# Listing from back references
if not filters:
# TODO once vmi is linked to project in schema, use project_id
# to limit scope of list
ret_q_ports = self._port_list_project(project_id,
is_admin=context['is_admin'])
elif 'tenant_id' in filters:
all_project_ids = self._validate_project_ids(context,
filters['tenant_id'])
elif 'name' in filters or 'device_owner' in filters:
all_project_ids = [str(uuid.UUID(context['tenant']))]
elif 'id' in filters:
# TODO optimize
for port_id in filters['id']:
try:
port_info = self.port_read(port_id)
except NoIdError:
continue
ret_q_ports.append(port_info)
for proj_id in all_project_ids:
ret_q_ports = self._port_list_project(proj_id)
if 'network_id' in filters:
ret_q_ports = self._port_list_network(filters['network_id'])
# prune phase
ret_list = []
for port_obj in ret_q_ports:
if not self._filters_is_present(filters, 'name',
port_obj['name']):
continue
if not self._filters_is_present(filters, 'device_owner',
port_obj["device_owner"]):
continue
if 'fixed_ips' in filters and \
not self._port_fixed_ips_is_present(filters['fixed_ips'],
port_obj['fixed_ips']):
continue
ret_list.append(port_obj)
return ret_list
# Listing from parent to children
# port has a back_ref to LR, so need to read in LRs based on device id
device_ids = filters['device_id']
router_objs = self._logical_router_list(obj_uuids=device_ids)
more_ports = []
for router_obj in router_objs:
intfs = router_obj.get_virtual_machine_interface_refs()
for intf in (intfs or []):
more_ports.append(intf['uuid'])
# gather all ports from an anchor of virtual-machine (backref in
# current schema and parent in < 1.06 schema)
port_objs = self._virtual_machine_interface_list(parent_id=device_ids,
back_ref_id=device_ids)
if len(more_ports):
rtr_port_objs = self._virtual_machine_interface_list(obj_uuids=more_ports)
port_objs.extend(rtr_port_objs)
ret_q_ports = self._port_list(port_objs)
return ret_q_ports
#end port_list
def port_count(self, filters=None):
count = self._resource_count_optimized("virtual_machine_interfaces",
filters)
if count is not None:
return count
if (filters.get('device_owner') == 'network:dhcp' or
'network:dhcp' in filters.get('device_owner', [])):
return 0
if 'tenant_id' in filters:
if isinstance(filters['tenant_id'], list):
project_id = str(uuid.UUID(filters['tenant_id'][0]))
else:
project_id = str(uuid.UUID(filters['tenant_id']))
nports = self._port_list_project(project_id, count=True)
else:
# across all projects - TODO very expensive,
# get only a count from api-server!
nports = len(self.port_list(filters=filters))
return nports
#end port_count
# security group api handlers
def security_group_create(self, sg_q):
sg_obj = self._security_group_neutron_to_vnc(sg_q, CREATE)
# ensure default SG and deny create if the group name is default
if sg_q['name'] == 'default':
self._ensure_default_security_group_exists(sg_q['tenant_id'])
self._raise_contrail_exception("SecurityGroupAlreadyExists")
sg_uuid = self._resource_create('security_group', sg_obj)
#allow all egress traffic
def_rule = {}
def_rule['port_range_min'] = 0
def_rule['port_range_max'] = 65535
def_rule['direction'] = 'egress'
def_rule['remote_ip_prefix'] = '0.0.0.0/0'
def_rule['remote_group_id'] = None
def_rule['protocol'] = 'any'
def_rule['ethertype'] = 'IPv4'
rule = self._security_group_rule_neutron_to_vnc(def_rule, CREATE)
self._security_group_rule_create(sg_uuid, rule)
ret_sg_q = self._security_group_vnc_to_neutron(sg_obj)
return ret_sg_q
#end security_group_create
def security_group_update(self, sg_id, sg_q):
sg_q['id'] = sg_id
sg_obj = self._security_group_neutron_to_vnc(sg_q, UPDATE)
self._vnc_lib.security_group_update(sg_obj)
ret_sg_q = self._security_group_vnc_to_neutron(sg_obj)
return ret_sg_q
#end security_group_update
def security_group_read(self, sg_id):
try:
sg_obj = self._vnc_lib.security_group_read(id=sg_id)
except NoIdError:
self._raise_contrail_exception('SecurityGroupNotFound', id=sg_id)
return self._security_group_vnc_to_neutron(sg_obj)
#end security_group_read
    def security_group_delete(self, context, sg_id):
        """Delete security group *sg_id*.

        The tenant's own 'default' group cannot be deleted; an unknown id
        is silently ignored; a group still referenced elsewhere raises
        SecurityGroupInUse.
        """
        try:
            sg_obj = self._vnc_lib.security_group_read(id=sg_id)
            if sg_obj.name == 'default' and \
               str(uuid.UUID(context['tenant_id'])) == sg_obj.parent_uuid:
                # Deny delete if the security group name is default and
                # the owner of the SG is deleting it.
                self._raise_contrail_exception(
                    'SecurityGroupCannotRemoveDefault')
        except NoIdError:
            # Deleting a non-existent group is treated as a no-op.
            return
        try:
            self._security_group_delete(sg_id)
        except RefsExistError:
            self._raise_contrail_exception('SecurityGroupInUse', id=sg_id)
    #end security_group_delete
    def security_group_list(self, context, filters=None):
        """List security groups visible to the caller.

        The caller's per-tenant default group is created first if missing.
        Non-admins see only their own project; admins may scope with the
        'tenant_id' filter or list across all projects.  Results are
        pruned by the 'id' and 'name' filters.
        """
        ret_list = []
        # collect phase
        self._ensure_default_security_group_exists(context['tenant_id'])
        all_sgs = []  # all sgs in all projects
        if context and not context['is_admin']:
            project_sgs = self._security_group_list_project(str(uuid.UUID(context['tenant'])))
            all_sgs.append(project_sgs)
        else:  # admin context
            if filters and 'tenant_id' in filters:
                project_ids = self._validate_project_ids(context,
                                                         filters['tenant_id'])
                for p_id in project_ids:
                    project_sgs = self._security_group_list_project(p_id)
                    all_sgs.append(project_sgs)
            else:  # no filters
                all_sgs.append(self._security_group_list_project(None))
        # prune phase
        for project_sgs in all_sgs:
            for sg_obj in project_sgs:
                if not self._filters_is_present(filters, 'id', sg_obj.uuid):
                    continue
                if not self._filters_is_present(filters, 'name',
                                                sg_obj.get_display_name() or sg_obj.name):
                    continue
                sg_info = self._security_group_vnc_to_neutron(sg_obj)
                ret_list.append(sg_info)
        return ret_list
    #end security_group_list
def _convert_protocol(self, value):
if value is None:
return
try:
val = int(value)
#TODO(ethuleau): support all protocol numbers
if val >= 0 and val <= 255 and IP_PROTOCOL_MAP.has_key(val):
return IP_PROTOCOL_MAP[val]
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values())
except (ValueError, TypeError):
if value.lower() in IP_PROTOCOL_MAP.values():
return value.lower()
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values())
except AttributeError:
self._raise_contrail_exception(
'SecurityGroupRuleInvalidProtocol',
protocol=value, values=IP_PROTOCOL_MAP.values())
    def _validate_port_range(self, rule):
        """Check that port_range is valid."""
        # Nothing to validate when neither bound is given.
        if (rule['port_range_min'] is None and
            rule['port_range_max'] is None):
            return
        # A port range without a protocol is meaningless.
        if not rule['protocol']:
            self._raise_contrail_exception(
                'SecurityGroupProtocolRequiredWithPorts')
        if rule['protocol'] in [constants.PROTO_NAME_TCP, constants.PROTO_NAME_UDP]:
            # TCP/UDP: min must be present and min <= max.
            if (rule['port_range_min'] is not None and
                rule['port_range_min'] <= rule['port_range_max']):
                pass
            else:
                self._raise_contrail_exception('SecurityGroupInvalidPortRange')
        elif rule['protocol'] == constants.PROTO_NAME_ICMP:
            # ICMP: min/max are reused as type/code, each capped at 255.
            # NOTE(review): "rule[attr] > 255" with a None value relies on
            # Python 2 mixed-type comparison (None < int) -- confirm
            # inputs before porting to Python 3.
            for attr, field in [('port_range_min', 'type'),
                                ('port_range_max', 'code')]:
                if rule[attr] > 255:
                    self._raise_contrail_exception(
                        'SecurityGroupInvalidIcmpValue', field=field,
                        attr=attr, value=rule[attr])
            # An ICMP code without a type is not allowed.
            if (rule['port_range_min'] is None and
                rule['port_range_max']):
                self._raise_contrail_exception('SecurityGroupMissingIcmpType',
                                               value=rule['port_range_max'])
def security_group_rule_create(self, sgr_q):
sgr_q['protocol'] = self._convert_protocol(sgr_q['protocol'])
self._validate_port_range(sgr_q)
sg_id = sgr_q['security_group_id']
sg_rule = self._security_group_rule_neutron_to_vnc(sgr_q, CREATE)
self._security_group_rule_create(sg_id, sg_rule)
ret_sg_rule_q = self._security_group_rule_vnc_to_neutron(sg_id,
sg_rule)
return ret_sg_rule_q
#end security_group_rule_create
def security_group_rule_read(self, context, sgr_id):
project_uuid = None
if not context['is_admin']:
project_uuid = str(uuid.UUID(context['tenant_id']))
sg_obj, sg_rule = self._security_group_rule_find(sgr_id, project_uuid)
if sg_obj and sg_rule:
return self._security_group_rule_vnc_to_neutron(sg_obj.uuid,
sg_rule, sg_obj)
self._raise_contrail_exception('SecurityGroupRuleNotFound', id=sgr_id)
#end security_group_rule_read
def security_group_rule_delete(self, context, sgr_id):
project_uuid = None
if not context['is_admin']:
project_uuid = str(uuid.UUID(context['tenant_id']))
sg_obj, sg_rule = self._security_group_rule_find(sgr_id, project_uuid)
if sg_obj and sg_rule:
return self._security_group_rule_delete(sg_obj, sg_rule)
self._raise_contrail_exception('SecurityGroupRuleNotFound', id=sgr_id)
#end security_group_rule_delete
    def security_group_rules_read(self, sg_id, sg_obj=None):
        """Return the Neutron rule dicts of security group *sg_id*.

        *sg_obj* may be passed to skip the re-read.  Note: returns None
        (not []) when the group has no rule entries; callers test
        truthiness.  Raises SecurityGroupNotFound for an unknown id.
        """
        try:
            if not sg_obj:
                sg_obj = self._vnc_lib.security_group_read(id=sg_id)
            sgr_entries = sg_obj.get_security_group_entries()
            sg_rules = []
            if sgr_entries is None:
                return
            for sg_rule in sgr_entries.get_policy_rule():
                sg_info = self._security_group_rule_vnc_to_neutron(sg_obj.uuid,
                                                                   sg_rule,
                                                                   sg_obj)
                sg_rules.append(sg_info)
        except NoIdError:
            self._raise_contrail_exception('SecurityGroupNotFound', id=sg_id)
        return sg_rules
    #end security_group_rules_read
    def security_group_rule_list(self, context=None, filters=None):
        """List security-group rules.

        Scoped by the 'tenant_id' filter when present; otherwise non-admin
        callers see their own project and admins see everything.  Groups
        may further be pruned by the 'id' filter.
        """
        ret_list = []
        # collect phase
        all_sgs = []
        if filters and 'tenant_id' in filters:
            project_ids = self._validate_project_ids(context,
                                                     filters['tenant_id'])
            for p_id in project_ids:
                project_sgs = self._security_group_list_project(p_id)
                all_sgs.append(project_sgs)
        else:  # no filters
            p_id = None
            if context and not context['is_admin']:
                p_id = str(uuid.UUID(context['tenant']))
            all_sgs.append(self._security_group_list_project(p_id))
        # prune phase
        for project_sgs in all_sgs:
            for sg_obj in project_sgs:
                # TODO implement same for name specified in filter
                if not self._filters_is_present(filters, 'id', sg_obj.uuid):
                    continue
                sgr_info = self.security_group_rules_read(sg_obj.uuid, sg_obj)
                if sgr_info:
                    ret_list.extend(sgr_info)
        return ret_list
    #end security_group_rule_list
#route table api handlers
def route_table_create(self, rt_q):
rt_obj = self._route_table_neutron_to_vnc(rt_q, CREATE)
try:
rt_uuid = self._route_table_create(rt_obj)
except RefsExistError as e:
self._raise_contrail_exception('BadRequest',
resource='route_table', msg=str(e))
ret_rt_q = self._route_table_vnc_to_neutron(rt_obj)
return ret_rt_q
#end security_group_create
def route_table_read(self, rt_id):
try:
rt_obj = self._vnc_lib.route_table_read(id=rt_id)
except NoIdError:
# TODO add route table specific exception
self._raise_contrail_exception('NetworkNotFound', net_id=rt_id)
return self._route_table_vnc_to_neutron(rt_obj)
#end route_table_read
def route_table_update(self, rt_id, rt_q):
rt_q['id'] = rt_id
rt_obj = self._route_table_neutron_to_vnc(rt_q, UPDATE)
self._vnc_lib.route_table_update(rt_obj)
return self._route_table_vnc_to_neutron(rt_obj)
#end policy_update
def route_table_delete(self, rt_id):
self._route_table_delete(rt_id)
#end route_table_delete
    def route_table_list(self, context, filters=None):
        """List route tables.

        Scope: 'tenant_id' filter -> those projects; 'name' filter ->
        caller's project; no filters -> every project in the domain.
        Results are pruned by the 'id' and 'name' filters.
        """
        ret_list = []
        # collect phase
        all_rts = []  # all rts in all projects
        if filters and 'tenant_id' in filters:
            project_ids = self._validate_project_ids(context,
                                                     filters['tenant_id'])
            for p_id in project_ids:
                project_rts = self._route_table_list_project(p_id)
                all_rts.append(project_rts)
        elif filters and 'name' in filters:
            p_id = str(uuid.UUID(context['tenant']))
            project_rts = self._route_table_list_project(p_id)
            all_rts.append(project_rts)
        else:  # no filters
            dom_projects = self._project_list_domain(None)
            for project in dom_projects:
                proj_id = project['uuid']
                project_rts = self._route_table_list_project(proj_id)
                all_rts.append(project_rts)
        # prune phase
        for project_rts in all_rts:
            for proj_rt in project_rts:
                # TODO implement same for name specified in filter
                proj_rt_id = proj_rt['uuid']
                if not self._filters_is_present(filters, 'id', proj_rt_id):
                    continue
                rt_info = self.route_table_read(proj_rt_id)
                if not self._filters_is_present(filters, 'name',
                                                rt_info['name']):
                    continue
                ret_list.append(rt_info)
        return ret_list
    #end route_table_list
#service instance api handlers
def svc_instance_create(self, si_q):
si_obj = self._svc_instance_neutron_to_vnc(si_q, CREATE)
si_uuid = self._svc_instance_create(si_obj)
ret_si_q = self._svc_instance_vnc_to_neutron(si_obj)
return ret_si_q
#end svc_instance_create
def svc_instance_read(self, si_id):
try:
si_obj = self._vnc_lib.service_instance_read(id=si_id)
except NoIdError:
# TODO add svc instance specific exception
self._raise_contrail_exception('NetworkNotFound', net_id=si_id)
return self._svc_instance_vnc_to_neutron(si_obj)
#end svc_instance_read
def svc_instance_delete(self, si_id):
self._svc_instance_delete(si_id)
#end svc_instance_delete
    def svc_instance_list(self, context, filters=None):
        """List service instances.

        Scope: 'tenant_id' filter -> those projects; 'name' filter ->
        caller's project; no filters -> every project in the domain.
        Results are pruned by the 'id' and 'name' filters.
        """
        ret_list = []
        # collect phase
        all_sis = []  # all sis in all projects
        if filters and 'tenant_id' in filters:
            project_ids = self._validate_project_ids(context,
                                                     filters['tenant_id'])
            for p_id in project_ids:
                project_sis = self._svc_instance_list_project(p_id)
                all_sis.append(project_sis)
        elif filters and 'name' in filters:
            p_id = str(uuid.UUID(context['tenant']))
            project_sis = self._svc_instance_list_project(p_id)
            all_sis.append(project_sis)
        else:  # no filters
            dom_projects = self._project_list_domain(None)
            for project in dom_projects:
                proj_id = project['uuid']
                project_sis = self._svc_instance_list_project(proj_id)
                all_sis.append(project_sis)
        # prune phase
        for project_sis in all_sis:
            for proj_si in project_sis:
                # TODO implement same for name specified in filter
                proj_si_id = proj_si['uuid']
                if not self._filters_is_present(filters, 'id', proj_si_id):
                    continue
                si_info = self.svc_instance_read(proj_si_id)
                if not self._filters_is_present(filters, 'name',
                                                si_info['name']):
                    continue
                ret_list.append(si_info)
        return ret_list
    #end svc_instance_list
#end class DBInterface
| 41.955125 | 104 | 0.577101 |
5bf606345a436f29f2f5df12903356518e00a286 | 783 | py | Python | tests/views_tests.py | saugatsthapit/airtng-flask | 172c6f00efbb965137e458d1a3018419b034138d | [
"MIT"
] | 17 | 2016-01-26T23:19:20.000Z | 2021-07-20T14:49:06.000Z | tests/views_tests.py | saugatsthapit/airtng-flask | 172c6f00efbb965137e458d1a3018419b034138d | [
"MIT"
] | 154 | 2016-01-27T21:11:28.000Z | 2022-03-31T10:12:54.000Z | tests/views_tests.py | saugatsthapit/airtng-flask | 172c6f00efbb965137e458d1a3018419b034138d | [
"MIT"
] | 15 | 2016-03-17T15:03:17.000Z | 2022-02-27T14:25:57.000Z | import unittest
import xml.etree.ElementTree as ElementTree
from tests.base import BaseTestCase
class ViewsTests(BaseTestCase):
def get_to_home_route_should_render_default_view(self):
self.client.get('/home')
self.assert_template_used('home.html')
def test_get_to_test_root_route_should_render_register_view(self):
self.client.get('/')
self.assert_template_used('register.html')
def test_get_to_login_route_should_render_default_view(self):
self.client.get('/login')
self.assert_template_used('login.html')
def test_get_to_register_route_should_render_default_view(self):
self.client.get('/register')
self.assert_template_used('register.html')
if __name__ == '__main__':
unittest.main()
| 25.258065 | 70 | 0.735632 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.