| code | apis | extract_api |
|---|---|---|
import numpy as np
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.pipeline import Pipeline
from soccer_xg.ml.preprocessing import simple_proc_for_linear_algoritms
def logreg_gridsearch_classifier(
numeric_features,
categoric_features,
learning_rate=0.08,
use_dask=False,
n_iter=100,
scoring='roc_auc',
):
"""
    Simple classification pipeline using randomized search to optimize logistic regression hyper-parameters.
    Parameters
    ----------
    `numeric_features` : The list of numeric features
    `categoric_features` : The list of categoric features
    `learning_rate` : The SGD learning rate; if None, a plain LogisticRegression is tuned instead
    `use_dask` : If True, use dask-ml's RandomizedSearchCV instead of scikit-learn's
    `n_iter` : Number of parameter settings sampled by the search
    `scoring` : Scoring metric passed to the search
"""
return _logreg_gridsearch_model(
'classification',
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
)
def logreg_gridsearch_regressor(
numeric_features,
categoric_features,
learning_rate=0.08,
use_dask=False,
n_iter=100,
scoring='roc_auc',
):
"""
    Simple regression pipeline using randomized search to optimize logistic regression hyper-parameters.
    Parameters
    ----------
    `numeric_features` : The list of numeric features
    `categoric_features` : The list of categoric features
    `learning_rate` : The SGD learning rate; if None, a plain LogisticRegression is tuned instead
    `use_dask` : If True, use dask-ml's RandomizedSearchCV instead of scikit-learn's
    `n_iter` : Number of parameter settings sampled by the search
    `scoring` : Scoring metric passed to the search
"""
return _logreg_gridsearch_model(
'regression',
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
)
def _logreg_gridsearch_model(
task,
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
):
if learning_rate is None:
param_space = {
'clf__C': np.logspace(-5, 5, 100),
'clf__class_weight': ['balanced', None],
}
model = LogisticRegression(max_iter=10000, fit_intercept=False)
else:
param_space = {
'clf__penalty': ['l1', 'l2'],
'clf__alpha': np.logspace(-5, 5, 100),
'clf__class_weight': ['balanced', None],
}
learning_rate_schedule = (
'constant' if isinstance(learning_rate, float) else learning_rate
)
eta0 = learning_rate if isinstance(learning_rate, float) else 0
model = SGDClassifier(
learning_rate=learning_rate_schedule,
eta0=eta0,
            loss='log',  # renamed to 'log_loss' in scikit-learn >= 1.1
max_iter=10000,
fit_intercept=False,
)
pipe = Pipeline(
[
(
'preprocessing',
simple_proc_for_linear_algoritms(
numeric_features, categoric_features
),
),
('clf', model),
]
)
if use_dask:
from dask_ml.model_selection import RandomizedSearchCV
return RandomizedSearchCV(
pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5
)
else:
from sklearn.model_selection import RandomizedSearchCV
return RandomizedSearchCV(
pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5
)
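# A minimal usage sketch, assuming hypothetical feature columns; soccer_xg's
# preprocessing module must be importable for the pipeline to build.
numeric = ['dist_to_goal', 'angle']    # hypothetical numeric columns
categoric = ['body_part']              # hypothetical categoric column
search = logreg_gridsearch_classifier(numeric, categoric, learning_rate=None, n_iter=20)
# `search` is a scikit-learn RandomizedSearchCV over the Pipeline; fit it like
# any estimator on a DataFrame holding the declared columns:
# search.fit(X_train, y_train); search.best_params_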
|
[
"sklearn.linear_model.SGDClassifier",
"soccer_xg.ml.preprocessing.simple_proc_for_linear_algoritms",
"numpy.logspace",
"sklearn.model_selection.RandomizedSearchCV",
"sklearn.linear_model.LogisticRegression"
] |
[((1818, 1873), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(10000)', 'fit_intercept': '(False)'}), '(max_iter=10000, fit_intercept=False)\n', (1836, 1873), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((2275, 2390), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'learning_rate': 'learning_rate_schedule', 'eta0': 'eta0', 'loss': '"""log"""', 'max_iter': '(10000)', 'fit_intercept': '(False)'}), "(learning_rate=learning_rate_schedule, eta0=eta0, loss='log',\n max_iter=10000, fit_intercept=False)\n", (2288, 2390), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((2819, 2894), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['pipe', 'param_space'], {'n_iter': 'n_iter', 'scoring': 'scoring', 'cv': '(5)'}), '(pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5)\n', (2837, 2894), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((3006, 3081), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['pipe', 'param_space'], {'n_iter': 'n_iter', 'scoring': 'scoring', 'cv': '(5)'}), '(pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5)\n', (3024, 3081), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((1714, 1737), 'numpy.logspace', 'np.logspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (1725, 1737), True, 'import numpy as np\n'), ((1976, 1999), 'numpy.logspace', 'np.logspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (1987, 1999), True, 'import numpy as np\n'), ((2553, 2623), 'soccer_xg.ml.preprocessing.simple_proc_for_linear_algoritms', 'simple_proc_for_linear_algoritms', (['numeric_features', 'categoric_features'], {}), '(numeric_features, categoric_features)\n', (2585, 2623), False, 'from soccer_xg.ml.preprocessing import simple_proc_for_linear_algoritms\n')]
|
# Generated by Django 2.1.7 on 2019-04-17 09:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('engine', '0017_db_redesign_20190221'),
]
operations = [
migrations.CreateModel(
name='JobCommit',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('version', models.PositiveIntegerField(default=0)),
('timestamp', models.DateTimeField(auto_now=True)),
('message', models.CharField(default='', max_length=4096)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='commits', to='engine.Job')),
],
options={
'abstract': False,
'default_permissions': (),
},
),
]
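# A hedged sketch of the model this migration corresponds to, reconstructed
# from the field list above; the project's actual models.py may differ.
from django.conf import settings
from django.db import models

class JobCommit(models.Model):
    id = models.BigAutoField(primary_key=True)
    version = models.PositiveIntegerField(default=0)
    timestamp = models.DateTimeField(auto_now=True)
    message = models.CharField(default='', max_length=4096)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
                               on_delete=models.SET_NULL)
    job = models.ForeignKey('engine.Job', related_name='commits',
                            on_delete=models.CASCADE)

    class Meta:
        default_permissions = ()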
|
[
"django.db.migrations.swappable_dependency",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((467, 521), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (486, 521), False, 'from django.db import migrations, models\n'), ((552, 590), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (579, 590), False, 'from django.db import migrations, models\n'), ((623, 658), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (643, 658), False, 'from django.db import migrations, models\n'), ((689, 734), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(4096)'}), "(default='', max_length=4096)\n", (705, 734), False, 'from django.db import migrations, models\n'), ((764, 884), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': 'settings.AUTH_USER_MODEL'}), '(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to=settings.AUTH_USER_MODEL)\n', (781, 884), False, 'from django.db import migrations, models\n'), ((906, 1014), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""commits"""', 'to': '"""engine.Job"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='commits', to='engine.Job')\n", (923, 1014), False, 'from django.db import migrations, models\n')]
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('kd_regularization_cross_entropy')
class KDRegularizationCrossEntropyCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.alpha = args.alpha
self.temperature = args.temperature
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
        # fmt: off
parser.add_argument('--alpha', default=0., type=float, metavar='D',
help='params.reg_alpha')
parser.add_argument('--temperature', default=0., type=float, metavar='D',
help='params.reg_temperature')
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
# smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
"""
loss function for mannually-designed regularization: Tf-KD_{reg}
"""
alpha = self.alpha
T = self.temperature
correct_prob = 0.99 # the probability for correct class in u(k)
# loss_CE = F.cross_entropy(net_output, target)
output = net_output[0]
K = output.size(1)
multiplier = 100
teacher_soft = torch.ones_like(output).cuda()
teacher_soft = teacher_soft * (1 - correct_prob) / (K - 1) # p^d(k)
for i in range(output.shape[0]):
teacher_soft[i, target[i]] = correct_prob
loss_soft_regu = torch.nn.KLDivLoss()(F.log_softmax(output, dim=1), F.softmax(teacher_soft / T, dim=1)) * multiplier
if reduce:
nll_loss = nll_loss.sum()
# smooth_loss = smooth_loss.sum()
loss_soft_regu = loss_soft_regu.sum()
# eps_i = self.eps / lprobs.size(-1)
# loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
loss = (1. - alpha) * nll_loss + alpha * loss_soft_regu
return loss, nll_loss
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
return {
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
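# A standalone toy sketch of the manually-designed teacher p^d(k) used above
# (assumptions: CPU tensors and made-up sizes; the .cuda() call is dropped).
import torch
import torch.nn.functional as F

K, correct_prob, T, multiplier = 5, 0.99, 1.0, 100
logits = torch.randn(2, K)                      # toy decoder outputs
gold = torch.tensor([3, 1])                     # toy target classes
teacher = torch.full_like(logits, (1 - correct_prob) / (K - 1))
teacher[torch.arange(2), gold] = correct_prob   # p^d(k) = 0.99 on the gold class
reg = torch.nn.KLDivLoss()(F.log_softmax(logits, dim=1),
                          F.softmax(teacher / T, dim=1)) * multiplier
print(reg)  # scalar regularizer, mixed with the NLL via alpha in compute_loss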
|
[
"torch.ones_like",
"fairseq.utils.item",
"torch.nn.KLDivLoss",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"math.log"
] |
[((1696, 1717), 'fairseq.utils.item', 'utils.item', (['loss.data'], {}), '(loss.data)\n', (1706, 1717), False, 'from fairseq import utils\n'), ((1768, 1793), 'fairseq.utils.item', 'utils.item', (['nll_loss.data'], {}), '(nll_loss.data)\n', (1778, 1793), False, 'from fairseq import utils\n'), ((2861, 2884), 'torch.ones_like', 'torch.ones_like', (['output'], {}), '(output)\n', (2876, 2884), False, 'import torch\n'), ((3089, 3109), 'torch.nn.KLDivLoss', 'torch.nn.KLDivLoss', ([], {}), '()\n', (3107, 3109), False, 'import torch\n'), ((3110, 3138), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (3123, 3138), True, 'import torch.nn.functional as F\n'), ((3140, 3174), 'torch.nn.functional.softmax', 'F.softmax', (['(teacher_soft / T)'], {'dim': '(1)'}), '(teacher_soft / T, dim=1)\n', (3149, 3174), True, 'import torch.nn.functional as F\n'), ((4022, 4033), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (4030, 4033), False, 'import math\n'), ((4126, 4137), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (4134, 4137), False, 'import math\n')]
|
from __future__ import print_function
# http://stackoverflow.com/questions/15247075/how-can-i-dynamically-create-derived-classes-from-a-base-class
from builtins import object
from pyqtgraph.flowchart import Node
from pyqtgraph.flowchart.library.common import CtrlNode
from RRtoolbox.lib.inspector import funcData
# TODO: see info under sharedlibs to come up with a way to convert functions to flowchart nodes.
class NodeGenerator(object):
"""
Generate Nodes.
    :param nodeName: name of the node class. If None, a name is generated on
            the fly from the wrapped function
    :param terminals: generic inputs and outputs. If None, terminals are
            generated on the fly from the wrapped function
    :param uiTemplate: template to use for UI controls. If None, the plain Node class is used
    :param nodeClass: class used to generate the node. If None, a convenient
            Node subclass is built on the fly around the wrapped function
"""
    def __init__(self, nodeName=None, terminals=None, uiTemplate=None, nodeClass=None, classTemplate="{}Node", selfAs=None, addfuncs=None):
""" define a customized NodeGenerator
:return:
"""
# these are totally needed for the node creation
        self.nodeName = nodeName  # name the generated Node class reports and displays
        # generateUi in flowchart.library.common.py
# currently it supports: 'intSpin', 'doubleSpin', 'spin', 'checkLoaded', 'combo', 'color', 'tip'
self.nodeClass = nodeClass # either way all should be derived from Node class
self.classTemplate = classTemplate
        # these are used for node creation, but are less important
self.uiTemplate = uiTemplate
self.terminals = terminals # see Terminal under flowchart/Terminal.py
self.selfAs = selfAs
self.addfuncs = addfuncs
    def config(self, func):
        # kwargs are temporary parameters to use instead of defaults
        # func is the function to be converted on the fly
if self.uiTemplate:
nodeClass = self.nodeClass or CtrlNode
if not issubclass(nodeClass,CtrlNode):
raise TypeError("nodeClass is not subclass of CtrlNode")
else:
nodeClass = self.nodeClass or Node
if not issubclass(nodeClass,Node):
raise TypeError("nodeClass is not subclass of Node")
data = funcData(func)
keywords, varargs = data["keywords"],data["varargs"]
        if False and (keywords or varargs):  # disabled sanity check, kept for reference
            where = "and".join([i for i in (keywords, varargs) if i])
            raise Exception("generic function has {}. It must have explicit arguments".format(where))
        if False and varargs:  # disabled: positional varargs were once rejected
            raise Exception("NodeGenerator does not support positional arguments like '{}', try keyword arguments".format(varargs))
args = data["args"]
useDisplay = "display" in args
nodeName = self.nodeName or data["name"]
classname = self.classTemplate.format(nodeName)
if not classname: raise Exception("classTemplate did not generate a classname")
doc = data["doc"]
templates = []
if self.uiTemplate:
for tmpl in self.uiTemplate:
replace = tmpl[0]
                for i, arg in enumerate(args[:]):
                    if replace == arg:
                        templates.append(replace)  # register in process_handles
                        del args[i]  # it won't appear in terminals
                    elif keywords: templates.append(replace)  # register if keywords allow extra variables
if self.terminals:
terminals = self.terminals # replace by user terminals
else:
terminals = {arg:{"io":"in"} for arg in args} # only inputs registered
classData = funcData(nodeClass.__init__)
# know if nodeClass supports these parameters
classArgs = classData["args"]
if classData["keywords"]:
useAllowAddInput = useAllowAddOutput = useAllowRemove = True
else:
useAllowAddInput = "allowAddInput" in classArgs
useAllowAddOutput = "allowAddOutput" in classArgs
useAllowRemove = "allowRemove" in classArgs
useTerminals = "terminals" in classArgs
# handle function should be
# def hf(self,**kwargs):
# pass # process something here
# initialize handles
        _init_handles = []  # must always exist, even if it stays empty
# now begin to register
# know if processing function supports these parameters
allowAddInput = bool(data["keywords"])
allowAddOutput = False
allowRemove = allowAddInput or allowAddOutput
if useAllowAddInput:
def handle_allowAddInput(self,kwargs):
kwargs["allowAddInput"] = allowAddInput
_init_handles.append(handle_allowAddInput)
if useAllowAddOutput:
def handle_allowAddOutput(self,kwargs):
kwargs["allowAddOutput"] = allowAddOutput
_init_handles.append(handle_allowAddOutput)
if useAllowRemove:
def handle_allowRemove(self,kwargs):
kwargs["allowRemove"] = allowRemove
_init_handles.append(handle_allowRemove)
if useTerminals:
def handle_terminals(self,kwargs):
kwargs["terminals"] = terminals
_init_handles.append(handle_terminals)
_process_handles = []
##
if not useDisplay:
def handle_display(self,kwargs):
del kwargs["display"]
_process_handles.append(handle_display)
if self.selfAs:
tempself = self.selfAs
def handle_addself(self,kwargs):
kwargs[tempself] = self
_process_handles.append(handle_addself)
d = {}
for tmpl in templates:
exec("def handle_{0}(self,kwargs): kwargs[{0}] = self.ctrls[{0}]".format(tmpl), d)
_process_handles.append(d["handle_{}".format(tmpl)])
def init(self,name,**kwargs):
for h in self._init_handles:
h(self,kwargs)
nodeClass.__init__(self,name,**kwargs)
def process(self, **kwargs):
for h in self._process_handles:
h(self,kwargs)
return func(**kwargs)
conf = dict(__init__=init,process=process,__doc__=doc,nodeName=nodeName,
_init_handles=_init_handles,_process_handles=_process_handles)
if self.uiTemplate: conf["uiTemplate"] = self.uiTemplate
if self.addfuncs: conf.update(self.addfuncs)
# returns parameters to use with type(what, bases, dict)
return classname, (nodeClass,), conf
def wrap(self, func):
# inspect.getsourcelines(my_function)
return type(*self.config(func))
__call__ = wrap
if __name__ == "__main__":
    # new_class = type("NewClassName", (BaseClass,), {"new_method": lambda self: ...})
@NodeGenerator()
def my_function1(param1, param2):
"some comment here"
print("processing something")
output1,output2 = 10,100
return output1,output2 # it must be clear
@NodeGenerator()
def my_function2(param1, param2, defparam1 = 10):
print("processing something")
        output1,output2 = 10,100 # this should work
return output1,output2 # it must be clear
@NodeGenerator()
def my_function3(param1, param2, defparam1 = 10, defparam2 = 20, *args, **kwargs):
print("processing something")
output1,output2 = 10,100
return output1,output2 # it must be clear
    n1 = my_function1("my_function1")  # the generated Node class requires a name argument
n2 = my_function2
n3 = my_function3
print(n1,n2,n3)
|
[
"RRtoolbox.lib.inspector.funcData"
] |
[((2398, 2412), 'RRtoolbox.lib.inspector.funcData', 'funcData', (['func'], {}), '(func)\n', (2406, 2412), False, 'from RRtoolbox.lib.inspector import funcData\n'), ((3811, 3839), 'RRtoolbox.lib.inspector.funcData', 'funcData', (['nodeClass.__init__'], {}), '(nodeClass.__init__)\n', (3819, 3839), False, 'from RRtoolbox.lib.inspector import funcData\n')]
|
# Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader, StdinShader, TempFileName
@inside_glslc_testsuite('File')
class SimpleFileCompiled(expect.ValidObjectFile):
"""Tests whether or not a simple glsl file compiles."""
shader = FileShader('#version 310 es\nvoid main() {}', '.frag')
glslc_args = ['-c', shader]
@inside_glslc_testsuite('File')
class NotSpecifyingOutputName(expect.SuccessfulReturn,
expect.CorrectObjectFilePreamble):
"""Tests that when there is no -o and -E/-S/-c specified, output as a.spv."""
shader = FileShader('#version 140\nvoid main() {}', '.frag')
glslc_args = [shader]
def check_output_a_spv(self, status):
output_name = os.path.join(status.directory, 'a.spv')
return self.verify_object_file_preamble(output_name)
@inside_glslc_testsuite('Parameters')
class HelpParameters(
expect.ReturnCodeIsZero, expect.StdoutMatch, expect.StderrMatch):
"""Tests the --help flag outputs correctly and does not produce and error."""
glslc_args = ['--help']
expected_stdout = '''glslc - Compile shaders into SPIR-V
Usage: glslc [options] file...
An input file of - represents standard input.
Options:
-c Only run preprocess, compile, and assemble steps.
-Dmacro[=defn] Add an implicit macro definition.
-E Outputs only the results of the preprocessing step.
Output defaults to standard output.
-fshader-stage=<stage>
Treat subsequent input files as having stage <stage>.
Valid stages are vertex, fragment, tesscontrol, tesseval,
geometry, and compute.
-g Generate source-level debug information.
Currently this option has no effect.
--help Display available options.
--version Display compiler version information.
-I <value> Add directory to include search path.
-o <file> Write output to <file>.
A file name of '-' represents standard output.
-std=<value> Version and profile for input files. Possible values
are concatenations of version and profile, e.g. 310es,
450core, etc.
-M Generate make dependencies. Implies -E and -w.
-MM An alias for -M.
-MD Generate make dependencies and compile.
-MF <file> Write dependency output to the given file.
-MT <target> Specify the target of the rule emitted by dependency
generation.
-S Only run preprocess and compilation steps.
--target-env=<environment>
Set the target shader environment, and the semantics
of warnings and errors. Valid values are 'opengl',
'opengl_compat' and 'vulkan'. The default value is 'vulkan'.
-w Suppresses all warning messages.
-Werror Treat all warnings as errors.
-x <language> Treat subsequent input files as having type <language>.
The only supported language is glsl.
'''
expected_stderr = ''
@inside_glslc_testsuite('Parameters')
class HelpIsNotTooWide(expect.StdoutNoWiderThan80Columns):
"""Tests that --help output is not too wide."""
glslc_args = ['--help']
@inside_glslc_testsuite('Parameters')
class UnknownSingleLetterArgument(expect.ErrorMessage):
"""Tests that an unknown argument triggers an error message."""
glslc_args = ['-a']
expected_error = ["glslc: error: unknown argument: '-a'\n"]
@inside_glslc_testsuite('Parameters')
class UnknownMultiLetterArgument(expect.ErrorMessage):
"""Tests that an unknown argument triggers an error message."""
glslc_args = ['-zzz']
expected_error = ["glslc: error: unknown argument: '-zzz'\n"]
@inside_glslc_testsuite('Parameters')
class UnsupportedOption(expect.ErrorMessage):
"""Tests that an unsupported option triggers an error message."""
glslc_args = ['--unsupported-option']
expected_error = [
"glslc: error: unsupported option: '--unsupported-option'\n"]
@inside_glslc_testsuite('File')
class FileNotFound(expect.ErrorMessage):
"""Tests the error message if a file cannot be found."""
blabla_file = TempFileName('blabla.frag')
glslc_args = [blabla_file]
expected_error = [
"glslc: error: cannot open input file: '", blabla_file,
"': No such file or directory\n"]
@inside_glslc_testsuite('Unsupported')
class LinkingNotSupported(expect.ErrorMessage):
"""Tests the error message generated by linking not supported yet."""
shader1 = FileShader('#version 140\nvoid main() {}', '.vert')
shader2 = FileShader('#version 140\nvoid main() {}', '.frag')
glslc_args = [shader1, shader2]
expected_error = [
'glslc: error: linking multiple files is not supported yet. ',
'Use -c to compile files individually.\n']
@inside_glslc_testsuite('Unsupported')
class MultipleStdinUnsupported(expect.ErrorMessage):
"""Tests the error message generated by having more than one - input."""
glslc_args = ['-c', '-fshader-stage=vertex', '-', '-']
expected_error = [
'glslc: error: specifying standard input "-" as input more'
' than once is not allowed.\n']
@inside_glslc_testsuite('Parameters')
class StdinWithoutShaderStage(expect.StdoutMatch, expect.StderrMatch):
"""Tests that you must use -fshader-stage when specifying - as input."""
shader = StdinShader(
"""#version 140
int a() {
}
void main() {
int x = a();
}
""")
glslc_args = [shader]
expected_stdout = ''
expected_stderr = [
"glslc: error: '-': -fshader-stage required when input is from "
'standard input "-"\n']
|
[
"placeholder.StdinShader",
"placeholder.FileShader",
"placeholder.TempFileName",
"glslc_test_framework.inside_glslc_testsuite"
] |
[((756, 786), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""File"""'], {}), "('File')\n", (778, 786), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((1001, 1031), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""File"""'], {}), "('File')\n", (1023, 1031), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((1495, 1531), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Parameters"""'], {}), "('Parameters')\n", (1517, 1531), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((3850, 3886), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Parameters"""'], {}), "('Parameters')\n", (3872, 3886), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((4030, 4066), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Parameters"""'], {}), "('Parameters')\n", (4052, 4066), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((4283, 4319), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Parameters"""'], {}), "('Parameters')\n", (4305, 4319), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((4539, 4575), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Parameters"""'], {}), "('Parameters')\n", (4561, 4575), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((4831, 4861), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""File"""'], {}), "('File')\n", (4853, 4861), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((5174, 5211), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Unsupported"""'], {}), "('Unsupported')\n", (5196, 5211), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((5651, 5688), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Unsupported"""'], {}), "('Unsupported')\n", (5673, 5688), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((6013, 6049), 'glslc_test_framework.inside_glslc_testsuite', 'inside_glslc_testsuite', (['"""Parameters"""'], {}), "('Parameters')\n", (6035, 6049), False, 'from glslc_test_framework import inside_glslc_testsuite\n'), ((911, 968), 'placeholder.FileShader', 'FileShader', (['"""#version 310 es\nvoid main() {}"""', '""".frag"""'], {}), '("""#version 310 es\nvoid main() {}""", \'.frag\')\n', (921, 968), False, 'from placeholder import FileShader, StdinShader, TempFileName\n'), ((1248, 1302), 'placeholder.FileShader', 'FileShader', (['"""#version 140\nvoid main() {}"""', '""".frag"""'], {}), '("""#version 140\nvoid main() {}""", \'.frag\')\n', (1258, 1302), False, 'from placeholder import FileShader, StdinShader, TempFileName\n'), ((4983, 5010), 'placeholder.TempFileName', 'TempFileName', (['"""blabla.frag"""'], {}), "('blabla.frag')\n", (4995, 5010), False, 'from placeholder import FileShader, StdinShader, TempFileName\n'), ((5349, 5403), 'placeholder.FileShader', 'FileShader', (['"""#version 140\nvoid main() {}"""', '""".vert"""'], {}), '("""#version 140\nvoid main() {}""", \'.vert\')\n', (5359, 5403), False, 'from placeholder import FileShader, StdinShader, TempFileName\n'), ((5415, 5469), 'placeholder.FileShader', 'FileShader', (['"""#version 140\nvoid main() {}"""', '""".frag"""'], {}), '("""#version 140\nvoid main() {}""", \'.frag\')\n', (5425, 5469), False, 'from placeholder import FileShader, StdinShader, TempFileName\n'), ((6211, 6320), 'placeholder.StdinShader', 'StdinShader', (['"""#version 140\n int a() {\n }\n void main() {\n int x = a();\n }\n """'], {}), '(\n """#version 140\n int a() {\n }\n void main() {\n int x = a();\n }\n """\n )\n', (6222, 6320), False, 'from placeholder import FileShader, StdinShader, TempFileName\n')]
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from uuid import UUID
from botframework.streaming.payload_transport import PayloadSender
from botframework.streaming.payloads.models import Header
class CancelDisassembler:
def __init__(self, *, sender: PayloadSender, identifier: UUID, type: str):
self._sender = sender
self._identifier = identifier
self._type = type
async def disassemble(self):
header = Header(type=self._type, id=self._identifier, end=True)
header.payload_length = 0
self._sender.send_payload(header, None, True, None)
return
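# A hedged smoke test with a stub sender; the stub mirrors only the single
# send_payload call made above, the frame type 'X' is a placeholder, and
# botframework-streaming must be installed for the Header import to resolve.
import asyncio
from uuid import uuid4

class StubSender:  # hypothetical stand-in for PayloadSender
    def send_payload(self, header, stream, is_end, callback):
        print(header.type, header.id, header.end, header.payload_length)

async def demo():
    disassembler = CancelDisassembler(sender=StubSender(), identifier=uuid4(), type='X')
    await disassembler.disassemble()

asyncio.run(demo())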
|
[
"botframework.streaming.payloads.models.Header"
] |
[((495, 549), 'botframework.streaming.payloads.models.Header', 'Header', ([], {'type': 'self._type', 'id': 'self._identifier', 'end': '(True)'}), '(type=self._type, id=self._identifier, end=True)\n', (501, 549), False, 'from botframework.streaming.payloads.models import Header\n')]
|
import math
import random
import warnings
import numpy as np
import scipy.ndimage
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.backends.cudnn as cudnn
from util.logconf import logging
log = logging.getLogger(__name__)
# log.setLevel(logging.WARN)
# log.setLevel(logging.INFO)
log.setLevel(logging.DEBUG)
def cropToShape(image, new_shape, center_list=None, fill=0.0):
# log.debug([image.shape, new_shape, center_list])
# assert len(image.shape) == 3, repr(image.shape)
if center_list is None:
center_list = [int(image.shape[i] / 2) for i in range(3)]
crop_list = []
for i in range(0, 3):
crop_int = center_list[i]
if image.shape[i] > new_shape[i] and crop_int is not None:
# We can't just do crop_int +/- shape/2 since shape might be odd
# and ints round down.
start_int = crop_int - int(new_shape[i]/2)
end_int = start_int + new_shape[i]
crop_list.append(slice(max(0, start_int), end_int))
else:
crop_list.append(slice(0, image.shape[i]))
# log.debug([image.shape, crop_list])
    image = image[tuple(crop_list)]  # index with a tuple; list-of-slices indexing is invalid in modern numpy
crop_list = []
for i in range(0, 3):
if image.shape[i] < new_shape[i]:
crop_int = int((new_shape[i] - image.shape[i]) / 2)
crop_list.append(slice(crop_int, crop_int + image.shape[i]))
else:
crop_list.append(slice(0, image.shape[i]))
# log.debug([image.shape, crop_list])
new_image = np.zeros(new_shape, dtype=image.dtype)
new_image[:] = fill
    new_image[tuple(crop_list)] = image
return new_image
def zoomToShape(image, new_shape, square=True):
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
if square and image.shape[0] != image.shape[1]:
crop_int = min(image.shape[0], image.shape[1])
new_shape = [crop_int, crop_int, image.shape[2]]
image = cropToShape(image, new_shape)
zoom_shape = [new_shape[i] / image.shape[i] for i in range(3)]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
        image = scipy.ndimage.zoom(  # scipy.ndimage.interpolation.zoom is deprecated
            image, zoom_shape,
            output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
return image
def randomOffset(image_list, offset_rows=0.125, offset_cols=0.125):
center_list = [int(image_list[0].shape[i] / 2) for i in range(3)]
center_list[0] += int(offset_rows * (random.random() - 0.5) * 2)
center_list[1] += int(offset_cols * (random.random() - 0.5) * 2)
center_list[2] = None
new_list = []
for image in image_list:
new_image = cropToShape(image, image.shape, center_list)
new_list.append(new_image)
return new_list
def randomZoom(image_list, scale=None, scale_min=0.8, scale_max=1.3):
if scale is None:
scale = scale_min + (scale_max - scale_min) * random.random()
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# log.info([image.shape])
            zimage = scipy.ndimage.zoom(  # deprecated scipy.ndimage.interpolation namespace dropped
                image, [scale, scale, 1.0],
                output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
image = cropToShape(zimage, image.shape)
new_list.append(image)
return new_list
_randomFlip_transform_list = [
# lambda a: np.rot90(a, axes=(0, 1)),
# lambda a: np.flip(a, 0),
lambda a: np.flip(a, 1),
]
def randomFlip(image_list, transform_bits=None):
if transform_bits is None:
transform_bits = random.randrange(0, 2 ** len(_randomFlip_transform_list))
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
for n in range(len(_randomFlip_transform_list)):
if transform_bits & 2**n:
# prhist(image, 'before')
image = _randomFlip_transform_list[n](image)
# prhist(image, 'after ')
new_list.append(image)
return new_list
def randomSpin(image_list, angle=None, range_tup=None, axes=(0, 1)):
if range_tup is None:
range_tup = (0, 360)
if angle is None:
angle = range_tup[0] + (range_tup[1] - range_tup[0]) * random.random()
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
        image = scipy.ndimage.rotate(  # formerly scipy.ndimage.interpolation.rotate
            image, angle, axes=axes, reshape=False,
            output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
new_list.append(image)
return new_list
def randomNoise(image_list, noise_min=-0.1, noise_max=0.1):
noise = np.zeros_like(image_list[0])
noise += (noise_max - noise_min) * np.random.random_sample(image_list[0].shape) + noise_min
noise *= 5
    noise = scipy.ndimage.gaussian_filter(noise, 3)  # formerly scipy.ndimage.filters.gaussian_filter
# noise += (noise_max - noise_min) * np.random.random_sample(image_hsv.shape) + noise_min
new_list = []
for image_hsv in image_list:
image_hsv = image_hsv + noise
new_list.append(image_hsv)
return new_list
def randomHsvShift(image_list, h=None, s=None, v=None,
h_min=-0.1, h_max=0.1,
s_min=0.5, s_max=2.0,
v_min=0.5, v_max=2.0):
if h is None:
h = h_min + (h_max - h_min) * random.random()
if s is None:
s = s_min + (s_max - s_min) * random.random()
if v is None:
v = v_min + (v_max - v_min) * random.random()
new_list = []
for image_hsv in image_list:
# assert image_hsv.shape[-1] == 3, repr(image_hsv.shape)
image_hsv[:,:,0::3] += h
image_hsv[:,:,1::3] = image_hsv[:,:,1::3] ** s
image_hsv[:,:,2::3] = image_hsv[:,:,2::3] ** v
new_list.append(image_hsv)
return clampHsv(new_list)
def clampHsv(image_list):
new_list = []
for image_hsv in image_list:
        image_hsv = image_hsv.clone()  # assumes torch tensors; use .copy() for numpy arrays
# Hue wraps around
image_hsv[:,:,0][image_hsv[:,:,0] > 1] -= 1
image_hsv[:,:,0][image_hsv[:,:,0] < 0] += 1
# Everything else clamps between 0 and 1
image_hsv[image_hsv > 1] = 1
image_hsv[image_hsv < 0] = 0
new_list.append(image_hsv)
return new_list
# def torch_augment(input):
# theta = random.random() * math.pi * 2
# s = math.sin(theta)
# c = math.cos(theta)
# c1 = 1 - c
# axis_vector = torch.rand(3, device='cpu', dtype=torch.float64)
# axis_vector -= 0.5
# axis_vector /= axis_vector.abs().sum()
# l, m, n = axis_vector
#
# matrix = torch.tensor([
# [l*l*c1 + c, m*l*c1 - n*s, n*l*c1 + m*s, 0],
# [l*m*c1 + n*s, m*m*c1 + c, n*m*c1 - l*s, 0],
# [l*n*c1 - m*s, m*n*c1 + l*s, n*n*c1 + c, 0],
# [0, 0, 0, 1],
# ], device=input.device, dtype=torch.float32)
#
# return th_affine3d(input, matrix)
# following from https://github.com/ncullen93/torchsample/blob/master/torchsample/utils.py
# MIT licensed
# def th_affine3d(input, matrix):
# """
# 3D Affine image transform on torch.Tensor
# """
# A = matrix[:3,:3]
# b = matrix[:3,3]
#
# # make a meshgrid of normal coordinates
# coords = th_iterproduct(input.size(-3), input.size(-2), input.size(-1), dtype=torch.float32)
#
# # shift the coordinates so center is the origin
# coords[:,0] = coords[:,0] - (input.size(-3) / 2. - 0.5)
# coords[:,1] = coords[:,1] - (input.size(-2) / 2. - 0.5)
# coords[:,2] = coords[:,2] - (input.size(-1) / 2. - 0.5)
#
# # apply the coordinate transformation
# new_coords = coords.mm(A.t().contiguous()) + b.expand_as(coords)
#
# # shift the coordinates back so origin is origin
# new_coords[:,0] = new_coords[:,0] + (input.size(-3) / 2. - 0.5)
# new_coords[:,1] = new_coords[:,1] + (input.size(-2) / 2. - 0.5)
# new_coords[:,2] = new_coords[:,2] + (input.size(-1) / 2. - 0.5)
#
# # map new coordinates using bilinear interpolation
# input_transformed = th_trilinear_interp3d(input, new_coords)
#
# return input_transformed
#
#
# def th_trilinear_interp3d(input, coords):
# """
# trilinear interpolation of 3D torch.Tensor image
# """
# # take clamp then floor/ceil of x coords
# x = torch.clamp(coords[:,0], 0, input.size(-3)-2)
# x0 = x.floor()
# x1 = x0 + 1
# # take clamp then floor/ceil of y coords
# y = torch.clamp(coords[:,1], 0, input.size(-2)-2)
# y0 = y.floor()
# y1 = y0 + 1
# # take clamp then floor/ceil of z coords
# z = torch.clamp(coords[:,2], 0, input.size(-1)-2)
# z0 = z.floor()
# z1 = z0 + 1
#
# stride = torch.tensor(input.stride()[-3:], dtype=torch.int64, device=input.device)
# x0_ix = x0.mul(stride[0]).long()
# x1_ix = x1.mul(stride[0]).long()
# y0_ix = y0.mul(stride[1]).long()
# y1_ix = y1.mul(stride[1]).long()
# z0_ix = z0.mul(stride[2]).long()
# z1_ix = z1.mul(stride[2]).long()
#
# # input_flat = th_flatten(input)
# input_flat = x.contiguous().view(x[0], x[1], -1)
#
# vals_000 = input_flat[:, :, x0_ix+y0_ix+z0_ix]
# vals_001 = input_flat[:, :, x0_ix+y0_ix+z1_ix]
# vals_010 = input_flat[:, :, x0_ix+y1_ix+z0_ix]
# vals_011 = input_flat[:, :, x0_ix+y1_ix+z1_ix]
# vals_100 = input_flat[:, :, x1_ix+y0_ix+z0_ix]
# vals_101 = input_flat[:, :, x1_ix+y0_ix+z1_ix]
# vals_110 = input_flat[:, :, x1_ix+y1_ix+z0_ix]
# vals_111 = input_flat[:, :, x1_ix+y1_ix+z1_ix]
#
# xd = x - x0
# yd = y - y0
# zd = z - z0
# xm1 = 1 - xd
# ym1 = 1 - yd
# zm1 = 1 - zd
#
# x_mapped = (
# vals_000.mul(xm1).mul(ym1).mul(zm1) +
# vals_001.mul(xm1).mul(ym1).mul(zd) +
# vals_010.mul(xm1).mul(yd).mul(zm1) +
# vals_011.mul(xm1).mul(yd).mul(zd) +
# vals_100.mul(xd).mul(ym1).mul(zm1) +
# vals_101.mul(xd).mul(ym1).mul(zd) +
# vals_110.mul(xd).mul(yd).mul(zm1) +
# vals_111.mul(xd).mul(yd).mul(zd)
# )
#
# return x_mapped.view_as(input)
#
# def th_iterproduct(*args, dtype=None):
# return torch.from_numpy(np.indices(args).reshape((len(args),-1)).T)
#
# def th_flatten(x):
# """Flatten tensor"""
# return x.contiguous().view(x[0], x[1], -1)
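# A brief hedged demo of chaining the numpy-based helpers on a toy volume
# (the HSV helpers are skipped, since clampHsv expects tensors with .clone()).
vol = np.random.random_sample((32, 32, 1)).astype('float32')
out_list = randomFlip(randomOffset([vol]))   # jitter the crop center, maybe mirror
out_list = randomZoom(out_list, scale=1.1)     # fixed zoom factor for reproducibility
log.debug([a.shape for a in out_list])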
|
[
"numpy.zeros_like",
"numpy.flip",
"warnings.simplefilter",
"numpy.random.random_sample",
"numpy.zeros",
"util.logconf.logging.getLogger",
"random.random",
"warnings.catch_warnings"
] |
[((266, 293), 'util.logconf.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'from util.logconf import logging\n'), ((1568, 1606), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'image.dtype'}), '(new_shape, dtype=image.dtype)\n', (1576, 1606), True, 'import numpy as np\n'), ((4847, 4875), 'numpy.zeros_like', 'np.zeros_like', (['image_list[0]'], {}), '(image_list[0])\n', (4860, 4875), True, 'import numpy as np\n'), ((2086, 2111), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2109, 2111), False, 'import warnings\n'), ((2121, 2152), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2142, 2152), False, 'import warnings\n'), ((3602, 3615), 'numpy.flip', 'np.flip', (['a', '(1)'], {}), '(a, 1)\n', (3609, 3615), True, 'import numpy as np\n'), ((3092, 3117), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3115, 3117), False, 'import warnings\n'), ((3131, 3162), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3152, 3162), False, 'import warnings\n'), ((4915, 4959), 'numpy.random.random_sample', 'np.random.random_sample', (['image_list[0].shape'], {}), '(image_list[0].shape)\n', (4938, 4959), True, 'import numpy as np\n'), ((2949, 2964), 'random.random', 'random.random', ([], {}), '()\n', (2962, 2964), False, 'import random\n'), ((4402, 4417), 'random.random', 'random.random', ([], {}), '()\n', (4415, 4417), False, 'import random\n'), ((5526, 5541), 'random.random', 'random.random', ([], {}), '()\n', (5539, 5541), False, 'import random\n'), ((5598, 5613), 'random.random', 'random.random', ([], {}), '()\n', (5611, 5613), False, 'import random\n'), ((5670, 5685), 'random.random', 'random.random', ([], {}), '()\n', (5683, 5685), False, 'import random\n'), ((2509, 2524), 'random.random', 'random.random', ([], {}), '()\n', (2522, 2524), False, 'import random\n'), ((2578, 2593), 'random.random', 'random.random', ([], {}), '()\n', (2591, 2593), False, 'import random\n')]
|
"""
Licensed under the terms of the BSD-3-Clause license.
Copyright (C) 2019 <NAME>, <EMAIL>
"""
from dataclasses import dataclass
from typing import ClassVar, Generator, Tuple, Union
import numpy as _np
from numpy.lib.stride_tricks import as_strided
from . audio import AudioFile
from . container import Params
from . signal.tools import zero_padding as _zero_padding
from . types import Array, Schema
@dataclass
class LazySegmentParams:
"""Encapsulates segmentation parameters."""
n_perseg: int
n_overlap: int
norm: bool = False
mono: bool = True
expand: bool = True
dtype: str = 'float64'
SEGMENTATION_PARAMS = {
"type": "object",
"properties": {
"n_perseg": {"type": "integer"},
"n_overlap": {"type": "integer"},
"extend": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]},
"pad": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]}
}
}
@dataclass
class SegmentationParams(Params):
"""Parameters for Segmentation."""
_schema: ClassVar[Schema] = SEGMENTATION_PARAMS
n_perseg: int = 512
n_overlap: int = 256
extend: Union[bool, int] = True
pad: Union[bool, int] = True
@dataclass
class Segment:
"""Encapsulates audio segment data and meta data."""
idx: int
start: int
stop: int
center: int
n_frames: int
data: _np.ndarray
class Segments:
"""Segement"""
def __init__(self, params: SegmentationParams, segs: _np.ndarray) -> None:
self._segs = segs
self._params = params
if self._params.extend:
self._offset = 0
else:
self._offset = self._params.n_perseg // 2
@property
def data(self) -> Array:
"""Return the raw segment data array."""
return self._segs
@property
def n_segs(self) -> int:
return self._segs.shape[1]
@property
def n_perseg(self) -> int:
return self._params.n_perseg
@property
def n_overlap(self) -> int:
return self._params.n_overlap
@property
def step(self) -> int:
return self._params.n_perseg - self._params.n_overlap
@property
def params(self) -> SegmentationParams:
"""Parameter set used to compute this instance."""
return self._params
def center(self, seg_idx) -> int:
"""Return the center of segment ``seg_idx`` as frame number
of the original signal.
Args:
            seg_idx: Segment index.
Returns:
Center frame index.
"""
if not (0 <= seg_idx < self.n_segs):
raise IndexError('Requested index out of range.')
return seg_idx * self.step + self._offset
def bounds(self, seg_idx) -> Tuple[int, int]:
"""Return the frame numbers of the lower and upper bound
of segment ``seg_idx``. Lower bound index is inclusive,
upper bound index is exclusive.
Args:
seg_idx: Segment index.
Returns:
Lower and upper bound frame index.
"""
if not (0 <= seg_idx < self.n_segs):
raise IndexError('Requested index out of range.')
lob = self.center(seg_idx) - self._params.n_perseg // 2
upb = lob + self._params.n_perseg
return lob, upb
def get(self, seg_idx) -> Segment:
"""Retrun segment ``seg_idx`` wrapped in an ``Segment`` object.
Args:
seg_idx: Segment index.
Returns:
Segment ``seg_idx``.
"""
return Segment(seg_idx, *self.bounds(seg_idx), self.center(seg_idx),
self._params.n_perseg, self[seg_idx])
def __iter__(self) -> Generator[_np.ndarray, None, None]:
for seg in self._segs.T:
yield _np.expand_dims(seg, 1)
def __getitem__(self, key) -> _np.ndarray:
out = self._segs[:, key]
if out.ndim < 2:
return _np.expand_dims(out, 1)
return out
def __repr__(self) -> str:
return f'Segments(params={self._params!s}, segs={self._segs!s})'
def __str__(self) -> str:
return f'<n_segs: {self.n_segs}, len_seg: {self._params.n_perseg}>'
class Segmentation:
"""Segementation"""
def __init__(self, n_perseg: int, n_overlap: int, extend: bool = True,
pad: bool = True) -> None:
"""Subdivide input array.
Args:
n_perseg: Samples per segment.
n_overlap: Overlap in samples.
extend: Extend a half window at start and end.
pad: Pad extension.
"""
if n_perseg > 0:
self.n_perseg = n_perseg
else:
msg = (f'Argument to ``n_perseg`` must be greater than '
f'zero.\nFound ``n_perseg`` = {n_perseg}.')
raise ValueError(msg)
if 0 < n_overlap < n_perseg:
self.n_overlap = n_overlap
else:
            msg = (f'Argument to ``n_overlap`` must be greater than '
                   f'zero and less than ``n_perseg``.\n Found '
                   f'``n_perseg`` = {self.n_perseg} and ``n_overlap`` '
                   f' = {n_overlap}.')
raise ValueError(msg)
self._extend = extend
self._pad = pad
self._ext_len = 0
self._pad_len = 0
def transform(self, data: _np.ndarray) -> Segments:
"""Apply segmentation.
Input array must be either one-, or two-dimensional.
If ``data`` is two-dimensional, it must be of shape
(n_elements, 1).
Args:
data: Input array.
Returns:
``Segments`` object.
"""
self._validate_data_shape(data)
self._validate_nps(data.shape[0])
n_frames = data.shape[0]
step = self.n_perseg - self.n_overlap
if self._extend:
self._ext_len = self.n_perseg // 2
if self._pad:
self._pad_len = (-(n_frames-self.n_perseg) % step) % self.n_perseg
data = _np.pad(data.squeeze(), (self._ext_len, self._ext_len+self._pad_len))
        # Stride trick: view the padded buffer as overlapping windows without copying.
        new_shape = data.shape[:-1] + ((data.shape[-1] - self.n_overlap) // step, self.n_perseg)
        new_strides = data.strides[:-1] + (step * data.strides[-1], data.strides[-1])
        segs = as_strided(data, new_shape, new_strides, writeable=False).T
params = SegmentationParams(self.n_perseg, self.n_overlap,
self._extend, self._pad)
return Segments(params, segs)
def _validate_nps(self, n_frames: int) -> None:
if self.n_perseg > n_frames:
            msg = (f'Input data length ({n_frames}) incompatible with '
                   f'parameter ``n_perseg`` = {self.n_perseg}. ``n_perseg`` '
                   'must be less than or equal to input data length.')
raise ValueError(msg)
def _validate_data_shape(self, data: _np.ndarray) -> None:
if not (0 < data.ndim < 3):
msg = (f'Input array must have one or two dimensions.\n'
f'Found ``data.shape`` = {data.shape}.')
elif data.ndim == 2 and data.shape[1] != 1:
msg = (f'Two-dimensional import arrays can only have one '
f'column.\nFound ``data.shape``= {data.shape}.')
else:
return None
raise ValueError(msg)
class LazySegments:
"""Read segments from audio file."""
def __init__(self, snd: AudioFile, n_perseg: int, n_overlap: int,
norm: bool = False, mono: bool = True,
expand: bool = True, dtype: str = 'float64') -> None:
"""Compute equal-sized segments.
Args:
snd:
n_perseg: Number of samples per segment.
n_overlap: Size of segment overlap in samples.
norm: Normalize each segment separately.
mono: If ``True`` mixdown all channels.
expand: Start segmentation at -n_perseg//2.
dtype: Dtype of output array.
"""
self._snd = snd
self.n_perseg = n_perseg
self.n_overlap = n_overlap
self.expand = expand
self.n_segs = int(_np.ceil(self._snd.n_frames / n_overlap))
if expand:
self.n_segs += 1
self.offset = -self.n_perseg // 2
else:
self.n_segs -= 1
self.offset = 0
self.step = self.n_perseg - self.n_overlap
self.norm = norm
self.mono = mono
self.dtype = dtype
def compute_bounds(self, seg_idx):
if seg_idx < 0:
raise IndexError('Expected positive integer for ``seg_idx``. '
f'Got {seg_idx}.')
if seg_idx >= self.n_segs:
raise IndexError(f'You requested segment {seg_idx}, but there '
f'are only {self.n_segs} segments.')
start = seg_idx * self.n_overlap + self.offset
return start, start + self.n_perseg
def read_segment(self, seg_idx: int, norm: bool = None,
mono: bool = None, dtype: str = None):
norm = norm or self.norm
mono = mono or self.mono
dtype = dtype or self.dtype
offset = seg_idx * self.n_overlap + self.offset
return self._snd.read(self.n_perseg, offset, norm, mono, dtype)
def loc(self, seg_idx: int, norm: bool = None,
mono: bool = None, dtype: str = None) -> Segment:
"""Locate segment by index.
Args:
seg_idx: Segment index.
norm: If ``True``, normalize each segment separately.
Falls back to ``self.norm``.
mono: If ``True`` mixdown all channels.
Falls back to ``self.mono``.
dtype: Output dtype. Falls back to ``self.dtype``.
Returns:
Segment number ``seg_idx``.
"""
start, stop = self.compute_bounds(seg_idx)
data = self.read_segment(seg_idx, norm, mono, dtype)
        return Segment(seg_idx, start, stop, (start + stop) // 2,
                       self.n_perseg, data)  # match Segment's (center, n_frames) field order
def __getitem__(self, key):
return self.loc(key)
def __iter__(self):
for i in range(self.n_segs):
yield self.__getitem__(i)
def iter_data(self):
for i in range(self.n_segs):
yield self._snd.read(self.n_perseg)
def iter_bounds(self):
for i in range(self.n_segs):
yield self.compute_bounds(i)
def _by_samples(x: Array, n_perseg: int) -> Array:
"""Split ``x`` into segments of lenght ``n_perseg`` samples.
This function automatically applies zero padding for inputs that cannot be
split evenly.
Args:
x: One-dimensional input array.
n_perseg: Length of segments in samples.
Returns:
Two-dimensional array of segments.
"""
    if not isinstance(n_perseg, int):
        raise TypeError('Param ``n_perseg`` must be of type int.')
    if n_perseg < 1:
        raise ValueError('``n_perseg`` out of range. '
                         'Expected 1 <= n_perseg.')
fit_size = int(_np.ceil(x.size / n_perseg) * n_perseg)
n_ext = fit_size - x.size
x = _zero_padding(x, n_ext)
return x.reshape(-1, n_perseg)
def _by_samples_with_hop(x: Array, n_perseg: int, hop_size: int) -> Array:
"""Split `x` into segments of lenght `n_perseg` samples. Move the
extraction window `hop_size` samples.
This function automatically applies zero padding for inputs that cannot be
split evenly.
Args:
x: One-dimensional input array.
n_perseg: Length of segments in samples.
hop_size: Hop size in samples
Returns:
Two-dimensional array of segments.
"""
if not (isinstance(n_perseg, int) and isinstance(hop_size, int)):
raise TypeError('Params must be of type int.')
if not 1 < n_perseg <= x.size:
raise ValueError('n_perseg out of range. '
'Expected 1 < n_perseg <= len(x).')
if hop_size < 1:
raise ValueError('hop_size out of range. Expected 1 < hop_size.')
n_hops = (x.size - n_perseg) // hop_size + 1
n_segs = n_hops
if (x.size - n_perseg) % hop_size != 0 and n_perseg > hop_size:
n_segs += 1
fit_size = hop_size * n_hops + n_perseg
n_ext = fit_size - x.size
x = _zero_padding(x, n_ext)
out = _np.empty((n_segs, n_perseg), dtype=x.dtype)
for i in range(n_segs):
off = i * hop_size
out[i] = x[off:off+n_perseg]
return out
def by_samples(x: Array, n_perseg: int, hop_size: int = 0) -> Array:
"""Segment the input into n segments of length n_perseg and move the
window `hop_size` samples.
This function automatically applies zero padding for inputs that cannot be
split evenly.
If `hop_size` is less than one, it is reset to `n_perseg`.
Overlap in percent is calculated as ov = hop_size / n_perseg * 100.
Args:
        x: One-dimensional input array.
        n_perseg: Length of segments in samples.
        hop_size: Hop size in samples. If < 1, hop_size = n_perseg.
Returns:
Two-dimensional array of segments.
"""
if hop_size < 1:
return _by_samples(x, n_perseg)
else:
return _by_samples_with_hop(x, n_perseg, hop_size)
def by_ms(x: Array, fps: int, ms_perseg: int, hop_size: int = 0) -> Array:
"""Segment the input into n segments of length ms_perseg and move the
window `hop_size` milliseconds.
This function automatically applies zero padding for inputs that cannot be
split evenly.
If `hop_size` is less than one, it is reset to `n_perseg`.
Overlap in percent is calculated as ov = hop_size / n_perseg * 100.
Args:
        x: One-dimensional input array.
        fps: Sampling frequency.
        ms_perseg: Length of segments in milliseconds.
        hop_size: Hop size in milliseconds. If < 1, hop_size = n_perseg.
Returns:
Two-dimensional array of segments.
"""
n_perseg = fps * ms_perseg // 1000
hop_size = fps * hop_size // 1000
return by_samples(x, n_perseg, hop_size)
def by_onsets(x: Array, n_perseg: int, ons_idx: Array, off: int = 0
) -> Array:
"""Split input `x` into len(ons_idx) segments of length `n_perseg`.
    Extraction windows start at `ons_idx[i]` + `off`.
    Args:
        x: One-dimensional input array.
        n_perseg: Length of segments in samples.
        ons_idx: One-dimensional array of onset positions.
        off: Length of offset.
Returns:
Two-dimensional array of shape (len(ons_idx), n_perseg).
"""
n_ons = ons_idx.size
out = _np.empty((n_ons, n_perseg), dtype=x.dtype)
for i, idx in enumerate(ons_idx):
pos = idx + off
if pos < 0:
pos = 0
elif pos >= x.size:
pos = x.size - 1
if pos + n_perseg >= x.size:
buff = x[pos:]
out[i] = _zero_padding(buff, n_perseg-buff.size)
else:
out[i] = x[pos:pos+n_perseg]
return out
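# A short hedged example of the array-based entry points; the signal is
# synthetic, so no AudioFile is needed.
sig = _np.sin(2 * _np.pi * 5 * _np.linspace(0, 1, 8000))  # toy 1 s signal
segs = by_samples(sig, n_perseg=1024, hop_size=512)        # 50 % overlap
print(segs.shape)                                          # (15, 1024)
segmenter = Segmentation(n_perseg=1024, n_overlap=512)
print(segmenter.transform(sig).n_segs)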
|
[
"numpy.ceil",
"numpy.empty",
"numpy.lib.stride_tricks.as_strided",
"numpy.expand_dims"
] |
[((12391, 12435), 'numpy.empty', '_np.empty', (['(n_segs, n_perseg)'], {'dtype': 'x.dtype'}), '((n_segs, n_perseg), dtype=x.dtype)\n', (12400, 12435), True, 'import numpy as _np\n'), ((14728, 14771), 'numpy.empty', '_np.empty', (['(n_ons, n_perseg)'], {'dtype': 'x.dtype'}), '((n_ons, n_perseg), dtype=x.dtype)\n', (14737, 14771), True, 'import numpy as _np\n'), ((3905, 3928), 'numpy.expand_dims', '_np.expand_dims', (['out', '(1)'], {}), '(out, 1)\n', (3920, 3928), True, 'import numpy as _np\n'), ((6256, 6313), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['data', 'new_shape', 'new_strides'], {'writeable': '(False)'}), '(data, new_shape, new_strides, writeable=False)\n', (6266, 6313), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((8147, 8187), 'numpy.ceil', '_np.ceil', (['(self._snd.n_frames / n_overlap)'], {}), '(self._snd.n_frames / n_overlap)\n', (8155, 8187), True, 'import numpy as _np\n'), ((11108, 11135), 'numpy.ceil', '_np.ceil', (['(x.size / n_perseg)'], {}), '(x.size / n_perseg)\n', (11116, 11135), True, 'import numpy as _np\n'), ((3756, 3779), 'numpy.expand_dims', '_np.expand_dims', (['seg', '(1)'], {}), '(seg, 1)\n', (3771, 3779), True, 'import numpy as _np\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-18 09:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('series', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='episodes',
name='season',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episodes', to='series.Seasons'),
),
migrations.AlterField(
model_name='seasons',
name='series',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seasons', to='series.Series'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((422, 535), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""episodes"""', 'to': '"""series.Seasons"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='episodes', to='series.Seasons')\n", (439, 535), False, 'from django.db import migrations, models\n'), ((653, 764), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""seasons"""', 'to': '"""series.Series"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='seasons', to='series.Series')\n", (670, 764), False, 'from django.db import migrations, models\n')]
|
from abc import ABC, abstractmethod
from typing import Tuple, Union, Optional, Iterable
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
from pyPDP.surrogate_models import SurrogateModel
from pyPDP.utils.plotting import get_ax, check_and_set_axis
from pyPDP.utils.utils import get_hyperparameters, get_selected_idx, ConfigSpaceHolder
class AcquisitionFunction(ConfigSpaceHolder, ABC):
def __init__(self,
config_space: CS.ConfigurationSpace,
surrogate_model: SurrogateModel,
samples_for_optimization: int = 100,
minimize_objective: bool = True,
seed=None):
super().__init__(config_space, seed=seed)
self.surrogate_model = surrogate_model
self.n_samples_for_optimization = samples_for_optimization
self.minimize_objective = minimize_objective
@abstractmethod
def __call__(self, configuration: CS.Configuration) -> Union[float, np.ndarray]:
pass
def update(self, eta: float):
pass
def get_optimum(self) -> CS.Configuration:
return self._get_optimum_uniform_distribution()[0]
def _get_optimum_uniform_distribution(self) -> Tuple[CS.Configuration, float]:
configs = self.config_space.sample_configuration(self.n_samples_for_optimization)
values = self(configs)
config_value_pairs = [(config, value) for config, value in zip(configs, values)]
return max(config_value_pairs, key=lambda x: x[1])
def convert_configs(self, configuration: Union[CS.Configuration, np.ndarray]):
if isinstance(configuration, CS.Configuration):
x = np.asarray(configuration.get_array())
x = x.reshape([1, -1])
elif isinstance(configuration, list):
x = []
for config in configuration:
if isinstance(config, CS.Configuration):
x.append(config.get_array())
else:
x.append(config.copy())
x = np.asarray(x)
else:
x = configuration.copy()
return x
def plot(self,
color_acquisition="darkgreen",
color_optimum="red",
show_optimum=True,
x_hyperparameters: Optional[Iterable[CSH.Hyperparameter]] = None,
ax: Optional[plt.Axes] = None):
ax = get_ax(ax)
x_hyperparameters = get_hyperparameters(x_hyperparameters, self.config_space)
check_and_set_axis(ax, x_hyperparameters, ylabel="Acquisition")
# Sample configs and get values of acquisition function
configs = self.config_space.sample_configuration(self.n_samples_for_optimization * len(x_hyperparameters))
acquisition_y = np.asarray([self(x) for x in configs]).reshape(-1)
x = np.asarray([[config[hp.name] for hp in x_hyperparameters] for config in configs])
# Get optimum
optimum = self.get_optimum()
# Plot
n_hyperparameters = len(tuple(x_hyperparameters))
if n_hyperparameters == 1: # 1D
# Sort by x axis
order = np.argsort(x, axis=0)[:, 0]
x = x[order, 0]
acquisition_y = acquisition_y[order]
ax.fill_between(x, acquisition_y, color=color_acquisition, alpha=0.3)
ax.plot(x, acquisition_y, color=color_acquisition, label=self.__class__.__name__)
if show_optimum:
ax.plot(list(optimum.values())[0], self(optimum), "*", color=color_optimum, label=f"Optimum ({optimum})",
markersize=15)
elif n_hyperparameters == 2: # 2D
idx = get_selected_idx(x_hyperparameters, self.config_space)
raise NotImplementedError("2D currently not implemented (#TODO)")
else:
raise NotImplementedError(f"Plotting for {n_hyperparameters} dimensions not implemented. "
"Please select a specific hp by setting `x_hyperparemeters`")
class ExpectedImprovement(AcquisitionFunction):
def __init__(
self,
config_space,
surrogate_model: SurrogateModel,
eps: float = 0.0, # Exploration parameter
samples_for_optimization=100,
minimize_objective=True,
seed=None
):
super().__init__(
config_space,
surrogate_model,
samples_for_optimization,
minimize_objective, seed=seed
)
if not minimize_objective:
raise NotImplementedError('EI for maximization')
self.eta = 0
self.exploration = eps
def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
x = self.convert_configs(configuration)
mean, sigma = self.surrogate_model.predict(x)
Z = (self.eta - mean - self.exploration) / sigma
Phi_Z = norm.cdf(Z)
phi_Z = norm.pdf(Z)
ret = sigma * (Z * Phi_Z + phi_Z)
ret[sigma == 0] = 0
return ret
def update(self, eta: float):
self.eta = eta
class ProbabilityOfImprovement(AcquisitionFunction):
def __init__(self,
config_space: CS.ConfigurationSpace,
surrogate_model: SurrogateModel,
eps: float = 0.1, # Exploration parameter
samples_for_optimization: int = 100,
minimize_objective=True,
seed=None):
super().__init__(config_space, surrogate_model, samples_for_optimization=samples_for_optimization,
minimize_objective=minimize_objective, seed=seed)
self.eta = 0
self.exploration = eps
def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
x = self.convert_configs(configuration)
mean, sigma = self.surrogate_model.predict(x)
if self.minimize_objective:
temp = (self.eta - mean - self.exploration) / sigma
else:
temp = (mean - self.eta - self.exploration) / sigma
prob_of_improvement = norm.cdf(temp)
prob_of_improvement[sigma == 0] = 0
return prob_of_improvement
def update(self, eta: float):
self.eta = eta
class LowerConfidenceBound(AcquisitionFunction):
"""LCB"""
def __init__(self,
config_space: CS.ConfigurationSpace,
surrogate_model: SurrogateModel,
tau: float = 5,
samples_for_optimization=100,
minimize_objective=True,
seed=None):
super().__init__(config_space, surrogate_model, samples_for_optimization, minimize_objective=minimize_objective,
seed=seed)
self.tau = tau
def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
x = self.convert_configs(configuration)
mean, sigma = self.surrogate_model.predict(x)
if self.minimize_objective:
return - mean + self.tau * sigma
else:
return mean + self.tau * sigma
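# Editorial sketch (not part of pyPDP): a quick numerical self-check of the
# closed-form expected improvement used in ExpectedImprovement.__call__.
# For a Gaussian prediction N(mean, sigma^2) and incumbent eta (minimization,
# exploration eps = 0), EI = sigma * (Z * Phi(Z) + phi(Z)) with
# Z = (eta - mean) / sigma, which matches a Monte-Carlo estimate of
# E[max(eta - Y, 0)]. The toy numbers below are illustrative assumptions.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    eta, mean, sigma = 0.5, 0.2, 0.3
    z = (eta - mean) / sigma
    closed_form = sigma * (z * norm.cdf(z) + norm.pdf(z))
    monte_carlo = np.maximum(eta - rng.normal(mean, sigma, 10 ** 6), 0.0).mean()
    print(f"closed form: {closed_form:.4f}, monte carlo: {monte_carlo:.4f}")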
|
[
"pyPDP.utils.utils.get_hyperparameters",
"pyPDP.utils.plotting.get_ax",
"numpy.asarray",
"scipy.stats.norm.pdf",
"scipy.stats.norm.cdf",
"numpy.argsort",
"pyPDP.utils.utils.get_selected_idx",
"pyPDP.utils.plotting.check_and_set_axis"
] |
[((2469, 2479), 'pyPDP.utils.plotting.get_ax', 'get_ax', (['ax'], {}), '(ax)\n', (2475, 2479), False, 'from pyPDP.utils.plotting import get_ax, check_and_set_axis\n'), ((2508, 2565), 'pyPDP.utils.utils.get_hyperparameters', 'get_hyperparameters', (['x_hyperparameters', 'self.config_space'], {}), '(x_hyperparameters, self.config_space)\n', (2527, 2565), False, 'from pyPDP.utils.utils import get_hyperparameters, get_selected_idx, ConfigSpaceHolder\n'), ((2574, 2637), 'pyPDP.utils.plotting.check_and_set_axis', 'check_and_set_axis', (['ax', 'x_hyperparameters'], {'ylabel': '"""Acquisition"""'}), "(ax, x_hyperparameters, ylabel='Acquisition')\n", (2592, 2637), False, 'from pyPDP.utils.plotting import get_ax, check_and_set_axis\n'), ((2905, 2990), 'numpy.asarray', 'np.asarray', (['[[config[hp.name] for hp in x_hyperparameters] for config in configs]'], {}), '([[config[hp.name] for hp in x_hyperparameters] for config in\n configs])\n', (2915, 2990), True, 'import numpy as np\n'), ((5016, 5027), 'scipy.stats.norm.cdf', 'norm.cdf', (['Z'], {}), '(Z)\n', (5024, 5027), False, 'from scipy.stats import norm\n'), ((5044, 5055), 'scipy.stats.norm.pdf', 'norm.pdf', (['Z'], {}), '(Z)\n', (5052, 5055), False, 'from scipy.stats import norm\n'), ((6221, 6235), 'scipy.stats.norm.cdf', 'norm.cdf', (['temp'], {}), '(temp)\n', (6229, 6235), False, 'from scipy.stats import norm\n'), ((2120, 2133), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2130, 2133), True, 'import numpy as np\n'), ((3211, 3232), 'numpy.argsort', 'np.argsort', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3221, 3232), True, 'import numpy as np\n'), ((3745, 3799), 'pyPDP.utils.utils.get_selected_idx', 'get_selected_idx', (['x_hyperparameters', 'self.config_space'], {}), '(x_hyperparameters, self.config_space)\n', (3761, 3799), False, 'from pyPDP.utils.utils import get_hyperparameters, get_selected_idx, ConfigSpaceHolder\n')]
|
#!/usr/bin/env python
# Copyright (c) 2020 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Cabot UI Manager
This class manages the state of the robot.
It handles low-level events and maps them to high-level events that may change the state of the robot.
Ideally, this class would have a plugin architecture for adding new UI components, but that is not the current goal.
So, all controls that need to see the current state of the robot are managed by this code.
Low-level events (cabot.event) should be mapped into UI-level events (cabot_ui.event).
Author: <NAME><<EMAIL>>
"""
import traceback
import rospy
import std_msgs.msg
import std_srvs.srv
import cabot
from cabot import util
import cabot.button
from cabot.event import BaseEvent, ButtonEvent, ClickEvent, JoyButtonEvent, JoyClickEvent
from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent
from cabot_ui.menu import Menu
from cabot_ui.status import State, StatusManager
from cabot_ui.interface import UserInterface
from cabot_ui.navigation import Navigation
from cabot_ui.exploration import Exploration
class CabotUIManager(object):
def __init__(self):
self.main_menu = Menu.create_menu({"menu":"main_menu"}, name_space=rospy.get_name())
self.speed_menu = None
if self.main_menu:
self.main_menu.delegate = self
self.speed_menu = self.main_menu.get_menu_by_identifier("max_velocity_menu")
else:
rospy.logerr("menu is not initialized")
if self.speed_menu:
init_speed = self.speed_menu.value
try:
init_speed = float(rospy.get_param("~init_speed", self.speed_menu.value))
except ValueError:
pass
rospy.logdebug("Initial Speed = %.2f", init_speed)
self.speed_menu.set_value(init_speed)
self.menu_stack = []
self.in_navigation = False
self.destination = None
self.reset()
self._event_mapper = EventMapper()
self._event_mapper.delegate = self
self._status_manager = StatusManager.get_instance()
self._status_manager.delegate = self
self._interface = UserInterface()
self._interface.delegate = self
self._navigation = Navigation()
self._navigation.delegate = self
self._exploration = Exploration()
self._exploration.delegate = self
self._retry_count = 0
rospy.Subscriber("/cabot/event", std_msgs.msg.String,
self._event_callback, None)
self._eventPub = rospy.Publisher("/cabot/event", std_msgs.msg.String, queue_size=1)
rospy.wait_for_service("set_touch_speed_active_mode")
self._touchModeProxy = rospy.ServiceProxy("set_touch_speed_active_mode", std_srvs.srv.SetBool)
rospy.wait_for_service("/cabot/user_speed_enabled")
self._userSpeedEnabledProxy = rospy.ServiceProxy("/cabot/user_speed_enabled", std_srvs.srv.SetBool)
### navigation delegate
def i_am_ready(self):
self._interface.i_am_ready()
def start_navigation(self, pose):
self._interface.start_navigation(pose)
def notify_turn(self, turn=None, pose=None):
self._interface.notify_turn(turn=turn, pose=pose)
def notify_human(self, angle=0, pose=None):
self._interface.notify_human(angle=angle, pose=pose)
def goal_canceled(self, goal):
# unexpected cancel, may need to retry
if self._status_manager.state == State.in_action:
rospy.loginfo("NavigationState: canceled (system)")
self._status_manager.set_state(State.in_pausing)
self._retry_navigation()
return
rospy.loginfo("NavigationState: canceled (user)")
@util.setInterval(2, times=1)
def _retry_navigation(self):
self._retry_count += 1
rospy.loginfo("NavigationState: retrying (system)")
self._navigation.retry_navigation()
self._status_manager.set_state(State.in_action)
rospy.loginfo("NavigationState: retried (system)")
def have_arrived(self, goal):
#rospy.loginfo("delegate have_arrived called")
#self._interface.have_arrived(goal)
rospy.loginfo("NavigationState: arrived")
# notify external nodes about arrival
e = NavigationEvent("arrived", None)
msg = std_msgs.msg.String()
msg.data = str(e)
self._eventPub.publish(msg)
def approaching_to_poi(self, poi=None, pose=None):
self._interface.approaching_to_poi(poi=poi, pose=pose)
def approached_to_poi(self, poi=None, pose=None):
self._interface.approached_to_poi(poi=poi, pose=pose)
def passed_poi(self, poi=None, pose=None):
self._interface.passed_poi(poi=poi, pose=pose)
# def request_action(self, goal=None, pose=None):
# self._interface.request_action(goal=goal, pose=pose)
#
# def completed_action(self, goal=None, pose=None):
# self._interface.completed_action(goal=goal, pose=pose)
#
def could_not_get_current_locaion(self):
self._interface.could_not_get_current_locaion()
def enter_goal(self, goal):
self._interface.enter_goal(goal)
def exit_goal(self, goal):
self._interface.exit_goal(goal)
def announce_social(self, message):
self._interface.announce_social(message)
def please_call_elevator(self, pos):
self._interface.please_call_elevator(pos)
def elevator_opening(self, pose):
self._interface.elevator_opening(pose)
def floor_changed(self, floor):
self._interface.floor_changed(floor)
def queue_start_arrived(self):
self._interface.queue_start_arrived()
def queue_proceed(self, pose=None):
self._interface.queue_proceed(pose=pose)
def queue_target_arrived(self):
self._interface.queue_target_arrived()
def please_pass_door(self):
self._interface.please_pass_door()
def door_passed(self):
self._interface.door_passed()
###
def _event_callback(self, msg):
event = BaseEvent.parse(msg.data)
if event is None:
rospy.logerr("cabot event %s cannot be parsed", msg.data)
return
self.process_event(event)
def reset(self):
"""reset menu"""
if self.main_menu:
self.main_menu.reset()
self.menu_stack = [self.main_menu]
# menu delegate method
def menu_selected(self, menu):
rospy.logdebug("menu_selected, %s, %s"%(menu.identifier, menu.type))
if menu.identifier == "destination_menu":
event = NavigationEvent("destination", menu.value.value)
self.process_event(event)
if menu.identifier == "main_menu" and menu.value is not None:
rospy.loginfo(menu.value)
rospy.loginfo(menu.value.identifier)
if menu.value.identifier == "exploration_menu":
event = ExplorationEvent("start")
self.process_event(event)
# event delegate method
def process_event(self, event):
'''
all events go through this method
'''
#rospy.loginfo("process_event %s", str(event))
self._event_mapper.push(event)
self._process_menu_event(event)
self._process_navigation_event(event)
self._process_exploration_event(event)
def _process_menu_event(self, event):
'''
process only menu event
'''
if event.type != MenuEvent.TYPE:
return
curr_menu = self.menu_stack[-1]
if event.subtype == "next":
curr_menu.next()
self._interface.menu_changed(menu=curr_menu)
elif event.subtype == "prev":
curr_menu.prev()
self._interface.menu_changed(menu=curr_menu)
elif event.subtype == "select":
selected = curr_menu.select()
if selected is None: ## from main menu
if curr_menu.value is None:
curr_menu.next()
selected = curr_menu
elif not selected.can_explore:
self.reset()
elif selected is not curr_menu:
self.menu_stack.append(selected)
if selected.value is None:
selected.next()
self._interface.menu_changed(menu=selected, usage=True)
elif event.subtype == "back":
if len(self.menu_stack) > 1:
self.menu_stack.pop()
curr_menu = self.menu_stack[-1]
self._interface.menu_changed(menu=curr_menu, backed=True)
self.speed = 0
#self.cancel_pub.publish(True)
self._navigation.pause_navigation()
def _process_navigation_event(self, event):
if event.type != NavigationEvent.TYPE:
return
if event.subtype == "speedup":
self.speed_menu.prev()
self._interface.menu_changed(menu=self.speed_menu)
e = NavigationEvent("sound", "SpeedUp")
msg = std_msgs.msg.String()
msg.data = str(e)
self._eventPub.publish(msg)
if event.subtype == "speeddown":
self.speed_menu.next()
self._interface.menu_changed(menu=self.speed_menu)
e = NavigationEvent("sound", "SpeedDown")
msg = std_msgs.msg.String()
msg.data = str(e)
self._eventPub.publish(msg)
if event.subtype == "destination":
rospy.loginfo("Destination: "+event.param)
self._retry_count = 0
self._navigation.set_destination(event.param)
self.destination = event.param
## change handle mode
try:
self._touchModeProxy(True)
except rospy.ServiceException as e:
rospy.loginfo("Could not set touch mode to True")
try:
self._userSpeedEnabledProxy(True)
except rospy.ServiceException as e:
rospy.loginfo("Could not set user speed enabled to True")
## change state
# change to waiting_action by using actionlib
self._status_manager.set_state(State.in_action)
if event.subtype == "summons":
rospy.loginfo("Summons Destination: "+event.param)
self._navigation.set_destination(event.param)
self.destination = event.param
## change handle mode
try:
self._touchModeProxy(False)
except rospy.ServiceException as e:
rospy.loginfo("Could not set touch mode to False")
try:
self._userSpeedEnabledProxy(False)
except rospy.ServiceException as e:
rospy.loginfo("Could not set user speed enabled to False")
## change state
# change to waiting_action by using actionlib
self._status_manager.set_state(State.in_summons)
if event.subtype == "event":
self._navigation.process_event(event)
if event.subtype == "cancel":
rospy.loginfo("NavigationState: User Cancel requested")
if self._status_manager.state == State.in_action or \
self._status_manager.state == State.in_summons:
rospy.loginfo("NavigationState: canceling (user)")
self._interface.cancel_navigation()
self._navigation.cancel_navigation()
self.in_navigation = False
self.destination = None
self._status_manager.set_state(State.idle)
rospy.loginfo("NavigationState: canceled (user)")
else:
rospy.loginfo("NavigationState: state is not in action state={}".format(self._status_manager.state))
if event.subtype == "pause":
rospy.loginfo("NavigationState: User Pause requested")
if self._status_manager.state == State.in_action or \
self._status_manager.state == State.in_summons:
rospy.loginfo("NavigationState: pausing (user)")
self._status_manager.set_state(State.in_pausing)
self._interface.pause_navigation()
self._navigation.pause_navigation()
self._status_manager.set_state(State.in_pause)
rospy.loginfo("NavigationState: paused (user)")
else:
# force to pause state
rospy.loginfo("NavigationState: state is not in action state={}".format(self._status_manager.state))
#self._status_manager.set_state(State.in_pausing)
#self._navigation.pause_navigation()
#self._status_manager.set_state(State.in_pause)
if event.subtype == "resume":
if self.destination is not None:
rospy.loginfo("NavigationState: User Resume requested")
if self._status_manager.state == State.in_pause:
rospy.loginfo("NavigationState: resuming (user)")
self._interface.resume_navigation()
self._navigation.resume_navigation()
self._status_manager.set_state(State.in_action)
rospy.loginfo("NavigationState: resumed (user)")
else:
rospy.loginfo("NavigationState: state is not in pause state")
else:
rospy.loginfo("NavigationState: Next")
e = NavigationEvent("next", None)
msg = std_msgs.msg.String()
msg.data = str(e)
self._eventPub.publish(msg)
if event.subtype == "arrived":
self.destination = None
def _process_exploration_event(self, event):
if event.type != ExplorationEvent.TYPE:
return
if event.subtype == "start":
self._interface.start_exploration()
self._exploration.start_exploration()
class EventMapper(object):
def __init__(self):
self._manager = StatusManager.get_instance()
def push(self, event):
state = self._manager.state
if event.type != ButtonEvent.TYPE and event.type != ClickEvent.TYPE:
return
mevent = None
# simplify the control
mevent = self.map_button_to_navigation(event)
'''
if state == State.idle:
mevent = self.map_button_to_menu(event)
elif state == State.in_action or state == State.waiting_action:
mevent = self.map_button_to_navigation(event)
elif state == State.in_pause or state == State.waiting_pause:
mevent = self.map_button_to_navigation(event)
'''
if mevent:
self.delegate.process_event(mevent)
def map_button_to_menu(self, event):
if event.type == "click" and event.count == 1:
if event.buttons == cabot.button.BUTTON_NEXT:
return MenuEvent(subtype="next")
if event.buttons == cabot.button.BUTTON_PREV:
return MenuEvent(subtype="prev")
if event.buttons == cabot.button.BUTTON_SELECT:
return MenuEvent(subtype="select")
elif event.type == "click" and event.count == 2:
if event.buttons == cabot.button.BUTTON_SELECT:
return MenuEvent(subtype="back")
return None
def map_button_to_navigation(self, event):
if event.type == "button" and event.down:
if event.button == cabot.button.BUTTON_UP:
return NavigationEvent(subtype="speedup")
if event.button == cabot.button.BUTTON_DOWN:
return NavigationEvent(subtype="speeddown")
if event.button == cabot.button.BUTTON_LEFT:
return NavigationEvent(subtype="pause")
if event.button == cabot.button.BUTTON_RIGHT:
return NavigationEvent(subtype="resume")
'''
if event.button == cabot.button.BUTTON_SELECT:
return NavigationEvent(subtype="pause")
if event.type == "click":
if event.buttons == cabot.button.BUTTON_SELECT and event.count == 2:
return NavigationEvent(subtype="cancel")
if event.buttons == cabot.button.BUTTON_NEXT and event.count == 2:
return NavigationEvent(subtype="resume")
'''
return None
if __name__ == "__main__":
rospy.init_node("cabot_ui_manager", log_level=rospy.DEBUG)
try:
CabotUIManager()
except:
rospy.logerr(traceback.format_exc())
rospy.spin()
|
[
"rospy.logerr",
"cabot_ui.status.StatusManager.get_instance",
"rospy.Subscriber",
"cabot_ui.exploration.Exploration",
"rospy.ServiceProxy",
"cabot_ui.navigation.Navigation",
"rospy.get_name",
"rospy.init_node",
"traceback.format_exc",
"rospy.wait_for_service",
"cabot_ui.interface.UserInterface",
"rospy.loginfo",
"rospy.logdebug",
"cabot.event.BaseEvent.parse",
"cabot_ui.event.NavigationEvent",
"cabot.util.setInterval",
"rospy.Publisher",
"rospy.get_param",
"cabot_ui.event.MenuEvent",
"rospy.spin",
"cabot_ui.event.ExplorationEvent"
] |
[((4774, 4802), 'cabot.util.setInterval', 'util.setInterval', (['(2)'], {'times': '(1)'}), '(2, times=1)\n', (4790, 4802), False, 'from cabot import util\n'), ((17544, 17602), 'rospy.init_node', 'rospy.init_node', (['"""cabot_ui_manager"""'], {'log_level': 'rospy.DEBUG'}), "('cabot_ui_manager', log_level=rospy.DEBUG)\n", (17559, 17602), False, 'import rospy\n'), ((17698, 17710), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (17708, 17710), False, 'import rospy\n'), ((3096, 3124), 'cabot_ui.status.StatusManager.get_instance', 'StatusManager.get_instance', ([], {}), '()\n', (3122, 3124), False, 'from cabot_ui.status import State, StatusManager\n'), ((3196, 3211), 'cabot_ui.interface.UserInterface', 'UserInterface', ([], {}), '()\n', (3209, 3211), False, 'from cabot_ui.interface import UserInterface\n'), ((3279, 3291), 'cabot_ui.navigation.Navigation', 'Navigation', ([], {}), '()\n', (3289, 3291), False, 'from cabot_ui.navigation import Navigation\n'), ((3361, 3374), 'cabot_ui.exploration.Exploration', 'Exploration', ([], {}), '()\n', (3372, 3374), False, 'from cabot_ui.exploration import Exploration\n'), ((3457, 3542), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/cabot/event"""', 'std_msgs.msg.String', 'self._event_callback', 'None'], {}), "('/cabot/event', std_msgs.msg.String, self._event_callback,\n None)\n", (3473, 3542), False, 'import rospy\n'), ((3589, 3655), 'rospy.Publisher', 'rospy.Publisher', (['"""/cabot/event"""', 'std_msgs.msg.String'], {'queue_size': '(1)'}), "('/cabot/event', std_msgs.msg.String, queue_size=1)\n", (3604, 3655), False, 'import rospy\n'), ((3665, 3718), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""set_touch_speed_active_mode"""'], {}), "('set_touch_speed_active_mode')\n", (3687, 3718), False, 'import rospy\n'), ((3750, 3821), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""set_touch_speed_active_mode"""', 'std_srvs.srv.SetBool'], {}), "('set_touch_speed_active_mode', std_srvs.srv.SetBool)\n", (3768, 3821), False, 'import rospy\n'), ((3831, 3882), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/cabot/user_speed_enabled"""'], {}), "('/cabot/user_speed_enabled')\n", (3853, 3882), False, 'import rospy\n'), ((3921, 3990), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/cabot/user_speed_enabled"""', 'std_srvs.srv.SetBool'], {}), "('/cabot/user_speed_enabled', std_srvs.srv.SetBool)\n", (3939, 3990), False, 'import rospy\n'), ((4718, 4767), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: canceled (user)"""'], {}), "('NavigationState: canceled (user)')\n", (4731, 4767), False, 'import rospy\n'), ((4875, 4926), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: retrying (system)"""'], {}), "('NavigationState: retrying (system)')\n", (4888, 4926), False, 'import rospy\n'), ((5035, 5085), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: retried (system)"""'], {}), "('NavigationState: retried (system)')\n", (5048, 5085), False, 'import rospy\n'), ((5228, 5269), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: arrived"""'], {}), "('NavigationState: arrived')\n", (5241, 5269), False, 'import rospy\n'), ((5329, 5361), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', (['"""arrived"""', 'None'], {}), "('arrived', None)\n", (5344, 5361), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((7094, 7119), 'cabot.event.BaseEvent.parse', 'BaseEvent.parse', (['msg.data'], {}), '(msg.data)\n', (7109, 7119), False, 'from cabot.event import BaseEvent, ButtonEvent, ClickEvent, JoyButtonEvent, JoyClickEvent\n'), ((7492, 7562), 'rospy.logdebug', 'rospy.logdebug', (["('menu_selected, %s, %s' % (menu.identifier, menu.type))"], {}), "('menu_selected, %s, %s' % (menu.identifier, menu.type))\n", (7506, 7562), False, 'import rospy\n'), ((15119, 15147), 'cabot_ui.status.StatusManager.get_instance', 'StatusManager.get_instance', ([], {}), '()\n', (15145, 15147), False, 'from cabot_ui.status import State, StatusManager\n'), ((2452, 2491), 'rospy.logerr', 'rospy.logerr', (['"""menu is not initialized"""'], {}), "('menu is not initialized')\n", (2464, 2491), False, 'import rospy\n'), ((2753, 2803), 'rospy.logdebug', 'rospy.logdebug', (['"""Initial Speed = %.2f"""', 'init_speed'], {}), "('Initial Speed = %.2f', init_speed)\n", (2767, 2803), False, 'import rospy\n'), ((4541, 4592), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: canceled (system)"""'], {}), "('NavigationState: canceled (system)')\n", (4554, 4592), False, 'import rospy\n'), ((7158, 7215), 'rospy.logerr', 'rospy.logerr', (['"""cabot event %s cannot be parsed"""', 'msg.data'], {}), "('cabot event %s cannot be parsed', msg.data)\n", (7170, 7215), False, 'import rospy\n'), ((7631, 7679), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', (['"""destination"""', 'menu.value.value'], {}), "('destination', menu.value.value)\n", (7646, 7679), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((7801, 7826), 'rospy.loginfo', 'rospy.loginfo', (['menu.value'], {}), '(menu.value)\n', (7814, 7826), False, 'import rospy\n'), ((7839, 7875), 'rospy.loginfo', 'rospy.loginfo', (['menu.value.identifier'], {}), '(menu.value.identifier)\n', (7852, 7875), False, 'import rospy\n'), ((10048, 10083), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', (['"""sound"""', '"""SpeedUp"""'], {}), "('sound', 'SpeedUp')\n", (10063, 10083), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((10351, 10388), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', (['"""sound"""', '"""SpeedDown"""'], {}), "('sound', 'SpeedDown')\n", (10366, 10388), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((10556, 10600), 'rospy.loginfo', 'rospy.loginfo', (["('Destination: ' + event.param)"], {}), "('Destination: ' + event.param)\n", (10569, 10600), False, 'import rospy\n'), ((11330, 11382), 'rospy.loginfo', 'rospy.loginfo', (["('Summons Destination: ' + event.param)"], {}), "('Summons Destination: ' + event.param)\n", (11343, 11382), False, 'import rospy\n'), ((12170, 12225), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: User Cancel requested"""'], {}), "('NavigationState: User Cancel requested')\n", (12183, 12225), False, 'import rospy\n'), ((12920, 12974), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: User Pause requested"""'], {}), "('NavigationState: User Pause requested')\n", (12933, 12974), False, 'import rospy\n'), ((2217, 2233), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (2231, 2233), False, 'import rospy\n'), ((7960, 7985), 'cabot_ui.event.ExplorationEvent', 'ExplorationEvent', (['"""start"""'], {}), "('start')\n", (7976, 7985), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((12371, 12421), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: canceling (user)"""'], {}), "('NavigationState: canceling (user)')\n", (12384, 12421), False, 'import rospy\n'), ((12685, 12734), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: canceled (user)"""'], {}), "('NavigationState: canceled (user)')\n", (12698, 12734), False, 'import rospy\n'), ((13120, 13168), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: pausing (user)"""'], {}), "('NavigationState: pausing (user)')\n", (13133, 13168), False, 'import rospy\n'), ((13416, 13463), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: paused (user)"""'], {}), "('NavigationState: paused (user)')\n", (13429, 13463), False, 'import rospy\n'), ((13921, 13976), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: User Resume requested"""'], {}), "('NavigationState: User Resume requested')\n", (13934, 13976), False, 'import rospy\n'), ((14500, 14538), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: Next"""'], {}), "('NavigationState: Next')\n", (14513, 14538), False, 'import rospy\n'), ((14559, 14588), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', (['"""next"""', 'None'], {}), "('next', None)\n", (14574, 14588), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((16062, 16087), 'cabot_ui.event.MenuEvent', 'MenuEvent', ([], {'subtype': '"""next"""'}), "(subtype='next')\n", (16071, 16087), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((16169, 16194), 'cabot_ui.event.MenuEvent', 'MenuEvent', ([], {'subtype': '"""prev"""'}), "(subtype='prev')\n", (16178, 16194), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((16278, 16305), 'cabot_ui.event.MenuEvent', 'MenuEvent', ([], {'subtype': '"""select"""'}), "(subtype='select')\n", (16287, 16305), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((16668, 16702), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', ([], {'subtype': '"""speedup"""'}), "(subtype='speedup')\n", (16683, 16702), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((16783, 16819), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', ([], {'subtype': '"""speeddown"""'}), "(subtype='speeddown')\n", (16798, 16819), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((16900, 16932), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', ([], {'subtype': '"""pause"""'}), "(subtype='pause')\n", (16915, 16932), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((17014, 17047), 'cabot_ui.event.NavigationEvent', 'NavigationEvent', ([], {'subtype': '"""resume"""'}), "(subtype='resume')\n", (17029, 17047), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n'), ((17670, 17692), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17690, 17692), False, 'import traceback\n'), ((2632, 2685), 'rospy.get_param', 'rospy.get_param', (['"""~init_speed"""', 'self.speed_menu.value'], {}), "('~init_speed', self.speed_menu.value)\n", (2647, 2685), False, 'import rospy\n'), ((10892, 10941), 'rospy.loginfo', 'rospy.loginfo', (['"""Could not set touch mode to True"""'], {}), "('Could not set touch mode to True')\n", (10905, 10941), False, 'import rospy\n'), ((11073, 11130), 'rospy.loginfo', 'rospy.loginfo', (['"""Could not set user speed enabled to True"""'], {}), "('Could not set user speed enabled to True')\n", (11086, 11130), False, 'import rospy\n'), ((11641, 11691), 'rospy.loginfo', 'rospy.loginfo', (['"""Could not set touch mode to False"""'], {}), "('Could not set touch mode to False')\n", (11654, 11691), False, 'import rospy\n'), ((11824, 11882), 'rospy.loginfo', 'rospy.loginfo', (['"""Could not set user speed enabled to False"""'], {}), "('Could not set user speed enabled to False')\n", (11837, 11882), False, 'import rospy\n'), ((14062, 14111), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: resuming (user)"""'], {}), "('NavigationState: resuming (user)')\n", (14075, 14111), False, 'import rospy\n'), ((14313, 14361), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: resumed (user)"""'], {}), "('NavigationState: resumed (user)')\n", (14326, 14361), False, 'import rospy\n'), ((14404, 14465), 'rospy.loginfo', 'rospy.loginfo', (['"""NavigationState: state is not in pause state"""'], {}), "('NavigationState: state is not in pause state')\n", (14417, 14465), False, 'import rospy\n'), ((16446, 16471), 'cabot_ui.event.MenuEvent', 'MenuEvent', ([], {'subtype': '"""back"""'}), "(subtype='back')\n", (16455, 16471), False, 'from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent\n')]
|
import time
import urllib.request
import concurrent.futures
url_list = [
"https://www.baidu.com", "https://www.qq.com", "https://www.sogou.com",
"https://www.cnblogs.com"
]
def get_html(url, timeout=10):
with urllib.request.urlopen(url, timeout=timeout) as conn:
return conn.read()
def main():
start_time = time.time()
with concurrent.futures.ThreadPoolExecutor() as executor:
        # a dict keyed by future would let us recover each url from its
        # completed future (see the main_with_urls sketch below)
tasks = [executor.submit(get_html, url) for url in url_list]
        # iterate over the future objects as they complete
for task in concurrent.futures.as_completed(tasks):
try:
result = task.result()
except Exception as ex:
print(ex)
else:
print(len(result))
print(time.time() - start_time)
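# Editorial sketch of the dict-based variant hinted at in the comment above:
# keying the futures by url lets each completed result be reported together
# with its source url. Only the same stdlib APIs as main() are used; the
# function name main_with_urls is ours.
def main_with_urls():
    start_time = time.time()
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_url = {executor.submit(get_html, url): url for url in url_list}
        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]
            try:
                result = future.result()
            except Exception as ex:
                print(url, "failed:", ex)
            else:
                print(url, "->", len(result), "bytes")
    print(time.time() - start_time)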
if __name__ == "__main__":
main()
|
[
"time.time"
] |
[((336, 347), 'time.time', 'time.time', ([], {}), '()\n', (345, 347), False, 'import time\n'), ((783, 794), 'time.time', 'time.time', ([], {}), '()\n', (792, 794), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
import os
import subprocess
from pathlib import Path
# -- Breathe Configs for ReadTheDocs ------------------------------------------
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if on_rtd:
subprocess.run("cd ..; doxygen alfasim_sdk_api.cfg", shell=True)
breathe_projects = {"alfasim_sdk_api": "../alfasim_sdk_api/xml"}
else:
breathe_projects = {
"alfasim_sdk_api": "../_build/breathe/doxygen/alfasim_sdk_api/xml"
}
# -- Breathe Configs -------------------------------------------------------
breathe_default_project = "alfasim_sdk_api"
alfasim_sdk_api_project_folder = (
Path(os.getcwd()).parents[1] / "src/alfasim_sdk/alfasim_sdk_api"
)
breathe_projects_source = {
"alfasim_sdk_api": (
alfasim_sdk_api_project_folder,
["common.h", "api.h", "detail/bootstrap_win.h"],
)
}
breathe_doxygen_config_options = {
"MACRO_EXPANSION": "YES",
"EXPAND_ONLY_PREDEF": "YES",
"PREDEFINED": "DLL_EXPORT= DOXYGEN_SHOULD_SKIP_THIS",
}
# -- Project information -----------------------------------------------------
project = ""
copyright = "2019, ESSS"
author = "ESSS"
version = ""
release = ""
# -- Options for Graphviz -------------------------------------------------
graphviz_dot = "dot"
graphviz_dot_args = ["-Tsvg"]
graphviz_output_format = "svg"
# -- General configuration ---------------------------------------------------
extensions = [
"breathe",
"sphinx.ext.autodoc",
"sphinx_click.ext",
"sphinx.ext.graphviz",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx_inline_tabs",
"sphinx_copybutton",
"sphinx_togglebutton",
]
add_module_names = False
autodoc_typehints = "none"
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
rst_prolog = """
.. |alfasim| replace:: :program:`ALFAsim`
.. |sdk| replace:: :program:`ALFAsim-SDK`
.. |template-command| replace:: :ref:`Template Command <alfasim_sdk_cli_template_section>`
.. |gui_hook| replace:: :py:func:`alfasim_get_data_model_type`
.. |container| replace:: :py:func:`alfasim_sdk.container_model`
.. |model| replace:: :py:func:`alfasim_sdk.data_model`
.. |s_variable| replace:: :py:func:`alfasim_sdk.SecondaryVariable`
.. |s_variable_hook| replace:: :py:func:`alfasim_get_additional_variables`
.. |marker_1| image:: /_static/images/marker_1.png
:scale: 80%
.. |marker_2| image:: /_static/images/marker_2.png
:scale: 80%
.. # define a hard line break for HTML
.. |br| raw:: html
<br />
.. |tracer_warning_text| replace::
This is advanced customization. We strongly encourage the plugin developer to read the Tracers chapter of
|alfasim|'s Technical Report available on the `Help` menu at |alfasim| application.
.. |manual| image:: /_static/images/help_menu_technical_manual.png
:scale: 80%
.. |tracer_warn| replace::
|tracer_warning_text| |br|
|manual|
"""
language = None
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "breathe/*"]
# -- Options for HTML output -------------------------------------------------
html_theme = "pydata_sphinx_theme"
html_logo = "_static/images/logo-alfasim.svg"
html_theme_options = {
"github_url": "https://github.com/esss/alfasim-sdk",
"google_analytics_id": "UA-149094345-1",
}
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
html_favicon = "_static/images/alfasim_gui.ico"
# -- Options for intersphinx -------------------------------------------------
intersphinx_mapping = {
"python": ("http://docs.python.org/3", None),
"barril": ("https://barril.readthedocs.io/en/latest/", None),
}
# -- Options for Autodoc -----------------------------------------------------
autodoc_member_order = "groupwise"
|
[
"os.environ.get",
"subprocess.run",
"os.getcwd"
] |
[((167, 202), 'os.environ.get', 'os.environ.get', (['"""READTHEDOCS"""', 'None'], {}), "('READTHEDOCS', None)\n", (181, 202), False, 'import os\n'), ((229, 293), 'subprocess.run', 'subprocess.run', (['"""cd ..; doxygen alfasim_sdk_api.cfg"""'], {'shell': '(True)'}), "('cd ..; doxygen alfasim_sdk_api.cfg', shell=True)\n", (243, 293), False, 'import subprocess\n'), ((644, 655), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (653, 655), False, 'import os\n')]
|
from django.contrib import admin
from sidebar.models import Sidebar
from django import forms
from django.db import models
class SidebarAdmin(admin.ModelAdmin):
fields = ('position',)
admin.site.register(Sidebar, SidebarAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((189, 231), 'django.contrib.admin.site.register', 'admin.site.register', (['Sidebar', 'SidebarAdmin'], {}), '(Sidebar, SidebarAdmin)\n', (208, 231), False, 'from django.contrib import admin\n')]
|
# Copyright 2022 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the `NewBestMetric` condition and associated helper classes."""
import json
import os
import sys
from typing import Any, Callable, Optional, Union
import uuid
from orbit import runner
from orbit import utils
import tensorflow as tf
MetricFn = Callable[[runner.Output], Union[float, tf.Tensor]]
class NewBestMetric:
"""Condition that is satisfied when a new best metric is achieved.
This class keeps track of the best metric value seen so far, optionally in a
persistent (preemption-safe) way.
Two methods are provided, which each satisfy the `Action` protocol: `test` for
only testing whether a new best metric is achieved by a given train/eval
output, and `commit`, which both tests and records the new best metric value
if it is achieved. These separate methods enable the same `NewBestMetric`
instance to be reused as a condition multiple times, and can also provide
additional preemption/failure safety. For example, to avoid updating the best
metric if a model export fails or is pre-empted:
new_best_metric = orbit.actions.NewBestMetric(
'accuracy', filename='/model/dir/best_metric')
action = orbit.actions.ConditionalAction(
condition=new_best_metric.test,
action=[
orbit.actions.ExportSavedModel(...),
new_best_metric.commit
])
The default `__call__` implementation is equivalent to `commit`.
This class is safe to use in multi-client settings if all clients can be
guaranteed to compute the same metric. However when saving metrics it may be
helpful to avoid unnecessary writes by setting the `write_value` parameter to
`False` for most clients.
Attributes:
metric: The metric passed to __init__ (may be a string key or a callable
that can be applied to train/eval output).
higher_is_better: Whether higher metric values are better.
"""
def __init__(self,
metric: Union[str, MetricFn],
higher_is_better: bool = True,
filename: Optional[str] = None,
write_metric=True):
"""Initializes the instance.
Args:
metric: Either a string key name to use to look up a metric (assuming the
train/eval output is a dictionary), or a callable that accepts the
train/eval output and returns a metric value.
higher_is_better: Whether higher metric values are better. If `True`, a
new best metric is achieved when the metric value is strictly greater
than the previous best metric. If `False`, a new best metric is achieved
when the metric value is strictly less than the previous best metric.
filename: A filename to use for storage of the best metric value seen so
        far, to allow persistence of the value across preemptions. If `None`
(default), values aren't persisted.
write_metric: If `filename` is set, this controls whether this instance
will write new best metric values to the file, or just read from the
file to obtain the initial value. Setting this to `False` for most
clients in some multi-client setups can avoid unnecessary file writes.
Has no effect if `filename` is `None`.
"""
self.metric = metric
self.higher_is_better = higher_is_better
float_max = sys.float_info.max
self._best_value = JSONPersistedValue(
initial_value=-float_max if higher_is_better else float_max,
filename=filename,
write_value=write_metric)
def __call__(self, output: runner.Output) -> bool:
"""Tests `output` and updates the current best value if necessary.
This is equivalent to `commit` below.
Args:
output: The train or eval output to test.
Returns:
`True` if `output` contains a new best metric value, `False` otherwise.
"""
return self.commit(output)
def metric_value(self, output: runner.Output) -> float:
"""Computes the metric value for the given `output`."""
if callable(self.metric):
value = self.metric(output)
else:
value = output[self.metric]
return float(utils.get_value(value))
@property
def best_value(self) -> float:
"""Returns the best metric value seen so far."""
return self._best_value.read()
def test(self, output: runner.Output) -> bool:
"""Tests `output` to see if it contains a new best metric value.
If `output` does contain a new best metric value, this method does *not*
save it (i.e., calling this method multiple times in a row with the same
`output` will continue to return `True`).
Args:
output: The train or eval output to test.
Returns:
`True` if `output` contains a new best metric value, `False` otherwise.
"""
metric_value = self.metric_value(output)
if self.higher_is_better:
if metric_value > self.best_value:
return True
else: # Lower is better.
if metric_value < self.best_value:
return True
return False
def commit(self, output: runner.Output) -> bool:
"""Tests `output` and updates the current best value if necessary.
Unlike `test` above, if `output` does contain a new best metric value, this
method *does* save it (i.e., subsequent calls to this method with the same
`output` will return `False`).
Args:
output: The train or eval output to test.
Returns:
`True` if `output` contains a new best metric value, `False` otherwise.
"""
if self.test(output):
self._best_value.write(self.metric_value(output))
return True
return False
class JSONPersistedValue:
"""Represents a value that is persisted via a file-based backing store.
The value must be JSON-serializable. Each time the value is updated, it will
be written to the backing file. It is only read from the file at
initialization.
"""
def __init__(self,
initial_value: Any,
filename: str,
write_value: bool = True):
"""Initializes the instance.
Args:
initial_value: The initial value to use if no backing file exists or was
given. This must be a JSON-serializable value (possibly nested
combination of lists, dicts, and primitive values).
filename: The path to use for persistent storage of the value. This may be
`None`, in which case the value is not stable across preemptions.
write_value: If `True`, new values will be written to `filename` on calls
to `write()`. If `False`, `filename` is only read once to restore any
persisted value, and new values will not be written to it. This can be
useful in certain multi-client settings to avoid race conditions or
excessive file writes. If `filename` is `None`, this parameter has no
effect.
"""
self._value = None
self._filename = filename
self._write_value = write_value
if self._filename is not None:
if tf.io.gfile.exists(self._filename):
if tf.io.gfile.stat(self._filename).length > 0:
with tf.io.gfile.GFile(self._filename, 'r') as f:
self._value = json.load(f)
elif self._write_value:
tf.io.gfile.makedirs(os.path.dirname(self._filename))
if self._value is None:
self.write(initial_value)
def read(self):
"""Returns the value."""
return self._value
def write(self, value):
"""Writes the value, updating the backing store if one was provided."""
self._value = value
if self._filename is not None and self._write_value:
# To achieve atomic writes, we first write to a temporary file, and then
# rename it to `self._filename`.
tmp_filename = f'{self._filename}.tmp.{uuid.uuid4().hex}'
with tf.io.gfile.GFile(tmp_filename, 'w') as f:
json.dump(self._value, f)
tf.io.gfile.rename(tmp_filename, self._filename, overwrite=True)
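# A minimal usage sketch (editorial, not part of Orbit): with filename=None the
# best value lives only in memory, so no filesystem access is involved. The toy
# eval outputs below are illustrative assumptions.
if __name__ == '__main__':
  metric = NewBestMetric('accuracy')
  for toy_output in ({'accuracy': 0.7}, {'accuracy': 0.6}, {'accuracy': 0.9}):
    if metric.commit(toy_output):
      print('new best accuracy:', metric.best_value)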
|
[
"json.dump",
"json.load",
"uuid.uuid4",
"orbit.utils.get_value",
"tensorflow.io.gfile.rename",
"os.path.dirname",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.stat",
"tensorflow.io.gfile.GFile"
] |
[((4703, 4725), 'orbit.utils.get_value', 'utils.get_value', (['value'], {}), '(value)\n', (4718, 4725), False, 'from orbit import utils\n'), ((7539, 7573), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['self._filename'], {}), '(self._filename)\n', (7557, 7573), True, 'import tensorflow as tf\n'), ((8414, 8478), 'tensorflow.io.gfile.rename', 'tf.io.gfile.rename', (['tmp_filename', 'self._filename'], {'overwrite': '(True)'}), '(tmp_filename, self._filename, overwrite=True)\n', (8432, 8478), True, 'import tensorflow as tf\n'), ((8331, 8367), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['tmp_filename', '"""w"""'], {}), "(tmp_filename, 'w')\n", (8348, 8367), True, 'import tensorflow as tf\n'), ((8382, 8407), 'json.dump', 'json.dump', (['self._value', 'f'], {}), '(self._value, f)\n', (8391, 8407), False, 'import json\n'), ((7586, 7618), 'tensorflow.io.gfile.stat', 'tf.io.gfile.stat', (['self._filename'], {}), '(self._filename)\n', (7602, 7618), True, 'import tensorflow as tf\n'), ((7646, 7684), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['self._filename', '"""r"""'], {}), "(self._filename, 'r')\n", (7663, 7684), True, 'import tensorflow as tf\n'), ((7717, 7729), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7726, 7729), False, 'import json\n'), ((7789, 7820), 'os.path.dirname', 'os.path.dirname', (['self._filename'], {}), '(self._filename)\n', (7804, 7820), False, 'import os\n'), ((8301, 8313), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8311, 8313), False, 'import uuid\n')]
|
import torch
from .base import RLAlgorithmBase
from policies.models.actor import DeterministicPolicy
from torchkit.networks import FlattenMlp
class TD3(RLAlgorithmBase):
name = "td3"
continuous_action = True
use_target_actor = True
def __init__(
self, exploration_noise=0.1, target_noise=0.2, target_noise_clip=0.5, **kwargs
):
self.exploration_noise = exploration_noise
self.target_noise = target_noise
self.target_noise_clip = target_noise_clip
@staticmethod
def build_actor(input_size, action_dim, hidden_sizes, **kwargs):
return DeterministicPolicy(
obs_dim=input_size,
action_dim=action_dim,
hidden_sizes=hidden_sizes,
**kwargs,
)
@staticmethod
def build_critic(hidden_sizes, input_size=None, obs_dim=None, action_dim=None):
if obs_dim is not None and action_dim is not None:
input_size = obs_dim + action_dim
qf1 = FlattenMlp(
input_size=input_size, output_size=1, hidden_sizes=hidden_sizes
)
qf2 = FlattenMlp(
input_size=input_size, output_size=1, hidden_sizes=hidden_sizes
)
return qf1, qf2
def select_action(self, actor, observ, deterministic: bool, **kwargs):
mean = actor(observ)
if deterministic:
action_tuple = (mean, mean, None, None)
else:
action = (mean + torch.randn_like(mean) * self.exploration_noise).clamp(
-1, 1
) # NOTE
action_tuple = (action, mean, None, None)
return action_tuple
@staticmethod
def forward_actor(actor, observ):
new_actions = actor(observ) # (*, B, dim)
return new_actions, None
def _inject_noise(self, actions):
action_noise = (torch.randn_like(actions) * self.target_noise).clamp(
-self.target_noise_clip, self.target_noise_clip
)
new_actions = (actions + action_noise).clamp(-1, 1) # NOTE
return new_actions
def critic_loss(
self,
markov_actor: bool,
markov_critic: bool,
actor,
actor_target,
critic,
critic_target,
observs,
actions,
rewards,
dones,
gamma,
next_observs=None, # used in markov_critic
):
with torch.no_grad():
# first next_actions from target policy,
# (T+1, B, dim) including reaction to last obs
if markov_actor:
new_actions, _ = self.forward_actor(
actor_target, next_observs if markov_critic else observs
)
else:
new_actions, _ = actor_target(
prev_actions=actions,
rewards=rewards,
observs=next_observs if markov_critic else observs,
)
new_actions = self._inject_noise(new_actions)
if markov_critic: # (B, 1)
next_q1 = critic_target[0](next_observs, new_actions)
next_q2 = critic_target[1](next_observs, new_actions)
else:
next_q1, next_q2 = critic_target(
prev_actions=actions,
rewards=rewards,
observs=observs,
current_actions=new_actions,
) # (T+1, B, 1)
min_next_q_target = torch.min(next_q1, next_q2)
# q_target: (T, B, 1)
q_target = rewards + (1.0 - dones) * gamma * min_next_q_target # next q
if not markov_critic:
q_target = q_target[1:] # (T, B, 1)
if markov_critic:
q1_pred = critic[0](observs, actions)
q2_pred = critic[1](observs, actions)
else:
# Q(h(t), a(t)) (T, B, 1)
q1_pred, q2_pred = critic(
prev_actions=actions,
rewards=rewards,
observs=observs,
current_actions=actions[1:],
) # (T, B, 1)
return (q1_pred, q2_pred), q_target
def actor_loss(
self,
markov_actor: bool,
markov_critic: bool,
actor,
actor_target,
critic,
critic_target,
observs,
actions=None,
rewards=None,
):
if markov_actor:
new_actions, _ = self.forward_actor(actor, observs)
else:
new_actions, _ = actor(
prev_actions=actions, rewards=rewards, observs=observs
) # (T+1, B, A)
if markov_critic:
q1 = critic[0](observs, new_actions)
q2 = critic[1](observs, new_actions)
else:
q1, q2 = critic(
prev_actions=actions,
rewards=rewards,
observs=observs,
current_actions=new_actions,
) # (T+1, B, 1)
min_q_new_actions = torch.min(q1, q2) # (T+1,B,1)
policy_loss = -min_q_new_actions
if not markov_critic:
policy_loss = policy_loss[:-1] # (T,B,1) remove the last obs
return policy_loss, None
#### Below are used in shared RNN setting
def forward_actor_in_target(self, actor, actor_target, next_observ):
new_next_actions, _ = self.forward_actor(actor_target, next_observ)
return self._inject_noise(new_next_actions), None
def entropy_bonus(self, log_probs):
return 0.0
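# Editorial sketch of the target-policy smoothing performed by _inject_noise
# above: clipped Gaussian noise (scale target_noise, clipped to
# +/- target_noise_clip) is added to the target action, and the sum is clamped
# back into [-1, 1]. Run as a module (python -m ...) since this file uses
# relative imports; the toy batch is an illustrative assumption.
if __name__ == "__main__":
    algo = TD3()
    toy_actions = torch.zeros(4, 2)  # batch of 4 two-dimensional actions in [-1, 1]
    smoothed = algo._inject_noise(toy_actions)
    assert smoothed.min() >= -1.0 and smoothed.max() <= 1.0
    print(smoothed)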
|
[
"torch.randn_like",
"policies.models.actor.DeterministicPolicy",
"torchkit.networks.FlattenMlp",
"torch.no_grad",
"torch.min"
] |
[((605, 709), 'policies.models.actor.DeterministicPolicy', 'DeterministicPolicy', ([], {'obs_dim': 'input_size', 'action_dim': 'action_dim', 'hidden_sizes': 'hidden_sizes'}), '(obs_dim=input_size, action_dim=action_dim, hidden_sizes\n =hidden_sizes, **kwargs)\n', (624, 709), False, 'from policies.models.actor import DeterministicPolicy\n'), ((986, 1061), 'torchkit.networks.FlattenMlp', 'FlattenMlp', ([], {'input_size': 'input_size', 'output_size': '(1)', 'hidden_sizes': 'hidden_sizes'}), '(input_size=input_size, output_size=1, hidden_sizes=hidden_sizes)\n', (996, 1061), False, 'from torchkit.networks import FlattenMlp\n'), ((1098, 1173), 'torchkit.networks.FlattenMlp', 'FlattenMlp', ([], {'input_size': 'input_size', 'output_size': '(1)', 'hidden_sizes': 'hidden_sizes'}), '(input_size=input_size, output_size=1, hidden_sizes=hidden_sizes)\n', (1108, 1173), False, 'from torchkit.networks import FlattenMlp\n'), ((4975, 4992), 'torch.min', 'torch.min', (['q1', 'q2'], {}), '(q1, q2)\n', (4984, 4992), False, 'import torch\n'), ((2373, 2388), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2386, 2388), False, 'import torch\n'), ((3451, 3478), 'torch.min', 'torch.min', (['next_q1', 'next_q2'], {}), '(next_q1, next_q2)\n', (3460, 3478), False, 'import torch\n'), ((1832, 1857), 'torch.randn_like', 'torch.randn_like', (['actions'], {}), '(actions)\n', (1848, 1857), False, 'import torch\n'), ((1446, 1468), 'torch.randn_like', 'torch.randn_like', (['mean'], {}), '(mean)\n', (1462, 1468), False, 'import torch\n')]
|
import json
import ipyvuetify as v
import ipywidgets as w
from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, \
get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on
import pprint
class ColPreview(v.Col):
TEXTFIELD_PREFIX = "text_field_"
def __init__(self, parent, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pp = pprint.PrettyPrinter(indent=1)
self.parent = parent
self.neo = None
self.label_properties = {}
self.merge_on = {}
# #files
self.list_of_chkboxes = []
self.list_of_selected_files = []
self.col_list_files = v.Col(children=[])
self.btn_delete = v.Btn(children=["Delete"], disabled=True)
self.btn_delete.on_event('click', self.on_btn_delete_clicked)
self.btn_edit = v.Btn(children=["Edit"], disabled=True)
self.btn_edit.on_event('click', self.on_btn_edit_clicked)
self.btn_edit_all = v.Btn(children=["Edit All"])
self.btn_edit_all.on_event('click', self.on_btn_edit_all_clicked)
# #summary
self.chk_detailed_summary = v.Checkbox(label="Detailed Summary", v_model=False, disabled=True)
self.chk_detailed_summary.on_event("change", self.on_chk_detailed_summary_changed)
self.out_summary = w.Output()
# #labels
self.btn_edit_by_label = v.Btn(children=["Edit"])
self.btn_edit_by_label.on_event('click', self.on_btn_edit_by_label_clicked)
self.chk_include_neighbors = v.Checkbox(label="Include Neighbors", v_model=False)
self.select_edit_selected = v.Autocomplete(items=[], select_first=True, hint='Edit')
# #setup merge
self.select_setup_merge_label = v.Autocomplete(items=[], select_first=True, label="Select Label")
self.select_setup_merge_label.on_event('change', self.on_select_setup_merge_label_changed)
self.text_setup_merge_label = v.TextField(v_model="", label="Enter other Label")
self.text_setup_merge_label.on_event('input', self.on_text_setup_merge_label_changed)
self.text_setup_merge_prop = v.TextField(v_model="", label="Comma-separated property list")
self.text_setup_merge_prop.on_event('input', self.on_text_setup_merge_prop_changed)
self.btn_save_merge = v.Btn(children=["Save"])
self.btn_save_merge.on_event('click', self.on_btn_save_merge_clicked)
self.btn_delete_merge = v.Btn(children=["Delete"])
self.btn_delete_merge.on_event('click', self.on_btn_delete_merge_clicked)
self.out_setup_merge = w.Output()
# #all
self.out = w.Output()
self.children = [
v.Tabs(children=[
v.Tab(children=["Files"]),
v.TabItem(children=[
v.Row(children=[
v.Col(children=[
self.col_list_files,
self.btn_delete,
self.btn_edit,
self.btn_edit_all,
]),
v.Col(children=[
self.chk_detailed_summary,
self.out_summary,
]),
])
]),
v.Tab(children=["Labels"]),
v.TabItem(children=[
v.Row(children=[
self.select_edit_selected,
self.chk_include_neighbors
]),
self.btn_edit_by_label,
]),
v.Tab(children=["Merge_On Setup"]),
v.TabItem(children=[
v.Row(children=[
v.Col(children=[
self.select_setup_merge_label,
self.text_setup_merge_label,
self.text_setup_merge_prop,
self.btn_save_merge,
self.btn_delete_merge,
]),
v.Col(children=[
self.out_setup_merge,
])
])
]),
]),
self.out
]
self.render()
def on_btn_edit_clicked(self, widget, event, data):
neores = get_file_content(self.neo, self.list_of_selected_files[0])
if len(neores) == 1:
json_content = neores[0]['content']
if json_content:
self.parent.col_json.text_area.v_model = json_content
else:
self.out.clear_output()
with self.out:
print(
"ERROR: selected file does not have content in .json property. Please contact database administrator.")
elif len(neores) > 1:
self.out.clear_output()
with self.out:
print("ERROR: there is >1 file with specified name. Please contact database administrator.")
else:
self.out.clear_output()
with self.out:
print(
f"ERROR: no file with name {self.list_of_selected_files[0]} was found. Please contact database administrator.")
def on_btn_edit_all_clicked(self, widget, event, data):
res = get_arrows_json(neo=self.neo, where="NOT (x:_File_:_Metadata_) and NOT (x:_MergeOn_:_Metadata_)",
incl_neighbors=True)
if res:
self.parent.col_json.text_area.v_model = json.dumps(res[0])
else:
self.out.clear_output()
with self.out:
print(
f"No data found in the database")
def on_btn_edit_by_label_clicked(self, widget, event, data):
if self.select_edit_selected.v_model:
label = self.select_edit_selected.v_model
res = get_arrows_json(neo=self.neo, where=f"x:`{label}`", incl_neighbors=self.chk_include_neighbors.v_model)
if res:
self.parent.col_json.text_area.v_model = json.dumps(res[0])
else:
self.out.clear_output()
with self.out:
print(
f"No {label} data found in the database")
def on_btn_delete_clicked(self, widget, event, data):
delete_content(self.neo, names=self.list_of_selected_files)
self.neo.clean_slate(keep_labels=['_File_', '_MergeOn_', '_Metadata_'])
extract_jsons(neo=self.neo, merge_on=self.parent.get_merge_on())
self.render()
def on_chk_detailed_summary_changed(self, widget, event, data):
self.refresh_selected_files_stats()
def get_selected_files(self):
return [item.label for item in self.list_of_chkboxes if item.v_model]
def on_chkbox_changed(self, widget, event, data):
self.list_of_selected_files = self.get_selected_files()
self.refresh_selected_files_stats()
if len(self.list_of_selected_files) != 1:
self.btn_edit.disabled = True
else:
self.btn_edit.disabled = False
if len(self.list_of_selected_files) > 0:
self.chk_detailed_summary.disabled = False
self.btn_delete.disabled = False
else:
self.chk_detailed_summary.disabled = True
self.btn_delete.disabled = True
# print(self.list_of_selected_files)
def refresh_col_list_files(self):
res = get_files_list(self.neo)
self.out.clear_output()
if res:
with self.out:
assert len(res) == 1, """
>1 chain of _File_ nodes exists in the database.
                Clear your database if you have a backup of your data, otherwise contact the database administrator.
"""
files = res[0]['filenames']
if files:
self.col_list_files.children = []
self.list_of_chkboxes = []
for i, file in enumerate(files):
chkbox = v.Checkbox(label=file, v_model=False)
self.list_of_chkboxes.append(chkbox)
self.list_of_chkboxes[i].on_event("change", self.on_chkbox_changed)
self.col_list_files.children = self.list_of_chkboxes
else:
self.col_list_files.children = []
self.out.clear_output()
with self.out:
print("No files data was found in the database")
else:
self.col_list_files.children = []
with self.out:
print("No files data was found in the database or >1000 files")
def refresh_selected_files_stats(self):
res = get_files_stats(self.neo, filenames=self.get_selected_files(), detailed=self.chk_detailed_summary.v_model)
self.out_summary.clear_output()
if res:
with self.out_summary:
self.pp.pprint(res)
def refresh_select_edit_selected(self):
self.select_edit_selected.items = [label for label in self.neo.get_labels() if
label not in ['_File_', '_MergeOn_', '_Metadata_']]
if self.select_edit_selected.items:
self.select_edit_selected.v_model = self.select_edit_selected.items[0]
def on_btn_save_merge_clicked(self, widget, event, data):
label = (self.select_setup_merge_label.v_model
if self.select_setup_merge_label.v_model != "Other" else
self.text_setup_merge_label.v_model)
prop_list = self.text_setup_merge_prop.v_model.split(",")
for prop in prop_list:
            self.neo.create_index(label, prop)  # TODO: for Neo4j Enterprise Edition an index can be set on a pair of properties
save_merge_on(
neo=self.neo,
label=label,
properties=self.text_setup_merge_prop.v_model
)
self.refresh_out_setup_merge()
def on_btn_delete_merge_clicked(self, widget, event, data):
label = (self.select_setup_merge_label.v_model
if self.select_setup_merge_label.v_model != "Other" else
self.text_setup_merge_label.v_model)
prop_list = self.text_setup_merge_prop.v_model.split(",")
for prop in prop_list:
self.neo.drop_index(f"{label}.{prop}")
delete_merge_on(
neo=self.neo,
label=label
)
self.refresh_out_setup_merge()
def on_select_setup_merge_label_changed(self, widget, event, data):
cur_selection = self.select_setup_merge_label.v_model
if cur_selection and cur_selection != 'Other':
self.text_setup_merge_prop.v_model = self.label_properties[cur_selection]
self.text_setup_merge_label.disabled = True
self.btn_save_merge.disabled = False
self.btn_delete_merge.disabled = False
else:
self.text_setup_merge_prop.v_model = ''
self.text_setup_merge_label.disabled = False
self.btn_save_merge.disabled = True
self.btn_delete_merge.disabled = True
def on_text_setup_merge_label_changed(self, widget, event, data):
        if self.text_setup_merge_label.v_model and self.text_setup_merge_prop.v_model:
self.btn_save_merge.disabled = False
def on_text_setup_merge_prop_changed(self, widget, event, data):
if (self.text_setup_merge_label.v_model or self.select_setup_merge_label.v_model != 'Other') \
                and self.text_setup_merge_prop.v_model:
self.btn_save_merge.disabled = False
def refresh_select_setup_merge(self):
res1 = get_label_properties(self.neo)
res2 = get_merge_on(self.neo)
if res1:
self.label_properties = {k: ','.join(i) for k, i in res1[0]['map'].items() if
not k in ['_File_', '_MergeOn_', '_Metadata_']}
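            # Saved merge_on settings (res2) override the properties discovered from the data (res1).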
if res2:
self.label_properties.update(res2[0]['map'].items())
else:
self.label_properties = {}
cur_selection = self.select_setup_merge_label.v_model
self.select_setup_merge_label.items = ['Other'] + list(self.label_properties.keys())
if cur_selection in self.select_setup_merge_label.items:
pass
else:
self.select_setup_merge_label.v_model = 'Other'
self.on_select_setup_merge_label_changed(None, None, None)
def refresh_out_setup_merge(self):
res = get_merge_on(self.neo)
self.out_setup_merge.clear_output()
if res:
self.merge_on = res[0]['map']
else:
self.merge_on = {}
with self.out_setup_merge:
self.pp.pprint(self.merge_on)
def render(self):
if self.neo:
self.refresh_col_list_files()
self.refresh_selected_files_stats()
self.refresh_select_edit_selected()
self.refresh_select_setup_merge()
self.refresh_out_setup_merge()
|
[
"src.neo_utils.get_file_content",
"ipyvuetify.Checkbox",
"src.neo_utils.get_merge_on",
"src.neo_utils.get_files_list",
"ipyvuetify.Tab",
"src.neo_utils.save_merge_on",
"src.neo_utils.delete_merge_on",
"ipyvuetify.TextField",
"ipywidgets.Output",
"src.neo_utils.get_label_properties",
"src.neo_utils.get_arrows_json",
"pprint.PrettyPrinter",
"json.dumps",
"ipyvuetify.Col",
"ipyvuetify.Autocomplete",
"ipyvuetify.Btn",
"src.neo_utils.delete_content",
"ipyvuetify.Row"
] |
[((444, 474), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(1)'}), '(indent=1)\n', (464, 474), False, 'import pprint\n'), ((713, 731), 'ipyvuetify.Col', 'v.Col', ([], {'children': '[]'}), '(children=[])\n', (718, 731), True, 'import ipyvuetify as v\n'), ((758, 799), 'ipyvuetify.Btn', 'v.Btn', ([], {'children': "['Delete']", 'disabled': '(True)'}), "(children=['Delete'], disabled=True)\n", (763, 799), True, 'import ipyvuetify as v\n'), ((894, 933), 'ipyvuetify.Btn', 'v.Btn', ([], {'children': "['Edit']", 'disabled': '(True)'}), "(children=['Edit'], disabled=True)\n", (899, 933), True, 'import ipyvuetify as v\n'), ((1028, 1056), 'ipyvuetify.Btn', 'v.Btn', ([], {'children': "['Edit All']"}), "(children=['Edit All'])\n", (1033, 1056), True, 'import ipyvuetify as v\n'), ((1186, 1252), 'ipyvuetify.Checkbox', 'v.Checkbox', ([], {'label': '"""Detailed Summary"""', 'v_model': '(False)', 'disabled': '(True)'}), "(label='Detailed Summary', v_model=False, disabled=True)\n", (1196, 1252), True, 'import ipyvuetify as v\n'), ((1371, 1381), 'ipywidgets.Output', 'w.Output', ([], {}), '()\n', (1379, 1381), True, 'import ipywidgets as w\n'), ((1433, 1457), 'ipyvuetify.Btn', 'v.Btn', ([], {'children': "['Edit']"}), "(children=['Edit'])\n", (1438, 1457), True, 'import ipyvuetify as v\n'), ((1579, 1631), 'ipyvuetify.Checkbox', 'v.Checkbox', ([], {'label': '"""Include Neighbors"""', 'v_model': '(False)'}), "(label='Include Neighbors', v_model=False)\n", (1589, 1631), True, 'import ipyvuetify as v\n'), ((1668, 1724), 'ipyvuetify.Autocomplete', 'v.Autocomplete', ([], {'items': '[]', 'select_first': '(True)', 'hint': '"""Edit"""'}), "(items=[], select_first=True, hint='Edit')\n", (1682, 1724), True, 'import ipyvuetify as v\n'), ((1788, 1853), 'ipyvuetify.Autocomplete', 'v.Autocomplete', ([], {'items': '[]', 'select_first': '(True)', 'label': '"""Select Label"""'}), "(items=[], select_first=True, label='Select Label')\n", (1802, 1853), True, 'import ipyvuetify as v\n'), ((1991, 2041), 'ipyvuetify.TextField', 'v.TextField', ([], {'v_model': '""""""', 'label': '"""Enter other Label"""'}), "(v_model='', label='Enter other Label')\n", (2002, 2041), True, 'import ipyvuetify as v\n'), ((2173, 2235), 'ipyvuetify.TextField', 'v.TextField', ([], {'v_model': '""""""', 'label': '"""Comma-separated property list"""'}), "(v_model='', label='Comma-separated property list')\n", (2184, 2235), True, 'import ipyvuetify as v\n'), ((2358, 2382), 'ipyvuetify.Btn', 'v.Btn', ([], {'children': "['Save']"}), "(children=['Save'])\n", (2363, 2382), True, 'import ipyvuetify as v\n'), ((2493, 2519), 'ipyvuetify.Btn', 'v.Btn', ([], {'children': "['Delete']"}), "(children=['Delete'])\n", (2498, 2519), True, 'import ipyvuetify as v\n'), ((2633, 2643), 'ipywidgets.Output', 'w.Output', ([], {}), '()\n', (2641, 2643), True, 'import ipywidgets as w\n'), ((2678, 2688), 'ipywidgets.Output', 'w.Output', ([], {}), '()\n', (2686, 2688), True, 'import ipywidgets as w\n'), ((4407, 4465), 'src.neo_utils.get_file_content', 'get_file_content', (['self.neo', 'self.list_of_selected_files[0]'], {}), '(self.neo, self.list_of_selected_files[0])\n', (4423, 4465), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((5395, 5522), 'src.neo_utils.get_arrows_json', 'get_arrows_json', ([], {'neo': 'self.neo', 'where': '"""NOT (x:_File_:_Metadata_) and NOT (x:_MergeOn_:_Metadata_)"""', 'incl_neighbors': '(True)'}), "(neo=self.neo, where=\n 'NOT (x:_File_:_Metadata_) and NOT (x:_MergeOn_:_Metadata_)',\n incl_neighbors=True)\n", (5410, 5522), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((6418, 6477), 'src.neo_utils.delete_content', 'delete_content', (['self.neo'], {'names': 'self.list_of_selected_files'}), '(self.neo, names=self.list_of_selected_files)\n', (6432, 6477), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((7550, 7574), 'src.neo_utils.get_files_list', 'get_files_list', (['self.neo'], {}), '(self.neo)\n', (7564, 7574), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((9844, 9936), 'src.neo_utils.save_merge_on', 'save_merge_on', ([], {'neo': 'self.neo', 'label': 'label', 'properties': 'self.text_setup_merge_prop.v_model'}), '(neo=self.neo, label=label, properties=self.\n text_setup_merge_prop.v_model)\n', (9857, 9936), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((10421, 10463), 'src.neo_utils.delete_merge_on', 'delete_merge_on', ([], {'neo': 'self.neo', 'label': 'label'}), '(neo=self.neo, label=label)\n', (10436, 10463), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((11717, 11747), 'src.neo_utils.get_label_properties', 'get_label_properties', (['self.neo'], {}), '(self.neo)\n', (11737, 11747), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((11763, 11785), 'src.neo_utils.get_merge_on', 'get_merge_on', (['self.neo'], {}), '(self.neo)\n', (11775, 11785), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((12553, 12575), 'src.neo_utils.get_merge_on', 'get_merge_on', (['self.neo'], {}), '(self.neo)\n', (12565, 12575), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((5613, 5631), 'json.dumps', 'json.dumps', (['res[0]'], {}), '(res[0])\n', (5623, 5631), False, 'import json\n'), ((5970, 6077), 'src.neo_utils.get_arrows_json', 'get_arrows_json', ([], {'neo': 'self.neo', 'where': 'f"""x:`{label}`"""', 'incl_neighbors': 'self.chk_include_neighbors.v_model'}), "(neo=self.neo, where=f'x:`{label}`', incl_neighbors=self.\n chk_include_neighbors.v_model)\n", (5985, 6077), False, 'from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on\n'), ((6150, 6168), 'json.dumps', 'json.dumps', (['res[0]'], {}), '(res[0])\n', (6160, 6168), False, 'import json\n'), ((8096, 8133), 'ipyvuetify.Checkbox', 'v.Checkbox', ([], {'label': 'file', 'v_model': '(False)'}), '(label=file, v_model=False)\n', (8106, 8133), True, 'import ipyvuetify as v\n'), ((2761, 2786), 'ipyvuetify.Tab', 'v.Tab', ([], {'children': "['Files']"}), "(children=['Files'])\n", (2766, 2786), True, 'import ipyvuetify as v\n'), ((3344, 3370), 'ipyvuetify.Tab', 'v.Tab', ([], {'children': "['Labels']"}), "(children=['Labels'])\n", (3349, 3370), True, 'import ipyvuetify as v\n'), ((3652, 3686), 'ipyvuetify.Tab', 'v.Tab', ([], {'children': "['Merge_On Setup']"}), "(children=['Merge_On Setup'])\n", (3657, 3686), True, 'import ipyvuetify as v\n'), ((3429, 3500), 'ipyvuetify.Row', 'v.Row', ([], {'children': '[self.select_edit_selected, self.chk_include_neighbors]'}), '(children=[self.select_edit_selected, self.chk_include_neighbors])\n', (3434, 3500), True, 'import ipyvuetify as v\n'), ((2886, 2979), 'ipyvuetify.Col', 'v.Col', ([], {'children': '[self.col_list_files, self.btn_delete, self.btn_edit, self.btn_edit_all]'}), '(children=[self.col_list_files, self.btn_delete, self.btn_edit, self.\n btn_edit_all])\n', (2891, 2979), True, 'import ipyvuetify as v\n'), ((3139, 3200), 'ipyvuetify.Col', 'v.Col', ([], {'children': '[self.chk_detailed_summary, self.out_summary]'}), '(children=[self.chk_detailed_summary, self.out_summary])\n', (3144, 3200), True, 'import ipyvuetify as v\n'), ((3786, 3938), 'ipyvuetify.Col', 'v.Col', ([], {'children': '[self.select_setup_merge_label, self.text_setup_merge_label, self.\n text_setup_merge_prop, self.btn_save_merge, self.btn_delete_merge]'}), '(children=[self.select_setup_merge_label, self.text_setup_merge_label,\n self.text_setup_merge_prop, self.btn_save_merge, self.btn_delete_merge])\n', (3791, 3938), True, 'import ipyvuetify as v\n'), ((4127, 4165), 'ipyvuetify.Col', 'v.Col', ([], {'children': '[self.out_setup_merge]'}), '(children=[self.out_setup_merge])\n', (4132, 4165), True, 'import ipyvuetify as v\n')]
|
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse
from openpds.core.models import Profile
from pymongo import Connection
from openpds import settings
import json
import random
def dump(request):
profiles = Profile.objects.all()
data = {}
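    # Pick one of the configured MongoDB hosts at random and let reads go to the nearest replica.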
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
for profile in profiles:
db = connection["User_" + str(profile.id)]
funf = db["funf"]
        data[profile.uuid] = list(funf.find())  # materialize the cursor before the connection is closed
connection.close()
return render_to_response("dataDump.csv", data)
|
[
"django.shortcuts.render_to_response",
"openpds.core.models.Profile.objects.all"
] |
[((267, 288), 'openpds.core.models.Profile.objects.all', 'Profile.objects.all', ([], {}), '()\n', (286, 288), False, 'from openpds.core.models import Profile\n'), ((691, 731), 'django.shortcuts.render_to_response', 'render_to_response', (['"""dataDump.csv"""', 'data'], {}), "('dataDump.csv', data)\n", (709, 731), False, 'from django.shortcuts import render_to_response, get_object_or_404\n')]
|
import connexion
import six
from app.models.bank_card import BankCard # noqa: E501
from app import util
def add_bank_card(body, account_id): # noqa: E501
"""Add a new bank card for an account
Add a new card for the specified account # noqa: E501
:param body: Bank card details
:type body: dict | bytes
:param account_id: Id of account
:type account_id: str
:rtype: BankCard
"""
if connexion.request.is_json:
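        # Deserialize the JSON request body into a BankCard model instance.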
body = BankCard.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def get_card(card_id): # noqa: E501
"""Return all bank cards for an account
Return all cards for the specified account # noqa: E501
:param card_id: Id of the card
:type card_id: str
:rtype: BankCard
"""
return 'do some magic!'
def list_bank_cards(account_id): # noqa: E501
"""Return all bank cards for an account
Return all cards for the specified account # noqa: E501
:param account_id: Id of account
:type account_id: str
:rtype: List[BankCard]
"""
return 'do some magic!'
|
[
"connexion.request.get_json"
] |
[((486, 514), 'connexion.request.get_json', 'connexion.request.get_json', ([], {}), '()\n', (512, 514), False, 'import connexion\n')]
|
# -*- coding: utf-8 -*-
"""
Created by @dtheodor at 2015-05-31
Test SQL compilation of SQL schema set and get.
"""
from sqlalchemy.dialects.postgres import dialect as pg_dialect
from sqlalchemy.dialects.oracle import dialect as oracle_dialect
from sqlalchemy.dialects.mssql import dialect as mssql_dialect
from sqlalchemy_sqlschema.sql import set_schema, get_schema
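# Each dialect should compile get_schema()/set_schema() into its native schema-switching statement.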
class TestDefaultSqlCompilation(object):
def test_get_schema(self):
assert str(get_schema()) == "SHOW SCHEMA"
def test_set_schema(self):
assert str(set_schema("new_schema")) == "SET SCHEMA new_schema"
class TestPostgresSqlCompilation(object):
def test_get_schema(self):
get_schema_stmt = get_schema()
assert str(get_schema_stmt.compile(dialect=pg_dialect())) == \
"SHOW search_path"
def test_set_schema(self):
set_schema_stmt = set_schema("new_schema")
assert str(set_schema_stmt.compile(dialect=pg_dialect())) == \
"SET search_path TO new_schema"
class TestOracleCompilation(object):
def test_get_schema(self):
get_schema_stmt = get_schema()
assert str(get_schema_stmt.compile(dialect=oracle_dialect())) == \
"SELECT sys_context('USERENV', 'CURRENT_SCHEMA') FROM dual"
def test_set_schema(self):
set_schema_stmt = set_schema("new_schema")
assert str(set_schema_stmt.compile(dialect=oracle_dialect())) == \
"ALTER SESSION SET CURRENT_SCHEMA = new_schema"
class TestMssqlCompilation(object):
def test_get_schema(self):
get_schema_stmt = get_schema()
assert str(get_schema_stmt.compile(dialect=mssql_dialect())) == \
"SELECT SCHEMA_NAME()"
|
[
"sqlalchemy.dialects.mssql.dialect",
"sqlalchemy_sqlschema.sql.get_schema",
"sqlalchemy.dialects.oracle.dialect",
"sqlalchemy_sqlschema.sql.set_schema",
"sqlalchemy.dialects.postgres.dialect"
] |
[((694, 706), 'sqlalchemy_sqlschema.sql.get_schema', 'get_schema', ([], {}), '()\n', (704, 706), False, 'from sqlalchemy_sqlschema.sql import set_schema, get_schema\n'), ((870, 894), 'sqlalchemy_sqlschema.sql.set_schema', 'set_schema', (['"""new_schema"""'], {}), "('new_schema')\n", (880, 894), False, 'from sqlalchemy_sqlschema.sql import set_schema, get_schema\n'), ((1108, 1120), 'sqlalchemy_sqlschema.sql.get_schema', 'get_schema', ([], {}), '()\n', (1118, 1120), False, 'from sqlalchemy_sqlschema.sql import set_schema, get_schema\n'), ((1329, 1353), 'sqlalchemy_sqlschema.sql.set_schema', 'set_schema', (['"""new_schema"""'], {}), "('new_schema')\n", (1339, 1353), False, 'from sqlalchemy_sqlschema.sql import set_schema, get_schema\n'), ((1586, 1598), 'sqlalchemy_sqlschema.sql.get_schema', 'get_schema', ([], {}), '()\n', (1596, 1598), False, 'from sqlalchemy_sqlschema.sql import set_schema, get_schema\n'), ((459, 471), 'sqlalchemy_sqlschema.sql.get_schema', 'get_schema', ([], {}), '()\n', (469, 471), False, 'from sqlalchemy_sqlschema.sql import set_schema, get_schema\n'), ((541, 565), 'sqlalchemy_sqlschema.sql.set_schema', 'set_schema', (['"""new_schema"""'], {}), "('new_schema')\n", (551, 565), False, 'from sqlalchemy_sqlschema.sql import set_schema, get_schema\n'), ((758, 770), 'sqlalchemy.dialects.postgres.dialect', 'pg_dialect', ([], {}), '()\n', (768, 770), True, 'from sqlalchemy.dialects.postgres import dialect as pg_dialect\n'), ((946, 958), 'sqlalchemy.dialects.postgres.dialect', 'pg_dialect', ([], {}), '()\n', (956, 958), True, 'from sqlalchemy.dialects.postgres import dialect as pg_dialect\n'), ((1172, 1188), 'sqlalchemy.dialects.oracle.dialect', 'oracle_dialect', ([], {}), '()\n', (1186, 1188), True, 'from sqlalchemy.dialects.oracle import dialect as oracle_dialect\n'), ((1405, 1421), 'sqlalchemy.dialects.oracle.dialect', 'oracle_dialect', ([], {}), '()\n', (1419, 1421), True, 'from sqlalchemy.dialects.oracle import dialect as oracle_dialect\n'), ((1650, 1665), 'sqlalchemy.dialects.mssql.dialect', 'mssql_dialect', ([], {}), '()\n', (1663, 1665), True, 'from sqlalchemy.dialects.mssql import dialect as mssql_dialect\n')]
|
from abc import ABC
from dataclasses import dataclass
from enum import Enum
from logging import getLogger
from typing import List, Tuple, Callable
from beamngpy import Scenario
_logger = getLogger("DriveBuild.SimNode.DBTypes.Criteria")
class KPValue(Enum):
"""
Represents the Kleene-Priest logic.
"""
    TRUE = True
    FALSE = False
UNKNOWN = None
# NOTE Do not underestimate the complexity of the implementation of these logical operators!
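    # Strong Kleene semantics:
    #   AND: FALSE dominates; otherwise any UNKNOWN operand makes the result UNKNOWN.
    #   OR:  TRUE dominates; otherwise any UNKNOWN operand makes the result UNKNOWN.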
def __and__(self, other):
if self == self.FALSE or other == self.FALSE:
return self.FALSE
if self == self.UNKNOWN or other == self.UNKNOWN:
return self.UNKNOWN
return self.TRUE
def __or__(self, other):
if self == self.TRUE or other == self.TRUE:
return self.TRUE
if self == self.UNKNOWN or other == self.UNKNOWN:
return self.UNKNOWN
return self.FALSE
def __neg__(self):
if self == self.TRUE:
return self.FALSE
if self == self.FALSE:
return self.TRUE
return self.UNKNOWN
class Evaluable(ABC):
from abc import abstractmethod
@abstractmethod
def eval(self) -> KPValue:
"""
Evaluates to KPValue.TRUE only if the condition got triggered.
"""
pass
class UnknownEvaluable(Evaluable):
"""
A class that can be used for representing an "empty" evaluable e.g. representing an empty precondition criterion.
"""
def eval(self) -> KPValue:
return KPValue.UNKNOWN
class Criterion(Evaluable, ABC):
def __init__(self, scenario: Scenario) -> None:
self.scenario = scenario
# State conditions
# FIXME Recognize "any" participant
class StateCondition(Criterion, ABC):
"""
    NOTE: A StateCondition never calls Vehicle::update_vehicle(), which has to be called before every evaluation.
"""
from abc import abstractmethod
from requests import AiRequest
from beamngpy import Vehicle
from typing import Any
from drivebuildclient import static_vars
def __init__(self, scenario: Scenario, participant: str) -> None:
super().__init__(scenario)
# TODO Check existence of participant id
self.participant = participant
self.requests = self._create_requests()
for request in self.requests:
vehicle = self._get_vehicle()
request.add_sensor_to(vehicle)
# Make sure vehicle sensor_cache is not empty
if self._is_simulation_running():
scenario.bng.poll_sensors(vehicle)
def _get_vehicle(self) -> Vehicle:
return self.scenario.get_vehicle(self.participant)
def _poll_request_data(self) -> List[Any]:
request_data = []
for request in self.requests:
request_data.append(request.read_sensor_cache_of(self._get_vehicle(), self.scenario))
return request_data
@static_vars(prefix="criterion_", counter=0)
def _generate_rid(self) -> str:
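        # Probe criterion_<counter> ids until one is found that is not already used as a sensor name.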
while True: # Pseudo "do-while"-loop
rid = StateCondition._generate_rid.prefix + str(StateCondition._generate_rid.counter)
if rid in self._get_vehicle().sensors:
StateCondition._generate_rid.counter += 1
else:
break
return rid
def _is_simulation_running(self) -> bool:
return self.scenario.bng is not None
def eval(self) -> KPValue:
if self._is_simulation_running():
return self._eval_impl()
else:
return KPValue.UNKNOWN
@abstractmethod
def _eval_impl(self) -> KPValue:
pass
@abstractmethod
def _create_requests(self) -> List[AiRequest]:
pass
class SCPosition(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, x: float, y: float, tolerance: float):
super().__init__(scenario, participant)
if tolerance < 0:
raise ValueError("The tolerance must be non negative.")
self.x = x
self.y = y
self.tolerance = tolerance
def _create_requests(self) -> List[AiRequest]:
from requests import PositionRequest
return [PositionRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from numpy import array
from numpy.linalg import norm
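        # TRUE iff the vehicle position lies within the Euclidean tolerance of (self.x, self.y).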
position = self._poll_request_data()[0]
if position:
x, y = position
return KPValue.TRUE if norm(array((x, y)) - array((self.x, self.y))) <= self.tolerance else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCArea(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, points: List[Tuple[float, float]]):
from shapely.geometry import Polygon
super().__init__(scenario, participant)
self.polygon = Polygon(points)
def _create_requests(self) -> List[AiRequest]:
from requests import PositionRequest
return [PositionRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from shapely.geometry import Point
position = self._poll_request_data()[0]
if position:
x, y = position
return KPValue.TRUE if self.polygon.contains(Point(x, y)) else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCLane(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, lane: str):
super().__init__(scenario, participant)
# TODO Check existence of lane id
self.lane = lane
def _create_requests(self) -> List[AiRequest]:
from requests import BoundingBoxRequest
return [BoundingBoxRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from typing import Dict
from shapely.geometry import Polygon
bbox = self._poll_request_data()[0]
def _to_polygon(road_edges: List[Dict[str, float]]) -> Polygon:
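            # Walk the left road edge forward and the right edge backwards to build a closed outline.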
points = [p["left"][0:2] for p in road_edges]
right_edge_points = [p["right"][0:2] for p in road_edges]
right_edge_points.reverse()
points.extend(right_edge_points)
return Polygon(shell=points)
if bbox:
if self.lane == "offroad":
is_offroad = KPValue.TRUE
for road in self.scenario.roads:
if road.rid:
edges = self.scenario.bng.get_road_edges(road.rid)
polygon = _to_polygon(edges)
if polygon.intersects(bbox):
is_offroad = KPValue.FALSE
break
else:
_logger.warning("SCLane can not consider roads without ID.")
return is_offroad
else:
for road in self.scenario.roads:
edges = self.scenario.bng.get_road_edges(road.rid)
polygon = _to_polygon(edges)
return KPValue.TRUE if polygon.intersects(bbox) else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCSpeed(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, speed_limit: float):
super().__init__(scenario, participant)
if speed_limit < 0:
raise ValueError("Speed limits must be non negative.")
self.speed_limit = speed_limit
def _create_requests(self) -> List[AiRequest]:
from requests import SpeedRequest
return [SpeedRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
speed = self._poll_request_data()[0]
if speed:
return KPValue.TRUE if speed > self.speed_limit else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCDamage(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str):
super().__init__(scenario, participant)
def _create_requests(self) -> List[AiRequest]:
from requests import DamageRequest
return [DamageRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
damage = self._poll_request_data()[0]
if damage:
return KPValue.TRUE if damage else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCDistance(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, other_participant: str, max_distance: float):
super().__init__(scenario, participant)
if max_distance < 0:
raise ValueError("The maximum allowed distance has to be non negative.")
# TODO Check whether other_participant id exists
self.other_participant = other_participant
self.max_distance = max_distance
def _create_requests(self) -> List[AiRequest]:
from requests import PositionRequest
return [PositionRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from numpy import array
from numpy.linalg import norm
position1 = self._poll_request_data()[0]
# FIXME This circumvents the request mechanism...
other_vehicle = self.scenario.get_vehicle(self.other_participant)
position2 = other_vehicle["pos"] if other_vehicle else None
if position1 and position2:
x1, y1 = position1
x2, y2, _ = position2
return KPValue.FALSE if norm(array((x1, y1)) - array((x2, y2))) > self.max_distance else KPValue.TRUE
else:
return KPValue.UNKNOWN
class SCLight(StateCondition):
from dbtypes.scheme import CarLight
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, light: CarLight):
super().__init__(scenario, participant)
self.light = light
def _create_requests(self) -> List[AiRequest]:
from requests import LightRequest
return [LightRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
# FIXME Implement light criterion
print(self._poll_request_data()[0])
return KPValue.UNKNOWN
class SCWaypoint(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, waypoint: str):
super().__init__(scenario, participant)
# TODO Check whether waypoint id exists
self.waypoint = waypoint
def _create_requests(self) -> List[AiRequest]:
return []
def _eval_impl(self) -> KPValue:
# FIXME Implement waypoint criterion
return KPValue.UNKNOWN
# Validation constraints
class ValidationConstraint(Criterion, ABC):
from abc import abstractmethod
def __init__(self, scenario: Scenario, inner: Evaluable) -> None:
super().__init__(scenario)
self.inner = inner
def eval(self) -> KPValue:
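        # The inner criterion only counts while the validation condition holds; otherwise the result is unknown.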
# FIXME How to distinguish VCs that got ignored from ones that could not be determined?
return self.inner.eval() if self.eval_cond() == KPValue.TRUE else KPValue.UNKNOWN
@abstractmethod
def eval_cond(self) -> KPValue:
pass
class ValidationConstraintSC(ValidationConstraint, ABC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: StateCondition):
super().__init__(scenario, inner)
self.sc = sc
def eval_cond(self) -> KPValue:
return self.sc.eval()
class VCPosition(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCPosition):
super().__init__(scenario, inner, sc)
class VCArea(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCArea):
super().__init__(scenario, inner, sc)
class VCLane(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCLane):
super().__init__(scenario, inner, sc)
class VCSpeed(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCSpeed):
super().__init__(scenario, inner, sc)
class VCDamage(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCDamage):
super().__init__(scenario, inner, sc)
class VCTime(ValidationConstraint):
def __init__(self, scenario: Scenario, inner: Evaluable, from_tick: int, to_tick: int):
# FIXME from_step/to_step inclusive/exclusive?
super().__init__(scenario, inner)
self.from_tick = from_tick
self.to_tick = to_tick
def eval_cond(self) -> KPValue:
from dbtypes.beamngpy import DBBeamNGpy
from warnings import warn
bng = self.scenario.bng
if bng and type(bng) is DBBeamNGpy:
# FIXME from_step/to_step inclusive/exclusive?
return KPValue.TRUE if self.from_tick <= bng.current_tick <= self.to_tick else KPValue.FALSE
else:
warn("The underlying BeamNGpy instance does not provide time information.")
class VCDistance(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCDistance):
super().__init__(scenario, inner, sc)
class VCTTC(ValidationConstraint):
from beamngpy import Scenario
def __init__(self, scenario: Scenario, inner: Evaluable):
super().__init__(scenario, inner)
def eval_cond(self) -> KPValue:
# TODO Determine collision to which participant/obstacle
# FIXME Position is in center of car vs crash when colliding with its bounding box
return KPValue.UNKNOWN
class VCLight(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCLight):
super().__init__(scenario, inner, sc)
class VCWaypoint(ValidationConstraintSC):
from beamngpy import Scenario
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCWaypoint):
super().__init__(scenario, inner, sc)
# Connectives
class Connective(Evaluable, ABC):
pass
class BinaryConnective(Connective, ABC):
def __init__(self, evaluables: List[Evaluable]) -> None:
self.evaluables = evaluables
class And(BinaryConnective):
def eval(self) -> KPValue:
return KPValue.TRUE if all(map(lambda e: e.eval() is KPValue.TRUE, self.evaluables)) else KPValue.FALSE
class Or(BinaryConnective):
def eval(self) -> KPValue:
return KPValue.TRUE if any(map(lambda e: e.eval() is KPValue.TRUE, self.evaluables)) else KPValue.FALSE
class Not(Connective):
def __init__(self, evaluable: Evaluable) -> None:
self.evaluable = evaluable
def eval(self) -> KPValue:
return self.evaluable.eval().__neg__()
CriteriaFunction = Callable[[Scenario], Evaluable]
# Test case type
@dataclass
class TestCase:
from generator import ScenarioBuilder
name: str
scenario: ScenarioBuilder
precondition_fct: CriteriaFunction
success_fct: CriteriaFunction
failure_fct: CriteriaFunction
stepsPerSecond: int
aiFrequency: int
authors: List[str]
|
[
"shapely.geometry.Point",
"shapely.geometry.Polygon",
"numpy.array",
"warnings.warn",
"drivebuildclient.static_vars",
"logging.getLogger"
] |
[((189, 237), 'logging.getLogger', 'getLogger', (['"""DriveBuild.SimNode.DBTypes.Criteria"""'], {}), "('DriveBuild.SimNode.DBTypes.Criteria')\n", (198, 237), False, 'from logging import getLogger\n'), ((2936, 2979), 'drivebuildclient.static_vars', 'static_vars', ([], {'prefix': '"""criterion_"""', 'counter': '(0)'}), "(prefix='criterion_', counter=0)\n", (2947, 2979), False, 'from drivebuildclient import static_vars\n'), ((4928, 4943), 'shapely.geometry.Polygon', 'Polygon', (['points'], {}), '(points)\n', (4935, 4943), False, 'from shapely.geometry import Polygon\n'), ((6290, 6311), 'shapely.geometry.Polygon', 'Polygon', ([], {'shell': 'points'}), '(shell=points)\n', (6297, 6311), False, 'from shapely.geometry import Polygon\n'), ((13059, 13134), 'warnings.warn', 'warn', (['"""The underlying BeamNGpy instance does not provide time information."""'], {}), "('The underlying BeamNGpy instance does not provide time information.')\n", (13063, 13134), False, 'from warnings import warn\n'), ((5331, 5342), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (5336, 5342), False, 'from shapely.geometry import Point\n'), ((4520, 4533), 'numpy.array', 'array', (['(x, y)'], {}), '((x, y))\n', (4525, 4533), False, 'from numpy import array\n'), ((4536, 4559), 'numpy.array', 'array', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (4541, 4559), False, 'from numpy import array\n'), ((9621, 9636), 'numpy.array', 'array', (['(x1, y1)'], {}), '((x1, y1))\n', (9626, 9636), False, 'from numpy import array\n'), ((9639, 9654), 'numpy.array', 'array', (['(x2, y2)'], {}), '((x2, y2))\n', (9644, 9654), False, 'from numpy import array\n')]
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Helper functions for issue template servlets"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import logging
from framework import authdata
from framework import exceptions
from framework import framework_bizobj
from framework import framework_helpers
from tracker import field_helpers
from tracker import tracker_bizobj
from tracker import tracker_constants
from tracker import tracker_helpers
from proto import tracker_pb2
MAX_NUM_PHASES = 6
PHASE_INPUTS = [
'phase_0', 'phase_1', 'phase_2', 'phase_3', 'phase_4', 'phase_5']
_NO_PHASE_VALUE = 'no_phase'
ParsedTemplate = collections.namedtuple(
'ParsedTemplate', 'name, members_only, summary, summary_must_be_edited, '
'content, status, owner_str, labels, field_val_strs, component_paths, '
'component_required, owner_defaults_to_member, admin_str, add_approvals, '
'phase_names, approvals_to_phase_idx, required_approval_ids')
def ParseTemplateRequest(post_data, config):
"""Parse an issue template."""
name = post_data.get('name', '')
members_only = (post_data.get('members_only') == 'on')
summary = post_data.get('summary', '')
summary_must_be_edited = (
post_data.get('summary_must_be_edited') == 'on')
content = post_data.get('content', '')
content = framework_helpers.WordWrapSuperLongLines(content, max_cols=75)
status = post_data.get('status', '')
owner_str = post_data.get('owner', '')
labels = post_data.getall('label')
field_val_strs = collections.defaultdict(list)
for fd in config.field_defs:
field_value_key = 'custom_%d' % fd.field_id
if post_data.get(field_value_key):
field_val_strs[fd.field_id].append(post_data[field_value_key])
component_paths = []
if post_data.get('components'):
for component_path in post_data.get('components').split(','):
if component_path.strip() not in component_paths:
component_paths.append(component_path.strip())
component_required = post_data.get('component_required') == 'on'
owner_defaults_to_member = post_data.get('owner_defaults_to_member') == 'on'
admin_str = post_data.get('admin_names', '')
add_approvals = post_data.get('add_approvals') == 'on'
phase_names = [post_data.get(phase_input, '') for phase_input in PHASE_INPUTS]
required_approval_ids = []
approvals_to_phase_idx = {}
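  # Map each approval to the index of the gate it is assigned to; approvals set to
  # _NO_PHASE_VALUE keep a None index, and approvals without a phase entry are skipped.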
for approval_def in config.approval_defs:
phase_num = post_data.get('approval_%d' % approval_def.approval_id, '')
if phase_num == _NO_PHASE_VALUE:
approvals_to_phase_idx[approval_def.approval_id] = None
else:
try:
idx = PHASE_INPUTS.index(phase_num)
approvals_to_phase_idx[approval_def.approval_id] = idx
except ValueError:
logging.info('approval %d was omitted' % approval_def.approval_id)
required_name = 'approval_%d_required' % approval_def.approval_id
if (post_data.get(required_name) == 'on'):
required_approval_ids.append(approval_def.approval_id)
return ParsedTemplate(
name, members_only, summary, summary_must_be_edited, content, status,
owner_str, labels, field_val_strs, component_paths, component_required,
owner_defaults_to_member, admin_str, add_approvals, phase_names,
approvals_to_phase_idx, required_approval_ids)
def GetTemplateInfoFromParsed(mr, services, parsed, config):
"""Get Template field info and PBs from a ParsedTemplate."""
admin_ids, _ = tracker_helpers.ParsePostDataUsers(
mr.cnxn, parsed.admin_str, services.user)
owner_id = 0
if parsed.owner_str:
try:
user_id = services.user.LookupUserID(mr.cnxn, parsed.owner_str)
auth = authdata.AuthData.FromUserID(mr.cnxn, user_id, services)
if framework_bizobj.UserIsInProject(mr.project, auth.effective_ids):
owner_id = user_id
else:
mr.errors.owner = 'User is not a member of this project.'
except exceptions.NoSuchUserException:
mr.errors.owner = 'Owner not found.'
component_ids = tracker_helpers.LookupComponentIDs(
parsed.component_paths, config, mr.errors)
# TODO(jojwang): monorail:4678 Process phase field values.
phase_field_val_strs = {}
field_values = field_helpers.ParseFieldValues(
mr.cnxn, services.user, parsed.field_val_strs,
phase_field_val_strs, config)
for fv in field_values:
logging.info('field_value is %r: %r',
fv.field_id, tracker_bizobj.GetFieldValue(fv, {}))
phases = []
approvals = []
if parsed.add_approvals:
phases, approvals = _GetPhasesAndApprovalsFromParsed(
mr, parsed.phase_names, parsed.approvals_to_phase_idx,
parsed.required_approval_ids)
return admin_ids, owner_id, component_ids, field_values, phases, approvals
def _GetPhasesAndApprovalsFromParsed(
mr, phase_names, approvals_to_phase_idx, required_approval_ids):
"""Get Phase PBs from a parsed phase_names and approvals_by_phase_idx."""
phases = []
approvals = []
valid_phase_names = []
for name in phase_names:
if name:
if not tracker_constants.PHASE_NAME_RE.match(name):
mr.errors.phase_approvals = 'Invalid gate name(s).'
return phases, approvals
valid_phase_names.append(name)
if len(valid_phase_names) != len(
set(name.lower() for name in valid_phase_names)):
mr.errors.phase_approvals = 'Duplicate gate names.'
return phases, approvals
valid_phase_idxs = [idx for idx, name in enumerate(phase_names) if name]
if set(valid_phase_idxs) != set([
idx for idx in approvals_to_phase_idx.values() if idx is not None]):
mr.errors.phase_approvals = 'Defined gates must have assigned approvals.'
return phases, approvals
# Distributing the ranks over a wider range is not necessary since
# any edits to template phases will cause a complete rewrite.
# phase_id is temporarily the idx for keeping track of which approvals
# belong to which phases.
for idx, phase_name in enumerate(phase_names):
if phase_name:
phase = tracker_pb2.Phase(name=phase_name, rank=idx, phase_id=idx)
phases.append(phase)
for approval_id, phase_idx in approvals_to_phase_idx.items():
av = tracker_pb2.ApprovalValue(
approval_id=approval_id, phase_id=phase_idx)
if approval_id in required_approval_ids:
av.status = tracker_pb2.ApprovalStatus.NEEDS_REVIEW
approvals.append(av)
return phases, approvals
def FilterApprovalsAndPhases(approval_values, phases, config):
"""Return lists without deleted approvals and empty phases."""
deleted_approval_ids = [fd.field_id for fd in config.field_defs if
fd.is_deleted and
fd.field_type is tracker_pb2.FieldTypes.APPROVAL_TYPE]
filtered_avs = [av for av in approval_values if
av.approval_id not in deleted_approval_ids]
av_phase_ids = list(set([av.phase_id for av in filtered_avs]))
filtered_phases = [phase for phase in phases if
phase.phase_id in av_phase_ids]
return filtered_avs, filtered_phases
def GatherApprovalsPageData(approval_values, tmpl_phases, config):
"""Create the page data necessary for filling in the launch-gates-table."""
filtered_avs, filtered_phases = FilterApprovalsAndPhases(
approval_values, tmpl_phases, config)
filtered_phases.sort(key=lambda phase: phase.rank)
required_approval_ids = []
prechecked_approvals = []
phase_idx_by_id = {
phase.phase_id:idx for idx, phase in enumerate(filtered_phases)}
for av in filtered_avs:
# approval is part of a phase and that phase can be found.
if phase_idx_by_id.get(av.phase_id) is not None:
idx = phase_idx_by_id.get(av.phase_id)
prechecked_approvals.append(
'%d_phase_%d' % (av.approval_id, idx))
else:
prechecked_approvals.append('%d' % av.approval_id)
if av.status is tracker_pb2.ApprovalStatus.NEEDS_REVIEW:
required_approval_ids.append(av.approval_id)
num_phases = len(filtered_phases)
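  # Pad with empty phases so the launch-gates table always renders MAX_NUM_PHASES gates.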
filtered_phases.extend([tracker_pb2.Phase()] * (
MAX_NUM_PHASES - num_phases))
return prechecked_approvals, required_approval_ids, filtered_phases
def GetCheckedApprovalsFromParsed(approvals_to_phase_idx):
checked_approvals = []
for approval_id, phs_idx in approvals_to_phase_idx.items():
if phs_idx is not None:
checked_approvals.append('%d_phase_%d' % (approval_id, phs_idx))
else:
checked_approvals.append('%d' % approval_id)
return checked_approvals
def GetIssueFromTemplate(template, project_id, reporter_id):
# type: (proto.tracker_pb2.TemplateDef, int, int) ->
# proto.tracker_pb2.Issue
"""Build a templated issue from TemplateDef.
Args:
template: Template that issue creation is based on.
project_id: ID of the Project the template belongs to.
reporter_id: Requesting user's ID.
Returns:
protorpc Issue filled with data from given `template`.
"""
owner_id = None
if template.owner_id:
owner_id = template.owner_id
elif template.owner_defaults_to_member:
owner_id = reporter_id
issue = tracker_pb2.Issue(
project_id=project_id,
summary=template.summary,
status=template.status,
owner_id=owner_id,
labels=template.labels,
component_ids=template.component_ids,
reporter_id=reporter_id,
field_values=template.field_values,
phases=template.phases,
approval_values=template.approval_values)
return issue
|
[
"proto.tracker_pb2.ApprovalValue",
"framework.framework_bizobj.UserIsInProject",
"tracker.tracker_bizobj.GetFieldValue",
"proto.tracker_pb2.Issue",
"framework.authdata.AuthData.FromUserID",
"collections.defaultdict",
"logging.info",
"tracker.field_helpers.ParseFieldValues",
"collections.namedtuple",
"tracker.tracker_helpers.LookupComponentIDs",
"framework.framework_helpers.WordWrapSuperLongLines",
"tracker.tracker_helpers.ParsePostDataUsers",
"proto.tracker_pb2.Phase",
"tracker.tracker_constants.PHASE_NAME_RE.match"
] |
[((893, 1198), 'collections.namedtuple', 'collections.namedtuple', (['"""ParsedTemplate"""', '"""name, members_only, summary, summary_must_be_edited, content, status, owner_str, labels, field_val_strs, component_paths, component_required, owner_defaults_to_member, admin_str, add_approvals, phase_names, approvals_to_phase_idx, required_approval_ids"""'], {}), "('ParsedTemplate',\n 'name, members_only, summary, summary_must_be_edited, content, status, owner_str, labels, field_val_strs, component_paths, component_required, owner_defaults_to_member, admin_str, add_approvals, phase_names, approvals_to_phase_idx, required_approval_ids'\n )\n", (915, 1198), False, 'import collections\n'), ((1567, 1629), 'framework.framework_helpers.WordWrapSuperLongLines', 'framework_helpers.WordWrapSuperLongLines', (['content'], {'max_cols': '(75)'}), '(content, max_cols=75)\n', (1607, 1629), False, 'from framework import framework_helpers\n'), ((1766, 1795), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1789, 1795), False, 'import collections\n'), ((3686, 3762), 'tracker.tracker_helpers.ParsePostDataUsers', 'tracker_helpers.ParsePostDataUsers', (['mr.cnxn', 'parsed.admin_str', 'services.user'], {}), '(mr.cnxn, parsed.admin_str, services.user)\n', (3720, 3762), False, 'from tracker import tracker_helpers\n'), ((4243, 4320), 'tracker.tracker_helpers.LookupComponentIDs', 'tracker_helpers.LookupComponentIDs', (['parsed.component_paths', 'config', 'mr.errors'], {}), '(parsed.component_paths, config, mr.errors)\n', (4277, 4320), False, 'from tracker import tracker_helpers\n'), ((4435, 4547), 'tracker.field_helpers.ParseFieldValues', 'field_helpers.ParseFieldValues', (['mr.cnxn', 'services.user', 'parsed.field_val_strs', 'phase_field_val_strs', 'config'], {}), '(mr.cnxn, services.user, parsed.\n field_val_strs, phase_field_val_strs, config)\n', (4465, 4547), False, 'from tracker import field_helpers\n'), ((9318, 9633), 'proto.tracker_pb2.Issue', 'tracker_pb2.Issue', ([], {'project_id': 'project_id', 'summary': 'template.summary', 'status': 'template.status', 'owner_id': 'owner_id', 'labels': 'template.labels', 'component_ids': 'template.component_ids', 'reporter_id': 'reporter_id', 'field_values': 'template.field_values', 'phases': 'template.phases', 'approval_values': 'template.approval_values'}), '(project_id=project_id, summary=template.summary, status=\n template.status, owner_id=owner_id, labels=template.labels,\n component_ids=template.component_ids, reporter_id=reporter_id,\n field_values=template.field_values, phases=template.phases,\n approval_values=template.approval_values)\n', (9335, 9633), False, 'from proto import tracker_pb2\n'), ((6406, 6476), 'proto.tracker_pb2.ApprovalValue', 'tracker_pb2.ApprovalValue', ([], {'approval_id': 'approval_id', 'phase_id': 'phase_idx'}), '(approval_id=approval_id, phase_id=phase_idx)\n', (6431, 6476), False, 'from proto import tracker_pb2\n'), ((3901, 3957), 'framework.authdata.AuthData.FromUserID', 'authdata.AuthData.FromUserID', (['mr.cnxn', 'user_id', 'services'], {}), '(mr.cnxn, user_id, services)\n', (3929, 3957), False, 'from framework import authdata\n'), ((3967, 4031), 'framework.framework_bizobj.UserIsInProject', 'framework_bizobj.UserIsInProject', (['mr.project', 'auth.effective_ids'], {}), '(mr.project, auth.effective_ids)\n', (3999, 4031), False, 'from framework import framework_bizobj\n'), ((4654, 4690), 'tracker.tracker_bizobj.GetFieldValue', 'tracker_bizobj.GetFieldValue', (['fv', '{}'], {}), '(fv, {})\n', (4682, 4690), False, 'from tracker import tracker_bizobj\n'), ((6246, 6304), 'proto.tracker_pb2.Phase', 'tracker_pb2.Phase', ([], {'name': 'phase_name', 'rank': 'idx', 'phase_id': 'idx'}), '(name=phase_name, rank=idx, phase_id=idx)\n', (6263, 6304), False, 'from proto import tracker_pb2\n'), ((5284, 5327), 'tracker.tracker_constants.PHASE_NAME_RE.match', 'tracker_constants.PHASE_NAME_RE.match', (['name'], {}), '(name)\n', (5321, 5327), False, 'from tracker import tracker_constants\n'), ((8260, 8279), 'proto.tracker_pb2.Phase', 'tracker_pb2.Phase', ([], {}), '()\n', (8277, 8279), False, 'from proto import tracker_pb2\n'), ((2993, 3059), 'logging.info', 'logging.info', (["('approval %d was omitted' % approval_def.approval_id)"], {}), "('approval %d was omitted' % approval_def.approval_id)\n", (3005, 3059), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from logging import NullHandler, getLogger
from six import text_type
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class Reportable(object):
"""Reportable abstract class."""
def __init__(self, name, description=None, reportable=True):
"""Constructor."""
self.name = name
self._description = description
self.reportable = reportable
@property
def description(self):
"""Rule description."""
return self._description or self.name
def report(self, value, context):
"""Report unknown value."""
if not value or not self.reportable:
return
value = text_type(value)
if 'report' in context:
report_map = context['report'].setdefault(self.description, {})
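            # Remember only the first path at which each distinct unknown value was seen.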
if value not in report_map:
report_map[value] = context['path']
logger.info('Invalid %s: %r', self.description, value)
|
[
"six.text_type",
"logging.getLogger",
"logging.NullHandler"
] |
[((145, 164), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (154, 164), False, 'from logging import NullHandler, getLogger\n'), ((183, 196), 'logging.NullHandler', 'NullHandler', ([], {}), '()\n', (194, 196), False, 'from logging import NullHandler, getLogger\n'), ((734, 750), 'six.text_type', 'text_type', (['value'], {}), '(value)\n', (743, 750), False, 'from six import text_type\n')]
|
from django.contrib.auth.models import User
from django.db import models
from datetime import datetime, timedelta
TARGETS = (
('ACA', 'Academic'),
('GAM', 'Gaming'),
('SCL', 'Social'),
('SCT', 'Society'),
)
COMMS_TYPE = (
('NL', 'Newsletter'),
('M', 'Minute'),
('N', 'News Item'),
)
STATUS = (
('RE', 'Requested'),
('PR', 'Present'),
('DD', 'Disabled'),
)
# The following models are copied from the previous compsoc website (Django Reinhardt)
class Communication(models.Model):
title = models.CharField(max_length=100)
date = models.DateField()
text = models.TextField()
type = models.CharField(max_length=2, choices=COMMS_TYPE)
class Meta:
db_table = 'comms_communication'
class OldEventType(models.Model):
name = models.CharField(max_length=20)
info = models.TextField()
target = models.CharField(max_length=3, choices=TARGETS)
class Meta:
db_table = 'events_eventtype'
class OldLocation(models.Model):
name = models.CharField(max_length=60)
description = models.TextField()
image_url = models.CharField(max_length=255, default="/static/img/no_location.png")
map_loc = models.CharField(max_length=30, blank=True)
class Meta:
db_table = 'events_location'
class OldEvent(models.Model):
"""
Represents a single event
"""
# I'm never using camel case for model fields again :p
type = models.ForeignKey(OldEventType)
shortDescription = models.CharField(max_length=255, verbose_name="Short Description",
help_text="This text is displayed on the events index.")
location = models.ForeignKey(OldLocation)
longDescription = models.TextField(verbose_name="Long Description",
help_text="This text is displayed on the details page for this event.")
start = models.DateTimeField(default=datetime.now)
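    # The lambda defers evaluation, so each event defaults to finishing one hour after creation time.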
finish = models.DateTimeField(default=lambda: datetime.now() + timedelta(hours=1))
displayFrom = models.DateTimeField(default=datetime.now, verbose_name="Display From",
help_text="This controls when the event will be visible in the index and feeds.")
cancelled = models.BooleanField()
class Meta:
db_table = 'events_event'
class OldEventSignup(models.Model):
"""
This represents the signup options for a particular event,
e.g Signup limits and time constraints
This might be renamed to EventSignupOptions
"""
event = models.OneToOneField(OldEvent)
signupsLimit = models.IntegerField(verbose_name="Signups Limit", help_text="0 here implies unlimited signups.")
open = models.DateTimeField()
close = models.DateTimeField()
fresher_open = models.DateTimeField(
help_text="This allows you to control whether freshers can sign up earlier or later than regular members.")
guest_open = models.DateTimeField(
help_text="This allows you to control whether guests can sign up earlier or later than regular members.")
# this might be renamed to seating_plan for clarity
class Meta:
db_table = 'events_eventsignup'
class Signup(models.Model):
event = models.ForeignKey(OldEvent)
time = models.DateTimeField()
user = models.ForeignKey(User)
comment = models.TextField(blank=True)
class Meta:
db_table = 'events_signup'
class Member(models.Model):
"""
Used to store auxiliary data to the default profile data for
a django User.
"""
user = models.OneToOneField(User)
showDetails = models.BooleanField()
guest = models.BooleanField()
# Optional info about one's website
class WebsiteDetails(models.Model):
user = models.OneToOneField(User)
websiteUrl = models.CharField(max_length=50)
websiteTitle = models.CharField(max_length=50)
class Meta:
db_table = 'memberinfo_websitedetails'
class NicknameDetails(models.Model):
user = models.OneToOneField(User)
nickname = models.CharField(max_length=20)
class Meta:
db_table = 'memberinfo_nicknamedetails'
class OldShellAccount(models.Model):
user = models.OneToOneField(User)
name = models.CharField(max_length=30)
status = models.CharField(max_length=2, choices=STATUS)
class Meta:
db_table = 'memberinfo_shellaccount'
class OldDatabaseAccount(models.Model):
user = models.OneToOneField(User)
name = models.CharField(max_length=30)
status = models.CharField(max_length=2, choices=STATUS)
class Meta:
db_table = 'memberinfo_databaseaccount'
class OldExecPosition(models.Model):
"""
Represents an exec position
"""
title = models.CharField(max_length=30)
class Meta:
db_table = 'memberinfo_execposition'
class OldExecPlacement(models.Model):
"""
Represents a time period of working on the exec
"""
position = models.ForeignKey(OldExecPosition)
user = models.ForeignKey(User)
start = models.DateField()
end = models.DateField()
class Meta:
db_table = 'memberinfo_execplacement'
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"datetime.timedelta",
"django.db.models.DateField",
"django.db.models.DateTimeField",
"datetime.datetime.now"
] |
[((536, 568), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (552, 568), False, 'from django.db import models\n'), ((580, 598), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (596, 598), False, 'from django.db import models\n'), ((610, 628), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (626, 628), False, 'from django.db import models\n'), ((640, 690), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)', 'choices': 'COMMS_TYPE'}), '(max_length=2, choices=COMMS_TYPE)\n', (656, 690), False, 'from django.db import models\n'), ((796, 827), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (812, 827), False, 'from django.db import models\n'), ((839, 857), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (855, 857), False, 'from django.db import models\n'), ((871, 918), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'choices': 'TARGETS'}), '(max_length=3, choices=TARGETS)\n', (887, 918), False, 'from django.db import models\n'), ((1020, 1051), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (1036, 1051), False, 'from django.db import models\n'), ((1070, 1088), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1086, 1088), False, 'from django.db import models\n'), ((1105, 1176), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '"""/static/img/no_location.png"""'}), "(max_length=255, default='/static/img/no_location.png')\n", (1121, 1176), False, 'from django.db import models\n'), ((1191, 1234), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(True)'}), '(max_length=30, blank=True)\n', (1207, 1234), False, 'from django.db import models\n'), ((1437, 1468), 'django.db.models.ForeignKey', 'models.ForeignKey', (['OldEventType'], {}), '(OldEventType)\n', (1454, 1468), False, 'from django.db import models\n'), ((1492, 1619), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""Short Description"""', 'help_text': '"""This text is displayed on the events index."""'}), "(max_length=255, verbose_name='Short Description',\n help_text='This text is displayed on the events index.')\n", (1508, 1619), False, 'from django.db import models\n'), ((1671, 1701), 'django.db.models.ForeignKey', 'models.ForeignKey', (['OldLocation'], {}), '(OldLocation)\n', (1688, 1701), False, 'from django.db import models\n'), ((1724, 1850), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Long Description"""', 'help_text': '"""This text is displayed on the details page for this event."""'}), "(verbose_name='Long Description', help_text=\n 'This text is displayed on the details page for this event.')\n", (1740, 1850), False, 'from django.db import models\n'), ((1897, 1939), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.now'}), '(default=datetime.now)\n', (1917, 1939), False, 'from django.db import models\n'), ((2045, 2207), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.now', 'verbose_name': '"""Display From"""', 'help_text': '"""This controls when the event will be visible in the index and feeds."""'}), "(default=datetime.now, verbose_name='Display From',\n help_text=\n 'This controls when the event will be visible in the index and feeds.')\n", (2065, 2207), False, 'from django.db import models\n'), ((2254, 2275), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (2273, 2275), False, 'from django.db import models\n'), ((2547, 2577), 'django.db.models.OneToOneField', 'models.OneToOneField', (['OldEvent'], {}), '(OldEvent)\n', (2567, 2577), False, 'from django.db import models\n'), ((2597, 2698), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Signups Limit"""', 'help_text': '"""0 here implies unlimited signups."""'}), "(verbose_name='Signups Limit', help_text=\n '0 here implies unlimited signups.')\n", (2616, 2698), False, 'from django.db import models\n'), ((2705, 2727), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2725, 2727), False, 'from django.db import models\n'), ((2740, 2762), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2760, 2762), False, 'from django.db import models\n'), ((2782, 2920), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'help_text': '"""This allows you to control whether freshers can sign up earlier or later than regular members."""'}), "(help_text=\n 'This allows you to control whether freshers can sign up earlier or later than regular members.'\n )\n", (2802, 2920), False, 'from django.db import models\n'), ((2937, 3073), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'help_text': '"""This allows you to control whether guests can sign up earlier or later than regular members."""'}), "(help_text=\n 'This allows you to control whether guests can sign up earlier or later than regular members.'\n )\n", (2957, 3073), False, 'from django.db import models\n'), ((3229, 3256), 'django.db.models.ForeignKey', 'models.ForeignKey', (['OldEvent'], {}), '(OldEvent)\n', (3246, 3256), False, 'from django.db import models\n'), ((3268, 3290), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (3288, 3290), False, 'from django.db import models\n'), ((3302, 3325), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (3319, 3325), False, 'from django.db import models\n'), ((3340, 3368), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (3356, 3368), False, 'from django.db import models\n'), ((3562, 3588), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {}), '(User)\n', (3582, 3588), False, 'from django.db import models\n'), ((3607, 3628), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (3626, 3628), False, 'from django.db import models\n'), ((3641, 3662), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (3660, 3662), False, 'from django.db import models\n'), ((3748, 3774), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {}), '(User)\n', (3768, 3774), False, 'from django.db import models\n'), ((3792, 3823), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (3808, 3823), False, 'from django.db import models\n'), ((3843, 3874), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (3859, 3874), False, 'from django.db import models\n'), ((3989, 4015), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {}), '(User)\n', (4009, 4015), False, 'from django.db import models\n'), ((4031, 4062), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (4047, 4062), False, 'from django.db import models\n'), ((4178, 4204), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {}), '(User)\n', (4198, 4204), False, 'from django.db import models\n'), ((4216, 4247), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (4232, 4247), False, 'from django.db import models\n'), ((4261, 4307), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)', 'choices': 'STATUS'}), '(max_length=2, choices=STATUS)\n', (4277, 4307), False, 'from django.db import models\n'), ((4423, 4449), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {}), '(User)\n', (4443, 4449), False, 'from django.db import models\n'), ((4461, 4492), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (4477, 4492), False, 'from django.db import models\n'), ((4506, 4552), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)', 'choices': 'STATUS'}), '(max_length=2, choices=STATUS)\n', (4522, 4552), False, 'from django.db import models\n'), ((4717, 4748), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (4733, 4748), False, 'from django.db import models\n'), ((4934, 4968), 'django.db.models.ForeignKey', 'models.ForeignKey', (['OldExecPosition'], {}), '(OldExecPosition)\n', (4951, 4968), False, 'from django.db import models\n'), ((4980, 5003), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (4997, 5003), False, 'from django.db import models\n'), ((5016, 5034), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (5032, 5034), False, 'from django.db import models\n'), ((5045, 5063), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (5061, 5063), False, 'from django.db import models\n'), ((1990, 2004), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2002, 2004), False, 'from datetime import datetime, timedelta\n'), ((2007, 2025), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2016, 2025), False, 'from datetime import datetime, timedelta\n')]
|
from suds.client import Client
from suds import WebFault
class SoapHelper:
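    """Thin test helper around the MantisBT SOAP (MantisConnect) API."""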
def __init__(self, app):
self.app = app
def can_login(self, username, password):
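        # a valid login returns normally; invalid credentials raise a WebFault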
client = Client("http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl")
try:
client.service.mc_login(username, password)
return True
except WebFault:
return False
def get_projects(self, username, password, pname):
client = Client("http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl")
try:
p = client.service.mc_project_get_id_from_name(username=username, password=password, project_name=str(pname))
            assert p > 0
        except WebFault:
            assert False
def get_projects_del(self, username, password, pname):
client = Client("http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl")
try:
            p = client.service.mc_project_get_id_from_name(username=username, password=password, project_name=str(pname))
            # after deletion the name should no longer resolve to a valid project id
            assert p == 0
        except WebFault:
            # the API may also report a missing project through a SOAP fault
            pass
|
[
"suds.client.Client"
] |
[((193, 267), 'suds.client.Client', 'Client', (['"""http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl"""'], {}), "('http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl')\n", (199, 267), False, 'from suds.client import Client\n'), ((484, 558), 'suds.client.Client', 'Client', (['"""http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl"""'], {}), "('http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl')\n", (490, 558), False, 'from suds.client import Client\n'), ((835, 909), 'suds.client.Client', 'Client', (['"""http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl"""'], {}), "('http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl')\n", (841, 909), False, 'from suds.client import Client\n')]
|
from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pytest
from chitra.visualization.metrics import (
cm_accuracy,
detect_multilabel,
plot_confusion_matrix,
)
def test_detect_multilabel():
with pytest.raises(UserWarning):
detect_multilabel({"label1": "this will raise UserWarning"})
assert detect_multilabel([1, 2, 3, 4])
assert not detect_multilabel([0, 1, 1, 0])
def test_cm_accuracy():
x = np.asarray([[1, 2], [1, 2]])
assert cm_accuracy(x) == 0.5
@patch("chitra.visualization.metrics.plt")
def test_plot_confusion_matrix(mock_plt: Mock):
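    # plt is patched out, so the test never opens a real figure window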
mock_plt.show = MagicMock()
y_pred = [1, 1, 0, 1]
y_true = [0, 1, 0, 1]
assert plot_confusion_matrix(y_pred, y_true) is None
mock_plt.show.assert_called_once()
|
[
"chitra.visualization.metrics.cm_accuracy",
"unittest.mock.MagicMock",
"numpy.asarray",
"chitra.visualization.metrics.plot_confusion_matrix",
"unittest.mock.patch",
"pytest.raises",
"chitra.visualization.metrics.detect_multilabel"
] |
[((524, 565), 'unittest.mock.patch', 'patch', (['"""chitra.visualization.metrics.plt"""'], {}), "('chitra.visualization.metrics.plt')\n", (529, 565), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((346, 377), 'chitra.visualization.metrics.detect_multilabel', 'detect_multilabel', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (363, 377), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((459, 487), 'numpy.asarray', 'np.asarray', (['[[1, 2], [1, 2]]'], {}), '([[1, 2], [1, 2]])\n', (469, 487), True, 'import numpy as np\n'), ((634, 645), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (643, 645), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((237, 263), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (250, 263), False, 'import pytest\n'), ((273, 333), 'chitra.visualization.metrics.detect_multilabel', 'detect_multilabel', (["{'label1': 'this will raise UserWarning'}"], {}), "({'label1': 'this will raise UserWarning'})\n", (290, 333), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((393, 424), 'chitra.visualization.metrics.detect_multilabel', 'detect_multilabel', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (410, 424), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((499, 513), 'chitra.visualization.metrics.cm_accuracy', 'cm_accuracy', (['x'], {}), '(x)\n', (510, 513), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((711, 748), 'chitra.visualization.metrics.plot_confusion_matrix', 'plot_confusion_matrix', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (732, 748), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import six
import mock
import requests
from apprise import plugins
from helpers import AppriseURLTester
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
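# Each entry pairs a notification URL with the expected plugin instance and mocked response behaviour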
apprise_url_tests = (
##################################
# NotifyNextcloud
##################################
('ncloud://:@/', {
'instance': None,
}),
('ncloud://', {
'instance': None,
}),
('nclouds://', {
# No hostname
'instance': None,
}),
('ncloud://localhost', {
# No user specified
'instance': TypeError,
}),
('ncloud://user@localhost?to=user1,user2&version=invalid', {
# An invalid version was specified
'instance': TypeError,
}),
('ncloud://user@localhost?to=user1,user2&version=0', {
# An invalid version was specified
'instance': TypeError,
}),
('ncloud://user@localhost?to=user1,user2&version=-23', {
# An invalid version was specified
'instance': TypeError,
}),
('ncloud://localhost/admin', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost/admin', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost?to=user1,user2', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost?to=user1,user2&version=20', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost?to=user1,user2&version=21', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user:pass@localhost/user1/user2', {
'instance': plugins.NotifyNextcloud,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'ncloud://user:****@localhost/user1/user2',
}),
('ncloud://user:pass@localhost:8080/admin', {
'instance': plugins.NotifyNextcloud,
}),
('nclouds://user:pass@localhost/admin', {
'instance': plugins.NotifyNextcloud,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'nclouds://user:****@localhost/admin',
}),
('nclouds://user:pass@localhost:8080/admin/', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://localhost:8080/admin?+HeaderKey=HeaderValue', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user:pass@localhost:8081/admin', {
'instance': plugins.NotifyNextcloud,
# force a failure
'response': False,
'requests_response_code': requests.codes.internal_server_error,
}),
('ncloud://user:pass@localhost:8082/admin', {
'instance': plugins.NotifyNextcloud,
        # throw a bizarre code forcing us to fail to look it up
'response': False,
'requests_response_code': 999,
}),
('ncloud://user:pass@localhost:8083/user1/user2/user3', {
'instance': plugins.NotifyNextcloud,
# Throws a series of connection and transfer exceptions when this flag
        # is set and tests that we gracefully handle them
'test_requests_exceptions': True,
}),
)
def test_plugin_nextcloud_urls():
"""
NotifyNextcloud() Apprise URLs
"""
# Run our general tests
AppriseURLTester(tests=apprise_url_tests).run_all()
@mock.patch('requests.post')
def test_plugin_nextcloud_edge_cases(mock_post):
"""
NotifyNextcloud() Edge Cases
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
# A response
robj = mock.Mock()
robj.content = ''
robj.status_code = requests.codes.ok
# Prepare Mock
mock_post.return_value = robj
# Variation Initializations
obj = plugins.NotifyNextcloud(
host="localhost", user="admin", password="<PASSWORD>", targets="user")
assert isinstance(obj, plugins.NotifyNextcloud) is True
assert isinstance(obj.url(), six.string_types) is True
# An empty body
assert obj.send(body="") is True
assert 'data' in mock_post.call_args_list[0][1]
assert 'shortMessage' in mock_post.call_args_list[0][1]['data']
# The longMessage argument is not set
assert 'longMessage' not in mock_post.call_args_list[0][1]['data']
|
[
"helpers.AppriseURLTester",
"mock.patch",
"logging.disable",
"mock.Mock",
"apprise.plugins.NotifyNextcloud"
] |
[((1362, 1395), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (1377, 1395), False, 'import logging\n'), ((4461, 4488), 'mock.patch', 'mock.patch', (['"""requests.post"""'], {}), "('requests.post')\n", (4471, 4488), False, 'import mock\n'), ((4707, 4718), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4716, 4718), False, 'import mock\n'), ((4879, 4978), 'apprise.plugins.NotifyNextcloud', 'plugins.NotifyNextcloud', ([], {'host': '"""localhost"""', 'user': '"""admin"""', 'password': '"""<PASSWORD>"""', 'targets': '"""user"""'}), "(host='localhost', user='admin', password=\n '<PASSWORD>', targets='user')\n", (4902, 4978), False, 'from apprise import plugins\n'), ((4406, 4447), 'helpers.AppriseURLTester', 'AppriseURLTester', ([], {'tests': 'apprise_url_tests'}), '(tests=apprise_url_tests)\n', (4422, 4447), False, 'from helpers import AppriseURLTester\n')]
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import sys
from puppet_test import PuppetTest
path = os.path.abspath(__file__)
path = os.path.dirname(path)
sys.path.insert(0, path)
class PuppetModule:
"""This class represents Puppet module."""
def __init__(self, local_module_path):
"""You should give this constructor the full path to the module."""
self.local_module_path = local_module_path
self.module_name = os.path.basename(self.local_module_path)
self.__tests = []
self.__dependencies = []
self.comment_regexp = re.compile(r'^\s*#')
self.dependency_regexp = \
re.compile(r'^\s*dependency\s*[\'\"]*([^\'\"]+)[\'\"]*')
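        # matches Modulefile lines such as: dependency 'puppetlabs/stdlib'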
self.find_tests()
self.find_dependencies()
def find_dependencies(self):
"""Get dependencies of this module from Modulefile if present."""
module_file = 'Modulefile'
dependencies = []
module_file_path = os.path.join(self.local_module_path, module_file)
if not os.path.isfile(module_file_path):
self.__dependencies = dependencies
return False
        with open(module_file_path, 'r') as opened_file:
            for line in opened_file.readlines():
                if re.match(self.comment_regexp, line):
                    # skip commented line
                    continue
                match = re.match(self.dependency_regexp, line)
                if match:
                    # found dependency line
                    dependency_name = match.group(1).split('/')[-1]
                    dependencies.append(dependency_name)
self.__dependencies = dependencies
return True
def find_tests(self):
"""Find all tests.
Find all tests in this module and fill tests array
with PuppetTest objects.
"""
current_path = os.path.abspath(os.curdir)
try:
os.chdir(self.local_module_path)
        except OSError as error:
            logging.error("Cannot change directory to %s: %s" %
                          (self.local_module_path, error))
else:
for root, dirs, files in os.walk('tests'):
for test_file in files:
                    if not test_file.endswith('.pp'):
continue
test_file_path = os.path.join(root, test_file)
puppet_test = PuppetTest(test_file_path)
self.__tests.append(puppet_test)
finally:
# try to restore original folder on exit
try:
os.chdir(current_path)
            except OSError as error:
                logging.error("Cannot change directory to %s: %s" %
                              (self.local_module_path, error))
@property
def tests(self):
"""Property returns list of tests."""
return self.__tests
@property
def name(self):
"""Property returns module name."""
return self.module_name
@property
def path(self):
"""Property returns path to this module."""
return self.local_module_path
@property
def dependencies(self):
"""Property returns list of module dependencies."""
return self.__dependencies
def __repr__(self):
"""String representation of PuppetModule."""
tests_string = ''
if len(self.tests) > 0:
tests = [repr(test) for test in self.tests]
tests_string += ", ".join(tests)
tpl = "PuppetModule(name=%s, path=%s, tests=[%s]" \
% (self.name, self.path, tests_string)
return tpl
|
[
"os.path.abspath",
"logging.error",
"os.path.basename",
"os.path.dirname",
"os.walk",
"sys.path.insert",
"re.match",
"os.path.isfile",
"puppet_test.PuppetTest",
"os.path.join",
"os.chdir",
"re.compile"
] |
[((700, 725), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (715, 725), False, 'import os\n'), ((733, 754), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (748, 754), False, 'import os\n'), ((755, 779), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (770, 779), False, 'import sys\n'), ((1047, 1087), 'os.path.basename', 'os.path.basename', (['self.local_module_path'], {}), '(self.local_module_path)\n', (1063, 1087), False, 'import os\n'), ((1179, 1199), 're.compile', 're.compile', (['"""^\\\\s*#"""'], {}), "('^\\\\s*#')\n", (1189, 1199), False, 'import re\n'), ((1247, 1313), 're.compile', 're.compile', (['"""^\\\\s*dependency\\\\s*[\\\\\'\\\\"]*([^\\\\\'\\\\"]+)[\\\\\'\\\\"]*"""'], {}), '(\'^\\\\s*dependency\\\\s*[\\\\\\\'\\\\"]*([^\\\\\\\'\\\\"]+)[\\\\\\\'\\\\"]*\')\n', (1257, 1313), False, 'import re\n'), ((1560, 1609), 'os.path.join', 'os.path.join', (['self.local_module_path', 'module_file'], {}), '(self.local_module_path, module_file)\n', (1572, 1609), False, 'import os\n'), ((2423, 2449), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (2438, 2449), False, 'import os\n'), ((1625, 1657), 'os.path.isfile', 'os.path.isfile', (['module_file_path'], {}), '(module_file_path)\n', (1639, 1657), False, 'import os\n'), ((1841, 1876), 're.match', 're.match', (['self.comment_regexp', 'line'], {}), '(self.comment_regexp, line)\n', (1849, 1876), False, 'import re\n'), ((1961, 1999), 're.match', 're.match', (['self.dependency_regexp', 'line'], {}), '(self.dependency_regexp, line)\n', (1969, 1999), False, 'import re\n'), ((2475, 2507), 'os.chdir', 'os.chdir', (['self.local_module_path'], {}), '(self.local_module_path)\n', (2483, 2507), False, 'import os\n'), ((2723, 2739), 'os.walk', 'os.walk', (['"""tests"""'], {}), "('tests')\n", (2730, 2739), False, 'import os\n'), ((2553, 2649), 'logging.error', 'logging.error', (["('Cannot change directory to %s: %s' % (self.local_module_path, error.message))"], {}), "('Cannot change directory to %s: %s' % (self.local_module_path,\n error.message))\n", (2566, 2649), False, 'import logging\n'), ((3150, 3172), 'os.chdir', 'os.chdir', (['current_path'], {}), '(current_path)\n', (3158, 3172), False, 'import os\n'), ((2903, 2932), 'os.path.join', 'os.path.join', (['root', 'test_file'], {}), '(root, test_file)\n', (2915, 2932), False, 'import os\n'), ((2967, 2993), 'puppet_test.PuppetTest', 'PuppetTest', (['test_file_path'], {}), '(test_file_path)\n', (2977, 2993), False, 'from puppet_test import PuppetTest\n'), ((3226, 3325), 'logging.error', 'logging.error', (["('Cannot change directory to %s: %s' % (self.local_module_path, error.message))", '(1)'], {}), "('Cannot change directory to %s: %s' % (self.local_module_path,\n error.message), 1)\n", (3239, 3325), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
POINTS = 1000
if __name__ == '__main__':
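    # two Gaussian point clouds, each flattened along one axis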
gauss1 = (np.random.randn(POINTS), np.random.randn(POINTS)*0.24);
gauss2 = (np.random.randn(POINTS)*0.28, np.random.randn(POINTS));
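    # linear trends (slopes -2 and +1) that are added onto the clouds below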
x1 = np.array(range(POINTS)) * 0.005
y1 = x1 * -2
x2 = x1
y2 = x2 * 1
offset_x1 = -4
offset_y1 = 2
    cc = list(gauss1)  # mutable copies of the coordinate arrays (zip() is not subscriptable on Python 3)
    dd = list(gauss2)
cc[0] = cc[0] + x1
cc[1] = cc[1] + y1
cc[0] = cc[0] + offset_x1
cc[1] = cc[1] + offset_y1
dd[0] = dd[0] + x2
dd[1] = dd[1] + y2
plt.scatter(cc[0], cc[1], c=u'b')
plt.scatter(dd[0], dd[1], c=u'r')
plt.draw()
plt.show()
|
[
"matplotlib.pyplot.draw",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"numpy.random.randn"
] |
[((604, 637), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cc[0]', 'cc[1]'], {'c': 'u"""b"""'}), "(cc[0], cc[1], c=u'b')\n", (615, 637), True, 'import matplotlib.pyplot as plt\n'), ((642, 675), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dd[0]', 'dd[1]'], {'c': 'u"""r"""'}), "(dd[0], dd[1], c=u'r')\n", (653, 675), True, 'import matplotlib.pyplot as plt\n'), ((680, 690), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (688, 690), True, 'import matplotlib.pyplot as plt\n'), ((695, 705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n'), ((150, 173), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (165, 173), True, 'import numpy as np\n'), ((250, 273), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (265, 273), True, 'import numpy as np\n'), ((175, 198), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (190, 198), True, 'import numpy as np\n'), ((220, 243), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (235, 243), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
/* ----------------------------------------------------------------------------
* PalmSens Method SCRIPT SDK
* ----------------------------------------------------------------------------
* Copyright (c) 2019-2020, PalmSens BV
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* PalmSens's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY PALMSENS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL PALMSENS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
"""
###############################################################################
# Description
###############################################################################
# This example showcases how to perform and plot a simple Cyclic Voltammetry (CV) measurement
###############################################################################
# Imports
###############################################################################
import serial
import os.path
import PSEsPicoLib
import matplotlib.pyplot as plt
import sys
###############################################################################
# Configuration
###############################################################################
#Folder where scripts are stored
MSfilepath = ".\\MethodSCRIPT files"
#Name of script file to run
MScriptFile = "MSExampleCV.mscr"
#COM port of the EmStat Pico
myport = "COM9"
#Set to False to disable printing of raw and parsed data
verbose_printing = True
###############################################################################
# Code
###############################################################################
#Set printing verbosity
PSEsPicoLib.SetPrintVerbose(verbose_printing)
#used to only parse data once we have successfully executed the script
measurement_success = False
#combine the path and filename
MScriptPathandFile = os.path.join(MSfilepath, MScriptFile)
#initialization and open the port
ser = serial.Serial() #Create an instance of the serial object
if PSEsPicoLib.OpenComport(ser,myport,1): #open myport with 1 sec timeout
print("Succesfuly opened: " + ser.port )
try:
PSEsPicoLib.Flush(ser) #Flush the EmstatPico parse buffer
if PSEsPicoLib.IsConnected(ser): #Check if EmstatPico is connected
print("Connected!")
# Send the MethodSCRIPT file
PSEsPicoLib.SendScriptFile(ser,MScriptPathandFile)
#Get the results and store it in datafile
datafile=PSEsPicoLib.GetResults(ser) # fetch the results
#Create "data" subfolder
(prefix, sep, suffix) = MScriptFile.rpartition('.') #split the file-extension and the filename
ResultFile = prefix + '.dat' #change the extension to .dat
ResultPath = MSfilepath+"\\data" #use subfolder for the data
try:
os.mkdir(ResultPath)
except OSError:
print ("Creation of the directory %s failed" % ResultPath)
else:
print ("Successfully created the directory %s " % ResultPath)
ResultFile = os.path.join(ResultPath, ResultFile) #combine the path and the filename
        ResultFile = PSEsPicoLib.CheckFileExistAndRename(ResultFile)    #Rename the file if it exists to a unique name by adding the date+time
#print(ResultFile)
f = open(ResultFile,"w+") #Open file for writing
f.write(datafile) #write data to file
f.close() #close file
            measurement_success = True
else:
print("Unable to connected!")
except Exception as e1: #catch exception
print("error communicating...: " + str(e1)) #print the exception
finally:
ser.close() #close the comport
else:
print("cannot open serial port ")
if not measurement_success:
sys.exit()
value_matrix = PSEsPicoLib.ParseResultFile(ResultFile) #Parse result file to Value matrix
applied_potential=PSEsPicoLib.GetColumnFromMatrix(value_matrix,0) #Get the applied potentials
measured_current=PSEsPicoLib.GetColumnFromMatrix(value_matrix,1) #Get the measured current
plt.figure(1)
plt.plot(applied_potential,measured_current)
plt.title("Voltammogram")
plt.xlabel("Applied Potential (V)")
plt.ylabel("Measured Current (A)")
plt.grid(b=True, which='major')
plt.grid(b=True, which='minor', color='b', linestyle='-', alpha=0.2)
plt.minorticks_on()
plt.show()  # show last, so the grid styling is applied before the blocking call
|
[
"matplotlib.pyplot.title",
"PSEsPicoLib.IsConnected",
"matplotlib.pyplot.figure",
"serial.Serial",
"PSEsPicoLib.CheckFileExistAndRename",
"PSEsPicoLib.Flush",
"PSEsPicoLib.SendScriptFile",
"PSEsPicoLib.GetResults",
"matplotlib.pyplot.show",
"PSEsPicoLib.OpenComport",
"PSEsPicoLib.GetColumnFromMatrix",
"matplotlib.pyplot.ylabel",
"PSEsPicoLib.ParseResultFile",
"matplotlib.pyplot.grid",
"sys.exit",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.minorticks_on",
"matplotlib.pyplot.xlabel",
"PSEsPicoLib.SetPrintVerbose"
] |
[((2738, 2783), 'PSEsPicoLib.SetPrintVerbose', 'PSEsPicoLib.SetPrintVerbose', (['verbose_printing'], {}), '(verbose_printing)\n', (2765, 2783), False, 'import PSEsPicoLib\n'), ((3015, 3030), 'serial.Serial', 'serial.Serial', ([], {}), '()\n', (3028, 3030), False, 'import serial\n'), ((3078, 3117), 'PSEsPicoLib.OpenComport', 'PSEsPicoLib.OpenComport', (['ser', 'myport', '(1)'], {}), '(ser, myport, 1)\n', (3101, 3117), False, 'import PSEsPicoLib\n'), ((5217, 5256), 'PSEsPicoLib.ParseResultFile', 'PSEsPicoLib.ParseResultFile', (['ResultFile'], {}), '(ResultFile)\n', (5244, 5256), False, 'import PSEsPicoLib\n'), ((5312, 5360), 'PSEsPicoLib.GetColumnFromMatrix', 'PSEsPicoLib.GetColumnFromMatrix', (['value_matrix', '(0)'], {}), '(value_matrix, 0)\n', (5343, 5360), False, 'import PSEsPicoLib\n'), ((5406, 5454), 'PSEsPicoLib.GetColumnFromMatrix', 'PSEsPicoLib.GetColumnFromMatrix', (['value_matrix', '(1)'], {}), '(value_matrix, 1)\n', (5437, 5454), False, 'import PSEsPicoLib\n'), ((5483, 5496), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5493, 5496), True, 'import matplotlib.pyplot as plt\n'), ((5497, 5542), 'matplotlib.pyplot.plot', 'plt.plot', (['applied_potential', 'measured_current'], {}), '(applied_potential, measured_current)\n', (5505, 5542), True, 'import matplotlib.pyplot as plt\n'), ((5542, 5567), 'matplotlib.pyplot.title', 'plt.title', (['"""Voltammogram"""'], {}), "('Voltammogram')\n", (5551, 5567), True, 'import matplotlib.pyplot as plt\n'), ((5568, 5603), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Applied Potential (V)"""'], {}), "('Applied Potential (V)')\n", (5578, 5603), True, 'import matplotlib.pyplot as plt\n'), ((5604, 5638), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Measured Current (A)"""'], {}), "('Measured Current (A)')\n", (5614, 5638), True, 'import matplotlib.pyplot as plt\n'), ((5639, 5649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5647, 5649), True, 'import matplotlib.pyplot as plt\n'), ((5650, 5681), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""'}), "(b=True, which='major')\n", (5658, 5681), True, 'import matplotlib.pyplot as plt\n'), ((5682, 5750), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'color': '"""b"""', 'linestyle': '"""-"""', 'alpha': '(0.2)'}), "(b=True, which='minor', color='b', linestyle='-', alpha=0.2)\n", (5690, 5750), True, 'import matplotlib.pyplot as plt\n'), ((5751, 5770), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (5768, 5770), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5197), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5195, 5197), False, 'import sys\n'), ((3213, 3235), 'PSEsPicoLib.Flush', 'PSEsPicoLib.Flush', (['ser'], {}), '(ser)\n', (3230, 3235), False, 'import PSEsPicoLib\n'), ((3303, 3331), 'PSEsPicoLib.IsConnected', 'PSEsPicoLib.IsConnected', (['ser'], {}), '(ser)\n', (3326, 3331), False, 'import PSEsPicoLib\n'), ((3491, 3542), 'PSEsPicoLib.SendScriptFile', 'PSEsPicoLib.SendScriptFile', (['ser', 'MScriptPathandFile'], {}), '(ser, MScriptPathandFile)\n', (3517, 3542), False, 'import PSEsPicoLib\n'), ((3617, 3644), 'PSEsPicoLib.GetResults', 'PSEsPicoLib.GetResults', (['ser'], {}), '(ser)\n', (3639, 3644), False, 'import PSEsPicoLib\n'), ((4431, 4478), 'PSEsPicoLib.CheckFileExistAndRename', 'PSEsPicoLib.CheckFileExistAndRename', (['ResultFile'], {}), '(ResultFile)\n', (4466, 4478), False, 'import PSEsPicoLib\n')]
|
from reinforch.agents import DQNAgent
from reinforch.core.memorys import SimpleMatrixMemory
from reinforch.environments import OpenAIGym
from reinforch.execution import Runner
def test_dqn():
gym_id = 'CartPole-v0'
env = OpenAIGym(gym_id)
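    # fixed seed keeps the CartPole rollout deterministic across test runs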
env.seed(7)
n_s = env.n_s
n_a = env.n_a
memory = SimpleMatrixMemory(row_size=3000, every_class_size=[n_s, 1, 1, n_s, 1])
agent = DQNAgent(n_s=n_s,
n_a=n_a,
memory=memory,
config='tests/configs/test_dqn.json')
with Runner(agent=agent,
environment=env,
verbose=False) as runner:
runner.train(total_episode=10,
max_step_in_one_episode=200,
save_model=False,
save_final_model=False,
visualize=False)
|
[
"reinforch.core.memorys.SimpleMatrixMemory",
"reinforch.execution.Runner",
"reinforch.environments.OpenAIGym",
"reinforch.agents.DQNAgent"
] |
[((232, 249), 'reinforch.environments.OpenAIGym', 'OpenAIGym', (['gym_id'], {}), '(gym_id)\n', (241, 249), False, 'from reinforch.environments import OpenAIGym\n'), ((315, 386), 'reinforch.core.memorys.SimpleMatrixMemory', 'SimpleMatrixMemory', ([], {'row_size': '(3000)', 'every_class_size': '[n_s, 1, 1, n_s, 1]'}), '(row_size=3000, every_class_size=[n_s, 1, 1, n_s, 1])\n', (333, 386), False, 'from reinforch.core.memorys import SimpleMatrixMemory\n'), ((399, 478), 'reinforch.agents.DQNAgent', 'DQNAgent', ([], {'n_s': 'n_s', 'n_a': 'n_a', 'memory': 'memory', 'config': '"""tests/configs/test_dqn.json"""'}), "(n_s=n_s, n_a=n_a, memory=memory, config='tests/configs/test_dqn.json')\n", (407, 478), False, 'from reinforch.agents import DQNAgent\n'), ((551, 602), 'reinforch.execution.Runner', 'Runner', ([], {'agent': 'agent', 'environment': 'env', 'verbose': '(False)'}), '(agent=agent, environment=env, verbose=False)\n', (557, 602), False, 'from reinforch.execution import Runner\n')]
|
import base64
from io import BytesIO
from pathlib import Path
from PIL import Image, ImageDraw, UnidentifiedImageError
from nonebot.log import logger
from typing import Tuple, Union, Optional, Literal
from .util import load_font
import asyncio
class BuildImage:
def __init__(
self,
w: int,
h: int,
img_w: int = 0,
img_h: int = 0,
background: str = "",
color: Union[float, Tuple[float, ...], str] = "white",
image_type: Literal[
"1",
"CMYK",
"F",
"HSV",
"I",
"L",
"LAB",
"P",
"RGB",
"RGBA",
"RGBX",
"YCbCr",
] = "RGBA",
divisor: float = 1,
font_size: int = 10,
):
self.w = int(w)
self.h = int(h)
self.img_w = int(img_w)
self.img_h = int(img_h)
self.current_w = 0
self.current_h = 0
if not background:
self.markImg = Image.new(image_type, (self.w, self.h), color)
else:
try:
if w == 0 and h == 0:
self.markImg = Image.open(background)
w, h = self.markImg.size
if divisor:
self.w = int(divisor * w)
self.h = int(divisor * h)
self.markImg = self.markImg.resize(
(self.w, self.h), Image.ANTIALIAS
)
else:
self.w = w
self.h = h
else:
self.markImg = Image.open(background).resize(
(self.w, self.h), Image.ANTIALIAS
)
except UnidentifiedImageError as e:
logger.warning(f"无法识别图片 已删除图片,下次更新重新下载... e:{e}")
Path(background).unlink(missing_ok=True)
self.markImg = Image.new(image_type, (self.w, self.h), color)
except FileNotFoundError:
logger.warning(f"{background} not exists")
self.markImg = Image.new(image_type, (self.w, self.h), color)
self.font = load_font(fontsize=font_size)
self.draw = ImageDraw.Draw(self.markImg)
self.size = self.w, self.h
try:
self.loop = asyncio.get_event_loop()
except RuntimeError:
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
self.loop = asyncio.get_event_loop()
async def atext(
self,
pos: Tuple[int, int],
text: str,
fill: Union[str, Tuple[int, int, int]] = (0, 0, 0),
):
"""
        Description:
            Asynchronously draw text on the image
        Parameters:
            :param pos: position of the text
            :param text: text content
            :param fill: text color
"""
await self.loop.run_in_executor(None, self.text, pos, text, fill)
def text(
self,
pos: Tuple[int, int],
text: str,
fill: Union[str, Tuple[int, int, int]] = (0, 0, 0),
):
"""
        Description:
            Draw text on the image
        Parameters:
            :param pos: position of the text
            :param text: text content
            :param fill: text color
"""
self.draw.text(pos, text, fill=fill, font=self.font)
async def apaste(
self,
img: "BuildImage" or Image,
pos: Optional[Tuple[int, int]] = None,
alpha: bool = False,
):
"""
        Description:
            Asynchronously paste an image
        Parameters:
            :param img: an opened image, either a BuildImage or an Image
            :param pos: paste position (top-left corner)
            :param alpha: whether the image background is transparent
"""
await self.loop.run_in_executor(None, self.paste, img, pos, alpha)
    # paste an image onto this one
def paste(self, img, pos=None, alpha=False):
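        # with no explicit position, images flow left-to-right and wrap to the next row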
if isinstance(img, BuildImage):
img = img.markImg
if self.current_w == self.w:
self.current_w = 0
self.current_h += self.img_h
if not pos:
pos = (self.current_w, self.current_h)
if alpha:
try:
self.markImg.paste(img, pos, img)
except ValueError:
img = img.convert("RGBA")
self.markImg.paste(img, pos, img)
else:
self.markImg.paste(img, pos)
self.current_w += self.img_w
return self.markImg
def circle_corner(self, r: int):
img = self.markImg.convert("RGBA")
w, h = img.size
alpha = img.split()[-1]
circle = Image.new("L", (r * 2, r * 2), 0) # 创建黑色方形
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, r * 2, r * 2), fill=255) # 黑色方形内切白色圆形
draw = ImageDraw.Draw(alpha)
alpha.paste(circle.crop((0, 0, r, r)), (0, 0)) # 左上角
alpha.paste(circle.crop((r, 0, r * 2, r)), (w - r, 0)) # 右上角
alpha.paste(circle.crop((r, r, r * 2, r * 2)), (w - r, h - r)) # 右下角
alpha.paste(circle.crop((0, r, r, r * 2)), (0, h - r)) # 左下角
img.putalpha(alpha)
self.markImg = img
    # convert to base64:
def pic2bs4(self):
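        # render the image to an in-memory PNG and wrap it in the base64:// URI scheme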
buf = BytesIO()
self.markImg.save(buf, format="PNG")
return f"base64://{base64.b64encode(buf.getvalue()).decode()}"
|
[
"io.BytesIO",
"PIL.Image.new",
"asyncio.get_event_loop",
"nonebot.log.logger.warning",
"asyncio.set_event_loop",
"PIL.Image.open",
"pathlib.Path",
"PIL.ImageDraw.Draw",
"asyncio.new_event_loop"
] |
[((2282, 2310), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.markImg'], {}), '(self.markImg)\n', (2296, 2310), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((4544, 4577), 'PIL.Image.new', 'Image.new', (['"""L"""', '(r * 2, r * 2)', '(0)'], {}), "('L', (r * 2, r * 2), 0)\n", (4553, 4577), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((4603, 4625), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['circle'], {}), '(circle)\n', (4617, 4625), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((4708, 4729), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['alpha'], {}), '(alpha)\n', (4722, 4729), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((5115, 5124), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (5122, 5124), False, 'from io import BytesIO\n'), ((1025, 1071), 'PIL.Image.new', 'Image.new', (['image_type', '(self.w, self.h)', 'color'], {}), '(image_type, (self.w, self.h), color)\n', (1034, 1071), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((2383, 2407), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2405, 2407), False, 'import asyncio\n'), ((2460, 2484), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (2482, 2484), False, 'import asyncio\n'), ((2497, 2529), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['new_loop'], {}), '(new_loop)\n', (2519, 2529), False, 'import asyncio\n'), ((2554, 2578), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2576, 2578), False, 'import asyncio\n'), ((1176, 1198), 'PIL.Image.open', 'Image.open', (['background'], {}), '(background)\n', (1186, 1198), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((1852, 1901), 'nonebot.log.logger.warning', 'logger.warning', (['f"""无法识别图片 已删除图片,下次更新重新下载... e:{e}"""'], {}), "(f'无法识别图片 已删除图片,下次更新重新下载... e:{e}')\n", (1866, 1901), False, 'from nonebot.log import logger\n'), ((1990, 2036), 'PIL.Image.new', 'Image.new', (['image_type', '(self.w, self.h)', 'color'], {}), '(image_type, (self.w, self.h), color)\n', (1999, 2036), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((2091, 2133), 'nonebot.log.logger.warning', 'logger.warning', (['f"""{background} not exists"""'], {}), "(f'{background} not exists')\n", (2105, 2133), False, 'from nonebot.log import logger\n'), ((2165, 2211), 'PIL.Image.new', 'Image.new', (['image_type', '(self.w, self.h)', 'color'], {}), '(image_type, (self.w, self.h), color)\n', (2174, 2211), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((1677, 1699), 'PIL.Image.open', 'Image.open', (['background'], {}), '(background)\n', (1687, 1699), False, 'from PIL import Image, ImageDraw, UnidentifiedImageError\n'), ((1918, 1934), 'pathlib.Path', 'Path', (['background'], {}), '(background)\n', (1922, 1934), False, 'from pathlib import Path\n')]
|
# phaseplots.py - examples of phase portraits
# RMM, 24 July 2011
#
# This file contains examples of phase portraits pulled from "Feedback
# Systems" by <NAME> Murray (Princeton University Press, 2008).
import numpy as np
import matplotlib.pyplot as mpl
from control.phaseplot import phase_plot
from numpy import pi
# Clear out any figures that are present
mpl.close('all')
#
# Inverted pendulum
#
# Define the ODEs for a damped (inverted) pendulum
def invpend_ode(x, t, m=1., l=1., b=0.2, g=1):
return (x[1], -b/m*x[1] + (g*l/m) * np.sin(x[0]))
# Set up the figure the way we want it to look
mpl.figure(); mpl.clf();
mpl.axis([-2*pi, 2*pi, -2.1, 2.1]);
mpl.title('Inverted pendulum')
# Outer trajectories
phase_plot(invpend_ode,
X0 = [ [-2*pi, 1.6], [-2*pi, 0.5], [-1.8, 2.1],
[-1, 2.1], [4.2, 2.1], [5, 2.1],
[2*pi, -1.6], [2*pi, -0.5], [1.8, -2.1],
[1, -2.1], [-4.2, -2.1], [-5, -2.1] ],
T = np.linspace(0, 40, 200),
logtime = (3, 0.7) )
# Separatrices
mpl.hold(True);
phase_plot(invpend_ode, X0 = [[-2.3056, 2.1], [2.3056, -2.1]], T=6, lingrid=0)
mpl.show();
#
# Systems of ODEs: damped oscillator example (simulation + phase portrait)
#
def oscillator_ode(x, t, m=1., b=1, k=1):
return (x[1], -k/m*x[0] - b/m*x[1])
# Generate a vector plot for the damped oscillator
mpl.figure(); mpl.clf();
phase_plot(oscillator_ode, [-1, 1, 10], [-1, 1, 10], 0.15);
mpl.hold(True); mpl.plot([0], [0], '.');
# a=gca; set(a,'FontSize',20); set(a,'DataAspectRatio',[1,1,1]);
mpl.xlabel('x1'); mpl.ylabel('x2');
# Generate a phase plot for the damped oscillator
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1, 1, 1]);
phase_plot(oscillator_ode,
X0 = [
[-1, 1], [-0.3, 1], [0, 1], [0.25, 1], [0.5, 1], [0.75, 1], [1, 1],
[1, -1], [0.3, -1], [0, -1], [-0.25, -1], [-0.5, -1], [-0.75, -1], [-1, -1]
], T = np.linspace(0, 8, 80), timepts = [0.25, 0.8, 2, 3])
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'DataAspectRatio',[1,1,1]);
mpl.xlabel('x1'); mpl.ylabel('x2');
mpl.show()
#
# Stability definitions
#
# This set of plots illustrates the various types of equilibrium points.
#
# Saddle point vector field
def saddle_ode(x, t):
return (x[0] - 3*x[1], -3*x[0] + x[1]);
# Asy stable
m = 1; b = 1; k = 1; # default values
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(oscillator_ode,
X0 = [
[-1,1], [-0.3,1], [0,1], [0.25,1], [0.5,1], [0.7,1], [1,1], [1.3,1],
[1,-1], [0.3,-1], [0,-1], [-0.25,-1], [-0.5,-1], [-0.7,-1], [-1,-1],
[-1.3,-1]
], T = np.linspace(0, 10, 100),
timepts = [0.3, 1, 2, 3], parms = (m, b, k));
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
# Saddle
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(saddle_ode, scale = 2, timepts = [0.2, 0.5, 0.8], X0 =
[ [-1, -1], [1, 1],
[-1, -0.95], [-1, -0.9], [-1, -0.8], [-1, -0.6], [-1, -0.4], [-1, -0.2],
[-0.95, -1], [-0.9, -1], [-0.8, -1], [-0.6, -1], [-0.4, -1], [-0.2, -1],
[1, 0.95], [1, 0.9], [1, 0.8], [1, 0.6], [1, 0.4], [1, 0.2],
[0.95, 1], [0.9, 1], [0.8, 1], [0.6, 1], [0.4, 1], [0.2, 1],
[-0.5, -0.45], [-0.45, -0.5], [0.5, 0.45], [0.45, 0.5],
[-0.04, 0.04], [0.04, -0.04] ], T = np.linspace(0, 2, 20));
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
# Stable isL
m = 1; b = 0; k = 1; # zero damping
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(oscillator_ode, timepts =
[pi/6, pi/3, pi/2, 2*pi/3, 5*pi/6, pi, 7*pi/6, 4*pi/3, 9*pi/6, 5*pi/3, 11*pi/6, 2*pi],
X0 = [ [0.2,0], [0.4,0], [0.6,0], [0.8,0], [1,0], [1.2,0], [1.4,0] ],
T = np.linspace(0, 20, 200), parms = (m, b, k));
mpl.hold(True); mpl.plot([0], [0], 'k.') # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
mpl.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.hold",
"matplotlib.pyplot.axis",
"control.phaseplot.phase_plot",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((360, 376), 'matplotlib.pyplot.close', 'mpl.close', (['"""all"""'], {}), "('all')\n", (369, 376), True, 'import matplotlib.pyplot as mpl\n'), ((603, 615), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (613, 615), True, 'import matplotlib.pyplot as mpl\n'), ((617, 626), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (624, 626), True, 'import matplotlib.pyplot as mpl\n'), ((629, 667), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-2 * pi, 2 * pi, -2.1, 2.1]'], {}), '([-2 * pi, 2 * pi, -2.1, 2.1])\n', (637, 667), True, 'import matplotlib.pyplot as mpl\n'), ((665, 694), 'matplotlib.pyplot.title', 'mpl.title', (['"""Inverted pendlum"""'], {}), "('Inverted pendlum')\n", (674, 694), True, 'import matplotlib.pyplot as mpl\n'), ((1014, 1028), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (1022, 1028), True, 'import matplotlib.pyplot as mpl\n'), ((1030, 1106), 'control.phaseplot.phase_plot', 'phase_plot', (['invpend_ode'], {'X0': '[[-2.3056, 2.1], [2.3056, -2.1]]', 'T': '(6)', 'lingrid': '(0)'}), '(invpend_ode, X0=[[-2.3056, 2.1], [2.3056, -2.1]], T=6, lingrid=0)\n', (1040, 1106), False, 'from control.phaseplot import phase_plot\n'), ((1109, 1119), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (1117, 1119), True, 'import matplotlib.pyplot as mpl\n'), ((1336, 1348), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (1346, 1348), True, 'import matplotlib.pyplot as mpl\n'), ((1350, 1359), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (1357, 1359), True, 'import matplotlib.pyplot as mpl\n'), ((1361, 1419), 'control.phaseplot.phase_plot', 'phase_plot', (['oscillator_ode', '[-1, 1, 10]', '[-1, 1, 10]', '(0.15)'], {}), '(oscillator_ode, [-1, 1, 10], [-1, 1, 10], 0.15)\n', (1371, 1419), False, 'from control.phaseplot import phase_plot\n'), ((1421, 1435), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (1429, 1435), True, 'import matplotlib.pyplot as mpl\n'), ((1437, 1460), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""."""'], {}), "([0], [0], '.')\n", (1445, 1460), True, 'import matplotlib.pyplot as mpl\n'), ((1527, 1543), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""x1"""'], {}), "('x1')\n", (1537, 1543), True, 'import matplotlib.pyplot as mpl\n'), ((1545, 1561), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""x2"""'], {}), "('x2')\n", (1555, 1561), True, 'import matplotlib.pyplot as mpl\n'), ((1614, 1626), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (1624, 1626), True, 'import matplotlib.pyplot as mpl\n'), ((1628, 1637), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (1635, 1637), True, 'import matplotlib.pyplot as mpl\n'), ((1640, 1664), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (1648, 1664), True, 'import matplotlib.pyplot as mpl\n'), ((1959, 1973), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (1967, 1973), True, 'import matplotlib.pyplot as mpl\n'), ((1975, 1999), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (1983, 1999), True, 'import matplotlib.pyplot as mpl\n'), ((2078, 2094), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""x1"""'], {}), "('x1')\n", (2088, 2094), True, 'import matplotlib.pyplot as mpl\n'), ((2096, 2112), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""x2"""'], {}), "('x2')\n", (2106, 2112), True, 'import matplotlib.pyplot as mpl\n'), ((2115, 2125), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (2123, 2125), True, 'import matplotlib.pyplot as mpl\n'), ((2379, 2391), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (2389, 2391), True, 'import matplotlib.pyplot as mpl\n'), ((2393, 2402), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as mpl\n'), ((2405, 2429), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (2413, 2429), True, 'import matplotlib.pyplot as mpl\n'), ((2751, 2765), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (2759, 2765), True, 'import matplotlib.pyplot as mpl\n'), ((2767, 2791), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (2775, 2791), True, 'import matplotlib.pyplot as mpl\n'), ((2860, 2883), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""{\\\\itx}_1"""'], {}), "('{\\\\itx}_1')\n", (2870, 2883), True, 'import matplotlib.pyplot as mpl\n'), ((2884, 2907), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""{\\\\itx}_2"""'], {}), "('{\\\\itx}_2')\n", (2894, 2907), True, 'import matplotlib.pyplot as mpl\n'), ((2918, 2930), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (2928, 2930), True, 'import matplotlib.pyplot as mpl\n'), ((2932, 2941), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (2939, 2941), True, 'import matplotlib.pyplot as mpl\n'), ((2943, 2967), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (2951, 2967), True, 'import matplotlib.pyplot as mpl\n'), ((3507, 3521), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (3515, 3521), True, 'import matplotlib.pyplot as mpl\n'), ((3523, 3547), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (3531, 3547), True, 'import matplotlib.pyplot as mpl\n'), ((3616, 3639), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""{\\\\itx}_1"""'], {}), "('{\\\\itx}_1')\n", (3626, 3639), True, 'import matplotlib.pyplot as mpl\n'), ((3640, 3663), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""{\\\\itx}_2"""'], {}), "('{\\\\itx}_2')\n", (3650, 3663), True, 'import matplotlib.pyplot as mpl\n'), ((3716, 3728), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (3726, 3728), True, 'import matplotlib.pyplot as mpl\n'), ((3730, 3739), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (3737, 3739), True, 'import matplotlib.pyplot as mpl\n'), ((3741, 3765), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (3749, 3765), True, 'import matplotlib.pyplot as mpl\n'), ((4058, 4072), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (4066, 4072), True, 'import matplotlib.pyplot as mpl\n'), ((4074, 4098), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (4082, 4098), True, 'import matplotlib.pyplot as mpl\n'), ((4166, 4189), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""{\\\\itx}_1"""'], {}), "('{\\\\itx}_1')\n", (4176, 4189), True, 'import matplotlib.pyplot as mpl\n'), ((4190, 4213), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""{\\\\itx}_2"""'], {}), "('{\\\\itx}_2')\n", (4200, 4213), True, 'import matplotlib.pyplot as mpl\n'), ((4215, 4225), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (4223, 4225), True, 'import matplotlib.pyplot as mpl\n'), ((948, 971), 'numpy.linspace', 'np.linspace', (['(0)', '(40)', '(200)'], {}), '(0, 40, 200)\n', (959, 971), True, 'import numpy as np\n'), ((1907, 1928), 'numpy.linspace', 'np.linspace', (['(0)', '(8)', '(80)'], {}), '(0, 8, 80)\n', (1918, 1928), True, 'import numpy as np\n'), ((2677, 2700), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (2688, 2700), True, 'import numpy as np\n'), ((3483, 3504), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(20)'], {}), '(0, 2, 20)\n', (3494, 3504), True, 'import numpy as np\n'), ((4013, 4036), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(200)'], {}), '(0, 20, 200)\n', (4024, 4036), True, 'import numpy as np\n'), ((541, 553), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (547, 553), True, 'import numpy as np\n')]
|
import os.path as path
import csv
class Filesystem(object):
#This method stores the passed data to the default video id's file
def __init__(self, filename, open_mode):
self.open_mode = open_mode
self.file = filename
#This method stores txt on the specified file with a specified EOL character.
def save_default( self, txt, EOL="" ):
save_path = path.dirname(path.abspath(__file__))
name_of_file = self.file
completeName = path.join(save_path, name_of_file+".txt")
file1 = open(completeName, self.open_mode)
file1.write(txt+""+EOL+"\n")
file1.close()
def print_path(self):
save_path = path.dirname(path.abspath(__file__))
completeName = path.join(save_path, self.file+".txt")
print(completeName)
def file_to_list(self):
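        # each entry is line.split('\n'), i.e. a [text, ''] pair per line of the file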
#list_of_lists = []
save_path = path.dirname(path.abspath(__file__))
completeName = path.join(save_path, self.file+".txt")
file1 = open(completeName, self.open_mode)
#list_of_lists = file1.splitlines()
list_of_songs = [line.split('\n') for line in file1.readlines()]
file1.close()
return list_of_songs
def make_ids_table(self, file_name, array):
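        # note: rows are written to self.file; the file_name argument is unused here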
save_path = path.dirname(path.abspath(__file__))
completeName = path.join(save_path, self.file+".txt")
file1 = open(completeName, self.open_mode)
csvWriter = csv.writer(file1,delimiter=',')
csvWriter.writerows(array)
file1.close()
def get_songs_list(self):
songs = list()
with open(self.file, self.open_mode) as my_csv:
song_list = csv.reader(my_csv)
for row in song_list:
songs.append(row)
return songs
def read_csv_file_to_list(self, file_path, file_name, open_mode):
song_lyrics_list = {'mood':[], 'title':[], 'artist':[], 'lyric':[], 'youtube_id':[]}
completeName = path.join(file_path, file_name+".csv")
with open(completeName,open_mode) as my_csv:
reader_list = csv.reader(my_csv)
for row in reader_list:
######
"""
songs structure
                [0] useless id
[1] artist
[2] lyrics
[3] mood
[4] title
"""
######
song_lyrics_list['mood'].append(str(row[3].strip()))
song_lyrics_list['title'].append(str(row[4].strip()))
song_lyrics_list['artist'].append(str(row[1].strip()))
song_lyrics_list['lyric'].append(row[2].strip())
song_lyrics_list['youtube_id'].append("")
return song_lyrics_list
|
[
"csv.reader",
"os.path.abspath",
"os.path.join",
"csv.writer"
] |
[((438, 481), 'os.path.join', 'path.join', (['save_path', "(name_of_file + '.txt')"], {}), "(save_path, name_of_file + '.txt')\n", (447, 481), True, 'import os.path as path\n'), ((664, 704), 'os.path.join', 'path.join', (['save_path', "(self.file + '.txt')"], {}), "(save_path, self.file + '.txt')\n", (673, 704), True, 'import os.path as path\n'), ((842, 882), 'os.path.join', 'path.join', (['save_path', "(self.file + '.txt')"], {}), "(save_path, self.file + '.txt')\n", (851, 882), True, 'import os.path as path\n'), ((1184, 1224), 'os.path.join', 'path.join', (['save_path', "(self.file + '.txt')"], {}), "(save_path, self.file + '.txt')\n", (1193, 1224), True, 'import os.path as path\n'), ((1288, 1320), 'csv.writer', 'csv.writer', (['file1'], {'delimiter': '""","""'}), "(file1, delimiter=',')\n", (1298, 1320), False, 'import csv\n'), ((1740, 1780), 'os.path.join', 'path.join', (['file_path', "(file_name + '.csv')"], {}), "(file_path, file_name + '.csv')\n", (1749, 1780), True, 'import os.path as path\n'), ((370, 392), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (382, 392), True, 'import os.path as path\n'), ((623, 645), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (635, 645), True, 'import os.path as path\n'), ((801, 823), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (813, 823), True, 'import os.path as path\n'), ((1143, 1165), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (1155, 1165), True, 'import os.path as path\n'), ((1478, 1496), 'csv.reader', 'csv.reader', (['my_csv'], {}), '(my_csv)\n', (1488, 1496), False, 'import csv\n'), ((1843, 1861), 'csv.reader', 'csv.reader', (['my_csv'], {}), '(my_csv)\n', (1853, 1861), False, 'import csv\n')]
|
# Generated by Django 2.0.8 on 2019-01-20 09:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0013_auto_20180902_1533'),
]
operations = [
migrations.AddField(
model_name='product',
name='abstract',
field=models.TextField(blank=True, max_length=200, verbose_name='abstract'),
),
migrations.AddField(
model_name='product',
name='abstract_en',
field=models.TextField(blank=True, max_length=200, null=True, verbose_name='abstract'),
),
migrations.AddField(
model_name='product',
name='abstract_it',
field=models.TextField(blank=True, max_length=200, null=True, verbose_name='abstract'),
),
]
|
[
"django.db.models.TextField"
] |
[((338, 407), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""abstract"""'}), "(blank=True, max_length=200, verbose_name='abstract')\n", (354, 407), False, 'from django.db import migrations, models\n'), ((533, 618), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)', 'verbose_name': '"""abstract"""'}), "(blank=True, max_length=200, null=True, verbose_name='abstract'\n )\n", (549, 618), False, 'from django.db import migrations, models\n'), ((739, 824), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)', 'verbose_name': '"""abstract"""'}), "(blank=True, max_length=200, null=True, verbose_name='abstract'\n )\n", (755, 824), False, 'from django.db import migrations, models\n')]
|
import logging
DEBUG = True
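# an optional local module may override the defaults above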
try:
from .local import *
except ImportError:
logging.warning("No local settings defined. Using Defaults")
|
[
"logging.warning"
] |
[((82, 142), 'logging.warning', 'logging.warning', (['"""No local settings defined. Using Defaults"""'], {}), "('No local settings defined. Using Defaults')\n", (97, 142), False, 'import logging\n')]
|
# Copyright 2020, 2021 <NAME> <<EMAIL>>
# Copyright 2020, 2021 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .import_ket import _import_globals_ket
from . import *
from .ket import label, branch, jump, build_info
from .preprocessor import *
def __ket__():
import argparse
from os import path, getcwd
parser = argparse.ArgumentParser(prog='ket', description='Ket interpreter')
parser.add_argument('--version', action='version', version=f'Ket {build_info()}')
parser.add_argument('-o', '--out', help='KQASM output file', type=str)
parser.add_argument('-s', '--kbw', help='quantum execution (KBW) IP address', type=str, default='127.0.0.1')
parser.add_argument('-u', '--user', help='quantum execution (KBW) SSH user', type=str)
parser.add_argument('-p', '--port', help='quantum execution (KBW) port', type=str, default='4242')
parser.add_argument('-P', '--ssh-port', help='quantum execution (KBW) SSH port', type=str, default='22')
parser.add_argument('--seed', help='set RNG seed for quantum execution', type=int)
parser.add_argument('--api-args', help='additional parameters for quantum execution', type=str)
parser.add_argument('--no-execute', help='does not execute KQASM, measurements return 0', action='store_false')
parser.add_argument('--dump2fs', help='use the filesystem to transfer dump data', action='store_true')
parser.add_argument('input', metavar='.ket', help='source code', type=str)
args = parser.parse_args()
ket_args = {
"server" : args.kbw,
"port" : args.port,
"execute" : args.no_execute,
"dump2fs" : args.dump2fs,
"ssh_port" : args.ssh_port,
}
if args.user:
ket_args["user"] = args.user
if args.out:
ket_args["kqasm"] = args.out
if args.seed:
ket_args["seed"] = args.seed
if args.api_args:
ket_args["api-args"] = args.api_args
ket_config(**ket_args)
globals()['__name__'] = '__main__'
globals()['__in_ket__'] = True
source = path.join(getcwd(), args.input)
_import_globals_ket(source, globals())
if __name__ == '__main__':
__ket__()
|
[
"os.getcwd",
"argparse.ArgumentParser"
] |
[((860, 926), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""ket"""', 'description': '"""Ket interpreter"""'}), "(prog='ket', description='Ket interpreter')\n", (883, 926), False, 'import argparse\n'), ((2785, 2793), 'os.getcwd', 'getcwd', ([], {}), '()\n', (2791, 2793), False, 'from os import path, getcwd\n')]
|
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple application that performs a query with BigQuery."""
# [START bigquery_simple_app_all]
# [START bigquery_simple_app_deps]
from google.cloud import bigquery
# [END bigquery_simple_app_deps]
def query_stackoverflow() -> None:
# [START bigquery_simple_app_client]
client = bigquery.Client()
# [END bigquery_simple_app_client]
# [START bigquery_simple_app_query]
query_job = client.query(
"""
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
LIMIT 10"""
)
results = query_job.result() # Waits for job to complete.
# [END bigquery_simple_app_query]
# [START bigquery_simple_app_print]
for row in results:
print("{} : {} views".format(row.url, row.view_count))
# [END bigquery_simple_app_print]
if __name__ == "__main__":
query_stackoverflow()
# [END bigquery_simple_app_all]
|
[
"google.cloud.bigquery.Client"
] |
[((910, 927), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (925, 927), False, 'from google.cloud import bigquery\n')]
|
import socket
import sys
import threading
import time
import traceback
import os
import re
import hashlib
MAX_HEADER = 4096
RECV_SIZE = 512
ban_list = [
    b'jwes.hit.edu.cn'
] # blocked hosts
change_list = {
    b'www.hit.edu.cn': b'studyathit.hit.edu.cn'
} # host redirection map
user_list = [
    '127.0.0.1'
] # blocked client addresses
c = {}
def getHeader(string, name): # extract a header field from the request header
decode = string.decode('UTF-8')
header = re.compile(name+r'.*', re.IGNORECASE)
match = header.search(decode)
if match:
head = match.group()
replace = re.compile(r'\r')
head = replace.sub('', head)
return head.encode('UTF-8')
else:
return None
def transHost(raw_host): # split the Host header into host and port
header = raw_host.decode('UTF-8', 'ignore')
groups = header.split(":")
host = groups[1].encode('UTF-8')
if len(groups) > 2:
port = int(groups[2])
else:
port = 80
return host, port
def splitHeader(string): # return only the header section of the message
return string.split(b'\r\n\r\n')[0]
def recvBody(conn, base, size): # receive the rest of the message body
    if size == -1: # no Content-Length given; detect the end by the b'\r\n0\r\n\r\n' terminator (method from the report)
while(base[-7:] != b'\r\n0\r\n\r\n'):
base += conn.recv(RECV_SIZE)
else:
        while len(base) < size: # Content-Length given; read until the stated length
base += conn.recv(RECV_SIZE)
return base
def checkCache(cache, url): # check whether this URL is cached
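    # the cache dict maps md5(url) hex digests to Last-Modified timestamps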
hl = hashlib.md5()
hl.update(url)
url = hl.hexdigest()
if cache.__contains__(url):
return True
else:
return False
def writeCache(cache, url, timestamp, body): # write the body to the cache folder and record the MD5 key and timestamp in the dict
hl = hashlib.md5()
hl.update(url)
url = hl.hexdigest()
cache[url] = timestamp
file = open('计算机网络\HTTP\dict.txt', 'a')
file.write(url+'::'+timestamp+'\n')
file.close()
file = open('计算机网络\HTTP\cache\\'+url, 'wb')
file.write(body)
file.close()
def loadbody(cache, url): # load a cached response body from the cache folder
hl = hashlib.md5()
hl.update(url)
url = hl.hexdigest()
for entry in os.listdir('计算机网络\HTTP\cache'):
if(entry == url):
file = open('计算机网络\HTTP\cache\\'+entry, 'rb')
return file.read()
def thread_proxy(client, addr, cache, banlist, changelist, userlist): # proxy worker thread
thread_name = threading.currentThread().name
    # check whether the client IP is banned
if userlist != None:
if userlist.count(addr[0]) != 0:
print("%sThis client is banned!" % (thread_name))
client.close()
return
    # try to receive the request sent by the client
try:
request = client.recv(MAX_HEADER)
    except: # print an error message on timeout
print("%sTime out!" % (thread_name))
client.close()
return
    # extract the original host
raw_host = getHeader(request, "Host").replace(b' ', b'')
url = getHeader(request, 'get').split(b' ')[1]
    if not raw_host: # print an error if no host can be extracted
print("%sHost request error%s" % (thread_name, str(addr)))
client.close()
return
host, port = transHost(raw_host)
print("%sGET:%s:%s" % (thread_name, url, str(port)))
    # redirect ("phishing") the host
if changelist != None:
if changelist.__contains__(host):
host = changelist[host] # 修改host
print("%sHost has change to %s" % (thread_name, host))
    # refuse banned hosts
if banlist != None:
if banlist.count(host) != 0:
print("%sThis host is banned" % (thread_name))
client.close()
return
    # open a connection to the origin server
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.settimeout(10)
try:
server.connect((host, port))
    except socket.timeout: # print an error message on timeout
print("%sTime out!" % (thread_name))
server.close()
client.close()
return
    # check the cache
if checkCache(cache, url):
        # add a conditional header to check whether the resource has changed
url_md5 = hashlib.md5()
url_md5.update(url)
url_md5 = url_md5.hexdigest()
modify = '\r\nIf-Modified-Since:'+cache[url_md5]+'\r\n\r\n'
newrequest = request
newrequest = newrequest.replace(
            b'\r\n\r\n', modify.encode('UTF-8')) # modify the request
server.sendall(newrequest)
response = server.recv(MAX_HEADER)
responseHeader = splitHeader(response)
flag = getHeader(responseHeader, 'HTTP/1.1').split(b' ')[1]
        if flag == b'304': # on 304 Not Modified, serve from the cache and finish
print("%sCache hit!!" % (thread_name))
response = loadbody(cache, url)
client.sendall(response)
server.close()
client.close()
return
    # cache miss: forward the unmodified request
server.sendall(request)
response = server.recv(RECV_SIZE)
responseHeader = splitHeader(response)
    if len(responseHeader) < len(response) - 4: # header shorter than the data received: the body has not been fully read yet
content_size = getHeader(responseHeader, 'content-length')
if content_size:
size = int(content_size.split(b':')[1]) + 4 + len(responseHeader)
else:
size = -1
response = recvBody(server, response, size)
    client.sendall(response) # forward the response to the client
    # write to the cache; rename the variable so it does not shadow the time module
    last_modified = getHeader(responseHeader, 'Last-Modified')
    if last_modified is not None:
        # a Last-Modified header means the response is cacheable
        last_modified = last_modified.split(b': ')[1].decode('UTF-8')
        writeCache(cache, url, last_modified, response)
server.close()
client.close()
def thread_server(myserver):
while True:
conn, addr = myserver.accept()
conn.settimeout(10)
thread_p = threading.Thread(target=thread_proxy, args=(
conn, addr, c, None, change_list, None))
thread_p.setDaemon(True)
thread_p.start()
def main(port=8000):
try:
myserver = socket.socket()
myserver.bind(('127.0.0.1', port))
myserver.listen(1024)
thread_s = threading.Thread(target=thread_server, args=(myserver,))
thread_s.setDaemon(True)
thread_s.start()
while True:
time.sleep(1000)
except KeyboardInterrupt:
print("sys exit")
finally:
myserver.close()
def loadCache(): # rebuild the cache dict from the file
file = open('计算机网络\HTTP\dict.txt', 'r')
line = file.readline()
while line:
line = line.split('::')
c[line[0]] = line[1][:-1]
line = file.readline()
# command-line entry point
if __name__ == '__main__':
try:
loadCache()
print("Start proxy...")
main()
except Exception as e:
print("error exit")
traceback.print_exc()
finally:
print("end server")
sys.exit(0)
|
[
"threading.Thread",
"hashlib.md5",
"traceback.print_exc",
"time.split",
"socket.socket",
"time.sleep",
"threading.currentThread",
"os.listdir",
"sys.exit",
"re.compile"
] |
[((412, 450), 're.compile', 're.compile', (["(name + '.*')", 're.IGNORECASE'], {}), "(name + '.*', re.IGNORECASE)\n", (422, 450), False, 'import re\n'), ((1360, 1373), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (1371, 1373), False, 'import hashlib\n'), ((1585, 1598), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (1596, 1598), False, 'import hashlib\n'), ((1910, 1923), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (1921, 1923), False, 'import hashlib\n'), ((1985, 2017), 'os.listdir', 'os.listdir', (['"""计算机网络\\\\HTTP\\\\cache"""'], {}), "('计算机网络\\\\HTTP\\\\cache')\n", (1995, 2017), False, 'import os\n'), ((3411, 3460), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3424, 3460), False, 'import socket\n'), ((6428, 6439), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6436, 6439), False, 'import sys\n'), ((545, 562), 're.compile', 're.compile', (['"""\\\\r"""'], {}), "('\\\\r')\n", (555, 562), False, 'import re\n'), ((2230, 2255), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (2253, 2255), False, 'import threading\n'), ((3768, 3781), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3779, 3781), False, 'import hashlib\n'), ((5397, 5485), 'threading.Thread', 'threading.Thread', ([], {'target': 'thread_proxy', 'args': '(conn, addr, c, None, change_list, None)'}), '(target=thread_proxy, args=(conn, addr, c, None,\n change_list, None))\n', (5413, 5485), False, 'import threading\n'), ((5604, 5619), 'socket.socket', 'socket.socket', ([], {}), '()\n', (5617, 5619), False, 'import socket\n'), ((5712, 5768), 'threading.Thread', 'threading.Thread', ([], {'target': 'thread_server', 'args': '(myserver,)'}), '(target=thread_server, args=(myserver,))\n', (5728, 5768), False, 'import threading\n'), ((5859, 5875), 'time.sleep', 'time.sleep', (['(1000)'], {}), '(1000)\n', (5869, 5875), False, 'import time\n'), ((6361, 6382), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6380, 6382), False, 'import traceback\n'), ((5141, 5158), 'time.split', 'time.split', (["b': '"], {}), "(b': ')\n", (5151, 5158), False, 'import time\n')]
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# Copyright 2018 Intel
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import os
import argparse
import psutil
import time
parser = argparse.ArgumentParser(
description="Benchmark 3D and 2D Convolution Models",add_help=True)
parser.add_argument("--dim_lengthx",
type = int,
default=16,
help="Tensor cube length of side x")
parser.add_argument("--dim_lengthy",
type = int,
default=16,
help="Tensor cube length of side y")
parser.add_argument("--dim_lengthz",
type = int,
default=16,
help="Tensor cube length of side z")
parser.add_argument("--num_channels",
type = int,
default=1,
help="Number of channels")
parser.add_argument("--num_outputs",
type = int,
default=1,
help="Number of outputs")
parser.add_argument("--bz",
type = int,
default=1,
help="Batch size")
parser.add_argument("--lr",
type = float,
default=0.001,
help="Learning rate")
parser.add_argument("--num_datapoints",
type = int,
default=1024,
help="Number of datapoints")
parser.add_argument("--epochs",
type = int,
default=3,
help="Number of epochs")
parser.add_argument("--intraop_threads",
type = int,
default=psutil.cpu_count(logical=False),
help="Number of intraop threads")
parser.add_argument("--interop_threads",
type = int,
default=2,
help="Number of interop threads")
parser.add_argument("--blocktime",
type = int,
default=0,
help="Block time for CPU threads")
parser.add_argument("--print_model",
action="store_true",
default=False,
help="Print the summary of the model layers")
parser.add_argument("--use_upsampling",
action="store_true",
default=False,
help="Use upsampling instead of transposed convolution")
parser.add_argument("--D2",
action="store_true",
default=False,
help="Use 2D model and images instead of 3D.")
parser.add_argument("--single_class_output",
action="store_true",
default=False,
help="Use binary classifier instead of U-Net")
parser.add_argument("--mkl_verbose",
action="store_true",
default=False,
help="Print MKL debug statements.")
parser.add_argument("--trace",
action="store_true",
default=False,
help="Create trace of TensorFlow timeline")
parser.add_argument("--inference",
action="store_true",
default=False,
help="Test inference speed. Default=Test training speed")
args = parser.parse_args()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # Get rid of the AVX, SSE warnings
if args.mkl_verbose:
os.environ["MKL_VERBOSE"] = "1" # Print out messages from MKL operations
os.environ["MKLDNN_VERBOSE"] = "1" # Print out messages from MKL-DNN operations
os.environ["OMP_NUM_THREADS"] = str(args.intraop_threads)
os.environ["KMP_BLOCKTIME"] = str(args.blocktime)
os.environ["KMP_AFFINITY"] = "granularity=thread,compact,1,0"
import tensorflow as tf
from model import *
from tqdm import tqdm
import datetime
print("Started script on {}".format(datetime.datetime.now()))
print("args = {}".format(args))
print("OS: {}".format(os.system("uname -a")))
print("TensorFlow version: {}".format(tf.__version__))
import keras as K
print("Keras API version: {}".format(K.__version__))
if args.D2: # Define shape of the tensors (2D)
dims = (1,2)
tensor_shape = [args.bz,
args.dim_lengthx,
args.dim_lengthy,
args.num_channels]
out_shape = [args.bz,
args.dim_lengthx,
args.dim_lengthy,
args.num_outputs]
else: # Define shape of the tensors (3D)
dims=(1,2,3)
tensor_shape = [args.bz,
args.dim_lengthx,
args.dim_lengthy,
args.dim_lengthz,
args.num_channels]
    out_shape = [args.bz,
            args.dim_lengthx,
            args.dim_lengthy,
            args.dim_lengthz,
            args.num_outputs]
# Optimize CPU threads for TensorFlow
config = tf.ConfigProto(
inter_op_parallelism_threads=args.interop_threads,
intra_op_parallelism_threads=args.intraop_threads)
sess = tf.Session(config=config)
K.backend.set_session(sess)
global_step = tf.Variable(0, name="global_step", trainable=False)
# Define the shape of the input images
# For segmentation models, the label (mask) is the same shape.
img = tf.placeholder(tf.float32, shape=tensor_shape) # Input tensor
if args.single_class_output:
truth = tf.placeholder(tf.float32, shape=(args.bz,args.num_outputs)) # Label tensor
else:
    truth = tf.placeholder(tf.float32, shape=out_shape)  # Label tensor
# Define the model
# Predict the output mask
if not args.inference:
# Set keras learning phase to train
K.backend.set_learning_phase(True)
# Don"t initialize variables on the fly
K.backend.manual_variable_initialization(False)
if args.single_class_output:
if args.D2: # 2D convnet model
predictions = conv2D(img,
print_summary=args.print_model, n_out=args.num_outputs)
else: # 3D convet model
predictions = conv3D(img,
print_summary=args.print_model, n_out=args.num_outputs)
else:
if args.D2: # 2D U-Net model
predictions = unet2D(img,
use_upsampling=args.use_upsampling,
print_summary=args.print_model, n_out=args.num_outputs)
else: # 3D U-Net model
predictions = unet3D(img,
use_upsampling=args.use_upsampling,
print_summary=args.print_model, n_out=args.num_outputs)
# Performance metrics for model
if args.single_class_output:
loss = tf.losses.sigmoid_cross_entropy(truth, predictions)
metric_score = tf.metrics.mean_squared_error(truth, predictions)
else:
loss = dice_coef_loss(truth, predictions, dims) # Loss is the dice between mask and prediction
metric_score = dice_coef(truth, predictions, dims)
train_op = tf.train.AdamOptimizer(args.lr).minimize(loss, global_step=global_step)
# Just feed completely random data in for the benchmark testing
imgs = np.random.rand(*tensor_shape)
if args.single_class_output:
truths = np.random.rand(args.bz, args.num_outputs)
else:
    truths = np.random.rand(*out_shape)
# Initialize all variables
init_op = tf.global_variables_initializer()
init_l = tf.local_variables_initializer() # For TensorFlow metrics
sess.run(init_op)
sess.run(init_l)
saver = tf.train.Saver()
save_path = saver.save(sess, "./saved_model/model.ckpt")
print("Model saved in path: %s" % save_path)
# Freeze graph if inference
if args.inference:
K.backend.set_learning_phase(False)
# Set up trace for operations
run_metadata = tf.RunMetadata()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# Same number of sample to process regardless of batch size
# So if we have a larger batch size we can take fewer steps.
total_steps = args.num_datapoints//args.bz
print("Using random data.")
if args.inference:
print("Testing inference speed.")
else:
print("Testing training speed.")
start_time = time.time()
for epoch in tqdm(range(args.epochs), desc="Epoch #"):
for i in tqdm(range(total_steps), desc="Step #"):
if args.inference:
feed_dict = {img: imgs}
else:
feed_dict = {img: imgs, truth:truths}
if args.inference:
if args.trace:
history = sess.run([predictions], feed_dict=feed_dict,
options=run_options, run_metadata=run_metadata)
else:
history = sess.run([predictions], feed_dict=feed_dict)
else:
if args.trace:
history, loss_v, metric_v, this_step = \
sess.run([train_op, loss, metric_score, global_step],
feed_dict=feed_dict,
options=run_options, run_metadata=run_metadata)
else:
history, loss_v, metric_v, this_step = \
sess.run([train_op, loss, metric_score, global_step],
feed_dict=feed_dict)
stop_time = time.time()
print("\n\nTotal time = {:,.3f} seconds".format(stop_time - start_time))
print("Total images = {:,}".format(args.epochs*args.num_datapoints))
print("Speed = {:,.3f} images per second".format( \
(args.epochs*args.num_datapoints)/(stop_time - start_time)))
if args.trace:
"""
Save the training timeline
"""
from tensorflow.python.client import timeline
timeline_filename = "./timeline_trace.json"
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open(timeline_filename, "w") as f:
print("Saved Tensorflow trace to: {}".format(timeline_filename))
print("To view the trace:\n(1) Open Chrome browser.\n"
"(2) Go to this url -- chrome://tracing\n"
"(3) Click the load button.\n"
"(4) Load the file {}.".format(timeline_filename))
f.write(chrome_trace)
print("Stopped script on {}".format(datetime.datetime.now()))
|
[
"argparse.ArgumentParser",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.Variable",
"tensorflow.RunOptions",
"psutil.cpu_count",
"tensorflow.placeholder",
"datetime.datetime.now",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"keras.backend.set_session",
"tensorflow.Session",
"os.system",
"tensorflow.losses.sigmoid_cross_entropy",
"keras.backend.manual_variable_initialization",
"time.time",
"tensorflow.python.client.timeline.Timeline",
"keras.backend.set_learning_phase",
"tensorflow.RunMetadata",
"tensorflow.metrics.mean_squared_error",
"numpy.random.rand",
"tensorflow.train.AdamOptimizer"
] |
[((827, 924), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Benchmark 3D and 2D Convolution Models"""', 'add_help': '(True)'}), "(description=\n 'Benchmark 3D and 2D Convolution Models', add_help=True)\n", (850, 924), False, 'import argparse\n'), ((4769, 4889), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'inter_op_parallelism_threads': 'args.interop_threads', 'intra_op_parallelism_threads': 'args.intraop_threads'}), '(inter_op_parallelism_threads=args.interop_threads,\n intra_op_parallelism_threads=args.intraop_threads)\n', (4783, 4889), True, 'import tensorflow as tf\n'), ((4899, 4924), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4909, 4924), True, 'import tensorflow as tf\n'), ((4925, 4952), 'keras.backend.set_session', 'K.backend.set_session', (['sess'], {}), '(sess)\n', (4946, 4952), True, 'import keras as K\n'), ((4969, 5020), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (4980, 5020), True, 'import tensorflow as tf\n'), ((5130, 5176), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'tensor_shape'}), '(tf.float32, shape=tensor_shape)\n', (5144, 5176), True, 'import tensorflow as tf\n'), ((6731, 6760), 'numpy.random.rand', 'np.random.rand', (['*tensor_shape'], {}), '(*tensor_shape)\n', (6745, 6760), True, 'import numpy as np\n'), ((6927, 6960), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6958, 6960), True, 'import tensorflow as tf\n'), ((6970, 7002), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (7000, 7002), True, 'import tensorflow as tf\n'), ((7073, 7089), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7087, 7089), True, 'import tensorflow as tf\n'), ((7323, 7339), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (7337, 7339), True, 'import tensorflow as tf\n'), ((7354, 7405), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), '(trace_level=tf.RunOptions.FULL_TRACE)\n', (7367, 7405), True, 'import tensorflow as tf\n'), ((7709, 7720), 'time.time', 'time.time', ([], {}), '()\n', (7718, 7720), False, 'import time\n'), ((8513, 8524), 'time.time', 'time.time', ([], {}), '()\n', (8522, 8524), False, 'import time\n'), ((5231, 5292), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(args.bz, args.num_outputs)'}), '(tf.float32, shape=(args.bz, args.num_outputs))\n', (5245, 5292), True, 'import tensorflow as tf\n'), ((5322, 5368), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'tensor_shape'}), '(tf.float32, shape=tensor_shape)\n', (5336, 5368), True, 'import tensorflow as tf\n'), ((5492, 5526), 'keras.backend.set_learning_phase', 'K.backend.set_learning_phase', (['(True)'], {}), '(True)\n', (5520, 5526), True, 'import keras as K\n'), ((5569, 5616), 'keras.backend.manual_variable_initialization', 'K.backend.manual_variable_initialization', (['(False)'], {}), '(False)\n', (5609, 5616), True, 'import keras as K\n'), ((6302, 6353), 'tensorflow.losses.sigmoid_cross_entropy', 'tf.losses.sigmoid_cross_entropy', (['truth', 'predictions'], {}), '(truth, predictions)\n', (6333, 6353), True, 'import tensorflow as tf\n'), ((6370, 6419), 'tensorflow.metrics.mean_squared_error', 'tf.metrics.mean_squared_error', (['truth', 'predictions'], {}), '(truth, predictions)\n', (6399, 6419), 
True, 'import tensorflow as tf\n'), ((6801, 6842), 'numpy.random.rand', 'np.random.rand', (['args.bz', 'args.num_outputs'], {}), '(args.bz, args.num_outputs)\n', (6815, 6842), True, 'import numpy as np\n'), ((6859, 6888), 'numpy.random.rand', 'np.random.rand', (['*tensor_shape'], {}), '(*tensor_shape)\n', (6873, 6888), True, 'import numpy as np\n'), ((7241, 7276), 'keras.backend.set_learning_phase', 'K.backend.set_learning_phase', (['(False)'], {}), '(False)\n', (7269, 7276), True, 'import keras as K\n'), ((8951, 8993), 'tensorflow.python.client.timeline.Timeline', 'timeline.Timeline', (['run_metadata.step_stats'], {}), '(run_metadata.step_stats)\n', (8968, 8993), False, 'from tensorflow.python.client import timeline\n'), ((2124, 2155), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (2140, 2155), False, 'import psutil\n'), ((3938, 3961), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3959, 3961), False, 'import datetime\n'), ((4019, 4040), 'os.system', 'os.system', (['"""uname -a"""'], {}), "('uname -a')\n", (4028, 4040), False, 'import os\n'), ((6587, 6618), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['args.lr'], {}), '(args.lr)\n', (6609, 6618), True, 'import tensorflow as tf\n'), ((9415, 9438), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9436, 9438), False, 'import datetime\n')]
|
import bpy
import bgl
from logging import getLogger
logger = getLogger(__name__)
translation = bpy.app.translations.pgettext
def capture_under_cursor(buffer, mouse_x=0, mouse_y=0, type_flg="i") -> list:
"""
フラットなrgba(float)のlistを返す
"""
    # creating and reading a GL_FLOAT buffer is absurdly slow, so GL_BYTE/GL_UNSIGNED_BYTE is used instead
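    # read from the front buffer, i.e. the pixels currently shown on screen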
bgl.glReadBuffer(bgl.GL_FRONT)
bgl.glReadPixels(
mouse_x,
mouse_y,
1,
1,
bgl.GL_RGBA,
bgl.GL_UNSIGNED_BYTE,
buffer,
)
if type_flg == "i":
return [value for value in buffer]
elif type_flg == "f":
return [value / 255 for value in buffer]
def bytes_to_color_code(color: list) -> str:
"""RGBAのイテラブルを投げるとカラーコードを返してくれる"""
c = color
return f"#{c[0]:x}{c[1]:x}{c[2]:x}{c[3]:x}"
def create_buffer(src_width: int = 1, src_height: int = 1):
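    # width * height pixels, 4 bytes (RGBA) per pixel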
buffer = bgl.Buffer(bgl.GL_BYTE, src_width * src_height * 4)
return buffer
class TEMPLATE_OT_CaptureColor(bpy.types.Operator):
"""カーソル下の色を取得するやつ"""
bl_idname = "template.capture_color"
bl_label = translation("my operator")
bl_description = "operator description"
bl_options = {"REGISTER", "UNDO"}
buffer = create_buffer()
    # use invoke instead of execute when the operator needs the event
def invoke(self, context, event):
color = capture_under_cursor(self.buffer, event.mouse_x, event.mouse_y, "f")
context.tool_settings.gpencil_paint.brush.color = color[:3]
# brushes = [b for b in bpy.data.brushes]
# for b in brushes:
# b.color = (color[:3])
# logging
logger.debug(color)
        # report the value to the info area
self.report({"INFO"}, f"{color}")
        # return the success status
return {"FINISHED"}
class TEMPLATE_PT_CursorColor(bpy.types.Panel):
bl_label = "CursorColor"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw(self, context):
layout = self.layout
layout.operator(TEMPLATE_OT_CaptureColor.bl_idname)
classes = [TEMPLATE_OT_CaptureColor, TEMPLATE_PT_CursorColor]
tools = []
def register():
    for c in classes:
bpy.utils.register_class(c)
for t in tools:
bpy.utils.register_tool(t)
def unregister():
    for c in classes:
bpy.utils.unregister_class(c)
for t in tools:
bpy.utils.unregister_tool(t)
|
[
"bpy.utils.register_tool",
"logging.getLogger",
"bgl.glReadBuffer",
"bpy.utils.unregister_class",
"bgl.glReadPixels",
"bpy.utils.unregister_tool",
"bgl.Buffer",
"bpy.utils.register_class"
] |
[((62, 81), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (71, 81), False, 'from logging import getLogger\n'), ((321, 351), 'bgl.glReadBuffer', 'bgl.glReadBuffer', (['bgl.GL_FRONT'], {}), '(bgl.GL_FRONT)\n', (337, 351), False, 'import bgl\n'), ((356, 443), 'bgl.glReadPixels', 'bgl.glReadPixels', (['mouse_x', 'mouse_y', '(1)', '(1)', 'bgl.GL_RGBA', 'bgl.GL_UNSIGNED_BYTE', 'buffer'], {}), '(mouse_x, mouse_y, 1, 1, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE,\n buffer)\n', (372, 443), False, 'import bgl\n'), ((868, 919), 'bgl.Buffer', 'bgl.Buffer', (['bgl.GL_BYTE', '(src_width * src_height * 4)'], {}), '(bgl.GL_BYTE, src_width * src_height * 4)\n', (878, 919), False, 'import bgl\n'), ((2104, 2131), 'bpy.utils.register_class', 'bpy.utils.register_class', (['c'], {}), '(c)\n', (2128, 2131), False, 'import bpy\n'), ((2160, 2186), 'bpy.utils.register_tool', 'bpy.utils.register_tool', (['t'], {}), '(t)\n', (2183, 2186), False, 'import bpy\n'), ((2238, 2267), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['c'], {}), '(c)\n', (2264, 2267), False, 'import bpy\n'), ((2296, 2324), 'bpy.utils.unregister_tool', 'bpy.utils.unregister_tool', (['t'], {}), '(t)\n', (2321, 2324), False, 'import bpy\n')]
|
import importlib
name = 'test_module'
# method 1
module = importlib.import_module('test_module')
# method 2
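# fromlist=[''] makes __import__ return the named module itself instead of the top-level package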
module = __import__(name, fromlist=[''])
module.some_func()
|
[
"importlib.import_module"
] |
[((59, 97), 'importlib.import_module', 'importlib.import_module', (['"""test_module"""'], {}), "('test_module')\n", (82, 97), False, 'import importlib\n')]
|
from setuptools import setup
setup(name="Feel the streets")
|
[
"setuptools.setup"
] |
[((32, 62), 'setuptools.setup', 'setup', ([], {'name': '"""Feel the streets"""'}), "(name='Feel the streets')\n", (37, 62), False, 'from setuptools import setup\n')]
|
"""
Base forms for editing the models in this module. You can use or extend these forms in your
project to ensure that all validation is correct.
"""
from django import forms
from rdflib_django import models
from rdflib import namespace
class NamespaceForm(forms.ModelForm):
"""
Form for editing namespaces.
"""
class Meta:
model = models.NamespaceModel
fields = ('prefix', 'uri')
def __init__(self, *args, **kwargs):
super(NamespaceForm, self).__init__(*args, **kwargs)
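        # fixed namespaces are rendered read-only and skip prefix validation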
if self.instance.fixed:
self.fields['prefix'].widget.attrs['readonly'] = True
self.fields['uri'].widget.attrs['readonly'] = True
def clean_prefix(self):
"""
Validates the prefix
"""
if self.instance.fixed:
return self.instance.prefix
prefix = self.cleaned_data['prefix']
if not namespace.is_ncname(prefix):
raise forms.ValidationError("This is an invalid prefix")
return prefix
def clean_uri(self):
"""
Validates the URI
"""
if self.instance.fixed:
return self.instance.uri
uri = self.cleaned_data['uri']
# todo: URI validation
return uri
|
[
"django.forms.ValidationError",
"rdflib.namespace.is_ncname"
] |
[((896, 923), 'rdflib.namespace.is_ncname', 'namespace.is_ncname', (['prefix'], {}), '(prefix)\n', (915, 923), False, 'from rdflib import namespace\n'), ((943, 993), 'django.forms.ValidationError', 'forms.ValidationError', (['"""This is an invalid prefix"""'], {}), "('This is an invalid prefix')\n", (964, 993), False, 'from django import forms\n')]
|
from codecs import open
from setuptools import setup
import re
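# read __version__ from the package source without importing the package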
with open('src/agstoolbox/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(
name='agstoolbox',
version=version,
description='A Toolbox for managing AGS Editor versions.',
url='https://github.com/ericoporto/agstoolbox',
download_url='https://github.com/ericoporto/agstoolbox/tarball/' + version,
author='erico',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License'
],
keywords='AGS Toolbox',
install_requires=['pyqt6', 'requests', 'defusedxml', 'platformdirs', 'pefile'],
packages=["agstoolbox"],
package_dir={"": "src"},
scripts=["agstoolbox", "atbx"],
package_data={
'agstoolbox': ['data/*.png']
},
)
|
[
"codecs.open",
"setuptools.setup"
] |
[((243, 886), 'setuptools.setup', 'setup', ([], {'name': '"""agstoolbox"""', 'version': 'version', 'description': '"""A Toolbox for managing AGS Editor versions."""', 'url': '"""https://github.com/ericoporto/agstoolbox"""', 'download_url': "('https://github.com/ericoporto/agstoolbox/tarball/' + version)", 'author': '"""erico"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['Intended Audience :: Developers', 'License :: OSI Approved :: MIT License']", 'keywords': '"""AGS Toolbox"""', 'install_requires': "['pyqt6', 'requests', 'defusedxml', 'platformdirs', 'pefile']", 'packages': "['agstoolbox']", 'package_dir': "{'': 'src'}", 'scripts': "['agstoolbox', 'atbx']", 'package_data': "{'agstoolbox': ['data/*.png']}"}), "(name='agstoolbox', version=version, description=\n 'A Toolbox for managing AGS Editor versions.', url=\n 'https://github.com/ericoporto/agstoolbox', download_url=\n 'https://github.com/ericoporto/agstoolbox/tarball/' + version, author=\n 'erico', author_email='<EMAIL>', license='MIT', classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License'], keywords='AGS Toolbox',\n install_requires=['pyqt6', 'requests', 'defusedxml', 'platformdirs',\n 'pefile'], packages=['agstoolbox'], package_dir={'': 'src'}, scripts=[\n 'agstoolbox', 'atbx'], package_data={'agstoolbox': ['data/*.png']})\n", (248, 886), False, 'from setuptools import setup\n'), ((70, 109), 'codecs.open', 'open', (['"""src/agstoolbox/__init__.py"""', '"""r"""'], {}), "('src/agstoolbox/__init__.py', 'r')\n", (74, 109), False, 'from codecs import open\n')]
|
"""Script to program a bitstream to a chip"""
import argparse
from nirram import NIRRAM
# Get arguments
parser = argparse.ArgumentParser(description="Program a bitstream to a chip.")
parser.add_argument("chipname", help="chip name for logging")
parser.add_argument("bitstream", help="bitstream file name")
# Expect to receive two arg numbers when specifying a LRS (or HRS) range
parser.add_argument("--lrs-range", nargs='+', type=float, default=[9e3, 11e3], help="target LRS")
parser.add_argument("--hrs-range", nargs='+', type=float, default=[100e3, 1e9], help="target HRS")
parser.add_argument("--start-addr", type=int, default=0, help="start addr")
parser.add_argument("--end-addr", type=int, default=65536, help="end addr")
parser.add_argument("--step-addr", type=int, default=1, help="addr step")
parser.add_argument("--iterations", type=int, default=3, help="number of programming iterations")
args = parser.parse_args()
# Initialize NI system
nisys = NIRRAM(args.chipname)
# Read bitstream
bitstream = open(args.bitstream).readlines()
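# each line of the bitstream file encodes one bit: 0 targets LRS, 1 targets HRS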
# Do operation across cells
for i in range(args.iterations):
for addr, bit in zip(range(args.start_addr, args.end_addr, args.step_addr), bitstream):
nisys.set_addr(addr)
bit = int(bit.strip())
if bit == 0: # bit 0: LRS
target = nisys.target(args.lrs_range[0], args.lrs_range[1])
if bit == 1: # bit 1: HRS
target = nisys.target(args.hrs_range[0], args.hrs_range[1])
print(f"Iteration {i}, Address {addr}: {target}")
# Shutdown
nisys.close()
|
[
"nirram.NIRRAM",
"argparse.ArgumentParser"
] |
[((114, 183), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Program a bitstream to a chip."""'}), "(description='Program a bitstream to a chip.')\n", (137, 183), False, 'import argparse\n'), ((962, 983), 'nirram.NIRRAM', 'NIRRAM', (['args.chipname'], {}), '(args.chipname)\n', (968, 983), False, 'from nirram import NIRRAM\n')]
|
"""Build the qbsolv package."""
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext
import os
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
else:
USE_CYTHON = False
extra_compile_args = {
'msvc': [],
'unix': ['-std=c++11', '-Ofast', '-Wall', '-Wextra'],
# 'unix': ['-std=c++1y','-w','-O0', '-g', '-fipa-pure-const'],
}
extra_link_args = {
'msvc': [],
'unix': [],
}
class build_ext_compiler_check(build_ext):
def build_extensions(self):
compiler = self.compiler.compiler_type
compile_args = extra_compile_args[compiler]
for ext in self.extensions:
ext.extra_compile_args = compile_args
        link_args = extra_link_args[compiler]
        for ext in self.extensions:
            ext.extra_link_args = link_args
build_ext.build_extensions(self)
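# build from the Cython source when available, otherwise from the pre-generated C++ file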
ext = '.pyx' if USE_CYTHON else '.cpp'
extensions = [Extension('dwave_qbsolv.qbsolv_binding',
['python/dwave_qbsolv/qbsolv_binding' + ext,
'./python/globals.cc',
'./src/solver.cc',
'./src/dwsolv.cc',
'./src/util.cc'],
include_dirs=['./python', './src', './include', './cmd']
)]
if USE_CYTHON:
extensions = cythonize(extensions, language='c++')
packages = ['dwave_qbsolv']
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
python_requires = '>=3.5'
setup(
name='dwave-qbsolv',
version='0.3.2',
packages=packages,
package_dir={'dwave_qbsolv': 'python/dwave_qbsolv'},
install_requires=['dimod>=0.8.1,<0.10.0'],
ext_modules=extensions,
cmdclass={'build_ext': build_ext_compiler_check},
long_description=open('README.rst').read(),
classifiers=classifiers,
license='Apache 2.0',
python_requires=python_requires
)
|
[
"Cython.Build.cythonize",
"os.path.dirname",
"setuptools.extension.Extension",
"setuptools.command.build_ext.build_ext.build_extensions",
"os.path.join"
] |
[((189, 214), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (204, 214), False, 'import os\n'), ((1145, 1381), 'setuptools.extension.Extension', 'Extension', (['"""dwave_qbsolv.qbsolv_binding"""', "['python/dwave_qbsolv/qbsolv_binding' + ext, './python/globals.cc',\n './src/solver.cc', './src/dwsolv.cc', './src/util.cc']"], {'include_dirs': "['./python', './src', './include', './cmd']"}), "('dwave_qbsolv.qbsolv_binding', [\n 'python/dwave_qbsolv/qbsolv_binding' + ext, './python/globals.cc',\n './src/solver.cc', './src/dwsolv.cc', './src/util.cc'], include_dirs=[\n './python', './src', './include', './cmd'])\n", (1154, 1381), False, 'from setuptools.extension import Extension\n'), ((1575, 1612), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {'language': '"""c++"""'}), "(extensions, language='c++')\n", (1584, 1612), False, 'from Cython.Build import cythonize\n'), ((238, 267), 'os.path.join', 'os.path.join', (['cwd', '"""PKG-INFO"""'], {}), "(cwd, 'PKG-INFO')\n", (250, 267), False, 'import os\n'), ((1056, 1088), 'setuptools.command.build_ext.build_ext.build_extensions', 'build_ext.build_extensions', (['self'], {}), '(self)\n', (1082, 1088), False, 'from setuptools.command.build_ext import build_ext\n')]
|
import itertools
import os
import vlcp.service.sdn.ofpportmanager as ofpportmanager
import vlcp.service.kvdb.objectdb as objectdb
import vlcp.service.sdn.ioprocessing as iop
from vlcp.service.sdn.flowbase import FlowBase
from vlcp.server.module import depend, call_api
from vlcp.config.config import defaultconfig
from vlcp.event.runnable import RoutineContainer
from vlcp.service.sdn.ofpmanager import FlowInitialize
from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes,ip4_icmp_payload,\
ethernet_l7, ip4_packet_l7, ip4_payload,ICMP_ECHOREPLY,icmp_bestparse,icmp_echo,\
ip_frag
from vlcp.utils.flowupdater import FlowUpdater
from vlcp.protocol.openflow.openflow import OpenflowConnectionStateEvent, OpenflowAsyncMessageEvent
from vlcp.utils.networkmodel import SubNet,RouterPort
from namedstruct.stdprim import uint16
from vlcp.event.event import M_
class ICMPResponderUpdater(FlowUpdater):
def __init__(self,connection,parent):
super(ICMPResponderUpdater,self).__init__(connection,(),('icmpresponderupdate',connection),parent._logger)
self.parent = parent
self._lastlognets = ()
self._lastlogports = ()
self._lastsubnetsinfo = dict()
self._orig_initialkeys = ()
async def main(self):
try:
self.subroutine(self._update_handler(),True,"update_handler_routine")
            # use the controller to reply to ICMP pings, so start the packet-in handler routine
if not self.parent.prepush:
self.subroutine(self._icmp_packetin_handler(),True,"icmp_packetin_handler_routine")
await FlowUpdater.main(self)
finally:
if hasattr(self,"update_handler_routine"):
self.update_handler_routine.close()
if hasattr(self,"icmp_packetin_handler_routine"):
self.icmp_packetin_handler_routine.close()
async def _icmp_packetin_handler(self):
conn = self._connection
ofdef = self._connection.openflowdef
l3input = self.parent._gettableindex("l3input",self._connection.protocol.vhost)
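        # seed the ICMP identifier with a random 16-bit value; it is incremented for each reply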
transactid = uint16.create(os.urandom(2))
async def send_packet_out(portid,packet):
await self.execute_commands(conn,
[
ofdef.ofp_packet_out(
buffer_id = ofdef.OFP_NO_BUFFER,
in_port = ofdef.OFPP_CONTROLLER,
actions = [
ofdef.ofp_action_output(port = portid,
max_len = ofdef.OFPCML_NO_BUFFER
)
],
data = packet._tobytes()
)
])
icmp_packetin_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN,None,None,l3input,2,
self._connection,self._connection.connmark)
while True:
ev = await icmp_packetin_matcher
msg = ev.message
inport = ofdef.ofp_port_no.create(ofdef.get_oxm(msg.match.oxm_fields,ofdef.OXM_OF_IN_PORT))
            # this must be an ICMP echo request (only those flows output to this handler)
icmp_packet = ethernet_l7.create(msg.data)
if ip_frag(icmp_packet) != 0:
# ignore fragmented packets
continue
transactid = (transactid + 1) & 0xffff
reply_packet = ip4_packet_l7((ip4_payload,ip4_icmp_payload),
(icmp_bestparse, icmp_echo),
dl_src = icmp_packet.dl_dst,
dl_dst = icmp_packet.dl_src,
ip_src = icmp_packet.ip_dst,
ip_dst = icmp_packet.ip_src,
frag_off = 0,
ttl = 128,
identifier = transactid,
icmp_type = ICMP_ECHOREPLY,
icmp_code = icmp_packet.icmp_code,
icmp_id = icmp_packet.icmp_id,
icmp_seq = icmp_packet.icmp_seq,
data = icmp_packet.data
)
self.subroutine(send_packet_out(inport,reply_packet))
async def _update_handler(self):
        # receive this event from the ioprocessing module when lgport/lgnet/phyport/phynet objects change
dataobjectchange = iop.DataObjectChanged.createMatcher(None,None,self._connection)
while True:
ev = await dataobjectchange
            # save to instance attributes; used in other methods
self._lastlogports,_,self._lastlognets,_ = ev.current
self._update_walk()
def _walk_lgport(self,key,value,walk,save):
if value is not None:
save(key)
if hasattr(value,'subnet'):
try:
subnetobj = walk(value.subnet.getkey())
except KeyError:
pass
else:
save(value.subnet.getkey())
if subnetobj is not None and hasattr(subnetobj,"router"):
try:
_ = walk(subnetobj.router.getkey())
except KeyError:
pass
else:
save(subnetobj.router.getkey())
def _walk_lgnet(self,key,value,walk,save):
save(key)
        # if value is None, still save its key:
        # watching the key means we receive an event when the object is created
def _update_walk(self):
lgportkeys = [p.getkey() for p,_ in self._lastlogports]
lgnetkeys = [p.getkey() for p,_ in self._lastlognets]
self._initialkeys = lgportkeys + lgnetkeys
self._orig_initialkeys = lgportkeys + lgnetkeys
self._walkerdict = dict(itertools.chain(((p,self._walk_lgport) for p in lgportkeys),
((n,self._walk_lgnet) for n in lgnetkeys)))
self.subroutine(self.restart_walk(),False)
def reset_initialkeys(self,keys,values):
        # walk map: logicalport --> subnet --> routerport
        # we get the subnet object and add its keys to initialkeys,
        # so when a subnet updates, the walk restarts and we get the new routerport
subnetkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(SubNet)]
self._initialkeys = tuple(itertools.chain(self._orig_initialkeys,subnetkeys))
async def updateflow(self, connection, addvalues, removevalues, updatedvalues):
try:
allobjects = set(o for o in self._savedresult if o is not None and not o.isdeleted())
lastsubnetsinfo = self._lastsubnetsinfo
currentlognetsinfo = dict((n,id) for n,id in self._lastlognets if n in allobjects)
currentrouterportsinfo = dict((o.subnet,o) for o in allobjects
if o.isinstance(RouterPort))
currentsubnetsinfo = dict((o,(getattr(currentrouterportsinfo[o],"ip_address",getattr(o,"gateway",None)),
self.parent.inroutermac,o.network.id,currentlognetsinfo[o.network]))
for o in allobjects if o.isinstance(SubNet)
and hasattr(o,"router") and o in currentrouterportsinfo
and o.network in currentlognetsinfo
and (hasattr(currentrouterportsinfo[o],"ip_address")
or hasattr(o,"gateway"))
and ( not hasattr(o,"isexternal") or o.isexternal == False))
self._lastsubnetsinfo = currentsubnetsinfo
ofdef = connection.openflowdef
vhost = connection.protocol.vhost
l3input = self.parent._gettableindex("l3input",vhost)
cmds = []
if connection.protocol.disablenxext:
def match_network(nid):
return ofdef.create_oxm(ofdef.OXM_OF_METADATA_W, (nid & 0xffff) << 32,
b'\x00\x00\xff\xff\x00\x00\x00\x00')
else:
def match_network(nid):
return ofdef.create_oxm(ofdef.NXM_NX_REG4, nid)
            # the delete flow is the same with or without prepush, so define it once
def _deleteicmpflows(ipaddress, macaddress, networkid):
return [
ofdef.ofp_flow_mod(
cookie = 0x2,
cookie_mask = 0xffffffffffffffff,
table_id = l3input,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
ofdef.create_oxm(ofdef.NXM_NX_REG4,networkid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
]
)
)
]
if not self.parent.prepush:
def _createicmpflows(ipaddress, macaddress, networkid):
return [
ofdef.ofp_flow_mod(
cookie = 0x2,
cookie_mask = 0xffffffffffffffff,
table_id = l3input,
command = ofdef.OFPFC_ADD,
                        # the ICMP-to-router match overlaps the plain IP-to-router match,
                        # so use priority + 1
priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
match_network(networkid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
]
),
instructions = [
ofdef.ofp_instruction_actions(
actions = [
ofdef.ofp_action_output(
port = ofdef.OFPP_CONTROLLER,
max_len = ofdef.OFPCML_NO_BUFFER
)
]
)
]
)
]
else:
def _createicmpflows(ipaddress, macaddress, networkid):
return [
ofdef.ofp_flow_mod(
cookie = 0x2,
cookie_mask = 0xffffffffffffffff,
table_id = l3input,
command = ofdef.OFPFC_ADD,
                        # the ICMP-to-router match overlaps the plain IP-to-router match,
                        # so use priority + 1
priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
match_network(networkid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
]
),
instructions = [
ofdef.ofp_instruction_actions(
actions = [
ofdef.nx_action_reg_move(
n_bits = 48,
src = ofdef.OXM_OF_ETH_SRC,
dst = ofdef.OXM_OF_ETH_DST
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(
ofdef.OXM_OF_ETH_SRC,
ofdef.mac_addr(macaddress)
)
),
ofdef.nx_action_reg_move(
n_bits = 32,
src = ofdef.OXM_OF_IPV4_SRC,
dst = ofdef.OXM_OF_IPV4_DST
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(
ofdef.OXM_OF_IPV4_SRC,
ofdef.ip4_addr(ipaddress)
)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(
ofdef.OXM_OF_ICMPV4_TYPE,
ICMP_ECHOREPLY
)
),
ofdef.ofp_action_nw_ttl(
nw_ttl = 128
),
ofdef.ofp_action_output(
port = ofdef.OFPP_IN_PORT
)
]
)
]
)
]
for subnet in lastsubnetsinfo.keys():
if subnet not in currentsubnetsinfo\
or (subnet in currentsubnetsinfo and lastsubnetsinfo[subnet] != currentsubnetsinfo[subnet]):
                    # the subnet was removed or its info changed: remove the flow info
ip_address, mac_address, networkid, nid = lastsubnetsinfo[subnet]
remove_arp = {(ip_address,mac_address,networkid,True),}
await call_api(self, 'arpresponder', 'removeproxyarp', {'connection':connection,
'arpentries': remove_arp})
cmds.extend(_deleteicmpflows(ip_address,mac_address,nid))
await self.execute_commands(connection, cmds)
for subnet in currentsubnetsinfo.keys():
if subnet not in lastsubnetsinfo\
or (subnet in lastsubnetsinfo and lastsubnetsinfo[subnet] != currentsubnetsinfo[subnet]):
ip_address, mac_address, networkid, nid = currentsubnetsinfo[subnet]
add_arp = {(ip_address,mac_address,networkid,True),}
await call_api(self, 'arpresponder', 'createproxyarp', {'connection': connection,
'arpentries': add_arp})
cmds.extend(_createicmpflows(ip_address,mac_address,nid))
await self.execute_commands(connection, cmds)
except Exception:
self._logger.warning("Unexpected exception in icmp_flow_updater, ignore it! Continue",exc_info=True)
@defaultconfig
@depend(ofpportmanager.OpenflowPortManager,objectdb.ObjectDB)
class ICMPResponder(FlowBase):
"""
Respond ICMP echo (ping) requests to the gateway
"""
_tablerequest = (
("l3input",("l2input",),""),
("l2output",("l3input",),"")
)
# True : reply icmp ping with flow
# False: reply icmp ping with controller PACKET_IN/PACKET_OUT
#
# Must use prepush=True with OpenvSwitch 2.5+
#
_default_prepush = False
# "Gateway" responds with this MAC address
_default_inroutermac = '1a:23:67:59:63:33'
def __init__(self,server):
super(ICMPResponder,self).__init__(server)
self.app_routine = RoutineContainer(self.scheduler)
self.app_routine.main = self._main
self.routines.append(self.app_routine)
self._flowupdater = dict()
async def _main(self):
flowinit = FlowInitialize.createMatcher(_ismatch=lambda x: self.vhostbind is None or
x.vhost in self.vhostbind)
conndown = OpenflowConnectionStateEvent.createMatcher(state = OpenflowConnectionStateEvent.CONNECTION_DOWN,
_ismatch=lambda x:self.vhostbind is None or
x.createby.vhost in self.vhostbind)
while True:
ev, m = await M_(flowinit,conndown)
if m is flowinit:
c = ev.connection
self.app_routine.subroutine(self._init_conn(c))
if m is conndown:
c = ev.connection
self.app_routine.subroutine(self._remove_conn(c))
async def _init_conn(self,conn):
if conn in self._flowupdater:
updater = self._flowupdater.pop(conn)
updater.close()
updater = ICMPResponderUpdater(conn,self)
self._flowupdater[conn] = updater
updater.start()
async def _remove_conn(self,conn):
if conn in self._flowupdater:
updater = self._flowupdater.pop(conn)
updater.close()
|
[
"vlcp.event.event.M_",
"vlcp.server.module.depend",
"vlcp.utils.ethernet.ip4_packet_l7",
"vlcp.utils.flowupdater.FlowUpdater.main",
"vlcp.utils.ethernet.mac_addr_bytes",
"vlcp.event.runnable.RoutineContainer",
"vlcp.protocol.openflow.openflow.OpenflowConnectionStateEvent.createMatcher",
"vlcp.utils.ethernet.ip4_addr_bytes",
"vlcp.protocol.openflow.openflow.OpenflowAsyncMessageEvent.createMatcher",
"vlcp.utils.ethernet.ip_frag",
"vlcp.utils.ethernet.ethernet_l7.create",
"vlcp.server.module.call_api",
"os.urandom",
"itertools.chain",
"vlcp.service.sdn.ofpmanager.FlowInitialize.createMatcher",
"vlcp.service.sdn.ioprocessing.DataObjectChanged.createMatcher"
] |
[((18060, 18121), 'vlcp.server.module.depend', 'depend', (['ofpportmanager.OpenflowPortManager', 'objectdb.ObjectDB'], {}), '(ofpportmanager.OpenflowPortManager, objectdb.ObjectDB)\n', (18066, 18121), False, 'from vlcp.server.module import depend, call_api\n'), ((2901, 3035), 'vlcp.protocol.openflow.openflow.OpenflowAsyncMessageEvent.createMatcher', 'OpenflowAsyncMessageEvent.createMatcher', (['ofdef.OFPT_PACKET_IN', 'None', 'None', 'l3input', '(2)', 'self._connection', 'self._connection.connmark'], {}), '(ofdef.OFPT_PACKET_IN, None, None,\n l3input, 2, self._connection, self._connection.connmark)\n', (2940, 3035), False, 'from vlcp.protocol.openflow.openflow import OpenflowConnectionStateEvent, OpenflowAsyncMessageEvent\n'), ((4804, 4869), 'vlcp.service.sdn.ioprocessing.DataObjectChanged.createMatcher', 'iop.DataObjectChanged.createMatcher', (['None', 'None', 'self._connection'], {}), '(None, None, self._connection)\n', (4839, 4869), True, 'import vlcp.service.sdn.ioprocessing as iop\n'), ((18724, 18756), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.scheduler'], {}), '(self.scheduler)\n', (18740, 18756), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((18930, 19035), 'vlcp.service.sdn.ofpmanager.FlowInitialize.createMatcher', 'FlowInitialize.createMatcher', ([], {'_ismatch': '(lambda x: self.vhostbind is None or x.vhost in self.vhostbind)'}), '(_ismatch=lambda x: self.vhostbind is None or x\n .vhost in self.vhostbind)\n', (18958, 19035), False, 'from vlcp.service.sdn.ofpmanager import FlowInitialize\n'), ((19098, 19283), 'vlcp.protocol.openflow.openflow.OpenflowConnectionStateEvent.createMatcher', 'OpenflowConnectionStateEvent.createMatcher', ([], {'state': 'OpenflowConnectionStateEvent.CONNECTION_DOWN', '_ismatch': '(lambda x: self.vhostbind is None or x.createby.vhost in self.vhostbind)'}), '(state=\n OpenflowConnectionStateEvent.CONNECTION_DOWN, _ismatch=lambda x: self.\n vhostbind is None or x.createby.vhost in self.vhostbind)\n', (19140, 19283), False, 'from vlcp.protocol.openflow.openflow import OpenflowConnectionStateEvent, OpenflowAsyncMessageEvent\n'), ((2126, 2139), 'os.urandom', 'os.urandom', (['(2)'], {}), '(2)\n', (2136, 2139), False, 'import os\n'), ((3354, 3382), 'vlcp.utils.ethernet.ethernet_l7.create', 'ethernet_l7.create', (['msg.data'], {}), '(msg.data)\n', (3372, 3382), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((3598, 3989), 'vlcp.utils.ethernet.ip4_packet_l7', 'ip4_packet_l7', (['(ip4_payload, ip4_icmp_payload)', '(icmp_bestparse, icmp_echo)'], {'dl_src': 'icmp_packet.dl_dst', 'dl_dst': 'icmp_packet.dl_src', 'ip_src': 'icmp_packet.ip_dst', 'ip_dst': 'icmp_packet.ip_src', 'frag_off': '(0)', 'ttl': '(128)', 'identifier': 'transactid', 'icmp_type': 'ICMP_ECHOREPLY', 'icmp_code': 'icmp_packet.icmp_code', 'icmp_id': 'icmp_packet.icmp_id', 'icmp_seq': 'icmp_packet.icmp_seq', 'data': 'icmp_packet.data'}), '((ip4_payload, ip4_icmp_payload), (icmp_bestparse, icmp_echo),\n dl_src=icmp_packet.dl_dst, dl_dst=icmp_packet.dl_src, ip_src=\n icmp_packet.ip_dst, ip_dst=icmp_packet.ip_src, frag_off=0, ttl=128,\n identifier=transactid, icmp_type=ICMP_ECHOREPLY, icmp_code=icmp_packet.\n icmp_code, icmp_id=icmp_packet.icmp_id, icmp_seq=icmp_packet.icmp_seq,\n data=icmp_packet.data)\n', (3611, 3989), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((6249, 6359), 'itertools.chain', 'itertools.chain', (['((p, self._walk_lgport) for p in lgportkeys)', '((n, self._walk_lgnet) for n in lgnetkeys)'], {}), '(((p, self._walk_lgport) for p in lgportkeys), ((n, self.\n _walk_lgnet) for n in lgnetkeys))\n', (6264, 6359), False, 'import itertools\n'), ((6899, 6950), 'itertools.chain', 'itertools.chain', (['self._orig_initialkeys', 'subnetkeys'], {}), '(self._orig_initialkeys, subnetkeys)\n', (6914, 6950), False, 'import itertools\n'), ((1603, 1625), 'vlcp.utils.flowupdater.FlowUpdater.main', 'FlowUpdater.main', (['self'], {}), '(self)\n', (1619, 1625), False, 'from vlcp.utils.flowupdater import FlowUpdater\n'), ((3411, 3431), 'vlcp.utils.ethernet.ip_frag', 'ip_frag', (['icmp_packet'], {}), '(icmp_packet)\n', (3418, 3431), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((19417, 19439), 'vlcp.event.event.M_', 'M_', (['flowinit', 'conndown'], {}), '(flowinit, conndown)\n', (19419, 19439), False, 'from vlcp.event.event import M_\n'), ((16860, 16966), 'vlcp.server.module.call_api', 'call_api', (['self', '"""arpresponder"""', '"""removeproxyarp"""', "{'connection': connection, 'arpentries': remove_arp}"], {}), "(self, 'arpresponder', 'removeproxyarp', {'connection': connection,\n 'arpentries': remove_arp})\n", (16868, 16966), False, 'from vlcp.server.module import depend, call_api\n'), ((17586, 17689), 'vlcp.server.module.call_api', 'call_api', (['self', '"""arpresponder"""', '"""createproxyarp"""', "{'connection': connection, 'arpentries': add_arp}"], {}), "(self, 'arpresponder', 'createproxyarp', {'connection': connection,\n 'arpentries': add_arp})\n", (17594, 17689), False, 'from vlcp.server.module import depend, call_api\n'), ((9743, 9769), 'vlcp.utils.ethernet.mac_addr_bytes', 'mac_addr_bytes', (['macaddress'], {}), '(macaddress)\n', (9757, 9769), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((9943, 9968), 'vlcp.utils.ethernet.ip4_addr_bytes', 'ip4_addr_bytes', (['ipaddress'], {}), '(ipaddress)\n', (9957, 9968), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((11346, 11372), 'vlcp.utils.ethernet.mac_addr_bytes', 'mac_addr_bytes', (['macaddress'], {}), '(macaddress)\n', (11360, 11372), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((11546, 11571), 'vlcp.utils.ethernet.ip4_addr_bytes', 'ip4_addr_bytes', (['ipaddress'], {}), '(ipaddress)\n', (11560, 11571), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((13443, 13469), 'vlcp.utils.ethernet.mac_addr_bytes', 'mac_addr_bytes', (['macaddress'], {}), '(macaddress)\n', (13457, 13469), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n'), ((13643, 13668), 'vlcp.utils.ethernet.ip4_addr_bytes', 'ip4_addr_bytes', (['ipaddress'], {}), '(ipaddress)\n', (13657, 13668), False, 'from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_icmp_payload, ethernet_l7, ip4_packet_l7, ip4_payload, ICMP_ECHOREPLY, icmp_bestparse, icmp_echo, ip_frag\n')]
|
import json
from django.contrib.auth.models import User
from ldap3 import Server, Connection
from kubeops_api.models.setting import Setting
from message_center.models import UserNotificationConfig, UserReceiver, Message
class LDAPSync:
def __init__(self):
self._conn = None
settings = Setting.get_settings(tab='ldap')
self.ldap_enable = settings.get("AUTH_LDAP_ENABLE", False)
if not self.ldap_enable:
return
self.bind_dn = settings.get("AUTH_LDAP_BIND_DN")
self.bind_password = settings.get("AUTH_LDAP_BIND_PASSWORD")
self.search_ou = settings.get("AUTH_LDAP_SEARCH_OU")
self.search_filter = settings.get("AUTH_LDAP_SEARCH_FILTER")
self.server_uri = settings.get("AUTH_LDAP_SERVER_URI")
self.attr_map = json.loads(settings.get("AUTH_LDAP_USER_ATTR_MAP"))
@property
def connection(self):
if self._conn:
return self._conn
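        # Lazily create and bind the connection on first use, then cache it.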
server = Server(self.server_uri, use_ssl=False)
conn = Connection(server, self.bind_dn, self.bind_password)
conn.bind()
self._conn = conn
return self._conn
def search_users(self):
user_entries = list()
search_ous = str(self.search_ou).split('|')
for ou in search_ous:
self.search_user_entries_ou(search_ou=ou)
user_entries.extend(self.connection.entries)
return user_entries
def search_user_entries_ou(self, search_ou):
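        # Build the LDAP filter from the configured template by substituting a
        # wildcard for the user (e.g. a template like '(uid=%(user)s)' becomes
        # '(uid=*)'); matched entries land in self.connection.entries.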
search_filter = self.search_filter % {'user': '*'}
attributes = list(self.attr_map.values())
self.connection.search(
search_base=search_ou, search_filter=search_filter,
attributes=attributes)
def user_entry_to_dict(self, entry):
user = {}
attr_map = self.attr_map.items()
for attr, mapping in attr_map:
if not hasattr(entry, mapping):
continue
value = getattr(entry, mapping).value or ''
user[attr] = value
return user
def user_entries_to_dict(self, user_entries):
users = []
for user_entry in user_entries:
user = self.user_entry_to_dict(user_entry)
users.append(user)
return users
def run(self):
user_entries = self.search_users()
user_dicts = self.user_entries_to_dict(user_entries)
for ud in user_dicts:
defaults = {
"username": ud.get("username", None).strip(),
"email": ud.get("email", None)
}
if not defaults["username"] or not defaults["email"]:
continue
User.objects.get_or_create(defaults, username=defaults.get("username"))
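            # Seed default notification settings for the (possibly new) user:
            # local messages enabled, all external channels disabled.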
vars = {
"LOCAL": "ENABLE",
"EMAIL": "DISABLE",
"DINGTALK": "DISABLE",
"WORKWEIXIN": "DISABLE",
}
user = User.objects.get(username=defaults["username"])
UserNotificationConfig(vars=vars, user=user, type=Message.MESSAGE_TYPE_CLUSTER).save()
UserNotificationConfig(vars=vars, user=user, type=Message.MESSAGE_TYPE_SYSTEM).save()
vars2 = {
"EMAIL": user.email,
"DINGTALK": "",
"WORKWEIXIN": "",
}
UserReceiver(vars=vars2, user=user).save()
|
[
"ldap3.Connection",
"django.contrib.auth.models.User.objects.get",
"ldap3.Server",
"message_center.models.UserNotificationConfig",
"message_center.models.UserReceiver",
"kubeops_api.models.setting.Setting.get_settings"
] |
[((310, 342), 'kubeops_api.models.setting.Setting.get_settings', 'Setting.get_settings', ([], {'tab': '"""ldap"""'}), "(tab='ldap')\n", (330, 342), False, 'from kubeops_api.models.setting import Setting\n'), ((968, 1006), 'ldap3.Server', 'Server', (['self.server_uri'], {'use_ssl': '(False)'}), '(self.server_uri, use_ssl=False)\n', (974, 1006), False, 'from ldap3 import Server, Connection\n'), ((1022, 1074), 'ldap3.Connection', 'Connection', (['server', 'self.bind_dn', 'self.bind_password'], {}), '(server, self.bind_dn, self.bind_password)\n', (1032, 1074), False, 'from ldap3 import Server, Connection\n'), ((2932, 2979), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "defaults['username']"}), "(username=defaults['username'])\n", (2948, 2979), False, 'from django.contrib.auth.models import User\n'), ((2992, 3071), 'message_center.models.UserNotificationConfig', 'UserNotificationConfig', ([], {'vars': 'vars', 'user': 'user', 'type': 'Message.MESSAGE_TYPE_CLUSTER'}), '(vars=vars, user=user, type=Message.MESSAGE_TYPE_CLUSTER)\n', (3014, 3071), False, 'from message_center.models import UserNotificationConfig, UserReceiver, Message\n'), ((3091, 3169), 'message_center.models.UserNotificationConfig', 'UserNotificationConfig', ([], {'vars': 'vars', 'user': 'user', 'type': 'Message.MESSAGE_TYPE_SYSTEM'}), '(vars=vars, user=user, type=Message.MESSAGE_TYPE_SYSTEM)\n', (3113, 3169), False, 'from message_center.models import UserNotificationConfig, UserReceiver, Message\n'), ((3328, 3363), 'message_center.models.UserReceiver', 'UserReceiver', ([], {'vars': 'vars2', 'user': 'user'}), '(vars=vars2, user=user)\n', (3340, 3363), False, 'from message_center.models import UserNotificationConfig, UserReceiver, Message\n')]
|
__doc__ = """
docs_helper.py
--------------
| Small script that makes it easier to work with intersphinx; it takes config data from docs/conf.py
| :doc:`sphinx:usage/extensions/intersphinx`
:ref:`sphinx:xref-syntax`
| instead of: ``python -m sphinx.ext.intersphinx https://www.sphinx-doc.org/en/master/objects.inv``
| run ``python scripts/docs_helper.py`` and then pick the project you want.
Also wraps `sphobjinv <https://github.com/bskinn/sphobjinv>`_ to make searching easy.
"localhost" needs a webserver serving the HTML built by Sphinx,
so keep ``autobuild-html-docs`` running
.. todo::
have better support for finding port of autobuild-html-docs
use psutil?
"""
import sys
from pathlib import Path
import os
from urllib.parse import urljoin
import signal
def signal_SIGINT_handler(signal, frame):
"""This is to have a nicer printout on KeyboardInterrupt"""
print("\nGOT SIGINT(Probably KeyboardInterrupt), Quitting")
sys.exit(0)
def main():
project_path = Path(__file__).absolute().parents[1]
# we need this for import to work
sys.path.insert(0, str(project_path))
print("Intersphinx objects.inv printout")
print("intersphinx_mapping is from docs/conf.py")
from docs.conf import intersphinx_mapping
# add localhost to make easy to see self
intersphinx_mapping["localhost"] = ("http://127.0.0.1:8000/", None)
for i, doc in enumerate(intersphinx_mapping.keys()):
print(f"{i}) {doc}")
int_picker = int(input("Pick a number for docs: "))
picked_name = list(intersphinx_mapping.keys())[int_picker]
print(f"picked: {picked_name}\n")
type_picker = int(
input(
f"0) Print all from objects.inv from {picked_name}\n"
f"1) Search and suggest object\n"
f"Select mode: "
)
)
# the extra slash if it is missing in the config data
# and urljoin will fix the url for us
obj_inv_url = urljoin(intersphinx_mapping[picked_name][0] + "/", "objects.inv")
if type_picker:
print("--- sphobjinv ---")
search = input("Search: ")
cli = f"sphobjinv suggest {obj_inv_url} {search} -su"
else:
print("--- intersphinx ---")
cli = f"{sys.executable} -m sphinx.ext.intersphinx {obj_inv_url}"
os.system(cli)
    # todo: Change this printout to use triple quotes instead
print(
"--- Note ---\n"
"Please note the output from this tools\n"
"need to be changed to work as a cross-references\n"
"Exemple:\n"
":std:label:`thing_to_link` -> :ref:`thing_to_link`\n"
"or\n"
":std:label:`thing_to_link` -> :ref:`project_name:thing_to_link`\n\n"
":py:function:`that_func` -> :py:func:`that_func`\n"
"or\n"
":py:function:`that_func` -> :py:func:`project_name:that_func`\n"
"--- Note ---\n"
)
print(
"--- Links ---\n"
"Link for intersphinx cross-referencing tags\n"
"https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#xref-syntax\n" # noqa b950
"--- Links ---\n"
)
print(f"CLI:\n{cli}")
if __name__ == "__main__":
# activate signal
signal.signal(signal.SIGINT, signal_SIGINT_handler)
main()
|
[
"urllib.parse.urljoin",
"os.system",
"docs.conf.intersphinx_mapping.keys",
"pathlib.Path",
"signal.signal",
"sys.exit"
] |
[((957, 968), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (965, 968), False, 'import sys\n'), ((1946, 2011), 'urllib.parse.urljoin', 'urljoin', (["(intersphinx_mapping[picked_name][0] + '/')", '"""objects.inv"""'], {}), "(intersphinx_mapping[picked_name][0] + '/', 'objects.inv')\n", (1953, 2011), False, 'from urllib.parse import urljoin\n'), ((2292, 2306), 'os.system', 'os.system', (['cli'], {}), '(cli)\n', (2301, 2306), False, 'import os\n'), ((3195, 3246), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_SIGINT_handler'], {}), '(signal.SIGINT, signal_SIGINT_handler)\n', (3208, 3246), False, 'import signal\n'), ((1415, 1441), 'docs.conf.intersphinx_mapping.keys', 'intersphinx_mapping.keys', ([], {}), '()\n', (1439, 1441), False, 'from docs.conf import intersphinx_mapping\n'), ((1553, 1579), 'docs.conf.intersphinx_mapping.keys', 'intersphinx_mapping.keys', ([], {}), '()\n', (1577, 1579), False, 'from docs.conf import intersphinx_mapping\n'), ((1002, 1016), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1006, 1016), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('staff', '0008_auto_20180412_1515'),
]
operations = [
migrations.AlterField(
model_name='staff',
name='photo',
field=models.ImageField(default=b'/static/images/default_user.png', upload_to=b'staffs'),
),
migrations.AlterUniqueTogether(
name='attendance',
unique_together=set([('attendance_date', 'team')]),
),
]
|
[
"django.db.models.ImageField"
] |
[((348, 435), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': "b'/static/images/default_user.png'", 'upload_to': "b'staffs'"}), "(default=b'/static/images/default_user.png', upload_to=\n b'staffs')\n", (365, 435), False, 'from django.db import migrations, models\n')]
|
import tensorflow.contrib.slim as slim
from Projects.DeepLearningTechniques.ShakeNet.imagenet.constants import *
class Model:
def __init__(self, sess, width, height, channel, lr, dr, is_training, is_tb_logging, name):
self.sess = sess
self.width = width
self.height = height
self.channel = channel
self.lr = lr
self.dr = dr
self.is_training = is_training
self.is_tb_logging = is_tb_logging
self.name = name
self.weights_initializers = tf.contrib.layers.xavier_initializer(uniform=False)
self.weights_regularizers = tf.contrib.layers.l2_regularizer(scale=flags.FLAGS.l2_scale)
self.summary_values = []
self._build_graph()
def _build_graph(self):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
with tf.variable_scope(name_or_scope='input_scope'):
self.x = tf.placeholder(dtype=tf.float32, shape=[None, self.height, self.width, self.channel], name='x')
self.y = tf.placeholder(dtype=tf.int64, shape=[None], name='y')
with tf.variable_scope(name_or_scope='body_scope'):
layer = self.conv2d(inputs=self.x, filters=32, kernel_size=3, strides=2, name='conv2d_0')
layer = self.batch_norm(inputs=layer, name='conv2d_0_batch')
layer = self.inverted_bottleneck(inputs=layer, filters=16, strides=1, repeat=1, factor=1, name='bottleneck_1')
layer = self.inverted_bottleneck(inputs=layer, filters=24, strides=2, repeat=2, factor=4, name='bottleneck_2')
layer = self.inverted_bottleneck(inputs=layer, filters=32, strides=2, repeat=3, factor=4, name='bottleneck_3')
layer = self.inverted_bottleneck(inputs=layer, filters=64, strides=2, repeat=4, factor=4, name='bottleneck_4')
layer = self.inverted_bottleneck(inputs=layer, filters=96, strides=1, repeat=1, factor=4, name='bottleneck_5')
layer = self.inverted_bottleneck(inputs=layer, filters=160, strides=2, repeat=3, factor=6, name='bottleneck_6')
layer = self.inverted_bottleneck(inputs=layer, filters=320, strides=1, repeat=1, factor=6, name='bottleneck_7')
if self.is_tb_logging:
self.summary_values.append(tf.summary.histogram('bottleneck_module', layer))
layer = self.conv2d(inputs=layer, filters=1280, name='conv2d_8')
layer = self.batch_norm(inputs=layer, name='conv2d_8_batch')
self.cam_layer = layer
layer = self.dropout(inputs=layer, rate=flags.FLAGS.dropout_rate, name='conv2d_8_dropout')
layer = tf.layers.average_pooling2d(inputs=layer, pool_size=7, strides=1, name='conv2d_8_avg_pool')
layer = self.conv2d(inputs=layer, filters=flags.FLAGS.image_class, name='conv2d_8_output')
self.logits = tf.squeeze(input=layer, axis=[1, 2], name='logits')
with tf.variable_scope(name_or_scope='output_scope'):
self.variables = [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if self.name in var.name]
self.prob = tf.nn.softmax(logits=self.logits, name='softmax')
self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y, name='ce_loss'))
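                # Total loss = cross-entropy plus this model's regularization terms.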
self.loss = tf.add_n([self.loss] +
[var for var in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) if self.name in var.name], name='tot_loss')
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, -1), self.y), dtype=tf.float32))
if self.is_tb_logging:
self.summary_values.append(tf.summary.scalar('loss', self.loss))
self.summary_values.append(tf.summary.scalar('accuracy', self.accuracy))
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
update_opt = [var for var in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if self.name in var.name]
with tf.control_dependencies(update_opt):
self.train_op = self.optimizer.minimize(self.loss, var_list=self.variables)
if self.is_tb_logging:
self.summary_merged_values = tf.summary.merge(inputs=self.summary_values)
def batch_norm(self, inputs, act=tf.nn.relu6, name='batch_norm_layer'):
'''
Batch Normalization
        - scale=True: use the scale factor (gamma)
        - center=True: use the shift factor (beta)
'''
with tf.variable_scope(name_or_scope=name):
return tf.contrib.layers.batch_norm(inputs=inputs, decay=0.9, center=True, scale=True, fused=True,
updates_collections=tf.GraphKeys.UPDATE_OPS, activation_fn=act,
is_training=self.is_training, scope='batch_norm')
def conv2d(self, inputs, filters, kernel_size=1, strides=1, padding='same', act=tf.identity, name='conv2d_layer'):
return tf.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=padding, activation=act,
kernel_initializer=self.weights_initializers,
bias_initializer=self.weights_initializers,
kernel_regularizer=self.weights_regularizers,
bias_regularizer=self.weights_regularizers,
name=name)
def dropout(self, inputs, rate, name):
with tf.variable_scope(name_or_scope=name):
return tf.layers.dropout(inputs=inputs, rate=rate, training=self.is_training, name='dropout')
def depthwise_conv2d(self, inputs, kernel_size=3, strides=2, padding='SAME', depth_multiplier=1, name=None):
layer = slim.separable_conv2d(inputs=inputs, num_outputs=None, kernel_size=kernel_size, activation_fn=tf.identity,
weights_initializer=self.weights_initializers, weights_regularizer=self.weights_regularizers,
depth_multiplier=depth_multiplier, stride=strides, padding=padding, scope=name)
return layer
def inverted_bottleneck(self, inputs, filters, strides, repeat, factor, name=None):
def _mobilenet_block(inputs, input_filters, output_filters, strides, name):
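            # Expand (1x1 conv by `factor`) -> depthwise 3x3 -> linear 1x1
            # projection: the MobileNetV2-style inverted residual block.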
with tf.variable_scope(name_or_scope=name):
layer = self.conv2d(inputs=inputs, filters=input_filters * factor, name='bottleneck_layer')
layer = self.batch_norm(inputs=layer, name='bottleneck_batch')
layer = self.depthwise_conv2d(inputs=layer, strides=strides, name='depthwise_layer')
layer = self.batch_norm(inputs=layer, name='depthwise_batch')
layer = self.conv2d(inputs=layer, filters=output_filters, name='linear_layer')
layer = self.batch_norm(inputs=layer, act=tf.identity, name='linear_batch')
return layer
prev_layer = inputs
input_filters = inputs.get_shape().as_list()[-1]
with tf.variable_scope(name_or_scope=name):
for idx in range(repeat):
layer = _mobilenet_block(inputs=prev_layer, input_filters=input_filters, output_filters=filters,
strides=strides, name='mobilenet_block_{}'.format(idx))
                '''Skip the shortcut connection when the first layer inside inverted_bottleneck uses strides=2'''
if idx != 0 and strides != 2:
if prev_layer.get_shape().as_list()[-1] != layer.get_shape().as_list()[-1]:
prev_layer = self.conv2d(inputs=prev_layer, filters=filters, name='residual_match_{}'.format(idx))
layer = tf.add(prev_layer, layer, name='residual_add_{}'.format(idx))
                '''Excluding the last repeat step'''
if idx != repeat-1:
strides = 1
prev_layer = layer
return layer
def train(self, x, y):
if self.is_tb_logging:
return self.sess.run([self.accuracy, self.loss, self.summary_merged_values, self.train_op], feed_dict={self.x: x, self.y: y})
else:
return self.sess.run([self.accuracy, self.loss, self.train_op], feed_dict={self.x: x, self.y: y})
def validation(self, x, y):
return self.sess.run([self.accuracy, self.loss, self.prob], feed_dict={self.x: x, self.y: y})
def test(self, x, y):
return self.sess.run([self.accuracy, self.loss, self.prob], feed_dict={self.x: x, self.y: y})
|
[
"tensorflow.contrib.slim.separable_conv2d"
] |
[((6029, 6327), 'tensorflow.contrib.slim.separable_conv2d', 'slim.separable_conv2d', ([], {'inputs': 'inputs', 'num_outputs': 'None', 'kernel_size': 'kernel_size', 'activation_fn': 'tf.identity', 'weights_initializer': 'self.weights_initializers', 'weights_regularizer': 'self.weights_regularizers', 'depth_multiplier': 'depth_multiplier', 'stride': 'strides', 'padding': 'padding', 'scope': 'name'}), '(inputs=inputs, num_outputs=None, kernel_size=\n kernel_size, activation_fn=tf.identity, weights_initializer=self.\n weights_initializers, weights_regularizer=self.weights_regularizers,\n depth_multiplier=depth_multiplier, stride=strides, padding=padding,\n scope=name)\n', (6050, 6327), True, 'import tensorflow.contrib.slim as slim\n')]
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from neon import logger as neon_logger
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser
from examples.np_semantic_segmentation.data import NpSemanticSegData, absolute_path
from nlp_architect.models.np_semantic_segmentation import NpSemanticSegClassifier
from nlp_architect.utils.io import validate_existing_filepath, validate_parent_exists
def train_mlp_classifier(dataset, model_file_path, num_epochs, callback_args):
"""
Train the np_semantic_segmentation mlp classifier
Args:
model_file_path (str): model path
num_epochs (int): number of epochs
callback_args (dict): callback_arg
dataset: NpSemanticSegData object containing the dataset
Returns:
        Prints error_rate, test_accuracy_rate and precision_recall_rate from evaluating the model
"""
model = NpSemanticSegClassifier(num_epochs, callback_args)
model.build()
# run fit
model.fit(dataset.test_set, dataset.train_set)
# save model params
model.save(model_file_path)
# set evaluation error rates
error_rate, test_accuracy_rate, precision_recall_rate = model.eval(dataset.test_set)
neon_logger.display('Misclassification error = %.1f%%' %
(error_rate * 100))
neon_logger.display('Test accuracy rate = %.1f%%' %
(test_accuracy_rate * 100))
neon_logger.display('precision rate = %s!!' %
(str(precision_recall_rate[0])))
neon_logger.display('recall rate = %s!!' %
(str(precision_recall_rate[1])))
if __name__ == "__main__":
# parse the command line arguments
parser = NeonArgparser()
parser.set_defaults(epochs=200)
parser.add_argument('--data', type=validate_existing_filepath,
help='Path to the CSV file where the prepared dataset is saved')
parser.add_argument('--model_path', type=validate_parent_exists,
help='Path to save the model')
args = parser.parse_args()
data_path = absolute_path(args.data)
model_path = absolute_path(args.model_path)
# generate backend
be = gen_backend(batch_size=64)
# load data sets from file
data_set = NpSemanticSegData(data_path, train_to_test_ratio=0.8)
# train the mlp classifier
train_mlp_classifier(data_set, model_path, args.epochs, args.callback_args)
|
[
"neon.util.argparser.NeonArgparser",
"neon.backends.gen_backend",
"nlp_architect.models.np_semantic_segmentation.NpSemanticSegClassifier",
"examples.np_semantic_segmentation.data.NpSemanticSegData",
"examples.np_semantic_segmentation.data.absolute_path",
"neon.logger.display"
] |
[((1603, 1653), 'nlp_architect.models.np_semantic_segmentation.NpSemanticSegClassifier', 'NpSemanticSegClassifier', (['num_epochs', 'callback_args'], {}), '(num_epochs, callback_args)\n', (1626, 1653), False, 'from nlp_architect.models.np_semantic_segmentation import NpSemanticSegClassifier\n'), ((1919, 1995), 'neon.logger.display', 'neon_logger.display', (["('Misclassification error = %.1f%%' % (error_rate * 100))"], {}), "('Misclassification error = %.1f%%' % (error_rate * 100))\n", (1938, 1995), True, 'from neon import logger as neon_logger\n'), ((2024, 2103), 'neon.logger.display', 'neon_logger.display', (["('Test accuracy rate = %.1f%%' % (test_accuracy_rate * 100))"], {}), "('Test accuracy rate = %.1f%%' % (test_accuracy_rate * 100))\n", (2043, 2103), True, 'from neon import logger as neon_logger\n'), ((2420, 2435), 'neon.util.argparser.NeonArgparser', 'NeonArgparser', ([], {}), '()\n', (2433, 2435), False, 'from neon.util.argparser import NeonArgparser\n'), ((2799, 2823), 'examples.np_semantic_segmentation.data.absolute_path', 'absolute_path', (['args.data'], {}), '(args.data)\n', (2812, 2823), False, 'from examples.np_semantic_segmentation.data import NpSemanticSegData, absolute_path\n'), ((2841, 2871), 'examples.np_semantic_segmentation.data.absolute_path', 'absolute_path', (['args.model_path'], {}), '(args.model_path)\n', (2854, 2871), False, 'from examples.np_semantic_segmentation.data import NpSemanticSegData, absolute_path\n'), ((2904, 2930), 'neon.backends.gen_backend', 'gen_backend', ([], {'batch_size': '(64)'}), '(batch_size=64)\n', (2915, 2930), False, 'from neon.backends import gen_backend\n'), ((2977, 3030), 'examples.np_semantic_segmentation.data.NpSemanticSegData', 'NpSemanticSegData', (['data_path'], {'train_to_test_ratio': '(0.8)'}), '(data_path, train_to_test_ratio=0.8)\n', (2994, 3030), False, 'from examples.np_semantic_segmentation.data import NpSemanticSegData, absolute_path\n')]
|
""" Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.http import HttpResponse, HttpResponseNotFound
from spacescout_web.spot import SpotImage, SpotException
from spacescout_web.middleware.unpatch_vary import unpatch_vary_headers
def ImageView(request, spot_id, image_id, thumb_width=None, thumb_height=None, constrain=False):
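    # Fetch the image for this spot (optionally constrained to thumbnail
    # dimensions) and stream it back with its original content type.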
try:
image = SpotImage(spot_id, request=request)
contenttype, img = image.get(image_id, constrain, thumb_width, thumb_height)
except SpotException as ex:
return HttpResponse(status=ex.status_code)
else:
response = HttpResponse(img, content_type=contenttype)
# Remove some headers that don't vary for images
unpatch_vary_headers(response, ['Cookie', 'X-Mobile', 'Accept-Language', 'User-Agent'])
return response
def MultiImageView(request, spot_id=None, image_ids=None, thumb_width=None, thumb_height=None, constrain=False):
try:
image = SpotImage(spot_id, request=request)
headers, img = image.get_multi(image_ids, constrain, thumb_width, thumb_height)
except SpotException as ex:
return HttpResponse(status=ex.status_code)
else:
response = HttpResponse(img)
response['Content-Type'] = headers['Content-Type']
response['Sprite-Offsets'] = headers['Sprite-Offsets']
# Remove some headers that don't vary for images
unpatch_vary_headers(response, ['Cookie', 'X-Mobile', 'Accept-Language', 'User-Agent'])
return response
|
[
"spacescout_web.spot.SpotImage",
"spacescout_web.middleware.unpatch_vary.unpatch_vary_headers",
"django.http.HttpResponse"
] |
[((952, 987), 'spacescout_web.spot.SpotImage', 'SpotImage', (['spot_id'], {'request': 'request'}), '(spot_id, request=request)\n', (961, 987), False, 'from spacescout_web.spot import SpotImage, SpotException\n'), ((1185, 1228), 'django.http.HttpResponse', 'HttpResponse', (['img'], {'content_type': 'contenttype'}), '(img, content_type=contenttype)\n', (1197, 1228), False, 'from django.http import HttpResponse, HttpResponseNotFound\n'), ((1294, 1385), 'spacescout_web.middleware.unpatch_vary.unpatch_vary_headers', 'unpatch_vary_headers', (['response', "['Cookie', 'X-Mobile', 'Accept-Language', 'User-Agent']"], {}), "(response, ['Cookie', 'X-Mobile', 'Accept-Language',\n 'User-Agent'])\n", (1314, 1385), False, 'from spacescout_web.middleware.unpatch_vary import unpatch_vary_headers\n'), ((1546, 1581), 'spacescout_web.spot.SpotImage', 'SpotImage', (['spot_id'], {'request': 'request'}), '(spot_id, request=request)\n', (1555, 1581), False, 'from spacescout_web.spot import SpotImage, SpotException\n'), ((1782, 1799), 'django.http.HttpResponse', 'HttpResponse', (['img'], {}), '(img)\n', (1794, 1799), False, 'from django.http import HttpResponse, HttpResponseNotFound\n'), ((1987, 2078), 'spacescout_web.middleware.unpatch_vary.unpatch_vary_headers', 'unpatch_vary_headers', (['response', "['Cookie', 'X-Mobile', 'Accept-Language', 'User-Agent']"], {}), "(response, ['Cookie', 'X-Mobile', 'Accept-Language',\n 'User-Agent'])\n", (2007, 2078), False, 'from spacescout_web.middleware.unpatch_vary import unpatch_vary_headers\n'), ((1120, 1155), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': 'ex.status_code'}), '(status=ex.status_code)\n', (1132, 1155), False, 'from django.http import HttpResponse, HttpResponseNotFound\n'), ((1717, 1752), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': 'ex.status_code'}), '(status=ex.status_code)\n', (1729, 1752), False, 'from django.http import HttpResponse, HttpResponseNotFound\n')]
|
from add_function import add
def test_add_zeros():
result = add(0, 0)
assert result == 0
def test_add_one_and_zero():
result = add(1, 0)
assert result == 1
def test_add_zero_and_one():
result = add(0, 1)
assert result == 1
def test_add_one_and_one():
result = add(1, 1)
assert result == 2
def test_add_zero_and_minus_one():
result = add(0, -1)
assert result == -1
def test_add_minus_one_and_zero():
result = add(-1, 0)
assert result == -1
def test_add_minus_one_and_minus_one():
result = add(-1, -1)
assert result == -2
|
[
"add_function.add"
] |
[((65, 74), 'add_function.add', 'add', (['(0)', '(0)'], {}), '(0, 0)\n', (68, 74), False, 'from add_function import add\n'), ((141, 150), 'add_function.add', 'add', (['(1)', '(0)'], {}), '(1, 0)\n', (144, 150), False, 'from add_function import add\n'), ((217, 226), 'add_function.add', 'add', (['(0)', '(1)'], {}), '(0, 1)\n', (220, 226), False, 'from add_function import add\n'), ((292, 301), 'add_function.add', 'add', (['(1)', '(1)'], {}), '(1, 1)\n', (295, 301), False, 'from add_function import add\n'), ((374, 384), 'add_function.add', 'add', (['(0)', '(-1)'], {}), '(0, -1)\n', (377, 384), False, 'from add_function import add\n'), ((458, 468), 'add_function.add', 'add', (['(-1)', '(0)'], {}), '(-1, 0)\n', (461, 468), False, 'from add_function import add\n'), ((547, 558), 'add_function.add', 'add', (['(-1)', '(-1)'], {}), '(-1, -1)\n', (550, 558), False, 'from add_function import add\n')]
|
# Generated by Django 3.1.7 on 2021-04-29 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("main_app", "0013_notfication"),
]
operations = [
migrations.AlterField(
model_name="notfication",
name="notification_name",
field=models.CharField(default="", max_length=50),
),
]
|
[
"django.db.models.CharField"
] |
[((346, 389), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(50)'}), "(default='', max_length=50)\n", (362, 389), False, 'from django.db import migrations, models\n')]
|
import imp
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
from docutils.readers import standalone
from docutils.core import Publisher, default_description, \
default_usage
import quicktest
from types import ModuleType
#rst2odp = ModuleType('rst2odp')
#exec open('../bin/rst2odp') in rst2odp.__dict__
try:
rst2odp = imp.load_source('rst2odp', '../bin/rst2odp')
except IOError:
rst2odp = imp.load_source('rst2odp', 'bin/rst2odp')
from odplib import preso, zipwrap
class TestRegressions(unittest.TestCase):
def _to_odp_content(self, rst, xml_filename, odp_name='/tmp/out'):
reader = standalone.Reader()
reader_name = 'standalone'
writer = rst2odp.Writer()
writer_name = 'pseudoxml'
parser = None
parser_name = 'restructuredtext'
settings = None
settings_spec = None
settings_overrides = None
config_section = None
enable_exit_status = 1
usage = default_usage
publisher = Publisher(reader, parser, writer,# source=StringIO(rst),
settings=settings,
destination_class=rst2odp.BinaryFileOutput)
publisher.set_components(reader_name, parser_name, writer_name)
description = ('Generates OpenDocument/OpenOffice/ODF slides from '
'standalone reStructuredText sources. ' + default_description)
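        # Drive docutils through its command-line path: write the rst source to
        # a temp file and publish via argv, mirroring how rst2odp is invoked.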
fin = open('/tmp/in.rst', 'w')
fin.write(rst)
fin.close()
argv = ['--traceback', '/tmp/in.rst', odp_name]
output = publisher.publish(argv, usage, description, settings_spec, settings_overrides, config_section=config_section, enable_exit_status=enable_exit_status)
# pull content.xml out of /tmp/out
z = zipwrap.Zippier(odp_name)
fout = open(xml_filename, 'w')
content = preso.pretty_xml(z.cat('content.xml'))
fout.write(content)
fout.close()
return content
def check_output(self, rst, desired, filename='/tmp/foo.xml', outname='/tmp/out'):
content = self._to_odp_content(rst, filename, odp_name=outname)
tree = self.get_tree(rst)
self.assertTrue(_contains_lines(content, desired), "%s should have %s \nTree: %s" %(content, desired, tree))
def get_tree(self, rst):
parser = quicktest.Parser()
input = rst
source_path='test file'
settings = quicktest.OptionParser(components=(quicktest.Parser,)).get_default_values()
document = quicktest.new_document(source_path, settings)
parser.parse(input, document)
format = 'pretty'
optargs = {'debug': 0, 'attributes': 0}
output = quicktest.format(format, input, document, optargs)
return output
def test_basic(self):
rst = """
Title
-----
hello world
"""
desired = """<text:p text:style-name="P1">hello world</text:p>"""
self.check_output(rst, desired, outname='/tmp/basic.odp')
def test_link(self):
rst = """
Title
-----
https://github.com/talkpython/illustrated-python-3-course
"""
desired = """ <draw:text-box>
<text:p text:style-name="P1">https://github.com/talkpython/illustrated-python-3-course</text:p>
</draw:text-box>"""
self.check_output(rst, desired, outname='/tmp/link.odp')
def test_2_paragraphs(self):
rst = """
2 para
-------
Hello
World
"""
desired = """<text:p text:style-name="P1">Hello</text:p>
<text:p text:style-name="P1">World</text:p>"""
self.check_output(rst, desired, '/tmp/2para.xml', '/tmp/2para.odp')
def test_mono_block(self):
rst = """
From script
------------
Make file ``hello.py`` with ::
print "hello world"
Run with::
python hello.py
"""
desired='''<text:p text:style-name="P1">
Make file
<text:s/>
<text:span text:style-name="T0">hello.py</text:span>
with
</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T0">
print "hello world"
<text:line-break/>
</text:span>
</text:p>'''
self.check_output(rst, desired, '/tmp/monoblock.xml', outname='/tmp/monoblock.odp')
def tes2t_code_block(self):
rst = """
``id``
--------
.. code-block:: pycon
>>> a = 4
>>> id(a)
"""
desired='bad'
self.check_output(rst, desired, '/tmp/code.xml')
def te2st_code_block_with_space(self):
rst = """
``id``
--------
.. code-block:: pycon
>>> a = 4
>>> id(a)
"""
desired='bad'
self.check_output(rst, desired, '/tmp/code.xml')
def test_vert_spacing_42(self):
rst = '''Doc attribute exploration
=========================
:Author: <NAME>
:Email: <EMAIL>
:Institute: Data Science Institute, ICL
:Date: 2017-11-30
:Twitter: @mehere
:Organisation: My university
:Tagline: Only connect
.. email address is rendered as a hyperlinked "<EMAIL>"
'''
desired = '''<draw:text-box>
<text:p text:style-name="P0">
<text:span text:style-name="T0"><NAME></text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0"><EMAIL></text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Data Science Institute, ICL</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">2017-11-30</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">@mehere</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">My university</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Only connect</text:span>
</text:p>
</draw:text-box>'''
self.check_output(rst, desired, '/tmp/code.xml')
def test_email_43(self):
rst = """Doc attribute exploration
=========================
:Author: <NAME>
:Email: <EMAIL>
:Institute: Data Science Institute, ICL
:Date: 2017-11-30
.. email address is rendered as a hyperlinked "<EMAIL>"
"""
desired = '''<draw:text-box>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Ann Author</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0"><EMAIL></text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Data Science Institute, ICL</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">2017-11-30</text:span>
</text:p>
</draw:text-box>'''
self.check_output(rst, desired, '/tmp/code.xml')
def test_from_script(self):
rst = """From script
------------
Make file ``hello.py`` with::
print("hello world")
Run with:
.. code-block:: console
$ python3 hello.py
"""
desired = '''<draw:text-box>
<text:p text:style-name="P1">
Make file
<text:s/>
<text:span text:style-name="T0">hello.py</text:span>
with:
</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T0">
print("hello world")
<text:line-break/>
</text:span>
</text:p>
<text:p text:style-name="P1">Run with:</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T1">$</text:span>
<text:span text:style-name="T0">
<text:s/>
python3
<text:s/>
hello.py
<text:line-break/>
</text:span>
</text:p>
</draw:text-box>'''
self.check_output(rst, desired, '/tmp/code.xml')
def test_textbox_with_size(self):
rst = """
Who Created Python?
-------------------
.. grid:: 2,2x1
Python was created by Dutch programmer <NAME> in 1989.
He wanted to create a tool to allow for easy scripting
.. class:: font-size:8pt
.. textbox:: {"x": "2cm", "y": "18.2cm", "width": "25cm"}
Image via https://en.wikipedia.org/wiki/Guido_van_Rossum
"""
desired = 'foo'
self.check_output(rst, desired, '/tmp/code.xml')
def test_normal_sized_styled_before_code(self):
rst ="""
txt before code
----------------
.. class:: normal
foo
.. class:: normal
.. code-block:: python
a = 3
"""
desired='''<text:p text:style-name="P1">
<text:span text:style-name="T1">
a
<text:s/>
</text:span>
<text:span text:style-name="T3">=</text:span>
<text:span text:style-name="T1">
<text:s/>
</text:span>
<text:span text:style-name="T3">3</text:span>
<text:span text:style-name="T1">
<text:line-break/>
</text:span>
</text:p>'''
self.check_output(rst, desired, '/tmp/code.xml')
def te2st_styled_before_code(self):
rst ="""
txt before code
----------------
.. class:: large
foo
.. class:: large
.. code-block:: python
a = 3
"""
desired='''<text:p text:style-name="P1">
foo
</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T0">
a
<text:s/>
</text:span>
<text:span text:style-name="T1">
=
</text:span>
<text:span text:style-name="T0">
<text:s/>
</text:span>
<text:span text:style-name="T1">
3
</text:span>
<text:span text:style-name="T0">
<text:line-break/>
</text:span>
</text:p>
'''
self.check_output(rst, desired, '/tmp/code2.xml')
def _contains_lines(haystack, needle, ignore_whitespace=True):
"""
>>> _contains_lines(range(4), range(1,2))
True
>>> _contains_lines(range(4), range(1,5))
False
"""
if isinstance(haystack, str):
haystack = haystack.split('\n')
if isinstance(needle, str):
needle = needle.split('\n')
if ignore_whitespace:
haystack = [str(x).strip() for x in haystack]
needle = [str(x).strip() for x in needle]
for i, line in enumerate(haystack):
if needle[0] == line and haystack[i:i+len(needle)] == needle:
return True
return False
if __name__ == '__main__':
unittest.main()
# import doctest
# doctest.testmod()
|
[
"unittest.main",
"quicktest.new_document",
"quicktest.Parser",
"odplib.zipwrap.Zippier",
"imp.load_source",
"quicktest.format",
"docutils.readers.standalone.Reader",
"docutils.core.Publisher",
"quicktest.OptionParser"
] |
[((393, 437), 'imp.load_source', 'imp.load_source', (['"""rst2odp"""', '"""../bin/rst2odp"""'], {}), "('rst2odp', '../bin/rst2odp')\n", (408, 437), False, 'import imp\n'), ((11257, 11272), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11270, 11272), False, 'import unittest\n'), ((468, 509), 'imp.load_source', 'imp.load_source', (['"""rst2odp"""', '"""bin/rst2odp"""'], {}), "('rst2odp', 'bin/rst2odp')\n", (483, 509), False, 'import imp\n'), ((677, 696), 'docutils.readers.standalone.Reader', 'standalone.Reader', ([], {}), '()\n', (694, 696), False, 'from docutils.readers import standalone\n'), ((1061, 1162), 'docutils.core.Publisher', 'Publisher', (['reader', 'parser', 'writer'], {'settings': 'settings', 'destination_class': 'rst2odp.BinaryFileOutput'}), '(reader, parser, writer, settings=settings, destination_class=\n rst2odp.BinaryFileOutput)\n', (1070, 1162), False, 'from docutils.core import Publisher, default_description, default_usage\n'), ((1836, 1861), 'odplib.zipwrap.Zippier', 'zipwrap.Zippier', (['odp_name'], {}), '(odp_name)\n', (1851, 1861), False, 'from odplib import preso, zipwrap\n'), ((2388, 2406), 'quicktest.Parser', 'quicktest.Parser', ([], {}), '()\n', (2404, 2406), False, 'import quicktest\n'), ((2574, 2619), 'quicktest.new_document', 'quicktest.new_document', (['source_path', 'settings'], {}), '(source_path, settings)\n', (2596, 2619), False, 'import quicktest\n'), ((2749, 2799), 'quicktest.format', 'quicktest.format', (['format', 'input', 'document', 'optargs'], {}), '(format, input, document, optargs)\n', (2765, 2799), False, 'import quicktest\n'), ((2478, 2532), 'quicktest.OptionParser', 'quicktest.OptionParser', ([], {'components': '(quicktest.Parser,)'}), '(components=(quicktest.Parser,))\n', (2500, 2532), False, 'import quicktest\n')]
|
#!python
# 2-D mesh of the PB-FHR reactor, consisting of the core and reflectors
import numpy as np
import sys
import os
# append path to module so cubit can find it
sys.path.append(os.getcwd())
import pbfhr as fhr
# whether to include orificing along the outflow boundary
plenum = True
# meshing scheme
scheme = 'Map'
# approximate element size
# dx = 0.06
cubit.cmd('reset')
################################################################################
# Surface (volume) definitions
################################################################################
# Current index and dictionary of indices
index = 1
ind = {}
####
# Inner reflector (surface 1, block 1)
# Create left most vertices
cubit.cmd('create vertex 0.01 ' + str(fhr.geometry['inner_cr_channel']['z'][0]) + ' 0')
cubit.cmd('create vertex 0.01 ' + str(fhr.geometry['inner_cr_channel']['z'][-1]) + ' 0')
ind['center_axis'] = index
index += 2
# Create vertices defining the inner reflector
ind['inner_reflector'] = index
for i in [0, fhr.geometry['inner_cr_channel']['n_vertices'] - 1]: #range(fhr.geometry['inner_cr_channel']['n_vertices']):
cubit.cmd('create vertex ' + str(fhr.geometry['inner_cr_channel']['r'][i]) + ' ' + str(fhr.geometry['inner_cr_channel']['z'][i]) + ' 0')
index += 1
# Create surface from those vertices
vertices = "4 3 1 2"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Control rod channel (surface 2, block 2)
# Create vertices at the right of the channel
cubit.cmd('create vertex 0.35 ' + str(fhr.geometry['inner_cr_channel']['z'][0]) + ' 0')
cubit.cmd('create vertex 0.35 ' + str(fhr.geometry['inner_cr_channel']['z'][-1]) + ' 0')
ind['inner_cr_channel'] = index
index += 2
# Create vertices where the channel touches the active region, as well as the other part of the reflector
for i in range(fhr.geometry['inner_radius']['n_vertices']):
cubit.cmd('create vertex ' + str(fhr.geometry['inner_radius']['r'][i]) + ' ' + str(fhr.geometry['inner_radius']['z'][i]) + ' 0')
index += fhr.geometry['inner_radius']['n_vertices']
# Make the control rod channel surface
vertices = "3 4 6 12 11 10 5"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Inner reflector outside of the control rod channel (surface 3 & 4, block 1)
# Bottom part
vertices = "10 5 7 8 9"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
# Top part
vertices = "14 13 12 6"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Fueled active region (surface 5, block 3)
# Create the vertices between the fuel and the reflector pebbles
ind['active_region'] = index
for i in range(fhr.geometry['middle_radius']['n_vertices']):
cubit.cmd('create vertex ' + str(fhr.geometry['middle_radius']['r'][i]) + ' ' + str(fhr.geometry['middle_radius']['z'][i]) + ' 0')
index += fhr.geometry['middle_radius']['n_vertices']
# create surface from vertices defining the active region
vertices = "7 8 16 15"
cubit.cmd('create surface vertex ' + vertices)
vertices = "8 9 17 16"
cubit.cmd('create surface vertex ' + vertices)
vertices = "9 10 18 17"
cubit.cmd('create surface vertex ' + vertices)
vertices = "10 11 19 18"
cubit.cmd('create surface vertex ' + vertices)
vertices = "11 12 20 19"
cubit.cmd('create surface vertex ' + vertices)
vertices = "12 13 21 20"
cubit.cmd('create surface vertex ' + vertices)
vertices = "13 14 22 21"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('compress all')
cubit.cmd('merge all')
# ####
# # Reflector/blanket pebbles region (surface 6, block 4)
ind['pebble_reflector'] = index
for i in range(fhr.geometry['outer_radius']['n_vertices']):
cubit.cmd('create vertex ' + str(fhr.geometry['outer_radius']['r'][i]) + ' ' + str(fhr.geometry['outer_radius']['z'][i]) + ' 0')
index += fhr.geometry['outer_radius']['n_vertices']
cubit.cmd('compress all')
cubit.cmd('merge all')
vertices = "15 16 24 23"
cubit.cmd('create surface vertex ' + vertices)
vertices = "16 17 18 19 25 24"
cubit.cmd('create surface vertex ' + vertices)
vertices = "19 20 26 25"
cubit.cmd('create surface vertex ' + vertices)
if (not plenum):
vertices = "20 21 27 26"
else:
cubit.cmd('create vertex 0.94333 4.42014 0')
cubit.cmd('compress all')
cubit.cmd('merge all')
index += 1
vertices = "26 39 27 21 20" # should be 29!
cubit.cmd('create surface vertex ' + vertices)
vertices = "21 22 28 27"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('compress all')
cubit.cmd('merge all')
# ####
# # Plenum
if plenum:
# split the curve on the right to permit different boundary condition specifications.
# First we need to split the curve according which part of the boundary is outflow.
# cubit.cmd('split curve 40 fraction ' + str(1.0 - fhr.bcs['outflow_h_fraction']))
# cubit.cmd('merge all')
# cubit.cmd('compress all')
#
# Create the rest of the plenum outlet
#TODO Create orifices for the connection to the plenum
#TODO Add a component limiting flow from the plenum to reflector
# We have not found specifications, so all of this is assumed
cubit.cmd('create vertex 1.05 4.47017 0')
cubit.cmd('create vertex 1.05 5.3125 0')
cubit.cmd('create vertex 1.25 5.3125 0')
cubit.cmd('create vertex 1.25 4.47017 0')
cubit.cmd('compress all')
cubit.cmd('merge all')
#
index += 4
vertices = '29 30 33 26'
cubit.cmd('create surface vertex ' + vertices)
vertices = '30 31 32 33'
cubit.cmd('compress all')
cubit.cmd('merge all')
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('compress all')
cubit.cmd('merge all')
# ####
# # Outer reflector surface (surface 7, block 5)
#
# # create vertices defining the outer reflector and surface
ind['outer_reflector'] = index
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['inner_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['inner_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
index += 2
cubit.cmd('compress all')
cubit.cmd('merge all')
if not plenum:
vertices = "28 27 26 25 24 23 30 29"
cubit.cmd('create surface vertex ' + vertices)
else:
# Between defueling chute and plenum
vertices = "28 27 29 30 31"
cubit.cmd('create surface vertex ' + vertices)
# Outside of plenum
vertices = "35 34 32 33 26 25 24 23"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Core barrel (surface 8, block 6)
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['core_barrel'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['outer_reflector']+1)+' '+str(ind['outer_reflector'])+' '+\
str(ind['core_barrel'])+' '+str(ind['core_barrel']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Downcomer (surface 9, block 7)
cubit.cmd('create vertex ' + str(fhr.geometry['downcomer']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['downcomer']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['downcomer'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['core_barrel']+1)+' '+str(ind['core_barrel'])+' '+\
str(ind['downcomer'])+' '+str(ind['downcomer']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Vessel (surface 10, block 8)
cubit.cmd('create vertex ' + str(fhr.geometry['vessel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['vessel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['vessel'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['downcomer']+1)+' '+str(ind['downcomer'])+' '+\
str(ind['vessel'])+' '+str(ind['vessel']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Fire bricks (surface 11, block 9)
cubit.cmd('create vertex ' + str(fhr.geometry['bricks']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['bricks']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['bricks'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['vessel']+1)+' '+str(ind['vessel'])+' '+\
str(ind['bricks'])+' '+str(ind['bricks']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
################################################################################
# Mesh generation
################################################################################
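# Global refinement multiplier: every interval count below scales with it.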
refinement = 1
# Set intervals for each curve
# Outlet regions
cubit.cmd('curve 16 30 interval '+str(4*refinement))
cubit.cmd('curve 40 interval '+str(3*refinement))
cubit.cmd('curve 41 interval '+str(refinement))
cubit.cmd('curve 15 32 43 interval '+str(5*refinement))
# Main pebble region
cubit.cmd('curve 7 28 39 interval '+str(10*refinement))
# Inlet regions
cubit.cmd('curve 8 26 interval '+str(2*refinement))
cubit.cmd('curve 14 24 interval '+str(refinement))
cubit.cmd('curve 13 22 interval '+str(refinement))
cubit.cmd('curve 12 19 interval '+str(3*refinement))
# Plenum bottom
cubit.cmd('curve 45 interval '+str(refinement))
cubit.cmd('curve 47 interval '+str(2*refinement))
# cubit.cmd('surface 17 size '+str(0.02/refinement**2))
# Plenum top
cubit.cmd('curve 48 50 interval '+str(6*refinement))
cubit.cmd('curve 46 49 interval '+str(2*refinement))
# Inner reflector side
# cubit.cmd('curve 6 interval 5')
# cubit.cmd('curve 1 3 interval 30')
# cubit.cmd('curve 9 interval 5')
cubit.cmd('curve 2 4 interval '+str(refinement))
cubit.cmd('curve 10 5 interval '+str(refinement))
# cubit.cmd('curve 11 17 interval 1')
# Horizontal for flow regions
cubit.cmd('curve 31 29 27 25 23 21 18 20 interval '+str(3*refinement))
cubit.cmd('curve 35 33 36 38 43 42 interval '+str(refinement))
# mesh the entire domain
# cubit.cmd('surface 1 2 3 4 5 6 7 8 9 10 11 12 size ' + str(dx))
cubit.cmd('surface 1 2 3 4 5 6 7 8 9 10 11 12 scheme ' + scheme)
cubit.cmd('block 1 surface 1 3 4') # inner reflector
cubit.cmd('block 2 surface 2') # control rod channel
cubit.cmd('block 3 surface 5 6 7 8 9 10 11') # core
cubit.cmd('block 4 surface 12 13 14 15 16') # pebble reflector
cubit.cmd('block 5 surface 17 18') # plenum
cubit.cmd('block 6 surface 19 20') # outer reflector
cubit.cmd('block 7 surface 21') # core barrel
cubit.cmd('block 8 surface 22') # downcomer
cubit.cmd('block 9 surface 23') # vessel
cubit.cmd('block 10 surface 24') # bricks
cubit.cmd('mesh surface 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24')
################################################################################
# Curves and sideset definitions
################################################################################
# Inflow
cubit.cmd('sideset 101 curve 20 wrt surface 5')
cubit.cmd('sideset 101 curve 35 wrt surface 12')
cubit.cmd('sideset 101 name "bed_horizontal_bottom"')
# Outflow
cubit.cmd('sideset 102 curve 31 wrt surface 11')
cubit.cmd('sideset 102 curve 43 wrt surface 16')
cubit.cmd('sideset 102 name "bed_horizontal_top"')
cubit.cmd('sideset 103 curve 49 wrt surface 17')
cubit.cmd('sideset 103 name "plenum_top"')
# Inner
cubit.cmd('sideset 104 curve 12 13 14 7 8 15 16 wrt volume 3')
cubit.cmd('sideset 104 name "bed_left"')
# Right-most boundary
cubit.cmd('sideset 105 curve 65 wrt volume 9')
cubit.cmd('sideset 105 name "brick_surface"')
################################################################################
# Refine some interfaces
################################################################################
# interfaces between _large_ changes in porosity
# porosity_interfaces = '14 15 16 21 23 25'
#
# cubit.cmd('refine curve ' + porosity_interfaces + ' numsplit 1 bias 1.0 depth 2 smooth')
#
# cubit.cmd('surface 1 2 3 4 5 smooth scheme condition number beta 1.2 cpu 10')
#
# more_refinement = '4 34 35 25 30 1 15 16'
#
# cubit.cmd('refine curve ' + more_refinement + ' numsplit 1 bias 1.0 depth 1 smooth')
#
# # refine on angled section near bottom
# cubit.cmd('refine curve 2 numsplit 1 bias 1.0 depth 5 smooth')
#
# # curves near the inlet where there is a rapid redistribution of the flow
# inlet_refinement = '12 13 14 39'
# cubit.cmd('refine curve ' + inlet_refinement + ' numsplit 1 bias 1.0 depth 2 smooth')
# if (orifice == False):
# curves = fhr.number_string(1, 6, 1) + ' ' + fhr.number_string(12, 18, 1)
# cubit.cmd('refine curve ' + curves + ' 25 26 numsplit 1 bias 1.0 depth 2 smooth')
# cubit.cmd('surface 1 2 3 4 smooth scheme condition number beta 1.2 cpu 10')
# else:
# curves = fhr.number_string(1, 6, 1) + ' ' + fhr.number_string(12, 17, 1) + fhr.number_string(24, split_curves[-1], 1)
# cubit.cmd('refine curve ' + curves + ' numsplit 1 bias 1.0 depth 2 smooth')
# cubit.cmd('surface 1 2 smooth scheme condition number beta 1.2 cpu 10')
# curves = fhr.number_string(7, 11, 1)
# cubit.cmd('refine curve ' + curves + ' numsplit 1 bias 1.0 depth 1 smooth')
# cubit.cmd('surface 1 2 smooth scheme condition number beta 1.2 cpu 10')
################################################################################
# Output
################################################################################
# Select file name
filename = 'core_pronghorn'
# Mesh size is controlled by intervals
# filename += "_" + str(dx)
# save file
cubit.cmd('set large exodus file on')
cubit.cmd('export Genesis "/Users/giudgl/projects/virtual_test_bed/pbfhr/meshes/' + filename + '.e" dimension 2 overwrite')
|
[
"os.getcwd"
] |
[((185, 196), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (194, 196), False, 'import os\n')]
|
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return HttpResponse('Page content')
def custom(request):
return render(request, 'custom.html', {})
|
[
"django.shortcuts.render",
"django.http.HttpResponse"
] |
[((105, 133), 'django.http.HttpResponse', 'HttpResponse', (['"""Page content"""'], {}), "('Page content')\n", (117, 133), False, 'from django.http import HttpResponse\n'), ((167, 201), 'django.shortcuts.render', 'render', (['request', '"""custom.html"""', '{}'], {}), "(request, 'custom.html', {})\n", (173, 201), False, 'from django.shortcuts import render\n')]
|
import numpy as np
a = np.array([1,2,3,4,5,6]) #_ rewrite it!
b = np.array([0,1,2,3,4,5]) #_ rewrite it!
|
[
"numpy.array"
] |
[((24, 52), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (32, 52), True, 'import numpy as np\n'), ((68, 96), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (76, 96), True, 'import numpy as np\n')]
|
from src import auth
auth.run()
|
[
"src.auth.run"
] |
[((24, 34), 'src.auth.run', 'auth.run', ([], {}), '()\n', (32, 34), False, 'from src import auth\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from setuptools import setup, find_packages
# Get the version
version_regex = r'__version__ = ["\']([^"\']*)["\']'
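# e.g. matches __version__ = '1.2.3' or __version__ = "1.2.3" in endpoints/__init__.py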
with open('endpoints/__init__.py', 'r') as f:
text = f.read()
match = re.search(version_regex, text)
if match:
version = match.group(1)
else:
raise RuntimeError("No version number found!")
install_requires = [
'attrs==17.4.0',
'google-endpoints-api-management>=1.10.0',
'semver==2.7.7',
'setuptools>=36.2.5',
]
setup(
name='google-endpoints',
version=version,
description='Google Cloud Endpoints',
long_description=open('README.rst').read(),
author='Google Endpoints Authors',
author_email='<EMAIL>',
url='https://github.com/cloudendpoints/endpoints-python',
packages=find_packages(exclude=['test', 'test.*']),
package_dir={'google-endpoints': 'endpoints'},
include_package_data=True,
license='Apache',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
],
scripts=['endpoints/endpointscfg.py'],
tests_require=['mock', 'protobuf', 'protorpc', 'pytest', 'webtest'],
install_requires=install_requires,
)
|
[
"re.search",
"setuptools.find_packages"
] |
[((865, 895), 're.search', 're.search', (['version_regex', 'text'], {}), '(version_regex, text)\n', (874, 895), False, 'import re\n'), ((1437, 1478), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['test', 'test.*']"}), "(exclude=['test', 'test.*'])\n", (1450, 1478), False, 'from setuptools import setup, find_packages\n')]
|
import requests
project_key = 'RS_P_1495608305106685963'
token = 'v5sRS_P_1495608305106685963s1496806325613631987'
api_key = '<KEY>'
# Authentication API
# url = "https://api.sports.roanuz.com/v5/core/{}/auth/".format(project_key)
# payload = {
# 'api_key': api_key
# }
# response = requests.post(url, json=payload)
# print(response.json())
# Association List
# print("Association List API")
# url = "https://api.sports.roanuz.com/v5/cricket/{}/association/list/".format(
# project_key)
# headers = {
# 'rs-token': token
# }
# response = requests.get(url, headers=headers)
# print(response.json())
# Country List
# print("Country List API")
# url = "https://api.sports.roanuz.com/v5/cricket/{}/country/list/".format(
# project_key)
# headers = {
# 'rs-token': token
# }
# response = requests.get(url, headers=headers)
# print(response.json())
# Featured List
# url = "https://api.sports.roanuz.com/v5/cricket/{}/featured-tournaments/".format(
# project_key)
# headers = {
# 'rs-token': token
# }
# response = requests.get(url, headers=headers)
# print(response.json())
# Featured Matches
tournament_key = "nzwindw_2022"
url = "https://api.sports.roanuz.com/v5/cricket/{}/tournament/{}/featured-matches/".format(
project_key, tournament_key)
headers = {
'rs-token': token
}
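# Note: no timeout is set here, so a stalled connection could hang indefinitely; consider requests.get(..., timeout=...).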
response = requests.get(url, headers=headers)
print(response.json())
|
[
"requests.get"
] |
[((1333, 1367), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1345, 1367), False, 'import requests\n')]
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Consolidated evaluator factory module.
This module consolidates the creation of specific evaluator combinators, used
throughout Pinpoint to evaluate task graphs we support.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.pinpoint.models.tasks import performance_bisection
from dashboard.pinpoint.models.tasks import read_value
from dashboard.pinpoint.models.tasks import run_test
EXCLUDED_PAYLOAD_KEYS = {'commits', 'swarming_request_body'}
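# Payload keys skipped when lifting task payloads; these large objects are not needed in the end result.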
class ExecutionEngine(evaluators.SequenceEvaluator):
def __init__(self, job):
# We gather all the evaluators from the modules we know.
super(ExecutionEngine, self).__init__(evaluators=[
evaluators.DispatchByTaskType({
'find_isolate': find_isolate.Evaluator(job),
'find_culprit': performance_bisection.Evaluator(job),
'read_value': read_value.Evaluator(job),
'run_test': run_test.Evaluator(job),
}),
# We then always lift the task payload up, skipping some of the
# larger objects that we know we are not going to need when deciding
# what the end result is.
evaluators.TaskPayloadLiftingEvaluator(
exclude_keys=EXCLUDED_PAYLOAD_KEYS)
])
|
[
"dashboard.pinpoint.models.tasks.read_value.Evaluator",
"dashboard.pinpoint.models.tasks.performance_bisection.Evaluator",
"dashboard.pinpoint.models.tasks.run_test.Evaluator",
"dashboard.pinpoint.models.evaluators.TaskPayloadLiftingEvaluator",
"dashboard.pinpoint.models.tasks.find_isolate.Evaluator"
] |
[((1464, 1538), 'dashboard.pinpoint.models.evaluators.TaskPayloadLiftingEvaluator', 'evaluators.TaskPayloadLiftingEvaluator', ([], {'exclude_keys': 'EXCLUDED_PAYLOAD_KEYS'}), '(exclude_keys=EXCLUDED_PAYLOAD_KEYS)\n', (1502, 1538), False, 'from dashboard.pinpoint.models import evaluators\n'), ((1063, 1090), 'dashboard.pinpoint.models.tasks.find_isolate.Evaluator', 'find_isolate.Evaluator', (['job'], {}), '(job)\n', (1085, 1090), False, 'from dashboard.pinpoint.models.tasks import find_isolate\n'), ((1120, 1156), 'dashboard.pinpoint.models.tasks.performance_bisection.Evaluator', 'performance_bisection.Evaluator', (['job'], {}), '(job)\n', (1151, 1156), False, 'from dashboard.pinpoint.models.tasks import performance_bisection\n'), ((1184, 1209), 'dashboard.pinpoint.models.tasks.read_value.Evaluator', 'read_value.Evaluator', (['job'], {}), '(job)\n', (1204, 1209), False, 'from dashboard.pinpoint.models.tasks import read_value\n'), ((1235, 1258), 'dashboard.pinpoint.models.tasks.run_test.Evaluator', 'run_test.Evaluator', (['job'], {}), '(job)\n', (1253, 1258), False, 'from dashboard.pinpoint.models.tasks import run_test\n')]
|
import pytest
from dae.variants.attributes import Role
from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData
def trio_persons(family_id="trio_family"):
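    # Minimal mom/dad/proband trio; status 2 marks the proband as affected.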
return [
Person(**{
"family_id": family_id,
"person_id": "mom",
"sex": "F",
"role": "mom",
"status": 1
}),
Person(**{
"family_id": family_id,
"person_id": "dad",
"sex": "M",
"role": "dad",
"status": 1
}),
Person(**{
"family_id": family_id,
"person_id": "p1",
"sex": "M",
"role": "prb",
"status": 2
}),
]
@pytest.fixture
def quad_persons():
persons = trio_persons("quad_family")
persons.append(Person(**{
"family_id": "quad_family",
"person_id": "s1",
"sex": "M",
"role": "sib",
"status": 1
}))
return persons
@pytest.fixture
def multigenerational_persons():
persons = trio_persons("multigenerational_family")
persons.append(Person(**{
"family_id": "multigenerational_family",
"person_id": "grandparent",
"sex": "M",
"role": str(Role.maternal_grandfather),
"status": 1
}))
return persons
@pytest.fixture
def simplex_persons():
persons = trio_persons("simplex_family")
persons[0]._status = 2
persons[0]._attributes["status"] = 2
return persons
@pytest.fixture
def simplex_persons_2():
persons = trio_persons("simplex_family")
persons[0]._status = 2
persons[0]._attributes["status"] = 2
persons.append(Person(**{
"family_id": "simplex_family",
"person_id": "s1",
"sex": "M",
"role": "sib",
"status": 1
}))
return persons
@pytest.fixture
def multiplex_persons():
persons = trio_persons("multiplex_family")
persons.append(Person(**{
"family_id": "multiplex_family",
"person_id": "s1",
"sex": "M",
"role": "sib",
"status": 2
}))
return persons
def test_family_type_trio():
family = Family.from_persons(trio_persons())
assert family.family_type is FamilyType.TRIO
def test_family_type_quad(quad_persons):
family = Family.from_persons(quad_persons)
assert family.family_type is FamilyType.QUAD
@pytest.mark.parametrize("role", [
(Role.maternal_grandfather),
(Role.paternal_grandfather),
(Role.maternal_grandmother),
(Role.paternal_grandmother),
])
def test_family_type_multigenerational(role):
persons = list(trio_persons("multigenerational"))
persons.append(Person(**{
"family_id": "multigenerational",
"person_id": "grandparent",
"sex": "U",
"role": str(role),
"status": 1
}))
family = Family.from_persons(persons)
assert family.family_type is FamilyType.MULTIGENERATIONAL
def test_family_type_simplex(simplex_persons):
family = Family.from_persons(simplex_persons)
assert family.family_type is FamilyType.SIMPLEX
def test_family_type_simplex_2(simplex_persons_2):
family = Family.from_persons(simplex_persons_2)
assert family.family_type is FamilyType.SIMPLEX
def test_family_type_multiplex(multiplex_persons):
family = Family.from_persons(multiplex_persons)
assert family.family_type is FamilyType.MULTIPLEX
def test_families_data_families_by_type(
quad_persons,
multigenerational_persons,
simplex_persons,
multiplex_persons
):
families_data = FamiliesData.from_families(
{
"trio_family": Family.from_persons(trio_persons()),
"quad_family": Family.from_persons(quad_persons),
"multigenerational_family": Family.from_persons(multigenerational_persons),
"simplex_family": Family.from_persons(simplex_persons),
"multiplex_family": Family.from_persons(multiplex_persons),
}
)
assert families_data.families_by_type == {
FamilyType.QUAD: {"quad_family"},
FamilyType.TRIO: {"trio_family"},
FamilyType.MULTIGENERATIONAL: {"multigenerational_family"},
FamilyType.SIMPLEX: {"simplex_family"},
FamilyType.MULTIPLEX: {"multiplex_family"},
}
|
[
"pytest.mark.parametrize",
"dae.pedigrees.family.Person",
"dae.pedigrees.family.Family.from_persons"
] |
[((2378, 2529), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""role"""', '[Role.maternal_grandfather, Role.paternal_grandfather, Role.\n maternal_grandmother, Role.paternal_grandmother]'], {}), "('role', [Role.maternal_grandfather, Role.\n paternal_grandfather, Role.maternal_grandmother, Role.paternal_grandmother]\n )\n", (2401, 2529), False, 'import pytest\n'), ((2292, 2325), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['quad_persons'], {}), '(quad_persons)\n', (2311, 2325), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((2843, 2871), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['persons'], {}), '(persons)\n', (2862, 2871), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((2996, 3032), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['simplex_persons'], {}), '(simplex_persons)\n', (3015, 3032), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((3151, 3189), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['simplex_persons_2'], {}), '(simplex_persons_2)\n', (3170, 3189), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((3308, 3346), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['multiplex_persons'], {}), '(multiplex_persons)\n', (3327, 3346), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((196, 294), 'dae.pedigrees.family.Person', 'Person', ([], {}), "(**{'family_id': family_id, 'person_id': 'mom', 'sex': 'F', 'role':\n 'mom', 'status': 1})\n", (202, 294), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((370, 468), 'dae.pedigrees.family.Person', 'Person', ([], {}), "(**{'family_id': family_id, 'person_id': 'dad', 'sex': 'M', 'role':\n 'dad', 'status': 1})\n", (376, 468), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((544, 641), 'dae.pedigrees.family.Person', 'Person', ([], {}), "(**{'family_id': family_id, 'person_id': 'p1', 'sex': 'M', 'role':\n 'prb', 'status': 2})\n", (550, 641), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((814, 915), 'dae.pedigrees.family.Person', 'Person', ([], {}), "(**{'family_id': 'quad_family', 'person_id': 's1', 'sex': 'M', 'role':\n 'sib', 'status': 1})\n", (820, 915), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((1662, 1766), 'dae.pedigrees.family.Person', 'Person', ([], {}), "(**{'family_id': 'simplex_family', 'person_id': 's1', 'sex': 'M',\n 'role': 'sib', 'status': 1})\n", (1668, 1766), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((1938, 2044), 'dae.pedigrees.family.Person', 'Person', ([], {}), "(**{'family_id': 'multiplex_family', 'person_id': 's1', 'sex': 'M',\n 'role': 'sib', 'status': 2})\n", (1944, 2044), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((3688, 3721), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['quad_persons'], {}), '(quad_persons)\n', (3707, 3721), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((3763, 3809), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['multigenerational_persons'], {}), '(multigenerational_persons)\n', (3782, 3809), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((3841, 3877), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['simplex_persons'], {}), '(simplex_persons)\n', (3860, 3877), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n'), ((3911, 3949), 'dae.pedigrees.family.Family.from_persons', 'Family.from_persons', (['multiplex_persons'], {}), '(multiplex_persons)\n', (3930, 3949), False, 'from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData\n')]
|
#!/Users/marc/miniconda3/bin/python3
import glfw
from OpenGL.GL import *
from OpenGL.GLU import *
import math
import ctypes
def framebuffer_size_callback(window, width, height):
# make sure the viewport matches the new window dimensions; note that width and
# height will be significantly larger than specified on retina displays.
glViewport(0, 0, width, height)
# process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly
# ---------------------------------------------------------------------------------------------------------
def processInput(window):
if glfw.get_key(window, glfw.KEY_ESCAPE) == glfw.PRESS:
glfw.set_window_should_close(window, True)
width = 800
height = 600
# Initialize the library
if not glfw.init():
print("Failed to init glfw")
else:
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)
window = glfw.create_window(width, height, "LearnOpenGL", None, None)
if not window:
    print("Failed to create GLFW window")
    glfw.terminate()
    raise SystemExit("Failed to create GLFW window")
glfw.make_context_current(window)
glfw.set_framebuffer_size_callback(window, framebuffer_size_callback)
## Load, compile, link shaders
import myshader
shaders = myshader.shader( "shaders/hellocolor.vert", "shaders/hellocolor.frag")
shaders.linkShaders()
# set up vertex data (and buffer(s)) and configure vertex attributes
# ------------------------------------------------------------------
import numpy as np
vertices = np.array([
0.5, 0.5, 0.0, 1.0, 0.0, 0.0, # top right
0.5, -0.5, 0.0, 0.0, 1.0, 0.0, # bottom right
-0.5, -0.5, 0.0, 0.0, 0.0, 1.0, # bottom left
-0.5, 0.5, 0.0, 1.0, 1.0, 1.0 # top left
], dtype=np.float32)
indices = np.array([ # note that we start from 0!
1, 3, 0, # first Triangle
1, 2, 3 # second Triangle
], dtype=np.uint32)
# bind the Vertex Array Object first, then bind and set vertex buffer(s), and then configure vertex attributes(s).
VAO = glGenVertexArrays(1)
glBindVertexArray(VAO)
VBO = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, vertices, GL_STATIC_DRAW)
EBO = glGenBuffers(1)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices, GL_STATIC_DRAW)
# glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_buffer, GL_STATIC_DRAW)
# d = glGetBufferSubData( GL_ELEMENT_ARRAY_BUFFER, 0, 6 * 4)
# print(d)
# d = glGetBufferSubData( GL_ARRAY_BUFFER, 0, 12 * 4)
# print(d)
## position of the attrib array, must match the shader
location = 0
glVertexAttribPointer(location, 3, GL_FLOAT, GL_FALSE, 6*4, None) #3 * 4, 0)
glEnableVertexAttribArray(location)
## position of the attrib array, must match the shader
location = 1
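## color data starts 3 floats (12 bytes) into each 6-float vertex, hence the ctypes.c_void_p(3*4) offset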
glVertexAttribPointer(location, 3, GL_FLOAT, GL_FALSE, 6*4, ctypes.c_void_p(3*4)) #3 * 4, 0)
glEnableVertexAttribArray(location)
# note that this is allowed, the call to glVertexAttribPointer registered VBO as the
# vertex attribute's bound vertex buffer object so afterwards we can safely unbind
# glBindBuffer(GL_ARRAY_BUFFER, 0)
# remember: do NOT unbind the EBO while a VAO is active as the bound element buffer object
# IS stored in the VAO; keep the EBO bound.
# NO glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
# You can unbind the VAO afterwards so other VAO calls won't accidentally modify this VAO,
# but this rarely happens. Modifying other VAOs requires a call to glBindVertexArray anyways
# so we generally don't unbind VAOs (nor VBOs) when it's not directly necessary.
glBindVertexArray(0)
# uncomment this call to draw in wireframe polygons.
# glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# render loop
# -----------
glClearColor(0.9, 0.7, 0.7, 1.0)
shaders.use()
# no need to bind it every time, but we'll do so to keep things a bit more organized
glBindVertexArray(VAO) # seeing as we only have a single VAO there's
while not glfw.window_should_close(window):
# input
processInput(window)
timeValue = glfw.get_time()*1.0
greenValue = (math.sin(timeValue) / 2.0) + 0.5
# print( greenValue )
shaders.setUniform4f( "extraColor", 0.0, greenValue, 0.0, 1.0)
scaleUp = abs( greenValue )
shaders.setUniform1f( "scaleUp", scaleUp)
angle = timeValue
rotation = np.array([
math.cos(angle), - math.sin(angle),
math.sin(angle), math.cos(angle)
], dtype=np.float32)
shaders.setUniformMatrix2fv( "rotation", rotation)
# render
glClear(GL_COLOR_BUFFER_BIT)
# draw our first triangle
# glDrawArrays(GL_TRIANGLES, 0, 6)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None)
# glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
# -------------------------------------------------------------------------------
glfw.swap_buffers(window)
glfw.poll_events()
glBindVertexArray(0) # no need to unbind it every time
# optional: de-allocate all resources once they've outlived their purpose:
# ------------------------------------------------------------------------
glDeleteVertexArrays(1, [VAO])
glDeleteBuffers(1, [VBO])
glDeleteBuffers(1, [EBO])
# glfw: terminate, clearing all previously allocated GLFW resources.
# ------------------------------------------------------------------
glfw.terminate()
|
[
"glfw.window_hint",
"myshader.shader",
"glfw.poll_events",
"glfw.make_context_current",
"glfw.set_window_should_close",
"glfw.window_should_close",
"glfw.get_time",
"math.sin",
"glfw.init",
"glfw.get_key",
"numpy.array",
"ctypes.c_void_p",
"glfw.set_framebuffer_size_callback",
"math.cos",
"glfw.terminate",
"glfw.swap_buffers",
"glfw.create_window"
] |
[((1086, 1146), 'glfw.create_window', 'glfw.create_window', (['width', 'height', '"""LearnOpenGL"""', 'None', 'None'], {}), "(width, height, 'LearnOpenGL', None, None)\n", (1104, 1146), False, 'import glfw\n'), ((1234, 1267), 'glfw.make_context_current', 'glfw.make_context_current', (['window'], {}), '(window)\n', (1259, 1267), False, 'import glfw\n'), ((1268, 1337), 'glfw.set_framebuffer_size_callback', 'glfw.set_framebuffer_size_callback', (['window', 'framebuffer_size_callback'], {}), '(window, framebuffer_size_callback)\n', (1302, 1337), False, 'import glfw\n'), ((1396, 1465), 'myshader.shader', 'myshader.shader', (['"""shaders/hellocolor.vert"""', '"""shaders/hellocolor.frag"""'], {}), "('shaders/hellocolor.vert', 'shaders/hellocolor.frag')\n", (1411, 1465), False, 'import myshader\n'), ((1660, 1816), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0, 1.0, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 1.0, 0.0, -0.5, -0.5, \n 0.0, 0.0, 0.0, 1.0, -0.5, 0.5, 0.0, 1.0, 1.0, 1.0]'], {'dtype': 'np.float32'}), '([0.5, 0.5, 0.0, 1.0, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 1.0, 0.0, -0.5,\n -0.5, 0.0, 0.0, 0.0, 1.0, -0.5, 0.5, 0.0, 1.0, 1.0, 1.0], dtype=np.float32)\n', (1668, 1816), True, 'import numpy as np\n'), ((1943, 1988), 'numpy.array', 'np.array', (['[1, 3, 0, 1, 2, 3]'], {'dtype': 'np.uint32'}), '([1, 3, 0, 1, 2, 3], dtype=np.uint32)\n', (1951, 1988), True, 'import numpy as np\n'), ((5465, 5481), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (5479, 5481), False, 'import glfw\n'), ((794, 805), 'glfw.init', 'glfw.init', ([], {}), '()\n', (803, 805), False, 'import glfw\n'), ((850, 897), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MAJOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MAJOR, 3)\n', (866, 897), False, 'import glfw\n'), ((902, 949), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MINOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MINOR, 3)\n', (918, 949), False, 'import glfw\n'), ((954, 1017), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_PROFILE', 'glfw.OPENGL_CORE_PROFILE'], {}), '(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n', (970, 1017), False, 'import glfw\n'), ((1022, 1075), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_FORWARD_COMPAT', 'GL_TRUE'], {}), '(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)\n', (1038, 1075), False, 'import glfw\n'), ((1216, 1232), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (1230, 1232), False, 'import glfw\n'), ((3004, 3026), 'ctypes.c_void_p', 'ctypes.c_void_p', (['(3 * 4)'], {}), '(3 * 4)\n', (3019, 3026), False, 'import ctypes\n'), ((4087, 4119), 'glfw.window_should_close', 'glfw.window_should_close', (['window'], {}), '(window)\n', (4111, 4119), False, 'import glfw\n'), ((4987, 5012), 'glfw.swap_buffers', 'glfw.swap_buffers', (['window'], {}), '(window)\n', (5004, 5012), False, 'import glfw\n'), ((5017, 5035), 'glfw.poll_events', 'glfw.poll_events', ([], {}), '()\n', (5033, 5035), False, 'import glfw\n'), ((630, 667), 'glfw.get_key', 'glfw.get_key', (['window', 'glfw.KEY_ESCAPE'], {}), '(window, glfw.KEY_ESCAPE)\n', (642, 667), False, 'import glfw\n'), ((691, 733), 'glfw.set_window_should_close', 'glfw.set_window_should_close', (['window', '(True)'], {}), '(window, True)\n', (719, 733), False, 'import glfw\n'), ((4176, 4191), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (4189, 4191), False, 'import glfw\n'), ((4214, 4233), 'math.sin', 'math.sin', (['timeValue'], {}), '(timeValue)\n', (4222, 4233), False, 'import math\n'), ((4476, 4491), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4484, 4491), False, 'import math\n'), ((4520, 4535), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4528, 4535), False, 'import math\n'), ((4539, 4554), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4547, 4554), False, 'import math\n'), ((4495, 4510), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4503, 4510), False, 'import math\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 09:52:17 2018
@author: liaamaral
"""
#------
# Load the main libraries
import os
import csv
import numpy as np
import pandas as pd
import logging
#------
# Data input and output paths:
pathin="/media/DATA/tmp/datasets/subsetDB/rain/" # Path of the rain dataset
pathrain="/media/DATA/tmp/datasets/subsetDB/rain/" # Path of the rain dataset
#pathnorain="/Volumes/lia_595gb/randel/python/dados/subsetDB/norain/" # Path of the non rain dataset
#------
# Create the list of Dataframes, eliminating the files that start with ".":
frames = []
for file in os.listdir(pathin):
if file.startswith(".", 0, len(file)):
name = os.path.splitext(file)[0]
print("File name starts with point: ", name)
else:
logging.debug(file)
df = pd.read_csv(os.path.join(pathin, file), sep=',', decimal='.', encoding="utf8")
df.reset_index(drop=True, inplace=True)
frames.append(df)
logging.debug(frames)
#------
# Concatenation of the monthly Dataframes into the yearly Dataframe:
try:
DB_yrly_rain = pd.concat(frames, sort=False, ignore_index=True, verify_integrity=True)
except ValueError as e:
print("ValueError:", e)
# Repairing the additional column wrongly generated in concatenation:
if np.isfinite(DB_yrly_rain.iloc[:,34]).all():
    DB_yrly_rain["correto"]=DB_yrly_rain.iloc[:,34]
else:
    # Column 34 contains non-finite values, so fall back to column 33.
    DB_yrly_rain["correto"]=DB_yrly_rain.iloc[:,33]
#DB_yrly_norain = pd.concat(frames)
#------
# Giving the output file names:
DB_name="BR_yrly_rain.csv"
#DB_yrly_norain="BR_yrly_norain_.csv"
#------
# Saving the new output DB's (rain and no rain):
#DB_yrly_rain.to_csv(os.path.join(pathrain, DB_name),index=False,sep=",",decimal='.')
#print("The file ", DB_yrly_rain ," was genetared!")
DB_yrly_rain.to_csv(os.path.join(pathrain, DB_name),index=False,sep=",",decimal='.')
print("The file ", DB_name ," was genetared!")
|
[
"os.listdir",
"logging.debug",
"numpy.isfinite",
"os.path.splitext",
"os.path.join",
"pandas.concat"
] |
[((630, 648), 'os.listdir', 'os.listdir', (['pathin'], {}), '(pathin)\n', (640, 648), False, 'import os\n'), ((1151, 1222), 'pandas.concat', 'pd.concat', (['frames'], {'sort': '(False)', 'ignore_index': '(True)', 'verify_integrity': '(True)'}), '(frames, sort=False, ignore_index=True, verify_integrity=True)\n', (1160, 1222), True, 'import pandas as pd\n'), ((1368, 1405), 'numpy.isfinite', 'np.isfinite', (['DB_yrly_rain.iloc[:, 34]'], {}), '(DB_yrly_rain.iloc[:, 34])\n', (1379, 1405), True, 'import numpy as np\n'), ((1918, 1949), 'os.path.join', 'os.path.join', (['pathrain', 'DB_name'], {}), '(pathrain, DB_name)\n', (1930, 1949), False, 'import os\n'), ((808, 827), 'logging.debug', 'logging.debug', (['file'], {}), '(file)\n', (821, 827), False, 'import logging\n'), ((1002, 1023), 'logging.debug', 'logging.debug', (['frames'], {}), '(frames)\n', (1015, 1023), False, 'import logging\n'), ((710, 732), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (726, 732), False, 'import os\n'), ((853, 879), 'os.path.join', 'os.path.join', (['pathin', 'file'], {}), '(pathin, file)\n', (865, 879), False, 'import os\n')]
|
"""
Copyright (C) 2021 Red Hat, Inc. (https://github.com/Commonjava/charon)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import sys
import tarfile
import requests
import tempfile
import shutil
from enum import Enum
from json import load, JSONDecodeError
from typing import Tuple
from zipfile import ZipFile, is_zipfile
logger = logging.getLogger(__name__)
def extract_zip_all(zf: ZipFile, target_dir: str):
zf.extractall(target_dir)
def extract_zip_with_files(zf: ZipFile, target_dir: str, file_suffix: str, debug=False):
names = zf.namelist()
filtered = list(filter(lambda n: n.endswith(file_suffix), names))
if debug:
logger.debug("Filtered files list as below with %s", file_suffix)
for name in filtered:
logger.debug(name)
zf.extractall(target_dir, members=filtered)
def extract_npm_tarball(path: str, target_dir: str, is_for_upload: bool) -> Tuple[str, list]:
""" Extract npm tarball will relocate the tgz file and metadata files.
* Locate tar path ( e.g.: jquery/-/jquery-7.6.1.tgz or @types/jquery/-/jquery-2.2.3.tgz).
* Locate version metadata path (e.g.: jquery/7.6.1 or @types/jquery/2.2.3).
    The result includes the version metadata file path, which is used for the subsequent package metadata generation.
"""
valid_paths = []
package_name_path = str()
tgz = tarfile.open(path)
tgz.extractall()
for f in tgz:
if f.name.endswith("package.json"):
parse_paths = __parse_npm_package_version_paths(f.path)
package_name_path = parse_paths[0]
os.makedirs(os.path.join(target_dir, parse_paths[0]))
tarball_parent_path = os.path.join(target_dir, parse_paths[0], "-")
valid_paths.append(os.path.join(tarball_parent_path, _get_tgz_name(path)))
version_metadata_parent_path = os.path.join(
target_dir, parse_paths[0], parse_paths[1]
)
valid_paths.append(os.path.join(version_metadata_parent_path, "package.json"))
if is_for_upload:
os.makedirs(tarball_parent_path)
target = os.path.join(tarball_parent_path, os.path.basename(path))
shutil.copyfile(path, target)
os.makedirs(version_metadata_parent_path)
target = os.path.join(version_metadata_parent_path, os.path.basename(f.path))
shutil.copyfile(f.path, target)
break
return package_name_path, valid_paths
def _get_tgz_name(path: str):
parts = path.split("/")
if len(parts) > 0:
return parts[-1]
return ""
def __parse_npm_package_version_paths(path: str) -> list:
try:
with open(path, encoding='utf-8') as version_package:
data = load(version_package)
package_version_paths = [data['name'], data['version']]
return package_version_paths
except JSONDecodeError:
logger.error('Error: Failed to parse json!')
class NpmArchiveType(Enum):
"""Possible types of detected archive"""
NOT_NPM = 0
DIRECTORY = 1
ZIP_FILE = 2
TAR_FILE = 3
def detect_npm_archive(repo):
"""Detects, if the archive needs to have npm workflow.
:parameter repo repository directory
:return NpmArchiveType value
"""
expanded_repo = os.path.expanduser(repo)
if not os.path.exists(expanded_repo):
logger.error("Repository %s does not exist!", expanded_repo)
sys.exit(1)
if os.path.isdir(expanded_repo):
# we have archive repository
repo_path = "".join((expanded_repo, "/package.json"))
if os.path.isfile(repo_path):
return NpmArchiveType.DIRECTORY
elif is_zipfile(expanded_repo):
# we have a ZIP file to expand
with ZipFile(expanded_repo) as zz:
try:
if zz.getinfo("package.json"):
return NpmArchiveType.ZIP_FILE
except KeyError:
pass
elif tarfile.is_tarfile(expanded_repo):
with tarfile.open(expanded_repo) as tt:
try:
if tt.getmember("package/package.json").isfile():
return (
NpmArchiveType.TAR_FILE
) # it is a tar file and has package.json in the right place
except KeyError:
pass
return NpmArchiveType.NOT_NPM
def download_archive(url: str, base_dir=None) -> str:
dir_ = base_dir
if not dir_ or not os.path.isdir(dir_):
dir_ = tempfile.mkdtemp()
logger.info("No base dir specified for holding archive."
" Will use a temp dir %s to hold archive",
dir_)
# Used solution here:
# https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
local_filename = os.path.join(dir_, url.split('/')[-1])
# NOTE the stream=True parameter below
# NOTE(2) timeout=30 parameter to set a 30-second timeout, and prevent indefinite hang.
with requests.get(url, stream=True, timeout=30, verify=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
# if chunk:
f.write(chunk)
return local_filename
|
[
"zipfile.is_zipfile",
"json.load",
"zipfile.ZipFile",
"os.path.join",
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"tarfile.is_tarfile",
"os.path.exists",
"os.path.isfile",
"tempfile.mkdtemp",
"requests.get",
"tarfile.open",
"sys.exit",
"shutil.copyfile",
"os.path.expanduser",
"logging.getLogger"
] |
[((843, 870), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (860, 870), False, 'import logging\n'), ((1856, 1874), 'tarfile.open', 'tarfile.open', (['path'], {}), '(path)\n', (1868, 1874), False, 'import tarfile\n'), ((3809, 3833), 'os.path.expanduser', 'os.path.expanduser', (['repo'], {}), '(repo)\n', (3827, 3833), False, 'import os\n'), ((3973, 4001), 'os.path.isdir', 'os.path.isdir', (['expanded_repo'], {}), '(expanded_repo)\n', (3986, 4001), False, 'import os\n'), ((3845, 3874), 'os.path.exists', 'os.path.exists', (['expanded_repo'], {}), '(expanded_repo)\n', (3859, 3874), False, 'import os\n'), ((3953, 3964), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3961, 3964), False, 'import sys\n'), ((4113, 4138), 'os.path.isfile', 'os.path.isfile', (['repo_path'], {}), '(repo_path)\n', (4127, 4138), False, 'import os\n'), ((4193, 4218), 'zipfile.is_zipfile', 'is_zipfile', (['expanded_repo'], {}), '(expanded_repo)\n', (4203, 4218), False, 'from zipfile import ZipFile, is_zipfile\n'), ((5021, 5039), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5037, 5039), False, 'import tempfile\n'), ((5519, 5574), 'requests.get', 'requests.get', (['url'], {'stream': '(True)', 'timeout': '(30)', 'verify': '(True)'}), '(url, stream=True, timeout=30, verify=True)\n', (5531, 5574), False, 'import requests\n'), ((2173, 2218), 'os.path.join', 'os.path.join', (['target_dir', 'parse_paths[0]', '"""-"""'], {}), "(target_dir, parse_paths[0], '-')\n", (2185, 2218), False, 'import os\n'), ((2349, 2405), 'os.path.join', 'os.path.join', (['target_dir', 'parse_paths[0]', 'parse_paths[1]'], {}), '(target_dir, parse_paths[0], parse_paths[1])\n', (2361, 2405), False, 'import os\n'), ((3267, 3288), 'json.load', 'load', (['version_package'], {}), '(version_package)\n', (3271, 3288), False, 'from json import load, JSONDecodeError\n'), ((4476, 4509), 'tarfile.is_tarfile', 'tarfile.is_tarfile', (['expanded_repo'], {}), '(expanded_repo)\n', (4494, 4509), False, 'import tarfile\n'), ((4985, 5004), 'os.path.isdir', 'os.path.isdir', (['dir_'], {}), '(dir_)\n', (4998, 5004), False, 'import os\n'), ((2097, 2137), 'os.path.join', 'os.path.join', (['target_dir', 'parse_paths[0]'], {}), '(target_dir, parse_paths[0])\n', (2109, 2137), False, 'import os\n'), ((2467, 2525), 'os.path.join', 'os.path.join', (['version_metadata_parent_path', '"""package.json"""'], {}), "(version_metadata_parent_path, 'package.json')\n", (2479, 2525), False, 'import os\n'), ((2573, 2605), 'os.makedirs', 'os.makedirs', (['tarball_parent_path'], {}), '(tarball_parent_path)\n', (2584, 2605), False, 'import os\n'), ((2705, 2734), 'shutil.copyfile', 'shutil.copyfile', (['path', 'target'], {}), '(path, target)\n', (2720, 2734), False, 'import shutil\n'), ((2751, 2792), 'os.makedirs', 'os.makedirs', (['version_metadata_parent_path'], {}), '(version_metadata_parent_path)\n', (2762, 2792), False, 'import os\n'), ((2903, 2934), 'shutil.copyfile', 'shutil.copyfile', (['f.path', 'target'], {}), '(f.path, target)\n', (2918, 2934), False, 'import shutil\n'), ((4272, 4294), 'zipfile.ZipFile', 'ZipFile', (['expanded_repo'], {}), '(expanded_repo)\n', (4279, 4294), False, 'from zipfile import ZipFile, is_zipfile\n'), ((2665, 2687), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2681, 2687), False, 'import os\n'), ((2861, 2885), 'os.path.basename', 'os.path.basename', (['f.path'], {}), '(f.path)\n', (2877, 2885), False, 'import os\n'), ((4524, 4551), 'tarfile.open', 'tarfile.open', (['expanded_repo'], {}), '(expanded_repo)\n', (4536, 4551), False, 'import tarfile\n')]
|
"""Utilities for calculating effective repository diffs.
Some intelligence is required because the tool shed updates attributes that it
is beneficial to ignore.
"""
from __future__ import print_function
import os
import sys
from xml.etree import ElementTree
from planemo.xml import diff
def diff_and_remove(working, label_a, label_b, f):
"""Remove tool shed XML files and use a smart XML diff on them.
    Return 0 if and only if the XML content is the same after stripping
    attributes the tool shed updates.
"""
assert label_a != label_b
special = ["tool_dependencies.xml", "repository_dependencies.xml"]
deps_diff = 0
# Could walk either A or B; will only compare if in same relative location
for dirpath, dirnames, filenames in os.walk(os.path.join(working, label_a)):
for filename in filenames:
if filename in special:
a = os.path.join(dirpath, filename)
b = os.path.join(working, label_b, os.path.relpath(a, os.path.join(working, label_a)))
files_exist = os.path.exists(a) and os.path.exists(b)
if files_exist:
deps_diff |= _shed_diff(a, b, f)
os.remove(a)
os.remove(b)
return deps_diff
def _shed_diff(file_a, file_b, f=sys.stdout):
"""Strip attributes the tool shed writes and do smart XML diff.
Returns 0 if and only if the XML content is the same after stripping
``tool_shed`` and ``changeset_revision`` attributes.
"""
xml_a = ElementTree.parse(file_a).getroot()
xml_b = ElementTree.parse(file_b).getroot()
_strip_shed_attributes(xml_a)
_strip_shed_attributes(xml_b)
return diff.diff(xml_a, xml_b, reporter=f.write)
def _strip_shed_attributes(xml_element):
if xml_element.tag == "repository":
_remove_attribs(xml_element)
    children = list(xml_element)  # Element.getchildren() was removed in Python 3.9
if len(children) > 0:
for child in children:
_strip_shed_attributes(child)
def _remove_attribs(xml_element):
for attrib in ["changeset_revision", "toolshed"]:
if attrib in xml_element.attrib:
del xml_element.attrib[attrib]
__all__ = (
"diff_and_remove",
)
|
[
"xml.etree.ElementTree.parse",
"os.remove",
"planemo.xml.diff.diff",
"os.path.exists",
"os.path.join"
] |
[((1706, 1747), 'planemo.xml.diff.diff', 'diff.diff', (['xml_a', 'xml_b'], {'reporter': 'f.write'}), '(xml_a, xml_b, reporter=f.write)\n', (1715, 1747), False, 'from planemo.xml import diff\n'), ((775, 805), 'os.path.join', 'os.path.join', (['working', 'label_a'], {}), '(working, label_a)\n', (787, 805), False, 'import os\n'), ((1543, 1568), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['file_a'], {}), '(file_a)\n', (1560, 1568), False, 'from xml.etree import ElementTree\n'), ((1591, 1616), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['file_b'], {}), '(file_b)\n', (1608, 1616), False, 'from xml.etree import ElementTree\n'), ((899, 930), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (911, 930), False, 'import os\n'), ((1064, 1081), 'os.path.exists', 'os.path.exists', (['a'], {}), '(a)\n', (1078, 1081), False, 'import os\n'), ((1086, 1103), 'os.path.exists', 'os.path.exists', (['b'], {}), '(b)\n', (1100, 1103), False, 'import os\n'), ((1209, 1221), 'os.remove', 'os.remove', (['a'], {}), '(a)\n', (1218, 1221), False, 'import os\n'), ((1242, 1254), 'os.remove', 'os.remove', (['b'], {}), '(b)\n', (1251, 1254), False, 'import os\n'), ((1001, 1031), 'os.path.join', 'os.path.join', (['working', 'label_a'], {}), '(working, label_a)\n', (1013, 1031), False, 'import os\n')]
|
import pickle
from pathlib import Path
from typing import Optional, List, Iterable, Dict, Any
import click
import pandas as pd
import torch
from tqdm import tqdm
from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, \
openai_gpt, ctrl, pplm, gpt2mcm
from utils.constants import PERSPECTIVE_API_ATTRIBUTES_LOWER
from utils.perspective_api import PerspectiveWorker, unpack_scores
from utils.utils import load_jsonl, batchify
import sys
import os
ALLOWED_MODELS = ['gpt3', 'gpt2', 'gpt2-affect', 'gpt2-ctrl', 'gpt2-greedy', 'gpt2-naughty-list',
'pplm', 'ctrl', 'openai-gpt', 'xlnet',
'gpt2mcm']
def make_generations_col(generations, responses):
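    # Pair each generated text with its unpacked Perspective scores; a missing response yields None for every attribute.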
for generation, response in zip(generations, responses):
if response['response']:
response = unpack_scores(response['response'])[0]
else:
response = {x: None for x in PERSPECTIVE_API_ATTRIBUTES_LOWER}
yield {'text': generation, **response}
def collate(dataset: Optional[pd.DataFrame], generations: List[str], responses: Iterable[Dict[str, Any]],
output_file: str):
generations_col_iter = make_generations_col(generations, responses)
if dataset is None:
generations_col = list(tqdm(generations_col_iter, total=len(generations), desc='Collating files'))
dataset = pd.DataFrame(generations_col)
else:
assert len(generations) % len(dataset) == 0
n = len(generations) // len(dataset)
print(f"Detected samples per prompt:", n)
generations_col = list(tqdm(batchify(generations_col_iter, n), total=len(dataset), desc='Collating files'))
dataset['generations'] = generations_col
dataset.to_json(output_file, orient='records', lines=True)
@click.command()
@click.argument('output-dir')
@click.option('--dataset-file', required=False, type=str,
help='JSONL file containing prompts data. Each row must contain a prompt at `row["prompt"]["text"]`.')
@click.option('--use-eos/--use-dataset', default=False, help='Whether to use EOS or a dataset file for generation.')
@click.option('--model', required=True, help='Equivalent to `model_name_or_path` in transformers.')
@click.option('--model-type', required=True,
type=click.Choice(ALLOWED_MODELS))
@click.option('--perspective-rate-limit', default=25)
@click.option('--n', default=25, help='Number of samples to generate for each prompt. When used with --eos')
@click.option('--max-tokens', default=20, help='Number of tokens (usually BPE) to generate for each prompt.')
@click.option('--batch-size', default=32)
@click.option('--start-index', default=0)
@click.option('--end-index', default=-1)
@click.option('--mcm-t', default=0.0)
@click.option('--mcm-keepmin', default=30)
@click.option('--top-k', default=50)
@click.option('--resume/--no-resume', default=False)
@click.option('--usemcm/--no-usemcm', default=True, help='Use mcm or not. When used with --model-type gpt2mcm')
def main(output_dir: str, dataset_file: Optional[str], use_eos: bool, model: str, model_type: str, n: int,
max_tokens: int, batch_size: int, perspective_rate_limit: int, start_index: int, end_index: int,
mcm_t: float, mcm_keepmin: int, top_k: int,
resume: bool, usemcm: bool):
# Load prompts
if dataset_file:
assert not use_eos
# Load prompts from dataset file
assert dataset_file.endswith('.jsonl')
dataset = pd.read_json(dataset_file, lines=True)
dataset = dataset[start_index:end_index]
prompts = pd.json_normalize(dataset['prompt'])['text']
elif use_eos:
assert not dataset_file
dataset = None
# Create EOS prompts
if model_type in ['gpt2', 'gpt2-affect', 'gpt2-naughty-list', 'pplm', 'gpt2mcm']:
prompts = pd.Series('<|endoftext|>')
elif model_type == 'gpt2-ctrl':
prompts = pd.Series('<|nontoxic|>')
elif model_type == 'ctrl':
# HACK: update gen_samples since we use it as our batch size for pipelines
prompts = pd.Series('').repeat(n // batch_size + 1)
n = batch_size
elif model_type == 'xlnet':
prompts = pd.Series('<s>').repeat(n // batch_size + 1)
n = batch_size
elif model_type == 'openai-gpt':
prompts = pd.Series('. ').repeat(n // batch_size + 1)
n = batch_size
elif model_type == 'gpt3':
prompts = pd.Series('').repeat(n // batch_size + 1)
else:
raise RuntimeError('Model not implemented with EOS prompts')
else:
raise click.exceptions.MissingParameter('Missing --dataset-file or --use-eos option.')
print('Prompts:', '\n', prompts)
# Create output files
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=resume)
generations_file = output_dir / 'generations.jsonl'
perspective_file = output_dir / 'perspective.jsonl'
output_file = output_dir / f'{"eos" if use_eos else "prompted"}_gens_{model_type}.jsonl'
# Create perspective worker thread
perspective = PerspectiveWorker(out_file=perspective_file,
total=len(prompts) * n,
rate_limit=perspective_rate_limit)
# Setup model for generation
# TODO: move this logic into generation.py
if model_type == 'gpt2':
generations_iter = gpt2(prompts=prompts,
max_len=max_tokens,
num_samples=n,
batch_size=batch_size,
model_name_or_path=model,
out_file=generations_file)
elif model_type == 'gpt2mcm':
generations_iter = gpt2mcm(prompts=prompts,
max_len=max_tokens,
num_samples=n,
batch_size=batch_size,
model_name_or_path=model,
out_file=generations_file,
threshold=mcm_t,
k=top_k,
min_token_number=mcm_keepmin,
use_mcm=usemcm)
elif model_type == 'gpt3':
generations_iter = gpt3(prompts=prompts,
max_len=max_tokens,
num_samples=n,
batch_size=batch_size,
model_name_or_path=model,
out_file=generations_file)
elif model_type == 'gpt2-greedy':
print("Using n=1 for greedy generation (sampling does not apply)")
generations_iter = gpt2(prompts=prompts,
max_len=max_tokens,
num_samples=1,
batch_size=batch_size,
model_name_or_path=model,
out_file=generations_file,
sample=False)
elif model_type == 'gpt2-naughty-list':
# Load pre-tokenized naughty words
# FIXME: output dir must already exist with this file
with open(output_dir / 'gpt2_naughty_token_ids.pkl', 'rb') as f:
naughty_list_ids = pickle.load(f)
generations_iter = gpt2(prompts=prompts,
max_len=max_tokens,
num_samples=n,
batch_size=batch_size,
model_name_or_path=model,
out_file=generations_file,
bad_words_ids=naughty_list_ids)
elif model_type == 'gpt2-affect':
generations_iter = gpt2_affect(prompts=prompts,
max_len=max_tokens,
num_samples=n,
batch_size=batch_size,
model_name_or_path=model,
out_file=generations_file,
# Affect
target_class=0,
num_classes=2,
beta=1)
elif model_type == 'gpt2-ctrl':
generations_iter = gpt2_ctrl(prompts=prompts,
max_len=max_tokens,
num_samples=n,
batch_size=batch_size,
model_name_or_path=model,
out_file=generations_file,
# GPT2-CTRL
prompt_ctrl_code='<|nontoxic|>')
elif model_type == 'openai-gpt':
generations_iter = openai_gpt(prompts=prompts,
max_len=max_tokens,
num_samples=n,
model_name_or_path=model,
out_file=generations_file)
elif model_type == 'ctrl':
assert model == 'ctrl'
generations_iter = ctrl(prompts=prompts,
max_len=max_tokens,
num_samples=n,
model_name_or_path=model,
out_file=generations_file,
# CTRL
ctrl_code='Links',
temperature=1.0,
repetition_penalty=1.2)
elif model_type == 'pplm':
generations_iter = pplm(prompts=prompts,
max_len=max_tokens,
num_samples=n,
batch_size=batch_size,
class_label=0,
num_iterations=10,
model_name_or_path='toxicity',
out_file=generations_file)
else:
raise NotImplementedError(f'Model {model} not implemented')
# Generate and collate perspective scores
generations = []
for i, gen in enumerate(generations_iter):
generations.append(gen)
perspective(f'generation-{i}', gen)
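        # Free cached GPU memory after each generation to limit fragmentation.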
torch.cuda.empty_cache()
perspective.stop()
print('Finished generation and perspective scoring!')
print('Collating output files')
collate(dataset, generations, load_jsonl(perspective_file), output_file)
if __name__ == '__main__':
main()
|
[
"click.option",
"pathlib.Path",
"pickle.load",
"generation.generation.ctrl",
"utils.perspective_api.unpack_scores",
"generation.generation.gpt2_ctrl",
"utils.utils.batchify",
"pandas.DataFrame",
"generation.generation.gpt3",
"click.command",
"click.Choice",
"generation.generation.gpt2_affect",
"click.exceptions.MissingParameter",
"generation.generation.openai_gpt",
"generation.generation.gpt2mcm",
"pandas.Series",
"generation.generation.gpt2",
"click.argument",
"utils.utils.load_jsonl",
"pandas.json_normalize",
"pandas.read_json",
"torch.cuda.empty_cache",
"generation.generation.pplm"
] |
[((1775, 1790), 'click.command', 'click.command', ([], {}), '()\n', (1788, 1790), False, 'import click\n'), ((1792, 1820), 'click.argument', 'click.argument', (['"""output-dir"""'], {}), "('output-dir')\n", (1806, 1820), False, 'import click\n'), ((1822, 1991), 'click.option', 'click.option', (['"""--dataset-file"""'], {'required': '(False)', 'type': 'str', 'help': '"""JSONL file containing prompts data. Each row must contain a prompt at `row["prompt"]["text"]`."""'}), '(\'--dataset-file\', required=False, type=str, help=\n \'JSONL file containing prompts data. Each row must contain a prompt at `row["prompt"]["text"]`.\'\n )\n', (1834, 1991), False, 'import click\n'), ((1997, 2117), 'click.option', 'click.option', (['"""--use-eos/--use-dataset"""'], {'default': '(False)', 'help': '"""Whether to use EOS or a dataset file for generation."""'}), "('--use-eos/--use-dataset', default=False, help=\n 'Whether to use EOS or a dataset file for generation.')\n", (2009, 2117), False, 'import click\n'), ((2114, 2217), 'click.option', 'click.option', (['"""--model"""'], {'required': '(True)', 'help': '"""Equivalent to `model_name_or_path` in transformers."""'}), "('--model', required=True, help=\n 'Equivalent to `model_name_or_path` in transformers.')\n", (2126, 2217), False, 'import click\n'), ((2308, 2360), 'click.option', 'click.option', (['"""--perspective-rate-limit"""'], {'default': '(25)'}), "('--perspective-rate-limit', default=25)\n", (2320, 2360), False, 'import click\n'), ((2362, 2474), 'click.option', 'click.option', (['"""--n"""'], {'default': '(25)', 'help': '"""Number of samples to generate for each prompt. When used with --eos"""'}), "('--n', default=25, help=\n 'Number of samples to generate for each prompt. When used with --eos')\n", (2374, 2474), False, 'import click\n'), ((2471, 2584), 'click.option', 'click.option', (['"""--max-tokens"""'], {'default': '(20)', 'help': '"""Number of tokens (usually BPE) to generate for each prompt."""'}), "('--max-tokens', default=20, help=\n 'Number of tokens (usually BPE) to generate for each prompt.')\n", (2483, 2584), False, 'import click\n'), ((2581, 2621), 'click.option', 'click.option', (['"""--batch-size"""'], {'default': '(32)'}), "('--batch-size', default=32)\n", (2593, 2621), False, 'import click\n'), ((2623, 2663), 'click.option', 'click.option', (['"""--start-index"""'], {'default': '(0)'}), "('--start-index', default=0)\n", (2635, 2663), False, 'import click\n'), ((2665, 2704), 'click.option', 'click.option', (['"""--end-index"""'], {'default': '(-1)'}), "('--end-index', default=-1)\n", (2677, 2704), False, 'import click\n'), ((2706, 2742), 'click.option', 'click.option', (['"""--mcm-t"""'], {'default': '(0.0)'}), "('--mcm-t', default=0.0)\n", (2718, 2742), False, 'import click\n'), ((2744, 2785), 'click.option', 'click.option', (['"""--mcm-keepmin"""'], {'default': '(30)'}), "('--mcm-keepmin', default=30)\n", (2756, 2785), False, 'import click\n'), ((2787, 2822), 'click.option', 'click.option', (['"""--top-k"""'], {'default': '(50)'}), "('--top-k', default=50)\n", (2799, 2822), False, 'import click\n'), ((2824, 2875), 'click.option', 'click.option', (['"""--resume/--no-resume"""'], {'default': '(False)'}), "('--resume/--no-resume', default=False)\n", (2836, 2875), False, 'import click\n'), ((2877, 2992), 'click.option', 'click.option', (['"""--usemcm/--no-usemcm"""'], {'default': '(True)', 'help': '"""Use mcm or not. When used with --model-type gpt2mcm"""'}), "('--usemcm/--no-usemcm', default=True, help=\n 'Use mcm or not. When used with --model-type gpt2mcm')\n", (2889, 2992), False, 'import click\n'), ((4794, 4810), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (4798, 4810), False, 'from pathlib import Path\n'), ((10479, 10503), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (10501, 10503), False, 'import torch\n'), ((1356, 1385), 'pandas.DataFrame', 'pd.DataFrame', (['generations_col'], {}), '(generations_col)\n', (1368, 1385), True, 'import pandas as pd\n'), ((3465, 3503), 'pandas.read_json', 'pd.read_json', (['dataset_file'], {'lines': '(True)'}), '(dataset_file, lines=True)\n', (3477, 3503), True, 'import pandas as pd\n'), ((5425, 5562), 'generation.generation.gpt2', 'gpt2', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'batch_size': 'batch_size', 'model_name_or_path': 'model', 'out_file': 'generations_file'}), '(prompts=prompts, max_len=max_tokens, num_samples=n, batch_size=\n batch_size, model_name_or_path=model, out_file=generations_file)\n', (5429, 5562), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((10656, 10684), 'utils.utils.load_jsonl', 'load_jsonl', (['perspective_file'], {}), '(perspective_file)\n', (10666, 10684), False, 'from utils.utils import load_jsonl, batchify\n'), ((2277, 2305), 'click.Choice', 'click.Choice', (['ALLOWED_MODELS'], {}), '(ALLOWED_MODELS)\n', (2289, 2305), False, 'import click\n'), ((3571, 3607), 'pandas.json_normalize', 'pd.json_normalize', (["dataset['prompt']"], {}), "(dataset['prompt'])\n", (3588, 3607), True, 'import pandas as pd\n'), ((4632, 4717), 'click.exceptions.MissingParameter', 'click.exceptions.MissingParameter', (['"""Missing --dataset-file or --use-eos option."""'], {}), "('Missing --dataset-file or --use-eos option.'\n )\n", (4665, 4717), False, 'import click\n'), ((5779, 5995), 'generation.generation.gpt2mcm', 'gpt2mcm', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'batch_size': 'batch_size', 'model_name_or_path': 'model', 'out_file': 'generations_file', 'threshold': 'mcm_t', 'k': 'top_k', 'min_token_number': 'mcm_keepmin', 'use_mcm': 'usemcm'}), '(prompts=prompts, max_len=max_tokens, num_samples=n, batch_size=\n batch_size, model_name_or_path=model, out_file=generations_file,\n threshold=mcm_t, k=top_k, min_token_number=mcm_keepmin, use_mcm=usemcm)\n', (5786, 5995), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((821, 856), 'utils.perspective_api.unpack_scores', 'unpack_scores', (["response['response']"], {}), "(response['response'])\n", (834, 856), False, 'from utils.perspective_api import PerspectiveWorker, unpack_scores\n'), ((1579, 1612), 'utils.utils.batchify', 'batchify', (['generations_col_iter', 'n'], {}), '(generations_col_iter, n)\n', (1587, 1612), False, 'from utils.utils import load_jsonl, batchify\n'), ((3830, 3856), 'pandas.Series', 'pd.Series', (['"""<|endoftext|>"""'], {}), "('<|endoftext|>')\n", (3839, 3856), True, 'import pandas as pd\n'), ((6361, 6498), 'generation.generation.gpt3', 'gpt3', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'batch_size': 'batch_size', 'model_name_or_path': 'model', 'out_file': 'generations_file'}), '(prompts=prompts, max_len=max_tokens, num_samples=n, batch_size=\n batch_size, model_name_or_path=model, out_file=generations_file)\n', (6365, 6498), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((3919, 3944), 'pandas.Series', 'pd.Series', (['"""<|nontoxic|>"""'], {}), "('<|nontoxic|>')\n", (3928, 3944), True, 'import pandas as pd\n'), ((6794, 6950), 'generation.generation.gpt2', 'gpt2', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': '(1)', 'batch_size': 'batch_size', 'model_name_or_path': 'model', 'out_file': 'generations_file', 'sample': '(False)'}), '(prompts=prompts, max_len=max_tokens, num_samples=1, batch_size=\n batch_size, model_name_or_path=model, out_file=generations_file, sample\n =False)\n', (6798, 6950), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((7428, 7601), 'generation.generation.gpt2', 'gpt2', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'batch_size': 'batch_size', 'model_name_or_path': 'model', 'out_file': 'generations_file', 'bad_words_ids': 'naughty_list_ids'}), '(prompts=prompts, max_len=max_tokens, num_samples=n, batch_size=\n batch_size, model_name_or_path=model, out_file=generations_file,\n bad_words_ids=naughty_list_ids)\n', (7432, 7601), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((7386, 7400), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7397, 7400), False, 'import pickle\n'), ((7850, 8037), 'generation.generation.gpt2_affect', 'gpt2_affect', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'batch_size': 'batch_size', 'model_name_or_path': 'model', 'out_file': 'generations_file', 'target_class': '(0)', 'num_classes': '(2)', 'beta': '(1)'}), '(prompts=prompts, max_len=max_tokens, num_samples=n, batch_size=\n batch_size, model_name_or_path=model, out_file=generations_file,\n target_class=0, num_classes=2, beta=1)\n', (7861, 8037), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((4089, 4102), 'pandas.Series', 'pd.Series', (['""""""'], {}), "('')\n", (4098, 4102), True, 'import pandas as pd\n'), ((8452, 8631), 'generation.generation.gpt2_ctrl', 'gpt2_ctrl', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'batch_size': 'batch_size', 'model_name_or_path': 'model', 'out_file': 'generations_file', 'prompt_ctrl_code': '"""<|nontoxic|>"""'}), "(prompts=prompts, max_len=max_tokens, num_samples=n, batch_size=\n batch_size, model_name_or_path=model, out_file=generations_file,\n prompt_ctrl_code='<|nontoxic|>')\n", (8461, 8631), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((4216, 4232), 'pandas.Series', 'pd.Series', (['"""<s>"""'], {}), "('<s>')\n", (4225, 4232), True, 'import pandas as pd\n'), ((8958, 9077), 'generation.generation.openai_gpt', 'openai_gpt', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'model_name_or_path': 'model', 'out_file': 'generations_file'}), '(prompts=prompts, max_len=max_tokens, num_samples=n,\n model_name_or_path=model, out_file=generations_file)\n', (8968, 9077), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((4351, 4366), 'pandas.Series', 'pd.Series', (['""". """'], {}), "('. ')\n", (4360, 4366), True, 'import pandas as pd\n'), ((9315, 9493), 'generation.generation.ctrl', 'ctrl', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'model_name_or_path': 'model', 'out_file': 'generations_file', 'ctrl_code': '"""Links"""', 'temperature': '(1.0)', 'repetition_penalty': '(1.2)'}), "(prompts=prompts, max_len=max_tokens, num_samples=n, model_name_or_path\n =model, out_file=generations_file, ctrl_code='Links', temperature=1.0,\n repetition_penalty=1.2)\n", (9319, 9493), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n'), ((4479, 4492), 'pandas.Series', 'pd.Series', (['""""""'], {}), "('')\n", (4488, 4492), True, 'import pandas as pd\n'), ((9806, 9987), 'generation.generation.pplm', 'pplm', ([], {'prompts': 'prompts', 'max_len': 'max_tokens', 'num_samples': 'n', 'batch_size': 'batch_size', 'class_label': '(0)', 'num_iterations': '(10)', 'model_name_or_path': '"""toxicity"""', 'out_file': 'generations_file'}), "(prompts=prompts, max_len=max_tokens, num_samples=n, batch_size=\n batch_size, class_label=0, num_iterations=10, model_name_or_path=\n 'toxicity', out_file=generations_file)\n", (9810, 9987), False, 'from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, openai_gpt, ctrl, pplm, gpt2mcm\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact: <EMAIL>
@software: PyCharm
@file: cam_sal_to_seed.py
@time: 2020/3/27 1:10
@desc:
"""
import numpy as np
from contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict import resolve_loc_cue_conflict_by_area_order
__all__ = ["cam_sal_to_seed"]
def cam_sal_to_seed(cam, sal, cls_in_label, cam_thresh, sal_thresh, ignore_label) -> np.ndarray:
"""Get localization cues with method in SEC paper
Perform hard threshold for each foreground class
Args:
cam: (H, W, num_class - 1) cam
sal: (H, W) saliency map
cls_in_label: list of foreground classes
cam_thresh: hard threshold to extract foreground class cues
sal_thresh: hard threshold to extract background class cues
ignore_label: ignore label in class cues
Returns:
(H, W) seed
"""
loc_cue_proposal = np.zeros(shape=(cam.shape[0], cam.shape[1], cam.shape[2] + 1), dtype=np.int) # (H, W, num_class)
for cls_idx in range(1, len(cls_in_label)):
if cls_in_label[cls_idx] == 1:
heat_map = cam[:, :, cls_idx - 1]
loc_cue_proposal[:, :, cls_idx] = heat_map > cam_thresh * np.amax(heat_map)
if cls_in_label[0] == 1:
loc_cue_proposal[:, :, 0] = sal < sal_thresh
# handle conflict seed
seed = resolve_loc_cue_conflict_by_area_order(loc_cue_proposal, ignore_label, train_boat=True)
return seed
|
[
"numpy.amax",
"numpy.zeros",
"contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict.resolve_loc_cue_conflict_by_area_order"
] |
[((916, 992), 'numpy.zeros', 'np.zeros', ([], {'shape': '(cam.shape[0], cam.shape[1], cam.shape[2] + 1)', 'dtype': 'np.int'}), '(shape=(cam.shape[0], cam.shape[1], cam.shape[2] + 1), dtype=np.int)\n', (924, 992), True, 'import numpy as np\n'), ((1357, 1448), 'contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict.resolve_loc_cue_conflict_by_area_order', 'resolve_loc_cue_conflict_by_area_order', (['loc_cue_proposal', 'ignore_label'], {'train_boat': '(True)'}), '(loc_cue_proposal, ignore_label,\n train_boat=True)\n', (1395, 1448), False, 'from contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict import resolve_loc_cue_conflict_by_area_order\n'), ((1217, 1234), 'numpy.amax', 'np.amax', (['heat_map'], {}), '(heat_map)\n', (1224, 1234), True, 'import numpy as np\n')]
|
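A minimal usage sketch for the `cam_sal_to_seed` function above, on synthetic inputs. The shapes, class count, and thresholds are illustrative assumptions, the `contrib` package from the record must be importable for the conflict-resolution step, and `dtype=np.int` in the function body requires NumPy < 1.24.

import numpy as np

num_fg_classes = 20                           # assumed number of foreground classes
cam = np.random.rand(32, 32, num_fg_classes)  # synthetic class activation maps
sal = np.random.rand(32, 32)                  # synthetic saliency map in [0, 1]
cls_in_label = np.zeros(num_fg_classes + 1, dtype=int)
cls_in_label[[0, 5]] = 1                      # background plus one foreground class present

seed = cam_sal_to_seed(cam, sal, cls_in_label,
                       cam_thresh=0.2, sal_thresh=0.06, ignore_label=255)
print(seed.shape)                             # (32, 32) per the docstring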
"""This module represents various utilities and structures for image manipulation
__author__ = <NAME> (www.sashaouellet.com)
__version__ = 1.0.0
__date__ = 11/27/17
"""
import hou
import os
import imghdr
import subprocess
class ImageType():
EXR = '.exr'
RAT = '.rat'
HDR = '.hdr'
JPG = '.jpg'
PNG = '.png'
TIFF = '.tiff'
ALTERNATE_IMAGE_EXTS = [ImageType.RAT, ImageType.HDR]
def convertImage(file, maxDim, scale, ext):
"""Converts the given absolute file path to the given extension, using the icp command from $HFS/bin
Args:
file (str): The absolute file path of the image to convert. The converted image will
have the same path/filename, but with the given extension instead
		maxDim (float): The maximum dimension of either side of the output image. If the
			image (after scaling) still exceeds this dimension, it will be further scaled
			down
		scale (float): The initial scale factor to apply to the output image. The final calculated
scale gets passed to icp with the -s flag
ext (sdm.houdini.image.ImageType): The image type to convert to
Returns:
str: The path to the outputted file
"""
args = [os.path.join(hou.getenv('HFS'), 'bin', 'icp')]
scale /= 100.0
resolution = hou.imageResolution(file)
width = float(resolution[0]) * scale
height = float(resolution[1]) * scale
resizeFactor = 1.0
if maxDim != -1: # Only calculate if user hasn't selected 'None'
resizeFactor = float(maxDim) / width
resizeFactor = min(resizeFactor, float(maxDim) / height) # if a smaller ratio, use that
if resizeFactor < 1.0: # only want to scale down
scale *= resizeFactor
args.append('-u') # uncompressed, if supported
args.append('-s')
args.append(str(float(scale * 100)))
newPath = os.path.splitext(file)[0] + ext
args.append(file)
args.append(newPath)
subprocess.call(args)
return newPath
def isImage(file):
"""Determines if the given absolute file path points to an image filetype
Args:
file (str): The absolute path of the file to test
Returns:
bool: True if the file is an image, otherwise False
"""
if imghdr.what(file) is not None:
return True
path, ext = os.path.splitext(file)
if ext in ALTERNATE_IMAGE_EXTS:
return True
return False
|
[
"imghdr.what",
"subprocess.call",
"os.path.splitext",
"hou.getenv",
"hou.imageResolution"
] |
[((1257, 1282), 'hou.imageResolution', 'hou.imageResolution', (['file'], {}), '(file)\n', (1276, 1282), False, 'import hou\n'), ((1847, 1868), 'subprocess.call', 'subprocess.call', (['args'], {}), '(args)\n', (1862, 1868), False, 'import subprocess\n'), ((2210, 2232), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (2226, 2232), False, 'import os\n'), ((2142, 2159), 'imghdr.what', 'imghdr.what', (['file'], {}), '(file)\n', (2153, 2159), False, 'import imghdr\n'), ((1193, 1210), 'hou.getenv', 'hou.getenv', (['"""HFS"""'], {}), "('HFS')\n", (1203, 1210), False, 'import hou\n'), ((1771, 1793), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1787, 1793), False, 'import os\n')]
|
import cv2
from xml.etree import ElementTree as ET
import matplotlib.pyplot as plt
img = cv2.imread('/mnt/6B133E147DED759E/VOCdevkit/VOC2007/JPEGImages/img_1777.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
anno = ET.parse('/mnt/6B133E147DED759E/VOCdevkit/VOC2007/Annotations/img_1777.xml')
obj_node = anno.iter("object")  # getiterator() was deprecated and removed in Python 3.9
rects = []
for obj in obj_node:
bndbox = obj.find('bndbox')
xmin = bndbox.find('xmin')
ymin = bndbox.find('ymin')
xmax = bndbox.find('xmax')
ymax = bndbox.find('ymax')
rects.append(((int(xmin.text), int(ymin.text)), (int(xmax.text), int(ymax.text))))
for r in rects:
cv2.rectangle(img, r[0], r[1], (0,255,0),1)
plt.imshow(img)
plt.show()
|
[
"xml.etree.ElementTree.parse",
"matplotlib.pyplot.show",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"cv2.imread",
"cv2.rectangle"
] |
[((92, 169), 'cv2.imread', 'cv2.imread', (['"""/mnt/6B133E147DED759E/VOCdevkit/VOC2007/JPEGImages/img_1777.jpg"""'], {}), "('/mnt/6B133E147DED759E/VOCdevkit/VOC2007/JPEGImages/img_1777.jpg')\n", (102, 169), False, 'import cv2\n'), ((176, 212), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (188, 212), False, 'import cv2\n'), ((221, 297), 'xml.etree.ElementTree.parse', 'ET.parse', (['"""/mnt/6B133E147DED759E/VOCdevkit/VOC2007/Annotations/img_1777.xml"""'], {}), "('/mnt/6B133E147DED759E/VOCdevkit/VOC2007/Annotations/img_1777.xml')\n", (229, 297), True, 'from xml.etree import ElementTree as ET\n'), ((678, 693), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (688, 693), True, 'import matplotlib.pyplot as plt\n'), ((694, 704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (702, 704), True, 'import matplotlib.pyplot as plt\n'), ((633, 679), 'cv2.rectangle', 'cv2.rectangle', (['img', 'r[0]', 'r[1]', '(0, 255, 0)', '(1)'], {}), '(img, r[0], r[1], (0, 255, 0), 1)\n', (646, 679), False, 'import cv2\n')]
|
from cocoex import default_observers
from cocoex import Observer
from cocoex import Suite
from cocoex.utilities import ObserverOptions
from tqdm import tqdm
from typing import Callable # NOQA
from typing import Optional # NOQA
class Experiment(object):
def __init__(self,
solver,
suite_name="bbob",
suite_instance="",
suite_options="dimensions: 2,3",
algorithm_name=None):
# type: (Callable, str, str, str, Optional[str]) -> None
self._solver = solver
self._suite_name = suite_name
self._suite_instance = suite_instance
self._suite_options = suite_options
default_algorithm_name = '{}({})'.format(solver.__name__, solver.__module__)
self._algorithm_name = algorithm_name or default_algorithm_name
def run(self, budget=100, current_batch=1, number_of_batches=15):
# type: (int, int, int) -> None
suite = Suite(self._suite_name, self._suite_instance,
self._suite_options)
observer_name = default_observers()[self._suite_name]
observer_options = self._build_observer_options(budget)
observer = Observer(observer_name, observer_options.as_string)
for problem_index, problem in enumerate(tqdm(suite)):
if (problem_index % number_of_batches) != current_batch - 1:
continue
observer.observe(problem)
max_evals = budget * problem.dimension
self._solver(problem,
problem.lower_bounds,
problem.upper_bounds,
max_evals - problem.evaluations_constraints,
verbose=False)
def _build_observer_options(self, budget):
# type: (int) -> ObserverOptions
opts = {
'result_folder':
'"%s/on_%s_budget%04dxD"' %
(self._algorithm_name, self._suite_name, budget),
'algorithm_name': self._algorithm_name
}
return ObserverOptions(opts)
|
[
"tqdm.tqdm",
"cocoex.Observer",
"cocoex.utilities.ObserverOptions",
"cocoex.Suite",
"cocoex.default_observers"
] |
[((976, 1042), 'cocoex.Suite', 'Suite', (['self._suite_name', 'self._suite_instance', 'self._suite_options'], {}), '(self._suite_name, self._suite_instance, self._suite_options)\n', (981, 1042), False, 'from cocoex import Suite\n'), ((1212, 1263), 'cocoex.Observer', 'Observer', (['observer_name', 'observer_options.as_string'], {}), '(observer_name, observer_options.as_string)\n', (1220, 1263), False, 'from cocoex import Observer\n'), ((2068, 2089), 'cocoex.utilities.ObserverOptions', 'ObserverOptions', (['opts'], {}), '(opts)\n', (2083, 2089), False, 'from cocoex.utilities import ObserverOptions\n'), ((1090, 1109), 'cocoex.default_observers', 'default_observers', ([], {}), '()\n', (1107, 1109), False, 'from cocoex import default_observers\n'), ((1313, 1324), 'tqdm.tqdm', 'tqdm', (['suite'], {}), '(suite)\n', (1317, 1324), False, 'from tqdm import tqdm\n')]
|
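A sketch of plugging a toy solver into the `Experiment` class above. It assumes the `cocoex` benchmarking package and its bbob suite are available; the random-search solver is illustrative only, and simply matches the call signature used inside `run()`.

import numpy as np

def random_search(problem, lower_bounds, upper_bounds, budget, verbose=False):
    """Illustrative solver: sample uniformly inside the box constraints."""
    for _ in range(int(budget)):
        x = np.random.uniform(lower_bounds, upper_bounds)
        problem(x)  # each evaluation is logged by the COCO observer

experiment = Experiment(random_search, suite_options="dimensions: 2")
experiment.run(budget=10, current_batch=1, number_of_batches=1)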
#!/usr/bin/env python
# coding: utf-8
'''
Poor man's unit test which goes through each of the example doctests in
docs/examples/ and makes sure they actually work as advertised.
'''
# import raws, pydwarf; df = pydwarf.df(raws)
import sys
import os
pydwarf_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))
sys.path.append(pydwarf_root)
sys.path.append(os.path.join(pydwarf_root, 'lib'))
import inspect
import doctest
import re
import raws
import pydwarf
from examples import examples
def verify(examples, skipreset=False, **globs):
docparser = doctest.DocTestParser()
docrunner = doctest.DocTestRunner()
results = []
testnum = 0
df = globs['df']
for example in examples:
if skipreset and 'reset' in example['flags']:
print('Skipping example %s' % example['name'])
else:
print('Running example %s' % example['name'])
testnum += 1
# Create the doctest object
test = docparser.get_doctest(
string = example['text'],
globs = globs,
name = example['name'],
filename = None,
lineno = None
)
# Actually run the test
resultcount = len(results)
docrunner.run(
test = test,
out = lambda result: results.append(result),
clear_globs = False
)
# Handle flags
if 'reset' in example['flags']:
print('Resetting df raws.')
df.reset()
return results
doctest_pattern = (
    r'(?s)\*+\n'
    r'Line (?P<line>\d+), in (?P<name>.*)\n'
    'Failed example:\n'
    '(?P<text>.*)\n'
    '('
    'Expected:\n'
    '(?P<expected>.*)\n'
    'Got:\n'
    '(?P<got>.*?)'
    '|'
    'Exception raised:\n'
    '(?P<exception>.*)'
    ')'
    r'\s*$'
)
doctest_result_re = re.compile(doctest_pattern)
if __name__ == '__main__':
print('Initializing session.')
conf = pydwarf.config.load(root=pydwarf_root, args={
'log': '',
'verbose': False,
})
session = pydwarf.session(raws, conf)
print('Running examples.')
results = verify(
examples,
df = session.df,
raws = raws,
pydwarf = pydwarf,
session = session,
conf = conf
)
realresults = []
lastfailurein = None
for result in results:
match = doctest_result_re.match(result.expandtabs(4))
if match:
groups = match.groupdict()
if groups['got'] and groups['expected']:
ignore = groups['got'].strip() == groups['expected'].strip()
else:
ignore = False
if groups['name'] == lastfailurein:
ignore = True
else:
lastfailurein = groups['name']
if not ignore: realresults.append(result)
if realresults:
resultstext = '\n\n'.join(reversed(realresults))
print(resultstext)
else:
print('Successfully ran all %d examples.' % len(examples))
|
[
"sys.path.append",
"pydwarf.session",
"os.path.abspath",
"pydwarf.config.load",
"doctest.DocTestParser",
"doctest.DocTestRunner",
"os.path.join",
"re.compile"
] |
[((359, 388), 'sys.path.append', 'sys.path.append', (['pydwarf_root'], {}), '(pydwarf_root)\n', (374, 388), False, 'import sys\n'), ((2080, 2107), 're.compile', 're.compile', (['doctest_pattern'], {}), '(doctest_pattern)\n', (2090, 2107), False, 'import re\n'), ((405, 438), 'os.path.join', 'os.path.join', (['pydwarf_root', '"""lib"""'], {}), "(pydwarf_root, 'lib')\n", (417, 438), False, 'import os\n'), ((631, 654), 'doctest.DocTestParser', 'doctest.DocTestParser', ([], {}), '()\n', (652, 654), False, 'import doctest\n'), ((671, 694), 'doctest.DocTestRunner', 'doctest.DocTestRunner', ([], {}), '()\n', (692, 694), False, 'import doctest\n'), ((2184, 2258), 'pydwarf.config.load', 'pydwarf.config.load', ([], {'root': 'pydwarf_root', 'args': "{'log': '', 'verbose': False}"}), "(root=pydwarf_root, args={'log': '', 'verbose': False})\n", (2203, 2258), False, 'import pydwarf\n'), ((2296, 2323), 'pydwarf.session', 'pydwarf.session', (['raws', 'conf'], {}), '(raws, conf)\n', (2311, 2323), False, 'import pydwarf\n'), ((321, 346), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (336, 346), False, 'import os\n')]
|
from django.db import models
# Create your models here.
class Employee(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=20)
sal = models.DecimalField(max_digits=10, decimal_places=3)
def __str__(self):
return f'Employee object with ID: {self.id}, Name: {self.name} and Salary: {self.sal}'
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.DecimalField"
] |
[((96, 133), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (115, 133), False, 'from django.db import models\n'), ((145, 176), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (161, 176), False, 'from django.db import models\n'), ((187, 239), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(3)'}), '(max_digits=10, decimal_places=3)\n', (206, 239), False, 'from django.db import models\n')]
|
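A quick ORM sketch for the `Employee` model above, e.g. from `python manage.py shell`; it assumes the app is installed and migrated, and the sample values are made up.

from decimal import Decimal

emp = Employee.objects.create(id=1, name='Asha', sal=Decimal('55000.500'))
print(emp)  # rendered by the __str__ defined above

well_paid = Employee.objects.filter(sal__gte=Decimal('50000'))
print(well_paid.count())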
from vic.vic import ffi
from vic import lib as vic_lib
def test_calc_snow_coverage_no_change():
store_snow = ffi.new('_Bool *')
max_snow_depth = ffi.new('double *')
store_swq = ffi.new('double *')
snow_distrib_slope = ffi.new('double *')
store_coverage = ffi.new('double *')
old_coverage = 0.75
coverage = vic_lib.calc_snow_coverage(
store_snow, 0.5, old_coverage, 1.25, 1.25, 2.3, 2.3, 0.,
max_snow_depth, 0., store_swq, snow_distrib_slope, store_coverage)
assert coverage == old_coverage
def test_calc_snow_coverage_increased():
store_snow = ffi.new('_Bool *')
store_snow[0] = True
max_snow_depth = ffi.new('double *')
max_snow_depth[0] = 3.
store_swq = ffi.new('double *')
store_swq[0] = 0.5
snow_distrib_slope = ffi.new('double *')
snow_distrib_slope[0] = 0.5
store_coverage = ffi.new('double *')
store_coverage[0] = 0.75
old_coverage = 0.75
coverage = vic_lib.calc_snow_coverage(
store_snow, 0.5, old_coverage, 1.25, 1.5, 2.3, 3., 0., max_snow_depth,
0.25, store_swq, snow_distrib_slope, store_coverage)
assert coverage > old_coverage
|
[
"vic.vic.ffi.new",
"vic.lib.calc_snow_coverage"
] |
[((115, 133), 'vic.vic.ffi.new', 'ffi.new', (['"""_Bool *"""'], {}), "('_Bool *')\n", (122, 133), False, 'from vic.vic import ffi\n'), ((155, 174), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (162, 174), False, 'from vic.vic import ffi\n'), ((191, 210), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (198, 210), False, 'from vic.vic import ffi\n'), ((236, 255), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (243, 255), False, 'from vic.vic import ffi\n'), ((277, 296), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (284, 296), False, 'from vic.vic import ffi\n'), ((336, 497), 'vic.lib.calc_snow_coverage', 'vic_lib.calc_snow_coverage', (['store_snow', '(0.5)', 'old_coverage', '(1.25)', '(1.25)', '(2.3)', '(2.3)', '(0.0)', 'max_snow_depth', '(0.0)', 'store_swq', 'snow_distrib_slope', 'store_coverage'], {}), '(store_snow, 0.5, old_coverage, 1.25, 1.25, 2.3, \n 2.3, 0.0, max_snow_depth, 0.0, store_swq, snow_distrib_slope,\n store_coverage)\n', (362, 497), True, 'from vic import lib as vic_lib\n'), ((600, 618), 'vic.vic.ffi.new', 'ffi.new', (['"""_Bool *"""'], {}), "('_Bool *')\n", (607, 618), False, 'from vic.vic import ffi\n'), ((665, 684), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (672, 684), False, 'from vic.vic import ffi\n'), ((728, 747), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (735, 747), False, 'from vic.vic import ffi\n'), ((796, 815), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (803, 815), False, 'from vic.vic import ffi\n'), ((869, 888), 'vic.vic.ffi.new', 'ffi.new', (['"""double *"""'], {}), "('double *')\n", (876, 888), False, 'from vic.vic import ffi\n'), ((957, 1118), 'vic.lib.calc_snow_coverage', 'vic_lib.calc_snow_coverage', (['store_snow', '(0.5)', 'old_coverage', '(1.25)', '(1.5)', '(2.3)', '(3.0)', '(0.0)', 'max_snow_depth', '(0.25)', 'store_swq', 'snow_distrib_slope', 'store_coverage'], {}), '(store_snow, 0.5, old_coverage, 1.25, 1.5, 2.3, \n 3.0, 0.0, max_snow_depth, 0.25, store_swq, snow_distrib_slope,\n store_coverage)\n', (983, 1118), True, 'from vic import lib as vic_lib\n')]
|
from typing import Optional, List
from pydantic import Field, validator
from bug_killer_api_interface.test.test_doubles.default_values import mock_project_title, mock_project_description, \
mock_project_tags, mock_member_id
from bug_killer_app.test.test_doubles.default_values import mock_manager_id
from bug_killer_utils.collections import remove_duplicates_in_list
from bug_killer_utils.model.bk_base_model import BkBaseModel
class CreateProjectPayload(BkBaseModel):
""" Payload used to create a new project """
title: str = Field(description='The title of the project to create')
description: str = Field(description='The title of the project to create')
members: List[str] = Field(
default_factory=list,
description='List of members to be added to the project. It should be a list of cognito user ids'
)
tags: List[str] = Field(default_factory=list, description='List of tags to be added to the project to create')
@validator('members', 'tags', pre=True)
def set_values(cls, value: List[str]) -> List[str]:
return sorted(remove_duplicates_in_list(value))
@classmethod
def test_double( # type: ignore[override]
cls, *,
title: Optional[str] = None,
description: Optional[str] = None,
members: Optional[List[str]] = None,
tags: Optional[List[str]] = None
) -> 'CreateProjectPayload':
return cls(
title=title or mock_project_title,
description=description or mock_project_description,
members=members or [mock_member_id],
tags=tags or mock_project_tags
)
class UpdateProjectPayload(BkBaseModel):
""" Payload used to update an existing project """
title: Optional[str] = Field(None, description='The new title to set')
description: Optional[str] = Field(None, description='The new description to set')
manager: Optional[str] = Field(None, description='The cognito user id of the new manager of the project')
members: Optional[List[str]] = Field(None, description='The new list of members that the project should have')
tags: Optional[List[str]] = Field(None, description='The new list of tags to set')
@validator('members', 'tags')
def set_values(cls, value: Optional[List[str]]) -> Optional[List[str]]:
if value is None:
return None
return sorted(remove_duplicates_in_list(value))
@classmethod
def test_double( # type: ignore[override]
cls, *,
title: Optional[str] = None,
description: Optional[str] = None,
manager: Optional[str] = None,
members: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
) -> 'UpdateProjectPayload':
return cls(
title=title or mock_project_title,
description=description or mock_project_description,
manager=manager or mock_manager_id,
members=members or [mock_member_id],
tags=tags or mock_project_tags
)
|
[
"pydantic.Field",
"pydantic.validator",
"bug_killer_utils.collections.remove_duplicates_in_list"
] |
[((543, 598), 'pydantic.Field', 'Field', ([], {'description': '"""The title of the project to create"""'}), "(description='The title of the project to create')\n", (548, 598), False, 'from pydantic import Field, validator\n'), ((622, 677), 'pydantic.Field', 'Field', ([], {'description': '"""The title of the project to create"""'}), "(description='The title of the project to create')\n", (627, 677), False, 'from pydantic import Field, validator\n'), ((703, 839), 'pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""List of members to be added to the project. It should be a list of cognito user ids"""'}), "(default_factory=list, description=\n 'List of members to be added to the project. It should be a list of cognito user ids'\n )\n", (708, 839), False, 'from pydantic import Field, validator\n'), ((874, 971), 'pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""List of tags to be added to the project to create"""'}), "(default_factory=list, description=\n 'List of tags to be added to the project to create')\n", (879, 971), False, 'from pydantic import Field, validator\n'), ((973, 1011), 'pydantic.validator', 'validator', (['"""members"""', '"""tags"""'], {'pre': '(True)'}), "('members', 'tags', pre=True)\n", (982, 1011), False, 'from pydantic import Field, validator\n'), ((1783, 1830), 'pydantic.Field', 'Field', (['None'], {'description': '"""The new title to set"""'}), "(None, description='The new title to set')\n", (1788, 1830), False, 'from pydantic import Field, validator\n'), ((1864, 1917), 'pydantic.Field', 'Field', (['None'], {'description': '"""The new description to set"""'}), "(None, description='The new description to set')\n", (1869, 1917), False, 'from pydantic import Field, validator\n'), ((1947, 2032), 'pydantic.Field', 'Field', (['None'], {'description': '"""The cognito user id of the new manager of the project"""'}), "(None, description='The cognito user id of the new manager of the project'\n )\n", (1952, 2032), False, 'from pydantic import Field, validator\n'), ((2063, 2142), 'pydantic.Field', 'Field', (['None'], {'description': '"""The new list of members that the project should have"""'}), "(None, description='The new list of members that the project should have')\n", (2068, 2142), False, 'from pydantic import Field, validator\n'), ((2175, 2229), 'pydantic.Field', 'Field', (['None'], {'description': '"""The new list of tags to set"""'}), "(None, description='The new list of tags to set')\n", (2180, 2229), False, 'from pydantic import Field, validator\n'), ((2236, 2264), 'pydantic.validator', 'validator', (['"""members"""', '"""tags"""'], {}), "('members', 'tags')\n", (2245, 2264), False, 'from pydantic import Field, validator\n'), ((1090, 1122), 'bug_killer_utils.collections.remove_duplicates_in_list', 'remove_duplicates_in_list', (['value'], {}), '(value)\n', (1115, 1122), False, 'from bug_killer_utils.collections import remove_duplicates_in_list\n'), ((2413, 2445), 'bug_killer_utils.collections.remove_duplicates_in_list', 'remove_duplicates_in_list', (['value'], {}), '(value)\n', (2438, 2445), False, 'from bug_killer_utils.collections import remove_duplicates_in_list\n')]
|
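A small sketch of the validators above: duplicate members and tags are dropped and the lists come back sorted. The ids and tags are invented, and the `bug_killer_*` packages from the record must be importable.

payload = CreateProjectPayload(
    title='Bug Killer',
    description='Track and squash bugs',
    members=['user-2', 'user-1', 'user-2'],  # hypothetical cognito user ids
    tags=['backend', 'api', 'backend'],
)
print(payload.members)  # ['user-1', 'user-2'] -- deduplicated and sorted
print(payload.tags)     # ['api', 'backend']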
import os
import traceback
from collections import defaultdict
from click.testing import CliRunner
from ocdsindex.cli.__main__ import main
from tests import elasticsearch, search
def test_copy(tmpdir):
host = os.getenv("ELASTICSEARCH_URL", "localhost:9200")
runner = CliRunner()
with elasticsearch(host) as es:
result = runner.invoke(main, ["index", host, os.path.join("tests", "fixtures", "data.json")])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
assert result.output == ""
es.indices.refresh("ocdsindex_en")
es.indices.refresh("ocdsindex_es")
source = "https://standard.open-contracting.org/dev/"
destination = "https://standard.open-contracting.org/copy/"
result = runner.invoke(main, ["copy", host, source, destination])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
assert result.output == ""
for index, value in (("ocdsindex_en", 8), ("ocdsindex_es", 1)):
hits = search(es, index)
counts = defaultdict(int)
for hit in hits["hits"]:
counts[hit["_source"]["base_url"]] += 1
assert counts == {
source: value,
destination: value,
}
assert hits["total"]["value"] == value * 2
|
[
"tests.elasticsearch",
"tests.search",
"collections.defaultdict",
"traceback.print_exception",
"click.testing.CliRunner",
"os.path.join",
"os.getenv"
] |
[((217, 265), 'os.getenv', 'os.getenv', (['"""ELASTICSEARCH_URL"""', '"""localhost:9200"""'], {}), "('ELASTICSEARCH_URL', 'localhost:9200')\n", (226, 265), False, 'import os\n'), ((280, 291), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (289, 291), False, 'from click.testing import CliRunner\n'), ((302, 321), 'tests.elasticsearch', 'elasticsearch', (['host'], {}), '(host)\n', (315, 321), False, 'from tests import elasticsearch, search\n'), ((470, 513), 'traceback.print_exception', 'traceback.print_exception', (['*result.exc_info'], {}), '(*result.exc_info)\n', (495, 513), False, 'import traceback\n'), ((881, 924), 'traceback.print_exception', 'traceback.print_exception', (['*result.exc_info'], {}), '(*result.exc_info)\n', (906, 924), False, 'import traceback\n'), ((1052, 1069), 'tests.search', 'search', (['es', 'index'], {}), '(es, index)\n', (1058, 1069), False, 'from tests import elasticsearch, search\n'), ((1091, 1107), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1102, 1107), False, 'from collections import defaultdict\n'), ((382, 428), 'os.path.join', 'os.path.join', (['"""tests"""', '"""fixtures"""', '"""data.json"""'], {}), "('tests', 'fixtures', 'data.json')\n", (394, 428), False, 'import os\n')]
|
from django.views.generic import TemplateView
from wagtail.images.models import Image as WagtailImage
class MainView(TemplateView):
template_name = "index.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["img"] = WagtailImage.objects.first()
return context
|
[
"wagtail.images.models.Image.objects.first"
] |
[((289, 317), 'wagtail.images.models.Image.objects.first', 'WagtailImage.objects.first', ([], {}), '()\n', (315, 317), True, 'from wagtail.images.models import Image as WagtailImage\n')]
|
"""Contains a Graph Attention Network v2 and associated layers."""
from typing import Any, Callable, Mapping, Optional, Union
import tensorflow as tf
import tensorflow_gnn as tfgnn
@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")
class GATv2Conv(tfgnn.keras.layers.AnyToAnyConvolutionBase):
"""The multi-head attention from Graph Attention Networks v2 (GATv2).
GATv2 (https://arxiv.org/abs/2105.14491) improves upon the popular
GAT architecture (https://arxiv.org/abs/1710.10903) by allowing the network
to compute a more expressive "dynamic" instead of just "static" attention,
each of whose heads is described by Equations (7), (3) and (4) in
https://arxiv.org/abs/2105.14491.
Example: GATv2-style attention on incoming edges whose result is
concatenated with the old node state and passed through a Dense layer
to compute the new node state.
```
dense = tf.keras.layers.Dense
graph = tfgnn.keras.layers.GraphUpdate(
node_sets={"paper": tfgnn.keras.layers.NodeSetUpdate(
{"cites": tfgnn.keras.layers.GATv2Conv(
message_dim, receiver_tag=tfgnn.TARGET)},
tfgnn.keras.layers.NextStateFromConcat(dense(node_state_dim)))}
)(graph)
```
This layer implements the multi-head attention of GATv2 with the following
generalizations:
* This implementation of GATv2 attends only to edges that are explicitly
stored in the input GraphTensor. Attention of a node to itself is
enabled or disabled by storing or not storing an explicit loop in the
edge set. The example above uses a separate layer to combine the old
node state with the attention result to form the new node state.
* Attention values can be computed from a sender node state that gets
broadcast onto the edge (see arg `sender_node_feature`), from an edge
feature (see arg `sender_edge_feature`), or from their concatenation
(by setting both arguments). This choice is used in place of the sender
node state $h_j$ in the defining equations cited above.
* This layer can be used with `receiver_tag=tfgnn.CONTEXT` to perform a
convolution to the context, with graph components as receivers and the
containment in graph components used in lieu of edges.
* An `edge_dropout` option is provided.
This layer can also be configured to do attention pooling from edges to
context or to receiver nodes (without regard for source nodes) by setting
`sender_node_feature=None` and setting `sender_edge_feature=...` to the
applicable edge feature name (e.g., `tfgnn.DEFAULT_FEATURE_NAME`).
Like the Keras Dense layer, if the input features have rank greater than 2,
this layer computes a point-wise attention along the last axis of the inputs.
For example, if the input features have shape [num_nodes, 2, 4, 1], then it
will perform an identical computation on each of the num_nodes * 2 * 4 input
values.
Init args:
num_heads: The number of attention heads.
per_head_channels: The number of channels for each attention head. This
means that the final output size will be per_head_channels * num_heads.
receiver_tag: one of `tfgnn.SOURCE`, `tfgnn.TARGET` or `tfgnn.CONTEXT`.
The results of attention are aggregated for this graph piece.
If set to `tfgnn.SOURCE` or `tfgnn.TARGET`, the layer can be called for
an edge set and will aggregate results at the specified endpoint of the
edges.
If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or
node set.
If left unset for init, the tag must be passed at call time.
receiver_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME`
for use as the receiver's input feature to attention. (The attention key
is derived from this input.)
sender_node_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME`
for use as the input feature from sender nodes to attention.
      IMPORTANT: Must be set to `None` for use with `receiver_tag=tfgnn.CONTEXT`
on an edge set, or for pooling from edges without sender node states.
sender_edge_feature: Can be set to a feature name of the edge set to select
      it as an input feature. By default, this is set to `None`, which disables
this input.
IMPORTANT: Must be set for use with `receiver_tag=tfgnn.CONTEXT`
on an edge set.
use_bias: If true, a bias term is added to the transformations of query and
value inputs.
edge_dropout: Can be set to a dropout rate for edge dropout. (When pooling
nodes to context, it's the node's membership in a graph component that
is dropped out.)
attention_activation: The nonlinearity used on the transformed inputs
before multiplying with the trained weights of the attention layer.
This can be specified as a Keras layer, a tf.keras.activations.*
function, or a string understood by tf.keras.layers.Activation().
Defaults to "leaky_relu", which in turn defaults to a negative slope
of alpha=0.2.
activation: The nonlinearity applied to the final result of attention,
specified in the same ways as attention_activation.
    kernel_initializer: Can be set to a `kernel_initializer` as understood
by tf.keras.layers.Dense etc.
"""
def __init__(self,
*,
num_heads: int,
per_head_channels: int,
receiver_tag: Optional[tfgnn.IncidentNodeOrContextTag] = None,
receiver_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
sender_node_feature: Optional[
tfgnn.FieldName] = tfgnn.HIDDEN_STATE,
sender_edge_feature: Optional[tfgnn.FieldName] = None,
use_bias: bool = True,
edge_dropout: float = 0.,
attention_activation: Union[str,
Callable[..., Any]] = "leaky_relu",
activation: Union[str, Callable[..., Any]] = "relu",
kernel_initializer: Union[
None, str, tf.keras.initializers.Initializer] = None,
**kwargs):
kwargs.setdefault("name", "gat_v2_conv")
super().__init__(
receiver_tag=receiver_tag,
receiver_feature=receiver_feature,
sender_node_feature=sender_node_feature,
sender_edge_feature=sender_edge_feature,
extra_receiver_ops={"softmax": tfgnn.softmax},
**kwargs)
if not self.takes_receiver_input:
raise ValueError("Receiver feature cannot be None")
if num_heads <= 0:
raise ValueError(f"Number of heads {num_heads} must be greater than 0.")
self._num_heads = num_heads
if per_head_channels <= 0:
raise ValueError(
f"Per-head channels {per_head_channels} must be greater than 0.")
self._per_head_channels = per_head_channels
self._use_bias = use_bias
if not 0 <= edge_dropout < 1:
raise ValueError(f"Edge dropout {edge_dropout} must be in [0, 1).")
self._edge_dropout = edge_dropout
self._attention_activation = tf.keras.activations.get(attention_activation)
self._activation = tf.keras.activations.get(activation)
self._kernel_initializer = kernel_initializer
# Create the transformations for the query input in all heads.
self._w_query = tf.keras.layers.Dense(
per_head_channels * num_heads,
kernel_initializer=kernel_initializer,
# This bias gets added to the attention features but not the outputs.
use_bias=use_bias,
name="query")
# Create the transformations for value input from sender nodes and edges.
if self.takes_sender_node_input:
self._w_sender_node = tf.keras.layers.Dense(
per_head_channels * num_heads,
kernel_initializer=kernel_initializer,
# This bias gets added to the attention features and the outputs.
use_bias=use_bias,
name="value_node")
else:
self._w_sender_node = None
if self.takes_sender_edge_input:
self._w_sender_edge = tf.keras.layers.Dense(
per_head_channels * num_heads,
kernel_initializer=kernel_initializer,
# This bias would be redundant with self._w_sender_node.
use_bias=use_bias and self._w_sender_node is None,
name="value_edge")
else:
self._w_sender_edge = None
if self._w_sender_node is None and self._w_sender_edge is None:
raise ValueError("GATv2Attention initialized with no inputs.")
# Create attention logits layers, one for each head. Note that we can't
# use a single Dense layer that outputs `num_heads` units because we need
# to apply a different attention function a_k to its corresponding
# W_k-transformed features.
self._attention_logits_fn = tf.keras.layers.experimental.EinsumDense(
"...ik,ki->...i",
output_shape=(None, num_heads, 1), # TODO(b/205825425): (num_heads,)
kernel_initializer=kernel_initializer,
name="attn_logits")
def get_config(self):
return dict(
num_heads=self._num_heads,
per_head_channels=self._per_head_channels,
use_bias=self._use_bias,
edge_dropout=self._edge_dropout,
attention_activation=self._attention_activation,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
**super().get_config())
def convolve(self, *,
sender_node_input: Optional[tf.Tensor],
sender_edge_input: Optional[tf.Tensor],
receiver_input: Optional[tf.Tensor],
broadcast_from_sender_node: Callable[[tf.Tensor], tf.Tensor],
broadcast_from_receiver: Callable[[tf.Tensor], tf.Tensor],
pool_to_receiver: Callable[..., tf.Tensor],
extra_receiver_ops: Optional[
Mapping[str, Callable[..., Any]]] = None,
training: bool) -> tf.Tensor:
# Form the attention query for each head.
# [num_items, *extra_dims, num_heads, channels_per_head]
assert receiver_input is not None, "__init__() should have checked this."
query = broadcast_from_receiver(self._split_heads(self._w_query(
receiver_input)))
# Form the attention value by transforming the configured inputs
# and adding up the transformed values.
# [num_items, *extra_dims, num_heads, channels_per_head]
value_terms = []
if sender_node_input is not None:
value_terms.append(broadcast_from_sender_node(
self._split_heads(self._w_sender_node(sender_node_input))))
if sender_edge_input is not None:
value_terms.append(
self._split_heads(self._w_sender_edge(sender_edge_input)))
assert value_terms, "Internal error: no values, __init__ should catch this."
value = tf.add_n(value_terms)
# Compute the features from which attention logits are computed.
# [num_items, *extra_dims, num_heads, channels_per_head]
attention_features = self._attention_activation(query + value)
# Compute the attention logits and softmax to get the coefficients.
# [num_items, *extra_dims, num_heads, 1]
logits = tf.expand_dims(self._attention_logits_fn(attention_features), -1)
attention_coefficients = extra_receiver_ops["softmax"](logits)
if training:
# Apply dropout to the normalized attention coefficients, as is done in
# the original GAT paper. This should have the same effect as edge
# dropout. Also, note that tf.nn.dropout upscales the remaining values,
# which should maintain the sum-up-to-1 per node in expectation.
attention_coefficients = tf.nn.dropout(attention_coefficients,
self._edge_dropout)
# Apply the attention coefficients to the transformed query.
# [num_items, *extra_dims, num_heads, per_head_channels]
messages = value * attention_coefficients
# Take the sum of the weighted values, which equals the weighted average.
# Receivers without incoming senders get the empty sum 0.
# [num_receivers, *extra_dims, num_heads, per_head_channels]
pooled_messages = pool_to_receiver(messages, reduce_type="sum")
# Apply the nonlinearity.
pooled_messages = self._activation(pooled_messages)
pooled_messages = self._merge_heads(pooled_messages)
return pooled_messages
# The following helpers map forth and back between tensors with...
# - a separate heads dimension: shape [..., num_heads, channels_per_head],
# - all heads concatenated: shape [..., num_heads * channels_per_head].
def _split_heads(self, tensor):
extra_dims = tensor.shape[1:-1] # Possibly empty.
if not extra_dims.is_fully_defined():
raise ValueError(
"GATv2Attention requires non-ragged Tensors as inputs, "
"and GraphTensor requires these to have statically known "
f"dimensions except the first, but got {tensor.shape}")
new_shape = (-1, *extra_dims, self._num_heads, self._per_head_channels)
return tf.reshape(tensor, new_shape)
def _merge_heads(self, tensor):
num_merged = 2
extra_dims = tensor.shape[1 : -num_merged] # Possibly empty.
merged_dims = tensor.shape[-num_merged:]
if not extra_dims.is_fully_defined() or not merged_dims.is_fully_defined():
raise ValueError(
f"Unexpected unknown dimensions in shape {tensor.shape}")
new_shape = (-1, *extra_dims, merged_dims.num_elements())
return tf.reshape(tensor, new_shape)
def GATv2EdgePool(*, # To be called like a class initializer. pylint: disable=invalid-name
num_heads: int,
per_head_channels: int,
receiver_tag: Optional[tfgnn.IncidentNodeOrContextTag] = None,
receiver_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
sender_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
**kwargs):
"""Returns a layer for pooling edges with GATv2-style attention.
When initialized with receiver_tag SOURCE or TARGET, the returned layer can
be called on an edge set to compute the weighted sum of edge states at the
given endpoint. The weights are computed by the method of Graph Attention
Networks v2 (GATv2), except that edge states, not node states broadcast from
the edges' other endpoint, are used as input values to attention.
When initialized with receiver_tag CONTEXT, the returned layer can be called
on an edge set to do the analogous pooling of edge states to context.
NOTE: This layer cannot pool node states. For that, use GATv2Conv.
Args:
num_heads: The number of attention heads.
per_head_channels: The number of channels for each attention head. This
means that the final output size will be per_head_channels * num_heads.
receiver_tag: The results of attention are aggregated for this graph piece.
If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or
node set.
If set to an IncidentNodeTag (e.g., `tfgnn.SOURCE` or `tfgnn.TARGET`),
the layer can be called for an edge set and will aggregate results at
the specified endpoint of the edges.
If left unset, the tag must be passed when calling the layer.
receiver_feature: By default, the default state feature of the receiver
is used to compute the attention query. A different feature name can be
selected by setting this argument.
sender_feature: By default, the default state feature of the edge set is
used to compute the attention values. A different feature name can be
selected by setting this argument.
**kwargs: Any other option for GATv2Conv, except sender_node_feature,
which is set to None.
"""
if kwargs.pop("sender_node_feature", None) is not None:
raise TypeError("GATv2EdgePool() got an unexpected keyword argument "
"'sender_node_feature'. Did you mean GATv2Conv()?")
kwargs.setdefault("name", "gat_v2_edge_pool")
return GATv2Conv(
num_heads=num_heads,
per_head_channels=per_head_channels,
receiver_tag=receiver_tag,
receiver_feature=receiver_feature,
sender_edge_feature=sender_feature,
sender_node_feature=None,
**kwargs)
def GATv2GraphUpdate(*, # To be called like a class initializer. pylint: disable=invalid-name
num_heads: int,
per_head_channels: int,
edge_set_name: str,
feature_name: str = tfgnn.HIDDEN_STATE,
name: str = "gat_v2",
**kwargs):
"""Returns a GraphUpdater layer with a Graph Attention Network V2 (GATv2).
The returned layer performs one update step of a Graph Attention Network v2
(GATv2) from https://arxiv.org/abs/2105.14491 on an edge set of a GraphTensor.
It is best suited for graphs that have just that one edge set.
For heterogeneous graphs with multiple node sets and edge sets, users are
advised to consider a GraphUpdate with one or more GATv2Conv objects
instead.
This implementation of GAT attends only to edges that are explicitly stored
in the input GraphTensor. Attention of a node to itself requires having an
explicit loop in the edge set.
Args:
num_heads: The number of attention heads.
per_head_channels: The number of channels for each attention head. This
means that the final output size will be per_head_channels * num_heads.
edge_set_name: A GATv2 update happens on this edge set and its incident
node set(s) of the input GraphTensor.
feature_name: The feature name of node states; defaults to
tfgnn.HIDDEN_STATE.
name: Optionally, a name for the layer returned.
**kwargs: Any optional arguments to GATv2Conv, see there.
"""
# Compat logic, remove in late 2021.
if "output_feature_name" in kwargs:
raise TypeError("Argument 'output_feature_name' is no longer supported.")
# Build a GraphUpdate for the target node set of the given edge_set_name.
# That needs to be deferred until we see a GraphTensorSpec that tells us
# the node_set_name.
def deferred_init_callback(spec: tfgnn.GraphTensorSpec):
node_set_name = spec.edge_sets_spec[
edge_set_name].adjacency_spec.node_set_name(tfgnn.TARGET)
node_set_updates = {
node_set_name: tfgnn.keras.layers.NodeSetUpdate(
{edge_set_name: GATv2Conv(
num_heads=num_heads, per_head_channels=per_head_channels,
receiver_tag=tfgnn.TARGET,
sender_node_feature=feature_name, receiver_feature=feature_name,
**kwargs)},
next_state=NextStateForNodeSetFromSingleEdgeSetInput(),
node_input_feature=feature_name)}
return dict(node_sets=node_set_updates)
return tfgnn.keras.layers.GraphUpdate(
deferred_init_callback=deferred_init_callback, name=name)
# For use by GATv2GraphUpdate().
@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")
class NextStateForNodeSetFromSingleEdgeSetInput(tf.keras.layers.Layer):
def call(self, inputs):
unused_node_input, edge_inputs, unused_context_input = inputs
single_edge_set_input, = edge_inputs.values() # Unpack.
return single_edge_set_input
|
[
"tensorflow_gnn.keras.layers.GraphUpdate",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.layers.experimental.EinsumDense",
"tensorflow.add_n",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.activations.get",
"tensorflow.nn.dropout"
] |
[((185, 256), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""GNN>models>gat_v2"""'}), "(package='GNN>models>gat_v2')\n", (227, 256), True, 'import tensorflow as tf\n'), ((18945, 19016), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""GNN>models>gat_v2"""'}), "(package='GNN>models>gat_v2')\n", (18987, 19016), True, 'import tensorflow as tf\n'), ((18813, 18906), 'tensorflow_gnn.keras.layers.GraphUpdate', 'tfgnn.keras.layers.GraphUpdate', ([], {'deferred_init_callback': 'deferred_init_callback', 'name': 'name'}), '(deferred_init_callback=\n deferred_init_callback, name=name)\n', (18843, 18906), True, 'import tensorflow_gnn as tfgnn\n'), ((7085, 7131), 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (['attention_activation'], {}), '(attention_activation)\n', (7109, 7131), True, 'import tensorflow as tf\n'), ((7155, 7191), 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (['activation'], {}), '(activation)\n', (7179, 7191), True, 'import tensorflow as tf\n'), ((7330, 7459), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(per_head_channels * num_heads)'], {'kernel_initializer': 'kernel_initializer', 'use_bias': 'use_bias', 'name': '"""query"""'}), "(per_head_channels * num_heads, kernel_initializer=\n kernel_initializer, use_bias=use_bias, name='query')\n", (7351, 7459), True, 'import tensorflow as tf\n'), ((8807, 8969), 'tensorflow.keras.layers.experimental.EinsumDense', 'tf.keras.layers.experimental.EinsumDense', (['"""...ik,ki->...i"""'], {'output_shape': '(None, num_heads, 1)', 'kernel_initializer': 'kernel_initializer', 'name': '"""attn_logits"""'}), "('...ik,ki->...i', output_shape=(\n None, num_heads, 1), kernel_initializer=kernel_initializer, name=\n 'attn_logits')\n", (8847, 8969), True, 'import tensorflow as tf\n'), ((10821, 10842), 'tensorflow.add_n', 'tf.add_n', (['value_terms'], {}), '(value_terms)\n', (10829, 10842), True, 'import tensorflow as tf\n'), ((13045, 13074), 'tensorflow.reshape', 'tf.reshape', (['tensor', 'new_shape'], {}), '(tensor, new_shape)\n', (13055, 13074), True, 'import tensorflow as tf\n'), ((13485, 13514), 'tensorflow.reshape', 'tf.reshape', (['tensor', 'new_shape'], {}), '(tensor, new_shape)\n', (13495, 13514), True, 'import tensorflow as tf\n'), ((7710, 7844), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(per_head_channels * num_heads)'], {'kernel_initializer': 'kernel_initializer', 'use_bias': 'use_bias', 'name': '"""value_node"""'}), "(per_head_channels * num_heads, kernel_initializer=\n kernel_initializer, use_bias=use_bias, name='value_node')\n", (7731, 7844), True, 'import tensorflow as tf\n'), ((8066, 8236), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(per_head_channels * num_heads)'], {'kernel_initializer': 'kernel_initializer', 'use_bias': '(use_bias and self._w_sender_node is None)', 'name': '"""value_edge"""'}), "(per_head_channels * num_heads, kernel_initializer=\n kernel_initializer, use_bias=use_bias and self._w_sender_node is None,\n name='value_edge')\n", (8087, 8236), True, 'import tensorflow as tf\n'), ((11654, 11711), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['attention_coefficients', 'self._edge_dropout'], {}), '(attention_coefficients, self._edge_dropout)\n', (11667, 11711), True, 'import tensorflow as tf\n')]
|
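A sketch of the single-edge-set wrapper defined above. It assumes `graph` is a scalar `tfgnn.GraphTensor` whose "cites" edge set (an assumed name) targets a node set carrying a `tfgnn.HIDDEN_STATE` feature; the hyperparameters are illustrative.

gat_update = GATv2GraphUpdate(
    num_heads=4,
    per_head_channels=16,
    edge_set_name="cites",
)
graph = gat_update(graph)  # target nodes of "cites" receive new hidden states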
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
******** Ferromagnetic chain ********
1. Try to make an anti-ferromagnetic chain
2. What happens when you add linear biases on either end of a ferromagnetic chain?
3. What happens when you modify the J's so they aren't all the same value?
'''
from dwave.system import EmbeddingComposite, DWaveSampler
import dwave.inspector as inspector
# Modifiable parameters
num_qubits = 10 # Number of qubits in our chain
fm_qubit_bias = [0] * num_qubits # List of biases to apply to each qubit in our chain
fm_coupler_strength = -1 # The coupling we want to apply to two adjacent qubits
# Ising model parameters
h = fm_qubit_bias
J = {}
for i in range(num_qubits-1):
J[(i, i+1)] = fm_coupler_strength
# Submit the problem to the QPU
sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
sampleset = sampler.sample_ising(h, J, num_reads=10)
inspector.show(sampleset)
print("Ferromagetic QPU response")
print(sampleset)
|
[
"dwave.inspector.show",
"dwave.system.DWaveSampler"
] |
[((1483, 1508), 'dwave.inspector.show', 'inspector.show', (['sampleset'], {}), '(sampleset)\n', (1497, 1508), True, 'import dwave.inspector as inspector\n'), ((1393, 1427), 'dwave.system.DWaveSampler', 'DWaveSampler', ([], {'solver': "{'qpu': True}"}), "(solver={'qpu': True})\n", (1405, 1427), False, 'from dwave.system import EmbeddingComposite, DWaveSampler\n')]
|
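For exercise 1 in the docstring above: in D-Wave's Ising convention the energy includes +J_ij * s_i * s_j, so a positive coupler penalizes aligned neighbours. Flipping the coupler sign therefore gives an anti-ferromagnetic chain; a sketch reusing the script's variables:

afm_coupler_strength = 1  # J > 0 favours opposite spins on adjacent qubits
J = {(i, i + 1): afm_coupler_strength for i in range(num_qubits - 1)}
sampleset = sampler.sample_ising(h, J, num_reads=10)
print(sampleset)  # low-energy samples should alternate +1/-1 along the chain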
from uuid import uuid4
import sqlalchemy
from sqlalchemy import TIMESTAMP, Boolean, Column, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.sql import expression
naming_convention = {
"ix": "ix_%(column_0_N_label)s",
"uq": "uq_%(table_name)s_%(column_0_N_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_N_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
metadata = sqlalchemy.MetaData(naming_convention=naming_convention)
User = sqlalchemy.Table(
"user",
metadata,
Column("uid", UUID(), default=uuid4, primary_key=True),
Column("username", String(), nullable=False, unique=True),
Column("disabled", Boolean(), nullable=False, default=False, server_default=expression.false()),
Column("hashed_password", String(), nullable=False),
Column("is_admin", Boolean(), server_default=expression.false(), nullable=False),
Column("created", TIMESTAMP(timezone=True), nullable=False, index=True),
)
|
[
"sqlalchemy.MetaData",
"sqlalchemy.dialects.postgresql.UUID",
"sqlalchemy.Boolean",
"sqlalchemy.TIMESTAMP",
"sqlalchemy.String",
"sqlalchemy.sql.expression.false"
] |
[((468, 524), 'sqlalchemy.MetaData', 'sqlalchemy.MetaData', ([], {'naming_convention': 'naming_convention'}), '(naming_convention=naming_convention)\n', (487, 524), False, 'import sqlalchemy\n'), ((595, 601), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {}), '()\n', (599, 601), False, 'from sqlalchemy.dialects.postgresql import UUID\n'), ((660, 668), 'sqlalchemy.String', 'String', ([], {}), '()\n', (666, 668), False, 'from sqlalchemy import TIMESTAMP, Boolean, Column, String\n'), ((723, 732), 'sqlalchemy.Boolean', 'Boolean', ([], {}), '()\n', (730, 732), False, 'from sqlalchemy import TIMESTAMP, Boolean, Column, String\n'), ((831, 839), 'sqlalchemy.String', 'String', ([], {}), '()\n', (837, 839), False, 'from sqlalchemy import TIMESTAMP, Boolean, Column, String\n'), ((881, 890), 'sqlalchemy.Boolean', 'Boolean', ([], {}), '()\n', (888, 890), False, 'from sqlalchemy import TIMESTAMP, Boolean, Column, String\n'), ((966, 990), 'sqlalchemy.TIMESTAMP', 'TIMESTAMP', ([], {'timezone': '(True)'}), '(timezone=True)\n', (975, 990), False, 'from sqlalchemy import TIMESTAMP, Boolean, Column, String\n'), ((780, 798), 'sqlalchemy.sql.expression.false', 'expression.false', ([], {}), '()\n', (796, 798), False, 'from sqlalchemy.sql import expression\n'), ((907, 925), 'sqlalchemy.sql.expression.false', 'expression.false', ([], {}), '()\n', (923, 925), False, 'from sqlalchemy.sql import expression\n')]
|
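A sketch of materializing the table above. The `UUID` column type is PostgreSQL-specific, so this assumes a reachable PostgreSQL server; the connection URL is a placeholder. Note that the `created` column has no default, so a value must be supplied on insert.

from datetime import datetime, timezone
from sqlalchemy import create_engine

engine = create_engine("postgresql+psycopg2://app:secret@localhost/appdb")  # placeholder URL
metadata.create_all(engine)  # emits CREATE TABLE "user" using the naming convention above

with engine.begin() as conn:
    conn.execute(User.insert().values(
        username="alice",             # hypothetical user
        hashed_password="<hash>",     # store a real password hash here
        created=datetime.now(timezone.utc),
    ))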
import torch
from core.network.rainbow import Rainbow
def test_rainbow_call():
D_in, D_out, D_hidden = 2, 3, 4
N_atom = 5
noise_type = "factorized"
net = Rainbow(
D_in=D_in, D_out=D_out, N_atom=N_atom, noise_type=noise_type, D_hidden=D_hidden
)
batch_size = 6
mock_input = torch.rand((batch_size, D_in))
out = net(mock_input, is_train=True)
assert out.shape == (batch_size, D_out, N_atom)
|
[
"core.network.rainbow.Rainbow",
"torch.rand"
] |
[((174, 266), 'core.network.rainbow.Rainbow', 'Rainbow', ([], {'D_in': 'D_in', 'D_out': 'D_out', 'N_atom': 'N_atom', 'noise_type': 'noise_type', 'D_hidden': 'D_hidden'}), '(D_in=D_in, D_out=D_out, N_atom=N_atom, noise_type=noise_type,\n D_hidden=D_hidden)\n', (181, 266), False, 'from core.network.rainbow import Rainbow\n'), ((314, 344), 'torch.rand', 'torch.rand', (['(batch_size, D_in)'], {}), '((batch_size, D_in))\n', (324, 344), False, 'import torch\n')]
|
from scipy.io import wavfile
import sounddevice as sd
# import speech_recognition as sr
import os
from pyaudio import PyAudio, paInt16, paFloat32
from scipy.io.wavfile import *
from random import randint
import time
import wave
import base64
import matplotlib.pyplot as plt
import soundfile as sf
# print(o)
#
# exit(122)
# sd.play('./tmp/float32speech.wav')
# sd.wait()
fs=16000
myrecording = sd.rec(int(4 * fs), samplerate=fs, channels=1, dtype='int16')
sd.wait() # Wait until recording is finished
print('finished')
sd.play(myrecording, fs)
write(f'./tmp/test.wav', fs, myrecording)
# data, fs = sf.read('./tmp/float32speech.wav', dtype='float32')
#
# plt.plot(data)
# plt.show()
# print(sd.wait()fs)
# sd.play(data, fs)
#
# sd.wait()
# import pyaudio
# import wave
#
# chunk = 1024 # Record in chunks of 1024 samples
# sample_format = pyaudio.paInt16 # 16 bits per sample
# channels = 1
# fs = 44100 # Record at 44100 samples per second
# seconds = 3
# filename = "output.wav"
#
# p = pyaudio.PyAudio() # Create an interface to PortAudio
#
# print('Recording')
#
# stream = p.open(format=sample_format,
# channels=channels,
# rate=fs,
# frames_per_buffer=chunk,
# input=True)
#
# frames = [] # Initialize array to store frames
#
# # Store data in chunks for 3 seconds
# for i in range(0, int(fs / chunk * seconds)):
# data = stream.read(chunk)
# frames.append(data)
#
# # Stop and close the stream
# stream.stop_stream()
# stream.close()
# # Terminate the PortAudio interface
# p.terminate()
#
# print('Finished recording')
#
# # Save the recorded data as a WAV file
# wf = wave.open(filename, 'wb')
# wf.setnchannels(channels)
# wf.setsampwidth(p.get_sample_size(sample_format))
# wf.setframerate(fs)
# wf.writeframes(b''.join(frames))
# wf.close()
#
#
#
# fs, data = read('output.wav')
# plt.plot(data)
# plt.show()
#
#
#
# # def save_wave_file(filepath, data):
# # wf = wave.open(filepath, 'wb')
# # wf.setnchannels(1)
# # wf.setsampwidth(2)
# # wf.setframerate(16000)
# # wf.writeframes(b''.join(data))
# # wf.close()
# #
# # pa = PyAudio()
# #
# # stream = pa.open(format=paFloat32, channels=1,
# # rate=16000, input=True, frames_per_buffer=2000, )
# # my_buf = []
# # # count = 0
# # t = time.time()
# # while time.time() < t + 6:  # seconds
# # string_audio_data = stream.read(2000)
# # my_buf.append(string_audio_data)
# # # print('Recording finished.')
# # save_wave_file('./tmp/static.wav', my_buf)
# # stream.close()
|
[
"sounddevice.play",
"sounddevice.wait"
] |
[((461, 470), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (468, 470), True, 'import sounddevice as sd\n'), ((525, 549), 'sounddevice.play', 'sd.play', (['myrecording', 'fs'], {}), '(myrecording, fs)\n', (532, 549), True, 'import sounddevice as sd\n')]
|
# coding: utf-8
"""
loadbalancer
OpenAPI spec version: 2018-06-21T02:19:18Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_loadbalancer.model.load_balancer_rule_parameter import LoadBalancerRuleParameter # noqa: F401,E501
class ChangeLoadBalancerInstanceConfigurationRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'load_balancer_instance_no': 'str',
'load_balancer_algorithm_type_code': 'str',
'load_balancer_description': 'str',
'load_balancer_rule_list': 'list[LoadBalancerRuleParameter]'
}
attribute_map = {
'load_balancer_instance_no': 'loadBalancerInstanceNo',
'load_balancer_algorithm_type_code': 'loadBalancerAlgorithmTypeCode',
'load_balancer_description': 'loadBalancerDescription',
'load_balancer_rule_list': 'loadBalancerRuleList'
}
def __init__(self, load_balancer_instance_no=None, load_balancer_algorithm_type_code=None, load_balancer_description=None, load_balancer_rule_list=None): # noqa: E501
"""ChangeLoadBalancerInstanceConfigurationRequest - a model defined in Swagger""" # noqa: E501
self._load_balancer_instance_no = None
self._load_balancer_algorithm_type_code = None
self._load_balancer_description = None
self._load_balancer_rule_list = None
self.discriminator = None
self.load_balancer_instance_no = load_balancer_instance_no
self.load_balancer_algorithm_type_code = load_balancer_algorithm_type_code
if load_balancer_description is not None:
self.load_balancer_description = load_balancer_description
self.load_balancer_rule_list = load_balancer_rule_list
@property
def load_balancer_instance_no(self):
"""Gets the load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
        Load balancer instance number  # noqa: E501
:return: The load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:rtype: str
"""
return self._load_balancer_instance_no
@load_balancer_instance_no.setter
def load_balancer_instance_no(self, load_balancer_instance_no):
"""Sets the load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer instance number  # noqa: E501
:param load_balancer_instance_no: The load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:type: str
"""
if load_balancer_instance_no is None:
raise ValueError("Invalid value for `load_balancer_instance_no`, must not be `None`") # noqa: E501
self._load_balancer_instance_no = load_balancer_instance_no
@property
def load_balancer_algorithm_type_code(self):
"""Gets the load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
        Load balancer algorithm type code  # noqa: E501
:return: The load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:rtype: str
"""
return self._load_balancer_algorithm_type_code
@load_balancer_algorithm_type_code.setter
def load_balancer_algorithm_type_code(self, load_balancer_algorithm_type_code):
"""Sets the load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer algorithm type code  # noqa: E501
:param load_balancer_algorithm_type_code: The load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:type: str
"""
if load_balancer_algorithm_type_code is None:
raise ValueError("Invalid value for `load_balancer_algorithm_type_code`, must not be `None`") # noqa: E501
self._load_balancer_algorithm_type_code = load_balancer_algorithm_type_code
@property
def load_balancer_description(self):
"""Gets the load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
        Load balancer description  # noqa: E501
:return: The load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:rtype: str
"""
return self._load_balancer_description
@load_balancer_description.setter
def load_balancer_description(self, load_balancer_description):
"""Sets the load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer description  # noqa: E501
:param load_balancer_description: The load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:type: str
"""
self._load_balancer_description = load_balancer_description
@property
def load_balancer_rule_list(self):
"""Gets the load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
        Load balancer rule list  # noqa: E501
:return: The load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:rtype: list[LoadBalancerRuleParameter]
"""
return self._load_balancer_rule_list
@load_balancer_rule_list.setter
def load_balancer_rule_list(self, load_balancer_rule_list):
"""Sets the load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer rule list  # noqa: E501
:param load_balancer_rule_list: The load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest. # noqa: E501
:type: list[LoadBalancerRuleParameter]
"""
if load_balancer_rule_list is None:
raise ValueError("Invalid value for `load_balancer_rule_list`, must not be `None`") # noqa: E501
self._load_balancer_rule_list = load_balancer_rule_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ChangeLoadBalancerInstanceConfigurationRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
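
# Usage sketch (not part of the generated model; the field values below are
# illustrative assumptions, and `rule` stands for a LoadBalancerRuleParameter):
#
# req = ChangeLoadBalancerInstanceConfigurationRequest(
#     load_balancer_instance_no='1234567',
#     load_balancer_algorithm_type_code='RR',
#     load_balancer_rule_list=[rule],
# )
# print(req.to_dict())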
|
[
"six.iteritems"
] |
[((6455, 6488), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (6468, 6488), False, 'import six\n')]
|
from __future__ import annotations
from typing import TYPE_CHECKING
from grouper.fe.forms import GroupEditForm
from grouper.fe.util import GrouperHandler
from grouper.models.audit_log import AuditLog
from grouper.models.counter import Counter
from grouper.models.group import Group
from grouper.role_user import is_role_user
from grouper.user_group import user_can_manage_group
if TYPE_CHECKING:
from typing import Any
class GroupEdit(GrouperHandler):
def get(self, *args: Any, **kwargs: Any) -> None:
name = self.get_path_argument("name")
group = Group.get(self.session, name=name)
if not group:
return self.notfound()
if not user_can_manage_group(self.session, group, self.current_user):
return self.forbidden()
form = GroupEditForm(obj=group)
self.render("group-edit.html", group=group, form=form)
def post(self, *args: Any, **kwargs: Any) -> None:
name = self.get_path_argument("name")
group = Group.get(self.session, name=name)
if not group:
return self.notfound()
if not user_can_manage_group(self.session, group, self.current_user):
return self.forbidden()
form = GroupEditForm(self.request.arguments, obj=group)
if not form.validate():
return self.render(
"group-edit.html", group=group, form=form, alerts=self.get_form_alerts(form.errors)
)
new_name = form.data["groupname"]
renamed = group.groupname != new_name
if renamed and is_role_user(self.session, group=group):
form.groupname.errors.append("You cannot change the name of service account groups")
return self.render(
"group-edit.html", group=group, form=form, alerts=self.get_form_alerts(form.errors)
)
if renamed and Group.get(self.session, name=new_name):
message = f"A group named '{new_name}' already exists (possibly disabled)"
form.groupname.errors.append(message)
return self.render(
"group-edit.html", group=group, form=form, alerts=self.get_form_alerts(form.errors)
)
group.groupname = new_name
group.email_address = form.data["email_address"]
group.description = form.data["description"]
group.canjoin = form.data["canjoin"]
group.auto_expire = form.data["auto_expire"]
group.require_clickthru_tojoin = form.data["require_clickthru_tojoin"]
Counter.incr(self.session, "updates")
self.session.commit()
AuditLog.log(
self.session, self.current_user.id, "edit_group", "Edited group.", on_group_id=group.id
)
url = f"/groups/{group.name}"
if renamed:
url += "?refresh=yes"
self.redirect(url)
|
[
"grouper.models.counter.Counter.incr",
"grouper.user_group.user_can_manage_group",
"grouper.models.group.Group.get",
"grouper.role_user.is_role_user",
"grouper.models.audit_log.AuditLog.log",
"grouper.fe.forms.GroupEditForm"
] |
[((578, 612), 'grouper.models.group.Group.get', 'Group.get', (['self.session'], {'name': 'name'}), '(self.session, name=name)\n', (587, 612), False, 'from grouper.models.group import Group\n'), ((801, 825), 'grouper.fe.forms.GroupEditForm', 'GroupEditForm', ([], {'obj': 'group'}), '(obj=group)\n', (814, 825), False, 'from grouper.fe.forms import GroupEditForm\n'), ((1009, 1043), 'grouper.models.group.Group.get', 'Group.get', (['self.session'], {'name': 'name'}), '(self.session, name=name)\n', (1018, 1043), False, 'from grouper.models.group import Group\n'), ((1232, 1280), 'grouper.fe.forms.GroupEditForm', 'GroupEditForm', (['self.request.arguments'], {'obj': 'group'}), '(self.request.arguments, obj=group)\n', (1245, 1280), False, 'from grouper.fe.forms import GroupEditForm\n'), ((2534, 2571), 'grouper.models.counter.Counter.incr', 'Counter.incr', (['self.session', '"""updates"""'], {}), "(self.session, 'updates')\n", (2546, 2571), False, 'from grouper.models.counter import Counter\n'), ((2611, 2716), 'grouper.models.audit_log.AuditLog.log', 'AuditLog.log', (['self.session', 'self.current_user.id', '"""edit_group"""', '"""Edited group."""'], {'on_group_id': 'group.id'}), "(self.session, self.current_user.id, 'edit_group',\n 'Edited group.', on_group_id=group.id)\n", (2623, 2716), False, 'from grouper.models.audit_log import AuditLog\n'), ((686, 747), 'grouper.user_group.user_can_manage_group', 'user_can_manage_group', (['self.session', 'group', 'self.current_user'], {}), '(self.session, group, self.current_user)\n', (707, 747), False, 'from grouper.user_group import user_can_manage_group\n'), ((1117, 1178), 'grouper.user_group.user_can_manage_group', 'user_can_manage_group', (['self.session', 'group', 'self.current_user'], {}), '(self.session, group, self.current_user)\n', (1138, 1178), False, 'from grouper.user_group import user_can_manage_group\n'), ((1572, 1611), 'grouper.role_user.is_role_user', 'is_role_user', (['self.session'], {'group': 'group'}), '(self.session, group=group)\n', (1584, 1611), False, 'from grouper.role_user import is_role_user\n'), ((1880, 1918), 'grouper.models.group.Group.get', 'Group.get', (['self.session'], {'name': 'new_name'}), '(self.session, name=new_name)\n', (1889, 1918), False, 'from grouper.models.group import Group\n')]
|
import pandas as pd
import numpy as np
import itertools
class handlers(object):
    @staticmethod
    def get_column(filename_with_path, ext_value, annot='gene_id', header_line=0, sep="\t", opt=0):
        """
        filename_with_path = filepath + basename
        ext_value = name of the value column to extract
        annot = name of the index (annotation) column
        sep = separator
        """
# Don't use pandas.read_csv because of memory usage
index_list = []
value_list = []
with open(filename_with_path, 'r') as infile:
for i, line in enumerate(infile):
line = line.strip()
if i==header_line: # found header
header_info = line.split(sep)
value_ext_location = header_info.index(ext_value) # location of value extraction point
index_ext_location = header_info.index(annot) # location of value extraction point
elif i!=header_line:
line_list = line.split(sep)
                    index_list.append(str(line_list[index_ext_location]))  # Index list
                    value_list.append(float(line_list[value_ext_location]))  # Value list
result_df = pd.DataFrame(data={ext_value: value_list}, index=index_list)
return result_df
    @staticmethod
    def get_samplename(filelist):
"""
filelist = list of basename
Lambda function could be--
_get_samplename = lambda filelist : [x.split("-")[0] for x in filelist]
"""
sampleName = [x.split("-")[0] for x in filelist]
return sampleName
    @staticmethod
    def get_condtionMatrix_by_category(dataframe, sampleColumn, dataColname, conditions: list):
"""
Transform meta data to DESeq condition matrix
Input
dataframe: metadata input
sampleColumn: Column name for Sample ID in metadata input
            dataColname: Column name for category value in metadata input
conditions: Conditions you selected, list type, and it has 2 elements
Output
result dataframe with 2 columns (colnames: sampleID, conditions)
"""
assert len(conditions)==2, "Please make sure that conditions list has 2 elements"
sampleList = [] # empty list
conditionValues = []
for x in conditions:
data = dataframe[dataframe[dataColname]==x][sampleColumn] # get sample name
sampleList.append(data.values.tolist()) # sampleID
conditionValues.append([x]*len(data.values.tolist())) # condition value
sampleList = list(itertools.chain(*sampleList)) # flatten
conditionValues = list(itertools.chain(*conditionValues))
result = pd.DataFrame(data={'sampleID':sampleList, 'conditions':conditionValues}).set_index('sampleID')
return result
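
# Usage sketch (not in the original module; the file names, column names and
# the `meta` DataFrame are illustrative assumptions):
#
# counts = handlers.get_column('/data/sample1-counts.txt', ext_value='TPM')
# samples = handlers.get_samplename(['s1-counts.txt', 's2-counts.txt'])
# cond = handlers.get_condtionMatrix_by_category(
#     meta, 'sample', 'group', conditions=['control', 'treated'])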
|
[
"pandas.DataFrame",
"itertools.chain"
] |
[((1157, 1217), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '{ext_value: value_list}', 'index': 'index_list'}), '(data={ext_value: value_list}, index=index_list)\n', (1169, 1217), True, 'import pandas as pd\n'), ((2518, 2546), 'itertools.chain', 'itertools.chain', (['*sampleList'], {}), '(*sampleList)\n', (2533, 2546), False, 'import itertools\n'), ((2589, 2622), 'itertools.chain', 'itertools.chain', (['*conditionValues'], {}), '(*conditionValues)\n', (2604, 2622), False, 'import itertools\n'), ((2642, 2716), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'sampleID': sampleList, 'conditions': conditionValues}"}), "(data={'sampleID': sampleList, 'conditions': conditionValues})\n", (2654, 2716), True, 'import pandas as pd\n')]
|
from rest_framework import serializers
from goods.models import GoodsVisitCount
# Serializer for daily goods visit counts by category
class GoodVistiModelSerializer(serializers.ModelSerializer):
category = serializers.StringRelatedField()
class Meta:
model = GoodsVisitCount
fields = [
            'category',  # foreign-key field, rendered via the related model's __str__
'count'
]
|
[
"rest_framework.serializers.StringRelatedField"
] |
[((166, 198), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (196, 198), False, 'from rest_framework import serializers\n')]
|
from __future__ import division
import librosa, pydub
import numpy as np
from tempfile import TemporaryFile
import pickle, json, os
class mash:
def __init__(self, json_, cached=False):
self.sr = 22050 # new Sampling Rate for the audio files
self.songs = json_
self.Yin = []
self.Yout = []
self.pathIn = []
self.pathOut = []
self.beats = {'in': [], 'out': []}
self.tempo = {'in': 0, 'out': 0}
self._setup()
self._load(cached=cached)
self._extract()
self._segment()
self._speedUp()
out = self._mix()
print("Exporting...")
out.export(out_f="final.mp3", format="mp3")
print("[SUCCESS] Export as `final.mp3`")
def _setup(self):
if not os.path.exists('cache'):
os.makedirs('cache')
def _load(self, cached=True):
for song in self.songs:
if os.path.exists("cache/%s.pkl"%song['name']):
print("\nLoading", song['name'], "from cache")
with open("cache/%s.pkl"%song['name'], 'rb') as f:
if song['mixin']:
print("Yin=", song['name'])
self.Yin = pickle.load(f)
self.pathIn = song['path']
else:
print("Yout=", song['name'])
self.Yout.append(pickle.load(f))
self.pathOut.append(song['path'])
continue
print("\nLoading", song['name'])
y, sr = librosa.load(song['path'], sr=self.sr)
if song['mixin']:
self.Yin = y
self.pathIn = song['path']
else:
self.Yout.append(y)
self.pathOut.append(song['path'])
print("[SUCCESS] Loaded", song['name'])
if cached:
try:
with open('cache/%s.pkl'%song['name'], 'wb') as f:
pickle.dump(y, f)
print("[SUCCESS] Cached", song['name'])
except Exception as e:
print("[FAILED] Caching", song['name'])
print(e)
def _extract(self):
# TODO: Add cosine distance similarity to choose the best mixout
        self.Yout = self.Yout[0]  # NOTE: only one mix-in and one mix-out track are considered
self.pathOut = self.pathOut[0]
self.tempo['in'], self.beats['in'] = librosa.beat.beat_track(y=self.Yin, sr=self.sr)
self.tempo['out'], self.beats['out'] = librosa.beat.beat_track(y=self.Yout, sr=self.sr)
print("TempoIn=", self.tempo['in'])
print("TempoOut=", self.tempo['out'])
self._OTAC()
self._crossFadeRegion()
def _OTAC(self): # Optimal Tempo Adjustment Coefficient Computation
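        # Comments added for clarity: the candidate coefficients shift the
        # mix-in tempo by whole octaves (2**c), and Bopt is the octave closest
        # to the mix-out tempo. Ttgt below appears to be the positive root of
        # a*t**2 - (a - b)*Tlow*t - b*Thigh*Tlow = 0, i.e. a compromise tempo
        # lying between the slower (Tlow) and faster (Thigh) of the two.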
C = [-2, -1, 0, 1, 2]
if self.tempo['in'] == self.tempo['out']:
self.tempo['tgt'] = self.tempo['in']
return
Tin_ = [(2**c)*self.tempo['in'] for c in C]
TinIndex_ = np.argmin(np.absolute(Tin_ - self.tempo['out']))
Copt = C[TinIndex_]
Bopt = (2**Copt)*self.tempo['in']
Tlow = min(Bopt, self.tempo['out'])
Thigh = max(Bopt, self.tempo['out'])
a, b = 0.765, 1
Ttgt = (a-b)*Tlow + np.sqrt( ((a-b)**2)*(Tlow**2) + 4*a*b*Thigh*Tlow )
Ttgt = Ttgt/(2*a)
print("FoptIn=", Ttgt/Bopt)
print("FoptOut=", Ttgt/self.tempo['out'])
print("Ttgt=", Ttgt)
self.tempo['tgt'] = Ttgt
def _crossFadeRegion(self): # Computes the cross fade region for the mixed song
Na = self.beats['in'].shape[0]-1
scores = [self._score(i, Na) for i in range(2, int(Na/4))]
noBeats = np.argmax(scores)+2
inDuration = librosa.get_duration(y=self.Yin, sr=self.sr)
fadeInStart = librosa.frames_to_time(self.beats['in'], sr=self.sr)[-int(noBeats/2)]
fadeIn = inDuration - fadeInStart
fadeOut = librosa.frames_to_time(self.beats['out'], sr=self.sr)[int(noBeats/2)]
print("Best Power Corelation Scores=", np.max(scores))
print("Number of beats in cross fade region=", noBeats)
print("fadeInStart=", fadeInStart)
print("fadeOutEnd=", fadeOut)
print("Cross Fade Time=", fadeIn+fadeOut)
self.crossFade = [fadeInStart*1000, fadeOut*1000] # In milliseconds
def _score(self, T, Na):
cr = 0
for i in range(1, T+1):
cr += self.beats['in'][Na-i+1]*self.beats['out'][i]
return cr/T
def _segment(self):
print("Started Segmentation")
sIn = pydub.AudioSegment.from_file(self.pathIn, format="mp3")
sOut = pydub.AudioSegment.from_file(self.pathOut, format="mp3")
print("[SUCCESS] Segmented audio files")
self.segments = {
'in': [ sIn[:self.crossFade[0]], sIn[self.crossFade[0]:] ],
'out': [ sOut[:self.crossFade[1]], sOut[self.crossFade[1]:] ],
}
del sIn, sOut
def _speedUp(self):
s1 = self.segments['in'][1]
s2 = self.segments['out'][0]
speed1 = self.tempo['tgt']/self.tempo['in']
speed2 = self.tempo['tgt']/self.tempo['out']
print("Playback Speed of in end segment=",speed1,'X')
print("Playback Speed of out start segment=",speed2,'X')
        # Store the adjusted segments back so _mix() uses them (the original
        # code sped up s1 twice and discarded both results).
        self.segments['in'][1] = s1.speedup(playback_speed=speed1)
        self.segments['out'][0] = s2.speedup(playback_speed=speed2)
def _mix(self):
xf = self.segments['in'][1].fade(to_gain=-120, start=0, end=float('inf'))
xf *= self.segments['out'][0].fade(from_gain=-120, start=0, end=float('inf'))
out = TemporaryFile()
out.write(self.segments['in'][0]._data)
out.write(xf._data)
out.write(self.segments['out'][1]._data)
out.seek(0)
print("[SUCCESS] Mixed 4 audio segment to 1")
        return self.segments['in'][0]._spawn(data=out.read())
if __name__ == '__main__':
with open('songs.json', 'r') as f:
j = json.loads(f.read())
obj = mash(j, cached=True)
|
[
"numpy.absolute",
"pickle.dump",
"librosa.frames_to_time",
"os.makedirs",
"numpy.argmax",
"os.path.exists",
"tempfile.TemporaryFile",
"numpy.max",
"pickle.load",
"librosa.load",
"librosa.beat.beat_track",
"pydub.AudioSegment.from_file",
"numpy.sqrt",
"librosa.get_duration"
] |
[((2481, 2528), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'self.Yin', 'sr': 'self.sr'}), '(y=self.Yin, sr=self.sr)\n', (2504, 2528), False, 'import librosa, pydub\n'), ((2576, 2624), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'self.Yout', 'sr': 'self.sr'}), '(y=self.Yout, sr=self.sr)\n', (2599, 2624), False, 'import librosa, pydub\n'), ((3808, 3852), 'librosa.get_duration', 'librosa.get_duration', ([], {'y': 'self.Yin', 'sr': 'self.sr'}), '(y=self.Yin, sr=self.sr)\n', (3828, 3852), False, 'import librosa, pydub\n'), ((4652, 4707), 'pydub.AudioSegment.from_file', 'pydub.AudioSegment.from_file', (['self.pathIn'], {'format': '"""mp3"""'}), "(self.pathIn, format='mp3')\n", (4680, 4707), False, 'import librosa, pydub\n'), ((4723, 4779), 'pydub.AudioSegment.from_file', 'pydub.AudioSegment.from_file', (['self.pathOut'], {'format': '"""mp3"""'}), "(self.pathOut, format='mp3')\n", (4751, 4779), False, 'import librosa, pydub\n'), ((5667, 5682), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (5680, 5682), False, 'from tempfile import TemporaryFile\n'), ((790, 813), 'os.path.exists', 'os.path.exists', (['"""cache"""'], {}), "('cache')\n", (804, 813), False, 'import pickle, json, os\n'), ((827, 847), 'os.makedirs', 'os.makedirs', (['"""cache"""'], {}), "('cache')\n", (838, 847), False, 'import pickle, json, os\n'), ((930, 975), 'os.path.exists', 'os.path.exists', (["('cache/%s.pkl' % song['name'])"], {}), "('cache/%s.pkl' % song['name'])\n", (944, 975), False, 'import pickle, json, os\n'), ((1581, 1619), 'librosa.load', 'librosa.load', (["song['path']"], {'sr': 'self.sr'}), "(song['path'], sr=self.sr)\n", (1593, 1619), False, 'import librosa, pydub\n'), ((3075, 3112), 'numpy.absolute', 'np.absolute', (["(Tin_ - self.tempo['out'])"], {}), "(Tin_ - self.tempo['out'])\n", (3086, 3112), True, 'import numpy as np\n'), ((3327, 3387), 'numpy.sqrt', 'np.sqrt', (['((a - b) ** 2 * Tlow ** 2 + 4 * a * b * Thigh * Tlow)'], {}), '((a - b) ** 2 * Tlow ** 2 + 4 * a * b * Thigh * Tlow)\n', (3334, 3387), True, 'import numpy as np\n'), ((3766, 3783), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3775, 3783), True, 'import numpy as np\n'), ((3875, 3927), 'librosa.frames_to_time', 'librosa.frames_to_time', (["self.beats['in']"], {'sr': 'self.sr'}), "(self.beats['in'], sr=self.sr)\n", (3897, 3927), False, 'import librosa, pydub\n'), ((4006, 4059), 'librosa.frames_to_time', 'librosa.frames_to_time', (["self.beats['out']"], {'sr': 'self.sr'}), "(self.beats['out'], sr=self.sr)\n", (4028, 4059), False, 'import librosa, pydub\n'), ((4124, 4138), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (4130, 4138), True, 'import numpy as np\n'), ((1230, 1244), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1241, 1244), False, 'import pickle, json, os\n'), ((2018, 2035), 'pickle.dump', 'pickle.dump', (['y', 'f'], {}), '(y, f)\n', (2029, 2035), False, 'import pickle, json, os\n'), ((1416, 1430), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1427, 1430), False, 'import pickle, json, os\n')]
|
from django.conf.urls import url
from django.conf import settings
from django.views.static import serve
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^([0-9]+)/$', views.detail, name='detail'),
url(r'^user/(\w+)/$', views.profile, name='profile'),
url(r'^post_url/$', views.post_waifu, name='post_waifu'),
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.login_view, name='login'),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^heart_waifu/$', views.heart_waifu, name='heart_waifu'),
url(r'^search/$', views.search, name='search'),
]
if settings.DEBUG:
urlpatterns += [
url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT, }),
]
|
[
"django.conf.urls.url"
] |
[((142, 178), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (145, 178), False, 'from django.conf.urls import url\n'), ((182, 229), 'django.conf.urls.url', 'url', (['"""^([0-9]+)/$"""', 'views.detail'], {'name': '"""detail"""'}), "('^([0-9]+)/$', views.detail, name='detail')\n", (185, 229), False, 'from django.conf.urls import url\n'), ((233, 285), 'django.conf.urls.url', 'url', (['"""^user/(\\\\w+)/$"""', 'views.profile'], {'name': '"""profile"""'}), "('^user/(\\\\w+)/$', views.profile, name='profile')\n", (236, 285), False, 'from django.conf.urls import url\n'), ((288, 343), 'django.conf.urls.url', 'url', (['"""^post_url/$"""', 'views.post_waifu'], {'name': '"""post_waifu"""'}), "('^post_url/$', views.post_waifu, name='post_waifu')\n", (291, 343), False, 'from django.conf.urls import url\n'), ((347, 398), 'django.conf.urls.url', 'url', (['"""^register/$"""', 'views.register'], {'name': '"""register"""'}), "('^register/$', views.register, name='register')\n", (350, 398), False, 'from django.conf.urls import url\n'), ((402, 449), 'django.conf.urls.url', 'url', (['"""^login/$"""', 'views.login_view'], {'name': '"""login"""'}), "('^login/$', views.login_view, name='login')\n", (405, 449), False, 'from django.conf.urls import url\n'), ((453, 503), 'django.conf.urls.url', 'url', (['"""^logout/$"""', 'views.logout_view'], {'name': '"""logout"""'}), "('^logout/$', views.logout_view, name='logout')\n", (456, 503), False, 'from django.conf.urls import url\n'), ((507, 567), 'django.conf.urls.url', 'url', (['"""^heart_waifu/$"""', 'views.heart_waifu'], {'name': '"""heart_waifu"""'}), "('^heart_waifu/$', views.heart_waifu, name='heart_waifu')\n", (510, 567), False, 'from django.conf.urls import url\n'), ((571, 616), 'django.conf.urls.url', 'url', (['"""^search/$"""', 'views.search'], {'name': '"""search"""'}), "('^search/$', views.search, name='search')\n", (574, 616), False, 'from django.conf.urls import url\n'), ((661, 735), 'django.conf.urls.url', 'url', (['"""^media/(?P<path>.*)$"""', 'serve', "{'document_root': settings.MEDIA_ROOT}"], {}), "('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT})\n", (664, 735), False, 'from django.conf.urls import url\n')]
|
# adapted from original JupyterHub_config.py file
# which is Copyright (c) Jupyter Development Team, see LICENSE
import os
from dockerspawner import DockerSpawner
c = get_config()
# use our custom spawner for the jupyterhub config
class MathHubSpawner(DockerSpawner):
def __init__(self, *args, **kwargs):
super(MathHubSpawner, self).__init__(*args, **kwargs)
self.env_keep += ['MMT_FRONTEND_BASE_URL', 'UPLOAD_REDIRECT_PREFIX']
self.container_image = os.environ['DOCKER_NOTEBOOK_IMAGE']
c.JupyterHub.spawner_class = MathHubSpawner
# Command used to start single-user notebook servers inside of DockerSpawner
spawn_cmd = os.environ.get('DOCKER_SPAWN_CMD', "start-singleuser.sh")
c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd })
# Docker Spawner network configuration
c.DockerSpawner.network_name = os.environ['DOCKER_NETWORK_NAME']
c.DockerSpawner.extra_host_config = { 'network_mode': c.DockerSpawner.network_name }
c.DockerSpawner.use_internal_ip = True
# Docker Volumes and Notebook directory
c.DockerSpawner.notebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/home/jovyan/work'
c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': c.DockerSpawner.notebook_dir }
c.DockerSpawner.volumes[os.environ.get('MMT_VOLUME_HOST')] = {'bind' : '/content/', 'mode' : 'ro' }
c.DockerSpawner.remove_containers = True
c.DockerSpawner.debug = True
# Jupyter Hub Public URL
c.JupyterHub.port = 80
# Jupyter Hub Internal URL (for docker containers)
c.JupyterHub.hub_ip = 'jupyterhub'
c.JupyterHub.hub_port = 8080
# cookie secret can be found in the data_volume container under /data
data_dir = os.environ.get('DATA_VOLUME_CONTAINER', '/data')
c.JupyterHub.cookie_secret_file = os.path.join(data_dir, 'jupyterhub_cookie_secret')
# postgresql database config
c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
host=os.environ['POSTGRES_HOST'],
password=os.environ['POSTGRES_PASSWORD'],
db=os.environ['POSTGRES_DB']
)
# Authentication Setup for JupyterHub
# TODO: Try using GitLab OAuth here
c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'
|
[
"os.environ.get",
"os.path.join"
] |
[((632, 689), 'os.environ.get', 'os.environ.get', (['"""DOCKER_SPAWN_CMD"""', '"""start-singleuser.sh"""'], {}), "('DOCKER_SPAWN_CMD', 'start-singleuser.sh')\n", (646, 689), False, 'import os\n'), ((1628, 1676), 'os.environ.get', 'os.environ.get', (['"""DATA_VOLUME_CONTAINER"""', '"""/data"""'], {}), "('DATA_VOLUME_CONTAINER', '/data')\n", (1642, 1676), False, 'import os\n'), ((1711, 1761), 'os.path.join', 'os.path.join', (['data_dir', '"""jupyterhub_cookie_secret"""'], {}), "(data_dir, 'jupyterhub_cookie_secret')\n", (1723, 1761), False, 'import os\n'), ((1060, 1097), 'os.environ.get', 'os.environ.get', (['"""DOCKER_NOTEBOOK_DIR"""'], {}), "('DOCKER_NOTEBOOK_DIR')\n", (1074, 1097), False, 'import os\n'), ((1234, 1267), 'os.environ.get', 'os.environ.get', (['"""MMT_VOLUME_HOST"""'], {}), "('MMT_VOLUME_HOST')\n", (1248, 1267), False, 'import os\n')]
|
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering
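# GPIO4 as an input with the internal pull-up enabled: the pin reads high
# until it is pulled to ground (e.g. by a button wired to GND).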
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
|
[
"RPi.GPIO.setup",
"RPi.GPIO.setmode"
] |
[((24, 46), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (36, 46), True, 'import RPi.GPIO as GPIO\n'), ((75, 123), 'RPi.GPIO.setup', 'GPIO.setup', (['(4)', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (85, 123), True, 'import RPi.GPIO as GPIO\n')]
|
"""Meta Writer application
Configures and starts up an instance of the MetaListener for receiving and writing meta data.
<NAME>, Diamond Light Source
"""
from argparse import ArgumentParser
from odin_data.meta_writer.meta_listener import MetaListener
from odin_data.logconfig import setup_logging, add_graylog_handler, set_log_level
def parse_args():
"""Parse program arguments"""
parser = ArgumentParser()
parser.add_argument(
"-c", "--ctrl", default=5659, help="Control channel port to listen on"
)
parser.add_argument(
"-d",
"--data-endpoints",
default="tcp://127.0.0.1:5558,tcp://127.0.0.1:5559",
help="Data endpoints - comma separated list",
)
parser.add_argument(
"-w",
"--writer",
default="odin_data.meta_writer.meta_writer.MetaWriter",
help="Module path to detector specific meta writer class",
)
parser.add_argument("-l", "--log-level", default="INFO", help="Logging level")
parser.add_argument(
"--log-server",
default=None,
help="Graylog server address and port - e.g. 127.0.0.1:8000",
)
parser.add_argument(
"--static-log-fields",
default=None,
help="Comma separated list of key=value fields to be attached to every log message",
)
return parser.parse_args()
def main():
args = parse_args()
static_fields = None
if args.static_log_fields is not None:
static_fields = dict(f.split("=") for f in args.static_log_fields.split(","))
if args.log_server is not None:
log_server_address, log_server_port = args.log_server.split(":")
log_server_port = int(log_server_port)
add_graylog_handler(
log_server_address, log_server_port, static_fields=static_fields
)
set_log_level(args.log_level)
setup_logging()
data_endpoints = args.data_endpoints.split(",")
meta_listener = MetaListener(args.ctrl, data_endpoints, args.writer)
meta_listener.run()
if __name__ == "__main__":
main()
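
# Example invocation (sketch; the script name is hypothetical, the flags are
# the ones defined in parse_args above):
#   python meta_writer_app.py --ctrl 5659 \
#       --data-endpoints tcp://127.0.0.1:5558,tcp://127.0.0.1:5559 \
#       --log-level DEBUG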
|
[
"odin_data.meta_writer.meta_listener.MetaListener",
"argparse.ArgumentParser",
"odin_data.logconfig.add_graylog_handler",
"odin_data.logconfig.set_log_level",
"odin_data.logconfig.setup_logging"
] |
[((403, 419), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (417, 419), False, 'from argparse import ArgumentParser\n'), ((1826, 1855), 'odin_data.logconfig.set_log_level', 'set_log_level', (['args.log_level'], {}), '(args.log_level)\n', (1839, 1855), False, 'from odin_data.logconfig import setup_logging, add_graylog_handler, set_log_level\n'), ((1860, 1875), 'odin_data.logconfig.setup_logging', 'setup_logging', ([], {}), '()\n', (1873, 1875), False, 'from odin_data.logconfig import setup_logging, add_graylog_handler, set_log_level\n'), ((1949, 2001), 'odin_data.meta_writer.meta_listener.MetaListener', 'MetaListener', (['args.ctrl', 'data_endpoints', 'args.writer'], {}), '(args.ctrl, data_endpoints, args.writer)\n', (1961, 2001), False, 'from odin_data.meta_writer.meta_listener import MetaListener\n'), ((1713, 1803), 'odin_data.logconfig.add_graylog_handler', 'add_graylog_handler', (['log_server_address', 'log_server_port'], {'static_fields': 'static_fields'}), '(log_server_address, log_server_port, static_fields=\n static_fields)\n', (1732, 1803), False, 'from odin_data.logconfig import setup_logging, add_graylog_handler, set_log_level\n')]
|
import cv2
modelFile = "./opencv_face_detector_uint8.pb"
configFile = "./opencv_face_detector.pbtxt"
net = cv2.dnn.readNetFromTensorflow(modelFile, configFile)
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
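    # Build a 300x300 input blob; (104, 117, 123) are the per-channel mean
    # values commonly used with OpenCV's SSD face detector (taken on trust
    # here); swapRB and crop are both disabled.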
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123], False, False)
net.setInput(blob)
detections = net.forward()
bboxes = []
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.3:
x1 = int(detections[0, 0, i, 3] * frameWidth)
y1 = int(detections[0, 0, i, 4] * frameHeight)
x2 = int(detections[0, 0, i, 5] * frameWidth)
y2 = int(detections[0, 0, i, 6] * frameHeight)
cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (23, 230, 210), thickness=2)
cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
[
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"cv2.VideoCapture",
"cv2.dnn.readNetFromTensorflow",
"cv2.imshow"
] |
[((152, 204), 'cv2.dnn.readNetFromTensorflow', 'cv2.dnn.readNetFromTensorflow', (['modelFile', 'configFile'], {}), '(modelFile, configFile)\n', (181, 204), False, 'import cv2\n'), ((212, 231), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (228, 231), False, 'import cv2\n'), ((352, 428), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1.0)', '(300, 300)', '[104, 117, 123]', '(False)', '(False)'], {}), '(frame, 1.0, (300, 300), [104, 117, 123], False, False)\n', (373, 428), False, 'import cv2\n'), ((956, 982), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (966, 982), False, 'import cv2\n'), ((990, 1004), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1001, 1004), False, 'import cv2\n')]
|
import os
import datetime
import pytest
import re
from pychpp import __version__
from pychpp import CHPP
from pychpp.ht_team import HTTeam, HTYouthTeam
from pychpp.ht_user import HTUser
from pychpp.ht_player import HTPlayer, HTYouthPlayer, HTLineupPlayer
from pychpp.ht_arena import HTArena
from pychpp.ht_region import HTRegion
from pychpp.ht_match import HTMatch
from pychpp.ht_match_lineup import HTMatchLineup
from pychpp.ht_matches_archive import HTMatchesArchive, HTMatchesArchiveItem
from pychpp.ht_skill import HTSkill, HTSkillYouth
from pychpp.ht_challenge import HTChallengeManager
from pychpp.ht_league import HTLeague
from pychpp.ht_rank import HTRank
from pychpp.ht_world import HTCountry, HTCup, HTCountryLeague, HTRegionItem, HTWorld
from pychpp.ht_error import HTUnauthorizedAction, UnknownLeagueError
PYCHPP_CONSUMER_KEY = os.environ["PYCHPP_CONSUMER_KEY"]
PYCHPP_CONSUMER_SECRET = os.environ["PYCHPP_CONSUMER_SECRET"]
PYCHPP_ACCESS_TOKEN_KEY = os.environ["PYCHPP_ACCESS_TOKEN_KEY"]
PYCHPP_ACCESS_TOKEN_SECRET = os.environ["PYCHPP_ACCESS_TOKEN_SECRET"]
PYCHPP_SCOPE = os.environ["PYCHPP_SCOPE"]
YOUTH_PLAYER_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/Club/Players/YouthPlayer.aspx\?YouthPlayerID=(\d+)"
PLAYER_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/Club/Players/Player.aspx\?playerId=(\d+)"
YOUTH_TEAM_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/Club/Youth/\?YouthTeamID=(\d+)"
ARENA_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/Club/Arena/\?ArenaID=(\d+)"
USER_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/Club/Manager/\?userId=(\d+)"
REGION_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/World/Regions/Region.aspx\?RegionID=(\d+)"
MATCH_ARCHIVE_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=%2FClub%2FMatches%2FArchive.aspx%3F(TeamID%3D(\d*))?(%26)?(season%3D(\d*))?"
MATCH_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/Club/Matches/Match.aspx\?matchID=(\d+)"
COUNTRY_LEAGUE_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/World/Leagues/League.aspx\?LeagueID=(\d+)"
CUP_PATTERN = r"https://www.hattrick.org/goto.ashx\?path=/World/Cup/Cup.aspx\?CupID=(\d+)"
def test_version():
assert __version__ == '0.2.6'
def test_request_token():
chpp = CHPP(consumer_key=PYCHPP_CONSUMER_KEY,
consumer_secret=PYCHPP_CONSUMER_SECRET,
)
auth = chpp.get_auth(scope='')
assert isinstance(auth, dict)
for key in auth.keys():
assert key in ('request_token', 'request_token_secret', 'url',)
assert isinstance(auth['request_token'], str) and auth['request_token']
assert isinstance(auth['request_token_secret'],
str) and auth['request_token_secret']
assert (isinstance(auth['url'], str)
and 'https://chpp.hattrick.org/oauth/authorize.aspx?scope=&oauth_token=' in auth['url'])
@pytest.fixture
def chpp():
return CHPP(consumer_key=PYCHPP_CONSUMER_KEY,
consumer_secret=PYCHPP_CONSUMER_SECRET,
access_token_key=PYCHPP_ACCESS_TOKEN_KEY,
access_token_secret=PYCHPP_ACCESS_TOKEN_SECRET,
)
def test_get_current_team(chpp):
team = chpp.team()
assert isinstance(team, HTTeam)
assert isinstance(team.ht_id, int)
assert isinstance(team.name, str)
assert isinstance(team.url, str)
assert isinstance(team.is_bot, bool)
youth_team = team.youth_team
assert isinstance(youth_team, HTYouthTeam) or youth_team is None
user = team.user
test_user = chpp.user()
assert user.ht_id == test_user.ht_id
players = team.players
assert isinstance(players, list)
for p in players:
assert isinstance(p, HTPlayer)
def test_get_specific_team(chpp):
team = chpp.team(ht_id=591993)
assert isinstance(team, HTTeam)
assert team.ht_id == 591993
assert team.name == "thekiki's"
assert team.short_name == 'thekikis'
assert team.is_primary_club is True
assert team.is_bot is False
assert team.power_rating > 0
assert team.url == "https://www.hattrick.org/goto.ashx?path=/Club/?TeamID=591993"
user = team.user
assert isinstance(user, HTUser)
assert user.ht_id == 6336642
assert user.username == 'thekiki76'
assert user.supporter_tier == 'platinum'
assert user.url == "https://www.hattrick.org/goto.ashx?path=/Club/Manager/?userId=6336642"
youthteam = team.youth_team
assert isinstance(youthteam, HTYouthTeam)
assert youthteam.name == 'thebabykikis'
assert re.match(YOUTH_TEAM_PATTERN, youthteam.url)
arena = team.arena
assert isinstance(arena, HTArena)
assert arena.name == "thekiki's evil"
assert re.match(ARENA_PATTERN, arena.url)
def test_get_secondary_team(chpp):
team = chpp.team(ht_id=44307)
assert isinstance(team, HTTeam)
assert team.ht_id == 44307
assert team.name == "<NAME>"
assert team.short_name == 'Grynvalla'
assert team.is_primary_club is False
assert team.url == "https://www.hattrick.org/goto.ashx?path=/Club/?TeamID=44307"
user = team.user
assert isinstance(user, HTUser)
assert user.ht_id == 182085
assert user.username == "Kvarak"
assert user.url == "https://www.hattrick.org/goto.ashx?path=/Club/Manager/?userId=182085"
youthteam = team.youth_team
assert isinstance(youthteam, HTYouthTeam)
assert youthteam.name == "<NAME>"
assert re.match(YOUTH_TEAM_PATTERN, youthteam.url)
arena = team.arena
assert isinstance(arena, HTArena)
assert arena.name == "Grynvallen"
assert re.match(ARENA_PATTERN, arena.url)
def test_get_current_user(chpp):
user = chpp.user()
assert isinstance(user, HTUser)
assert isinstance(user.ht_id, int)
assert isinstance(user.username, str)
assert isinstance(user.url, str)
assert re.match(USER_PATTERN, user.url)
def test_get_player(chpp):
player = chpp.player(ht_id=432002549)
assert isinstance(player, HTPlayer)
assert isinstance(player.skills, dict)
assert {i for i in player.skills.keys()}.issubset(HTSkill.SKILLS_NAME)
assert player.owner_notes is None
assert player.ht_id == 432002549
assert player.agreeability == 2
assert player.aggressiveness == 3
assert player.honesty == 3
assert player.url == "https://www.hattrick.org/goto.ashx?path=/Club/Players/Player.aspx?playerId=432002549"
assert isinstance(player.skills, dict)
assert len(player.skills) == 8
for i in player.skills.keys():
assert i in ("stamina", "keeper", "defender", "playmaker",
"winger", "scorer", "passing", "set_pieces")
assert isinstance(player.tsi, int)
assert isinstance(player.injury_level, int)
def test_get_youth_player(chpp):
youthteam = chpp.youth_team()
assert isinstance(youthteam, HTYouthTeam)
if youthteam.ht_id != 0:
youthplayer = youthteam.players[0]
assert isinstance(youthplayer, HTYouthPlayer)
assert {i for i in youthplayer.skills.keys()}.issubset(
HTSkillYouth.SKILLS_TAG)
assert re.match(YOUTH_PLAYER_PATTERN, youthplayer.url)
def test_get_current_user_arena(chpp):
arena = chpp.arena()
assert isinstance(arena, HTArena)
assert isinstance(arena.ht_id, int) or arena.ht_id is None
assert isinstance(arena.name, str)
assert isinstance(arena.url, str)
assert re.match(ARENA_PATTERN, arena.url)
def test_get_specific_arena(chpp):
arena = chpp.arena(ht_id=295023)
assert isinstance(arena, HTArena)
assert arena.ht_id == 295023
assert arena.name == 'Les piments verts Arena'
assert arena.url == "https://www.hattrick.org/goto.ashx?path=/Club/Arena/?ArenaID=295023"
team = arena.team
assert isinstance(team, HTTeam)
assert team.ht_id == 295023
assert team.name == 'Les piments verts'
assert team.url == "https://www.hattrick.org/goto.ashx?path=/Club/?TeamID=295023"
def test_get_current_user_region(chpp):
region = chpp.region()
assert isinstance(region, HTRegion)
assert isinstance(region.ht_id, int)
assert isinstance(region.name, str)
assert isinstance(region.number_of_users, int)
assert isinstance(region.number_of_online, int)
assert isinstance(region.weather, int)
assert isinstance(region.tomorrow_weather, int)
assert isinstance(region.url, str)
assert re.match(REGION_PATTERN, region.url)
def test_get_specific_region(chpp):
region = chpp.region(ht_id=149)
assert isinstance(region, HTRegion)
assert region.ht_id == 149
assert region.name == "Provence-Alpes-Côte d'Azur"
assert isinstance(region.number_of_users, int)
assert isinstance(region.number_of_online, int)
assert isinstance(region.weather, int)
assert isinstance(region.tomorrow_weather, int)
assert region.url == "https://www.hattrick.org/goto.ashx?path=/World/Regions/Region.aspx?RegionID=149"
def test_get_current_user_matches_archive(chpp):
ma1 = chpp.matches_archive()
assert isinstance(ma1, HTMatchesArchive)
assert isinstance(ma1.url, str)
assert re.match(MATCH_ARCHIVE_PATTERN, ma1.url)
m = ma1[0]
assert isinstance(m, HTMatchesArchiveItem)
assert isinstance(m.home_team, HTTeam)
assert isinstance(m.url, str)
assert re.match(MATCH_PATTERN, m.url)
ma2 = chpp.matches_archive(ht_id=1165592,
first_match_date=datetime.datetime(2020, 1, 1),
last_match_date=datetime.datetime(2020, 3, 31), )
assert ma2[0].ht_id == 652913955
assert ma2[0].home_team_name == "Les Poitevins de La Chapelle"
assert ma2[0].away_team_name == "FC Traversonne"
assert ma2[0].date == datetime.datetime(2020, 1, 1, 15, 10)
assert ma2[0].type == 5
assert ma2[0].context_id == 0
assert ma2[0].rule_id == 0
assert ma2[0].cup_level == 0
assert ma2[0].cup_level_index == 0
assert ma2[0].home_goals == 2
assert ma2[0].away_goals == 0
assert ma2[0].url == "https://www.hattrick.org/goto.ashx?path=/Club/Matches/Match.aspx?matchID=652913955"
for m in ma2:
        assert datetime.datetime(2020, 1, 1) <= m.date <= datetime.datetime(2020, 3, 31)
def test_get_other_user_matches_archives(chpp):
ma1 = chpp.matches_archive(ht_id=1755906,
first_match_date=datetime.datetime(2018, 4, 10),
last_match_date=datetime.datetime(2018, 4, 30),
)
assert re.match(MATCH_ARCHIVE_PATTERN, ma1.url)
for m in ma1:
        assert datetime.datetime(2018, 4, 10) <= m.date <= datetime.datetime(2018, 6, 30)
assert 1755906 in (m.home_team_id, m.away_team_id)
assert re.match(MATCH_PATTERN, m.url)
ma2 = chpp.matches_archive(ht_id=1755906,
season=60,
)
assert re.match(MATCH_ARCHIVE_PATTERN, ma2.url)
for m in ma2:
        assert datetime.datetime(2015, 10, 26) <= m.date <= datetime.datetime(2016, 2, 14)
assert 1755906 in (m.home_team_id, m.away_team_id)
assert re.match(MATCH_PATTERN, m.url)
def test_get_match(chpp):
m = chpp.match(ht_id=547513790, events=True)
assert isinstance(m, HTMatch)
assert m.ht_id == 547513790
assert m.url == "https://www.hattrick.org/goto.ashx?path=/Club/Matches/Match.aspx?matchID=547513790"
assert m.date == datetime.datetime(2015, 12, 19, 21, 0)
assert m.home_team_name == "<NAME>"
assert m.away_team_name == "<NAME>"
assert m.added_minutes == 0
assert m.arena_id == 1162154
assert len(m.events) >= 0
assert m.events[14]["minute"] == 72
assert m.events[14]["match_part"] == 2
assert m.events[14]["id"] == 285
assert m.events[14]["variation"] == 3
assert m.events[14]["subject_team_id"] == 292366
assert m.events[14]["subject_player_id"] == 373737451
assert m.events[14]["object_player_id"] == 314946894
# Description is localized
# assert "free kick" in m.events[14]["description"]
def test_is_challengeable(chpp):
challenge = HTChallengeManager(chpp)
if "manage_challenges" in PYCHPP_SCOPE:
ich = challenge.is_challengeable(team_ht_id=1750803)
assert isinstance(ich, dict)
for b in ich.values():
assert isinstance(b, bool)
else:
with pytest.raises(HTUnauthorizedAction):
ich = challenge.is_challengeable(team_ht_id=1750803)
def test_league(chpp):
league = chpp.league(ht_id=36378)
assert isinstance(league, HTLeague)
assert league.ht_id == 36378
assert league.name == "VI.390"
assert league.country_id == 5
assert league.url == "https://www.hattrick.org/goto.ashx?path=/World/Series/?LeagueLevelUnitID=36378"
assert isinstance(league.ranks, list)
for r in league.ranks:
assert isinstance(r, HTRank)
assert league.ranks[3].position == 4
def test_get_match_lineup(chpp):
match_lineup = chpp.match_lineup(ht_id=660688698, team_id=86324)
assert isinstance(match_lineup, HTMatchLineup)
assert isinstance(match_lineup.match, HTMatch)
assert match_lineup.ht_id == 660688698
assert match_lineup.home_team_name == "Gazela.f.c"
assert match_lineup.away_team_id == 86324
assert match_lineup.away_team_name == "<NAME>"
assert match_lineup.arena_id == 1420520
assert match_lineup.game_type == 1
assert re.match(MATCH_PATTERN, match_lineup.url)
assert isinstance(match_lineup.arena, HTArena)
assert len(match_lineup.lineup_players) == 20
assert isinstance(match_lineup.lineup_players[0], HTLineupPlayer)
assert isinstance(match_lineup.lineup_players[0].player, HTPlayer)
assert match_lineup.lineup_players[0].ht_id == 453372825
assert match_lineup.lineup_players[0].first_name == "Teodoro"
assert match_lineup.lineup_players[0].role_id == 100
assert match_lineup.lineup_players[0].role_name == "Keeper"
assert match_lineup.lineup_players[15].role_id == 120
assert match_lineup.lineup_players[15].role_name == "Unknown role"
assert re.match(PLAYER_PATTERN, match_lineup.lineup_players[15].url)
match_lineup = chpp.match_lineup(
ht_id=116104524, team_id=2828377, source='youth')
assert isinstance(match_lineup.lineup_players[0], HTLineupPlayer)
assert isinstance(match_lineup.lineup_players[0].player, HTYouthPlayer)
assert re.match(YOUTH_PLAYER_PATTERN, match_lineup.lineup_players[0].url)
def test_get_world_details(chpp):
portugal_details = chpp.world(ht_id=25, include_regions=True)
assert isinstance(portugal_details, HTWorld)
assert isinstance(portugal_details.leagues[0], HTCountryLeague)
assert isinstance(portugal_details.leagues[0].country, HTCountry)
assert isinstance(portugal_details.leagues[0].cups[0], HTCup)
assert len(portugal_details.leagues) == 1
assert portugal_details.league(ht_id=25).league_name == "Portugal"
assert portugal_details.league(name="portugal").ht_id == 25
assert portugal_details.league(ht_id=25).country.country_name == "Portugal"
portugal_regions = portugal_details.league(ht_id=25).country.regions
assert len(portugal_regions) >= 1
assert isinstance(portugal_regions[0], HTRegionItem)
assert isinstance(portugal_regions[0].region, HTRegion)
assert len(portugal_details.league(ht_id=25).cups) >= 1
with pytest.raises(UnknownLeagueError):
portugal_details.league(ht_id=26)
world_details = chpp.world()
assert len(world_details.leagues) > 1
assert world_details.leagues[0].country.regions is None
assert re.match(COUNTRY_LEAGUE_PATTERN, portugal_details.league(ht_id=25).url)
assert re.match(REGION_PATTERN, portugal_regions[0].region.url)
assert re.match(CUP_PATTERN, portugal_details.league(ht_id=25).cups[0].url)
|
[
"pychpp.ht_challenge.HTChallengeManager",
"re.match",
"datetime.datetime",
"pytest.raises",
"pychpp.CHPP"
] |
[((2256, 2334), 'pychpp.CHPP', 'CHPP', ([], {'consumer_key': 'PYCHPP_CONSUMER_KEY', 'consumer_secret': 'PYCHPP_CONSUMER_SECRET'}), '(consumer_key=PYCHPP_CONSUMER_KEY, consumer_secret=PYCHPP_CONSUMER_SECRET)\n', (2260, 2334), False, 'from pychpp import CHPP\n'), ((2912, 3089), 'pychpp.CHPP', 'CHPP', ([], {'consumer_key': 'PYCHPP_CONSUMER_KEY', 'consumer_secret': 'PYCHPP_CONSUMER_SECRET', 'access_token_key': 'PYCHPP_ACCESS_TOKEN_KEY', 'access_token_secret': 'PYCHPP_ACCESS_TOKEN_SECRET'}), '(consumer_key=PYCHPP_CONSUMER_KEY, consumer_secret=\n PYCHPP_CONSUMER_SECRET, access_token_key=PYCHPP_ACCESS_TOKEN_KEY,\n access_token_secret=PYCHPP_ACCESS_TOKEN_SECRET)\n', (2916, 3089), False, 'from pychpp import CHPP\n'), ((4529, 4572), 're.match', 're.match', (['YOUTH_TEAM_PATTERN', 'youthteam.url'], {}), '(YOUTH_TEAM_PATTERN, youthteam.url)\n', (4537, 4572), False, 'import re\n'), ((4688, 4722), 're.match', 're.match', (['ARENA_PATTERN', 'arena.url'], {}), '(ARENA_PATTERN, arena.url)\n', (4696, 4722), False, 'import re\n'), ((5412, 5455), 're.match', 're.match', (['YOUTH_TEAM_PATTERN', 'youthteam.url'], {}), '(YOUTH_TEAM_PATTERN, youthteam.url)\n', (5420, 5455), False, 'import re\n'), ((5567, 5601), 're.match', 're.match', (['ARENA_PATTERN', 'arena.url'], {}), '(ARENA_PATTERN, arena.url)\n', (5575, 5601), False, 'import re\n'), ((5826, 5858), 're.match', 're.match', (['USER_PATTERN', 'user.url'], {}), '(USER_PATTERN, user.url)\n', (5834, 5858), False, 'import re\n'), ((7377, 7411), 're.match', 're.match', (['ARENA_PATTERN', 'arena.url'], {}), '(ARENA_PATTERN, arena.url)\n', (7385, 7411), False, 'import re\n'), ((8361, 8397), 're.match', 're.match', (['REGION_PATTERN', 'region.url'], {}), '(REGION_PATTERN, region.url)\n', (8369, 8397), False, 'import re\n'), ((9079, 9119), 're.match', 're.match', (['MATCH_ARCHIVE_PATTERN', 'ma1.url'], {}), '(MATCH_ARCHIVE_PATTERN, ma1.url)\n', (9087, 9119), False, 'import re\n'), ((9270, 9300), 're.match', 're.match', (['MATCH_PATTERN', 'm.url'], {}), '(MATCH_PATTERN, m.url)\n', (9278, 9300), False, 'import re\n'), ((10494, 10534), 're.match', 're.match', (['MATCH_ARCHIVE_PATTERN', 'ma1.url'], {}), '(MATCH_ARCHIVE_PATTERN, ma1.url)\n', (10502, 10534), False, 'import re\n'), ((10896, 10936), 're.match', 're.match', (['MATCH_ARCHIVE_PATTERN', 'ma2.url'], {}), '(MATCH_ARCHIVE_PATTERN, ma2.url)\n', (10904, 10936), False, 'import re\n'), ((12117, 12141), 'pychpp.ht_challenge.HTChallengeManager', 'HTChallengeManager', (['chpp'], {}), '(chpp)\n', (12135, 12141), False, 'from pychpp.ht_challenge import HTChallengeManager\n'), ((13439, 13480), 're.match', 're.match', (['MATCH_PATTERN', 'match_lineup.url'], {}), '(MATCH_PATTERN, match_lineup.url)\n', (13447, 13480), False, 'import re\n'), ((14113, 14174), 're.match', 're.match', (['PLAYER_PATTERN', 'match_lineup.lineup_players[15].url'], {}), '(PLAYER_PATTERN, match_lineup.lineup_players[15].url)\n', (14121, 14174), False, 'import re\n'), ((14429, 14495), 're.match', 're.match', (['YOUTH_PLAYER_PATTERN', 'match_lineup.lineup_players[0].url'], {}), '(YOUTH_PLAYER_PATTERN, match_lineup.lineup_players[0].url)\n', (14437, 14495), False, 'import re\n'), ((15722, 15778), 're.match', 're.match', (['REGION_PATTERN', 'portugal_regions[0].region.url'], {}), '(REGION_PATTERN, portugal_regions[0].region.url)\n', (15730, 15778), False, 'import re\n'), ((7074, 7121), 're.match', 're.match', (['YOUTH_PLAYER_PATTERN', 'youthplayer.url'], {}), '(YOUTH_PLAYER_PATTERN, youthplayer.url)\n', (7082, 7121), False, 'import re\n'), ((9692, 9729), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)', '(15)', '(10)'], {}), '(2020, 1, 1, 15, 10)\n', (9709, 9729), False, 'import datetime\n'), ((10731, 10761), 're.match', 're.match', (['MATCH_PATTERN', 'm.url'], {}), '(MATCH_PATTERN, m.url)\n', (10739, 10761), False, 'import re\n'), ((11134, 11164), 're.match', 're.match', (['MATCH_PATTERN', 'm.url'], {}), '(MATCH_PATTERN, m.url)\n', (11142, 11164), False, 'import re\n'), ((11435, 11473), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(12)', '(19)', '(21)', '(0)'], {}), '(2015, 12, 19, 21, 0)\n', (11452, 11473), False, 'import datetime\n'), ((15413, 15446), 'pytest.raises', 'pytest.raises', (['UnknownLeagueError'], {}), '(UnknownLeagueError)\n', (15426, 15446), False, 'import pytest\n'), ((9396, 9425), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (9413, 9425), False, 'import datetime\n'), ((9474, 9504), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(31)'], {}), '(2020, 3, 31)\n', (9491, 9504), False, 'import datetime\n'), ((10107, 10136), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (10124, 10136), False, 'import datetime\n'), ((10163, 10193), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(31)'], {}), '(2020, 3, 31)\n', (10180, 10193), False, 'import datetime\n'), ((10338, 10368), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(4)', '(10)'], {}), '(2018, 4, 10)\n', (10355, 10368), False, 'import datetime\n'), ((10417, 10447), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(4)', '(30)'], {}), '(2018, 4, 30)\n', (10434, 10447), False, 'import datetime\n'), ((10569, 10599), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(4)', '(10)'], {}), '(2018, 4, 10)\n', (10586, 10599), False, 'import datetime\n'), ((10626, 10656), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(6)', '(30)'], {}), '(2018, 6, 30)\n', (10643, 10656), False, 'import datetime\n'), ((10971, 11002), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(10)', '(26)'], {}), '(2015, 10, 26)\n', (10988, 11002), False, 'import datetime\n'), ((11029, 11059), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(2)', '(14)'], {}), '(2016, 2, 14)\n', (11046, 11059), False, 'import datetime\n'), ((12378, 12413), 'pytest.raises', 'pytest.raises', (['HTUnauthorizedAction'], {}), '(HTUnauthorizedAction)\n', (12391, 12413), False, 'import pytest\n')]
|
"""
Created on Mar 8, 2014
@author: tjoneslo
"""
import os
import logging
from pypdflite import PDFLite
from pypdflite import PDFCursor
from pypdflite.pdfobjects.pdfline import PDFLine
from pypdflite.pdfobjects.pdfellipse import PDFEllipse
from pypdflite.pdfobjects.pdftext import PDFText
from Galaxy import Sector, Galaxy
from Star import Star
from StatCalculation import StatCalculation
class HexMap(object):
"""
    Draws the trade routes as calculated, sector by sector, onto PDF files.
    Uses pypdflite to generate the PDF files directly.
"""
def __init__(self, galaxy, routes, min_btn=8):
self.galaxy = galaxy
self.routes = routes
self.ym = 9 # half a hex height
self.xm = 6 # half the length of one side
self.colorStart = 0
self.min_btn = min_btn
self.y_start = 43
self.x_start = 15
def write_maps(self):
"""
Starting point for writing PDF files.
Call this to output the trade maps
"""
logging.getLogger("PyRoute.HexMap").info("writing {:d} sector maps...".format(len(self.galaxy.sectors)))
for sector in self.galaxy.sectors.values():
pdf = self.document(sector)
self.write_base_map(pdf, sector)
self.draw_borders(pdf, sector)
comm_routes = [star for star in self.galaxy.stars.edges(sector.worlds, True) \
if star[2].get('xboat', False) or star[2].get('comm', False)]
for (star, neighbor, data) in comm_routes:
self.comm_line(pdf, [star, neighbor])
sector_trade = [star for star in self.galaxy.stars.edges(sector.worlds, True) \
if star[2]['trade'] > 0 and StatCalculation.trade_to_btn(star[2]['trade']) >= self.min_btn]
logging.getLogger('PyRoute.HexMap').debug("Worlds with trade: {}".format(len(sector_trade)))
sector_trade.sort(key=lambda line: line[2]['trade'])
for (star, neighbor, data) in sector_trade:
self.galaxy.stars[star][neighbor]['trade btn'] = StatCalculation.trade_to_btn(data['trade'])
self.trade_line(pdf, [star, neighbor], data)
# Get all the worlds in this sector
# for (star, neighbor, data) in self.galaxy.stars.edges(sector.worlds, True):
# if star.sector != sector:
# continue#
# if data['trade'] > 0 and self.trade_to_btn(data['trade']) >= self.min_btn:
# self.galaxy.stars[star][neighbor]['trade btn'] = self.trade_to_btn(data['trade'])
# self.trade_line(pdf, [star, neighbor], data)
# elif star.sector != neighbor.sector:
# data = self.galaxy.stars.get_edge_data(neighbor, star)
# if data is not None and \
# data['trade'] > 0 and \
# self.trade_to_btn(data['trade']) >= self.min_btn:
# self.trade_line(pdf, [star, neighbor], data)
for star in sector.worlds:
self.system(pdf, star)
if sector.coreward:
self.coreward_sector(pdf, sector.coreward.name)
if sector.rimward:
self.rimward_sector(pdf, sector.rimward.name)
if sector.spinward:
self.spinward_sector(pdf, sector.spinward.name)
if sector.trailing:
self.trailing_sector(pdf, sector.trailing.name)
self.writer.close()
def write_base_map(self, pdf, sector):
self.sector_name(pdf, sector.name)
self.subsector_grid(pdf)
self.hex_grid(pdf, self._draw_all, 0.5)
def sector_name(self, pdf, name):
cursor = PDFCursor(5, -5, True)
def_font = pdf.get_font()
pdf.set_font('times', size=30)
width = pdf.get_font()._string_width(name)
cursor.x = 306 - (width / 2)
pdf.add_text(name, cursor)
pdf.set_font(font=def_font)
def coreward_sector(self, pdf, name):
cursor = PDFCursor(5, self.y_start - 15, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
width = pdf.get_font()._string_width(name) / 2
cursor.x = 306 - width
pdf.add_text(name, cursor)
pdf.set_font(font=def_font)
def rimward_sector(self, pdf, name):
cursor = PDFCursor(306, 767, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
cursor.x_plus(-pdf.get_font()._string_width(name) / 2)
pdf.add_text(name, cursor)
pdf.set_font(font=def_font)
def spinward_sector(self, pdf, name):
cursor = PDFCursor(self.x_start - 5, 390, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
cursor.y_plus(pdf.get_font()._string_width(name) / 2)
text = PDFText(pdf.session, pdf.page, None, cursor=cursor)
text.text_rotate(90)
text._text(name)
pdf.set_font(font=def_font)
def trailing_sector(self, pdf, name):
cursor = PDFCursor(598, 390, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
cursor.y_plus(-(pdf.get_font()._string_width(name) / 2))
text = PDFText(pdf.session, pdf.page, None, cursor=cursor)
text.text_rotate(-90)
text._text(name)
pdf.set_font(font=def_font)
def subsector_grid(self, pdf):
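        # Overlay the light-gray subsector grid: vertical rules every 144 pt
        # across the page and horizontal rules every 180 pt down it, framing
        # the 4x4 subsectors.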
color = pdf.get_color()
color.set_color_by_name('lightgray')
pdf.set_draw_color(color)
vlineStart = PDFCursor(0, self.y_start + self.xm)
vlineEnd = PDFCursor(0, self.y_start + self.xm + (180 * 4))
for x in range(self.x_start, 595, 144):
vlineStart.x = x
vlineEnd.x = x
pdf.add_line(cursor1=vlineStart, cursor2=vlineEnd)
hlineStart = PDFCursor(self.x_start, 0)
hlineEnd = PDFCursor(591, 0)
for y in range(self.y_start + self.xm, 780, 180):
hlineStart.y = y
hlineEnd.y = y
pdf.add_line(cursor1=hlineStart, cursor2=hlineEnd)
def _hline(self, pdf, width, colorname):
hlineStart = PDFCursor(0, 0)
hlineStart.x = 3
hlineStart.y = self.y_start - self.ym
hlineStart.dx = self.xm * 3
hlineStart.dy = self.ym * 2
hlineEnd = PDFCursor(0, 0)
hlineEnd.x = self.xm * 2.5
hlineEnd.y = self.y_start - self.ym
hlineEnd.dx = self.xm * 3
hlineEnd.dy = self.ym * 2
color = pdf.get_color()
color.set_color_by_name(colorname)
hline = PDFLine(pdf.session, pdf.page, hlineStart, hlineEnd, stroke='solid', color=color, size=width)
return (hlineStart, hlineEnd, hline)
def _hline_restart_y(self, x, hlineStart, hlineEnd):
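        # Odd columns sit half a hex lower than even ones, so the row cursor
        # restarts at a parity-dependent offset for each new column.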
if (x & 1):
hlineStart.y = self.y_start - self.ym
hlineEnd.y = self.y_start - self.ym
else:
hlineStart.y = self.y_start - 2 * self.ym
hlineEnd.y = self.y_start - 2 * self.ym
def _lline(self, pdf, width, colorname):
llineStart = PDFCursor(-10, 0)
llineStart.x = self.x_start
llineStart.dx = self.xm * 3
llineStart.dy = self.ym * 2
llineEnd = PDFCursor(-10, 0)
llineEnd.x = self.x_start + self.xm
llineEnd.dx = self.xm * 3
llineEnd.dy = self.ym * 2
color = pdf.get_color()
color.set_color_by_name(colorname)
lline = PDFLine(pdf.session, pdf.page, llineStart, llineEnd, stroke='solid', color=color, size=width)
return (llineStart, llineEnd, lline)
def _lline_restart_y(self, x, llineStart, llineEnd):
if (x & 1):
llineStart.y = self.y_start - 2 * self.ym
llineEnd.y = self.y_start - self.ym
else:
llineStart.y = self.y_start - self.ym
llineEnd.y = self.y_start - 2 * self.ym
def _rline(self, pdf, width, colorname):
rlineStart = PDFCursor(0, 0)
rlineStart.x = self.x_start + self.xm
rlineStart.dx = self.xm * 3
rlineStart.dy = self.ym * 2
rlineEnd = PDFCursor(0, 0)
rlineEnd.x = self.x_start
rlineEnd.dx = self.xm * 3
rlineEnd.dy = self.ym * 2
color = pdf.get_color()
color.set_color_by_name(colorname)
rline = PDFLine(pdf.session, pdf.page, rlineStart, rlineEnd, stroke='solid', color=color, size=width)
return (rlineStart, rlineEnd, rline)
def _rline_restart_y(self, x, rlineStart, rlineEnd):
if (x & 1):
rlineStart.y = self.y_start - 3 * self.ym
rlineEnd.y = self.y_start - 2 * self.ym
else:
rlineStart.y = self.y_start - 2 * self.ym
rlineEnd.y = self.y_start - 3 * self.ym
def hex_grid(self, pdf, draw, width, colorname='gray'):
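        # Sweep three line families over 33 columns x 41 rows; each hex is
        # composed of a horizontal edge (hline) plus the left and right
        # diagonal edges (lline/rline), and the draw callback decides which
        # of the three to render at each cell.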
hlineStart, hlineEnd, hline = self._hline(pdf, width, colorname)
llineStart, llineEnd, lline = self._lline(pdf, width, colorname)
rlineStart, rlineEnd, rline = self._rline(pdf, width, colorname)
for x in range(33):
hlineStart.x_plus()
hlineEnd.x_plus()
self._hline_restart_y(x, hlineStart, hlineEnd)
self._lline_restart_y(x, llineStart, llineEnd)
self._rline_restart_y(x, rlineStart, rlineEnd)
for y in range(41):
hlineStart.y_plus()
hlineEnd.y_plus()
llineStart.y_plus()
llineEnd.y_plus()
rlineStart.y_plus()
rlineEnd.y_plus()
draw(x, y, hline, lline, rline)
llineStart.x_plus()
llineEnd.x_plus()
rlineStart.x_plus()
rlineEnd.x_plus()
def _draw_all(self, x, y, hline, lline, rline):
if (x < 32):
hline._draw()
lline._draw()
if (y > 0):
rline._draw()
def _draw_borders(self, x, y, hline, lline, rline):
q, r = self.convert_hex_to_axial(x + self.sector.dx, y + self.sector.dy - 1)
if self.galaxy.borders.borders.get((q, r), False):
if self.galaxy.borders.borders[(q, r)] & 1:
hline._draw()
if self.galaxy.borders.borders[(q, r)] & 2 and y > 0:
rline._draw()
if self.galaxy.borders.borders[(q, r)] & 4:
lline._draw()
def draw_borders(self, pdf, sector):
self.sector = sector
self.hex_grid(pdf, self._draw_borders, 1.5, 'salmon')
@staticmethod
def convert_hex_to_axial(row, col):
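        # Odd-q offset to axial hex conversion. Note the call sites pass the
        # column index first, so the parameter named 'row' is the one tested
        # for column parity with & 1.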
x = row
z = col - (row - (row & 1)) / 2
return (x, z)
def system(self, pdf, star):
def_font = pdf.get_font()
pdf.set_font('times', size=4)
col = (self.xm * 3 * (star.col))
if (star.col & 1):
row = (self.y_start - self.ym * 2) + (star.row * self.ym * 2)
else:
row = (self.y_start - self.ym) + (star.row * self.ym * 2)
point = PDFCursor(col, row)
self.zone(pdf, star, point.copy())
width = self.string_width(pdf.get_font(), star.uwp)
point.y_plus(7)
point.x_plus(self.ym - (width // 2))
pdf.add_text(star.uwp, point)
if len(star.name) > 0:
for chars in range(len(star.name), 0, -1):
width = self.string_width(pdf.get_font(), star.name[:chars])
if width <= self.xm * 3.5:
break
point.y_plus(3.5)
point.x = col
point.x_plus(self.ym - (width // 2))
pdf.add_text(star.name[:chars], point)
added = star.alg_code
if star.tradeCode.subsector_capital:
added += '+'
elif star.tradeCode.sector_capital or star.tradeCode.other_capital:
added += '*'
else:
added += ' '
added += '{:d}'.format(star.ggCount)
point.y_plus(3.5)
point.x = col
width = pdf.get_font()._string_width(added)
point.x_plus(self.ym - (width // 2))
pdf.add_text(added, point)
added = ''
tradeIn = StatCalculation.trade_to_btn(star.tradeIn)
tradeThrough = StatCalculation.trade_to_btn(star.tradeIn + star.tradeOver)
if self.routes == 'trade':
added += "{:X}{:X}{:X}{:d}".format(star.wtn, tradeIn, tradeThrough, star.starportSize)
elif self.routes == 'comm':
added += "{}{} {}".format(star.baseCode, star.ggCount, star.importance)
elif self.routes == 'xroute':
added += " {}".format(star.importance)
width = pdf.get_font()._string_width(added)
point.y_plus(3.5)
point.x = col
point.x_plus(self.ym - (width // 2))
pdf.add_text(added, point)
pdf.set_font(def_font)
def trade_line(self, pdf, edge, data):
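        # Routes are colored by how far their BTN exceeds the display
        # threshold: red at the minimum, stepping through the list below to
        # violet at +6 and above.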
tradeColors = [(255, 0, 0), # Red
(224, 224, 16), # yellow - darker
(0, 255, 0), # green
(0, 255, 255), # Cyan
(96, 96, 255), # blue - lighter
(128, 0, 128), # purple
(148, 0, 211), # violet
]
start = edge[0]
end = edge[1]
trade = StatCalculation.trade_to_btn(data['trade']) - self.min_btn
if trade < 0:
return
if trade > 6:
            logging.getLogger('PyRoute.HexMap').warning("trade calculated over %d" % (self.min_btn + 6))
trade = 6
tradeColor = tradeColors[trade]
color = pdf.get_color()
color.set_color_by_number(tradeColor[0], tradeColor[1], tradeColor[2])
starty = self.y_start + (self.ym * 2 * (start.row)) - (self.ym * (1 if start.col & 1 else 0))
startx = (self.xm * 3 * (start.col)) + self.ym
endRow = end.row
endCol = end.col
endCircle = True
if (end.sector != start.sector):
endCircle = False
if end.sector.x < start.sector.x:
endCol -= 32
if end.sector.x > start.sector.x:
endCol += 32
if end.sector.y < start.sector.y:
endRow -= 40
if end.sector.y > start.sector.y:
endRow += 40
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
(startx, starty), (endx, endy) = self.clipping(startx, starty, endx, endy)
else:
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
lineStart = PDFCursor(startx, starty)
lineEnd = PDFCursor(endx, endy)
line = PDFLine(pdf.session, pdf.page, lineStart, lineEnd, stroke='solid', color=color, size=1)
line._draw()
radius = PDFCursor(2, 2)
circle = PDFEllipse(pdf.session, pdf.page, lineStart, radius, color, size=3)
circle._draw()
if endCircle:
circle = PDFEllipse(pdf.session, pdf.page, lineEnd, radius, color, size=3)
circle._draw()
def comm_line(self, pdf, edge):
start = edge[0]
end = edge[1]
color = pdf.get_color()
color.set_color_by_number(102, 178, 102)
starty = self.y_start + (self.ym * 2 * (start.row)) - (self.ym * (1 if start.col & 1 else 0))
startx = (self.xm * 3 * (start.col)) + self.ym
endRow = end.row
endCol = end.col
if (end.sector != start.sector):
if end.sector.x < start.sector.x:
endCol -= 32
if end.sector.x > start.sector.x:
endCol += 32
if end.sector.y < start.sector.y:
endRow -= 40
if end.sector.y > start.sector.y:
endRow += 40
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
(startx, starty), (endx, endy) = self.clipping(startx, starty, endx, endy)
else:
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
lineStart = PDFCursor(startx, starty)
lineEnd = PDFCursor(endx, endy)
line = PDFLine(pdf.session, pdf.page, lineStart, lineEnd, stroke='solid', color=color, size=3)
line._draw()
def zone(self, pdf, star, point):
point.x_plus(self.ym)
point.y_plus(self.ym)
color = pdf.get_color()
if star.zone in ['R', 'F']:
color.set_color_by_name('crimson')
elif star.zone in ['A', 'U']:
color.set_color_by_name('goldenrod')
else: # no zone -> do nothing
return
radius = PDFCursor(self.xm, self.xm)
circle = PDFEllipse(pdf.session, pdf.page, point, radius, color, size=2)
circle._draw()
def document(self, sector):
path = os.path.join(self.galaxy.output_path, sector.sector_name() + " Sector.pdf")
self.writer = PDFLite(path)
title = "Sector %s" % sector
subject = "Trade route map generated by PyRoute for Traveller"
author = None
keywords = None
creator = "PyPDFLite"
self.writer.set_information(title, subject, author, keywords, creator)
self.writer.set_compression(True)
document = self.writer.get_document()
document.set_margins(4)
return document
@staticmethod
def string_width(font, string):
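        # Sum per-glyph advance widths, falling back to 600 font units for
        # glyphs missing from the width table, then scale by
        # font size / 1000.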
w = 0
for i in string:
w += font.character_widths[i] if i in font.character_widths else 600
return w * font.font_size / 1000.0
def clipping(self, startx, starty, endx, endy):
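        # Clip the segment to the visible map area (x in [15, 600], y in
        # [42, 780]). Axis-aligned segments are clamped directly; otherwise
        # the parametric t value of each boundary crossing is collected, and
        # after sorting, the two middle values bound the on-map sub-segment.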
points_t = [0.0, 1.0]
line_pt_1 = [startx, starty]
line_pt_2 = [endx, endy]
if startx == endx:
if starty > endy:
return ((startx, min(max(starty, endy), 780)),
(startx, max(min(starty, endy), 42)))
else:
return ((startx, max(min(starty, endy), 42)),
(startx, min(max(starty, endy), 780)))
if starty == endy:
if startx > endx:
return ((min(max(startx, endx), 600), starty),
(max(min(startx, endx), 15), starty))
else:
return ((max(min(startx, endx), 15), starty),
(min(max(startx, endx), 600), starty))
points_t.append(float(15 - startx) / (endx - startx))
points_t.append(float(600 - startx) / (endx - startx))
points_t.append(float(780 - starty) / (endy - starty))
points_t.append(float(42 - starty) / (endy - starty))
points_t.sort()
result = [(pt_1 + t * (pt_2 - pt_1)) for t in (points_t[2], points_t[3]) for (pt_1, pt_2) in
zip(line_pt_1, line_pt_2)]
logging.getLogger("PyRoute.HexMap").debug(result)
return (result[0], result[1]), (result[2], result[3])
if __name__ == '__main__':
    sector = Sector('# Core', '# 0,0')
    # Build the galaxy before the map so HexMap can reach its output path.
    galaxy = Galaxy(0, 0)
    hexMap = HexMap(galaxy, 'trade')
    pdf = hexMap.document(sector)
    hexMap.write_base_map(pdf, sector)
star1 = Star(
"0102 <NAME> E551112-7 Lo Po { -3 } (300-3) [1113] B - - 913 9 Im K2 IV M7 V ",
galaxy.starline, 0, 0)
star2 = Star(
"0405 Azimuth B847427-B Ni Pa { 1 } (634+1) [455B] Bc N - 200 13 Im M2 V M7 V ",
galaxy.starline, 0, 0)
    hexMap.trade_line(pdf, [star1, star2], {'trade': 1000})  # sample trade volume
hexMap.system(pdf, star1)
hexMap.system(pdf, star2)
hexMap.writer.close()
|
[
"Star.Star",
"pypdflite.PDFCursor",
"pypdflite.PDFLite",
"pypdflite.pdfobjects.pdfline.PDFLine",
"StatCalculation.StatCalculation.trade_to_btn",
"Galaxy.Sector",
"Galaxy.Galaxy",
"pypdflite.pdfobjects.pdfellipse.PDFEllipse",
"pypdflite.pdfobjects.pdftext.PDFText",
"logging.getLogger"
] |
[((19225, 19250), 'Galaxy.Sector', 'Sector', (['"""# Core"""', '"""# 0,0"""'], {}), "('# Core', '# 0,0')\n", (19231, 19250), False, 'from Galaxy import Sector, Galaxy\n'), ((19364, 19376), 'Galaxy.Galaxy', 'Galaxy', (['(0)', '(0)'], {}), '(0, 0)\n', (19370, 19376), False, 'from Galaxy import Sector, Galaxy\n'), ((19390, 19543), 'Star.Star', 'Star', (['"""0102 <NAME> E551112-7 Lo Po { -3 } (300-3) [1113] B - - 913 9 Im K2 IV M7 V """', 'galaxy.starline', '(0)', '(0)'], {}), "(\n '0102 <NAME> E551112-7 Lo Po { -3 } (300-3) [1113] B - - 913 9 Im K2 IV M7 V '\n , galaxy.starline, 0, 0)\n", (19394, 19543), False, 'from Star import Star\n'), ((19563, 19718), 'Star.Star', 'Star', (['"""0405 Azimuth B847427-B Ni Pa { 1 } (634+1) [455B] Bc N - 200 13 Im M2 V M7 V """', 'galaxy.starline', '(0)', '(0)'], {}), "(\n '0405 Azimuth B847427-B Ni Pa { 1 } (634+1) [455B] Bc N - 200 13 Im M2 V M7 V '\n , galaxy.starline, 0, 0)\n", (19567, 19718), False, 'from Star import Star\n'), ((3770, 3792), 'pypdflite.PDFCursor', 'PDFCursor', (['(5)', '(-5)', '(True)'], {}), '(5, -5, True)\n', (3779, 3792), False, 'from pypdflite import PDFCursor\n'), ((4085, 4122), 'pypdflite.PDFCursor', 'PDFCursor', (['(5)', '(self.y_start - 15)', '(True)'], {}), '(5, self.y_start - 15, True)\n', (4094, 4122), False, 'from pypdflite import PDFCursor\n'), ((4412, 4437), 'pypdflite.PDFCursor', 'PDFCursor', (['(306)', '(767)', '(True)'], {}), '(306, 767, True)\n', (4421, 4437), False, 'from pypdflite import PDFCursor\n'), ((4705, 4743), 'pypdflite.PDFCursor', 'PDFCursor', (['(self.x_start - 5)', '(390)', '(True)'], {}), '(self.x_start - 5, 390, True)\n', (4714, 4743), False, 'from pypdflite import PDFCursor\n'), ((4894, 4945), 'pypdflite.pdfobjects.pdftext.PDFText', 'PDFText', (['pdf.session', 'pdf.page', 'None'], {'cursor': 'cursor'}), '(pdf.session, pdf.page, None, cursor=cursor)\n', (4901, 4945), False, 'from pypdflite.pdfobjects.pdftext import PDFText\n'), ((5096, 5121), 'pypdflite.PDFCursor', 'PDFCursor', (['(598)', '(390)', '(True)'], {}), '(598, 390, True)\n', (5105, 5121), False, 'from pypdflite import PDFCursor\n'), ((5275, 5326), 'pypdflite.pdfobjects.pdftext.PDFText', 'PDFText', (['pdf.session', 'pdf.page', 'None'], {'cursor': 'cursor'}), '(pdf.session, pdf.page, None, cursor=cursor)\n', (5282, 5326), False, 'from pypdflite.pdfobjects.pdftext import PDFText\n'), ((5586, 5622), 'pypdflite.PDFCursor', 'PDFCursor', (['(0)', '(self.y_start + self.xm)'], {}), '(0, self.y_start + self.xm)\n', (5595, 5622), False, 'from pypdflite import PDFCursor\n'), ((5642, 5688), 'pypdflite.PDFCursor', 'PDFCursor', (['(0)', '(self.y_start + self.xm + 180 * 4)'], {}), '(0, self.y_start + self.xm + 180 * 4)\n', (5651, 5688), False, 'from pypdflite import PDFCursor\n'), ((5880, 5906), 'pypdflite.PDFCursor', 'PDFCursor', (['self.x_start', '(0)'], {}), '(self.x_start, 0)\n', (5889, 5906), False, 'from pypdflite import PDFCursor\n'), ((5926, 5943), 'pypdflite.PDFCursor', 'PDFCursor', (['(591)', '(0)'], {}), '(591, 0)\n', (5935, 5943), False, 'from pypdflite import PDFCursor\n'), ((6188, 6203), 'pypdflite.PDFCursor', 'PDFCursor', (['(0)', '(0)'], {}), '(0, 0)\n', (6197, 6203), False, 'from pypdflite import PDFCursor\n'), ((6367, 6382), 'pypdflite.PDFCursor', 'PDFCursor', (['(0)', '(0)'], {}), '(0, 0)\n', (6376, 6382), False, 'from pypdflite import PDFCursor\n'), ((6623, 6721), 'pypdflite.pdfobjects.pdfline.PDFLine', 'PDFLine', (['pdf.session', 'pdf.page', 'hlineStart', 'hlineEnd'], {'stroke': '"""solid"""', 'color': 'color', 'size': 'width'}), 
"(pdf.session, pdf.page, hlineStart, hlineEnd, stroke='solid', color=\n color, size=width)\n", (6630, 6721), False, 'from pypdflite.pdfobjects.pdfline import PDFLine\n'), ((7126, 7143), 'pypdflite.PDFCursor', 'PDFCursor', (['(-10)', '(0)'], {}), '(-10, 0)\n', (7135, 7143), False, 'from pypdflite import PDFCursor\n'), ((7272, 7289), 'pypdflite.PDFCursor', 'PDFCursor', (['(-10)', '(0)'], {}), '(-10, 0)\n', (7281, 7289), False, 'from pypdflite import PDFCursor\n'), ((7495, 7593), 'pypdflite.pdfobjects.pdfline.PDFLine', 'PDFLine', (['pdf.session', 'pdf.page', 'llineStart', 'llineEnd'], {'stroke': '"""solid"""', 'color': 'color', 'size': 'width'}), "(pdf.session, pdf.page, llineStart, llineEnd, stroke='solid', color=\n color, size=width)\n", (7502, 7593), False, 'from pypdflite.pdfobjects.pdfline import PDFLine\n'), ((7998, 8013), 'pypdflite.PDFCursor', 'PDFCursor', (['(0)', '(0)'], {}), '(0, 0)\n', (8007, 8013), False, 'from pypdflite import PDFCursor\n'), ((8151, 8166), 'pypdflite.PDFCursor', 'PDFCursor', (['(0)', '(0)'], {}), '(0, 0)\n', (8160, 8166), False, 'from pypdflite import PDFCursor\n'), ((8361, 8459), 'pypdflite.pdfobjects.pdfline.PDFLine', 'PDFLine', (['pdf.session', 'pdf.page', 'rlineStart', 'rlineEnd'], {'stroke': '"""solid"""', 'color': 'color', 'size': 'width'}), "(pdf.session, pdf.page, rlineStart, rlineEnd, stroke='solid', color=\n color, size=width)\n", (8368, 8459), False, 'from pypdflite.pdfobjects.pdfline import PDFLine\n'), ((11031, 11050), 'pypdflite.PDFCursor', 'PDFCursor', (['col', 'row'], {}), '(col, row)\n', (11040, 11050), False, 'from pypdflite import PDFCursor\n'), ((12156, 12198), 'StatCalculation.StatCalculation.trade_to_btn', 'StatCalculation.trade_to_btn', (['star.tradeIn'], {}), '(star.tradeIn)\n', (12184, 12198), False, 'from StatCalculation import StatCalculation\n'), ((12222, 12281), 'StatCalculation.StatCalculation.trade_to_btn', 'StatCalculation.trade_to_btn', (['(star.tradeIn + star.tradeOver)'], {}), '(star.tradeIn + star.tradeOver)\n', (12250, 12281), False, 'from StatCalculation import StatCalculation\n'), ((14741, 14766), 'pypdflite.PDFCursor', 'PDFCursor', (['startx', 'starty'], {}), '(startx, starty)\n', (14750, 14766), False, 'from pypdflite import PDFCursor\n'), ((14785, 14806), 'pypdflite.PDFCursor', 'PDFCursor', (['endx', 'endy'], {}), '(endx, endy)\n', (14794, 14806), False, 'from pypdflite import PDFCursor\n'), ((14823, 14915), 'pypdflite.pdfobjects.pdfline.PDFLine', 'PDFLine', (['pdf.session', 'pdf.page', 'lineStart', 'lineEnd'], {'stroke': '"""solid"""', 'color': 'color', 'size': '(1)'}), "(pdf.session, pdf.page, lineStart, lineEnd, stroke='solid', color=\n color, size=1)\n", (14830, 14915), False, 'from pypdflite.pdfobjects.pdfline import PDFLine\n'), ((14950, 14965), 'pypdflite.PDFCursor', 'PDFCursor', (['(2)', '(2)'], {}), '(2, 2)\n', (14959, 14965), False, 'from pypdflite import PDFCursor\n'), ((14983, 15050), 'pypdflite.pdfobjects.pdfellipse.PDFEllipse', 'PDFEllipse', (['pdf.session', 'pdf.page', 'lineStart', 'radius', 'color'], {'size': '(3)'}), '(pdf.session, pdf.page, lineStart, radius, color, size=3)\n', (14993, 15050), False, 'from pypdflite.pdfobjects.pdfellipse import PDFEllipse\n'), ((16349, 16374), 'pypdflite.PDFCursor', 'PDFCursor', (['startx', 'starty'], {}), '(startx, starty)\n', (16358, 16374), False, 'from pypdflite import PDFCursor\n'), ((16393, 16414), 'pypdflite.PDFCursor', 'PDFCursor', (['endx', 'endy'], {}), '(endx, endy)\n', (16402, 16414), False, 'from pypdflite import PDFCursor\n'), ((16431, 16523), 
'pypdflite.pdfobjects.pdfline.PDFLine', 'PDFLine', (['pdf.session', 'pdf.page', 'lineStart', 'lineEnd'], {'stroke': '"""solid"""', 'color': 'color', 'size': '(3)'}), "(pdf.session, pdf.page, lineStart, lineEnd, stroke='solid', color=\n color, size=3)\n", (16438, 16523), False, 'from pypdflite.pdfobjects.pdfline import PDFLine\n'), ((16917, 16944), 'pypdflite.PDFCursor', 'PDFCursor', (['self.xm', 'self.xm'], {}), '(self.xm, self.xm)\n', (16926, 16944), False, 'from pypdflite import PDFCursor\n'), ((16963, 17026), 'pypdflite.pdfobjects.pdfellipse.PDFEllipse', 'PDFEllipse', (['pdf.session', 'pdf.page', 'point', 'radius', 'color'], {'size': '(2)'}), '(pdf.session, pdf.page, point, radius, color, size=2)\n', (16973, 17026), False, 'from pypdflite.pdfobjects.pdfellipse import PDFEllipse\n'), ((17196, 17209), 'pypdflite.PDFLite', 'PDFLite', (['path'], {}), '(path)\n', (17203, 17209), False, 'from pypdflite import PDFLite\n'), ((13316, 13359), 'StatCalculation.StatCalculation.trade_to_btn', 'StatCalculation.trade_to_btn', (["data['trade']"], {}), "(data['trade'])\n", (13344, 13359), False, 'from StatCalculation import StatCalculation\n'), ((15118, 15183), 'pypdflite.pdfobjects.pdfellipse.PDFEllipse', 'PDFEllipse', (['pdf.session', 'pdf.page', 'lineEnd', 'radius', 'color'], {'size': '(3)'}), '(pdf.session, pdf.page, lineEnd, radius, color, size=3)\n', (15128, 15183), False, 'from pypdflite.pdfobjects.pdfellipse import PDFEllipse\n'), ((1020, 1055), 'logging.getLogger', 'logging.getLogger', (['"""PyRoute.HexMap"""'], {}), "('PyRoute.HexMap')\n", (1037, 1055), False, 'import logging\n'), ((2103, 2146), 'StatCalculation.StatCalculation.trade_to_btn', 'StatCalculation.trade_to_btn', (["data['trade']"], {}), "(data['trade'])\n", (2131, 2146), False, 'from StatCalculation import StatCalculation\n'), ((19071, 19106), 'logging.getLogger', 'logging.getLogger', (['"""PyRoute.HexMap"""'], {}), "('PyRoute.HexMap')\n", (19088, 19106), False, 'import logging\n'), ((1822, 1857), 'logging.getLogger', 'logging.getLogger', (['"""PyRoute.HexMap"""'], {}), "('PyRoute.HexMap')\n", (1839, 1857), False, 'import logging\n'), ((13450, 13485), 'logging.getLogger', 'logging.getLogger', (['"""PyRoute.HexMap"""'], {}), "('PyRoute.HexMap')\n", (13467, 13485), False, 'import logging\n'), ((1745, 1791), 'StatCalculation.StatCalculation.trade_to_btn', 'StatCalculation.trade_to_btn', (["star[2]['trade']"], {}), "(star[2]['trade'])\n", (1773, 1791), False, 'from StatCalculation import StatCalculation\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_servicegroup
short_description: Manage service group configuration in Netscaler
description:
- Manage service group configuration in Netscaler.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4"
author: <NAME> (@giorgos-nikolopoulos)
options:
servicegroupname:
description:
- >-
Name of the service group. Must begin with an ASCII alphabetic or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Can be changed after the name is created.
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'RPCSVR'
- 'DNS'
- 'ADNS'
- 'SNMP'
- 'RTSP'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'ADNS_TCP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
- 'RADIUS'
- 'RADIUSListener'
- 'RDP'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'SMPP'
- 'PPTP'
- 'GRE'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol used to exchange data with the service."
cachetype:
choices:
- 'TRANSPARENT'
- 'REVERSE'
- 'FORWARD'
description:
- "Cache type supported by the cache server."
maxclient:
description:
- "Maximum number of simultaneous open connections for the service group."
- "Minimum value = C(0)"
- "Maximum value = C(4294967294)"
maxreq:
description:
- "Maximum number of requests that can be sent on a persistent connection to the service group."
- "Note: Connection requests beyond this value are rejected."
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
cacheable:
description:
- "Use the transparent cache redirection virtual server to forward the request to the cache server."
- "Note: Do not set this parameter if you set the Cache Type."
type: bool
cip:
choices:
- 'enabled'
- 'disabled'
description:
- "Insert the Client IP header in requests forwarded to the service."
cipheader:
description:
- >-
Name of the HTTP header whose value must be set to the IP address of the client. Used with the Client
IP parameter. If client IP insertion is enabled, and the client IP header is not specified, the value
of Client IP Header parameter or the value set by the set ns config command is used as client's IP
header name.
- "Minimum length = 1"
usip:
description:
- >-
Use client's IP address as the source IP address when initiating connection to the server. With the
NO setting, which is the default, a mapped IP (MIP) address or subnet IP (SNIP) address is used as
the source IP address to initiate server side connections.
pathmonitor:
description:
- "Path monitoring for clustering."
pathmonitorindv:
description:
- "Individual Path monitoring decisions."
useproxyport:
description:
- >-
Use the proxy port as the source port when initiating connections with the server. With the NO
setting, the client-side connection port is used as the source port for the server-side connection.
- "Note: This parameter is available only when the Use Source IP C(usip) parameter is set to C(yes)."
type: bool
healthmonitor:
description:
- "Monitor the health of this service. Available settings function as follows:"
- "C(yes) - Send probes to check the health of the service."
- >-
C(no) - Do not send probes to check the health of the service. With the NO option, the appliance shows
the service as UP at all times.
type: bool
sp:
description:
- "Enable surge protection for the service group."
type: bool
rtspsessionidremap:
description:
- "Enable RTSP session ID mapping for the service group."
type: bool
clttimeout:
description:
- "Time, in seconds, after which to terminate an idle client connection."
- "Minimum value = C(0)"
- "Maximum value = C(31536000)"
svrtimeout:
description:
- "Time, in seconds, after which to terminate an idle server connection."
- "Minimum value = C(0)"
- "Maximum value = C(31536000)"
cka:
description:
- "Enable client keep-alive for the service group."
type: bool
tcpb:
description:
- "Enable TCP buffering for the service group."
type: bool
cmp:
description:
- "Enable compression for the specified service."
type: bool
maxbandwidth:
description:
- "Maximum bandwidth, in Kbps, allocated for all the services in the service group."
- "Minimum value = C(0)"
- "Maximum value = C(4294967287)"
monthreshold:
description:
- >-
Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to
mark a service as UP or DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with all the services in the service group whose state
transitions from UP to DOWN. Do not enable this option for applications that must complete their
transactions.
tcpprofilename:
description:
- "Name of the TCP profile that contains TCP configuration settings for the service group."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile that contains HTTP configuration settings for the service group."
- "Minimum length = 1"
- "Maximum length = 127"
comment:
description:
- "Any information about the service group."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging of AppFlow information for the specified service group."
netprofile:
description:
- "Network profile for the service group."
- "Minimum length = 1"
- "Maximum length = 127"
autoscale:
choices:
- 'DISABLED'
- 'DNS'
- 'POLICY'
description:
- "Auto scale option for a servicegroup."
memberport:
description:
- "member port."
graceful:
description:
- "Wait for all existing connections to the service to terminate before shutting down the service."
type: bool
servicemembers:
description:
- A list of dictionaries describing each service member of the service group.
suboptions:
ip:
description:
- IP address of the service. Must not overlap with an existing server entity defined by name.
port:
description:
- Server port number.
- Range C(1) - C(65535)
- "* in CLI is represented as 65535 in NITRO API"
hashid:
description:
- The hash identifier for the service.
- This must be unique for each service.
- This parameter is used by hash based load balancing methods.
- Minimum value = C(1)
serverid:
description:
- The identifier for the service.
- This is used when the persistency type is set to Custom Server ID.
servername:
description:
- Name of the server to which to bind the service group.
- The server must already be configured as a named server.
- Minimum length = 1
customserverid:
description:
- The identifier for this IP:Port pair.
- Used when the persistency type is set to Custom Server ID.
weight:
description:
- Weight to assign to the servers in the service group.
- Specifies the capacity of the servers relative to the other servers in the load balancing configuration.
- The higher the weight, the higher the percentage of requests sent to the service.
- Minimum value = C(1)
- Maximum value = C(100)
monitorbindings:
description:
- A list of monitornames to bind to this service
            - Note that the monitors must have already been set up, possibly using the M(netscaler_lb_monitor) module or some other method
suboptions:
monitorname:
description:
- The monitor name to bind to this servicegroup.
weight:
description:
- Weight to assign to the binding between the monitor and servicegroup.
disabled:
description:
- When set to C(yes) the service group state will be set to DISABLED.
- When set to C(no) the service group state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# The LB Monitors monitor-1 and monitor-2 must already exist
# Service members defined by C(ip) must not redefine an existing server's ip address.
# Service members defined by C(servername) must already exist.
- name: Setup http service with ip members
delegate_to: localhost
netscaler_servicegroup:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: <PASSWORD>
state: present
servicegroupname: service-group-1
servicetype: HTTP
servicemembers:
- ip: 10.78.78.78
port: 80
weight: 50
- ip: 10.79.79.79
port: 80
weight: 40
- servername: server-1
port: 80
weight: 10
monitorbindings:
- monitorname: monitor-1
weight: 50
- monitorname: monitor-2
weight: 50
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }
'''
from ansible.module_utils.basic import AnsibleModule
import copy
from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding import servicegroup_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding import lbmonitor_servicegroup_binding
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
def servicegroup_exists(client, module):
log('Checking if service group exists')
count = servicegroup.count_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname'])
log('count is %s' % count)
if count > 0:
return True
else:
return False
def servicegroup_identical(client, module, servicegroup_proxy):
log('Checking if service group is identical')
servicegroups = servicegroup.get_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname'])
if servicegroup_proxy.has_equal_attributes(servicegroups[0]):
return True
else:
return False
def get_configured_service_members(client, module):
log('get_configured_service_members')
readwrite_attrs = [
'servicegroupname',
'ip',
'port',
'hashid',
'serverid',
'servername',
'customserverid',
'weight'
]
readonly_attrs = [
'delay',
'statechangetimesec',
'svrstate',
'tickssincelaststatechange',
'graceful',
]
members = []
if module.params['servicemembers'] is None:
return members
for config in module.params['servicemembers']:
# Make a copy to update
config = copy.deepcopy(config)
config['servicegroupname'] = module.params['servicegroupname']
member_proxy = ConfigProxy(
actual=servicegroup_servicegroupmember_binding(),
client=client,
attribute_values_dict=config,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs
)
members.append(member_proxy)
return members
def servicemembers_identical(client, module):
log('servicemembers_identical')
try:
# count() raises nitro exception instead of returning 0
count = servicegroup_servicegroupmember_binding.count(client, module.params['servicegroupname'])
if count > 0:
servicegroup_members = servicegroup_servicegroupmember_binding.get(client, module.params['servicegroupname'])
else:
servicegroup_members = []
except nitro_exception as e:
if e.errorcode == 258:
servicegroup_members = []
else:
raise
log('servicemembers %s' % servicegroup_members)
module_servicegroups = get_configured_service_members(client, module)
log('Number of service group members %s' % len(servicegroup_members))
if len(servicegroup_members) != len(module_servicegroups):
return False
# Fallthrough to member evaluation
identical_count = 0
for actual_member in servicegroup_members:
for member in module_servicegroups:
if member.has_equal_attributes(actual_member):
identical_count += 1
break
if identical_count != len(servicegroup_members):
return False
# Fallthrough to success
return True
def sync_service_members(client, module):
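    # Resync by removing every existing member binding, then re-adding the
    # configured set.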
log('sync_service_members')
delete_all_servicegroup_members(client, module)
for member in get_configured_service_members(client, module):
member.add()
def delete_all_servicegroup_members(client, module):
log('delete_all_servicegroup_members')
if servicegroup_servicegroupmember_binding.count(client, module.params['servicegroupname']) == 0:
return
servicegroup_members = servicegroup_servicegroupmember_binding.get(client, module.params['servicegroupname'])
log('len %s' % len(servicegroup_members))
log('count %s' % servicegroup_servicegroupmember_binding.count(client, module.params['servicegroupname']))
for member in servicegroup_members:
log('%s' % dir(member))
log('ip %s' % member.ip)
log('servername %s' % member.servername)
if all([
hasattr(member, 'ip'),
member.ip is not None,
hasattr(member, 'servername'),
member.servername is not None,
]):
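            # When a member carries both an ip and a servername, clear the
            # ip and delete the binding by servername alone.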
member.ip = None
member.servicegroupname = module.params['servicegroupname']
servicegroup_servicegroupmember_binding.delete(client, member)
def get_configured_monitor_bindings(client, module):
log('Entering get_configured_monitor_bindings')
bindings = {}
if 'monitorbindings' in module.params and module.params['monitorbindings'] is not None:
for binding in module.params['monitorbindings']:
readwrite_attrs = [
'monitorname',
'servicegroupname',
'weight',
]
readonly_attrs = []
attribute_values_dict = copy.deepcopy(binding)
attribute_values_dict['servicegroupname'] = module.params['servicegroupname']
binding_proxy = ConfigProxy(
actual=lbmonitor_servicegroup_binding(),
client=client,
attribute_values_dict=attribute_values_dict,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
)
key = attribute_values_dict['monitorname']
bindings[key] = binding_proxy
return bindings
def get_actual_monitor_bindings(client, module):
log('Entering get_actual_monitor_bindings')
bindings = {}
try:
# count() raises nitro exception instead of returning 0
count = servicegroup_lbmonitor_binding.count(client, module.params['servicegroupname'])
except nitro_exception as e:
if e.errorcode == 258:
return bindings
else:
raise
if count == 0:
return bindings
# Fallthrough to rest of execution
for binding in servicegroup_lbmonitor_binding.get(client, module.params['servicegroupname']):
        log('Getting actual monitor with name %s' % binding.monitor_name)
key = binding.monitor_name
bindings[key] = binding
return bindings
def monitor_bindings_identical(client, module):
log('Entering monitor_bindings_identical')
configured_bindings = get_configured_monitor_bindings(client, module)
actual_bindings = get_actual_monitor_bindings(client, module)
configured_key_set = set(configured_bindings.keys())
actual_key_set = set(actual_bindings.keys())
symmetrical_diff = configured_key_set ^ actual_key_set
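    # tcp-default and ping-default are implicit, non-removable bindings;
    # exclude them from the comparison so they are not reported as drift.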
for default_monitor in ('tcp-default', 'ping-default'):
if default_monitor in symmetrical_diff:
log('Excluding %s monitor from key comparison' % default_monitor)
symmetrical_diff.remove(default_monitor)
if len(symmetrical_diff) > 0:
return False
# Compare key to key
for key in configured_key_set:
configured_proxy = configured_bindings[key]
log('configured_proxy %s' % [configured_proxy.monitorname, configured_proxy.servicegroupname, configured_proxy.weight])
log('actual_bindings %s' % [actual_bindings[key].monitor_name, actual_bindings[key].servicegroupname, actual_bindings[key].weight])
if any([configured_proxy.monitorname != actual_bindings[key].monitor_name,
configured_proxy.servicegroupname != actual_bindings[key].servicegroupname,
configured_proxy.weight != float(actual_bindings[key].weight)]):
return False
    # Fallthrough to success
return True
def sync_monitor_bindings(client, module):
log('Entering sync_monitor_bindings')
# Delete existing bindings
for binding in get_actual_monitor_bindings(client, module).values():
b = lbmonitor_servicegroup_binding()
b.monitorname = binding.monitor_name
b.servicegroupname = module.params['servicegroupname']
# Cannot remove default monitor bindings
if b.monitorname in ('tcp-default', 'ping-default'):
continue
lbmonitor_servicegroup_binding.delete(client, b)
# Apply configured bindings
for binding in get_configured_monitor_bindings(client, module).values():
log('Adding %s' % binding.monitorname)
binding.add()
def diff(client, module, servicegroup_proxy):
servicegroup_list = servicegroup.get_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname'])
diff_object = servicegroup_proxy.diff_object(servicegroup_list[0])
return diff_object
def do_state_change(client, module, servicegroup_proxy):
if module.params['disabled']:
log('Disabling service')
result = servicegroup.disable(client, servicegroup_proxy.actual)
else:
log('Enabling service')
result = servicegroup.enable(client, servicegroup_proxy.actual)
return result
def main():
module_specific_arguments = dict(
servicegroupname=dict(type='str'),
servicetype=dict(
type='str',
choices=[
'HTTP',
'FTP',
'TCP',
'UDP',
'SSL',
'SSL_BRIDGE',
'SSL_TCP',
'DTLS',
'NNTP',
'RPCSVR',
'DNS',
'ADNS',
'SNMP',
'RTSP',
'DHCPRA',
'ANY',
'SIP_UDP',
'SIP_TCP',
'SIP_SSL',
'DNS_TCP',
'ADNS_TCP',
'MYSQL',
'MSSQL',
'ORACLE',
'RADIUS',
'RADIUSListener',
'RDP',
'DIAMETER',
'SSL_DIAMETER',
'TFTP',
'SMPP',
'PPTP',
'GRE',
'SYSLOGTCP',
'SYSLOGUDP',
'FIX',
'SSL_FIX',
]
),
cachetype=dict(
type='str',
choices=[
'TRANSPARENT',
'REVERSE',
'FORWARD',
]
),
maxclient=dict(type='float'),
maxreq=dict(type='float'),
cacheable=dict(type='bool'),
cip=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
cipheader=dict(type='str'),
usip=dict(type='bool'),
pathmonitor=dict(type='bool'),
pathmonitorindv=dict(type='bool'),
useproxyport=dict(type='bool'),
healthmonitor=dict(type='bool'),
sp=dict(type='bool'),
rtspsessionidremap=dict(type='bool'),
clttimeout=dict(type='float'),
svrtimeout=dict(type='float'),
cka=dict(type='bool'),
tcpb=dict(type='bool'),
cmp=dict(type='bool'),
maxbandwidth=dict(type='float'),
monthreshold=dict(type='float'),
downstateflush=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
tcpprofilename=dict(type='str'),
httpprofilename=dict(type='str'),
comment=dict(type='str'),
appflowlog=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
netprofile=dict(type='str'),
autoscale=dict(
type='str',
choices=[
'DISABLED',
'DNS',
'POLICY',
]
),
memberport=dict(type='int'),
graceful=dict(type='bool'),
)
hand_inserted_arguments = dict(
servicemembers=dict(type='list'),
monitorbindings=dict(type='list'),
disabled=dict(
type='bool',
default=False,
),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
argument_spec.update(hand_inserted_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
# Instantiate service group configuration object
readwrite_attrs = [
'servicegroupname',
'servicetype',
'cachetype',
'maxclient',
'maxreq',
'cacheable',
'cip',
'cipheader',
'usip',
'pathmonitor',
'pathmonitorindv',
'useproxyport',
'healthmonitor',
'sp',
'rtspsessionidremap',
'clttimeout',
'svrtimeout',
'cka',
'tcpb',
'cmp',
'maxbandwidth',
'monthreshold',
'downstateflush',
'tcpprofilename',
'httpprofilename',
'comment',
'appflowlog',
'netprofile',
'autoscale',
'memberport',
'graceful',
]
readonly_attrs = [
'numofconnections',
'serviceconftype',
'value',
'svrstate',
'ip',
'monstatcode',
'monstatparam1',
'monstatparam2',
'monstatparam3',
'statechangetimemsec',
'stateupdatereason',
'clmonowner',
'clmonview',
'groupcount',
'riseapbrstatsmsgcode2',
'serviceipstr',
'servicegroupeffectivestate'
]
immutable_attrs = [
'servicegroupname',
'servicetype',
'cachetype',
'td',
'cipheader',
'state',
'autoscale',
'memberport',
'servername',
'port',
'serverid',
'monitor_name_svc',
'dup_weight',
'riseapbrstatsmsgcode',
'delay',
'graceful',
'includemembers',
'newname',
]
transforms = {
'pathmonitorindv': ['bool_yes_no'],
'cacheable': ['bool_yes_no'],
'cka': ['bool_yes_no'],
'pathmonitor': ['bool_yes_no'],
'tcpb': ['bool_yes_no'],
'sp': ['bool_on_off'],
'usip': ['bool_yes_no'],
'healthmonitor': ['bool_yes_no'],
'useproxyport': ['bool_yes_no'],
'rtspsessionidremap': ['bool_on_off'],
'graceful': ['bool_yes_no'],
'cmp': ['bool_yes_no'],
'cip': [lambda v: v.upper()],
'downstateflush': [lambda v: v.upper()],
'appflowlog': [lambda v: v.upper()],
}
# Instantiate config proxy
servicegroup_proxy = ConfigProxy(
actual=servicegroup(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
if module.params['state'] == 'present':
log('Applying actions for state present')
if not servicegroup_exists(client, module):
if not module.check_mode:
log('Adding service group')
servicegroup_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not servicegroup_identical(client, module, servicegroup_proxy):
# Check if we try to change value of immutable attributes
diff_dict = diff(client, module, servicegroup_proxy)
immutables_changed = get_immutables_intersection(servicegroup_proxy, diff_dict.keys())
if immutables_changed != []:
msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
module.fail_json(msg=msg, diff=diff_dict, **module_result)
if not module.check_mode:
servicegroup_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Check bindings
if not monitor_bindings_identical(client, module):
if not module.check_mode:
sync_monitor_bindings(client, module)
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
if not servicemembers_identical(client, module):
if not module.check_mode:
sync_service_members(client, module)
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
if not module.check_mode:
res = do_state_change(client, module, servicegroup_proxy)
if res.errorcode != 0:
msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
module.fail_json(msg=msg, **module_result)
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state present')
if not servicegroup_exists(client, module):
module.fail_json(msg='Service group is not present', **module_result)
if not servicegroup_identical(client, module, servicegroup_proxy):
module.fail_json(
msg='Service group is not identical to configuration',
diff=diff(client, module, servicegroup_proxy),
**module_result
)
if not servicemembers_identical(client, module):
module.fail_json(msg='Service group members differ from configuration', **module_result)
if not monitor_bindings_identical(client, module):
module.fail_json(msg='Monitor bindings are not identical', **module_result)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if servicegroup_exists(client, module):
if not module.check_mode:
servicegroup_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state absent')
if servicegroup_exists(client, module):
module.fail_json(msg='Service group is present', **module_result)
except nitro_exception as e:
msg = "nitro exception errorcode=" + str(e.errorcode) + ",message=" + e.message
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
|
[
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.enable",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.count_filtered",
"ansible.module_utils.netscaler.log",
"copy.deepcopy",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding",
"nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding.lbmonitor_servicegroup_binding.delete",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.disable",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.count",
"ansible.module_utils.netscaler.get_nitro_client",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.delete",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.get",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding.servicegroup_lbmonitor_binding.get",
"nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding.lbmonitor_servicegroup_binding",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding.servicegroup_lbmonitor_binding.count",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.get_filtered",
"ansible.module_utils.basic.AnsibleModule",
"nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup"
] |
[((13585, 13624), 'ansible.module_utils.netscaler.log', 'log', (['"""Checking if service group exists"""'], {}), "('Checking if service group exists')\n", (13588, 13624), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((13637, 13736), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.count_filtered', 'servicegroup.count_filtered', (['client', "('servicegroupname:%s' % module.params['servicegroupname'])"], {}), "(client, 'servicegroupname:%s' % module.params[\n 'servicegroupname'])\n", (13664, 13736), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup\n'), ((13736, 13762), 'ansible.module_utils.netscaler.log', 'log', (["('count is %s' % count)"], {}), "('count is %s' % count)\n", (13739, 13762), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((13902, 13947), 'ansible.module_utils.netscaler.log', 'log', (['"""Checking if service group is identical"""'], {}), "('Checking if service group is identical')\n", (13905, 13947), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((13968, 14065), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.get_filtered', 'servicegroup.get_filtered', (['client', "('servicegroupname:%s' % module.params['servicegroupname'])"], {}), "(client, 'servicegroupname:%s' % module.params[\n 'servicegroupname'])\n", (13993, 14065), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup\n'), ((14236, 14273), 'ansible.module_utils.netscaler.log', 'log', (['"""get_configured_service_members"""'], {}), "('get_configured_service_members')\n", (14239, 14273), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((15273, 15304), 'ansible.module_utils.netscaler.log', 'log', (['"""servicemembers_identical"""'], {}), "('servicemembers_identical')\n", (15276, 15304), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((15818, 15865), 'ansible.module_utils.netscaler.log', 'log', (["('servicemembers %s' % servicegroup_members)"], {}), "('servicemembers %s' % servicegroup_members)\n", (15821, 15865), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((16539, 16566), 'ansible.module_utils.netscaler.log', 'log', (['"""sync_service_members"""'], {}), "('sync_service_members')\n", (16542, 16566), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((16766, 16804), 'ansible.module_utils.netscaler.log', 'log', (['"""delete_all_servicegroup_members"""'], {}), "('delete_all_servicegroup_members')\n", (16769, 16804), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((16949, 17040), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.get', 'servicegroup_servicegroupmember_binding.get', (['client', "module.params['servicegroupname']"], {}), "(client, module.params[\n 'servicegroupname'])\n", (16992, 17040), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding\n'), ((17760, 17807), 'ansible.module_utils.netscaler.log', 'log', (['"""Entering get_configured_monitor_bindings"""'], {}), "('Entering get_configured_monitor_bindings')\n", (17763, 17807), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((18767, 18810), 'ansible.module_utils.netscaler.log', 'log', (['"""Entering get_actual_monitor_bindings"""'], {}), "('Entering get_actual_monitor_bindings')\n", (18770, 18810), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((19225, 19302), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding.servicegroup_lbmonitor_binding.get', 'servicegroup_lbmonitor_binding.get', (['client', "module.params['servicegroupname']"], {}), "(client, module.params['servicegroupname'])\n", (19259, 19302), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding import servicegroup_lbmonitor_binding\n'), ((19520, 19562), 'ansible.module_utils.netscaler.log', 'log', (['"""Entering monitor_bindings_identical"""'], {}), "('Entering monitor_bindings_identical')\n", (19523, 19562), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((20921, 20958), 'ansible.module_utils.netscaler.log', 'log', (['"""Entering sync_monitor_bindings"""'], {}), "('Entering sync_monitor_bindings')\n", (20924, 20958), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((21656, 21753), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.get_filtered', 'servicegroup.get_filtered', (['client', "('servicegroupname:%s' % module.params['servicegroupname'])"], {}), "(client, 'servicegroupname:%s' % module.params[\n 'servicegroupname'])\n", (21681, 21753), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup\n'), ((25434, 25502), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'argument_spec', 'supports_check_mode': '(True)'}), '(argument_spec=argument_spec, supports_check_mode=True)\n', (25447, 25502), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((25820, 25844), 'ansible.module_utils.netscaler.get_nitro_client', 'get_nitro_client', (['module'], {}), '(module)\n', (25836, 25844), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((14808, 14829), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (14821, 14829), False, 'import copy\n'), ((15394, 15487), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.count', 'servicegroup_servicegroupmember_binding.count', (['client', "module.params['servicegroupname']"], {}), "(client, module.params[\n 'servicegroupname'])\n", (15439, 15487), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding\n'), ((16812, 16905), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.count', 'servicegroup_servicegroupmember_binding.count', (['client', "module.params['servicegroupname']"], {}), "(client, module.params[\n 'servicegroupname'])\n", (16857, 16905), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding\n'), ((17273, 17297), 'ansible.module_utils.netscaler.log', 'log', (["('ip %s' % member.ip)"], {}), "('ip %s' % member.ip)\n", (17276, 17297), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((17306, 17346), 'ansible.module_utils.netscaler.log', 'log', (["('servername %s' % member.servername)"], {}), "('servername %s' % member.servername)\n", (17309, 17346), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((17638, 17700), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.delete', 'servicegroup_servicegroupmember_binding.delete', (['client', 'member'], {}), '(client, member)\n', (17684, 17700), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding\n'), ((18918, 18997), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding.servicegroup_lbmonitor_binding.count', 'servicegroup_lbmonitor_binding.count', (['client', "module.params['servicegroupname']"], {}), "(client, module.params['servicegroupname'])\n", (18954, 18997), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding import servicegroup_lbmonitor_binding\n'), ((19312, 19377), 'ansible.module_utils.netscaler.log', 'log', (["('Gettign actual monitor with name %s' % binding.monitor_name)"], {}), "('Gettign actual monitor with name %s' % binding.monitor_name)\n", (19315, 19377), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((20284, 20408), 'ansible.module_utils.netscaler.log', 'log', (["('configured_proxy %s' % [configured_proxy.monitorname, configured_proxy.\n servicegroupname, configured_proxy.weight])"], {}), "('configured_proxy %s' % [configured_proxy.monitorname, configured_proxy\n .servicegroupname, configured_proxy.weight])\n", (20287, 20408), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((20412, 20547), 'ansible.module_utils.netscaler.log', 'log', (["('actual_bindings %s' % [actual_bindings[key].monitor_name, actual_bindings\n [key].servicegroupname, actual_bindings[key].weight])"], {}), "('actual_bindings %s' % [actual_bindings[key].monitor_name,\n actual_bindings[key].servicegroupname, actual_bindings[key].weight])\n", (20415, 20547), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((21075, 21107), 'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding.lbmonitor_servicegroup_binding', 'lbmonitor_servicegroup_binding', ([], {}), '()\n', (21105, 21107), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding import lbmonitor_servicegroup_binding\n'), ((21355, 21403), 'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding.lbmonitor_servicegroup_binding.delete', 'lbmonitor_servicegroup_binding.delete', (['client', 'b'], {}), '(client, b)\n', (21392, 21403), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding import lbmonitor_servicegroup_binding\n'), ((21523, 21561), 'ansible.module_utils.netscaler.log', 'log', (["('Adding %s' % binding.monitorname)"], {}), "('Adding %s' % binding.monitorname)\n", (21526, 21561), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((21944, 21968), 'ansible.module_utils.netscaler.log', 'log', (['"""Disabling service"""'], {}), "('Disabling service')\n", (21947, 21968), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((21986, 22041), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.disable', 'servicegroup.disable', (['client', 'servicegroup_proxy.actual'], {}), '(client, servicegroup_proxy.actual)\n', (22006, 22041), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup\n'), ((22060, 22083), 'ansible.module_utils.netscaler.log', 'log', (['"""Enabling service"""'], {}), "('Enabling service')\n", (22063, 22083), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((22101, 22155), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup.enable', 'servicegroup.enable', (['client', 'servicegroup_proxy.actual'], {}), '(client, servicegroup_proxy.actual)\n', (22120, 22155), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup\n'), ((15540, 15631), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.get', 'servicegroup_servicegroupmember_binding.get', (['client', "module.params['servicegroupname']"], {}), "(client, module.params[\n 'servicegroupname'])\n", (15583, 15631), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding\n'), ((17103, 17196), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding.count', 'servicegroup_servicegroupmember_binding.count', (['client', "module.params['servicegroupname']"], {}), "(client, module.params[\n 'servicegroupname'])\n", (17148, 17196), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding\n'), ((18182, 18204), 'copy.deepcopy', 'copy.deepcopy', (['binding'], {}), '(binding)\n', (18195, 18204), False, 'import copy\n'), ((19989, 20054), 'ansible.module_utils.netscaler.log', 'log', (["('Excluding %s monitor from key comparison' % default_monitor)"], {}), "('Excluding %s monitor from key comparison' % default_monitor)\n", (19992, 20054), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((28757, 28771), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup', 'servicegroup', ([], {}), '()\n', (28769, 28771), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup\n'), ((29069, 29110), 'ansible.module_utils.netscaler.log', 'log', (['"""Applying actions for state present"""'], {}), "('Applying actions for state present')\n", (29072, 29110), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((14956, 14997), 'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding', 'servicegroup_servicegroupmember_binding', ([], {}), '()\n', (14995, 14997), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding\n'), ((31401, 31439), 'ansible.module_utils.netscaler.log', 'log', (['"""Sanity checks for state present"""'], {}), "('Sanity checks for state present')\n", (31404, 31439), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((32322, 32362), 'ansible.module_utils.netscaler.log', 'log', (['"""Applying actions for state absent"""'], {}), "('Applying actions for state absent')\n", (32325, 32362), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((18359, 18391), 'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding.lbmonitor_servicegroup_binding', 'lbmonitor_servicegroup_binding', ([], {}), '()\n', (18389, 18391), False, 'from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding import lbmonitor_servicegroup_binding\n'), ((29229, 29256), 'ansible.module_utils.netscaler.log', 'log', (['"""Adding service group"""'], {}), "('Adding service group')\n", (29232, 29256), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n'), ((32810, 32847), 'ansible.module_utils.netscaler.log', 'log', (['"""Sanity checks for state absent"""'], {}), "('Sanity checks for state absent')\n", (32813, 32847), False, 'from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
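

# With logits this far apart softmax saturates, so the expected loss is ~0
# when the larger logit belongs to the labeled class and ~(logit gap) when
# it does not.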
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
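
    # Swapping the labels selects the smaller logit of each row (gaps of 50
    # and 150), so the mean loss is 100.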
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
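
    # Plain numpy arrays must also be accepted as labels.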
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
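

# Cross entropy on probabilities (with_logits=False) is checked against a
# plain numpy reference implementation on random inputs.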
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
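

# The reduction modes must be mutually consistent: 'none' is
# permutation-equivariant, while 'sum' and 'mean' must match reducing the
# per-sample losses by hand.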
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
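
    # Label smoothing has to compose with the reduction modes in the same way.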
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
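
    # Unknown or wrongly-cased reduction names must raise ValueError.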
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
|
[
"megengine.functional.nn.cross_entropy",
"megengine.tensor",
"numpy.log",
"numpy.random.randn",
"numpy.random.rand",
"pytest.raises",
"numpy.random.randint",
"numpy.array",
"numpy.exp",
"numpy.random.permutation"
] |
[((627, 658), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (645, 658), True, 'import megengine.functional as F\n'), ((764, 795), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (782, 795), True, 'import megengine.functional as F\n'), ((859, 875), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (867, 875), True, 'import numpy as np\n'), ((887, 918), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (905, 918), True, 'import megengine.functional as F\n'), ((1233, 1265), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(5,)'}), '(10, size=(5,))\n', (1250, 1265), True, 'import numpy as np\n'), ((1566, 1589), 'numpy.random.randn', 'np.random.randn', (['(16)', '(10)'], {}), '(16, 10)\n', (1581, 1589), True, 'import numpy as np\n'), ((1602, 1634), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '[16]'}), '(10, size=[16])\n', (1619, 1634), True, 'import numpy as np\n'), ((1648, 1679), 'megengine.tensor', 'tensor', (['logits'], {'dtype': '"""float32"""'}), "(logits, dtype='float32')\n", (1654, 1679), False, 'from megengine import tensor\n'), ((1692, 1720), 'megengine.tensor', 'tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (1698, 1720), False, 'from megengine import tensor\n'), ((1733, 1758), 'numpy.random.permutation', 'np.random.permutation', (['(16)'], {}), '(16)\n', (1754, 1758), True, 'import numpy as np\n'), ((1777, 1814), 'megengine.tensor', 'tensor', (['logits[perm]'], {'dtype': '"""float32"""'}), "(logits[perm], dtype='float32')\n", (1783, 1814), False, 'from megengine import tensor\n'), ((1832, 1866), 'megengine.tensor', 'tensor', (['label[perm]'], {'dtype': '"""int32"""'}), "(label[perm], dtype='int32')\n", (1838, 1866), False, 'from megengine import tensor\n'), ((1879, 1930), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""none"""'}), "(logits, label, reduction='none')\n", (1897, 1930), True, 'import megengine.functional as F\n'), ((1947, 2008), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits_perm', 'label_perm'], {'reduction': '"""none"""'}), "(logits_perm, label_perm, reduction='none')\n", (1965, 2008), True, 'import megengine.functional as F\n'), ((2095, 2145), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""sum"""'}), "(logits, label, reduction='sum')\n", (2113, 2145), True, 'import megengine.functional as F\n'), ((2243, 2294), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""mean"""'}), "(logits, label, reduction='mean')\n", (2261, 2294), True, 'import megengine.functional as F\n'), ((2383, 2452), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""mean"""', 'label_smooth': '(0.1)'}), "(logits, label, reduction='mean', label_smooth=0.1)\n", (2401, 2452), True, 'import megengine.functional as F\n'), ((2479, 2548), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""none"""', 'label_smooth': '(0.1)'}), "(logits, label, reduction='none', label_smooth=0.1)\n", (2497, 2548), True, 'import megengine.functional as F\n'), ((1029, 1038), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1035, 1038), True, 'import numpy as np\n'), ((1405, 1425), 'megengine.tensor', 'tensor', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (1411, 1425), False, 'from megengine import tensor\n'), ((1427, 1445), 'megengine.tensor', 'tensor', (['y', '"""int32"""'], {}), "(y, 'int32')\n", (1433, 1445), False, 'from megengine import tensor\n'), ((2682, 2707), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2695, 2707), False, 'import pytest\n'), ((2717, 2768), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""MEAN"""'}), "(logits, label, reduction='MEAN')\n", (2735, 2768), True, 'import megengine.functional as F\n'), ((2779, 2804), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2792, 2804), False, 'import pytest\n'), ((2814, 2864), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""max"""'}), "(logits, label, reduction='max')\n", (2832, 2864), True, 'import megengine.functional as F\n'), ((524, 552), 'megengine.tensor', 'tensor', (['[[0, 50], [0, -150]]'], {}), '([[0, 50], [0, -150]])\n', (530, 552), False, 'from megengine import tensor\n'), ((584, 598), 'megengine.tensor', 'tensor', (['[1, 0]'], {}), '([1, 0])\n', (590, 598), False, 'from megengine import tensor\n'), ((721, 735), 'megengine.tensor', 'tensor', (['[0, 1]'], {}), '([0, 1])\n', (727, 735), False, 'from megengine import tensor\n'), ((1192, 1213), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (1206, 1213), True, 'import numpy as np\n'), ((1316, 1332), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1330, 1332), True, 'import numpy as np\n'), ((1138, 1156), 'numpy.log', 'np.log', (['x[i, y[i]]'], {}), '(x[i, y[i]])\n', (1144, 1156), True, 'import numpy as np\n')]
|
from dup.models import Website
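

# Run a status check for every Website stored in the database.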
def check_all_websites():
for website in Website.objects.all():
check_website(website.pk)
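

# Re-fetch the Website by primary key and call its update_status() method.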
def check_website(website_pk: int) -> None:
website = Website.objects.get(pk=website_pk)
website.update_status()
|
[
"dup.models.Website.objects.get",
"dup.models.Website.objects.all"
] |
[((78, 99), 'dup.models.Website.objects.all', 'Website.objects.all', ([], {}), '()\n', (97, 99), False, 'from dup.models import Website\n'), ((195, 229), 'dup.models.Website.objects.get', 'Website.objects.get', ([], {'pk': 'website_pk'}), '(pk=website_pk)\n', (214, 229), False, 'from dup.models import Website\n')]
|