hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a1135405a7392df4fcab654f5f585d507f9f5e9
| 24,200
|
py
|
Python
|
tornado/netutil.py
|
graingert/tornado
|
e4615991845d12bb2aebec4adb689b5cc7214060
|
[
"Apache-2.0"
] | null | null | null |
tornado/netutil.py
|
graingert/tornado
|
e4615991845d12bb2aebec4adb689b5cc7214060
|
[
"Apache-2.0"
] | null | null | null |
tornado/netutil.py
|
graingert/tornado
|
e4615991845d12bb2aebec4adb689b5cc7214060
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Miscellaneous network utility code."""
import concurrent.futures
import errno
import os
import sys
import socket
import ssl
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.util import Configurable, errno_from_exception
from typing import List, Callable, Any, Type, Dict, Union, Tuple, Awaitable, Optional
# Note that the naming of ssl.Purpose is confusing; the purpose
# of a context is to authenticate the opposite side of the connection.
# These module-level contexts are the defaults used when callers do not
# supply their own ssl_options.
_client_ssl_defaults = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
_server_ssl_defaults = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
if hasattr(ssl, "OP_NO_COMPRESSION"):
    # Disable TLS compression (CRIME mitigation).
    # See netutil.ssl_options_to_context
    _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
    _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION

# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
# getaddrinfo attempts to import encodings.idna. If this is done at
# module-import time, the import lock is already held by the main thread,
# leading to deadlock. Avoid it by caching the idna encoder on the main
# thread now.
u"foo".encode("idna")

# For undiagnosed reasons, 'latin1' codec may also need to be preloaded.
u"foo".encode("latin1")

# Default backlog used when calling sock.listen()
_DEFAULT_BACKLOG = 128
def bind_sockets(
    port: int,
    address: Optional[str] = None,
    family: socket.AddressFamily = socket.AF_UNSPEC,
    backlog: int = _DEFAULT_BACKLOG,
    flags: Optional[int] = None,
    reuse_port: bool = False,
) -> List[socket.socket]:
    """Creates listening sockets bound to the given port and address.

    Returns a list of socket objects (multiple sockets are returned if
    the given address maps to multiple IP addresses, which is most common
    for mixed IPv4 and IPv6 use).

    Address may be either an IP address or hostname.  If it's a hostname,
    the server will listen on all IP addresses associated with the
    name.  Address may be an empty string or None to listen on all
    available interfaces.  Family may be set to either `socket.AF_INET`
    or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
    both will be used if available.

    The ``backlog`` argument has the same meaning as for
    `socket.listen() <socket.socket.listen>`.

    ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
    ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.

    ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
    in the list. If your platform doesn't support this option ValueError will
    be raised.
    """
    # Fail fast, before creating any sockets, if SO_REUSEPORT was requested
    # on a platform that cannot honor it.
    if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
        raise ValueError("the platform doesn't support SO_REUSEPORT")

    sockets = []
    if address == "":
        address = None
    if not socket.has_ipv6 and family == socket.AF_UNSPEC:
        # Python can be compiled with --disable-ipv6, which causes
        # operations on AF_INET6 sockets to fail, but does not
        # automatically exclude those results from getaddrinfo
        # results.
        # http://bugs.python.org/issue16208
        family = socket.AF_INET
    if flags is None:
        flags = socket.AI_PASSIVE
    bound_port = None
    unique_addresses = set()  # type: set
    # Sorting by address family gives deterministic ordering of the
    # returned sockets (IPv4 before IPv6 on typical systems).
    for res in sorted(
        socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags),
        key=lambda x: x[0],
    ):
        # getaddrinfo may return duplicate entries; bind each address once.
        if res in unique_addresses:
            continue

        unique_addresses.add(res)

        af, socktype, proto, canonname, sockaddr = res
        if (
            sys.platform == "darwin"
            and address == "localhost"
            and af == socket.AF_INET6
            and sockaddr[3] != 0
        ):
            # Mac OS X includes a link-local address fe80::1%lo0 in the
            # getaddrinfo results for 'localhost'.  However, the firewall
            # doesn't understand that this is a local address and will
            # prompt for access (often repeatedly, due to an apparent
            # bug in its ability to remember granting access to an
            # application). Skip these addresses.
            continue
        try:
            sock = socket.socket(af, socktype, proto)
        except socket.error as e:
            # Skip families the kernel doesn't support (e.g. IPv6 disabled).
            if errno_from_exception(e) == errno.EAFNOSUPPORT:
                continue
            raise
        if os.name != "nt":
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            except socket.error as e:
                if errno_from_exception(e) != errno.ENOPROTOOPT:
                    # Hurd doesn't support SO_REUSEADDR.
                    raise
        if reuse_port:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        if af == socket.AF_INET6:
            # On linux, ipv6 sockets accept ipv4 too by default,
            # but this makes it impossible to bind to both
            # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
            # separate sockets *must* be used to listen for both ipv4
            # and ipv6.  For consistency, always disable ipv4 on our
            # ipv6 sockets and use a separate ipv4 socket when needed.
            #
            # Python 2.x on windows doesn't have IPPROTO_IPV6.
            if hasattr(socket, "IPPROTO_IPV6"):
                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

        # automatic port allocation with port=None
        # should bind on the same port on IPv4 and IPv6
        host, requested_port = sockaddr[:2]
        if requested_port == 0 and bound_port is not None:
            sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))

        sock.setblocking(False)
        try:
            sock.bind(sockaddr)
        except OSError as e:
            if (
                errno_from_exception(e) == errno.EADDRNOTAVAIL
                and address == "localhost"
                and sockaddr[0] == "::1"
            ):
                # On some systems (most notably docker with default
                # configurations), ipv6 is partially disabled:
                # socket.has_ipv6 is true, we can create AF_INET6
                # sockets, and getaddrinfo("localhost", ...,
                # AF_PASSIVE) resolves to ::1, but we get an error
                # when binding.
                #
                # Swallow the error, but only for this specific case.
                # If EADDRNOTAVAIL occurs in other situations, it
                # might be a real problem like a typo in a
                # configuration.
                sock.close()
                continue
            else:
                raise
        # Remember the kernel-assigned port so subsequent families bind
        # to the same one when port=0 was requested.
        bound_port = sock.getsockname()[1]
        sock.listen(backlog)
        sockets.append(sock)
    return sockets
if hasattr(socket, "AF_UNIX"):

    def bind_unix_socket(
        file: str, mode: int = 0o600, backlog: int = _DEFAULT_BACKLOG
    ) -> socket.socket:
        """Creates a listening unix socket.

        If a socket with the given name already exists, it will be deleted.
        If any other file with that name exists, an exception will be
        raised.

        Returns a socket object (not a list of socket objects like
        `bind_sockets`)
        """
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except socket.error as e:
            if errno_from_exception(e) != errno.ENOPROTOOPT:
                # Hurd doesn't support SO_REUSEADDR
                raise
        sock.setblocking(False)
        try:
            st = os.stat(file)
        except FileNotFoundError:
            pass
        else:
            if stat.S_ISSOCK(st.st_mode):
                # Stale socket left behind by a previous run; safe to replace.
                os.remove(file)
            else:
                # Bug fix: the filename was previously passed as a second
                # exception argument instead of being interpolated into the
                # format string, so the message was never formatted.
                raise ValueError("File %s exists and is not a socket" % file)
        sock.bind(file)
        os.chmod(file, mode)
        sock.listen(backlog)
        return sock
def add_accept_handler(
    sock: socket.socket, callback: Callable[[socket.socket, Any], None]
) -> Callable[[], None]:
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    A callable is returned which, when called, will remove the `.IOLoop`
    event handler and stop processing further incoming connections.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.

    .. versionchanged:: 5.0
       A callable is returned (``None`` was returned before).
    """
    loop = IOLoop.current()
    # Mutable cell shared with the closures below; flipped to True once the
    # handler has been removed so an in-flight accept loop can bail out.
    removed_flag = [False]

    def _accept_ready(fd: socket.socket, events: int) -> None:
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time.  Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for _ in range(_DEFAULT_BACKLOG):
            if removed_flag[0]:
                # The socket was probably closed
                return
            try:
                connection, address = sock.accept()
            except BlockingIOError:
                # EWOULDBLOCK indicates we have accepted every
                # connection that is available.
                return
            except ConnectionAbortedError:
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                continue
            callback(connection, address)

    def remove_handler() -> None:
        loop.remove_handler(sock)
        removed_flag[0] = True

    loop.add_handler(sock, _accept_ready, IOLoop.READ)
    return remove_handler
def is_valid_ip(ip: str) -> bool:
    """Returns ``True`` if the given string is a well-formed IP address.

    Supports IPv4 and IPv6.  Hostnames (and anything else getaddrinfo
    cannot parse numerically) return ``False``.
    """
    if not ip or "\x00" in ip:
        # getaddrinfo resolves empty strings to localhost, and truncates
        # on zero bytes.
        return False
    try:
        # AI_NUMERICHOST forbids DNS resolution, so this only succeeds
        # for literal addresses.
        res = socket.getaddrinfo(
            ip, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST
        )
        return bool(res)
    except socket.gaierror as e:
        if e.args[0] == socket.EAI_NONAME:
            return False
        raise
    except UnicodeError:
        # `socket.getaddrinfo` will raise a UnicodeError from the
        # `idna` decoder if the input is longer than 63 characters,
        # even for socket.AI_NUMERICHOST.  See
        # https://bugs.python.org/issue32958 for discussion
        return False
    # Bug fix: removed an unreachable trailing `return True` -- every path
    # through the try/except above already returns or raises.
class Resolver(Configurable):
    """Configurable asynchronous DNS resolver interface.

    The default implementation simply runs the blocking
    `socket.getaddrinfo` call on an executor.  A different implementation
    may be selected with the `Resolver.configure <.Configurable.configure>`
    class method::

        Resolver.configure('tornado.netutil.ThreadedResolver')

    The implementations of this interface included with Tornado are

    * `tornado.netutil.DefaultExecutorResolver`
    * `tornado.netutil.BlockingResolver` (deprecated)
    * `tornado.netutil.ThreadedResolver` (deprecated)
    * `tornado.netutil.OverrideResolver`
    * `tornado.platform.twisted.TwistedResolver`
    * `tornado.platform.caresresolver.CaresResolver`

    .. versionchanged:: 5.0
       The default implementation has changed from `BlockingResolver` to
       `DefaultExecutorResolver`.
    """

    @classmethod
    def configurable_base(cls) -> Type["Resolver"]:
        # All resolver implementations hang off this class in the
        # Configurable hierarchy.
        return Resolver

    @classmethod
    def configurable_default(cls) -> Type["Resolver"]:
        return DefaultExecutorResolver

    def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> Awaitable[List[Tuple[int, Any]]]:
        """Resolves an address.

        The ``host`` argument is a string which may be a hostname or a
        literal IP address.

        Returns a `.Future` whose result is a list of (family,
        address) pairs, where address is a tuple suitable to pass to
        `socket.connect <socket.socket.connect>` (i.e. a ``(host,
        port)`` pair for IPv4; additional fields may be present for
        IPv6). If a ``callback`` is passed, it will be run with the
        result as an argument when it is complete.

        :raises IOError: if the address cannot be resolved.

        .. versionchanged:: 4.4
           Standardized all implementations to raise `IOError`.

        .. versionchanged:: 6.0 The ``callback`` argument was removed.
           Use the returned awaitable object instead.
        """
        # Subclasses must provide the actual resolution strategy.
        raise NotImplementedError()

    def close(self) -> None:
        """Closes the `Resolver`, freeing any resources used.

        .. versionadded:: 3.1
        """
        # The base class holds no resources; subclasses override as needed.
        pass
def _resolve_addr(
host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for fam, socktype, proto, canonname, address in addrinfo:
results.append((fam, address))
return results # type: ignore
class DefaultExecutorResolver(Resolver):
    """Resolver that offloads `socket.getaddrinfo` via `.IOLoop.run_in_executor`.

    .. versionadded:: 5.0
    """

    async def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> List[Tuple[int, Any]]:
        # Run the blocking lookup on the loop's default executor so the
        # event loop itself is never blocked.
        return await IOLoop.current().run_in_executor(
            None, _resolve_addr, host, port, family
        )
class ExecutorResolver(Resolver):
    """Resolver implementation using a `concurrent.futures.Executor`.

    Use this instead of `ThreadedResolver` when you require additional
    control over the executor being used.

    The executor will be shut down when the resolver is closed unless
    ``close_resolver=False``; use this if you want to reuse the same
    executor elsewhere.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.

    .. deprecated:: 5.0
       The default `Resolver` now uses `.IOLoop.run_in_executor`; use that
       instead of this class.
    """

    def initialize(
        self,
        executor: Optional[concurrent.futures.Executor] = None,
        close_executor: bool = True,
    ) -> None:
        self.io_loop = IOLoop.current()
        if executor is None:
            # No executor supplied: fall back to the shared dummy executor,
            # which must never be shut down by this resolver.
            self.executor = dummy_executor
            self.close_executor = False
        else:
            self.executor = executor
            self.close_executor = close_executor

    def close(self) -> None:
        # Shut the executor down only if this resolver owns it.
        if self.close_executor:
            self.executor.shutdown()
        self.executor = None  # type: ignore

    @run_on_executor
    def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> List[Tuple[int, Any]]:
        return _resolve_addr(host, port, family)
class BlockingResolver(ExecutorResolver):
    """`Resolver` that runs `socket.getaddrinfo` directly on the event loop.

    The `.IOLoop` will be blocked during the resolution, although the
    callback will not be run until the next `.IOLoop` iteration.

    .. deprecated:: 5.0
       The default `Resolver` now uses `.IOLoop.run_in_executor`; use that
       instead of this class.
    """

    def initialize(self) -> None:  # type: ignore
        # No executor arguments: the parent falls back to the synchronous
        # dummy executor, which runs the lookup inline.
        super().initialize()
class ThreadedResolver(ExecutorResolver):
    """Multithreaded non-blocking `Resolver` implementation.

    Requires the `concurrent.futures` package to be installed
    (available in the standard library since Python 3.2,
    installable with ``pip install futures`` in older versions).

    The thread pool size can be configured with::

        Resolver.configure('tornado.netutil.ThreadedResolver',
                           num_threads=10)

    .. versionchanged:: 3.1
       All ``ThreadedResolvers`` share a single thread pool, whose
       size is set by the first one to be created.

    .. deprecated:: 5.0
       The default `Resolver` now uses `.IOLoop.run_in_executor`; use that
       instead of this class.
    """

    # Process-wide shared pool and the pid it was created in.
    _threadpool = None  # type: ignore
    _threadpool_pid = None  # type: int

    def initialize(self, num_threads: int = 10) -> None:  # type: ignore
        shared_pool = ThreadedResolver._create_threadpool(num_threads)
        # close_executor=False: the pool is shared, so no single resolver
        # instance may shut it down.
        super().initialize(executor=shared_pool, close_executor=False)

    @classmethod
    def _create_threadpool(
        cls, num_threads: int
    ) -> concurrent.futures.ThreadPoolExecutor:
        current_pid = os.getpid()
        if cls._threadpool_pid != current_pid:
            # Threads cannot survive after a fork, so if our pid isn't what it
            # was when we created the pool then delete it.
            cls._threadpool = None
        if cls._threadpool is None:
            cls._threadpool = concurrent.futures.ThreadPoolExecutor(num_threads)
            cls._threadpool_pid = current_pid
        return cls._threadpool
class OverrideResolver(Resolver):
    """Wraps a resolver with a mapping of overrides.

    This can be used to make local DNS changes (e.g. for testing)
    without modifying system-wide settings.

    The mapping can be in three formats::

        {
            # Hostname to host or ip
            "example.com": "127.0.1.1",

            # Host+port to host+port
            ("login.example.com", 443): ("localhost", 1443),

            # Host+port+address family to host+port
            ("login.example.com", 443, socket.AF_INET6): ("::1", 1443),
        }

    .. versionchanged:: 5.0
       Added support for host-port-family triplets.
    """

    def initialize(self, resolver: Resolver, mapping: dict) -> None:
        self.resolver = resolver
        self.mapping = mapping

    def close(self) -> None:
        self.resolver.close()

    def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> Awaitable[List[Tuple[int, Any]]]:
        # Most-specific key wins: (host, port, family), then (host, port).
        for key in ((host, port, family), (host, port)):
            if key in self.mapping:
                host, port = self.mapping[key]
                break
        else:
            # Least-specific fallback: bare hostname rewrites only the host.
            if host in self.mapping:
                host = self.mapping[host]
        return self.resolver.resolve(host, port, family)
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).  ssl_options_to_context rejects any dict key
# outside this set.
_SSL_CONTEXT_KEYWORDS = frozenset(
    ["ssl_version", "certfile", "keyfile", "cert_reqs", "ca_certs", "ciphers"]
)
def ssl_options_to_context(
    ssl_options: Union[Dict[str, Any], ssl.SSLContext],
    server_side: Optional[bool] = None,
) -> ssl.SSLContext:
    """Try to convert an ``ssl_options`` dictionary to an
    `~ssl.SSLContext` object.

    The ``ssl_options`` dictionary contains keywords to be passed to
    `ssl.wrap_socket`.  In Python 2.7.9+, `ssl.SSLContext` objects can
    be used instead.  This function converts the dict form to its
    `~ssl.SSLContext` equivalent, and may be used when a component which
    accepts both forms needs to upgrade to the `~ssl.SSLContext` version
    to use features like SNI or NPN.

    .. versionchanged:: 6.2

       Added server_side argument. Omitting this argument will
       result in a DeprecationWarning on Python 3.10.
    """
    # A ready-made context passes straight through.
    if isinstance(ssl_options, ssl.SSLContext):
        return ssl_options
    assert isinstance(ssl_options, dict)
    assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
    # TODO: Now that we have the server_side argument, can we switch to
    # create_default_context or would that change behavior?
    if server_side is None:
        default_version = ssl.PROTOCOL_TLS
    elif server_side:
        default_version = ssl.PROTOCOL_TLS_SERVER
    else:
        default_version = ssl.PROTOCOL_TLS_CLIENT
    context = ssl.SSLContext(ssl_options.get("ssl_version", default_version))
    if "certfile" in ssl_options:
        context.load_cert_chain(
            ssl_options["certfile"], ssl_options.get("keyfile", None)
        )
    if "cert_reqs" in ssl_options:
        cert_reqs = ssl_options["cert_reqs"]
        if cert_reqs == ssl.CERT_NONE:
            # This may have been set automatically by PROTOCOL_TLS_CLIENT
            # but is incompatible with CERT_NONE so we must manually clear it.
            context.check_hostname = False
        context.verify_mode = cert_reqs
    if "ca_certs" in ssl_options:
        context.load_verify_locations(ssl_options["ca_certs"])
    if "ciphers" in ssl_options:
        context.set_ciphers(ssl_options["ciphers"])
    if hasattr(ssl, "OP_NO_COMPRESSION"):
        # Disable TLS compression to avoid CRIME and related attacks.
        # This constant depends on openssl version 1.0.
        # TODO: Do we need to do this ourselves or can we trust
        # the defaults?
        context.options |= ssl.OP_NO_COMPRESSION
    return context
def ssl_wrap_socket(
    socket: socket.socket,
    ssl_options: Union[Dict[str, Any], ssl.SSLContext],
    server_hostname: Optional[str] = None,
    server_side: Optional[bool] = None,
    **kwargs: Any
) -> ssl.SSLSocket:
    """Returns an ``ssl.SSLSocket`` wrapping the given socket.

    ``ssl_options`` may be either an `ssl.SSLContext` object or a
    dictionary (as accepted by `ssl_options_to_context`).  Additional
    keyword arguments are passed to ``wrap_socket`` (either the
    `~ssl.SSLContext` method or the `ssl` module function as
    appropriate).

    .. versionchanged:: 6.2

       Added server_side argument. Omitting this argument will
       result in a DeprecationWarning on Python 3.10.
    """
    context = ssl_options_to_context(ssl_options, server_side=server_side)
    if server_side is None:
        server_side = False
    if not ssl.HAS_SNI:
        # Without SNI support, wrap_socket does not accept server_hostname.
        return context.wrap_socket(socket, server_side=server_side, **kwargs)
    # In python 3.4, wrap_socket only accepts the server_hostname
    # argument if HAS_SNI is true.
    # TODO: add a unittest (python added server-side SNI support in 3.4)
    # In the meantime it can be manually tested with
    # python3 -m tornado.httpclient https://sni.velox.ch
    return context.wrap_socket(
        socket, server_hostname=server_hostname, server_side=server_side, **kwargs
    )
| 37.230769
| 86
| 0.651777
|
4a1135c33c57c85889c442c625d2eb32bbbe8c3f
| 16,364
|
bzl
|
Python
|
apple/bundling/bundling_support.bzl
|
kastiglione/rules_apple
|
3745c3f03b9d29a04671fd4fac96468ca7031fd6
|
[
"Apache-2.0"
] | 2
|
2019-09-01T06:06:40.000Z
|
2020-11-10T00:37:01.000Z
|
apple/bundling/bundling_support.bzl
|
c-parsons/rules_apple
|
f75c4b1be219cb32704d900bd1a42ab200eab445
|
[
"Apache-2.0"
] | null | null | null |
apple/bundling/bundling_support.bzl
|
c-parsons/rules_apple
|
f75c4b1be219cb32704d900bd1a42ab200eab445
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Low-level bundling name helpers."""
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"product_support",
)
def _binary_file(ctx, src, dest, executable = False):
    """Returns a bundlable file rooted in the bundle's binary directory.

    Args:
      ctx: The Skylark context.
      src: The `File` artifact that should be bundled.
      dest: The path within the bundle's binary directory where the file
          should be placed.
      executable: True if the file should be made executable.

    Returns:
      A bundlable file struct (see `bundling_support.bundlable_file`).
    """
    binary_dest = _path_in_binary_dir(ctx, dest)
    return _bundlable_file(src, binary_dest, executable)
def _bundlable_file(src, dest, executable = False, contents_only = False):
    """Returns a value that represents a bundlable file or ZIP archive.

    A "bundlable file" is a struct that maps a file (`"src"`) to a path within
    a bundle (`"dest"`). This can be used with plain files, where `dest`
    denotes the path within the bundle where the file should be placed
    (including its filename, which allows it to be changed), or with ZIP
    archives, where `dest` denotes the location within the bundle where the
    ZIP's contents should be extracted.

    Args:
      src: The `File` artifact that should be bundled.
      dest: The path within the bundle where the file should be placed.
      executable: True if the file should be made executable.
      contents_only: If `src` is a directory and this is True, then the
          _contents_ of the directory will be added at `dest` to the bundle;
          if this is False (the default) then the directory _itself_ will be
          added at `dest` to the bundle.

    Returns:
      A struct with `src`, `dest`, `executable`, and `contents_only` fields
      representing the bundlable file.
    """
    return struct(
        src = src,
        dest = dest,
        executable = executable,
        contents_only = contents_only,
    )
def _bundlable_file_sources(bundlable_files):
    """Collects the source artifacts from a collection of bundlable files.

    This is a convenience function that allows a set of bundlable files to be
    quickly turned into a list of files that can be passed to an action's
    inputs, for example.

    Args:
      bundlable_files: A list or set of bundlable file values (as returned by
          `bundling_support.bundlable_file`).

    Returns:
      A `depset` containing the `File` artifacts from the given bundlable
      files.
    """
    sources = []
    for bundlable_file in bundlable_files:
        sources.append(bundlable_file.src)
    return depset(sources)
def _bundle_name(ctx):
    """Returns the name of the bundle.

    The name of the bundle is the value of the `bundle_name` attribute if it
    was given; if not, then the name of the target will be used instead.

    Args:
      ctx: The Skylark context.

    Returns:
      The bundle name.
    """
    # An unset or empty `bundle_name` attribute falls back to the target name.
    return getattr(ctx.attr, "bundle_name", None) or ctx.label.name
def _bundle_extension(ctx):
    """Returns the bundle extension.

    Args:
      ctx: The Skylark context.

    Returns:
      The bundle extension.
    """
    user_extension = getattr(ctx.attr, "bundle_extension", "")
    if user_extension:
        # When the *user* specifies the bundle extension in a public
        # attribute, we do *not* require them to include the leading dot,
        # so we add it here.
        return "." + user_extension

    # Otherwise, derive the extension from the product type, if one is known.
    descriptor = product_support.product_type_descriptor(
        product_support.product_type(ctx),
    )
    if descriptor:
        return descriptor.bundle_extension
    return ""
def _bundle_name_with_extension(ctx):
    """Returns the name of the bundle with its extension.

    Args:
      ctx: The Skylark context.

    Returns:
      The bundle name with its extension.
    """
    # Concatenate the two independently-derived pieces.
    name = _bundle_name(ctx)
    extension = _bundle_extension(ctx)
    return name + extension
def _contents_file(ctx, src, dest, executable = False):
    """Returns a bundlable file rooted in the bundle's contents directory.

    Args:
      ctx: The Skylark context.
      src: The `File` artifact that should be bundled.
      dest: The path within the bundle's contents directory where the file
          should be placed.
      executable: True if the file should be made executable.

    Returns:
      A bundlable file struct (see `bundling_support.bundlable_file`).
    """
    contents_dest = _path_in_contents_dir(ctx, dest)
    return _bundlable_file(src, contents_dest, executable)
def _embedded_bundle(
        path,
        target,
        verify_has_child_plist,
        parent_bundle_id_reference = None):
    """Returns a value that represents an embedded bundle in another bundle.

    These values are used by the bundler to indicate how dependencies that are
    themselves bundles (such as extensions or frameworks) should be bundled in
    the application or target that depends on them.

    Args:
      path: The relative path within the depender's bundle where the given
          bundle should be located.
      target: The target representing the embedded bundle.
      verify_has_child_plist: If True, the bundler should verify the
          info.plist of this bundle against the parents. That means checking
          that the bundle identifier of the depender is a prefix of the bundle
          identifier of the embedded bundle; checking that the version numbers
          are the same, etc.
      parent_bundle_id_reference: A list of keys to make a keypath into this
          bundle's Info.plist where the parent's bundle_id should be found.
          The bundler will then ensure they match the parent's bundle_id.

    Returns:
      A struct with `path`, `target`, `verify_has_child_plist`, and
      `parent_bundle_id_reference` fields equal to the values given in the
      arguments.
    """
    # A bundle-id keypath only makes sense when plist verification is enabled.
    if not verify_has_child_plist and parent_bundle_id_reference != None:
        fail("Internal Error: parent_bundle_id_reference without " +
             "verify_has_child_plist does not make sense.")
    return struct(
        path = path,
        target = target,
        verify_has_child_plist = verify_has_child_plist,
        parent_bundle_id_reference = parent_bundle_id_reference,
    )
def _header_prefix(input_file):
    """Sets a file's bundle destination to a "Headers/" subdirectory.

    Args:
      input_file: The File to be bundled

    Returns:
      A bundlable file struct with the same File object, but whose path has
      been transformed to start with "Headers/".
    """
    return _bundlable_file(input_file, "Headers/" + input_file.basename)
def _path_in_binary_dir(ctx, path):
    """Makes a path relative to where the bundle's binary is stored.

    On iOS/watchOS/tvOS, the binary is placed directly in the bundle's
    contents directory (which itself is actually the bundle root). On macOS,
    the binary is in a MacOS directory that is inside the bundle's Contents
    directory.

    Args:
      ctx: The Skylark context.
      path: The path to make relative to where the bundle's binary is stored.

    Returns:
      The path, made relative to where the bundle's binary is stored.
    """
    # Expand the platform-specific binary-dir format first, then anchor the
    # result inside the contents directory.
    binary_relative_path = ctx.attr._bundle_binary_path_format % (path or "")
    return _path_in_contents_dir(ctx, binary_relative_path)
def _path_in_contents_dir(ctx, path):
    """Makes a path relative to where the bundle's contents are stored.

    Contents include files such as:

    * A directory of resources (which itself might be flattened into contents)
    * A directory for the binary (which might be flattened)
    * Directories for Frameworks and PlugIns (extensions)
    * The bundle's Info.plist and PkgInfo
    * The code signature

    Args:
      ctx: The Skylark context.
      path: The path to make relative to where the bundle's contents are
          stored.

    Returns:
      The path, made relative to where the bundle's contents are stored.
    """
    # The per-platform format string decides whether a Contents/ prefix
    # (or none) is applied.
    contents_format = ctx.attr._bundle_contents_path_format
    return contents_format % (path or "")
def _path_in_resources_dir(ctx, path):
    """Makes a path relative to where the bundle's resources are stored.

    On iOS/watchOS/tvOS, resources are placed directly in the bundle's
    contents directory (which itself is actually the bundle root). On macOS,
    resources are in a Resources directory that is inside the bundle's
    Contents directory.

    Args:
      ctx: The Skylark context.
      path: The path to make relative to where the bundle's resources are
          stored.

    Returns:
      The path, made relative to where the bundle's resources are stored.
    """
    # Expand the platform-specific resources-dir format first, then anchor
    # the result inside the contents directory.
    resources_relative_path = ctx.attr._bundle_resources_path_format % (path or "")
    return _path_in_contents_dir(ctx, resources_relative_path)
def _resource_file(ctx, src, dest, executable = False, contents_only = False):
    """Returns a bundlable file rooted in the bundle's resources directory.

    Args:
      ctx: The Skylark context.
      src: The `File` artifact that should be bundled.
      dest: The path within the bundle's resources directory where the file
          should be placed.
      executable: True if the file should be made executable.
      contents_only: If `src` is a directory and this is True, then the
          _contents_ of the directory will be added at `dest` to the bundle;
          if this is False (the default) then the directory _itself_ will be
          added at `dest` to the bundle.

    Returns:
      A bundlable file struct (see `bundling_support.bundlable_file`).
    """
    resources_dest = _path_in_resources_dir(ctx, dest)
    return _bundlable_file(src, resources_dest, executable, contents_only)
def _validate_bundle_id(bundle_id):
    """Ensures the value is a plausibly valid bundle ID, or fails the build.

    Args:
      bundle_id: The string to check.
    """
    # Apple's CFBundleIdentifier documentation only specifies the allowed
    # character set (alphanumerics plus '.' and '-'); the empty-segment check
    # below is extra safety against likely configuration mistakes.
    for segment in bundle_id.split("."):
        if not segment:
            fail("Empty segment in bundle_id: \"%s\"" % bundle_id)
        if segment.isalnum():
            continue
        # '.' was consumed by split(), so only '-' remains as a legal
        # non-alphanumeric character.
        for idx in range(len(segment)):
            ch = segment[idx]
            if ch != "-" and not ch.isalnum():
                fail("Invalid character(s) in bundle_id: \"%s\"" % bundle_id)
def _ensure_single_xcassets_type(attr, files, extension, message = None):
    """Checks that an xcassets catalog contains a single asset sub-type.

    Args:
      attr: The attribute to associate with the build failure if some file is
          not inside a directory with the given extension.
      files: An iterable of files to check.
      extension: The extension expected on the asset-type directories inside
          the catalog.
      message: Optional custom error message; the offending files are printed
          after it.
    """
    if not message:
        message = (
            "Expected the xcassets directory to only contain files " +
            "are in sub-directories with the extension %s"
        ) % extension
    _ensure_path_format(attr, files, [["xcassets", extension]], message = message)
def _path_is_under_fragments(path, path_fragments):
    """Checks that `path` contains the given fragments, in order.

    Helper for _ensure_path_format().

    Args:
      path: String of the path to check.
      path_fragments: List of strings that must appear in the path, in order.

    Returns:
      True if the path contains every fragment in order (or is a trailing
      Contents.json once the first fragment matched), False otherwise.
    """
    search_from = 0
    for fragment in path_fragments:
        idx = path.find(fragment, search_from)
        if idx == -1:
            # Once the first fragment has matched, a bare Contents.json at
            # that level is always accepted.
            if search_from and path[search_from:] == "Contents.json":
                return True
            return False
        search_from = idx + len(fragment)
    return True
def _ensure_path_format(attr, files, path_fragments_list, message = None):
    """Ensures every file's path matches one of the required fragment lists.

    TODO(b/77804841): The places calling this should go away and these types
    of checks should be done during resource processing. Right now they are
    wedged into attribute collection, then everything is combined into a
    single resource list that the bundling resplits by type, so extra
    validation/splitting here slows things down (double work). The bug is to
    revisit this and pass individual things through in a structured way so
    validation happens in a single processing pass.

    Args:
      attr: The attribute to associate with the build failure if some file
          does not match any fragment list.
      files: An iterable of files to check.
      path_fragments_list: A list of lists; each inner list is a sequence of
          extensions that must appear on the path, in order (to ensure proper
          nesting).
      message: Optional custom error message; the offending files are printed
          after it.
    """
    fragment_groups = [[".%s/" % ext for ext in group] for group in path_fragments_list]
    # Only the nesting order is validated here. More complete checks (counts
    # of outer/inner directories, unexpected extra segments, etc.) are left
    # to the processing rework tracked above.
    bad_paths = {}
    for f in files:
        matched = False
        for group in fragment_groups:
            if _path_is_under_fragments(f.path, group):
                matched = True
                break  # No need to check other fragment groups.
        if not matched:
            bad_paths[f.path] = None
    if len(bad_paths):
        if not message:
            as_paths = [("*" + "*".join(x) + "...") for x in fragment_groups]
            message = "Expected only files inside directories named '*.%s'" % (
                ", ".join(as_paths)
            )
        formatted_paths = "[\n %s\n]" % ",\n ".join(bad_paths.keys())
        fail("%s, but found the following: %s" % (message, formatted_paths), attr)
# Define the loadable module that lists the exported symbols in this file.
# Public names are unprefixed aliases of the private `_`-functions; some of
# the referenced helpers (_binary_file, _bundlable_file, ...) are defined
# earlier in this file.
bundling_support = struct(
    binary_file = _binary_file,
    bundlable_file = _bundlable_file,
    bundlable_file_sources = _bundlable_file_sources,
    bundle_name = _bundle_name,
    bundle_extension = _bundle_extension,
    bundle_name_with_extension = _bundle_name_with_extension,
    contents_file = _contents_file,
    embedded_bundle = _embedded_bundle,
    ensure_path_format = _ensure_path_format,
    ensure_single_xcassets_type = _ensure_single_xcassets_type,
    header_prefix = _header_prefix,
    path_in_binary_dir = _path_in_binary_dir,
    path_in_contents_dir = _path_in_contents_dir,
    path_in_resources_dir = _path_in_resources_dir,
    resource_file = _resource_file,
    validate_bundle_id = _validate_bundle_id,
)
| 38.233645
| 84
| 0.683085
|
4a11372478483a8159ac0267ea70862a9765a2b0
| 23,546
|
py
|
Python
|
SDK/test_integration_config_helpers.py
|
queueit/KnownUser.V3.Python
|
2e9e429451221b650209dabd6df6b3e420a8ac34
|
[
"MIT"
] | 2
|
2019-07-04T11:09:45.000Z
|
2021-04-02T17:28:15.000Z
|
SDK/test_integration_config_helpers.py
|
queueit/KnownUser.V3.Python
|
2e9e429451221b650209dabd6df6b3e420a8ac34
|
[
"MIT"
] | null | null | null |
SDK/test_integration_config_helpers.py
|
queueit/KnownUser.V3.Python
|
2e9e429451221b650209dabd6df6b3e420a8ac34
|
[
"MIT"
] | 3
|
2019-06-30T18:51:32.000Z
|
2021-11-15T19:57:11.000Z
|
import unittest
from queueit_knownuserv3.integration_config_helpers import *
from queueit_knownuserv3.http_context_providers import HttpContextProvider
class HttpContextProviderMock(HttpContextProvider):
    """Test double for HttpContextProvider backed by plain dicts.

    Headers and cookies are exposed as writable dicts so each test can set
    up exactly the request state it needs.
    """

    def __init__(self):
        self.headers = {}
        self.cookies = {}
        self.body = ""

    def getHeader(self, header_name):
        # dict.get returns None for a missing key, matching the original
        # explicit membership check.
        return self.headers.get(header_name)

    def getCookie(self, cookie_name):
        return self.cookies.get(cookie_name)

    def getRequestBodyAsString(self):
        return self.body
class TestIntegrationEvaluator(unittest.TestCase):
    """Tests for IntegrationEvaluator.getMatchedIntegrationConfig.

    Each case builds an inline integration configuration (the structure the
    SDK receives from Queue-it) and asserts which integration, if any,
    matches the request URL combined with the mocked cookies/headers.
    """

    # AND trigger: the cookie part fails (no cookies set on the mock), so no
    # integration matches even though the URL part would.
    def test_getMatchedIntegrationConfig_oneTrigger_and_notMatched(self):
        integrationConfig = {
            "Integrations": [{
                "Triggers": [{
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "Operator": "Equals",
                        "ValueToCompare": "value1",
                        "ValidatorType": "CookieValidator",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }, {
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "test",
                        "Operator": "Contains",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }]
                }]
            }]
        }
        url = "http://test.testdomain.com:8080/test?q=2"
        testObject = IntegrationEvaluator()
        matchedConfig = testObject.getMatchedIntegrationConfig(
            integrationConfig, url, HttpContextProviderMock())
        assert (matchedConfig == None)

    # AND trigger: case-insensitive cookie compare plus URL Contains both
    # pass, so the single integration matches.
    def test_getMatchedIntegrationConfig_oneTrigger_and_matched(self):
        integrationConfig = {
            "Integrations": [{
                "Name":
                "integration1",
                "Triggers": [{
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "Operator": "Equals",
                        "ValueToCompare": "value1",
                        "ValidatorType": "CookieValidator",
                        "IsIgnoreCase": True,
                        "IsNegative": False
                    }, {
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "test",
                        "Operator": "Contains",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }]
                }]
            }]
        }
        url = "http://test.testdomain.com:8080/test?q=2"
        hcpMock = HttpContextProviderMock()
        hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
        testObject = IntegrationEvaluator()
        matchedConfig = testObject.getMatchedIntegrationConfig(
            integrationConfig, url, hcpMock)
        assert (matchedConfig["Name"] == "integration1")

    # A negated user-agent part hits "googlebot" in the header, so the AND
    # trigger fails and nothing matches.
    def test_getMatchedIntegrationConfig_oneTrigger_and_notmatched_UserAgent(
            self):
        integrationConfig = {
            "Integrations": [{
                "Name":
                "integration1",
                "Triggers": [{
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "Operator": "Equals",
                        "ValueToCompare": "value1",
                        "ValidatorType": "CookieValidator",
                        "IsIgnoreCase": True,
                        "IsNegative": False
                    }, {
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "test",
                        "Operator": "Contains",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }, {
                        "ValidatorType": "userAgentValidator",
                        "ValueToCompare": "Googlebot",
                        "Operator": "Contains",
                        "IsIgnoreCase": True,
                        "IsNegative": True
                    }]
                }]
            }]
        }
        url = "http://test.testdomain.com:8080/test?q=2"
        hcpMock = HttpContextProviderMock()
        hcpMock.headers = {"user-agent": "bot.html google.com googlebot test"}
        hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
        testObject = IntegrationEvaluator()
        matchedConfig = testObject.getMatchedIntegrationConfig(
            integrationConfig, url, hcpMock)
        assert (matchedConfig == None)

    # OR trigger where both parts evaluate to False (cookie match is negated
    # away; URL Equals fails): no match.
    def test_getMatchedIntegrationConfig_oneTrigger_or_notMatched(self):
        integrationConfig = {
            "Integrations": [{
                "Name":
                "integration1",
                "Triggers": [{
                    "LogicalOperator":
                    "Or",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "Operator": "Equals",
                        "ValueToCompare": "value1",
                        "ValidatorType": "CookieValidator",
                        "IsIgnoreCase": True,
                        "IsNegative": True
                    }, {
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "test",
                        "Operator": "Equals",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }]
                }]
            }]
        }
        url = "http://test.testdomain.com:8080/test?q=2"
        hcpMock = HttpContextProviderMock()
        hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
        testObject = IntegrationEvaluator()
        matchedConfig = testObject.getMatchedIntegrationConfig(
            integrationConfig, url, hcpMock)
        assert (matchedConfig == None)

    # OR trigger: the negated URL Equals part evaluates to True, which is
    # enough for the trigger to match.
    def test_getMatchedIntegrationConfig_oneTrigger_or_matched(self):
        integrationConfig = {
            "Integrations": [{
                "Name":
                "integration1",
                "Triggers": [{
                    "LogicalOperator":
                    "Or",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "Operator": "Equals",
                        "ValueToCompare": "value1",
                        "ValidatorType": "CookieValidator",
                        "IsIgnoreCase": True,
                        "IsNegative": True
                    }, {
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "test",
                        "Operator": "Equals",
                        "IsIgnoreCase": False,
                        "IsNegative": True
                    }]
                }]
            }]
        }
        url = "http://test.testdomain.com:8080/test?q=2"
        hcpMock = HttpContextProviderMock()
        hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
        testObject = IntegrationEvaluator()
        matchedConfig = testObject.getMatchedIntegrationConfig(
            integrationConfig, url, hcpMock)
        assert (matchedConfig["Name"] == "integration1")

    # Two triggers on the same integration: the first fails but the second
    # (case-sensitive cookie + URL Contains) passes, so it still matches.
    def test_getMatchedIntegrationConfig_twoTriggers_matched(self):
        integrationConfig = {
            "Integrations": [{
                "Name":
                "integration1",
                "Triggers": [{
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "Operator": "Equals",
                        "ValueToCompare": "value1",
                        "ValidatorType": "CookieValidator",
                        "IsIgnoreCase": True,
                        "IsNegative": True
                    }]
                }, {
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "Operator": "Equals",
                        "ValueToCompare": "Value1",
                        "ValidatorType": "CookieValidator",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }, {
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "test",
                        "Operator": "Contains",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }]
                }]
            }]
        }
        url = "http://test.testdomain.com:8080/test?q=2"
        hcpMock = HttpContextProviderMock()
        hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
        testObject = IntegrationEvaluator()
        matchedConfig = testObject.getMatchedIntegrationConfig(
            integrationConfig, url, hcpMock)
        assert (matchedConfig["Name"] == "integration1")

    # Integrations are evaluated in order: integration0 fails (case-sensitive
    # "Test"), integration1 matches first and wins over integration2.
    def test_getMatchedIntegrationConfig_threeIntegrationsInOrder_secondMatched(
            self):
        integrationConfig = {
            "Integrations": [{
                "Name":
                "integration0",
                "Triggers": [{
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "Test",
                        "Operator": "Contains",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }]
                }]
            }, {
                "Name":
                "integration1",
                "Triggers": [{
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "UrlPart": "PageUrl",
                        "ValidatorType": "UrlValidator",
                        "ValueToCompare": "test",
                        "Operator": "Contains",
                        "IsIgnoreCase": False,
                        "IsNegative": False
                    }]
                }]
            }, {
                "Name":
                "integration2",
                "Triggers": [{
                    "LogicalOperator":
                    "And",
                    "TriggerParts": [{
                        "CookieName": "c1",
                        "ValidatorType": "CookieValidator",
                        "ValueToCompare": "c1",
                        "Operator": "Equals",
                        "IsIgnoreCase": True,
                        "IsNegative": False
                    }]
                }]
            }]
        }
        url = "http://test.testdomain.com:8080/test?q=2"
        hcpMock = HttpContextProviderMock()
        hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
        testObject = IntegrationEvaluator()
        matchedConfig = testObject.getMatchedIntegrationConfig(
            integrationConfig, url, hcpMock)
        assert (matchedConfig["Name"] == "integration1")
class TestUrlValidatorHelper(unittest.TestCase):
    """Tests UrlValidatorHelper.evaluate against PageUrl/PagePath/HostName
    trigger parts, including the IsNegative inversion."""

    def test_evaluate(self):
        # Missing/empty trigger parts must evaluate to False, not raise.
        assert (not UrlValidatorHelper.evaluate(None, "notimportant"))
        assert (not UrlValidatorHelper.evaluate({}, "notimportant"))
        # Full-URL Contains: query differs (q=1 vs q=2), so no match.
        triggerPart = {
            "UrlPart": "PageUrl",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "http://test.testdomain.com:8080/test?q=1"
        }
        assert (not UrlValidatorHelper.evaluate(
            triggerPart, "http://test.testdomain.com:8080/test?q=2"))
        # Path Equals with IsIgnoreCase: "/Test/t1" matches "/test/t1".
        triggerPart = {
            "UrlPart": "PagePath",
            "Operator": "Equals",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "/Test/t1"
        }
        assert (UrlValidatorHelper.evaluate(
            triggerPart, "http://test.testdomain.com:8080/test/t1?q=2&y02"))
        # HostName Contains: the "m." subdomain host still contains the value.
        triggerPart = {
            "UrlPart": "HostName",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "test.testdomain.com"
        }
        assert (UrlValidatorHelper.evaluate(
            triggerPart, "http://m.test.testdomain.com:8080/test?q=2"))
        # Same match, but IsNegative flips the result to False.
        triggerPart = {
            "UrlPart": "HostName",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": True,
            "ValueToCompare": "test.testdomain.com"
        }
        assert (not UrlValidatorHelper.evaluate(
            triggerPart, "http://m.test.testdomain.com:8080/test?q=2"))
class TestCookieValidatorHelper(unittest.TestCase):
    """Tests CookieValidatorHelper.evaluate with mocked request cookies."""

    def test_evaluate(self):
        hcpMock = HttpContextProviderMock()
        # Missing/empty trigger parts evaluate to False.
        assert (not CookieValidatorHelper.evaluate(None, hcpMock))
        assert (not CookieValidatorHelper.evaluate({}, hcpMock))
        # Cookie value "hhh" does not contain "1": no match.
        triggerPart = {
            "CookieName": "c1",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "1"
        }
        hcpMock.cookies = {"c1": "hhh"}
        assert (not CookieValidatorHelper.evaluate(triggerPart, hcpMock))
        # No IsIgnoreCase/IsNegative keys here — presumably exercises the
        # helper's handling of missing flags; confirm against the SDK.
        triggerPart = {
            "CookieName": "c1",
            "Operator": "Contains",
            "ValueToCompare": "1"
        }
        hcpMock.cookies = {"c2": "ddd", "c1": "3"}
        assert (not CookieValidatorHelper.evaluate(triggerPart, hcpMock))
        # Cookie value "1" contains "1": match.
        triggerPart = {
            "CookieName": "c1",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "1"
        }
        hcpMock.cookies = {"c2": "ddd", "c1": "1"}
        assert (CookieValidatorHelper.evaluate(triggerPart, hcpMock))
        # Same data with IsNegative True inverts the outcome.
        triggerPart = {
            "CookieName": "c1",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": True,
            "ValueToCompare": "1"
        }
        hcpMock.cookies = {"c2": "ddd", "c1": "1"}
        assert (not CookieValidatorHelper.evaluate(triggerPart, hcpMock))
class TestUserAgentValidatorHelper(unittest.TestCase):
    """Tests UserAgentValidatorHelper.evaluate against the mocked
    user-agent request header."""

    def test_evaluate(self):
        hcpMock = HttpContextProviderMock()
        # Missing/empty trigger parts evaluate to False.
        assert (not UserAgentValidatorHelper.evaluate(None, hcpMock))
        assert (not UserAgentValidatorHelper.evaluate({}, hcpMock))
        # Case-sensitive Contains: "googlebot" is not in "Googlebot ...".
        triggerPart = {
            "Operator": "Contains",
            "IsIgnoreCase": False,
            "IsNegative": False,
            "ValueToCompare": "googlebot"
        }
        hcpMock.headers = {"user-agent": "Googlebot sample useraagent"}
        assert (not UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
        # Negated Equals: the header differs from the value, so True.
        triggerPart = {
            "Operator": "Equals",
            "IsIgnoreCase": True,
            "IsNegative": True,
            "ValueToCompare": "googlebot"
        }
        hcpMock.headers = {"user-agent": "ooglebot sample useraagent"}
        assert (UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
        # Negated Contains with an exact hit: inverted to False.
        triggerPart = {
            "Operator": "Contains",
            "IsIgnoreCase": False,
            "IsNegative": True,
            "ValueToCompare": "googlebot"
        }
        hcpMock.headers = {"user-agent": "googlebot"}
        assert (not UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
        # Case-insensitive Contains: matches.
        triggerPart = {
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "googlebot"
        }
        hcpMock.headers = {"user-agent": "Googlebot"}
        assert (UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
class TestHttpHeaderValidatorHelper(unittest.TestCase):
    """Tests HttpHeaderValidatorHelper.evaluate using a named request
    header from the mock."""

    def test_evaluate(self):
        hcpMock = HttpContextProviderMock()
        # Missing/empty trigger parts evaluate to False.
        assert (not HttpHeaderValidatorHelper.evaluate(None, hcpMock))
        assert (not HttpHeaderValidatorHelper.evaluate({}, hcpMock))
        # Case-insensitive Contains: "VaLuE" contains "value".
        triggerPart = {
            "HttpHeaderName": "a-header",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "value"
        }
        hcpMock.headers = {'a-header': "VaLuE"}
        assert (HttpHeaderValidatorHelper.evaluate(triggerPart, hcpMock))
        # Header value "not" does not contain "value": False.
        triggerPart = {
            "HttpHeaderName": "a-header",
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "value"
        }
        hcpMock.headers = {'a-header': "not"}
        assert (not HttpHeaderValidatorHelper.evaluate(triggerPart, hcpMock))
        # Negated Contains with no hit: inverted to True.
        triggerPart = {
            "HttpHeaderName": "a-header",
            "Operator": "Contains",
            "IsNegative": True,
            "IsIgnoreCase": False,
            "ValueToCompare": "value"
        }
        hcpMock.headers = {'a-header': "not"}
        assert (HttpHeaderValidatorHelper.evaluate(triggerPart, hcpMock))
class TestComparisonOperatorHelper(unittest.TestCase):
    """Tests ComparisonOperatorHelper.evaluate for every operator.

    From these call sites the argument order appears to be
    (operator, isNegative, isIgnoreCase, value, valueToCompare,
    valuesToCompare) — inferred from usage; confirm against the SDK.
    """

    # Equals across case-sensitivity and negation combinations.
    def test_evaluate_equals_operator(self):
        assert (ComparisonOperatorHelper.evaluate("Equals", False, False, None,
                                                  None, None))
        assert (ComparisonOperatorHelper.evaluate("Equals", False, False,
                                                  "test1", "test1", None))
        assert (not ComparisonOperatorHelper.evaluate("Equals", False, False,
                                                      "test1", "Test1", None))
        assert (ComparisonOperatorHelper.evaluate("Equals", False, True,
                                                  "test1", "Test1", None))
        assert (ComparisonOperatorHelper.evaluate("Equals", True, False,
                                                  "test1", "Test1", None))
        assert (not ComparisonOperatorHelper.evaluate("Equals", True, False,
                                                      "test1", "test1", None))
        assert (not ComparisonOperatorHelper.evaluate("Equals", True, True,
                                                      "test1", "Test1", None))

    # Contains, including the "*" wildcard (matches any non-empty value).
    def test_evaluate_contains_operator(self):
        assert (ComparisonOperatorHelper.evaluate("Contains", False, False,
                                                  None, None, None))
        assert (ComparisonOperatorHelper.evaluate(
            "Contains", False, False, "test_test1_test", "test1", None))
        assert (not ComparisonOperatorHelper.evaluate(
            "Contains", False, False, "test_test1_test", "Test1", None))
        assert (ComparisonOperatorHelper.evaluate(
            "Contains", False, True, "test_test1_test", "Test1", None))
        assert (ComparisonOperatorHelper.evaluate(
            "Contains", True, False, "test_test1_test", "Test1", None))
        assert (not ComparisonOperatorHelper.evaluate(
            "Contains", True, True, "test_test1", "Test1", None))
        assert (not ComparisonOperatorHelper.evaluate(
            "Contains", True, False, "test_test1", "test1", None))
        assert (ComparisonOperatorHelper.evaluate(
            "Contains", False, False, "test_dsdsdsdtest1", "*", None))
        assert (not ComparisonOperatorHelper.evaluate(
            "Contains", False, False, "", "*", None))

    # EqualsAny matches the value against a list in the last argument.
    def test_evaluate_equalsAny_operator(self):
        assert (ComparisonOperatorHelper.evaluate("EqualsAny", False, False,
                                                  "test1", None, ["test1"]))
        assert (not ComparisonOperatorHelper.evaluate(
            "EqualsAny", False, False, "test1", None, ["Test1"]))
        assert (ComparisonOperatorHelper.evaluate("EqualsAny", False, True,
                                                  "test1", None, ["Test1"]))
        assert (ComparisonOperatorHelper.evaluate("EqualsAny", True, False,
                                                  "test1", None, ["Test1"]))
        assert (not ComparisonOperatorHelper.evaluate(
            "EqualsAny", True, False, "test1", None, ["test1"]))
        assert (not ComparisonOperatorHelper.evaluate(
            "EqualsAny", True, True, "test1", None, ["Test1"]))

    # ContainsAny checks substring membership against a list, incl. "*".
    def test_evaluate_containsAny_operator(self):
        assert (ComparisonOperatorHelper.evaluate(
            "ContainsAny", False, False, "test_test1_test", None, ["test1"]))
        assert (not ComparisonOperatorHelper.evaluate(
            "ContainsAny", False, False, "test_test1_test", None, ["Test1"]))
        assert (ComparisonOperatorHelper.evaluate(
            "ContainsAny", False, True, "test_test1_test", None, ["Test1"]))
        assert (ComparisonOperatorHelper.evaluate(
            "ContainsAny", True, False, "test_test1_test", None, ["Test1"]))
        assert (not ComparisonOperatorHelper.evaluate(
            "ContainsAny", True, True, "test_test1", None, ["Test1"]))
        assert (not ComparisonOperatorHelper.evaluate(
            "ContainsAny", True, False, "test_test1", None, ["test1"]))
        assert (ComparisonOperatorHelper.evaluate(
            "ContainsAny", False, False, "test_dsdsdsdtest1", None, ["*"]))

    # Unknown operator names must evaluate to False, not raise.
    def test_evaluate_unsupported_operator(self):
        assert (not ComparisonOperatorHelper.evaluate("-not-supported-", False,
                                                      False, None, None, None))
class TestRequestBodyValidatorHelper(unittest.TestCase):
    """Tests RequestBodyValidatorHelper.evaluate against the mocked
    request body string."""

    def test_evaluate(self):
        hcp_mock = HttpContextProviderMock()
        # Missing/empty trigger parts evaluate to False.
        assert (not RequestBodyValidatorHelper.evaluate(None, hcp_mock))
        assert (not RequestBodyValidatorHelper.evaluate({}, hcp_mock))
        trigger_part = {
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "test body"
        }
        # Body is still the empty string here: no match.
        assert (not RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
        hcp_mock.body = "my test body is here"
        assert (RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
        # Equals against a longer body: False.
        trigger_part = {
            "Operator": "Equals",
            "IsIgnoreCase": True,
            "IsNegative": False,
            "ValueToCompare": "Test"
        }
        assert (not RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
        # Negated Contains with a case-insensitive hit: inverted to False.
        trigger_part = {
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": True,
            "ValueToCompare": "Test"
        }
        assert (not RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
        # Negated Contains with no hit: True.
        trigger_part = {
            "Operator": "Contains",
            "IsIgnoreCase": True,
            "IsNegative": True,
            "ValueToCompare": "BTest"
        }
        hcp_mock.body = "my test body is here"
        assert (RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
| 39.440536
| 80
| 0.499193
|
4a11377d66d8a74156993558b043b46f32fa7b97
| 2,818
|
py
|
Python
|
main.py
|
prasanthr/sftp-transfer
|
984020501f91265053894998ff0aec93836dab3f
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
prasanthr/sftp-transfer
|
984020501f91265053894998ff0aec93836dab3f
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
prasanthr/sftp-transfer
|
984020501f91265053894998ff0aec93836dab3f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START cloudrun_pubsub_server_setup]
# [START run_pubsub_server_setup]
import base64
import os
import random
import json
from flask import Flask, request
from google.cloud import storage
import pysftp
app = Flask(__name__)
@app.route("/", methods=["POST"])
def index():
envelope = request.get_json()
if not envelope:
msg = "no Pub/Sub message received"
print(f"error: {msg}")
return f"Bad Request: {msg}", 400
if not isinstance(envelope, dict) or "message" not in envelope:
msg = "invalid Pub/Sub message format"
print(f"error: {msg}")
return f"Bad Request: {msg}", 400
pubsub_message = envelope["message"]
message = "<MESSAGE>"
if isinstance(pubsub_message, dict) and "data" in pubsub_message:
message = base64.b64decode(pubsub_message["data"]).decode("utf-8").strip()
#message = envelope["message"]["data"]
print(f"Message: {message}!")
message_dict = json.loads(message)
bucket_name = message_dict["bucket"]
source_blob_name = message_dict["name"]
temp_localfile = "/tmp/tmpfile" + str(random.randint(0,100))
print(f"bucket_name: {bucket_name}!")
print(f"source blob name: {source_blob_name}!")
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(temp_localfile)
print(
"Downloaded storage object {} from bucket {} to local file {}.".format(
source_blob_name, bucket_name, temp_localfile
)
)
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
with pysftp.Connection(os.getenv("SFTP_SERVER"), username=os.getenv("SFTP_USER"),
private_key='/key/sftp-private-key', cnopts = cnopts) as sftp:
sftp.put(temp_localfile)
print ("file copied to sftp server")
return ("", 204)
# [END run_pubsub_handler]
# [END cloudrun_pubsub_handler]
if __name__ == "__main__":
PORT = int(os.getenv("PORT")) if os.getenv("PORT") else 8080
# This is used when running locally. Gunicorn is used to run the
# application on Cloud Run. See entrypoint in Dockerfile.
app.run(host="127.0.0.1", port=PORT, debug=True)
| 31.662921
| 86
| 0.683109
|
4a11378f4378e739a228a6b5adfb1fd57ae39f3c
| 1,580
|
py
|
Python
|
luta/text_finder.py
|
keremkoseoglu/luta
|
380b83ed9536dd69a573d5b7d34be63573a147fa
|
[
"MIT"
] | null | null | null |
luta/text_finder.py
|
keremkoseoglu/luta
|
380b83ed9536dd69a573d5b7d34be63573a147fa
|
[
"MIT"
] | 2
|
2022-03-13T06:40:56.000Z
|
2022-03-31T09:55:37.000Z
|
luta/text_finder.py
|
keremkoseoglu/luta
|
380b83ed9536dd69a573d5b7d34be63573a147fa
|
[
"MIT"
] | null | null | null |
""" Text finder module """
import re
class FoundText():
    """A substring located in a document.

    Attributes:
        value: the raw matched text.
        start: offset of the match within the source document.
        end: offset just past the match.
        found: whether a match was actually located.
    """
    # NOTE: the docstring above was previously placed *after* the class
    # attributes, so it was a no-op expression and __doc__ was None.

    # Characters removed outright from the cleaned value.
    _UNCLEANS = ["\n"]
    # HTML entities translated back to their literal characters.
    # NOTE(review): in this copy of the file the entity names appeared
    # pre-unescaped (identity mappings); restored to the obvious intent.
    _ESCAPES = [{"from": "&amp;", "to": "&"},
                {"from": "&lt;", "to": "<"},
                {"from": "&gt;", "to": ">"}]
    # Compiled once at class-definition time instead of on every access.
    _TAG_RE = re.compile('<.*?>')

    def __init__(self, value: str = "", start: int = 0, end: int = 0, found = False):
        self.value = value
        self.start = start
        self.end = end
        self.found = found
        self._clean_value = ""
        self._clean_value_built = False

    @property
    def clean_value(self) -> str:
        """Return `value` with markup tags stripped, newlines removed,
        HTML entities unescaped, and surrounding whitespace trimmed.

        The result is computed lazily on first access and cached.
        """
        if not self._clean_value_built:
            cleaned = FoundText._TAG_RE.sub('', self.value)
            for unclean in FoundText._UNCLEANS:
                cleaned = cleaned.replace(unclean, "")
            for escape in FoundText._ESCAPES:
                cleaned = cleaned.replace(escape["from"], escape["to"])
            self._clean_value = cleaned.strip()
            self._clean_value_built = True
        return self._clean_value


def find_between(doc: str, first: str, last: str) -> FoundText:
    """Find the text between the first occurrence of `first` and the next
    occurrence of `last` in `doc`.

    Returns:
        A FoundText with found=True, the offsets, and the enclosed value on
        success; a default (found=False) FoundText if either delimiter is
        missing.
    """
    result = FoundText()
    try:
        result.start = doc.index(first) + len(first)
        result.end = doc.index(last, result.start)
        result.value = doc[result.start:result.end]
        result.found = True
    except ValueError:
        # Either delimiter absent: discard any partial offsets.
        result = FoundText()
    return result
| 31.6
| 91
| 0.551266
|
4a1137f2740c9aa0ad33c6947ee1133f26a81ad0
| 1,055
|
py
|
Python
|
wntr/tests/test_minor_loss.py
|
algchyhao/WNTR
|
dd4db188a8641a4da16cf80a1557c908fa48c17d
|
[
"BSD-3-Clause"
] | null | null | null |
wntr/tests/test_minor_loss.py
|
algchyhao/WNTR
|
dd4db188a8641a4da16cf80a1557c908fa48c17d
|
[
"BSD-3-Clause"
] | null | null | null |
wntr/tests/test_minor_loss.py
|
algchyhao/WNTR
|
dd4db188a8641a4da16cf80a1557c908fa48c17d
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from nose import SkipTest
import wntr
class TestMinorLosses(unittest.TestCase):
    """Compares WNTRSimulator and EPANET head results for a pipe with a
    large minor-loss coefficient (EPANET half is currently skipped)."""
    @classmethod
    def setUpClass(self):
        # No shared fixtures required; kept for symmetry with other modules.
        pass
    @classmethod
    def tearDownClass(self):
        pass
    def test_pipe_minor_loss(self):
        """Builds a two-node network, runs WNTRSimulator, then (when the
        skip below is removed) cross-checks the junction head against
        EPANET to within 0.01."""
        wn = wntr.network.WaterNetworkModel()
        wn.options.time.duration = 3600 * 2
        wn.add_reservoir(name='r1', base_head=20.0)
        wn.add_junction(name='j1', base_demand=0.1)
        wn.add_pipe(name='p1', start_node_name='r1', end_node_name='j1', minor_loss=100.0)
        sim = wntr.sim.WNTRSimulator(wn, mode='DD')
        results1 = sim.run_sim()
        wn.write_inpfile('temp.inp', 'CMH')
        raise SkipTest # EPANET seg faults (on Travis)
        # Everything below is intentionally unreachable until the EPANET
        # crash noted above is resolved.
        wn2 = wntr.network.WaterNetworkModel('temp.inp')
        sim = wntr.sim.EpanetSimulator(wn2)
        results2 = sim.run_sim()
        head1 = results1.node['head'].loc[0, 'j1']
        head2 = results2.node['head'].loc[0, 'j1']
        head_diff = abs(head1-head2)
        self.assertLess(head_diff, 0.01)
| 28.513514
| 90
| 0.621801
|
4a11395b341a127c81515040bf8a9225dfcfd84a
| 2,455
|
py
|
Python
|
load_reward_figure.py
|
ganggit/rlexp
|
d4600df8db260f6d79132e7dc5156d8b5a86d845
|
[
"MIT"
] | null | null | null |
load_reward_figure.py
|
ganggit/rlexp
|
d4600df8db260f6d79132e7dc5156d8b5a86d845
|
[
"MIT"
] | null | null | null |
load_reward_figure.py
|
ganggit/rlexp
|
d4600df8db260f6d79132e7dc5156d8b5a86d845
|
[
"MIT"
] | null | null | null |
import pickle
# libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Marker shapes cycled through when plotting each curve.
makerList = ['o', '*', '+', 'D', 's', '-']
#labelList= ["greedy", "random", "e-greedy", "boltzmann", "bayesian"]
# Legend labels; only the first two entries are used below.
# NOTE(review): "vallina" looks like a typo for "vanilla" — it appears in
# the plot legend; confirm before changing.
labelList = ["vallina actor-critic", "active actor-critic",
             "e-greedy", "boltzmann", "bayesian"]
# load the original
# Each pickle holds (a, b, episode_lengths, episode_rewards) for one run.
with open('ac_orig_cartpole1.pickle', 'rb') as fp:
    [a1, b1, lens1, rewards1] = pickle.load(fp)
with open('ac_active_cartpole.pickle', 'rb') as fp:
    [a2, b2, lens2, rewards2] = pickle.load(fp)
# Window size for the rolling-mean smoothing of rewards.
smoothing_window = 10
# Figure 1: episode length per episode for both runs.
fig1 = plt.figure(figsize=(10, 5))
i = 0
x = range(len(lens1))
plt.plot(x, lens1, marker=makerList[i],
         label=labelList[i])  # plotting by columns
i = i + 1
# NOTE(review): reuses len(lens1) as the x-range for lens2 — assumes both
# runs have the same number of episodes; confirm.
x = range(len(lens1))
plt.plot(x, lens2, marker=makerList[i],
         label=labelList[i])  # plotting by columns
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
ax = plt.gca()
ax.legend(loc='best')
plt.show()
# Figure 2: smoothed episode reward (rolling mean over smoothing_window).
# NOTE(review): unlike figures 1 and 3, no legend/show is issued for this
# figure — presumably displayed later or saved elsewhere; verify.
fig2 = plt.figure(figsize=(10, 5))
i = 0
x = range(len(rewards1))
rewards_smoothed = pd.Series(rewards1).rolling(
    smoothing_window, min_periods=smoothing_window).mean()
x = range(len(rewards_smoothed))
# plotting by columns
plt.plot(x, rewards_smoothed, marker=makerList[i], label=labelList[i])
i = i + 1
rewards_smoothed = pd.Series(rewards2).rolling(
    smoothing_window, min_periods=smoothing_window).mean()
plt.plot(x, rewards_smoothed, marker=makerList[i], label=labelList[i])
plt.xlabel("Episode")
plt.ylabel("Episode Reward (Smoothed)")
plt.title("Episode Reward over Time (Smoothed over window size {})".format(
    smoothing_window))
# Figure 3: cumulative time steps vs. episode number (first run only).
i = 0
x = range(len(lens1))
fig3 = plt.figure(figsize=(10, 5))
plt.plot(np.cumsum(lens1), np.arange(len(lens1)),
         marker=makerList[i], label=labelList[i])
plt.xlabel("Time Steps")
plt.ylabel("Episode")
plt.title("Episode per time step")
ax = plt.gca()
ax.legend(loc='best')
plt.show()
# multiple line plot
'''
plt.plot( 'x', 'y1', data=df, marker='o', markerfacecolor='blue', markersize=12, color='skyblue', linewidth=4)
plt.plot( 'x', 'y2', data=df, marker='', color='olive', linewidth=2)
plt.plot( 'x', 'y3', data=df, marker='', color='olive', linewidth=2, linestyle='dashed', label="toto")
'''
| 30.308642
| 111
| 0.664766
|
4a113a90d3c77f2bfac116c336170bde2b874dba
| 107
|
py
|
Python
|
dbconfig.py
|
thenriq/data-representation-project
|
37bce3978be94bc8eec896c19e3ee3bdf90c8b35
|
[
"MIT"
] | 1
|
2021-12-27T22:11:41.000Z
|
2021-12-27T22:11:41.000Z
|
dbconfig.py
|
thenriq/data-representation-project
|
37bce3978be94bc8eec896c19e3ee3bdf90c8b35
|
[
"MIT"
] | null | null | null |
dbconfig.py
|
thenriq/data-representation-project
|
37bce3978be94bc8eec896c19e3ee3bdf90c8b35
|
[
"MIT"
] | null | null | null |
# Connection settings for the local MySQL instance used by the app.
# NOTE(review): credentials are hard-coded (root with an empty password) —
# acceptable only for local development; load from environment variables or
# a secrets store before deploying anywhere shared.
mysql={
    'host':"localhost",
    'user':'root',
    'password':'',
    'database':'datarepresentation'
}
| 17.833333
| 35
| 0.560748
|
4a113c2689e6e193e6d4ea431911caf493c421cd
| 2,859
|
py
|
Python
|
grasp_python/src/grasp.py
|
dandmetal/Grasp-Based-On-Lateral-Curvatures-and-Geometric-Primitives
|
d07f7110739435f1760694d96d8eae51681edd5d
|
[
"MIT"
] | null | null | null |
grasp_python/src/grasp.py
|
dandmetal/Grasp-Based-On-Lateral-Curvatures-and-Geometric-Primitives
|
d07f7110739435f1760694d96d8eae51681edd5d
|
[
"MIT"
] | null | null | null |
grasp_python/src/grasp.py
|
dandmetal/Grasp-Based-On-Lateral-Curvatures-and-Geometric-Primitives
|
d07f7110739435f1760694d96d8eae51681edd5d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pcl
import grasp_filters
import grasp_helper
from obj_parts import ObjParts
import operator
def grasp(cloud, g_h = 0.04, g_w = 0.14, n = 6):
"""
Estimate the best grasp and return the best grasp location.
Parameters:
-----------
cloud : pcl.PointCloud()
g_h: gripper height in meters
g_w: gripper width in meters
n: number of points to calculate curvature
Returns:
-----------
indice: The indice of the best region to grasp
cloud : pcl.PointCloud()
"""
parts = []
obj_parts= []
is_simple = False
indice = -1
#cloud = pcl.load('/home/daniel/pcd256/clamp.pcd')
#file_pre = "/home/daniel/python_tutorial/pcl/results/part"
#file_pos = ".pcd"
min = np.min(cloud, axis=0)
max = np.max(cloud, axis=0)
altura = max[1] - min[1]
largura = max[0] - min[0]
print("Height: ", altura)
print("Width: ", largura)
parts_temp = grasp_filters.crop_height(min,max,g_h,cloud)
size_parts_temp = len(parts_temp)
print("Number of parts: ", size_parts_temp)
for i in range(size_parts_temp):
cluster = grasp_filters.get_clusters(parts_temp[i])
for j in range(len(cluster)):
parts.append(cluster[j])
#parts = parts_temp
size_parts = len(parts)
print("Number of clusters: ", size_parts)
if size_parts == 0:
parts = parts_temp
if parts == 0:
print("Grasp in the center")
else:
for i in range(len(parts)):
print("Part Size: ", parts[i].size)
#file_save = file_pre + str(i) + file_pos
#print("Salvo como: ", file_save)
#pcl.save(parts[i], file_save)
min = np.min(parts[i], axis=0)
max = np.max(parts[i], axis=0)
altura = max[1] - min[1]
largura = max[0] - min[0]
N = parts[i].size/n
r = N*2
curv= grasp_filters.get_curvature_points(parts[i],r)
delta = grasp_filters.get_geometry(parts[i],N)
part = ObjParts(parts[i],altura,largura,curv, delta,i)
obj_parts.append(part)
obj_parts.sort(key=operator.attrgetter('curvatura'))
for i in range(len(obj_parts)):
print(obj_parts[i])
if grasp_helper.check_simple_geometry(obj_parts):
print("Grasp Object in the center")
is_simple = True
else:
indice = grasp_helper.get_grasp(obj_parts, g_w)
print("Best part to grasp: ", indice)
if indice == -2:
return -2,0
elif indice == -1:
return -1, cloud
else:
return indice, obj_parts[indice].cloud
#pcl.save(cloud, "/home/daniel/python_tutorial/pcl/results/cloud.pcd")
| 28.306931
| 74
| 0.583771
|
4a113c6bacb51f68108c17e9310b6ac6420cbea9
| 12,473
|
py
|
Python
|
glom/mutation.py
|
yomagroup/glom
|
d4e2f2c5469db5fb8f727d44b4742d5e143d2874
|
[
"BSD-3-Clause"
] | 1,470
|
2018-04-30T22:58:39.000Z
|
2022-03-30T13:05:44.000Z
|
glom/mutation.py
|
yomagroup/glom
|
d4e2f2c5469db5fb8f727d44b4742d5e143d2874
|
[
"BSD-3-Clause"
] | 230
|
2018-05-01T12:28:37.000Z
|
2022-02-10T06:47:16.000Z
|
glom/mutation.py
|
yomagroup/glom
|
d4e2f2c5469db5fb8f727d44b4742d5e143d2874
|
[
"BSD-3-Clause"
] | 56
|
2018-05-09T23:11:39.000Z
|
2022-02-12T01:36:31.000Z
|
"""By default, glom aims to safely return a transformed copy of your
data. But sometimes you really need to transform an existing object.
When you already have a large or complex bit of nested data that you
are sure you want to modify in-place, glom has you covered, with the
:func:`~glom.assign` function, and the :func:`~glom.Assign` specifier
type.
"""
import operator
from pprint import pprint
from .core import Path, T, S, Spec, glom, UnregisteredTarget, GlomError, PathAccessError, UP
from .core import TType, register_op, TargetRegistry, bbrepr, PathAssignError
try:
basestring
except NameError:
basestring = str
if getattr(__builtins__, '__dict__', None) is not None:
# pypy's __builtins__ is a module, as is CPython's REPL, but at
# normal execution time it's a dict?
__builtins__ = __builtins__.__dict__
class PathDeleteError(PathAssignError):
"""This :exc:`GlomError` subtype is raised when an assignment fails,
stemming from an :func:`~glom.delete` call or other
:class:`~glom.Delete` usage.
One example would be deleting an out-of-range position in a list::
>>> delete(["short", "list"], Path(5))
Traceback (most recent call last):
...
PathDeleteError: could not delete 5 on object at Path(), got error: IndexError(...
Other assignment failures could be due to deleting a read-only
``@property`` or exception being raised inside a ``__delattr__()``.
"""
def get_message(self):
return ('could not delete %r on object at %r, got error: %r'
% (self.dest_name, self.path, self.exc))
class Assign(object):
"""*New in glom 18.3.0*
The ``Assign`` specifier type enables glom to modify the target,
performing a "deep-set" to mirror glom's original deep-get use
case.
``Assign`` can be used to perform spot modifications of large data
structures when making a copy is not desired::
# deep assignment into a nested dictionary
>>> target = {'a': {}}
>>> spec = Assign('a.b', 'value')
>>> _ = glom(target, spec)
>>> pprint(target)
{'a': {'b': 'value'}}
The value to be assigned can also be a :class:`~glom.Spec`, which
is useful for copying values around within the data structure::
# copying one nested value to another
>>> _ = glom(target, Assign('a.c', Spec('a.b')))
>>> pprint(target)
{'a': {'b': 'value', 'c': 'value'}}
Another handy use of Assign is to deep-apply a function::
# sort a deep nested list
>>> target={'a':{'b':[3,1,2]}}
>>> _ = glom(target, Assign('a.b', Spec(('a.b',sorted))))
>>> pprint(target)
{'a': {'b': [1, 2, 3]}}
Like many other specifier types, ``Assign``'s destination path can be
a :data:`~glom.T` expression, for maximum control::
# changing the error message of an exception in an error list
>>> err = ValueError('initial message')
>>> target = {'errors': [err]}
>>> _ = glom(target, Assign(T['errors'][0].args, ('new message',)))
>>> str(err)
'new message'
``Assign`` has built-in support for assigning to attributes of
objects, keys of mappings (like dicts), and indexes of sequences
(like lists). Additional types can be registered through
:func:`~glom.register()` using the ``"assign"`` operation name.
Attempting to assign to an immutable structure, like a
:class:`tuple`, will result in a
:class:`~glom.PathAssignError`. Attempting to assign to a path
that doesn't exist will raise a :class:`~PathAccessError`.
To automatically backfill missing structures, you can pass a
callable to the *missing* argument. This callable will be called
for each path segment along the assignment which is not
present.
>>> target = {}
>>> assign(target, 'a.b.c', 'hi', missing=dict)
{'a': {'b': {'c': 'hi'}}}
"""
def __init__(self, path, val, missing=None):
# TODO: an option like require_preexisting or something to
# ensure that a value is mutated, not just added. Current
# workaround is to do a Check().
if isinstance(path, basestring):
path = Path.from_text(path)
elif type(path) is TType:
path = Path(path)
elif not isinstance(path, Path):
raise TypeError('path argument must be a .-delimited string, Path, T, or S')
try:
self.op, self.arg = path.items()[-1]
except IndexError:
raise ValueError('path must have at least one element')
self._orig_path = path
self.path = path[:-1]
if self.op not in '[.P':
# maybe if we add null-coalescing this should do something?
raise ValueError('last part of path must be setattr or setitem')
self.val = val
if missing is not None:
if not callable(missing):
raise TypeError('expected missing to be callable, not %r' % (missing,))
self.missing = missing
def glomit(self, target, scope):
if type(self.val) is Spec:
val = scope[glom](target, self.val, scope)
else:
val = self.val
op, arg, path = self.op, self.arg, self.path
if self.path.startswith(S):
dest_target = scope[UP]
dest_path = self.path.from_t()
else:
dest_target = target
dest_path = self.path
try:
dest = scope[glom](dest_target, dest_path, scope)
except PathAccessError as pae:
if not self.missing:
raise
remaining_path = self._orig_path[pae.part_idx + 1:]
val = scope[glom](self.missing(), Assign(remaining_path, val, missing=self.missing), scope)
op, arg = self._orig_path.items()[pae.part_idx]
path = self._orig_path[:pae.part_idx]
dest = scope[glom](dest_target, path, scope)
# TODO: forward-detect immutable dest?
if op == '[':
dest[arg] = val
elif op == '.':
setattr(dest, arg, val)
elif op == 'P':
_assign = scope[TargetRegistry].get_handler('assign', dest)
try:
_assign(dest, arg, val)
except Exception as e:
raise PathAssignError(e, path, arg)
return target
def __repr__(self):
cn = self.__class__.__name__
if self.missing is None:
return '%s(%r, %r)' % (cn, self._orig_path, self.val)
return '%s(%r, %r, missing=%s)' % (cn, self._orig_path, self.val, bbrepr(self.missing))
def assign(obj, path, val, missing=None):
"""*New in glom 18.3.0*
The ``assign()`` function provides convenient "deep set"
functionality, modifying nested data structures in-place::
>>> target = {'a': [{'b': 'c'}, {'d': None}]}
>>> _ = assign(target, 'a.1.d', 'e') # let's give 'd' a value of 'e'
>>> pprint(target)
{'a': [{'b': 'c'}, {'d': 'e'}]}
Missing structures can also be automatically created with the
*missing* parameter. For more information and examples, see the
:class:`~glom.Assign` specifier type, which this function wraps.
"""
return glom(obj, Assign(path, val, missing=missing))
_ALL_BUILTIN_TYPES = [v for v in __builtins__.values() if isinstance(v, type)]
_BUILTIN_BASE_TYPES = [v for v in _ALL_BUILTIN_TYPES
if not issubclass(v, tuple([t for t in _ALL_BUILTIN_TYPES
if t not in (v, type, object)]))]
_UNASSIGNABLE_BASE_TYPES = tuple(set(_BUILTIN_BASE_TYPES)
- set([dict, list, BaseException, object, type]))
def _set_sequence_item(target, idx, val):
target[int(idx)] = val
def _assign_autodiscover(type_obj):
# TODO: issubclass or "in"?
if issubclass(type_obj, _UNASSIGNABLE_BASE_TYPES):
return False
if callable(getattr(type_obj, '__setitem__', None)):
if callable(getattr(type_obj, 'index', None)):
return _set_sequence_item
return operator.setitem
return setattr
register_op('assign', auto_func=_assign_autodiscover, exact=False)
class Delete(object):
"""
In addition to glom's core "deep-get" and ``Assign``'s "deep-set",
the ``Delete`` specifier type performs a "deep-del", which can
remove items from larger data structures by key, attribute, and
index.
>>> target = {'dict': {'x': [5, 6, 7]}}
>>> glom(target, Delete('dict.x.1'))
{'dict': {'x': [5, 7]}}
>>> glom(target, Delete('dict.x'))
{'dict': {}}
If a target path is missing, a :exc:`PathDeleteError` will be
raised. To ignore missing targets, use the ``ignore_missing``
flag:
>>> glom(target, Delete('does_not_exist', ignore_missing=True))
{'dict': {}}
``Delete`` has built-in support for deleting attributes of
objects, keys of dicts, and indexes of sequences
(like lists). Additional types can be registered through
:func:`~glom.register()` using the ``"delete"`` operation name.
.. versionadded:: 20.5.0
"""
def __init__(self, path, ignore_missing=False):
if isinstance(path, basestring):
path = Path.from_text(path)
elif type(path) is TType:
path = Path(path)
elif not isinstance(path, Path):
raise TypeError('path argument must be a .-delimited string, Path, T, or S')
try:
self.op, self.arg = path.items()[-1]
except IndexError:
raise ValueError('path must have at least one element')
self._orig_path = path
self.path = path[:-1]
if self.op not in '[.P':
raise ValueError('last part of path must be an attribute or index')
self.ignore_missing = ignore_missing
def glomit(self, target, scope):
op, arg, path = self.op, self.arg, self.path
if self.path.startswith(S):
dest_target = scope[UP]
dest_path = self.path.from_t()
else:
dest_target = target
dest_path = self.path
try:
dest = scope[glom](dest_target, dest_path, scope)
except PathAccessError as pae:
if not self.ignore_missing:
raise
else:
if op == '[':
try:
del dest[arg]
except IndexError as e:
if not self.ignore_missing:
raise PathDeleteError(e, path, arg)
elif op == '.':
try:
delattr(dest, arg)
except AttributeError as e:
if not self.ignore_missing:
raise PathDeleteError(e, path, arg)
elif op == 'P':
_delete = scope[TargetRegistry].get_handler('delete', dest)
try:
_delete(dest, arg)
except Exception as e:
if not self.ignore_missing:
raise PathDeleteError(e, path, arg)
return target
def __repr__(self):
cn = self.__class__.__name__
return '%s(%r)' % (cn, self._orig_path)
def delete(obj, path, ignore_missing=False):
"""
The ``delete()`` function provides "deep del" functionality,
modifying nested data structures in-place::
>>> target = {'a': [{'b': 'c'}, {'d': None}]}
>>> delete(target, 'a.0.b')
{'a': [{}, {'d': None}]}
Attempting to delete missing keys, attributes, and indexes will
raise a :exc:`PathDeleteError`. To ignore these errors, use the
*ignore_missing* argument::
>>> delete(target, 'does_not_exist', ignore_missing=True)
{'a': [{}, {'d': None}]}
For more information and examples, see the :class:`~glom.Delete`
specifier type, which this convenience function wraps.
.. versionadded:: 20.5.0
"""
return glom(obj, Delete(path, ignore_missing=ignore_missing))
def _del_sequence_item(target, idx):
del target[int(idx)]
def _delete_autodiscover(type_obj):
if issubclass(type_obj, _UNASSIGNABLE_BASE_TYPES):
return False
if callable(getattr(type_obj, '__delitem__', None)):
if callable(getattr(type_obj, 'index', None)):
return _del_sequence_item
return operator.delitem
return delattr
register_op('delete', auto_func=_delete_autodiscover, exact=False)
| 34.455801
| 103
| 0.598974
|
4a113daf13e31932187c94eff9ba76b5d56969f1
| 5,920
|
py
|
Python
|
weibo/settings.py
|
YiChengCai1999/SinaWeiboCrawler
|
0f65c6c96926ee0a599b7833a95ef2cc77420683
|
[
"Apache-2.0"
] | null | null | null |
weibo/settings.py
|
YiChengCai1999/SinaWeiboCrawler
|
0f65c6c96926ee0a599b7833a95ef2cc77420683
|
[
"Apache-2.0"
] | null | null | null |
weibo/settings.py
|
YiChengCai1999/SinaWeiboCrawler
|
0f65c6c96926ee0a599b7833a95ef2cc77420683
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for weibo project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'weibo'
SPIDER_MODULES = ['weibo.spiders']
NEWSPIDER_MODULE = 'weibo.spiders'
COOKIES_ENABLED = False
TELNETCONSOLE_ENABLED = False
LOG_LEVEL = "INFO" # "INFO"
LOG_FILE = "weibo.log"
LOG_ENABLED = False
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32 # search_com带cookie: 12, 不带cookie: 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 访问完一个页面再访问下一个时需要等待的时间,默认为10秒
# DOWNLOAD_DELAY = 10
DOWNLOAD_DELAY = 0 # 0.5 关键能瓶颈之一!!但是却是稳定性的保证
DOWNLOAD_TIMEOUT = 3 # 5s仍会出现TimeOut
RETRY_TIMES = 1 # 默认是3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 4 # 16
CONCURRENT_REQUESTS_PER_IP = 4 # 16
import random
# Configure request header
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7',
# 'cookie': 'your cookie'
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'weibo (+http://www.yourdomain.com)'
USER_AGENT_LIST = [
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
"Dalvik/1.6.0 (Linux; U; Android 4.2.1; 2013022 MIUI/JHACNBL30.0)",
"Mozilla/5.0 (Linux; U; Android 4.4.2; zh-cn; HUAWEI MT7-TL00 Build/HuaweiMT7-TL00) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"AndroidDownloadManager",
"Apache-HttpClient/UNAVAILABLE (java 1.4)",
"Dalvik/1.6.0 (Linux; U; Android 4.3; SM-N7508V Build/JLS36C)",
"Android50-AndroidPhone-8000-76-0-Statistics-wifi",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; MI 3 MIUI/V7.2.1.0.KXCCNDA)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; Lenovo A3800-d Build/LenovoA3800-d)",
"Lite 1.0 ( http://litesuits.com )",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36 SE 2.X MetaSr 1.0",
"Mozilla/5.0 (Linux; U; Android 4.1.1; zh-cn; HTC T528t Build/JRO03H) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30; 360browser(securitypay,securityinstalled); 360(android,uppayplugin); 360 Aphone Browser (2.0.4)",
]
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 禁用Scrapy自带的代理中间件与UA中间件,启用用户自定义的中间件
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': None,
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': None,
'weibo.middlewares.UAMiddleware': 543,
'weibo.middlewares.ProxyMiddleware': 544,
# 'weibo.middlewares.CookieMiddleware': 545,
'weibo.middlewares.MyRetryMiddleware': 546,
}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'weibo.pipelines.DuplicatesPipeline': 300,
# 'weibo.pipelines.CsvPipeline': 301,
# 'weibo.pipelines.MysqlPipeline': 302,
'weibo.pipelines.MongoPipeline': 303,
# 'weibo.pipelines.MyImagesPipeline': 304,
# 'weibo.pipelines.MyVideoPipeline': 305
}
# @@@@@@@@@@@@@@@@@@@@@@@ 爬虫数据库配置 @@@@@@@@@@@@@@@@@@@@@@@@@@#
# 配置MongoDB数据库(全局)
MONGO_URI = 'YOUR_IP:YOUR_PORT'
MONGO_DATABASE = 'YOUR_DBNAME'
MONGO_USER = 'YOUR_USERNAME'
MONGO_PASSWORD = 'YOUR_PASSWORD'
# 配置MongoDB数据库(spiders)
MONGO_CNAME = {
# 根据数据库中哪个集合来生成爬取任务
'M_TASK': {
'USER': 'm_user_state',
'TWEET': 'm_task_state',
'SUPERTOPIC': 'm_topic_state',
'LONGTWEET': 'm_longtext_state'
}
}
# @@@@@@@@@@@@@@@@@@@@@@@ COM站爬虫配置 @@@@@@@@@@@@@@@@@@@@@@@@@@#
# 要搜索的关键词列表,可写多个, 值可以是由关键词或话题组成的列表,也可以是包含关键词的txt文件路径,
# 如'keyword_list.txt',txt文件中每个关键词占一行
# KEYWORD_LIST = ['抑郁 想死'] # 或者 KEYWORD_LIST = 'keyword_list.txt' #
# KEYWORD_LIST = ['抑郁 一无是处', '度洛西汀', '抑郁 生无可恋', '文拉法辛', '抑郁 舍曲林', '抑郁 没意思', '抑郁 难熬', '抑郁 自残', '抑郁 吃药', '抑郁 想哭', '抑郁 想死']
KEYWORD_LIST = ['#日常#']
# 要搜索的微博类型,0代表搜索全部微博,1代表搜索全部原创微博,2代表热门微博,3代表关注人微博,4代表认证用户微博,5代表媒体微博,6代表观点微博
WEIBO_TYPE = 1
# 筛选结果微博中必需包含的内容,0代表不筛选,获取全部微博,1代表搜索包含图片的微博,2代表包含视频的微博,3代表包含音乐的微博,4代表包含短链接的微博
CONTAIN_TYPE = 0
# 筛选微博的发布地区,精确到省或直辖市,值不应包含“省”或“市”等字,如想筛选北京市的微博请用“北京”而不是“北京市”,想要筛选安徽省的微博请用“安徽”而不是“安徽省”,可以写多个地区,
# 具体支持的地名见region.py文件,注意只支持省或直辖市的名字,省下面的市名及直辖市下面的区县名不支持,不筛选请用”全部“
REGION = ['全部']
# 搜索的起始日期,为yyyy-mm-dd形式,搜索结果包含该日期
# START_DATE = '2018-01-01'
START_DATE = '2014-01-01'
# 搜索的终止日期,为yyyy-mm-dd形式,搜索结果包含该日期
END_DATE = '2020-12-31'
# END_DATE = '2017-12-31'
# 搜索的时间段,0-24,搜索结果包含该时间,注释掉下面两条就是全天模式
# START_TIME = '0'
# END_TIME = '6'
# 进一步细分搜索的阈值,若结果页数大于等于该值,则认为结果没有完全展示,细分搜索条件重新搜索以获取更多微博。数值越大速度越快,也越有可能漏掉微博;数值越小速度越慢,获取的微博就越多。
# 建议数值大小设置在40到50之间。
FURTHER_THRESHOLD = 46
# 图片文件存储路径
IMAGES_STORE = './'
# 视频文件存储路径
FILES_STORE = './'
# 其他配置
USER_TYPE = 0 # 指定爬取的用户类型,对于二分类模型来说,1表示正例(抑郁),0表示负例
META_ONLY = True # 是否只抓取元数据
# @@@@@@@@@@@@@@@@@@@@@@@ M站爬虫配置 @@@@@@@@@@@@@@@@@@@@@@@@@@#
# FILTER_RETWEET = False
MIN_WEIBO_CNT = 50
MAX_WEIBO_CNT = 500
# m_tweet配置
TASK_NUM_PER_ROUND = 1000 # 批量喂入任务,其实不用。。
# m_supertopic配置
TOPIC_ID = 'c86edb545da5818f5aad83caea7d75c1' # 直接从container_id拷贝,前缀100808
TOPIC_NAME = '搞笑'
ORIENT_TYPE = 0
| 41.985816
| 244
| 0.710642
|
4a113db1cef94cd177808f09f4048cc170999085
| 760
|
bzl
|
Python
|
third_party/tf_runtime/workspace.bzl
|
triper1022/tensorflow
|
18c576ac70b7995e0390db80e067cd9b0305e984
|
[
"Apache-2.0"
] | 1
|
2021-06-04T04:02:31.000Z
|
2021-06-04T04:02:31.000Z
|
third_party/tf_runtime/workspace.bzl
|
triper1022/tensorflow
|
18c576ac70b7995e0390db80e067cd9b0305e984
|
[
"Apache-2.0"
] | null | null | null |
third_party/tf_runtime/workspace.bzl
|
triper1022/tensorflow
|
18c576ac70b7995e0390db80e067cd9b0305e984
|
[
"Apache-2.0"
] | null | null | null |
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "ef99b130971cbbc43b8201d3545e9198c1be5ae0"
TFRT_SHA256 = "c8db5cd07d49f8f7fd60ac3bcf8cb29a7922858a6114faa4b040f5c38e24159e"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| 36.190476
| 126
| 0.680263
|
4a11401d3d190472c0f0006fb77a1f01bf01a010
| 1,484
|
py
|
Python
|
batch_rl/fixed_replay/agents/incrementalwrbetting.py
|
pmineiro/batch_rl
|
21ae1411107616bdcdd4f785075d75392623a9e4
|
[
"Apache-2.0"
] | null | null | null |
batch_rl/fixed_replay/agents/incrementalwrbetting.py
|
pmineiro/batch_rl
|
21ae1411107616bdcdd4f785075d75392623a9e4
|
[
"Apache-2.0"
] | null | null | null |
batch_rl/fixed_replay/agents/incrementalwrbetting.py
|
pmineiro/batch_rl
|
21ae1411107616bdcdd4f785075d75392623a9e4
|
[
"Apache-2.0"
] | null | null | null |
class IncrementalWRBetting:
def __init__(self, decay, taumax=0.99):
from math import floor, log
assert 0 < decay <= 1
self.decay = decay
self.n = 0
self.sumwr = 0
self.sumwrsq = 0
assert 0 <= taumax < 1
self.taumax = taumax
def tfhook(self, gamma, w, r):
import numpy as np
q = []
v = self.findv()
tau = self.bet(v)
for gn, wn, rn in zip(gamma, w, r):
wr = (gn * wn).dot(rn) / max(1, len(wn))
q.append(1.0 / (1.0 + tau * (wr - v)))
self.observe(tau, wr)
return np.array(q).astype(np.single)
def observe(self, tau, wr):
from math import log1p
self.n *= self.decay
self.n += 1
self.sumwr *= self.decay
self.sumwr += wr
self.sumwrsq *= self.decay
self.sumwrsq += wr**2
def bet(self, v):
mean = self.sumwr - self.n * v
assert mean >= 0, mean
var = self.sumwrsq - 2 * v * self.sumwr + self.n * v**2
assert var >= 0, var
tauub = min(self.taumax, 0.5 / v if v > 0.5 else 1)
return min(tauub, max(0, mean / (mean + var) if var > 0 else 1 if mean >= 0 else 0))
def findv(self):
from math import sqrt
meanwr = self.sumwr / max(1, self.n)
varwr = sqrt(max(0, self.sumwrsq / max(1, self.n) - meanwr**2))
vadj = max(0, meanwr - 3 * varwr / sqrt(max(1, self.n)))
return vadj
| 28
| 92
| 0.506065
|
4a1141543e20ea44282b9af00f47d9f7c644bdfa
| 337
|
py
|
Python
|
APICanalLuciernaga/programacion/migrations/0002_remove_horaprogramacion_categoria_p.py
|
ErickMurillo/canal-luciernaga
|
3839b2bf03aea4f5336e0b2899845dc6ec38ee04
|
[
"MIT"
] | null | null | null |
APICanalLuciernaga/programacion/migrations/0002_remove_horaprogramacion_categoria_p.py
|
ErickMurillo/canal-luciernaga
|
3839b2bf03aea4f5336e0b2899845dc6ec38ee04
|
[
"MIT"
] | null | null | null |
APICanalLuciernaga/programacion/migrations/0002_remove_horaprogramacion_categoria_p.py
|
ErickMurillo/canal-luciernaga
|
3839b2bf03aea4f5336e0b2899845dc6ec38ee04
|
[
"MIT"
] | 1
|
2019-09-09T22:29:07.000Z
|
2019-09-09T22:29:07.000Z
|
# Generated by Django 2.2.3 on 2019-10-22 19:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('programacion', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='horaprogramacion',
name='categoria_p',
),
]
| 18.722222
| 47
| 0.602374
|
4a11417b2f71f8981ffa465f27360923690a18f8
| 7,740
|
py
|
Python
|
qa/rpc-tests/abandonconflict.py
|
Patrick-W-McMahon/MagMellDollar
|
cb5a139e3a1d8f3196d7f1d25321d9839b51295b
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/abandonconflict.py
|
Patrick-W-McMahon/MagMellDollar
|
cb5a139e3a1d8f3196d7f1d25321d9839b51295b
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/abandonconflict.py
|
Patrick-W-McMahon/MagMellDollar
|
cb5a139e3a1d8f3196d7f1d25321d9839b51295b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Magmelldollar Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import MagmelldollarTestFramework
from test_framework.util import *
import urllib.parse
class AbandonConflictTest(MagmelldollarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.001"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.1")) #no more than fees lost
balance = newbalance
url = urllib.parse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.96")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.96"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
# Note had to make sure tx did not have AllowFree priority
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.01"])
# Verify txs no longer in mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.96"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.998") + Decimal("24.96"))
balance = newbalance
# Remove using high relay fee again
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.01"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.96"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.99")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 MMD outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 MMD output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
print(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| 48.074534
| 135
| 0.653101
|
4a11419472acfdfe69dbbb4d01827d914943fe4a
| 3,036
|
py
|
Python
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/nfs_check_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/nfs_check_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/nfs_check_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_0.models.nfs_check import NfsCheck # noqa: F401,E501
class NfsCheckExtended(object):
    """Auto-generated Swagger model wrapping a list of NfsCheck results.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Maps attribute name -> swagger type string.
    swagger_types = {
        'checks': 'list[NfsCheck]'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'checks': 'checks'
    }

    def __init__(self, checks=None):  # noqa: E501
        """NfsCheckExtended - a model defined in Swagger"""  # noqa: E501
        self._checks = None
        self.discriminator = None
        if checks is not None:
            self.checks = checks

    @property
    def checks(self):
        """The list of NfsCheck entries for this model.

        :rtype: list[NfsCheck]
        """
        return self._checks

    @checks.setter
    def checks(self, checks):
        """Set the list of NfsCheck entries.

        :type: list[NfsCheck]
        """
        self._checks = checks

    def to_dict(self):
        """Return the model properties as a dict."""
        output = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each element that is itself a model.
                output[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                output[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model values inside dicts; keys pass through.
                output[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                output[attr] = value
        return output

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        return isinstance(other, NfsCheckExtended) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 26.4
| 80
| 0.561924
|
4a1141e093de87b828047b78f1def262d361d6b1
| 1,029
|
py
|
Python
|
ug_term/ugopen.py
|
peteigel/ug-term
|
8394ec8364131c88ed4194ba473d34d62794399d
|
[
"MIT"
] | null | null | null |
ug_term/ugopen.py
|
peteigel/ug-term
|
8394ec8364131c88ed4194ba473d34d62794399d
|
[
"MIT"
] | null | null | null |
ug_term/ugopen.py
|
peteigel/ug-term
|
8394ec8364131c88ed4194ba473d34d62794399d
|
[
"MIT"
] | null | null | null |
import requests
import re
import sys
from . import app_parser
class UGTabData:
    """Holds the chord/tab text extracted from an Ultimate Guitar app payload."""

    def __init__(self, data=None):
        self.content = None
        if data is not None:
            self.parse(data)

    def parse(self, data):
        """Pull the tab text out of the nested app-data dict; ignore missing keys."""
        try:
            self.content = data['data']['tab_view']['wiki_tab']['content']
        except KeyError:
            pass

    def print(self, bold_chords):
        """Write the tab to stdout and return it.

        [ch]/[/ch] markers are replaced with ANSI bold escapes when
        *bold_chords* is true, otherwise simply stripped.
        """
        if bold_chords:
            open_repl, close_repl = '\033[1m', '\033[0m'
        else:
            open_repl, close_repl = '', ''
        rendered = re.sub(r'\[ch\]', open_repl, self.content)
        rendered = re.sub(r'\[/ch\]', close_repl, rendered)
        sys.stdout.write(rendered)
        return rendered
def open(url: str):
    """Fetch an Ultimate Guitar page and return its parsed tab data.

    NOTE: intentionally shadows the builtin ``open`` within this module.
    """
    response = requests.get(url)
    ug_parser = app_parser.UGAppParser()
    ug_parser.feed(response.text)
    return UGTabData(ug_parser.app_data)
| 23.930233
| 74
| 0.594752
|
4a11420c8d81adc257958818db7d5c3668785cab
| 807
|
py
|
Python
|
examples/dot3k/basic/ipaddr.py
|
axelsimon/displayotron
|
42315c47eb50d2f95d1fcade389ff57ad73e1b83
|
[
"MIT"
] | 49
|
2017-06-07T05:09:23.000Z
|
2021-10-08T14:32:05.000Z
|
examples/dot3k/basic/ipaddr.py
|
axelsimon/displayotron
|
42315c47eb50d2f95d1fcade389ff57ad73e1b83
|
[
"MIT"
] | 19
|
2017-08-07T21:17:00.000Z
|
2022-01-07T09:02:42.000Z
|
examples/dot3k/basic/ipaddr.py
|
axelsimon/displayotron
|
42315c47eb50d2f95d1fcade389ff57ad73e1b83
|
[
"MIT"
] | 22
|
2017-06-07T05:09:25.000Z
|
2021-08-17T10:52:58.000Z
|
#!/usr/bin/env python
import fcntl
import socket
import struct
import dot3k.lcd as lcd
def get_addr(ifname):
    """Return the IPv4 address bound to *ifname*, or 'Not Found!' if unavailable.

    Uses the SIOCGIFADDR ioctl on a throwaway UDP socket, so this only
    works on Linux. Interface names are truncated to 15 bytes, the kernel
    limit for an ifreq name.
    """
    try:
        # Fix: the original never closed the socket, leaking a file
        # descriptor per call; the context manager guarantees cleanup.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            packed = fcntl.ioctl(
                s.fileno(),
                0x8915,  # SIOCGIFADDR
                struct.pack('256s', ifname[:15].encode('utf-8'))
            )
        # The interface address lives at bytes 20..24 of the returned ifreq.
        return socket.inet_ntoa(packed[20:24])
    except OSError:  # IOError is an alias; raised when the interface is absent
        return 'Not Found!'
# Resolve addresses once at startup; 'Not Found!' marks a missing interface.
wlan0 = get_addr('wlan0')
eth0 = get_addr('eth0')
host = socket.gethostname()
# Render hostname on row 0, eth0 on row 1, wlan0 on row 2 of the dot3k LCD.
lcd.clear()
lcd.set_cursor_position(0,0)
lcd.write('{}'.format(host))
lcd.set_cursor_position(0,1)
# Show the bare address when found; otherwise label the placeholder so the
# user can tell which interface is missing.
if eth0 != 'Not Found!':
    lcd.write(eth0)
else:
    lcd.write('eth0 {}'.format(eth0))
lcd.set_cursor_position(0,2)
if wlan0 != 'Not Found!':
    lcd.write(wlan0)
else:
    lcd.write('wlan0 {}'.format(wlan0))
| 19.682927
| 60
| 0.63197
|
4a11420e81fa7a330540cc4b9656aa8eec2af167
| 1,801
|
py
|
Python
|
tests/conftest.py
|
pbdtools/xfds
|
37c527c4ee5b1f6e2c0b9faa2e403fd49b1cc379
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
pbdtools/xfds
|
37c527c4ee5b1f6e2c0b9faa2e403fd49b1cc379
|
[
"MIT"
] | 16
|
2022-02-20T18:56:35.000Z
|
2022-03-02T17:35:27.000Z
|
tests/conftest.py
|
pbdtools/xfds
|
37c527c4ee5b1f6e2c0b9faa2e403fd49b1cc379
|
[
"MIT"
] | null | null | null |
"""Tests for command line interface."""
from pathlib import Path
from typing import Generator
import pytest
from xfds import core
@pytest.fixture
def xfds_datadir() -> Path:
    """Path to the directory holding this test suite's data files."""
    return Path(__file__).parent.joinpath("data")
@pytest.fixture
def latest() -> str:
    """The FDS version these tests treat as the latest release."""
    return "6.7.7"
@pytest.fixture
def fds_file(xfds_datadir: Path) -> Path:
    """Path to the sample FDS input file used by most tests."""
    return xfds_datadir.joinpath("fds", "test.fds")
@pytest.fixture
def stop_file(fds_file: Path) -> Generator[Path, None, None]:
    """Yield the .stop path next to the FDS file, absent before and after the test."""
    path = fds_file.with_suffix(".stop")
    path.unlink(missing_ok=True)
    assert not path.exists()
    yield path
    # Clean up anything the test created.
    path.unlink(missing_ok=True)
@pytest.fixture
def fds_dir(fds_file: Path) -> Path:
    """Resolved directory that contains the sample FDS file."""
    return fds_file.parent.resolve()
@pytest.fixture
def empty_dir(xfds_datadir: Path) -> Path:
    """Path to a data directory that deliberately contains no FDS files."""
    return xfds_datadir.joinpath("no_fds")
@pytest.fixture
def meta_dir(xfds_datadir: Path) -> Path:
    """Path to the directory exercising metadata-driven behavior."""
    return xfds_datadir.joinpath("from_metadata")
@pytest.fixture
def default_cmd_kwargs(fds_file: Path) -> dict:
    """Assemble the default keyword arguments passed to the CLI command."""
    interactive = False
    version = "latest"
    volume = core.volume_to_mount(fds_file=fds_file)
    container = core.container_name(
        fds_file=fds_file, version=version, interactive=interactive
    )
    return {
        "fds_file": fds_file,
        "volume": volume,
        "interactive": interactive,
        "version": version,
        "container": container,
        "processors": 1,
    }
| 23.38961
| 69
| 0.674625
|
4a11424247cd4f3cc156c9f16c2ee2cc74536fa5
| 1,450
|
py
|
Python
|
superset/db_engine_specs/dremio.py
|
AmritaTech/superset
|
c685c9ea8fa70ba6646617d0a272c11e1130081c
|
[
"Apache-2.0"
] | 44
|
2021-04-14T10:53:36.000Z
|
2021-09-11T00:29:50.000Z
|
superset/db_engine_specs/dremio.py
|
AmritaTech/superset
|
c685c9ea8fa70ba6646617d0a272c11e1130081c
|
[
"Apache-2.0"
] | 77
|
2020-02-02T07:54:13.000Z
|
2022-03-23T18:22:04.000Z
|
superset/db_engine_specs/dremio.py
|
AmritaTech/superset
|
c685c9ea8fa70ba6646617d0a272c11e1130081c
|
[
"Apache-2.0"
] | 11
|
2021-06-09T08:30:57.000Z
|
2021-11-30T03:16:14.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from superset.db_engine_specs.base import BaseEngineSpec
class DremioBaseEngineSpec(BaseEngineSpec):
    """Engine spec describing how Superset talks to Dremio."""

    engine = "dremio"
    engine_name = "Dremio"

    # Map ISO-8601 duration codes to Dremio DATE_TRUNC expressions.
    # None means "no truncation": the column passes through untouched.
    _time_grain_expressions = {None: "{col}"}
    _time_grain_expressions.update(
        (duration, "DATE_TRUNC('" + unit + "', {col})")
        for duration, unit in (
            ("PT1S", "second"),
            ("PT1M", "minute"),
            ("PT1H", "hour"),
            ("P1D", "day"),
            ("P1W", "week"),
            ("P1M", "month"),
            ("P0.25Y", "quarter"),
            ("P1Y", "year"),
        )
    )

    @classmethod
    def epoch_to_dttm(cls) -> str:
        """Return SQL converting an epoch-seconds column to a datetime."""
        return "TO_DATE({col})"
| 36.25
| 62
| 0.673793
|
4a11429bfb96fc6b65cb25754bf7a1a17666361a
| 17,003
|
py
|
Python
|
systemrdl/compiler.py
|
jasonpjacobs/systemrdl-compiler
|
e3fdaf53b6c605a24d6e1149817f3636a85aed09
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
systemrdl/compiler.py
|
jasonpjacobs/systemrdl-compiler
|
e3fdaf53b6c605a24d6e1149817f3636a85aed09
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
systemrdl/compiler.py
|
jasonpjacobs/systemrdl-compiler
|
e3fdaf53b6c605a24d6e1149817f3636a85aed09
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from typing import Set, Type, Any, List, Dict, Optional, Iterable
from antlr4 import InputStream
from . import messages
from . import warnings
from .parser import sa_systemrdl
from .core.ComponentVisitor import RootVisitor
from .core.ExprVisitor import ExprVisitor
from .core.properties import PropertyRuleBook, BuiltinUserProperty
from .core.namespace import NamespaceRegistry
from .core.elaborate import ElabExpressionsListener, PrePlacementValidateListener, LateElabListener
from .core.elaborate import StructuralPlacementListener
from .core.validate import ValidateListener
from . import ast
from . import component as comp
from . import walker
from .node import RootNode
from . import preprocessor
from . import rdltypes
class FileInfo:
    """Immutable record describing the result of preprocessing one RDL file."""

    def __init__(self, preprocessed_text: str, included_files: Iterable[str]) -> None:
        self._pp_text = preprocessed_text
        self._incl_files = included_files

    @property
    def preprocessed_text(self) -> str:
        """Resolved text after Perl and Verilog preprocessing."""
        return self._pp_text

    @property
    def included_files(self) -> Iterable[str]:
        """Iterable of paths that were included while preprocessing this file."""
        return self._incl_files
class RDLCompiler:
    def __init__(self, **kwargs: Any):
        """
        RDLCompiler constructor.

        Parameters
        ----------
        message_printer: :class:`~systemrdl.messages.MessagePrinter`
            Override the default message printer
        warning_flags: int
            Flags to enable warnings. See :ref:`messages_warnings` for more details.
        error_flags: int
            Same as ``warning_flags`` but promote them to errors instead.
        dedent_desc: bool
            Automatically remove any common indentation from multi-line
            ``desc`` properties. Set to True by default.
        extended_dpa_type_names: bool
            Enable extended type name generation that accounts for dynamic
            property assignments augmenting the type. Set to True by default.
            See :ref:`dpa_type_generation` for more details.
        perl_safe_opcodes: list
            Perl preprocessor commands are executed within a
            `Perl Safe <https://perldoc.perl.org/Safe.html>`_ compartment to
            prevent malicious code execution.
            The default set of `Perl opcodes <https://perldoc.perl.org/Opcode.html#Predefined-Opcode-Tags>`_
            allowed should be sufficient for most applications, however this
            option is exposed in the rare case it is necessary to override the
            opcode list in order to make an exception.

        .. versionchanged:: 1.8
            Added ``dedent_desc`` option.
        .. versionchanged:: 1.9
            Added ``extended_dpa_type_names`` option.
        .. versionchanged:: 1.10
            Added ``perl_safe_opcodes`` option.
        """
        # RDLEnvironment pops every kwarg it recognizes out of the dict,
        # so anything left over is an unknown argument.
        self.env = RDLEnvironment(kwargs)

        # Check for stray kwargs
        if kwargs:
            raise TypeError("got an unexpected keyword argument '%s'" % list(kwargs.keys())[0])

        #: Reference to the compiler's :class:`~systemrdl.messages.MessageHandler` object
        self.msg = self.env.msg

        self.namespace = NamespaceRegistry(self.env) # type: NamespaceRegistry
        self.visitor = RootVisitor(self)
        self.root = self.visitor.component # type: comp.Root # type: ignore

    def define_udp(self, name, valid_type, valid_components=None, default=None):
        # type: (str, List[Any], Optional[Set[Type[comp.Component]]], Any) -> None
        """
        Pre-define a user-defined property.

        This is the equivalent to the following RDL::

            property <name> {
                type = <valid_type>;
                component = <valid_components>;
                default = <default>
            };

        Parameters
        ----------
        name: str
            Property name
        valid_type: type
            Assignment type that this UDP will enforce
        valid_components: set
            Set of :class:`~systemrdl.component.Component` types the UDP can be bound to.
            If None, then UDP can be bound to all components.
        default:
            Default if a value is not specified when the UDP is bound to a component.
            Value must be compatible with ``valid_type``

        Raises
        ------
        ValueError
            If ``name`` collides with a built-in RDL property.
        """
        if valid_components is None:
            valid_components = {
                comp.Field,
                comp.Reg,
                comp.Regfile,
                comp.Addrmap,
                comp.Mem,
                comp.Signal,
                #TODO constraint,
            }

        if name in self.env.property_rules.rdl_properties:
            # Bug fix: the original omitted '% name', so the error message
            # always showed the literal '%s' placeholder instead of the name.
            raise ValueError("name '%s' conflicts with existing built-in RDL property" % name)

        udp = BuiltinUserProperty(self.env, name, valid_components, (valid_type,), default)

        self.env.property_rules.user_properties[udp.name] = udp

    def list_udps(self) -> List[str]:
        """
        List all user-defined properties encountered by the compiler.

        .. versionadded:: 1.12
        """
        return list(self.env.property_rules.user_properties.keys())

    def preprocess_file(self, path: str, incl_search_paths: Optional[List[str]]=None) -> FileInfo:
        """
        Preprocess a single file without compiling it.

        Parameters
        ----------
        path: str
            Path to an RDL source file
        incl_search_paths: list
            List of additional paths to search to resolve includes.
            If unset, defaults to an empty list.

            Relative include paths are resolved in the following order:

            1. Search each path specified in ``incl_search_paths``.
            2. Path relative to the source file performing the include.

        Raises
        ------
        RDLCompileError
            If any fatal preprocessing error is encountered.

        Returns
        -------
        :class:`FileInfo`
            File info object

        .. versionadded:: 1.20
        """
        if incl_search_paths is None:
            incl_search_paths = []

        input_stream, included_files = preprocessor.preprocess_file(self.env, path, incl_search_paths)

        return FileInfo(input_stream.strdata, included_files)

    def compile_file(self, path: str, incl_search_paths: Optional[List[str]]=None) -> FileInfo:
        """
        Parse & compile a single file and append it to RDLCompiler's root
        namespace.

        If any exceptions (:class:`~systemrdl.RDLCompileError` or other)
        occur during compilation, then the RDLCompiler object should be discarded.

        Parameters
        ----------
        path: str
            Path to an RDL source file
        incl_search_paths: list
            List of additional paths to search to resolve includes.
            If unset, defaults to an empty list.

            Relative include paths are resolved in the following order:

            1. Search each path specified in ``incl_search_paths``.
            2. Path relative to the source file performing the include.

        Raises
        ------
        RDLCompileError
            If any fatal compile error is encountered.

        Returns
        -------
        :class:`FileInfo`
            File info object

        .. versionchanged:: 1.20
            Returns a :class:`FileInfo` object instead of ``None``
        """
        if incl_search_paths is None:
            incl_search_paths = []

        input_stream, included_files = preprocessor.preprocess_file(self.env, path, incl_search_paths)

        # Run Antlr parser on input
        parsed_tree = sa_systemrdl.parse(
            input_stream,
            "root",
            messages.RdlSaErrorListener(self.msg)
        )
        if self.msg.had_error:
            self.msg.fatal("Parse aborted due to previous errors")

        # Traverse parse tree with RootVisitor
        self.visitor.visit(parsed_tree)

        # Reset default property assignments from namespace.
        # They should not be shared between files since that would be confusing.
        self.namespace.default_property_ns_stack = [{}]

        if self.msg.had_error:
            self.msg.fatal("Compile aborted due to previous errors")

        return FileInfo(input_stream.strdata, included_files)

    def elaborate(self, top_def_name: Optional[str]=None, inst_name: Optional[str]=None, parameters: Optional[Dict[str, rdltypes.RDLValue]]=None) -> RootNode:
        """
        Elaborates the design for the given top-level addrmap component.

        During elaboration, the following occurs:

        - An instance of the ``$root`` meta-component is created.
        - The addrmap component specified by ``top_def_name`` is instantiated as a
          child of ``$root``.
        - Expressions, parameters, and inferred address/field placements are elaborated.
        - Validation checks are performed.

        If a design contains multiple root-level addrmaps, ``elaborate()`` can be
        called multiple times in order to elaborate each individually.

        If any exceptions (:class:`~systemrdl.RDLCompileError` or other)
        occur during elaboration, then the RDLCompiler object should be discarded.

        Parameters
        ----------
        top_def_name: str
            Explicitly choose which addrmap in the root namespace will be the
            top-level component. If unset, the last addrmap defined will be chosen.
        inst_name: str
            Overrides the top-component's instantiated name.
            By default, instantiated name is the same as ``top_def_name``
        parameters: dict
            Dictionary of parameter overrides for the top component instance.

        Raises
        ------
        RDLCompileError
            If any fatal elaboration error is encountered
        ValueError
            If a parameter override names a parameter that does not exist
        TypeError
            If a parameter override value has an incompatible type

        Returns
        -------
        :class:`~systemrdl.node.RootNode`
            Elaborated root meta-component's Node object.
        """
        if parameters is None:
            parameters = {}

        # Get top-level component definition to elaborate
        if top_def_name is not None:
            # Lookup top_def_name
            if top_def_name not in self.root.comp_defs:
                self.msg.fatal("Elaboration target '%s' not found" % top_def_name)
            top_def = self.root.comp_defs[top_def_name]

            if not isinstance(top_def, comp.Addrmap):
                self.msg.fatal("Elaboration target '%s' is not an 'addrmap' component" % top_def_name)
        else:
            # Not specified. Find the last addrmap defined
            # (dict views are reversible on Python 3.8+).
            for comp_def in reversed(self.root.comp_defs.values()):
                if isinstance(comp_def, comp.Addrmap):
                    top_def = comp_def
                    top_def_name = comp_def.type_name
                    break
            else:
                self.msg.fatal("Could not find any 'addrmap' components to elaborate")

        # Create an instance of the root component
        root_inst = self.root._copy_for_inst({})
        root_inst.is_instance = True
        root_inst.original_def = self.root
        root_inst.inst_name = "$root"

        # Create a top-level instance
        top_inst = top_def._copy_for_inst({})
        top_inst.is_instance = True
        top_inst.original_def = top_def
        top_inst.addr_offset = 0
        top_inst.external = True # addrmap is always implied as external
        if inst_name is not None:
            top_inst.inst_name = inst_name
        else:
            top_inst.inst_name = top_def_name

        # Override parameters as needed
        for param_name, value in parameters.items():
            # Find the parameter to override
            parameter = None
            for p in top_inst.parameters:
                if p.name == param_name:
                    parameter = p
                    break
            else:
                raise ValueError("Parameter '%s' is not available for override" % param_name)

            # Wrap the raw value in an AST cast so the normal type-checking
            # machinery validates it against the parameter's declared type.
            literal_expr = ast.ExternalLiteral(self.env, value)
            assign_expr = ast.AssignmentCast(self.env, None, literal_expr, parameter.param_type)
            assign_type = assign_expr.predict_type()
            if assign_type is None:
                raise TypeError("Incorrect type for parameter '%s'" % param_name)

            parameter.expr = assign_expr

        # instantiate top_inst into the root component instance
        root_inst.children.append(top_inst)

        root_node = RootNode(root_inst, self.env, None)

        # Resolve all expressions
        walker.RDLWalker(skip_not_present=False).walk(
            root_node,
            ElabExpressionsListener(self.msg)
        )

        # Resolve address and field placement
        walker.RDLWalker(skip_not_present=False).walk(
            root_node,
            PrePlacementValidateListener(self.msg),
            StructuralPlacementListener(self.msg),
            LateElabListener(self.msg, self.env)
        )

        # Validate design.
        # Only need to validate nodes that are present.
        walker.RDLWalker(skip_not_present=True).walk(root_node, ValidateListener(self.env))

        if self.msg.had_error:
            self.msg.fatal("Elaborate aborted due to previous errors")

        return root_node

    def eval(self, expression: str) -> rdltypes.RDLValue:
        """
        Evaluate an RDL expression string and return its compiled value.

        This function is provided as a helper to simplify overriding top-level
        parameters during elaboration.

        Parameters
        ----------
        expression: str
            This string is parsed and evaluated as a SystemRDL expression.
            Any references used in the expression are resolved using the
            current contents of the root namespace.

        Raises
        ------
        ValueError
            If any parse or evaluation error occurs.

        .. versionadded:: 1.8
        """
        # Create local message handler that suppresses the usual output
        # to stderr. Instead it raises ValueError on any error.
        msg_printer = messages.MessageExceptionRaiser()
        msg_handler = messages.MessageHandler(msg_printer)

        input_stream = InputStream(expression)

        parsed_tree = sa_systemrdl.parse(
            input_stream,
            "expr",
            messages.RdlSaErrorListener(self.msg)
        )
        # NOTE(review): parse errors above are routed to self.msg rather than
        # the ValueError-raising handler — confirm this is intentional.

        visitor = ExprVisitor(self)
        # override visitor to use local message handler
        visitor.msg = msg_handler

        result = visitor.visit(parsed_tree)
        result.predict_type()
        return result.get_value()
class RDLEnvironment:
    """
    Container object for misc resources that are preserved outside the lifetime
    of source compilation.
    """
    def __init__(self, args_dict: Dict[str, Any]):
        # Pop recognized arguments so the caller can detect leftovers.
        message_printer = args_dict.pop('message_printer', messages.MessagePrinter())
        w_flags = args_dict.pop('warning_flags', 0)
        e_flags = args_dict.pop('error_flags', 0)
        self.dedent_desc = args_dict.pop('dedent_desc', True)
        self.use_extended_type_name_gen = args_dict.pop('extended_dpa_type_names', True)
        self.perl_safe_opcodes = args_dict.pop('perl_safe_opcodes', [
            ':base_core', ':base_mem', ':base_loop', ':base_orig', ':base_math',
            ':base_thread', ':filesys_read', ':sys_db', ':load',
            'sort', 'tied', 'pack', 'unpack', 'reset'
        ])

        # Derive a severity for each warning category from the two flag words.
        for attr_name, flag in (
            ('chk_missing_reset', warnings.MISSING_RESET),
            ('chk_implicit_field_pos', warnings.IMPLICIT_FIELD_POS),
            ('chk_implicit_addr', warnings.IMPLICIT_ADDR),
            ('chk_stride_not_pow2', warnings.STRIDE_NOT_POW2),
            ('chk_strict_self_align', warnings.STRICT_SELF_ALIGN),
            ('chk_sparse_reg_stride', warnings.SPARSE_REG_STRIDE),
        ):
            setattr(self, attr_name, self.chk_flag_severity(flag, w_flags, e_flags))

        self.msg = messages.MessageHandler(message_printer)
        self.property_rules = PropertyRuleBook(self)

    @staticmethod
    def chk_flag_severity(flag: int, w_flags: int, e_flags: int) -> messages.Severity:
        """Map a warning flag to a severity; errors take precedence over warnings."""
        if e_flags & flag:
            return messages.Severity.ERROR
        if w_flags & flag:
            return messages.Severity.WARNING
        return messages.Severity.NONE
| 35.130165
| 158
| 0.62489
|
4a1142d258868188817280bbcf4be830bb9513fa
| 1,906
|
py
|
Python
|
tests/h/services/delete_group_test.py
|
pombredanne/h
|
9c4c2dc0d53ed5bed5183936c24b4c27b23070b4
|
[
"BSD-2-Clause"
] | 2,103
|
2015-01-07T12:47:49.000Z
|
2022-03-29T02:38:25.000Z
|
tests/h/services/delete_group_test.py
|
pombredanne/h
|
9c4c2dc0d53ed5bed5183936c24b4c27b23070b4
|
[
"BSD-2-Clause"
] | 4,322
|
2015-01-04T17:18:01.000Z
|
2022-03-31T17:06:02.000Z
|
tests/h/services/delete_group_test.py
|
admariner/h
|
25ef1b8d94889df86ace5a084f1aa0effd9f4e25
|
[
"BSD-2-Clause"
] | 389
|
2015-01-24T04:10:02.000Z
|
2022-03-28T08:00:16.000Z
|
from unittest import mock
import pytest
from h.services.annotation_delete import AnnotationDeleteService
from h.services.delete_group import (
DeleteGroupService,
DeletePublicGroupError,
delete_group_service_factory,
)
@pytest.mark.usefixtures("annotation_delete_service")
class TestDeleteGroupService:
    """Tests for the ``delete()`` method of ``DeleteGroupService``."""

    def test_it_does_not_delete_public_group(self, svc, factories):
        group = factories.Group()
        # "__world__" is the reserved pubid of the public group.
        group.pubid = "__world__"

        with pytest.raises(DeletePublicGroupError):
            svc.delete(group)

    def test_it_deletes_group(self, svc, db_session, factories):
        group = factories.Group()
        svc.delete(group)

        # The group should be staged for deletion in the DB session.
        assert group in db_session.deleted

    def test_it_deletes_annotations(self, svc, factories, annotation_delete_service):
        group = factories.Group()
        annotations = [
            factories.Annotation(groupid=group.pubid).id,
            factories.Annotation(groupid=group.pubid).id,
        ]

        svc.delete(group)

        # Inspect the mock: the first positional arg of the delete_annotations
        # call should contain exactly the group's annotations (order-independent).
        deleted_anns = [
            ann.id
            for ann in annotation_delete_service.delete_annotations.call_args[0][0]
        ]
        assert sorted(deleted_anns) == sorted(annotations)
@pytest.mark.usefixtures("annotation_delete_service")
class TestDeleteGroupServiceFactory:
    """Tests for ``delete_group_service_factory``."""

    def test_it_returns_delete_group_service_instance(self, pyramid_request):
        svc = delete_group_service_factory(None, pyramid_request)

        assert isinstance(svc, DeleteGroupService)
@pytest.fixture
def svc(db_session, pyramid_request):
    """A DeleteGroupService wired to the test database session."""
    pyramid_request.db = db_session
    return delete_group_service_factory({}, pyramid_request)
@pytest.fixture
def annotation_delete_service(pyramid_config):
    """Register and return a strict autospec mock of AnnotationDeleteService."""
    mock_service = mock.create_autospec(
        AnnotationDeleteService, instance=True, spec_set=True
    )
    pyramid_config.register_service(mock_service, name="annotation_delete")
    return mock_service
| 28.878788
| 85
| 0.727702
|
4a1142e3012cdf87b79ac8fa303c0ac3700f64d1
| 1,761
|
py
|
Python
|
app/libs/quest_queue.py
|
Jiubei0408/jiudge-backend
|
c743274d76fdfe2be8b16b5bcf5a68c64fbc4272
|
[
"Apache-2.0"
] | null | null | null |
app/libs/quest_queue.py
|
Jiubei0408/jiudge-backend
|
c743274d76fdfe2be8b16b5bcf5a68c64fbc4272
|
[
"Apache-2.0"
] | 1
|
2021-08-02T09:49:15.000Z
|
2021-08-02T09:57:27.000Z
|
app/libs/quest_queue.py
|
Jiubei0408/jiudge-backend
|
c743274d76fdfe2be8b16b5bcf5a68c64fbc4272
|
[
"Apache-2.0"
] | null | null | null |
import json
import redis as Redis
from app.config.secure import REDIS_PASSWORD, REDIS_HOST, REDIS_PORT
# Module-wide Redis client; decode_responses=True so queue payloads round-trip
# as str rather than bytes.
redis = Redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, decode_responses=True)
def send_crawl_contest_info(contest_id, oj_id, oj_name, remote_contest_id):
    """Queue a request asking the judger for *oj_name* to crawl contest metadata."""
    payload = {
        'type': 'crawl_contest_info',
        'contest_id': contest_id,
        'oj_id': oj_id,
        'remote_contest_id': remote_contest_id
    }
    send_quest(oj_name, payload)
def send_submit_problem(submission, problem, code, lang, account=None):
    """Queue a code submission for the remote judge and record its quest id."""
    payload = {
        'type': 'submit',
        'submission_id': submission.id,
        'remote_problem_id': problem.remote_problem_id,
        'code': code,
        'lang': lang
    }
    # Submissions into remote contests additionally carry the contest id.
    if submission.contest.is_remote():
        payload['remote_contest_id'] = submission.contest.remote_contest.remote_contest_id
    quest = send_quest(problem.oj.name, payload, account)
    submission.modify(quest_id=quest.id)
def send_crawl_remote_scoreboard(scoreboard_id, oj_name, remote_contest_id):
    """Queue a request to crawl the scoreboard of a remote contest."""
    payload = {
        'type': 'crawl_remote_scoreboard',
        'scoreboard_id': scoreboard_id,
        'remote_contest_id': remote_contest_id
    }
    send_quest(oj_name, payload)
def send_quest(oj_name, dict_data, account=None):
    """Persist a Quest record and push its payload onto the judger's Redis queue.

    A per-quest token is stored on the Quest row and sent with the payload so
    the judger can authenticate its response.

    :param oj_name: target judge name; selects the queue ``quest_queue_<oj_name>``
    :param dict_data: payload dict; copied, not mutated
    :param account: optional judge account; appended to the queue name
    :return: the created Quest instance
    """
    from app.models.quest import Quest
    import secrets
    import time

    data = dict_data.copy()  # don't mutate the caller's dict
    now = int(time.time())
    quest = Quest.create(time_stamp=now)
    # Security fix: the token was previously md5(f"{quest.id}{now}"), which is
    # predictable from public values. Use a cryptographically random token of
    # the same width (32 hex chars) instead.
    token = secrets.token_hex(16)
    quest.modify(token=token)
    data.update({
        'quest_id': quest.id,
        'token': token
    })
    queue_name = f'quest_queue_{oj_name}'
    if account is not None:
        queue_name += f':{account}'
    redis.rpush(queue_name, json.dumps(data))
    return quest
| 30.362069
| 107
| 0.682567
|
4a1143a16d301811d84dff797fb211099e379dd2
| 11,550
|
py
|
Python
|
fattureincloud_python_sdk/model/create_issued_document_response.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | 2
|
2022-02-17T08:33:17.000Z
|
2022-03-22T09:27:00.000Z
|
fattureincloud_python_sdk/model/create_issued_document_response.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | null | null | null |
fattureincloud_python_sdk/model/create_issued_document_response.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | null | null | null |
"""
Fatture in Cloud API v2 - API Reference
Connect your software with Fatture in Cloud, the invoicing platform chosen by more than 400.000 businesses in Italy. The Fatture in Cloud API is based on REST, and makes possible to interact with the user related data prior authorization via OAuth2 protocol. # noqa: E501
The version of the OpenAPI document: 2.0.15
Contact: info@fattureincloud.it
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fattureincloud_python_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fattureincloud_python_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Import model dependencies lazily and publish them into module globals.

    Deferring the import avoids circular-import problems between generated
    model modules at load time.
    """
    from fattureincloud_python_sdk.model.issued_document import IssuedDocument
    globals()['IssuedDocument'] = IssuedDocument
class CreateIssuedDocumentResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (IssuedDocument,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""CreateIssuedDocumentResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (IssuedDocument): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """CreateIssuedDocumentResponse - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            data (IssuedDocument): [optional]  # noqa: E501
        """
        # Pop the framework-control kwargs first so only schema properties
        # remain in ``kwargs`` for the setattr loop below.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Positional arguments are never valid for generated models.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class so discriminator traversal does not revisit it.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            # Optionally drop keys not declared in the schema when the
            # configuration asks for it and no additionalProperties exist.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # NOTE(review): the attribute is assigned before this check, so
            # the value is set even when the error is raised — behavior of
            # the upstream generator, kept as-is.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 43.91635
| 278
| 0.579654
|
4a1143d9f3f074942e4f8f2da84c948131c466b2
| 649
|
py
|
Python
|
packages/pyre/units/time.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/units/time.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/units/time.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
from .SI import second
from .SI import pico, nano, micro, milli
#
# definitions of common time units
# data taken from Appendix F of Halliday, Resnick, Walker, "Fundamentals of Physics",
# fourth edition, John Willey and Sons, 1993
# SI submultiples of the second
picosecond = pico*second
nanosecond = nano*second
microsecond = micro*second
millisecond = milli*second
# aliases
s = second
ps = picosecond
ns = nanosecond
us = microsecond
ms = millisecond
# other common units
minute = 60 * second
hour = 60 * minute
day = 24 * hour
year = 365.25 * day  # Julian year (365.25 days)
# end of file
| 16.225
| 85
| 0.705701
|
4a1145a90062c41570540e8600b61964e602c7bb
| 1,045
|
py
|
Python
|
test_rbd.py
|
lyqgenius/openstack-test-script
|
b3234001e531dbaeddfab8be1b439b7e06f519d1
|
[
"Apache-2.0"
] | null | null | null |
test_rbd.py
|
lyqgenius/openstack-test-script
|
b3234001e531dbaeddfab8be1b439b7e06f519d1
|
[
"Apache-2.0"
] | null | null | null |
test_rbd.py
|
lyqgenius/openstack-test-script
|
b3234001e531dbaeddfab8be1b439b7e06f519d1
|
[
"Apache-2.0"
] | null | null | null |
import rados
import rbd
import imp
# Force the site-packages copy of the rbd bindings; the plain ``import rbd``
# above may resolve to a different module on some installs.
# NOTE(review): ``imp`` is deprecated — kept because this script targets
# the Python 2.7 path hard-coded below.
rbd = imp.load_source('rbd', '/usr/lib/python2.7/site-packages/rbd.py')

image_name = 'volume-a4f4daca-2b35-45a5-9e45-a145452050b1'
pool = 'volumes'


def _check_image_snaps():
    """Open a fresh cluster connection, list the snapshots of
    ``image_name`` in ``pool``, then tear everything down.

    Each call opens its own client so repeated invocations also exercise
    connection setup/teardown. Cleanup runs in ``finally`` blocks so a
    failing step can no longer leak the ioctx or the cluster handle.
    """
    client = rados.Rados(
        rados_id='cinder',
        clustername='ceph',
        conffile='/etc/ceph/ceph.conf')
    client.connect(timeout=60)
    try:
        ioctx = client.open_ioctx(pool)
        try:
            image = rbd.Image(ioctx, image_name)
            try:
                image.list_snaps()
            finally:
                image.close()
        finally:
            ioctx.close()
    finally:
        client.shutdown()


# The original flat script repeated this block three times and — by a
# copy/paste bug — opened the third ioctx on the *first* client
# (``ioctx2 = client1.open_ioctx(pool)``) after ``client1`` had already
# been shut down. Looping over one self-contained helper removes that
# mismatch while still performing three full connect/list/teardown cycles.
for _ in range(3):
    _check_image_snaps()
| 21.770833
| 71
| 0.737799
|
4a1145cca789b44ae14a78b26084452f81e57ef5
| 2,450
|
py
|
Python
|
ref/hb_bop19.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
ref/hb_bop19.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
ref/hb_bop19.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""Necessary params and info for the HB (BOP'19) dataset."""
import os.path as osp
import numpy as np

# ---------------------------------------------------------------- #
# ROOT PATH INFO
# ---------------------------------------------------------------- #
cur_dir = osp.abspath(osp.dirname(__file__))
root_dir = osp.normpath(osp.join(cur_dir, ".."))
# directory storing experiment data (results, model checkpoints, etc.)
output_dir = osp.join(root_dir, "output")
data_root = osp.join(root_dir, "datasets")
bop_root = osp.join(data_root, "BOP_DATASETS/")

# ---------------------------------------------------------------- #
# HB DATASET
# ---------------------------------------------------------------- #
dataset_root = osp.join(bop_root, "hb_bop19")
train_dir = osp.join(dataset_root, "train")
test_dir = osp.join(dataset_root, "test")
model_dir = osp.join(dataset_root, "models")
model_eval_dir = osp.join(dataset_root, "models_eval")
vertex_scale = 0.001  # model vertices are stored in mm

# Object info: only 16 of the 33 HB objects belong to the BOP'19 split.
id2obj = {
    1: "01_bear",
    3: "03_round_car",
    4: "04_thin_cow",
    8: "08_green_rabbit",
    9: "09_holepuncher",
    10: "10",
    12: "12",
    15: "15",
    17: "17",
    18: "18_jaffa_cakes_box",
    19: "19_minions",  # small yellow man
    22: "22_rhinoceros",  # xi niu
    23: "23_dog",
    29: "29_tea_box",
    32: "32_car",
    33: "33_yellow_rabbit",
}
obj2id = {name: idx for idx, name in id2obj.items()}
objects = list(map(str, id2obj.values()))
obj_num = len(id2obj)
model_paths = [osp.join(model_dir, "obj_{:06d}.ply".format(_id)) for _id in id2obj]
texture_paths = None
# one distinct grey level per object, used by the renderer
model_colors = [((i + 1) * 10,) * 3 for i in range(obj_num)]

# Camera info
width = 640
height = 480
zNear = 0.25
zFar = 6.0
# NOTE(review): stored as (height/2, width/2) — presumably (row, col)
# order; confirm against callers.
center = (height / 2, width / 2)
camera_matrix = np.array([[537.4799, 0.0, 318.8965], [0.0, 536.1447, 238.3781], [0.0, 0.0, 1.0]])
| 30.625
| 112
| 0.545714
|
4a114647a2353294078287d13ce05c6f06dd8cfc
| 304
|
py
|
Python
|
articles/serializers.py
|
Uncensored-Developer/django-elastic-drf-example
|
bddd5bc2c869425eef4c940228e7a8a122aa5500
|
[
"MIT"
] | 2
|
2020-02-23T11:17:39.000Z
|
2021-01-11T13:20:47.000Z
|
articles/serializers.py
|
Uncensored-Developer/django-elastic-drf-example
|
bddd5bc2c869425eef4c940228e7a8a122aa5500
|
[
"MIT"
] | 2
|
2019-12-05T14:03:53.000Z
|
2019-12-05T14:03:53.000Z
|
articles/serializers.py
|
Uncensored-Developer/django-elastic-drf-example
|
bddd5bc2c869425eef4c940228e7a8a122aa5500
|
[
"MIT"
] | null | null | null |
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from .documents import ArticleDocument
class ArticleDocumentSerializer(DocumentSerializer):
    """Serializer exposing ``ArticleDocument`` search hits over DRF."""
    class Meta:
        # Bind the serializer to the Elasticsearch document class.
        document = ArticleDocument
        # Document fields included in the serialized output.
        fields = ('id', 'title', 'body', 'author', 'created', 'modified', 'pub_date',)
| 33.777778
| 86
| 0.746711
|
4a11469dd4b17d19a4b39ea50414887a535e4e10
| 4,576
|
py
|
Python
|
tensorflow/core/function/trace_type/trace_type_builder.py
|
Stevanus-Christian/tensorflow
|
d44afcf5ca16c5d704c66f891b99eac804e7cd14
|
[
"Apache-2.0"
] | 2
|
2016-09-27T05:37:33.000Z
|
2019-11-22T06:41:12.000Z
|
tensorflow/core/function/trace_type/trace_type_builder.py
|
Stevanus-Christian/tensorflow
|
d44afcf5ca16c5d704c66f891b99eac804e7cd14
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/core/function/trace_type/trace_type_builder.py
|
Stevanus-Christian/tensorflow
|
d44afcf5ca16c5d704c66f891b99eac804e7cd14
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utitiles for Cache Key generation based on Function Trace Type."""
import collections.abc
from typing import Any, Callable, Hashable
import weakref
from tensorflow.core.function.trace_type import default_types
from tensorflow.core.function.trace_type import util
from tensorflow.python.types import trace
class WeakrefDeletionObserver:
  """An observer for the event of deleting a weakref.

  Users of FunctionTraceType register callbacks here so they are notified
  when an instance depending on a weakref becomes invalid through the
  deletion of that weakref; tf.function caches, for example, can then
  evict the affected keys. The observer pattern (rather than a single
  callback) is used because keys are typically created before the cache
  starts using them.
  """

  def __init__(self):
    self._fired = False
    self._pending = []

  def add_listener(self, on_delete: Callable[[], None]):
    # After the weakref is gone, new listeners fire immediately instead of
    # being queued for a deletion that already happened.
    if self._fired:
      on_delete()
      return
    self._pending.append(on_delete)

  def weakref_deleted(self):
    self._fired = True
    for notify in self._pending:
      notify()

  def __call__(self, _):
    """Call handler for convenience of use with weakref."""
    self.weakref_deleted()
class InternalTracingContext(trace.TracingContext):
  """Container for variables and flags shared across TraceType generation."""

  def __init__(self):
    self._deletion_observer = WeakrefDeletionObserver()
    self._global_to_local_id = {}

  # TODO(b/202772221): Consider dropping after alias pattern matching is
  # supported.
  def make_reference_type(self, base_type: trace.TraceType,
                          local_id: Hashable) -> trace.TraceType:
    # Map the global id to a small, stable local index. ``setdefault``
    # evaluates ``len`` before inserting, so a new id receives the next
    # sequential index while an existing one keeps its original index.
    local_index = self._global_to_local_id.setdefault(
        local_id, len(self._global_to_local_id))
    return default_types.Reference(base_type, local_index)

  @property
  def deletion_observer(self):
    """Returns a functor which invalidates the current key when called."""
    return self._deletion_observer
def from_object(obj: Any,
                context: trace.TracingContext = None) -> trace.TraceType:
  """Returns a TraceType corresponding to the object based on the context.

  Dispatch order matters: the tracing protocol wins over everything,
  wrapped callables are unwrapped, the builtin containers (list, tuple,
  namedtuple, mapping, attrs) are translated structurally, and only then
  do we fall back to weakref/generic handling.

  Args:
    obj: The object to generate a TraceType for.
    context: The TracingContext to be shared during protocol calls. A new
      InternalTracingContext is created when omitted.

  Returns:
    A TraceType object representing the given object.

  Raises:
    TypeError: If the object can not be represented by any TraceType.
  """
  if context is None:
    context = InternalTracingContext()
  if isinstance(obj, trace.SupportsTracingProtocol):
    return obj.__tf_tracing_type__(context)
  if hasattr(obj, "__wrapped__"):
    # Unwrap decorated callables (functools.wraps sets __wrapped__).
    return from_object(obj.__wrapped__, context)
  if isinstance(obj, list):
    return default_types.List(*(from_object(c, context) for c in obj))
  if isinstance(obj, tuple):
    if util.is_namedtuple(obj):
      return default_types.NamedTuple(
          type(obj), tuple(from_object(c, context) for c in obj))
    else:
      return default_types.Tuple(*(from_object(c, context) for c in obj))
  if isinstance(obj, collections.abc.Mapping):
    return default_types.Dict(
        {k: from_object(obj[k], context) for k in obj})
  if util.is_attrs(obj):
    return default_types.Attrs(
        type(obj),
        tuple(
            from_object(getattr(obj, a.name), context)
            for a in obj.__attrs_attrs__))
  try:
    # Prefer a weakref so the observer is notified when the object dies.
    ref = weakref.ref(obj, context.deletion_observer)
    if ref is None:
      raise TypeError(
          f"Deleted objects are not valid tf.function arguments, Got {obj!r}")
    else:
      return default_types.Weakref(ref)
  except TypeError:
    try:
      return default_types.Generic(obj)
    except Exception:
      # Fix: was a bare ``except:``, which also swallowed
      # KeyboardInterrupt/SystemExit; narrowed to Exception.
      raise TypeError(
          f"Python object could not be represented through the generic tracing "
          f"type. Consider implementing the Tracing Protocol for it: {obj!r}")
| 33.40146
| 80
| 0.702579
|
4a1146fb194c68b3742804abd36edf8ac40c030b
| 39,261
|
py
|
Python
|
src/werkzeug/urls.py
|
pieterb/werkzeug
|
c78b97998f4a6a27c3a9bf2b419573c620b3a64d
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/urls.py
|
pieterb/werkzeug
|
c78b97998f4a6a27c3a9bf2b419573c620b3a64d
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/urls.py
|
pieterb/werkzeug
|
c78b97998f4a6a27c3a9bf2b419573c620b3a64d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
``werkzeug.urls`` used to provide several wrapper functions for Python 2
    urlparse, whose main purpose was to work around the behavior of the Py2
stdlib and its lack of unicode support. While this was already a somewhat
inconvenient situation, it got even more complicated because Python 3's
``urllib.parse`` actually does handle unicode properly. In other words,
this module would wrap two libraries with completely different behavior. So
now this module contains a 2-and-3-compatible backport of Python 3's
``urllib.parse``, which is mostly API-compatible.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import os
import re
from collections import namedtuple
from ._compat import fix_tuple_repr
from ._compat import implements_to_string
from ._compat import make_literal_wrapper
from ._compat import normalize_string_tuple
from ._compat import PY2
from ._compat import text_type
from ._compat import to_native
from ._compat import to_unicode
from ._compat import try_coerce_native
from ._internal import _decode_idna
from ._internal import _encode_idna
from .datastructures import iter_multi_items
from .datastructures import MultiDict
# A regular expression for what a valid schema looks like
_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$")
# Characters that are safe in any part of an URL.
_always_safe = frozenset(
    bytearray(
        b"abcdefghijklmnopqrstuvwxyz"
        b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        b"0123456789"
        b"-._~"
    )
)
_hexdigits = "0123456789ABCDEFabcdef"
# Maps every two-hex-digit escape (both cases) to its byte value, used by
# the unquoting fast path.
_hextobyte = dict(
    ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits
)
# Percent-escape for each of the 256 byte values, used by the quoting path.
_bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)]
# Base tuple shared by URL (text) and BytesURL (bytes).
_URLTuple = fix_tuple_repr(
    namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"])
)
class BaseURL(_URLTuple):
    """Superclass of :py:class:`URL` and :py:class:`BytesURL`."""
    __slots__ = ()
    def replace(self, **kwargs):
        """Return an URL with the same values, except for those parameters
        given new values by whichever keyword arguments are specified."""
        return self._replace(**kwargs)
    @property
    def host(self):
        """The host part of the URL if available, otherwise `None`. The
        host is either the hostname or the IP address mentioned in the
        URL. It will not contain the port.
        """
        return self._split_host()[0]
    @property
    def ascii_host(self):
        """Works exactly like :attr:`host` but will return a result that
        is restricted to ASCII. If it finds a netloc that is not ASCII
        it will attempt to idna decode it. This is useful for socket
        operations when the URL might include internationalized characters.
        """
        rv = self.host
        if rv is not None and isinstance(rv, text_type):
            try:
                rv = _encode_idna(rv)
            except UnicodeError:
                # Host not IDNA-encodable; fall back to dropping non-ASCII.
                rv = rv.encode("ascii", "ignore")
        return to_native(rv, "ascii", "ignore")
    @property
    def port(self):
        """The port in the URL as an integer if it was present, `None`
        otherwise. This does not fill in default ports.
        """
        try:
            rv = int(to_native(self._split_host()[1]))
            # Only the valid port range is accepted.
            if 0 <= rv <= 65535:
                return rv
        except (ValueError, TypeError):
            # Missing or non-numeric port: fall through and return None.
            pass
    @property
    def auth(self):
        """The authentication part in the URL if available, `None`
        otherwise.
        """
        return self._split_netloc()[0]
    @property
    def username(self):
        """The username if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[0]
        if rv is not None:
            return _url_unquote_legacy(rv)
    @property
    def raw_username(self):
        """The username if it was part of the URL, `None` otherwise.
        Unlike :attr:`username` this one is not being decoded.
        """
        return self._split_auth()[0]
    @property
    def password(self):
        """The password if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[1]
        if rv is not None:
            return _url_unquote_legacy(rv)
    @property
    def raw_password(self):
        """The password if it was part of the URL, `None` otherwise.
        Unlike :attr:`password` this one is not being decoded.
        """
        return self._split_auth()[1]
    def decode_query(self, *args, **kwargs):
        """Decodes the query part of the URL. Ths is a shortcut for
        calling :func:`url_decode` on the query argument. The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)
    def join(self, *args, **kwargs):
        """Joins this URL with another one. This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))
    def to_url(self):
        """Returns a URL string or bytes depending on the type of the
        information stored. This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)
    def decode_netloc(self):
        """Decodes the netloc part into a string."""
        rv = _decode_idna(self.host or "")
        # IPv6 literals must be re-bracketed after IDNA decoding.
        if ":" in rv:
            rv = "[%s]" % rv
        port = self.port
        if port is not None:
            rv = "%s:%d" % (rv, port)
        # filter(None, ...) drops an absent username or password so no
        # stray ":" is emitted.
        auth = ":".join(
            filter(
                None,
                [
                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
                ],
            )
        )
        if auth:
            rv = "%s@%s" % (auth, rv)
        return rv
    def to_uri_tuple(self):
        """Returns a :class:`BytesURL` tuple that holds a URI. This will
        encode all the information in the URL properly to ASCII using the
        rules a web browser would follow.
        It's usually more interesting to directly call :meth:`iri_to_uri` which
        will return a string.
        """
        return url_parse(iri_to_uri(self).encode("ascii"))
    def to_iri_tuple(self):
        """Returns a :class:`URL` tuple that holds a IRI. This will try
        to decode as much information as possible in the URL without
        losing information similar to how a web browser does it for the
        URL bar.
        It's usually more interesting to directly call :meth:`uri_to_iri` which
        will return a string.
        """
        return url_parse(uri_to_iri(self))
    def get_file_location(self, pathformat=None):
        """Returns a tuple with the location of the file in the form
        ``(server, location)``. If the netloc is empty in the URL or
        points to localhost, it's represented as ``None``.
        The `pathformat` by default is autodetection but needs to be set
        when working with URLs of a specific system. The supported values
        are ``'windows'`` when working with Windows or DOS paths and
        ``'posix'`` when working with posix paths.
        If the URL does not point to a local file, the server and location
        are both represented as ``None``.
        :param pathformat: The expected format of the path component.
                           Currently ``'windows'`` and ``'posix'`` are
                           supported. Defaults to ``None`` which is
                           autodetect.
        """
        if self.scheme != "file":
            return None, None
        path = url_unquote(self.path)
        host = self.netloc or None
        if pathformat is None:
            # Autodetect from the running interpreter's platform.
            if os.name == "nt":
                pathformat = "windows"
            else:
                pathformat = "posix"
        if pathformat == "windows":
            # Convert ``/C:/...`` (or the ``|`` variant) into ``C:/...``.
            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
                path = path[1:2] + ":" + path[3:]
            windows_share = path[:3] in ("\\" * 3, "/" * 3)
            import ntpath
            path = ntpath.normpath(path)
            # Windows shared drives are represented as ``\\host\\directory``.
            # That results in a URL like ``file://///host/directory``, and a
            # path like ``///host/directory``. We need to special-case this
            # because the path contains the hostname.
            if windows_share and host is None:
                parts = path.lstrip("\\").split("\\", 1)
                if len(parts) == 2:
                    host, path = parts
                else:
                    host = parts[0]
                    path = ""
        elif pathformat == "posix":
            import posixpath
            path = posixpath.normpath(path)
        else:
            raise TypeError("Invalid path format %s" % repr(pathformat))
        # A loopback host is treated the same as no host at all.
        if host in ("127.0.0.1", "::1", "localhost"):
            host = None
        return host, path
    def _split_netloc(self):
        # Split ``auth@host:port`` into (auth, host:port); auth may be None.
        if self._at in self.netloc:
            return self.netloc.split(self._at, 1)
        return None, self.netloc
    def _split_auth(self):
        # Split the auth part into (username, password); both may be None.
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None
        return auth.split(self._colon, 1)
    def _split_host(self):
        # Split ``host:port`` into (host, port), honoring ``[...]``
        # brackets around IPv6 literals.
        rv = self._split_netloc()[1]
        if not rv:
            return None, None
        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                return rv.split(self._colon, 1)
            return rv, None
        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None
        host = rv[1:idx]
        rest = rv[idx + 1 :]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None
@implements_to_string
class URL(BaseURL):
    """A parsed URL made of text. It behaves like the plain tuple it is
    while exposing extra attributes for the individual URL components.
    """
    __slots__ = ()
    _at = "@"
    _colon = ":"
    _lbracket = "["
    _rbracket = "]"
    def __str__(self):
        return self.to_url()
    def encode_netloc(self):
        """Encodes the netloc part to an ASCII safe URL as bytes."""
        netloc = self.ascii_host or ""
        if ":" in netloc:
            # a bare IPv6 address needs brackets inside a netloc
            netloc = "[%s]" % netloc
        port = self.port
        if port is not None:
            netloc = "%s:%d" % (netloc, port)
        credentials = [
            url_quote(self.raw_username or "", "utf-8", "strict", "/:%"),
            url_quote(self.raw_password or "", "utf-8", "strict", "/:%"),
        ]
        auth = ":".join(part for part in credentials if part)
        if auth:
            netloc = "%s@%s" % (auth, netloc)
        return to_native(netloc)
    def encode(self, charset="utf-8", errors="replace"):
        """Encodes the URL to a tuple made out of bytes. The charset is
        only used for the path, query and fragment; the scheme is always
        plain ASCII and the netloc goes through :meth:`encode_netloc`.
        """
        return BytesURL(
            self.scheme.encode("ascii"),
            self.encode_netloc(),
            self.path.encode(charset, errors),
            self.query.encode(charset, errors),
            self.fragment.encode(charset, errors),
        )
class BytesURL(BaseURL):
    """A parsed URL whose components are bytes."""
    __slots__ = ()
    _at = b"@"
    _colon = b":"
    _lbracket = b"["
    _rbracket = b"]"
    def __str__(self):
        return self.to_url().decode("utf-8", "replace")
    def encode_netloc(self):
        """The netloc is already bytes, so it is handed back untouched."""
        return self.netloc
    def decode(self, charset="utf-8", errors="replace"):
        """Decodes the URL to a tuple made out of strings. The charset is
        only used for the path, query and fragment; the scheme is always
        ASCII and the netloc goes through :meth:`decode_netloc`.
        """
        parts = (
            self.scheme.decode("ascii"),
            self.decode_netloc(),
            self.path.decode(charset, errors),
            self.query.decode(charset, errors),
            self.fragment.decode(charset, errors),
        )
        return URL(*parts)
_unquote_maps = {frozenset(): _hextobyte}
def _unquote_to_bytes(string, unsafe=""):
    """Replace ``%XX`` escapes in *string* with the bytes they encode,
    leaving escapes whose value appears in *unsafe* untouched.
    """
    if isinstance(string, text_type):
        string = string.encode("utf-8")
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode("utf-8")
    unsafe = frozenset(bytearray(unsafe))
    pieces = iter(string.split(b"%"))
    out = bytearray(next(pieces, b""))
    # Per-unsafe-set lookup tables are cached at module level.
    hex_to_byte = _unquote_maps.get(unsafe)
    if hex_to_byte is None:
        hex_to_byte = _unquote_maps[unsafe] = {
            code: byte for code, byte in _hextobyte.items() if byte not in unsafe
        }
    for piece in pieces:
        escape = piece[:2]
        decoded = hex_to_byte.get(escape)
        if decoded is not None:
            out.append(decoded)
            out.extend(piece[2:])
        else:
            # Invalid or unsafe escape: keep the literal percent sign.
            out.append(37)  # %
            out.extend(piece)
    return bytes(out)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    """Yield ``key=value`` pairs for a query string, skipping None values."""
    items = iter_multi_items(obj)
    if sort:
        items = sorted(items, key=key)
    for item_key, item_value in items:
        if item_value is None:
            continue
        if not isinstance(item_key, bytes):
            item_key = text_type(item_key).encode(charset)
        if not isinstance(item_value, bytes):
            item_value = text_type(item_value).encode(charset)
        yield _fast_url_quote_plus(item_key) + "=" + _fast_url_quote_plus(item_value)
def _url_unquote_legacy(value, unsafe=""):
    """Unquote *value* as strict UTF-8, falling back to latin1."""
    try:
        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
    except UnicodeError:
        pass
    # latin1 maps every byte, so this cannot fail.
    return url_unquote(value, charset="latin1", unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
    """Parses a URL from a string into a :class:`URL` tuple. If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored. Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.
    The inverse of this function is :func:`url_unparse`.
    :param url: the URL to parse.
    :param scheme: the default schema to use if the URL is schemaless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    # s() wraps literals so the same code path handles str and bytes input.
    s = make_literal_wrapper(url)
    is_text_based = isinstance(url, text_type)
    if scheme is None:
        scheme = s("")
    netloc = query = fragment = s("")
    i = url.find(s(":"))
    if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1 :]
        if not rest or any(c not in s("0123456789") for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest
    if url[:2] == s("//"):
        # The netloc extends to the first of "/", "?" or "#" (or the end).
        delim = len(url)
        for c in s("/?#"):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        # An unbalanced bracket means a malformed IPv6 literal.
        if (s("[") in netloc and s("]") not in netloc) or (
            s("]") in netloc and s("[") not in netloc
        ):
            raise ValueError("Invalid IPv6 URL")
    # Split the fragment before the query so a "?" inside the fragment is
    # kept with the fragment.
    if allow_fragments and s("#") in url:
        url, fragment = url.split(s("#"), 1)
    if s("?") in url:
        url, query = url.split(s("?"), 1)
    result_type = URL if is_text_based else BytesURL
    return result_type(scheme, netloc, url, query, fragment)
def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """Precompile the translation table for a URL encoding function.
    Unlike :func:`url_quote`, the generated function only takes the
    string to quote.
    :param charset: The charset to encode the result with.
    :param errors: How to handle encoding errors.
    :param safe: An optional sequence of safe characters to never encode.
    :param unsafe: An optional sequence of unsafe characters to always encode.
    """
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    # One table entry per byte value: either the literal character or its
    # percent-escape.
    table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)]
    if not PY2:
        # On Python 3 iterating bytes yields ints, which index the table
        # directly.
        def quote(string):
            return "".join([table[c] for c in string])
    else:
        # On Python 2 iterating a byte string yields 1-char strings, so go
        # through bytearray to get ints.
        def quote(string):
            return "".join([table[c] for c in bytearray(string)])
    return quote
_fast_url_quote = _make_fast_url_quote()
# Query-string variant: space is "safe" here so it survives quoting and can
# then be turned into "+" below.
_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")
def _fast_url_quote_plus(string):
    """Quote *string* for use in a query string, encoding spaces as "+"."""
    return _fast_quote_plus(string).replace(" ", "+")
def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Coerce arbitrary objects to text first, then everything to bytes.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    keep = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    quoted = bytearray()
    for byte in bytearray(string):
        if byte in keep:
            quoted.append(byte)
        else:
            quoted.extend(_bytetohex[byte])
    return to_native(bytes(quoted))
def url_quote_plus(string, charset="utf-8", errors="strict", safe=""):
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param s: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.
    """
    # Treat the space as safe during quoting, then swap it for "+".
    quoted = url_quote(string, charset, errors, safe + " ", "+")
    return quoted.replace(" ", "+")
def url_unparse(components):
    """The reverse operation to :meth:`url_parse`. This accepts arbitrary
    as well as :class:`URL` tuples and returns a URL as a string.
    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    scheme, netloc, path, query, fragment = normalize_string_tuple(components)
    # s() wraps literals so the same code path handles str and bytes.
    s = make_literal_wrapper(scheme)
    url = s("")
    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s("/"))):
        # A netloc requires the path to start with a slash.
        if path and path[:1] != s("/"):
            path = s("/") + path
        url = s("//") + (netloc or s("")) + path
    elif path:
        url += path
    if scheme:
        url = scheme + s(":") + url
    if query:
        url = url + s("?") + query
    if fragment:
        url = url + s("#") + fragment
    return url
def url_unquote(string, charset="utf-8", errors="replace", unsafe=""):
    """URL decode a single string with a given encoding. If the charset
    is set to `None` no unicode decoding is performed and raw bytes
    are returned.

    :param s: the string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param errors: the error handling for the charset decoding.
    """
    raw = _unquote_to_bytes(string, unsafe)
    if charset is None:
        return raw
    return raw.decode(charset, errors)
def url_unquote_plus(s, charset="utf-8", errors="replace"):
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    :param s: The string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    # Pick text or bytes replacement tokens to match the input type.
    plus, space = (u"+", u" ") if isinstance(s, text_type) else (b"+", b" ")
    return url_unquote(s.replace(plus, space), charset, errors)
def url_fix(s, charset="utf-8"):
r"""Sometimes you get an URL by a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
"""
# First step is to switch to unicode processing and to convert
# backslashes (which are invalid in URLs anyways) to slashes. This is
# consistent with what Chrome does.
s = to_unicode(s, charset, "replace").replace("\\", "/")
# For the specific case that we look like a malformed windows URL
# we want to fix this up manually:
if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
s = "file:///" + s[7:]
url = url_parse(s)
path = url_quote(url.path, charset, safe="/%+$!*'(),")
qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor)))
# not-unreserved characters remain quoted when unquoting to IRI
_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe])
def _codec_error_url_quote(e):
"""Used in :func:`uri_to_iri` after unquoting to re-quote any
invalid bytes.
"""
out = _fast_url_quote(e.object[e.start : e.end])
if PY2:
out = out.decode("utf-8")
return out, e.end
codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"):
"""Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
leaving all reserved and invalid characters quoted. If the URL has
a domain, it is decoded from Punycode.
>>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'
:param uri: The URI to convert.
:param charset: The encoding to encode unquoted bytes with.
:param errors: Error handler to use during ``bytes.encode``. By
default, invalid bytes are left quoted.
.. versionchanged:: 0.15
All reserved and invalid characters remain quoted. Previously,
only some reserved characters were preserved, and invalid bytes
were replaced instead of left quoted.
.. versionadded:: 0.6
"""
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)
return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))
# reserved characters remain unquoted when quoting to URI
_to_uri_safe = ":/?#[]@!$&'()*+,;=%"
def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False):
"""Convert an IRI to a URI. All non-ASCII and unsafe characters are
quoted. If the URL has a domain, it is encoded to Punycode.
>>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'
:param iri: The IRI to convert.
:param charset: The encoding of the IRI.
:param errors: Error handler to use during ``bytes.encode``.
:param safe_conversion: Return the URL unchanged if it only contains
ASCII characters and no whitespace. See the explanation below.
There is a general problem with IRI conversion with some protocols
that are in violation of the URI specification. Consider the
following two IRIs::
magnet:?xt=uri:whatever
itms-services://?action=download-manifest
After parsing, we don't know if the scheme requires the ``//``,
which is dropped if empty, but conveys different meanings in the
final URL if it's present or not. In this case, you can use
``safe_conversion``, which will return the URL unchanged if it only
contains ASCII characters and no whitespace. This can result in a
URI with unquoted characters if it was not already quoted correctly,
but preserves the URL's semantics. Werkzeug uses this for the
``Location`` header for redirects.
.. versionchanged:: 0.15
All reserved characters remain unquoted. Previously, only some
reserved characters were left unquoted.
.. versionchanged:: 0.9.6
The ``safe_conversion`` parameter was added.
.. versionadded:: 0.6
"""
if isinstance(iri, tuple):
iri = url_unparse(iri)
if safe_conversion:
# If we're not sure if it's safe to convert the URL, and it only
# contains ASCII characters, return it unconverted.
try:
native_iri = to_native(iri)
ascii_iri = native_iri.encode("ascii")
# Only return if it doesn't have whitespace. (Why?)
if len(ascii_iri.split()) == 1:
return native_iri
except UnicodeError:
pass
iri = url_parse(to_unicode(iri, charset, errors))
path = url_quote(iri.path, charset, errors, _to_uri_safe)
query = url_quote(iri.query, charset, errors, _to_uri_safe)
fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
return to_native(
url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))
)
def url_decode(
s,
charset="utf-8",
decode_keys=False,
include_empty=True,
errors="replace",
separator="&",
cls=None,
):
"""
Parse a querystring and return it as :class:`MultiDict`. There is a
difference in key decoding on different Python versions. On Python 3
keys will always be fully decoded whereas on Python 2, keys will
remain bytestrings if they fit into ASCII. On 2.x keys can be forced
to be unicode by setting `decode_keys` to `True`.
If the charset is set to `None` no unicode decoding will happen and
raw bytes will be returned.
Per default a missing value for a key will default to an empty key. If
you don't want that behavior you can set `include_empty` to `False`.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`
then keys will be unicode in all cases. Otherwise,
they remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
"""
if cls is None:
cls = MultiDict
if isinstance(s, text_type) and not isinstance(separator, text_type):
separator = separator.decode(charset or "ascii")
elif isinstance(s, bytes) and not isinstance(separator, bytes):
separator = separator.encode(charset or "ascii")
return cls(
_url_decode_impl(
s.split(separator), charset, decode_keys, include_empty, errors
)
)
def url_decode_stream(
stream,
charset="utf-8",
decode_keys=False,
include_empty=True,
errors="replace",
separator="&",
cls=None,
limit=None,
return_iterator=False,
):
"""Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
:func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
directly fed to the `cls` so you can consume the data while it's
parsed.
.. versionadded:: 0.8
:param stream: a stream with the encoded querystring
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`,
keys will be unicode in all cases. Otherwise, they
remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param limit: the content length of the URL data. Not necessary if
a limited stream is provided.
:param return_iterator: if set to `True` the `cls` argument is ignored
and an iterator over all decoded pairs is
returned
"""
from .wsgi import make_chunk_iter
pair_iter = make_chunk_iter(stream, separator, limit)
decoder = _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors)
if return_iterator:
return decoder
if cls is None:
cls = MultiDict
return cls(decoder)
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
for pair in pair_iter:
if not pair:
continue
s = make_literal_wrapper(pair)
equal = s("=")
if equal in pair:
key, value = pair.split(equal, 1)
else:
if not include_empty:
continue
key = pair
value = s("")
key = url_unquote_plus(key, charset, errors)
if charset is not None and PY2 and not decode_keys:
key = try_coerce_native(key)
yield key, url_unquote_plus(value, charset, errors)
def url_encode(
obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
):
"""URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. Per default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, "ascii")
return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))
def url_encode_stream(
obj,
stream=None,
charset="utf-8",
encode_keys=False,
sort=False,
key=None,
separator=b"&",
):
"""Like :meth:`url_encode` but writes the results to a stream
object. If the stream is `None` a generator over all encoded
pairs is returned.
.. versionadded:: 0.8
:param obj: the object to encode into a query string.
:param stream: a stream to write the encoded object into or `None` if
an iterator over the encoded pairs should be returned. In
that case the separator argument is ignored.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, "ascii")
gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
if stream is None:
return gen
for idx, chunk in enumerate(gen):
if idx:
stream.write(separator)
stream.write(chunk)
def url_join(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter.
:param base: the base URL for the join operation.
:param url: the URL to join.
:param allow_fragments: indicates whether fragments should be allowed.
"""
if isinstance(base, tuple):
base = url_unparse(base)
if isinstance(url, tuple):
url = url_unparse(url)
base, url = normalize_string_tuple((base, url))
s = make_literal_wrapper(base)
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
base, allow_fragments=allow_fragments
)
scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
if scheme != bscheme:
return url
if netloc:
return url_unparse((scheme, netloc, path, query, fragment))
netloc = bnetloc
if path[:1] == s("/"):
segments = path.split(s("/"))
elif not path:
segments = bpath.split(s("/"))
if not query:
query = bquery
else:
segments = bpath.split(s("/"))[:-1] + path.split(s("/"))
# If the rightmost part is "./" we want to keep the slash but
# remove the dot.
if segments[-1] == s("."):
segments[-1] = s("")
# Resolve ".." and "."
segments = [segment for segment in segments if segment != s(".")]
while 1:
i = 1
n = len(segments) - 1
while i < n:
if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
del segments[i - 1 : i + 1]
break
i += 1
else:
break
# Remove trailing ".." if the URL is absolute
unwanted_marker = [s(""), s("..")]
while segments[:2] == unwanted_marker:
del segments[1]
path = s("/").join(segments)
return url_unparse((scheme, netloc, path, query, fragment))
class Href(object):
"""Implements a callable that constructs URLs with the given base. The
function can be called with any number of positional and keyword
arguments which than are used to assemble the URL. Works with URLs
and posix paths.
Positional arguments are appended as individual segments to
the path of the URL:
>>> href = Href('/foo')
>>> href('bar', 23)
'/foo/bar/23'
>>> href('foo', bar=23)
'/foo/foo?bar=23'
If any of the arguments (positional or keyword) evaluates to `None` it
will be skipped. If no keyword arguments are given the last argument
can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
otherwise the keyword arguments are used for the query parameters, cutting
off the first trailing underscore of the parameter name:
>>> href(is_=42)
'/foo?is=42'
>>> href({'foo': 'bar'})
'/foo?foo=bar'
Combining of both methods is not allowed:
>>> href({'foo': 'bar'}, bar=42)
Traceback (most recent call last):
...
TypeError: keyword arguments and query-dicts can't be combined
Accessing attributes on the href object creates a new href object with
the attribute name as prefix:
>>> bar_href = href.bar
>>> bar_href("blub")
'/foo/bar/blub'
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm:
>>> href = Href("/", sort=True)
>>> href(a=1, b=2, c=3)
'/?a=1&b=2&c=3'
.. versionadded:: 0.5
`sort` and `key` were added.
"""
def __init__(self, base="./", charset="utf-8", sort=False, key=None):
if not base:
base = "./"
self.base = base
self.charset = charset
self.sort = sort
self.key = key
def __getattr__(self, name):
if name[:2] == "__":
raise AttributeError(name)
base = self.base
if base[-1:] != "/":
base += "/"
return Href(url_join(base, name), self.charset, self.sort, self.key)
def __call__(self, *path, **query):
if path and isinstance(path[-1], dict):
if query:
raise TypeError("keyword arguments and query-dicts can't be combined")
query, path = path[-1], path[:-1]
elif query:
query = dict(
[(k.endswith("_") and k[:-1] or k, v) for k, v in query.items()]
)
path = "/".join(
[
to_unicode(url_quote(x, self.charset), "ascii")
for x in path
if x is not None
]
).lstrip("/")
rv = self.base
if path:
if not rv.endswith("/"):
rv += "/"
rv = url_join(rv, "./" + path)
if query:
rv += "?" + to_unicode(
url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii"
)
return to_native(rv)
| 34.591189
| 86
| 0.609027
|
4a11470e520ed2b842718bf275b728a4642e06ea
| 473
|
py
|
Python
|
test/test_phones.py
|
kovernik/python_training
|
533630873e1c2a8662cebd18fdf26a9bbc8f7fdf
|
[
"Apache-2.0"
] | null | null | null |
test/test_phones.py
|
kovernik/python_training
|
533630873e1c2a8662cebd18fdf26a9bbc8f7fdf
|
[
"Apache-2.0"
] | 1
|
2017-12-06T10:25:34.000Z
|
2017-12-06T10:26:12.000Z
|
test/test_phones.py
|
kovernik/python_training
|
533630873e1c2a8662cebd18fdf26a9bbc8f7fdf
|
[
"Apache-2.0"
] | null | null | null |
def test_phones_on_home_page(app):
user_from_home_page = app.user.get_user_list()[0]
user_from_edit_page = app.user.get_user_info_from_edit_page(0)
assert user_from_home_page.homephone == user_from_edit_page.homephone
assert user_from_home_page.workphone == user_from_edit_page.workphone
assert user_from_home_page.mobilephone == user_from_edit_page.mobilephone
assert user_from_home_page.secondaryphone == user_from_edit_page.secondaryphone
| 59.125
| 84
| 0.818182
|
4a114736618cd71dfa900120d3d9396f3357dc58
| 872
|
py
|
Python
|
Code/Python/tests/testAngleCom.py
|
antoinejulien/Level-US_GRO400_H21
|
9db779ea0c6c231599254e29d1d6429fee9019a4
|
[
"MIT"
] | 1
|
2021-01-26T23:08:44.000Z
|
2021-01-26T23:08:44.000Z
|
Code/Python/tests/testAngleCom.py
|
antoinejulien/Level-US_GRO400_H21
|
9db779ea0c6c231599254e29d1d6429fee9019a4
|
[
"MIT"
] | 62
|
2021-01-23T23:57:18.000Z
|
2021-04-15T01:37:11.000Z
|
Code/Python/tests/testAngleCom.py
|
antoinejulien/Level-US_GRO400_H21
|
9db779ea0c6c231599254e29d1d6429fee9019a4
|
[
"MIT"
] | 1
|
2022-01-06T20:46:49.000Z
|
2022-01-06T20:46:49.000Z
|
import sys
import os.path
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from modules.anglePlaqueToAngleMoteur_c import getAngles_cffi
from modules.anglePlaqueToAngleMoteur_c.getAngles import getAngles
import time
if __name__ == '__main__':
## asking user for values
print("\n\n\n")
userX = float(input("enter X angle :"))
userY = float(input("enter Y angle :"))
userH = float(input("enter height :"))
print("\n\n\n")
## processing
timestart = time.perf_counter()
anglesMoteurs = getAngles(userX, userY, userH)
timeAfter = time.perf_counter()
## printing results
i = 1
for angles in anglesMoteurs:
print("angle moteur-" + str(i) + " = " + str(angles) + " degres \n")
i += 1
print("Calculation took :" + str(timeAfter-timestart) + " seconds.\n")
| 28.129032
| 77
| 0.658257
|
4a1148d15ede07d9289dbe997b3b9603d2eaf320
| 6,047
|
py
|
Python
|
jhu_primitives/core/JHUGraph.py
|
youngser/D3M
|
a9998ca12644264d61e8ce5258a54f25b5f9f726
|
[
"Apache-2.0"
] | null | null | null |
jhu_primitives/core/JHUGraph.py
|
youngser/D3M
|
a9998ca12644264d61e8ce5258a54f25b5f9f726
|
[
"Apache-2.0"
] | 10
|
2017-09-12T05:45:43.000Z
|
2017-09-18T15:26:43.000Z
|
jhu_primitives/core/JHUGraph.py
|
youngser/D3M
|
a9998ca12644264d61e8ce5258a54f25b5f9f726
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# JHUGraph.py
# Created on 2017-09-13.
from typing import NamedTuple, Sequence, Optional
from primitive_interfaces.clustering import ClusteringPrimitiveBase
from jhu_primitives.wrapper.read_graph_r import read_graph
from jhu_primitives.wrapper.ig_wrapper_r import ig_get_adjacency_matrix
from jhu_primitives.wrapper.ig_wrapper_r import ig_get_num_vertices
from jhu_primitives.wrapper.ig_wrapper_r import ig_get_num_edges
from jhu_primitives.wrapper.ig_wrapper_r import ig_get_dangling_nodes
from jhu_primitives.wrapper.ig_wrapper_r import ig_is_directed
from jhu_primitives.wrapper.ig_wrapper_r import ig_is_weighted
from jhu_primitives.wrapper.ig_wrapper_r import ig_summary
from jhu_primitives.wrapper.ig_wrapper_r import ig_get_dense_matrix
from primitive_interfaces.base import Hyperparams
from d3m_metadata import container, hyperparams, metadata as metadata_module, params, utils
from d3m_metadata.params import Params
import os
from primitive_interfaces.base import CallResult
import numpy as np
Inputs = container.matrix
Outputs = container.ndarray
class Hyperparams(hyperparams.Hyperparams):
# TODO: Fix medatadata parameter
dtype = hyperparams.Hyperparameter[str](default="gml", semantic_types=[
'https://metadata.datadrivendiscovery.org/types/ControlParameter',
'https://metadata.datadrivendiscovery.org/types/TuningParameter'
])
class JHUGraph(ClusteringPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
# TODO: Create metadata for this
# This should contain only metadata which cannot be automatically determined from the code.
metadata = metadata_module.PrimitiveMetadata({
# Simply an UUID generated once and fixed forever. Generated using "uuid.uuid4()".
'id': 'b940ccbd-9e9b-3166-af50-210bfd79251b',
'version': "crap",
'name': "Monomial Regressor",
# Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
'keywords': ['test primitive'],
'source': {
'name': "boss",
'uris': [
# Unstructured URIs. Link to file and link to repo in this case.
'https://gitlab.com/datadrivendiscovery/tests-data/blob/master/primitives/test_primitives/monomial.py',
'https://gitlab.com/datadrivendiscovery/tests-data.git',
],
},
# A list of dependencies in order. These can be Python packages, system packages, or Docker images.
# Of course Python packages can also have their own dependencies, but sometimes it is necessary to
# install a Python package first to be even able to run setup.py of another package. Or you have
# a dependency which is not on PyPi.
'installation': [{
'type': metadata_module.PrimitiveInstallationType.PIP,
'package_uri': 'git+https://gitlab.com/datadrivendiscovery/tests-data.git@{git_commit}#egg=test_primitives&subdirecto\
ry=primitives'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
}],
# URIs at which one can obtain code for the primitive, if available.
'location_uris': [
'https://gitlab.com/datadrivendiscovery/tests-data/raw/{git_commit}/primitives/test_primitives/monomial.py'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
],
# The same path the primitive is registered with entry points in setup.py.
'python_path': 'd3m.primitives.test.MonomialPrimitive',
# Choose these from a controlled vocabulary in the schema. If anything is missing which would
# best describe the primitive, make a merge request.
'algorithm_types': [
metadata_module.PrimitiveAlgorithmType.LINEAR_REGRESSION,
],
'primitive_family': metadata_module.PrimitiveFamily.REGRESSION,
})
_adjacency_matrix = None
_num_vertices = None
_num_edges = None
_directed = None
_weighted = None
_dangling_nodes = None
def read_graph(self, *, fname: str) -> None:
dtype = self.hyperparams['dtype']
if dtype == "gml":
self._object = read_graph(fname, "gml")
elif dtype.startswith("edge"):
self._object = read_graph(fname, "edge")
else:
raise NotImplementedError("Reading graphs of type '{}'".\
format(dtype))
self._num_vertices = ig_get_num_vertices(self._object)
self._num_edges = ig_get_num_edges(self._object)
self._directed = ig_is_directed(self._object)
self._weighted = ig_is_weighted(self._object)
def compute_statistics(self) -> Outputs:
self._dangling_nodes = ig_get_dangling_nodes(self._object)
def get_adjacency_matrix(self) -> Outputs:
return ig_get_adjacency_matrix(self._object)
def get_dense_matrix(self) -> Outputs:
return ig_get_dense_matrix(self._object)
def get_num_vertices(self) -> int:
return self._num_vertices
def get_num_edges(self) -> int:
return self._num_edges
def is_directed(self) -> bool:
return self._directed
def is_weighted(self) -> bool:
return self._weighted
def get_dangling_nodes(self) -> Outputs:
if (self._dangling_nodes is None):
self.compute_statistics()
return self._dangling_nodes
def summary(self) -> None:
ig_summary(self._object)
def set_training_data(self, *, inputs: Inputs) -> None: # type: ignore
pass
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
return base.CallResult(self.get_adjacency_matrix())
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
return base.CallResult(None)
def get_params(self) -> Params:
return Params(other={})
def set_params(self, *, params: Params) -> None:
return None
| 40.313333
| 130
| 0.696213
|
4a114a5e5ba8c0207293866a07a0c1b864934e20
| 2,588
|
py
|
Python
|
preprocessing/walk_generation.py
|
marlin-github/CTGCN
|
e37ecd89afaf24f94213944e87a217a808468f43
|
[
"MIT"
] | 60
|
2020-03-24T14:37:11.000Z
|
2022-03-22T00:33:59.000Z
|
preprocessing/walk_generation.py
|
marlin-github/CTGCN
|
e37ecd89afaf24f94213944e87a217a808468f43
|
[
"MIT"
] | 7
|
2020-06-01T12:45:12.000Z
|
2022-01-05T12:39:42.000Z
|
preprocessing/walk_generation.py
|
jhljx/CTGCN
|
e37ecd89afaf24f94213944e87a217a808468f43
|
[
"MIT"
] | 19
|
2020-05-14T09:34:15.000Z
|
2022-03-22T00:34:02.000Z
|
# coding: utf-8
import pandas as pd
import os
import time
import multiprocessing
import preprocessing.random_walk as rw
from utils import check_and_make_path, get_sp_adj_mat
# Random Walk Generator
class WalkGenerator:
base_path: str
origin_base_path: str
walk_pair_base_path: str
node_freq_base_path: str
full_node_list: list
walk_time: int
walk_length: int
def __init__(self, base_path, origin_folder, walk_pair_folder, node_freq_folder, node_file, walk_time=100, walk_length=5):
self.base_path = base_path
self.origin_base_path = os.path.abspath(os.path.join(base_path, origin_folder))
self.walk_pair_base_path = os.path.abspath(os.path.join(base_path, walk_pair_folder))
self.node_freq_base_path = os.path.abspath(os.path.join(base_path, node_freq_folder))
node_path = os.path.abspath(os.path.join(base_path, node_file))
nodes_set = pd.read_csv(node_path, names=['node'])
self.full_node_list = nodes_set['node'].tolist()
self.walk_time = walk_time
self.walk_length = walk_length
check_and_make_path(self.walk_pair_base_path)
check_and_make_path(self.node_freq_base_path)
def get_walk_info(self, f_name, original_graph_path, sep='\t', weighted=True):
print('f_name = ', f_name)
t1 = time.time()
spadj = get_sp_adj_mat(original_graph_path, self.full_node_list, sep=sep)
rw.random_walk(spadj, self.walk_pair_base_path, self.node_freq_base_path, f_name, self.walk_length, self.walk_time, weighted)
t2 = time.time()
print('random walk tot time', t2 - t1, ' seconds!')
def get_walk_info_all_time(self, worker=-1, sep='\t', weighted=True):
print("perform random walk for all file(s)...")
f_list = os.listdir(self.origin_base_path)
f_list = sorted(f_list)
if worker <= 0:
for i, f_name in enumerate(f_list):
original_graph_path = os.path.join(self.origin_base_path, f_name)
self.get_walk_info(f_name, original_graph_path=original_graph_path, sep=sep, weighted=weighted)
else:
worker = min(os.cpu_count(), worker)
pool = multiprocessing.Pool(processes=worker)
print("\t\tstart " + str(worker) + " worker(s)")
for i, f_name in enumerate(f_list):
original_graph_path = os.path.join(self.origin_base_path, f_name)
pool.apply_async(self.get_walk_info, (f_name, original_graph_path, sep, weighted))
pool.close()
pool.join()
| 41.741935
| 133
| 0.681607
|
4a114b8e9120b543dd3a6030d7cb5668bd7d4e19
| 1,089
|
py
|
Python
|
test_margins.py
|
Patrick22414/SROIE-dev
|
621ab191c3cd25ffaf664ae50f76b8d3d616faf8
|
[
"MIT"
] | 1
|
2019-09-02T19:06:54.000Z
|
2019-09-02T19:06:54.000Z
|
test_margins.py
|
Patrick22414/SROIE-dev
|
621ab191c3cd25ffaf664ae50f76b8d3d616faf8
|
[
"MIT"
] | 1
|
2019-08-25T17:09:30.000Z
|
2019-08-26T06:07:38.000Z
|
test_margins.py
|
Patrick22414/SROIE-dev
|
621ab191c3cd25ffaf664ae50f76b8d3d616faf8
|
[
"MIT"
] | 2
|
2019-08-05T03:30:22.000Z
|
2020-04-07T22:58:46.000Z
|
import os
import glob
import numpy
import torch
from PIL import Image
from src import LineModel, draw_pred_line
DATA_PATH = "../sroie-data/"
RESO_H = 768
RESO_w = RESO_H // 2
GRID_H = 16
def test():
filenames = [os.path.splitext(f)[0] for f in glob.glob(DATA_PATH + "data_tmp/*.jpg")]
samples = random.sample(filenames, batch_size)
jpg_files = [s + ".jpg" for s in samples]
txt_files = [s + ".txt" for s in samples]
# convert jpg files to NCWH tensor
data = numpy.zeros([batch_size, 3, RESO_H, RESO_W], dtype=numpy.float32)
ratio = numpy.zeros(batch_size)
for i, f in enumerate(jpg_files):
im = Image.open(f).convert("RGB")
ratio[i] = RESO_H / im.height
im = im.resize([RESO_W, RESO_H])
data[i] = numpy.moveaxis(numpy.array(im), 2, 0)
truth = numpy.zeros([batch_size, RESO_H // GRID_H, 3], dtype=numpy.float32)
for i, (f, r) in enumerate(zip(txt_files, ratio)):
truth[i] = txt_to_truth(f, r)
return torch.tensor(data, device=device), torch.tensor(truth, device=device)
if __name__ == "__main__":
| 28.657895
| 89
| 0.654729
|
4a114ba60a2f10f464e0e654c3fb5a347e3bc958
| 202
|
py
|
Python
|
Python/tdw/FBOutput/MachineType.py
|
felixbinder/tdw
|
eb2b00b74b9fcf8ef2dcba1baa62424640c520b1
|
[
"BSD-2-Clause"
] | 307
|
2020-05-20T18:08:49.000Z
|
2022-03-21T19:55:08.000Z
|
Python/tdw/FBOutput/MachineType.py
|
felixbinder/tdw
|
eb2b00b74b9fcf8ef2dcba1baa62424640c520b1
|
[
"BSD-2-Clause"
] | 92
|
2020-07-21T18:29:13.000Z
|
2022-03-28T07:25:54.000Z
|
Python/tdw/FBOutput/MachineType.py
|
felixbinder/tdw
|
eb2b00b74b9fcf8ef2dcba1baa62424640c520b1
|
[
"BSD-2-Clause"
] | 53
|
2020-07-14T15:55:17.000Z
|
2022-03-20T16:20:01.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
class MachineType(object):
none = 1
motor = 2
hinge = 3
spring = 4
light = 5
| 16.833333
| 69
| 0.623762
|
4a114c91ff77ec066b52400f35f2adb0fcdff857
| 4,006
|
py
|
Python
|
PriceTracker.py
|
mustang519/STOCK-MARKET-PRICE-TRACKER-AND-NOTIFIER
|
99da472a13bbd94439d9005a79b265fd053e7f3e
|
[
"MIT",
"Unlicense"
] | 1
|
2021-07-06T13:10:41.000Z
|
2021-07-06T13:10:41.000Z
|
PriceTracker.py
|
mustang519/STOCK-MARKET-PRICE-TRACKER-AND-NOTIFIER
|
99da472a13bbd94439d9005a79b265fd053e7f3e
|
[
"MIT",
"Unlicense"
] | null | null | null |
PriceTracker.py
|
mustang519/STOCK-MARKET-PRICE-TRACKER-AND-NOTIFIER
|
99da472a13bbd94439d9005a79b265fd053e7f3e
|
[
"MIT",
"Unlicense"
] | null | null | null |
import string
import time
from bs4 import BeautifulSoup
from selenium import webdriver
import html5lib
import pandas as pd
import datetime
import smtplib
import ssl
def stockPrice(s):
url = "https://finance.yahoo.com/quote/" + s + "?p=" + s + "&guccounter=1&guce_referrer=aHR0cHM6Ly93d3cueW91dHViZS5jb20v&guce_referrer_sig=AQAAANyE07Gd730Uia9-wwV0Kvv1cWBrcVkFOydNDjjy3Db7Zzu81yNlcbXKyXUs_IVWyrwxEASQVjAUjXF8eNRB5eruYnFcq4LFvtt9Tw3ugQvfyik3cngJaGAX1KaeEYL8Yo-yW45wEjGOxUtxixvol6KKBYhfdFowq9seIYw2vfxG"
driver = webdriver.Chrome(r"C:\Users\Arundhuti Naskar\Dropbox\My PC (LAPTOP-6H818V0V)\Desktop\chromedriver.exe")
driver.get(url)
# this is just to ensure that the page is loaded
time.sleep(5)
html = driver.page_source
soup = BeautifulSoup(html, 'html5lib')
#print(soup.prettify())
price = soup.find('div', {"class": "My(6px) Pos(r) smartphone_Mt(6px)"})
price = price.find("span")
price = price.text
idx = price.find(",")
if(idx!=-1):
price = price[:idx] + price[idx + 1:]
return float(price)
def check_price(R_price):
for indx in range(0, len(R_price)):
if(flag[indx]==False) :
if ((bors[indx]==False and R_price[indx]<bound[indx]) or (bors[indx]==True and R_price[indx]>bound[indx])):
name = CODE[indx]
send_email(indx)
flag[indx] = True
def send_email(indx) :
smtp_server = "smtp.gmail.com"
port = 587 # For starttls
# Create a secure SSL context
context = ssl.create_default_context()
name = CODE[indx]
if (bors[indx]==False):
adv1 = "dropped below "
adv2 = "buy"
else:
adv1 = "risen above "
adv2 = "sell"
message = "Good news, Trader! The price for " + CODE[indx] + " has " + adv1 + str(bound[indx]) + ". You may " + adv2 + " this stock now."
# Try to log in to server and send email. Exception Handling is done here as we are trying to create an unencrypted connection and upgrade it to an encrypted one.
try:
server = smtplib.SMTP(smtp_server, port) #server object
server.ehlo() # Can be omitted
server.starttls(context=context) # Secure and upgrade the connection
server.ehlo() # Can be omitted
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
except:
print("Invalid Credentials")
def check():
for i in flag:
if (i == False) :
return True
return False
def plot():
    """Return the list of tracked ticker symbols (module-level ``CODE``)."""
    # NOTE(review): despite the name, this only returns the symbols list -
    # presumably a data source for an external plotting step; confirm caller.
    return CODE
# --- Interactive stock-price tracker ---------------------------------------
# Collects a watch-list from the user, then polls Yahoo Finance in a loop,
# appending each round of prices to PriceData.csv and e-mailing one-time
# alerts when thresholds are crossed.
CODE = []
bound = []
bors = []
sender_email = "we.are.in.the.stocksgame.now@gmail.com"
password = "Stocks_123"
# Opening in "w" mode already empties the file; truncate() is redundant here.
file = open("PriceData.csv","w")
file.truncate()
receiver_email = input("Enter your gmail id: ")
no = int(input("Enter no of companies to be tracked: "))
for i in range(1,no+1):
    CODE.append(input("Enter the code for company #" + str(i) + " to be tracked: "))
    # 0 = buying (alert when the price drops below the threshold),
    # 1 = selling (alert when it rises above the threshold).
    condn = int(input("Enter 0 if you are buying otherwise 1 if you are selling: "))
    if (condn==0):
        bors.append(False)
    else:
        bors.append(True)
    bound.append(float(input("Enter the threshold price for this company: " )))
# Header row: timestamp column followed by one column per ticker.
firstRow = []
flag = []
firstRow.append("Time")
for i in range(0,no) :
    flag.append(False)  # no alert sent yet for any company
    firstRow.append(CODE[i])
pd.DataFrame(firstRow).T.to_csv("PriceData.csv",mode='a',header=False)
# Poll until every alert has fired or the iteration cap is reached.
maxCount = 10000
cnt=1
while(check() and cnt<maxCount):
    R_price = []
    C_date = []
    timeSpan = datetime.datetime.now()
    timeSpan = timeSpan.strftime("%m/%d/%Y, %H:%M:%S")
    C_date.append(timeSpan)
    for s in CODE:
        R_price.append(stockPrice(s))
    # Rebuild the row as [timestamp, price, price, ...] and append to CSV.
    C_date = [timeSpan]
    C_date.extend(R_price)
    df = pd.DataFrame(C_date)
    df = df.T
    df.to_csv("PriceData.csv",mode='a',header=False)
    print(C_date)
    check_price(R_price)
    cnt = cnt + 1
| 28.013986
| 321
| 0.625312
|
4a115116c7f1b7b791fcfac441ef77d9deaee435
| 1,764
|
py
|
Python
|
tests/utils.py
|
Evpok/camphr
|
d062e5694de6aac99a8ce3236caca62fba087570
|
[
"Apache-2.0"
] | null | null | null |
tests/utils.py
|
Evpok/camphr
|
d062e5694de6aac99a8ce3236caca62fba087570
|
[
"Apache-2.0"
] | null | null | null |
tests/utils.py
|
Evpok/camphr
|
d062e5694de6aac99a8ce3236caca62fba087570
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import tempfile
from itertools import zip_longest
from pathlib import Path
from typing import Any, Tuple
import spacy
from spacy.tests.util import assert_docs_equal
def check_juman() -> bool:
    """Report whether the pyknp package (Juman++ bindings) is importable."""
    try:
        import pyknp  # noqa
        return True
    except ImportError:
        return False
def check_knp() -> bool:
    """Report whether KNP is usable; KNP is driven through pyknp as well,
    so its availability is the same as Juman's."""
    return check_juman()
def check_mecab() -> bool:
    """Report whether the MeCab Python bindings are importable."""
    try:
        import MeCab  # noqa
        return True
    except ImportError:
        return False
def check_allennlp() -> bool:
    """Report whether the allennlp package is importable."""
    try:
        import allennlp  # noqa
        return True
    except ImportError:
        return False
# Maps a backend/tokenizer name to a zero-argument callable that reports
# whether the packages it needs are importable in this environment.
checks = {
    "ja_mecab": check_mecab,
    "ja_juman": check_juman,
    "camphr_torch": lambda: True,
}
def check_lang(lang: str) -> bool:
    """Return True when the dependencies for ``lang`` are available.

    Unknown language names now yield ``False`` explicitly; the original
    ``fn and fn()`` expression returned ``None`` in that case, which is
    falsy but surprising for a predicate.
    """
    fn = checks.get(lang)
    return fn() if fn is not None else False
def comp_jsonl(fname1: str, fname2: str) -> Tuple[bool, Any]:
    """Compare two JSON-lines files record by record.

    Returns ``(True, ())`` when every line parses to an equal object, else
    ``(False, (d1, d2))`` with the first differing pair.  Files of different
    lengths now compare unequal: the original used ``fillvalue=[]``, so the
    shorter file's missing lines reached ``json.loads`` as a list and raised
    ``TypeError`` instead of reporting a difference.
    """
    with open(fname1) as f1, open(fname2) as f2:
        for line1, line2 in zip_longest(f1, f2, fillvalue=None):
            if line1 is None or line2 is None:
                # One file ended early; report the leftover raw line.
                return False, (line1, line2)
            d1 = json.loads(line1)
            d2 = json.loads(line2)
            if d1 != d2:
                return False, (d1, d2)
    return True, ()
def in_ci() -> bool:
    """True when running under CI (the ``CI`` env var equals ``"true"``)."""
    return os.environ.get("CI", "") == "true"
def check_serialization(nlp, text: str = "It is a serialization set. 今日はとてもいい天気だった!"):
    """Round-trip ``nlp`` through disk and assert the reloaded pipeline
    produces an identical analysis of ``text``."""
    with tempfile.TemporaryDirectory() as tmpdir:
        path = str(tmpdir)
        nlp.to_disk(path)
        reloaded = spacy.load(path)
        assert_docs_equal(nlp(text), reloaded(text))
# Absolute locations of the test fixtures bundled with the repository.
FIXTURE_DIR = (Path(__file__).parent / "fixtures/").absolute()
# Pretrained transformer test models (directory names describe the model).
BERT_JA_DIR = FIXTURE_DIR / "bert-base-japanese-test"
BERT_DIR = FIXTURE_DIR / "bert-test"
XLNET_DIR = FIXTURE_DIR / "xlnet"
DATA_DIR = (Path(__file__).parent / "data/").absolute()
# Paths handed to parametrised transformer tests.
TRF_TESTMODEL_PATH = [str(BERT_JA_DIR), str(XLNET_DIR), str(BERT_DIR)]
| 22.05
| 86
| 0.64229
|
4a1151aa3bcb2b46151d6e7eea14f0941f6a5628
| 13,686
|
py
|
Python
|
master-node-docker/sentinel/client/vpn.py
|
zhou0/sentinel
|
93760961a098b32fb6f39d3ec52ac2ff9200866a
|
[
"MIT"
] | 1
|
2019-05-03T06:15:25.000Z
|
2019-05-03T06:15:25.000Z
|
master-node-docker/sentinel/client/vpn.py
|
robDAO/sentinel
|
93760961a098b32fb6f39d3ec52ac2ff9200866a
|
[
"MIT"
] | null | null | null |
master-node-docker/sentinel/client/vpn.py
|
robDAO/sentinel
|
93760961a098b32fb6f39d3ec52ac2ff9200866a
|
[
"MIT"
] | 1
|
2019-05-03T06:15:29.000Z
|
2019-05-03T06:15:29.000Z
|
# coding=utf-8
import json
import time
from uuid import uuid4
import falcon
import requests
from ..config import COINBASE_ADDRESS
from ..config import DECIMALS
from ..config import REFERRAL_DUMMY
from ..db import db
from ..eth import vpn_service_manager
from ..helpers import eth_helper
def get_vpns_list(vpn_type):
    """Return all nodes of ``vpn_type`` whose VPN service is currently up.

    Results carry only the advertised fields (address, ip, price, location,
    speeds, rating, ...) and are ordered by newest version, then highest
    rating, then lowest CPU load.
    """
    query = {
        'vpn.status': 'up',
        'vpn_type': vpn_type
    }
    projection = {
        '_id': 0,
        'account_addr': 1,
        'ip': 1,
        'price_per_gb': 1,
        'location': 1,
        'net_speed.upload': 1,
        'latency': 1,
        'rating': 1,
        'net_speed.download': 1,
        'enc_method': 1,
        'description': 1,
        'version': 1,
        'load': 1,
        'active_connections': 1
    }
    ordering = [
        ('version', -1),
        ('rating', -1),
        ('load.cpu', 1)
    ]
    cursor = db.nodes.find(query, projection).sort(ordering)
    return list(cursor)
def get_current_vpn_usage(account_addr, session_name):
    """Return the live ``server_usage`` document for the given client
    session, or an empty dict when no matching connection exists."""
    doc = db.connections.find_one(
        {'client_addr': account_addr, 'session_name': session_name},
        {'_id': 0, 'server_usage': 1},
    )
    if doc is None:
        return {}
    return doc['server_usage']
class GetVpnCredentials(object):
    # Handler for POST /client/vpn: validates the client's token balance,
    # dues and initial payment, then asks the chosen VPN node for a
    # one-time connection token.
    def on_post(self, req, resp):
        """
        @api {post} /client/vpn Get VPN server credentials.
        @apiName GetVpnCredentials
        @apiGroup VPN
        @apiParam {String} account_addr Account address.
        @apiParam {String} vpn_addr Account address of the VPN server.
        @apiSuccess {String} ip IP address of the VPN server.
        @apiSuccess {Number} port Port number of the VPN server.
        @apiSuccess {String} token Unique token for validation.
        @apiSuccess {String} vpn_addr VPN server account address.
        """
        # Requests without an account fall back to the referral dummy
        # address and are identified by their device_id instead.
        account_addr = str(req.body['account_addr']).lower() if 'account_addr' in req.body else REFERRAL_DUMMY
        device_id = str(req.body['device_id']) if 'device_id' in req.body else None
        vpn_addr = str(req.body['vpn_addr']).lower()
        balances = eth_helper.get_balances(account_addr)
        # Eligibility gate: the client must hold at least 100 SENTs.
        if balances['rinkeby']['sents'] >= (100 * DECIMALS):
            error, due_amount = eth_helper.get_due_amount(account_addr)
            if error is None:
                if due_amount > 0:
                    message = {
                        'success': False,
                        'message': 'You have due amount: ' + str(
                            due_amount / DECIMALS) + ' SENTs. Please try after clearing the due.'
                    }
                else:
                    # Look up the requested node; it must be up right now.
                    node = db.nodes.find_one({
                        'account_addr': vpn_addr,
                        'vpn.status': 'up'
                    }, {
                        '_id': 0,
                        'token': 0
                    })
                    if node is None:
                        message = {
                            'success': False,
                            'message': 'Currently VPN server is not available. Please try after sometime.'
                        }
                    else:
                        error, is_paid = eth_helper.get_initial_payment(account_addr)
                        if error is None:
                            if is_paid is True:
                                try:
                                    # Register a fresh one-time token with the
                                    # node's management API (fixed port 3000).
                                    token = uuid4().hex
                                    ip, port = str(node['ip']), 3000
                                    body = {
                                        # Fixes for SLC
                                        'account_addr': device_id if account_addr == REFERRAL_DUMMY else account_addr,
                                        'token': token
                                    }
                                    url = 'http://{}:{}/token'.format(ip, port)
                                    _ = requests.post(url, json=body, timeout=10)
                                    message = {
                                        'success': True,
                                        'ip': ip,
                                        'port': port,
                                        'token': token,
                                        'vpn_addr': vpn_addr,
                                        'message': 'Started VPN session.'
                                    }
                                except Exception as err:
                                    message = {
                                        'success': False,
                                        'message': 'Connection timed out while connecting to VPN server.',
                                        'error': str(err)
                                    }
                            else:
                                # Client must first pay the one-time initial
                                # fee to the coinbase address returned here.
                                message = {
                                    'success': False,
                                    'account_addr': COINBASE_ADDRESS,
                                    'message': 'Initial VPN payment is not done.'
                                }
                        else:
                            message = {
                                'success': False,
                                'message': 'Error occurred while cheking initial payment status.'
                            }
            else:
                message = {
                    'success': False,
                    'message': 'Error occurred while checking due amount.'
                }
        else:
            message = {
                'success': False,
                'message': 'Your balance is less than 100 SENTs.'
            }
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(message)
class PayVpnUsage(object):
    # Handler for POST /client/vpn/pay: forwards a signed payment
    # transaction to the Ethereum helper and reports the outcome.
    def on_post(self, req, resp):
        """
        @api {post} /client/vpn/pay VPN usage payment.
        @apiName PayVpnUsage
        @apiGroup VPN
        @apiParam {String} payment_type Type of payment {init | normal}
        @apiParam {String} tx_data Hex code of the transaction.
        @apiParam {String} net Ethereum chain name {main | rinkeby}.
        @apiParam {String} from_addr Account address.
        @apiParam {Number} amount Amount to be paid to VPN server.
        @apiParam {Number} session_id Session ID of the VPN connection.
        @apiSuccess {String[]} errors Errors if any.
        @apiSuccess {String[]} tx_hashes Transaction hashes.
        """
        payment_type = str(req.body['payment_type']).lower()  # init OR normal
        tx_data = str(req.body['tx_data'])
        net = str(req.body['net']).lower()
        from_addr = str(req.body['from_addr']).lower()
        if 'session_id' in req.body and req.body['session_id'] is not None:
            session_id = str(req.body['session_id'])
        else:
            session_id = None
        if 'device_id' in req.body:
            device_id = str(req.body['device_id'])
        else:
            device_id = None
        errors, tx_hashes = eth_helper.pay_vpn_session(from_addr, session_id, net, tx_data, payment_type, device_id)
        succeeded = not errors
        payload = {
            'success': succeeded,
            'errors': errors,
            'tx_hashes': tx_hashes,
            'message': 'VPN payment is completed successfully.' if succeeded
                       else 'Error occurred while paying VPN usage.'
        }
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(payload)
class ReportPayment(object):
    # Handler for POST /client/vpn/report: records a session payment via the
    # VPN service manager contract and returns the transaction hash.
    def on_post(self, req, resp):
        """
        @api {post} /client/vpn/report Report VPN payment.
        @apiName ReportPayment
        @apiGroup VPN
        @apiParam {String} from_addr Account address.
        @apiParam {Number} amount Amount to be paid to VPN server.
        @apiParam {Number} session_id Session ID of the VPN connection.
        @apiSuccess {String} tx_hash Transaction hash.
        """
        from_addr = str(req.body['from_addr']).lower()
        amount = int(req.body['amount'])
        session_id = str(req.body['session_id'])
        error, tx_hash = vpn_service_manager.pay_vpn_session(from_addr, amount, session_id)
        if error is None:
            payload = {
                'success': True,
                'tx_hash': tx_hash,
                'message': 'Payment Done Successfully.'
            }
        else:
            payload = {
                'success': False,
                'error': error,
                'message': 'Vpn payment not successful.'
            }
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(payload)
class GetVpnUsage(object):
    # Handler for POST /client/vpn/usage: returns the usage history of one
    # account (or of one device when the account is the referral dummy).
    def on_post(self, req, resp):
        """
        @api {post} /client/vpn/usage Get VPN user details of specific account.
        @apiName GetVpnUsage
        @apiGroup VPN
        @apiParam {String} account_addr Account address.
        @apiSuccess {Object[]} usage VPN usage details.
        """
        account_addr = str(req.body['account_addr']).lower() if 'account_addr' in req.body else REFERRAL_DUMMY
        device_id = str(req.body['device_id']) if 'device_id' in req.body else None
        error, usage = eth_helper.get_vpn_usage(account_addr, device_id)
        if error is None:
            payload = {
                'success': True,
                'usage': usage
            }
        else:
            payload = {
                'success': False,
                'error': error,
                'message': 'Error occured while fetching the usage data.'
            }
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(payload)
class GetVpnsList(object):
    # Handler for GET /client/vpn/list: lists every active OpenVPN node.
    def on_get(self, req, resp):
        """
        @api {get} /client/vpn/list Get all unoccupied VPN servers list.
        @apiName GetVpnsList
        @apiGroup VPN
        @apiSuccess {Object[]} list Details of all VPN servers.
        """
        nodes = get_vpns_list('openvpn')
        for node in nodes:
            # Rename the key to the casing clients expect.
            node['price_per_GB'] = node.pop('price_per_gb')
        resp.status = falcon.HTTP_200
        resp.body = json.dumps({
            'success': True,
            'list': nodes
        })
class GetSocksList(object):
    # Handler for GET /client/vpn/socks-list: lists every active SOCKS5 node.
    def on_get(self, req, resp):
        """
        @api {get} /client/vpn/socks-list Get all unoccupied Socks servers list.
        @apiName GetSocksList
        @apiGroup VPN
        @apiSuccess {Object[]} list Details of all Socks servers.
        """
        nodes = get_vpns_list('socks5')
        for node in nodes:
            # Rename the key to the casing clients expect.
            node['price_per_GB'] = node.pop('price_per_gb')
        resp.status = falcon.HTTP_200
        resp.body = json.dumps({
            'success': True,
            'list': nodes
        })
class GetVpnCurrentUsage(object):
    # Handler for POST /client/vpn/current: live usage of one session.
    def on_post(self, req, resp):
        """
        @api {post} /client/vpn/current Get current VPN usage.
        @apiName GetVpnCurrentUsage
        @apiGroup VPN
        @apiParam {String} account_addr Account address.
        @apiParam {String} session_name Session name of the VPN connection.
        @apiSuccess {Object} usage Current VPN usage.
        """
        account_addr = str(req.body['account_addr']).lower()
        session_name = str(req.body['session_name'])
        resp.status = falcon.HTTP_200
        resp.body = json.dumps({
            'success': True,
            'usage': get_current_vpn_usage(account_addr, session_name)
        })
class RateVPNSession(object):
    # Handler for POST /client/vpn/rate: stores one rating for a session and
    # refreshes the node's running average rating.
    def on_post(self, req, resp):
        """
        @api {post} /client/vpn/rate Rate vpn session.
        @apiName RateVPNSession
        @apiGroup VPN
        @apiParam {String} vpn_addr VPN Account Address.
        @apiParam {String} session_name Session name of the VPN connection.
        @apiParam {String} rating Rating to the session.
        @apiSuccess {Object} message Response of the request.
        """
        vpn_addr = str(req.body['vpn_addr']).lower()
        session_name = str(req.body['session_name'])
        rating = int(req.body['rating'])
        # The session must exist before a rating is accepted.
        session = db.connections.find_one({
            'vpn_addr': vpn_addr,
            'session_name': session_name
        })
        if session is None:
            message = {
                'success': False,
                'message': 'No session found with the given details'
            }
        else:
            _ = db.ratings.insert_one({
                'vpn_addr': vpn_addr,
                'session_name': session_name,
                'rating': rating,
                'timestamp': int(time.time())
            })
            # Sum all ratings ever given to this node; the aggregation always
            # yields one document because a rating was just inserted above.
            count_dict = []
            output = db.ratings.aggregate([
                {'$match': {'vpn_addr': vpn_addr}},
                {'$project': {'totalRating': '$rating'}},
                {'$group': {'_id': 0, 'rating_count': {'$sum': '$totalRating'}}}
            ])
            for doc in output:
                count_dict.append(doc)
            vpn_total_ratings = int(count_dict[0]['rating_count'])
            # NOTE(review): Cursor.count() was removed in pymongo 4 - this
            # call needs count_documents() on newer drivers; verify pin.
            vpn_total_times = db.ratings.find({'vpn_addr': vpn_addr}).count()
            average_rating = vpn_total_ratings / vpn_total_times
            _ = db.nodes.find_one_and_update({
                'account_addr': vpn_addr
            }, {
                '$set': {
                    'rating': average_rating
                }
            })
            message = {
                'success': True,
                'message': 'Rated Successfully'
            }
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(message)
| 34.824427
| 118
| 0.498977
|
4a1152dd0f6a42e713c89ecca2561b47fe9198e5
| 538
|
py
|
Python
|
atividade036.py
|
henrikysena/Atividades_CursosEmVideo_Python
|
2890c67ea4be2cde6f42c2489d4c96b47d7b597e
|
[
"MIT"
] | null | null | null |
atividade036.py
|
henrikysena/Atividades_CursosEmVideo_Python
|
2890c67ea4be2cde6f42c2489d4c96b47d7b597e
|
[
"MIT"
] | null | null | null |
atividade036.py
|
henrikysena/Atividades_CursosEmVideo_Python
|
2890c67ea4be2cde6f42c2489d4c96b47d7b597e
|
[
"MIT"
] | null | null | null |
# Real-estate financing eligibility check: the monthly instalment may not
# exceed 30% of the applicant's salary.
print('Olá, Seja bem vindo ao Programa de Financiamento de Imóveis!')
print('')
valor = float(input('Qual o valor total do imóvel? '))
salario = float(input('Qual o valor do seu salário? '))
ano = int(input('Em quantos anos você pretende pagar o imóvel? '))
print('')
meses = ano * 12
parcela = valor / meses
limite = salario * (30/100)  # at most 30% of the salary per month
print('O valor das parcelas serão de R${:.2f}'.format(parcela))
if parcela <= limite:
    print('Você poderá financiar este imóvel!')
else:
    print('Infelizmente você não poderá financiar este imóvel!')
| 29.888889
| 69
| 0.697026
|
4a11537d2b7ca9ca184c0362c2faa0c9417d21c3
| 789
|
py
|
Python
|
setup.py
|
hachiken/report_for_erpn
|
b6b0145e4dfddd503174dc2bc2a764b23b50e754
|
[
"MIT"
] | null | null | null |
setup.py
|
hachiken/report_for_erpn
|
b6b0145e4dfddd503174dc2bc2a764b23b50e754
|
[
"MIT"
] | null | null | null |
setup.py
|
hachiken/report_for_erpn
|
b6b0145e4dfddd503174dc2bc2a764b23b50e754
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, ast

# get version from __version__ variable in my_reports/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('my_reports/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))


def _read_requirements(path='requirements.txt'):
    """Return requirement strings from *path*, skipping blanks and comments.

    Replaces the former ``pip.req.parse_requirements`` call: that pip
    internal API was removed in pip 10, which made this script crash with
    ImportError on any modern installation.
    """
    with open(path) as req_file:
        return [
            line.strip()
            for line in req_file
            if line.strip() and not line.strip().startswith('#')
        ]


setup(
    name='my_reports',
    version=version,
    description='Reports made by me',
    author='h@ci',
    author_email='hachiken@gmail.com',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=_read_requirements(),
    # dependency_links dropped: pip has ignored it since 19.0 and the pip
    # internal `_link` attribute this script read no longer exists.
)
| 29.222222
| 68
| 0.736375
|
4a115398f27df3123510743da8e3597b83564111
| 6,272
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/migrate/v20191001preview/move_collection.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/migrate/v20191001preview/move_collection.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/migrate/v20191001preview/move_collection.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MoveCollection']
class MoveCollection(pulumi.CustomResource):
    # Generated Pulumi resource wrapper for the Azure Migrate MoveCollection
    # API (version 2019-10-01-preview). Do not hand-edit beyond comments.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 move_collection_name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['MoveCollectionPropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Define the move collection.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: Defines the MSI properties of the Move Collection.
        :param pulumi.Input[str] location: The geo-location where the resource lives.
        :param pulumi.Input[str] move_collection_name: The Move Collection Name.
        :param pulumi.Input[pulumi.InputType['MoveCollectionPropertiesArgs']] properties: Defines the move collection properties.
        :param pulumi.Input[str] resource_group_name: The Resource Group Name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        # Back-compat shims for the deprecated __name__/__opts__ keywords.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: gather the declared input properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['identity'] = identity
            __props__['location'] = location
            __props__['move_collection_name'] = move_collection_name
            __props__['properties'] = properties
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start as None; the engine fills them in.
            __props__['etag'] = None
            __props__['name'] = None
            __props__['type'] = None
        # Older/parallel type tokens that resolve to this same resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:migrate:MoveCollection"), pulumi.Alias(type_="azure-nextgen:migrate/latest:MoveCollection"), pulumi.Alias(type_="azure-nextgen:migrate/v20210101:MoveCollection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(MoveCollection, __self__).__init__(
            'azure-nextgen:migrate/v20191001preview:MoveCollection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'MoveCollection':
        """
        Get an existing MoveCollection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Properties are resolved by the engine from the live resource state.
        __props__ = dict()

        return MoveCollection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        The etag of the resource.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
        """
        Defines the MSI properties of the Move Collection.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The geo-location where the resource lives.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.MoveCollectionPropertiesResponse']:
        """
        Defines the move collection properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")

    # Map camelCase provider property names to snake_case Python attributes
    # (and back) using the generated lookup tables.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 39.696203
| 250
| 0.639987
|
4a11542117a6db1ed7ad05aae1a35744a0f27716
| 322
|
py
|
Python
|
dataModels/GlusterFSVolume.py
|
micv-dev/DeepKubeGPUCluster
|
b1f674ea3c251a5287ee83d582b193248e04f9d6
|
[
"Apache-2.0"
] | 2
|
2021-01-22T05:56:40.000Z
|
2021-07-03T17:50:49.000Z
|
dataModels/GlusterFSVolume.py
|
micv-dev/DeepKubeGPUCluster
|
b1f674ea3c251a5287ee83d582b193248e04f9d6
|
[
"Apache-2.0"
] | null | null | null |
dataModels/GlusterFSVolume.py
|
micv-dev/DeepKubeGPUCluster
|
b1f674ea3c251a5287ee83d582b193248e04f9d6
|
[
"Apache-2.0"
] | null | null | null |
from peewee import *
import datetime
from dataModels.BaseModel import BaseModel
class GlusterFSVolume(BaseModel):
    """Peewee model describing a GlusterFS volume backing a Kubernetes PVC."""

    # Name of the PersistentVolumeClaim this volume serves.
    pvcName=TextField()
    # Gluster volume name; empty until assigned — TODO confirm lifecycle.
    name = TextField(default="")
    # Volume size; the unit is not recorded here (presumably GiB — verify callers).
    size=IntegerField()
    # Row timestamps, defaulting to the time the row object is created.
    createdOn = DateTimeField(default=datetime.datetime.now)
    modifiedOn = DateTimeField(default=datetime.datetime.now)
| 23
| 61
| 0.763975
|
4a115433cfdee78fcda636ebeb3a725caf17ed61
| 14,894
|
py
|
Python
|
test/test_e2e_asr_transducer.py
|
shaun95/espnet
|
afa8f8ec5b8ec77deb1a3c1531915ebbee7b80e6
|
[
"Apache-2.0"
] | null | null | null |
test/test_e2e_asr_transducer.py
|
shaun95/espnet
|
afa8f8ec5b8ec77deb1a3c1531915ebbee7b80e6
|
[
"Apache-2.0"
] | null | null | null |
test/test_e2e_asr_transducer.py
|
shaun95/espnet
|
afa8f8ec5b8ec77deb1a3c1531915ebbee7b80e6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import argparse
from packaging.version import parse as V
import tempfile
import json
import numpy as np
import pytest
import torch
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr_transducer import E2E
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend.nets_utils import pad_list
is_torch_1_4_plus = V(torch.__version__) >= V("1.4.0")
is_torch_1_5_plus = V(torch.__version__) >= V("1.5.0")
def get_default_train_args(**kwargs):
    """Return default transducer training arguments as a Namespace.

    Any keyword argument overrides the corresponding default value.
    """
    defaults = {
        "etype": "vggblstmp",
        "elayers": 1,
        "subsample": "1_2_2_1_1",
        "eunits": 4,
        "eprojs": 4,
        "dtype": "lstm",
        "dlayers": 1,
        "dunits": 4,
        "dec_embed_dim": 4,
        "dropout_rate": 0.0,
        "dropout_rate_decoder": 0.0,
        "dropout_rate_embed_decoder": 0.0,
        "joint_dim": 2,
        "joint_activation_type": "tanh",
        "transducer_loss_weight": 1.0,
        "use_ctc_loss": False,
        "ctc_loss_weight": 0.0,
        "ctc_loss_dropout_rate": 0.0,
        "use_lm_loss": False,
        "lm_loss_weight": 0.0,
        "use_aux_transducer_loss": False,
        "aux_transducer_loss_weight": 0.0,
        "aux_transducer_loss_enc_output_layers": [],
        "use_symm_kl_div_loss": False,
        "symm_kl_div_loss_weight": 0.0,
        "char_list": ["a", "b", "c", "d"],
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "report_cer": False,
        "report_wer": False,
        "verbose": 0,
        "outdir": None,
        "rnnlm": None,
        "model_module": "espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
    }
    return argparse.Namespace(**{**defaults, **kwargs})
def get_default_recog_args(**kwargs):
    """Return default transducer decoding arguments as a Namespace.

    Any keyword argument overrides the corresponding default value.
    """
    defaults = {
        "batchsize": 0,
        "beam_size": 1,
        "nbest": 1,
        "verbose": 0,
        "search_type": "default",
        "nstep": 1,
        "max_sym_exp": 2,
        "prefix_alpha": 2,
        "u_max": 5,
        "expansion_gamma": 2,
        "expansion_beta": 0.2,
        "score_norm_transducer": True,
        "rnnlm": None,
        "lm_weight": 0.1,
    }
    return argparse.Namespace(**{**defaults, **kwargs})
def get_default_scope_inputs():
    """Return the shared test dimensions: (idim, odim, input lens, output lens)."""
    return 15, 4, [12, 8], [8, 4]
def get_lm():
    """Build a tiny character-level LSTM language model for decoding tests."""
    char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
    # One LSTM layer with 4 units keeps the model fast enough for CI.
    return lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(char_list), 1, 4, typ="lstm")
    )
def get_wordlm():
    """Build a tiny look-ahead word-level language model for decoding tests."""
    char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
    word_list = ["<blank>", "<unk>", "ab", "id", "ac", "bd", "<eos>"]
    char_dict = {c: i for i, c in enumerate(char_list)}
    word_dict = {w: i for i, w in enumerate(word_list)}
    # One LSTM layer with 8 units, wrapped in the look-ahead word LM that
    # rescores at the character level.
    base_lm = lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(word_list), 1, 8)
    )
    return lm_pytorch.ClassifierWithState(
        extlm_pytorch.LookAheadWordLM(base_lm.predictor, word_dict, char_dict)
    )
def prepare_inputs(idim, odim, ilens, olens, is_cuda=False):
    """Create a deterministic random mini-batch for the transducer tests.

    Returns padded float features, their lengths, and label sequences padded
    with -1.  The RNG is reseeded so every call yields identical tensors.
    """
    np.random.seed(1)
    feat_list = [np.random.randn(ilen, idim).astype(np.float32) for ilen in ilens]
    label_list = [np.random.randint(1, odim, olen).astype(np.int32) for olen in olens]
    lengths = np.array([f.shape[0] for f in feat_list], dtype=np.int32)

    feats = pad_list([torch.from_numpy(f).float() for f in feat_list], 0)
    labels = pad_list([torch.from_numpy(y).long() for y in label_list], -1)
    feats_len = torch.from_numpy(lengths).long()

    if is_cuda:
        feats = feats.cuda()
        labels = labels.cuda()
        feats_len = feats_len.cuda()

    return feats, feats_len, labels
# Each tuple overrides (training args, decoding args) to cover the encoder /
# decoder variants and every supported beam-search algorithm, with and
# without character- and word-level language models.
@pytest.mark.parametrize(
    "train_dic, recog_dic",
    [
        ({}, {}),
        ({"eprojs": 4}, {}),
        ({"dlayers": 2}, {}),
        ({"etype": "gru"}, {}),
        ({"etype": "blstm"}, {}),
        ({"etype": "blstmp", "elayers": 2, "eprojs": 4}, {}),
        ({"etype": "vgggru"}, {}),
        ({"etype": "vggbru"}, {}),
        ({"etype": "vgggrup", "elayers": 2, "eprojs": 4}, {}),
        ({"dtype": "gru"}, {}),
        ({"dtype": "bgrup"}, {}),
        ({"dtype": "gru", "dlayers": 2}, {}),
        ({"joint-activation-type": "relu"}, {}),
        ({"joint-activation-type": "swish"}, {}),
        ({}, {"score_norm_transducer": False}),
        ({"report_cer": True, "report_wer": True}, {}),
        ({}, {"nbest": 2}),
        ({}, {"beam_size": 1}),
        ({}, {"beam_size": 2}),
        ({}, {"beam_size": 2, "search_type": "nsc"}),
        ({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "prefix_alpha": 1}),
        ({}, {"beam_size": 2, "search_type": "tsd"}),
        ({}, {"beam_size": 2, "search_type": "tsd", "max-sym-exp": 3}),
        ({}, {"beam_size": 2, "search_type": "alsd"}),
        ({}, {"beam_size": 2, "search_type": "alsd", "u_max": 10}),
        ({}, {"beam_size": 2, "search_type": "maes", "nstep": 2}),
        (
            {},
            {
                "beam_size": 2,
                "search_type": "default",
                "rnnlm": get_wordlm(),
                "lm_weight": 1.0,
            },
        ),
        ({}, {"beam_size": 2, "search_type": "nsc", "rnnlm": get_lm()}),
        ({}, {"beam_size": 2, "search_type": "nsc", "rnnlm": get_wordlm()}),
        ({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "rnnlm": get_lm()}),
        ({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "rnnlm": get_wordlm()}),
        (
            {},
            {
                "beam_size": 2,
                "search_type": "alsd",
                "rnnlm": get_lm(),
                "lm_weight": 0.2,
            },
        ),
        (
            {},
            {
                "beam_size": 2,
                "search_type": "alsd",
                "rnnlm": get_wordlm(),
                "lm_weight": 0.6,
            },
        ),
        ({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_lm()}),
        ({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_wordlm()}),
        (
            {},
            {"beam_size": 2, "search_type": "maes", "nstep": 2, "rnnlm": get_wordlm()},
        ),
    ],
)
def test_pytorch_transducer_trainable_and_decodable(train_dic, recog_dic):
    """One training step must backpropagate and the model must then decode."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)
    recog_args = get_default_recog_args(**recog_dic)

    model = E2E(idim, odim, train_args)

    batch = prepare_inputs(idim, odim, ilens, olens)

    # to avoid huge training time, cer/wer report
    # is only enabled at validation steps
    if train_args.report_cer or train_args.report_wer:
        model.training = False

    loss = model(*batch)
    loss.backward()

    beam_search = BeamSearchTransducer(
        decoder=model.dec,
        joint_network=model.transducer_tasks.joint_network,
        beam_size=recog_args.beam_size,
        lm=recog_args.rnnlm,
        lm_weight=recog_args.lm_weight,
        search_type=recog_args.search_type,
        max_sym_exp=recog_args.max_sym_exp,
        u_max=recog_args.u_max,
        nstep=recog_args.nstep,
        prefix_alpha=recog_args.prefix_alpha,
        score_norm=recog_args.score_norm_transducer,
    )

    with torch.no_grad():
        in_data = np.random.randn(20, idim)

        model.recognize(in_data, beam_search)
# Only runs on hosts with at least two CUDA devices.
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
    "train_dic",
    [
        {"report_cer": True, "report_wer": True},
    ],
)
@pytest.mark.execution_timeout(3.2)
def test_pytorch_multi_gpu_trainable(train_dic):
    """One DataParallel training step must backpropagate across two GPUs."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)

    ngpu = 2
    device_ids = list(range(ngpu))

    model = E2E(idim, odim, train_args)
    model = torch.nn.DataParallel(model, device_ids)
    model.cuda()

    batch = prepare_inputs(idim, odim, ilens, olens, is_cuda=True)

    # Average the per-replica losses before backpropagating.
    loss = 1.0 / ngpu * model(*batch)
    loss.backward(loss.new_ones(ngpu))
def test_calculate_plot_attention():
    """Attention plotting is a no-op for transducer models: it returns []."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    model = E2E(idim, odim, get_default_train_args())
    batch = prepare_inputs(idim, odim, ilens, olens, is_cuda=False)

    assert model.calculate_all_attentions(*batch) == []
# Each dict enables one auxiliary training objective (auxiliary transducer
# loss, CTC loss, symmetric KL divergence, LM loss) on top of the main loss.
@pytest.mark.parametrize(
    "train_dic",
    [
        {
            "elayers": 3,
            "use_aux_transducer_loss": True,
            "aux_transducer_loss_enc_output_layers": [1],
        },
        {
            "elayers": 2,
            "use_ctc_loss": True,
            "ctc_loss_weight": 0.5,
            "ctc_loss_dropout_rate": 0.1,
        },
        {
            "etype": "vggblstm",
            "elayers": 3,
            "use_aux_transducer_loss": True,
            "aux_transducer_loss": True,
            "use_symm_kl_div_loss": True,
            "symm_kl_div_loss_weight": 0.5,
            "aux_transducer_loss_enc_output_layers": [0, 1],
        },
        {"dlayers": 2, "use_lm_loss": True, "lm_loss_weight": 0.5},
    ],
)
def test_auxiliary_task(train_dic):
    """Training with an auxiliary loss must backpropagate, and the saved
    model must reload (auxiliary parts stripped) and still decode."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)
    recog_args = get_default_recog_args()

    model = E2E(idim, odim, train_args)

    batch = prepare_inputs(idim, odim, ilens, olens)

    loss = model(*batch)
    loss.backward()

    beam_search = BeamSearchTransducer(
        decoder=model.dec,
        joint_network=model.transducer_tasks.joint_network,
        beam_size=recog_args.beam_size,
        lm=recog_args.rnnlm,
        lm_weight=recog_args.lm_weight,
        search_type=recog_args.search_type,
        max_sym_exp=recog_args.max_sym_exp,
        u_max=recog_args.u_max,
        nstep=recog_args.nstep,
        prefix_alpha=recog_args.prefix_alpha,
        score_norm=recog_args.score_norm_transducer,
    )

    # Persist weights + config so load_trained_model can rebuild the model.
    tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
    torch.save(model.state_dict(), tmpdir + "/model.dummy.best")

    with open(tmpdir + "/model.json", "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, vars(train_args)),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )

    with torch.no_grad():
        in_data = np.random.randn(20, idim)

        model, _ = load_trained_model(tmpdir + "/model.dummy.best", training=False)

        model.recognize(in_data, beam_search)
def test_invalid_aux_transducer_loss_enc_layers():
    """Invalid auxiliary-loss encoder-layer configurations raise ValueError."""
    idim, odim, _, _ = get_default_scope_inputs()

    bad_configs = [
        # layer list missing entirely
        dict(use_aux_transducer_loss=True),
        # layer list has the wrong type
        dict(
            use_aux_transducer_loss=True,
            aux_transducer_loss_enc_output_layers="foo",
        ),
        # layer index out of range
        dict(
            use_aux_transducer_loss=True,
            aux_transducer_loss_enc_output_layers=[0, 4],
        ),
        # symmetric KL divergence with an incompatible subsampling setup
        dict(
            use_aux_transducer_loss=True,
            use_symm_kl_div_loss=True,
            aux_transducer_loss_enc_output_layers=[0],
            elayers=3,
            etype="blstmp",
            subsample="1_2_1",
        ),
    ]

    for config in bad_configs:
        train_args = get_default_train_args(**config)
        with pytest.raises(ValueError):
            E2E(idim, odim, train_args)
@pytest.mark.parametrize(
"train_dic",
[
{},
{"etype": "vggblstm"},
],
)
@pytest.mark.parametrize(
"recog_dic",
[
{},
{"beam_size": 2, "search_type": "default"},
{"beam_size": 2, "search_type": "alsd"},
{"beam_size": 2, "search_type": "tsd"},
{"beam_size": 2, "search_type": "nsc"},
{"beam_size": 2, "search_type": "maes"},
],
)
@pytest.mark.parametrize(
"quantize_dic",
[
{"mod": {torch.nn.Linear}, "dtype": torch.qint8},
{"mod": {torch.nn.Linear}, "dtype": torch.float16},
{"mod": {torch.nn.LSTM}, "dtype": torch.qint8},
{"mod": {torch.nn.LSTM}, "dtype": torch.float16},
{"mod": {torch.nn.Linear, torch.nn.LSTM}, "dtype": torch.qint8},
{"mod": {torch.nn.Linear, torch.nn.LSTM}, "dtype": torch.float16},
],
)
def test_dynamic_quantization(train_dic, recog_dic, quantize_dic):
    """Apply dynamic quantization to the model, then run beam-search decoding.

    Older torch versions do not support every (module set, dtype)
    combination; for those, the AssertionError raised by torch itself is
    asserted instead of running recognition.
    """
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)
    recog_args = get_default_recog_args(**recog_dic)
    model = E2E(idim, odim, train_args)
    # float16 quantization of Linear layers requires torch >= 1.5.
    if not is_torch_1_5_plus and (
        torch.nn.Linear in quantize_dic["mod"]
        and quantize_dic["dtype"] == torch.float16
    ):
        # In recognize(...) from asr.py we raise ValueError however
        # AssertionError is originaly raised by torch.
        with pytest.raises(AssertionError):
            model = torch.quantization.quantize_dynamic(
                model,
                quantize_dic["mod"],
                dtype=quantize_dic["dtype"],
            )
        pytest.skip("Skip rest of the test after checking AssertionError")
    else:
        model = torch.quantization.quantize_dynamic(
            model,
            quantize_dic["mod"],
            quantize_dic["dtype"],
        )
    beam_search = BeamSearchTransducer(
        decoder=model.dec,
        joint_network=model.transducer_tasks.joint_network,
        beam_size=recog_args.beam_size,
        lm=recog_args.rnnlm,
        lm_weight=recog_args.lm_weight,
        search_type=recog_args.search_type,
        max_sym_exp=recog_args.max_sym_exp,
        u_max=recog_args.u_max,
        nstep=recog_args.nstep,
        prefix_alpha=recog_args.prefix_alpha,
        score_norm=recog_args.score_norm_transducer,
        quantization=True,
    )
    with torch.no_grad():
        in_data = np.random.randn(20, idim)
        # Quantized LSTM decoding requires torch >= 1.4.
        if not is_torch_1_4_plus and torch.nn.LSTM in quantize_dic["mod"]:
            # Cf. previous comment
            with pytest.raises(AssertionError):
                model.recognize(in_data, beam_search)
        else:
            model.recognize(in_data, beam_search)
@pytest.mark.parametrize(
    "train_dic, subsample",
    [
        ({}, 4),
        ({"etype": "blstm"}, 1),
        ({"etype": "blstmp"}, 2),
    ],
)
def test_subsampling(train_dic, subsample):
    """The model must report the expected total subsampling factor."""
    idim, odim, _, _ = get_default_scope_inputs()
    model = E2E(idim, odim, get_default_train_args(**train_dic))
    assert model.get_total_subsampling_factor() == subsample
| 29.847695
| 88
| 0.594535
|
4a115483c1d4f88d275bca67cce20ad2e551a77a
| 7,239
|
py
|
Python
|
midonet/neutron/db/migration/cli.py
|
NeCTAR-RC/networking-midonet
|
7a69af3eab25f57e77738fd8398b6f4854346fd9
|
[
"Apache-2.0"
] | null | null | null |
midonet/neutron/db/migration/cli.py
|
NeCTAR-RC/networking-midonet
|
7a69af3eab25f57e77738fd8398b6f4854346fd9
|
[
"Apache-2.0"
] | null | null | null |
midonet/neutron/db/migration/cli.py
|
NeCTAR-RC/networking-midonet
|
7a69af3eab25f57e77738fd8398b6f4854346fd9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from alembic import config as alembic_config
from oslo_config import cfg
from oslo_serialization import jsonutils
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from neutron.db.migration import cli as n_cli
from midonet.neutron._i18n import _
import midonet.neutron.db.data_state_db as ds_db
import midonet.neutron.db.data_version_db as dv_db
from midonet.neutron.db import task_db
CONF = n_cli.CONF
def get_session(config):
    """Open a new SQLAlchemy session bound to the configured Neutron DB.

    :param config: Alembic config carrying the Neutron configuration, from
                   which the database connection URL is read.
    """
    db_url = config.neutron_config.database.connection
    session_factory = sessionmaker(bind=create_engine(db_url))
    return session_factory()
def task_list(config, cmd):
    """Lists all of the tasks in the task table.

    Optionally filters tasks to show unprocessed tasks only (``-u`` flag).

    :param config: contains neutron configuration, like database connection.
    :param cmd: unused, but needed in the function signature by the command
                parser.
    """
    session = get_session(config)
    printer = config.print_stdout
    line = "%-7s%-11s%-20s%-40s%-20s"
    printer(line, "id", "type", "data type", "resource id", "time")
    printer(line, "--", "----", "---------", "-----------", "----")
    show_unprocessed = config.neutron_config.command.u
    # A plain for-loop: the original built a throwaway list via a
    # comprehension executed purely for its printing side effect.
    for task in task_db.get_task_list(session, show_unprocessed):
        printer(line, task.id, task.type, task.data_type, task.resource_id,
                task.created_at)
def task_clean(config, cmd):
    """Remove every task already processed by the cluster from the task table.

    :param config: contains neutron configuration, like database connection.
    :param cmd: unused, but needed in the function signature by the command
                parser.
    """
    task_db.task_clean(get_session(config))
def task_resource(config, cmd):
    """Print the latest state of every resource represented in the task table.

    Only the most recent information is shown; resources already removed
    from the task table are not considered.

    :param config: contains neutron configuration, like database connection.
    :param cmd: unused, but needed in the function signature by the command
                parser.
    """
    printer = config.print_stdout
    current = task_db.get_current_task_data(get_session(config))
    for data_type, resources in current.items():
        printer(data_type + "S: \n")
        for serialized in resources.values():
            parsed = jsonutils.loads(serialized)
            printer(jsonutils.dumps(parsed, indent=4, sort_keys=True))
def data_show(config, cmd):
    """Dump the contents of the data state table, one labelled row per field.

    :param config: contains neutron configuration, like database connection.
    :param cmd: unused, but needed in the function signature by the command
                parser.
    """
    printer = config.print_stdout
    state = ds_db.get_data_state(get_session(config))
    fmt = "%-25s : %s"
    rows = [
        ("last processed task id", state.last_processed_task_id),
        ("last updated", state.updated_at),
        ("active version", state.active_version),
        ("read only", "True" if state.readonly else "False"),
    ]
    for label, value in rows:
        printer(fmt, label, value)
def data_readonly(config, cmd):
    """Set the task table access state to "read only".

    :param config: contains neutron configuration, like database connection.
    :param cmd: unused, but needed in the function signature by the command
                parser.
    """
    ds_db.set_readonly(get_session(config))
def data_readwrite(config, cmd):
    """Set the task table access state to "read and write".

    :param config: contains neutron configuration, like database connection.
    :param cmd: unused, but needed in the function signature by the command
                parser.
    """
    ds_db.set_readwrite(get_session(config))
def data_version_list(config, cmd):
    """Print sync status for every row of the midonet data version table.

    :param config: contains neutron configuration, like database connection.
    :param cmd: unused, but needed in the function signature by the command
                parser.
    """
    printer = config.print_stdout
    fmt = "%-7s%-20s%-20s%-10s"
    printer(fmt, "id", "sync_status", "sync_tasks_status", "stale")
    printer(fmt, "--", "-----------", "-----------------", "-----")
    for version in dv_db.get_data_versions(get_session(config)):
        printer(fmt, version.id, version.sync_status,
                version.sync_tasks_status, version.stale)
def data_version_sync(config, cmd):
    # Placeholder: registered for the 'data-version-sync' subcommand but
    # not implemented yet.
    pass
def data_version_activate(config, cmd):
    # Placeholder: registered for the 'data-version-activate' subcommand but
    # not implemented yet.
    pass
def add_command_parsers(subparsers):
parser = subparsers.add_parser('task-list')
parser.add_argument('-u', action='store_true')
parser.set_defaults(func=task_list)
parser = subparsers.add_parser('task-clean')
parser.set_defaults(func=task_clean)
parser = subparsers.add_parser('task-resource')
parser.set_defaults(func=task_resource)
parser = subparsers.add_parser('data-show')
parser.set_defaults(func=data_show)
parser = subparsers.add_parser('data-readonly')
parser.set_defaults(func=data_readonly)
parser = subparsers.add_parser('data-readwrite')
parser.set_defaults(func=data_readwrite)
parser = subparsers.add_parser('data-version-list')
parser.set_defaults(func=data_version_list)
parser = subparsers.add_parser('data-version-sync')
parser.set_defaults(func=data_version_sync)
parser = subparsers.add_parser('data-version-activate')
parser.set_defaults(func=data_version_activate)
# Top-level CLI option that exposes the midonet-specific subcommands;
# add_command_parsers populates the available choices.
command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help=_('Available commands'),
                                handler=add_command_parsers)
# Override the db management options with our own version: drop Neutron's
# stock 'command' option and register ours in its place.
CONF.unregister_opt(n_cli.command_opt)
CONF.register_cli_opt(command_opt)
def get_alembic_config():
    """Build the Alembic config pointing at this package's migration scripts.

    Reads ``alembic.ini`` from the directory containing this module and
    forces the script location to the bundled migration package.
    """
    ini_path = os.path.join(os.path.dirname(__file__), 'alembic.ini')
    config = alembic_config.Config(ini_path)
    config.set_main_option('script_location',
                           'midonet.neutron.db.migration:alembic_migration')
    return config
def main():
    """Entry point: parse CLI options and dispatch the chosen subcommand."""
    CONF(project='neutron')
    alembic_cfg = get_alembic_config()
    # Attach the Neutron conf to the Alembic conf so handlers can reach
    # the database settings and parsed subcommand arguments.
    alembic_cfg.neutron_config = CONF
    CONF.command.func(alembic_cfg, CONF.command.name)
| 34.636364
| 78
| 0.698715
|
4a1154e1a5bd03a241effb8e4ef05bc4d8636929
| 6,201
|
py
|
Python
|
PaddleCV/image_classification/models/mobilenet.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 3
|
2019-08-23T06:34:46.000Z
|
2021-06-08T01:42:29.000Z
|
PaddleCV/image_classification/models/mobilenet.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | null | null | null |
PaddleCV/image_classification/models/mobilenet.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 5
|
2019-07-18T03:38:39.000Z
|
2021-05-22T16:27:38.000Z
|
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = ['MobileNet']
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class MobileNet():
    """MobileNet-v1 classification network built on paddle.fluid layers.

    The backbone is a stem convolution followed by a fixed stack of
    depthwise-separable blocks; layer names (conv1, conv2_1, ..., conv6,
    fc7) are kept identical to the original layout so pretrained
    checkpoints continue to load.
    """

    def __init__(self):
        self.params = train_parameters

    def net(self, input, class_dim=1000, scale=1.0):
        """Build the network graph and return un-normalized class scores.

        :param input: input image tensor (NCHW).
        :param class_dim: number of classes for the final FC layer.
        :param scale: width multiplier applied to every channel count.
        """
        # Depthwise-separable stages as
        # (num_filters1, num_filters2, num_groups, stride, name) tuples.
        stages = [
            (32, 64, 32, 1, "conv2_1"),       # 56x56
            (64, 128, 64, 2, "conv2_2"),
            (128, 128, 128, 1, "conv3_1"),    # 28x28
            (128, 256, 128, 2, "conv3_2"),
            (256, 256, 256, 1, "conv4_1"),    # 14x14
            (256, 512, 256, 2, "conv4_2"),
        ]
        # Five identical 512-channel blocks at 14x14 resolution.
        stages += [(512, 512, 512, 1, "conv5_" + str(i + 1))
                   for i in range(5)]
        stages += [
            (512, 1024, 512, 2, "conv5_6"),   # 7x7
            (1024, 1024, 1024, 1, "conv6"),
        ]
        # conv1: 112x112 stem convolution.
        out = self.conv_bn_layer(
            input,
            filter_size=3,
            channels=3,
            num_filters=int(32 * scale),
            stride=2,
            padding=1,
            name="conv1")
        for filters1, filters2, groups, stride, name in stages:
            out = self.depthwise_separable(
                out,
                num_filters1=filters1,
                num_filters2=filters2,
                num_groups=groups,
                stride=stride,
                scale=scale,
                name=name)
        # Global average pooling down to a 1x1 spatial map.
        out = fluid.layers.pool2d(
            input=out,
            pool_size=0,
            pool_stride=1,
            pool_type='avg',
            global_pooling=True)
        return fluid.layers.fc(input=out,
                               size=class_dim,
                               param_attr=ParamAttr(
                                   initializer=MSRA(), name="fc7_weights"),
                               bias_attr=ParamAttr(name="fc7_offset"))

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      act='relu',
                      use_cudnn=True,
                      name=None):
        """Bias-free convolution followed by batch normalization.

        ``channels`` is accepted for interface compatibility but unused.
        """
        conv_out = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + "_weights"),
            bias_attr=False)
        bn_name = name + "_bn"
        # Activation is applied by batch_norm rather than conv2d.
        return fluid.layers.batch_norm(
            input=conv_out,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def depthwise_separable(self,
                            input,
                            num_filters1,
                            num_filters2,
                            num_groups,
                            stride,
                            scale,
                            name=None):
        """Depthwise 3x3 convolution followed by a pointwise 1x1 convolution."""
        depthwise = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=int(num_filters1 * scale),
            stride=stride,
            padding=1,
            num_groups=int(num_groups * scale),
            use_cudnn=False,
            name=name + "_dw")
        return self.conv_bn_layer(
            input=depthwise,
            filter_size=1,
            num_filters=int(num_filters2 * scale),
            stride=1,
            padding=0,
            name=name + "_sep")
| 29.388626
| 77
| 0.501532
|
4a115544fca4256d2650ca981ec3fb6956ba82ca
| 17,906
|
py
|
Python
|
max7219/led.py
|
clickworkorange/max7219
|
8c5eeec16cb2b53973b3d2e01d7ae9f1d307c130
|
[
"MIT"
] | null | null | null |
max7219/led.py
|
clickworkorange/max7219
|
8c5eeec16cb2b53973b3d2e01d7ae9f1d307c130
|
[
"MIT"
] | null | null | null |
max7219/led.py
|
clickworkorange/max7219
|
8c5eeec16cb2b53973b3d2e01d7ae9f1d307c130
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from max7219.font import DEFAULT_FONT
from max7219.rotate8x8 import rotate
class constants(object):
    """Register addresses of the MAX7219 (see the MAX7219/MAX7221 datasheet)."""
    MAX7219_REG_NOOP = 0x0
    # Digit registers 0..7 map to the eight digits / matrix columns.
    MAX7219_REG_DIGIT0 = 0x1
    MAX7219_REG_DIGIT1 = 0x2
    MAX7219_REG_DIGIT2 = 0x3
    MAX7219_REG_DIGIT3 = 0x4
    MAX7219_REG_DIGIT4 = 0x5
    MAX7219_REG_DIGIT5 = 0x6
    MAX7219_REG_DIGIT6 = 0x7
    MAX7219_REG_DIGIT7 = 0x8
    MAX7219_REG_DECODEMODE = 0x9
    MAX7219_REG_INTENSITY = 0xA
    MAX7219_REG_SCANLIMIT = 0xB
    MAX7219_REG_SHUTDOWN = 0xC
    MAX7219_REG_DISPLAYTEST = 0xF
class device(object):
    """
    Base class for handling multiple cascaded MAX7219 devices.
    Callers should generally pick either the :py:class:`sevensegment` or
    :py:class:`matrix` subclasses instead depending on which application
    is required.
    A buffer is maintained which holds the bytes that will be cascaded
    every time :py:func:`flush` is called.
    """
    # Number of digit registers (= matrix columns) per MAX7219 chip.
    NUM_DIGITS = 8

    def __init__(self, cascaded=1, spi_bus=0, spi_device=0, vertical=False):
        """
        Constructor: `cascaded` should be the number of daisy-chained MAX7219
        devices that are connected. `vertical` should be set to True to
        rotate each 8x8 tile at flush time (see :py:func:`flush`), e.g. for
        matrices mounted perpendicular to the usual orientation.
        """
        import spidev
        assert cascaded > 0, "Must have at least one device!"
        self._cascaded = cascaded
        self._buffer = [0] * self.NUM_DIGITS * self._cascaded
        self._spi = spidev.SpiDev()
        self._spi.open(spi_bus, spi_device)
        self._spi.max_speed_hz = 500000
        self._vertical = vertical
        self.command(constants.MAX7219_REG_SCANLIMIT, 7)    # show all 8 digits
        self.command(constants.MAX7219_REG_DECODEMODE, 0)   # use matrix (not digits)
        self.command(constants.MAX7219_REG_DISPLAYTEST, 0)  # no display test
        self.command(constants.MAX7219_REG_SHUTDOWN, 1)     # not shutdown mode
        self.brightness(7)                                  # intensity: range: 0..15
        self.clear()

    def command(self, register, data):
        """
        Sends a specific register some data, replicated for all cascaded
        devices
        """
        assert constants.MAX7219_REG_DECODEMODE <= register <= constants.MAX7219_REG_DISPLAYTEST
        self._write([register, data] * self._cascaded)

    def _write(self, data):
        """
        Send the bytes (which should comprise of alternating command,
        data values) over the SPI device.
        """
        self._spi.xfer2(list(data))

    def _values(self, position, buf):
        """
        A generator which yields the digit/column position and the data
        value from that position for each of the cascaded devices.
        """
        for deviceId in range(self._cascaded):
            yield position + constants.MAX7219_REG_DIGIT0
            yield buf[(deviceId * self.NUM_DIGITS) + position]

    def clear(self, deviceId=None):
        """
        Clears the buffer the given deviceId if specified (else clears all
        devices), and flushes.
        """
        assert not deviceId or 0 <= deviceId < self._cascaded, "Invalid deviceId: {0}".format(deviceId)
        if deviceId is None:
            start = 0
            end = self._cascaded
        else:
            start = deviceId
            end = deviceId + 1
        # Zero every column of every selected device without redrawing,
        # then flush once at the end.
        for deviceId in range(start, end):
            for position in range(self.NUM_DIGITS):
                self.set_byte(deviceId,
                              position + constants.MAX7219_REG_DIGIT0,
                              0, redraw=False)
        self.flush()

    def _preprocess_buffer(self, buf):
        """
        Overload in subclass to provide custom behaviour: see
        matrix implementation for example.
        """
        return buf

    def flush(self):
        """
        For each digit/column, cascade out the contents of the buffer
        cells to the SPI device.
        """
        # Allow subclasses to pre-process the buffer: they shouldn't
        # alter it, so make a copy first.
        buf = self._preprocess_buffer(list(self._buffer))
        assert len(buf) == len(self._buffer), "Preprocessed buffer is wrong size"
        if self._vertical:
            # Rotate each device's 8x8 tile before sending.
            tmp_buf = []
            for x in range(0, self._cascaded):
                tmp_buf += rotate(buf[x*8:x*8+8])
            buf = tmp_buf
        for posn in range(self.NUM_DIGITS):
            self._write(self._values(posn, buf))

    def brightness(self, intensity):
        """
        Sets the brightness level of all cascaded devices to the same
        intensity level, ranging from 0..15. Note that setting the brightness
        to a high level will draw more current, and may cause intermittent
        issues / crashes if the USB power source is insufficient.
        """
        assert 0 <= intensity < 16, "Invalid brightness: {0}".format(intensity)
        self.command(constants.MAX7219_REG_INTENSITY, intensity)

    def set_byte(self, deviceId, position, value, redraw=True):
        """
        Low level mechanism to set a byte value in the buffer array. If redraw
        is not suppled, or set to True, will force a redraw of _all_ buffer
        items: If you are calling this method rapidly/frequently (e.g in a
        loop), it would be more efficient to set to False, and when done,
        call :py:func:`flush`.
        Prefer to use the higher-level method calls in the subclasses below.
        """
        assert 0 <= deviceId < self._cascaded, "Invalid deviceId: {0}".format(deviceId)
        assert constants.MAX7219_REG_DIGIT0 <= position <= constants.MAX7219_REG_DIGIT7, "Invalid digit/column: {0}".format(position)
        assert 0 <= value < 256, 'Value {0} outside range 0..255'.format(value)
        # position is a register address; subtract the DIGIT0 base to get
        # the zero-based buffer offset.
        offset = (deviceId * self.NUM_DIGITS) + position - constants.MAX7219_REG_DIGIT0
        self._buffer[offset] = value
        if redraw:
            self.flush()

    def rotate_left(self, redraw=True):
        """
        Scrolls the buffer one column to the left. The data that scrolls off
        the left side re-appears at the right-most position. If redraw
        is not suppled, or left set to True, will force a redraw of _all_ buffer
        items
        """
        t = self._buffer[-1]
        for i in range((self.NUM_DIGITS * self._cascaded) - 1, 0, -1):
            self._buffer[i] = self._buffer[i - 1]
        self._buffer[0] = t
        if redraw:
            self.flush()

    def rotate_right(self, redraw=True):
        """
        Scrolls the buffer one column to the right. The data that scrolls off
        the right side re-appears at the left-most position. If redraw
        is not suppled, or left set to True, will force a redraw of _all_ buffer
        items
        """
        t = self._buffer[0]
        for i in range(0, (self.NUM_DIGITS * self._cascaded) - 1, 1):
            self._buffer[i] = self._buffer[i + 1]
        self._buffer[-1] = t
        if redraw:
            self.flush()

    def scroll_left(self, redraw=True):
        """
        Scrolls the buffer one column to the left. Any data that scrolls off
        the left side is lost and does not re-appear on the right. An empty
        column is inserted at the right-most position. If redraw
        is not suppled, or set to True, will force a redraw of _all_ buffer
        items
        """
        del self._buffer[0]
        self._buffer.append(0)
        if redraw:
            self.flush()

    def scroll_right(self, redraw=True):
        """
        Scrolls the buffer one column to the right. Any data that scrolls off
        the right side is lost and does not re-appear on the left. An empty
        column is inserted at the left-most position. If redraw
        is not suppled, or set to True, will force a redraw of _all_ buffer
        items
        """
        del self._buffer[-1]
        self._buffer.insert(0, 0)
        if redraw:
            self.flush()
class sevensegment(device):
    """
    Implementation of MAX7219 devices cascaded with a series of seven-segment
    LEDs. It provides a convenient method to write a number to a given device
    in octal, decimal or hex, flushed left/right with zero padding. Base 10
    numbers can be either integers or floating point (with the number of
    decimal points configurable).
    """
    # Segment bitmap used for characters with no representation (underscore).
    _UNDEFINED = 0x08
    # Maps numeric base to the printf conversion character used below.
    _RADIX = {8: 'o', 10: 'f', 16: 'x'}
    # Some letters cannot be represented by 7 segments, so dictionary lookup
    # will default to _UNDEFINED (an underscore) instead.
    _DIGITS = {
        ' ': 0x00,
        '-': 0x01,
        '_': 0x08,
        '0': 0x7e,
        '1': 0x30,
        '2': 0x6d,
        '3': 0x79,
        '4': 0x33,
        '5': 0x5b,
        '6': 0x5f,
        '7': 0x70,
        '8': 0x7f,
        '9': 0x7b,
        'a': 0x7d,
        'b': 0x1f,
        'c': 0x0d,
        'd': 0x3d,
        'e': 0x6f,
        'f': 0x47,
        'g': 0x7b,
        'h': 0x17,
        'i': 0x10,
        'j': 0x18,
        # 'k': cant represent
        'l': 0x06,
        # 'm': cant represent
        'n': 0x15,
        'o': 0x1d,
        'p': 0x67,
        'q': 0x73,
        'r': 0x05,
        's': 0x5b,
        't': 0x0f,
        'u': 0x1c,
        'v': 0x1c,
        # 'w': cant represent
        # 'x': cant represent
        'y': 0x3b,
        'z': 0x6d,
        'A': 0x77,
        'B': 0x7f,
        'C': 0x4e,
        'D': 0x7e,
        'E': 0x4f,
        'F': 0x47,
        'G': 0x5e,
        'H': 0x37,
        'I': 0x30,
        'J': 0x38,
        # 'K': cant represent
        'L': 0x0e,
        # 'M': cant represent
        'N': 0x76,
        'O': 0x7e,
        'P': 0x67,
        'Q': 0x73,
        'R': 0x46,
        'S': 0x5b,
        'T': 0x0f,
        'U': 0x3e,
        'V': 0x3e,
        # 'W': cant represent
        # 'X': cant represent
        'Y': 0x3b,
        'Z': 0x6d,
        ',': 0x80,
        '.': 0x80
    }

    def letter(self, deviceId, position, char, dot=False, redraw=True):
        """
        Looks up the most appropriate character representation for char
        from the digits table, and writes that bitmap value into the buffer
        at the given deviceId / position.
        """
        assert dot in [0, 1, False, True]
        # Bit 7 of the segment byte is the decimal point.
        value = self._DIGITS.get(str(char), self._UNDEFINED) | (dot << 7)
        self.set_byte(deviceId, position, value, redraw)

    def write_number(self, deviceId, value, base=10, decimalPlaces=0,
                     zeroPad=False, leftJustify=False):
        """
        Formats the value according to the parameters supplied, and displays
        on the specified device. If the formatted number is larger than
        8 digits, then an OverflowError is raised.
        """
        assert 0 <= deviceId < self._cascaded, "Invalid deviceId: {0}".format(deviceId)
        assert base in self._RADIX, "Invalid base: {0}".format(base)
        # Magic up a printf format string
        size = self.NUM_DIGITS
        formatStr = '%'
        if zeroPad:
            formatStr += '0'
        if decimalPlaces > 0:
            size += 1
        if leftJustify:
            # A negative printf field width left-justifies.
            size *= -1
        formatStr = '{fmt}{size}.{dp}{type}'.format(
            fmt=formatStr, size=size, dp=decimalPlaces,
            type=self._RADIX[base])
        position = constants.MAX7219_REG_DIGIT7
        strValue = formatStr % value
        # Go through each digit in the formatted string,
        # updating the buffer accordingly
        for char in strValue:
            if position < constants.MAX7219_REG_DIGIT0:
                self.clear(deviceId)
                raise OverflowError('{0} too large for display'.format(strValue))
            if char == '.':
                # The decimal point is rendered as the dot flag on the
                # neighbouring digit, not as its own position.
                continue
            dp = (decimalPlaces > 0 and position == decimalPlaces + 1)
            self.letter(deviceId, position, char, dot=dp, redraw=False)
            position -= 1
        self.flush()

    def write_text(self, deviceId, text):
        """
        Outputs the text (as near as possible) on the specific device. If
        text is larger than 8 characters, then an OverflowError is raised.
        """
        assert 0 <= deviceId < self._cascaded, "Invalid deviceId: {0}".format(deviceId)
        if len(text) > 8:
            raise OverflowError('{0} too large for display'.format(text))
        # Pad to 8 chars and reverse: digit 7 is the left-most position.
        for pos, char in enumerate(text.ljust(8)[::-1]):
            self.letter(deviceId, constants.MAX7219_REG_DIGIT0 + pos, char, redraw=False)
        self.flush()

    def show_message(self, text, delay=0.4):
        """
        Transitions the text message across the devices from left-to-right
        """
        # Add some spaces on (same number as cascaded devices) so that the
        # message scrolls off to the left completely.
        text += ' ' * self._cascaded * 8
        for value in text:
            time.sleep(delay)
            self.scroll_right(redraw=False)
            self._buffer[0] = self._DIGITS.get(value, self._UNDEFINED)
            self.flush()
class matrix(device):
    """
    Implementation of MAX7219 devices cascaded with a series of 8x8 LED
    matrix devices. It provides a convenient methods to write letters
    to specific devices, to scroll a large message from left-to-right, or
    to set specific pixels. It is assumed the matrices are linearly aligned.
    """
    # Display post-processing state, applied in _preprocess_buffer at
    # flush time (not stored in the buffer itself).
    _invert = 0
    _orientation = 0

    def letter(self, deviceId, asciiCode, font=None, redraw=True):
        """
        Writes the ASCII letter code to the given device in the specified font.
        """
        assert 0 <= asciiCode < 256
        if not font:
            font = DEFAULT_FONT
        col = constants.MAX7219_REG_DIGIT0
        # Each font entry is a sequence of column bytes; copy them into
        # successive digit registers of the target device.
        for value in font[asciiCode]:
            if col > constants.MAX7219_REG_DIGIT7:
                self.clear(deviceId)
                raise OverflowError('Font for \'{0}\' too large for display'.format(asciiCode))
            self.set_byte(deviceId, col, value, redraw=False)
            col += 1
        if redraw:
            self.flush()

    def scroll_up(self, redraw=True):
        """
        Scrolls the underlying buffer (for all cascaded devices) up one pixel
        """
        self._buffer = [value >> 1 for value in self._buffer]
        if redraw:
            self.flush()

    def scroll_down(self, redraw=True):
        """
        Scrolls the underlying buffer (for all cascaded devices) down one pixel
        """
        self._buffer = [(value << 1) & 0xff for value in self._buffer]
        if redraw:
            self.flush()

    def show_message(self, text, font=None, delay=0.05, always_scroll=False):
        """
        Shows a message on the device. If it's longer then the total width
        (or always_scroll=True), it transitions the text message across the
        devices from right-to-left.
        """
        if not font:
            font = DEFAULT_FONT
        display_length = self.NUM_DIGITS * self._cascaded
        # Flatten the text into one list of column bytes.
        src = [c for ascii_code in text for c in font[ord(ascii_code)]]
        scroll = always_scroll or len(src) > display_length
        if scroll:
            # Add some spaces on (same number as cascaded devices) so that the
            # message scrolls off to the left completely.
            src += [c for ascii_code in ' ' * self._cascaded
                    for c in font[ord(ascii_code)]]
        else:
            # How much margin we need on the left so it's centered
            margin = int((display_length - len(src))/2)
            # Reset the buffer so no traces of the previous message are left
            self._buffer = [0] * display_length
        for pos, value in enumerate(src):
            if scroll:
                time.sleep(delay)
                self.scroll_left(redraw=False)
                self._buffer[-1] = value
                self.flush()
            else:
                self._buffer[margin+pos] = value
        if not scroll:
            self.flush()

    def pixel(self, x, y, value, redraw=True):
        """
        Sets (value = 1) or clears (value = 0) the pixel at the given
        co-ordinate. It may be more efficient to batch multiple pixel
        operations together with redraw=False, and then call
        :py:func:`flush` to redraw just once.
        """
        assert 0 <= x < len(self._buffer)
        assert 0 <= y < self.NUM_DIGITS
        if value:
            self._buffer[x] |= (1 << y)
        else:
            self._buffer[x] &= ~(1 << y)
        if redraw:
            self.flush()

    def _rotate(self, buf):
        """
        Rotates tiles in the buffer by the given orientation
        """
        result = []
        # Rotate each 8x8 tile in 90-degree steps.
        for i in range(0, self._cascaded * self.NUM_DIGITS, self.NUM_DIGITS):
            tile = buf[i:i + self.NUM_DIGITS]
            for _ in range(self._orientation // 90):
                tile = rotate(tile)
            result += tile
        return result

    def _preprocess_buffer(self, buf):
        """
        Inverts and/or orientates the buffer before flushing according to
        user set parameters
        """
        if self._invert:
            buf = [~x & 0xff for x in buf]
        if self._orientation:
            buf = self._rotate(buf)
        return super(matrix, self)._preprocess_buffer(buf)

    def invert(self, value, redraw=True):
        """
        Sets whether the display should be inverted or not when displaying
        letters.
        """
        assert value in [0, 1, False, True]
        self._invert = value
        if redraw:
            self.flush()

    def orientation(self, angle, redraw=True):
        """
        Sets the orientation (angle should be 0, 90, 180 or 270) at which
        the characters are displayed.
        """
        assert angle in [0, 90, 180, 270]
        self._orientation = angle
        if redraw:
            self.flush()
| 33.721281
| 133
| 0.578019
|
4a115565aa0c6dd82894ac350ce43ef44d3e11a5
| 610
|
py
|
Python
|
wrappers/python/tests/pairwise/test_pairwise_exists.py
|
sklump/indy-sdk
|
ee05a89ddf60b42f7483bebf2d89a936e12730df
|
[
"Apache-2.0"
] | 636
|
2017-05-25T07:45:43.000Z
|
2022-03-23T22:30:34.000Z
|
wrappers/python/tests/pairwise/test_pairwise_exists.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 731
|
2017-05-29T07:15:08.000Z
|
2022-03-31T07:55:58.000Z
|
wrappers/python/tests/pairwise/test_pairwise_exists.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 904
|
2017-05-25T07:45:49.000Z
|
2022-03-31T07:43:31.000Z
|
import pytest
from indy import pairwise
@pytest.mark.asyncio
async def test_is_pairwise_exists_works(wallet_handle, identity_my2, identity_trustee1):
    """After creating a pairwise record, is_pairwise_exists must return True."""
    (my_did, _) = identity_my2
    (their_did, _) = identity_trustee1
    await pairwise.create_pairwise(wallet_handle, their_did, my_did, None)
    assert await pairwise.is_pairwise_exists(wallet_handle, their_did)
@pytest.mark.asyncio
async def test_is_pairwise_exists_works_for_not_created(wallet_handle, identity_my2, identity_trustee1):
    """Without a prior create_pairwise, is_pairwise_exists must return False."""
    (their_did, _) = identity_trustee1
    assert not await pairwise.is_pairwise_exists(wallet_handle, their_did)
| 35.882353
| 104
| 0.813115
|
4a115591a0d42f2a434a462fa400a4f74403e55c
| 5,074
|
py
|
Python
|
run.py
|
DavideA/machine-learning-dgm
|
bb70ce09a86a549fc85e55a9b8f2364f73841e38
|
[
"RSA-MD"
] | null | null | null |
run.py
|
DavideA/machine-learning-dgm
|
bb70ce09a86a549fc85e55a9b8f2364f73841e38
|
[
"RSA-MD"
] | null | null | null |
run.py
|
DavideA/machine-learning-dgm
|
bb70ce09a86a549fc85e55a9b8f2364f73841e38
|
[
"RSA-MD"
] | null | null | null |
# Copyright 2019 SAP SE
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import os
import random
import shutil
import time
import numpy as np
import torch.utils.data
# `opt` is a mutable global config object; `cfg_from_file` merges a YAML file
# into it in place, so everything below reads hyper-parameters from `opt`.
from cfg.load_config import opt, cfg_from_file
# Start timestamp; used further down to name the log directory.
ts = time.time()
# Arguments
parser = argparse.ArgumentParser(description='xxx')
parser.add_argument('--dataset', default='svhn', type=str, required=False,
                    choices=['mnist', 'svhn'], help='Dataset name')
# NOTE(review): the flag accepts lowercase 'dgmw1'/'dgmw2', while the import
# dispatch below compares opt.method (loaded from the YAML, not this flag)
# against "DGMw"/"DGMa" -- confirm the two naming schemes line up.
parser.add_argument('--method', type=str, required=True,
                    choices=['dgmw1', 'dgmw2'], help='Method to run.')
# parser.add_argument('--cfg_file',default=None,type=str,required=False, help='Path to the configuration file')
cfg = parser.parse_args()
# if cfg.method == "DGMw":
#     if cfg.dataset == "mnist":
#         cfg_file = 'cfg/cfg_mnist_dgmw.yml'
#         cfg_from_file(cfg_file)
#     elif cfg.dataset == "svhn":
#         cfg_file = 'cfg/cfg_svhn_dgmw.yml'
#         cfg_from_file(cfg_file)
# elif cfg.method == "DGMa":
#     raise NotImplementedError
#     if cfg.dataset == "mnist":
#         cfg_file = 'cfg/cfg_mnist_dgma.yml'
#         cfg_from_file(cfg_file)
#     elif cfg.dataset == "svhn":
#         cfg_file = 'cfg/cfg_svhn_dgma.yml'
#         cfg_from_file(cfg_file)
# Pick the config from the dataset/method pair (e.g. 'cfg/cfg_svhn_dgmw1.yml')
# and merge it into the global `opt`.
cfg_file = 'cfg/cfg_{}_{}.yml'.format(cfg.dataset, cfg.method)
cfg_from_file(cfg_file)
print(opt)
#######################################################################################################################
# Resolve the compute device; opt.device initially holds a GPU index.
opt.device = torch.device(
    "cuda:" + str(opt.device) if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.cuda.set_device(opt.device)
print(opt)
# Create output directories, ignoring "already exists" errors.
try:
    os.makedirs(opt.outf)
except OSError:
    pass
try:
    os.makedirs(opt.outf_models)
except OSError:
    pass
try:
    os.makedirs(opt.outf + '/mask_histo')
# NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
# `except OSError` (as used above) would be safer here.
except:
    pass
# Select the dataset loader and the model/approach implementations from the
# loaded configuration.
if opt.dataset == "mnist":
    from dataloaders import split_MNIST as dataloader
elif opt.dataset == "svhn":
    from dataloaders import split_SVHN as dataloader
if opt.method == "DGMw":
    from networks import net_DGMw as model
    from approaches import DGMw as approach
elif opt.method == "DGMa":
    from networks import net_DGMa as model
    from approaches import DGMa as approach
# Seed every RNG (python / torch / numpy / cuda) for reproducibility.
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(opt.manualSeed)
print('Load data...')
data, taskcla, inputsize = dataloader.get(seed=opt.manualSeed,
                                          data_root=opt.dataroot + str(
                                              opt.imageSize), n_classes=1,
                                          imageSize=opt.imageSize)
print('Input size =', inputsize, '\nTask info =', taskcla)
# Relabel every split so task t's samples all carry label t: one class per
# task, tasks are learned incrementally.
for t in range(10):
    data[t]['train']['y'].data.fill_(t)
    data[t]['test']['y'].data.fill_(t)
    data[t]['valid']['y'].data.fill_(t)
# Network hyper-parameters from the config.
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nb_label = 10
# Channel count follows the dataset (grayscale MNIST vs RGB SVHN).
if opt.dataset == 'mnist':
    nc = 1
elif opt.dataset == 'svhn':
    nc = 3
# classes are added one by one, we innitialize G with one head output
netG = model.netG(nz, ngf, nc, opt.smax_g, n_classes=1)
print(netG)
netD = model.netD(ndf, nc)
print(netD)
# Timestamped log directory; wiped if a directory of the same name exists.
log_dir = opt.log_dir + datetime.datetime.fromtimestamp(ts).strftime(
    '%Y_%m_%d_%H_%M_%S')
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)
# Training driver wrapping generator/discriminator optimization.
appr = approach.App(model, netG, netD, log_dir, opt.outf, niter=opt.niter,
                    batchSize=opt.batchSize,
                    imageSize=opt.imageSize, nz=int(opt.nz), nb_label=nb_label,
                    cuda=torch.cuda.is_available(), beta1=opt.beta1,
                    lr_D=opt.lr_D, lr_G=opt.lr_G, lamb_G=opt.lamb_G,
                    reinit_D=opt.reinit_D, lambda_adv=opt.lambda_adv,
                    lambda_wassersten=opt.lambda_wasserstein,
                    dataset=opt.dataset, store_model=opt.store_models)
def n_parameters(model):
    """Return the total number of scalar parameters in `model`.

    Args:
        model: Any object exposing `.parameters()` yielding tensors with a
            `.shape` attribute (e.g. a `torch.nn.Module`).

    Returns:
        int: the summed element count over all parameters.
    """
    from operator import mul
    from functools import reduce
    # Seed `reduce` with 1 so 0-dim (scalar) parameters, whose shape is the
    # empty tuple, count as one element instead of raising TypeError.
    return sum(reduce(mul, p.shape, 1) for p in model.parameters())
# Train the ten single-class tasks sequentially; after each task report the
# generator's size (heads are added per task, so the parameter count grows).
for t in range(10):
    test_acc_task, conf_matrixes_task, mask_G = appr.train(data, t,
                                                           smax_g=opt.smax_g,
                                                           use_aux_G=opt.aux_G)
    print('Task {}: {:,} parameters'.format(t, n_parameters(netG)))
| 33.381579
| 119
| 0.638155
|
4a1156278c12c092d19227850c57362fa9204aee
| 693
|
py
|
Python
|
mysite/polls/models.py
|
KarolBautrel/Django-Docs-Tutorial
|
7da5bdb5369ad9cb14d66e5f2f4c18472b26811f
|
[
"MIT"
] | null | null | null |
mysite/polls/models.py
|
KarolBautrel/Django-Docs-Tutorial
|
7da5bdb5369ad9cb14d66e5f2f4c18472b26811f
|
[
"MIT"
] | null | null | null |
mysite/polls/models.py
|
KarolBautrel/Django-Docs-Tutorial
|
7da5bdb5369ad9cb14d66e5f2f4c18472b26811f
|
[
"MIT"
] | null | null | null |
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question together with its publication timestamp."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        # Shown wherever the object is rendered (admin, shell, templates).
        return self.question_text

    def was_published_recently(self):
        """Return True if pub_date lies within the last day and not in the future."""
        now = timezone.now()
        one_day_ago = now - datetime.timedelta(days=1)
        return one_day_ago <= self.pub_date <= now
class Choice(models.Model):
    """One selectable answer for a `Question`, with its running vote tally."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        # Shown wherever the object is rendered (admin, shell, templates).
        return self.choice_text
| 25.666667
| 71
| 0.720058
|
4a1156f534fa38002b05774568644e61e49c29e9
| 36,094
|
py
|
Python
|
tensorflow/python/eager/forwardprop_test.py
|
deepmedia/tensorflow
|
ac8aad5c55838566a26ed6725c966d399319c831
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/eager/forwardprop_test.py
|
deepmedia/tensorflow
|
ac8aad5c55838566a26ed6725c966d399319c831
|
[
"Apache-2.0"
] | 10
|
2021-08-03T08:42:38.000Z
|
2022-01-03T03:29:12.000Z
|
tensorflow/python/eager/forwardprop_test.py
|
deepmedia/tensorflow
|
ac8aad5c55838566a26ed6725c966d399319c831
|
[
"Apache-2.0"
] | 28
|
2020-02-10T07:03:06.000Z
|
2022-01-12T11:19:20.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gc
import weakref
from absl.testing import parameterized
import numpy as np
from tensorflow.python import pywrap_tfe
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import forwardprop
from tensorflow.python.eager import forwardprop_util
from tensorflow.python.eager import tape as tape_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import test
from tensorflow.python.util import nest
_X11_35_DERIVATIVES = [
1.1 ** 3.5,
3.5 * 1.1 ** 2.5,
3.5 * 2.5 * 1.1 ** 1.5,
3.5 * 2.5 * 1.5 * 1.1 ** 0.5]
# TODO(allenl): Move this somewhere useful once forward gradients are stable.
def _jvp(f, primals, tangents):
  """Compute the jacobian of `f` at `primals` multiplied by `tangents`."""
  accumulator = forwardprop.ForwardAccumulator(primals, tangents)
  with accumulator:
    outputs = f(*primals)
  # Queried after the accumulator stops recording; outputs not connected to
  # the primals yield zeros rather than None.
  jvps = accumulator.jvp(
      outputs, unconnected_gradients=UnconnectedGradients.ZERO)
  return outputs, jvps
def _jacfwd(f, primals):
  """Compute the jacobian of `f` at `primals` using forward-mode autodiff."""
  jac_flat = []
  flat_primals = nest.flatten(primals)
  # One zero tangent per flattened primal; a single entry at a time is swapped
  # for a one-hot mask in the loop below.
  tangent_mask = [array_ops.zeros_like(primal) for primal in flat_primals]
  for primal_index, primal in enumerate(flat_primals):
    primal_vector = array_ops.reshape(primal, [-1])
    primal_vector_length = array_ops.size(primal_vector)
    jac_columns = []
    # One forward-mode pass per scalar element of this primal; each pass
    # yields one column of the jacobian.
    for element_index in math_ops.range(primal_vector_length):
      mask = array_ops.one_hot(element_index, primal_vector_length)
      tangent_mask[primal_index] = array_ops.reshape(mask,
                                                     array_ops.shape(primal))
      jac_columns.append(
          nest.map_structure(
              functools.partial(array_ops.reshape, shape=[-1]),
              # [1] selects the JVP from the (primal_out, jvp) pair.
              _jvp(f, primals,
                   nest.pack_sequence_as(primals, tangent_mask))[1]))
    jac_flat.append(array_ops.stack(jac_columns, axis=1))
    # Reset this primal's tangent to zero before moving to the next primal.
    tangent_mask[primal_index] = array_ops.zeros_like(primal)
  return nest.pack_sequence_as(primals, jac_flat)
def _jvp_batch(f, primal, tangents):
  """Vectorize `_jvp` of `f` at `primal` over a batch of `tangents`."""
  traced_f = def_function.function(f)
  single_jvp = functools.partial(_jvp, traced_f, primal)
  return control_flow_ops.vectorized_map(single_jvp, tangents)
def _jvp_batch_matmul(f, primals, tangent_batch):
  """Compute the jacobian of `f` at `primals` multiplied by `tangents`."""
  jacobian = _jacfwd(f, primals)
  def _apply_jacobian(tangent):
    # Flatten the tangent into a column vector, multiply by the jacobian,
    # then restore the tangent's original shape.
    column = array_ops.expand_dims(array_ops.reshape(tangent, shape=[-1]), 1)
    jvp_column = math_ops.matmul(jacobian, column)
    return array_ops.reshape(jvp_column, tangent.shape)
  return control_flow_ops.vectorized_map(_apply_jacobian, tangent_batch)
def _grad(f, argnums=0):
  """Return a function which computes the gradient of `f`."""
  def _gradient_function(*params):
    tape = backprop.GradientTape()
    with tape:
      tape.watch(params)
      outputs = f(*params)
    # Unconnected parameters get zeros so the result always has a value.
    return tape.gradient(
        outputs,
        params[argnums],
        unconnected_gradients=UnconnectedGradients.ZERO)
  return _gradient_function
def _gradfwd(f, argnums=0, f_out_dtypes=dtypes.float32):
  """Return a function which computes the gradient of `f` in forward mode."""
  def _f(*params):
    # One forward-mode pass with a one-hot tangent on the `argnums` parameter;
    # each call yields one slice of the gradient.
    def _single_jvp(param_mask):
      with forwardprop.ForwardAccumulator(primals=[params[argnums]],
                                          tangents=param_mask) as acc:
        primals_out = f(*params)
      return acc.jvp(primals_out)
    # Building up a function to run with pfor takes a bit too long since we're
    # only running it a handful of times.
    return _vectorize_parameters(_single_jvp, [params[argnums]],
                                 use_pfor=False, dtype=f_out_dtypes)
  return _f
def _hvp(f, primals, tangents):
  """Compute a forward-over-back Hessian-vector product."""
  # The accumulator is outermost so it forward-propagates through both the
  # function evaluation and the backward pass, turning the gradient into a
  # Hessian-vector product.
  with forwardprop.ForwardAccumulator(primals, tangents) as acc:
    with backprop.GradientTape() as tape:
      tape.watch(primals)
      f_out = f(*primals)
      # `f` must be scalar-valued for the Hessian to be well defined here.
      f_out.shape.assert_is_compatible_with([])
    return acc.jvp(tape.gradient(f_out, primals))
def _vectorize_parameters(f, params, use_pfor, dtype):
  """Loop over `params`, providing a one-hot mask to `f` for each."""
  parameter_sizes = [array_ops.size(param) for param in params]
  total_size = math_ops.add_n(parameter_sizes)
  def _wrapper(index):
    # Build one one-hot vector over ALL parameters, then split and reshape it
    # so each piece lines up with the corresponding parameter.
    full_onehot = array_ops.one_hot(index, total_size)
    split_onehot = array_ops.split(full_onehot, parameter_sizes)
    tangents = [array_ops.reshape(v, array_ops.shape(param))
                for param, v in zip(params, split_onehot)]
    return f(tangents)
  if use_pfor:
    return control_flow_ops.vectorized_map(_wrapper, math_ops.range(total_size))
  else:
    # `dtype` is only required on this branch: map_fn needs output dtypes.
    return map_fn.map_fn(_wrapper, math_ops.range(total_size), dtype)
def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
  """Computes the full Hessian matrix for the scalar-valued f(*params).

  Args:
    f: A function taking `params` and returning a scalar.
    params: A possibly nested structure of tensors.
    use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.
    dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes
      (e.g. `tf.float32`) matching the structure of `f`'s returns.

  Returns:
    A possibly nested structure of matrix slices corresponding to `params`. Each
    slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`)
    in the corresponding element of `params` and `P` is the total number of
    parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating
    along the second axis.
  """
  hvp_for_params = functools.partial(_hvp, f, params)
  return _vectorize_parameters(
      hvp_for_params, params, use_pfor=use_pfor, dtype=dtype)
def _test_gradients(testcase,
                    f,
                    primals,
                    order,
                    delta=1e-3,
                    rtol=1e-2,
                    atol=1e-6):
  """Tests forward/backward jacobians of `f`'s [0, `order`)-order gradients."""
  if order < 1:
    raise ValueError(
        "`order` should be a positive integer, got '{}'.".format(order))
  if order > 1:
    # Recurse on the gradient of `f` so every order below `order` is also
    # checked with the same tolerances.
    _test_gradients(
        testcase=testcase,
        f=_grad(f),
        primals=primals,
        order=order - 1,
        delta=delta,
        rtol=rtol,
        atol=atol)
  # Compare both symbolic modes against a numeric (finite-difference) jacobian.
  sym_jac_back, num_jac = gradient_checker_v2.compute_gradient(
      f, primals, delta=delta)
  testcase.assertAllClose(num_jac, sym_jac_back, rtol=rtol, atol=atol)
  sym_jac_fwd = _jacfwd(f, primals)
  testcase.assertAllClose(num_jac, sym_jac_fwd, rtol=rtol, atol=atol)
  # And the symbolic computations should be much closer.
  testcase.assertAllClose(sym_jac_back, sym_jac_fwd)
class ForwardpropTest(test.TestCase, parameterized.TestCase):
def testJVPFunction(self):
add_outputs = (constant_op.constant(4.),)
vp, = forwardprop._jvp_dispatch(
op_name="Add",
attr_tuple=(),
inputs=(constant_op.constant(1.), constant_op.constant(3.)),
outputs=add_outputs,
tangents=(
constant_op.constant(1.),
constant_op.constant(5.),
))
self.assertAllClose(1. + 5., self.evaluate(vp))
mul_outputs = (constant_op.constant([20.]),)
vp, = forwardprop._jvp_dispatch(
op_name="Mul",
attr_tuple=(),
inputs=(constant_op.constant([4.]), constant_op.constant([5.])),
outputs=mul_outputs,
tangents=(
constant_op.constant([2.]),
constant_op.constant([3.]),
))
self.assertAllClose([2. * 5. + 3. * 4.], self.evaluate(vp))
def testNonDifferentiableOpWithInputTangent(self):
x = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(x, 2.) as acc1:
with forwardprop.ForwardAccumulator(x, 2.) as acc2:
y = array_ops.zeros_like(x)
self.assertIsNone(acc1.jvp(y))
self.assertIsNone(acc2.jvp(y))
def testRunFunctionsEagerly(self):
try:
original_setting = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(True)
x = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(x, 2.) as acc:
y = x * 3.
self.assertAllClose(6., acc.jvp(y))
finally:
def_function.run_functions_eagerly(original_setting)
def testJVPFunctionUsedByAccumulatorForOps(self):
previous_fn = forwardprop._jvp_dispatch
try:
x = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(x, 2.) as acc:
y = x + x
pywrap_tfe.TFE_Py_RegisterJVPFunction(
lambda *args, **kwargs: [constant_op.constant(-15.)])
z = x + x
self.assertAllClose(4., acc.jvp(y))
self.assertAllClose(-15., acc.jvp(z))
finally:
pywrap_tfe.TFE_Py_RegisterJVPFunction(previous_fn)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFunctionCacheLimited(self):
# Every time this test is executed, it will create a slightly larger Tensor
# and push it through Add's gradient. Since we check for new pyobjects after
# the warmup, retracing each time without cleaning up old traces fails the
# test. It works because of experimental_relax_shapes.
for _ in range(forwardprop._TRACE_COUNT_LIMIT):
execution_count = getattr(self, "_execution_count", 0)
self._execution_count = execution_count + 1
x = array_ops.zeros([execution_count])
with forwardprop.ForwardAccumulator(
x, array_ops.ones_like(x)) as acc:
y = x + x
self.assertAllClose(2. * array_ops.ones_like(x), acc.jvp(y))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMultipleWatchesAdd(self):
x = constant_op.constant(-2.)
with self.assertRaisesRegexp(ValueError, "multiple times"):
with forwardprop.ForwardAccumulator(
[x, x], [1., 2.]):
pass
with forwardprop.ForwardAccumulator(
[x], [3.]) as acc:
self.assertAllClose(3., acc.jvp(x))
acc._watch(x, constant_op.constant(10.))
self.assertAllClose(13., acc.jvp(x))
acc._watch(x, constant_op.constant(11.))
self.assertAllClose(24., acc.jvp(x))
y = constant_op.constant(3.) * x
self.assertAllClose(24., acc.jvp(x))
self.assertAllClose(24. * 3., acc.jvp(y))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testReenter(self):
x = constant_op.constant(-2.)
with forwardprop.ForwardAccumulator(x, 1.5) as acc:
self.assertAllClose(1.5, acc.jvp(x))
y = 4. * x
self.assertAllClose(6., acc.jvp(y))
with self.assertRaisesRegexp(ValueError, "already recording"):
with acc:
pass
z = 4. * x
self.assertIsNone(acc.jvp(z))
with acc:
yy = y * y
self.assertAllClose(6. * -8. * 2., acc.jvp(yy))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testDeadTensorsJVPCleared(self):
x = array_ops.ones([100])
x_weak = weakref.ref(x)
grad_tensor = constant_op.constant(array_ops.zeros([100]))
grad_tensor_weak = weakref.ref(grad_tensor)
with forwardprop.ForwardAccumulator(x, grad_tensor) as acc:
derived_tensor = constant_op.constant(2.) * x
del grad_tensor
self.assertAllClose(array_ops.zeros([100]), acc.jvp(x))
del x
self.assertIsNone(x_weak())
self.assertIsNone(grad_tensor_weak())
derived_tensor_weak = weakref.ref(derived_tensor)
derived_tensor_grad = acc.jvp(derived_tensor)
derived_tensor_grad_weak = weakref.ref(derived_tensor_grad)
del derived_tensor
del derived_tensor_grad
self.assertIsNone(derived_tensor_weak())
self.assertIsNone(derived_tensor_grad_weak())
@test_util.assert_no_new_pyobjects_executing_eagerly
def testJVPManual(self):
primal, tangent = _jvp(math_ops.sin, (constant_op.constant(0.1),),
(constant_op.constant(0.2),))
self.assertAllClose(math_ops.sin(0.1), primal)
self.assertAllClose(math_ops.cos(0.1) * 0.2, tangent)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNumericHigherOrder(self):
def f(x):
pointwise = math_ops.sin(x) * math_ops.tan(x)
return math_ops.reduce_prod(
pointwise + math_ops.reduce_sum(pointwise), axis=1)
_test_gradients(
self, f, [constant_op.constant([[2.0, 3.0], [1.0, 4.0]])], order=3)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
def grad(dy):
return dy * math_ops.cos(x)
return np.sin(x.numpy()), grad
_test_gradients(self, f, [constant_op.constant([1., 2.])], order=3)
# TODO(allenl): investigate why assert_no_new_pyobjects_executing_eagerly fails around this test?
def testExceptionCustomGradientRecomputeGradForward(self):
@custom_gradient.recompute_grad
def f(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
with self.assertRaisesRegexp(NotImplementedError,
"recompute_grad tried to transpose"):
primals = [constant_op.constant([1.])]
sym_jac_fwd = _jacfwd(f, primals)
def testExceptionInCustomGradientNotSwallowed(self):
@custom_gradient.custom_gradient
def f(unused_x):
def grad(unused_dy):
raise ValueError("test_error_string")
return 1., grad
c = constant_op.constant(1.)
d = constant_op.constant(2.)
with forwardprop.ForwardAccumulator(c, d):
with self.assertRaisesRegexp(ValueError, "test_error_string"):
f(c)
@parameterized.named_parameters(
[("EluM5", -0.5, nn_ops.elu),
("EluP5", [0.5], nn_ops.elu),
("SwishP5", 0.5, nn_impl.swish),
("SwishM5", [-0.5], nn_impl.swish)])
def testElementwiseNNOps(self, value, op_fn):
_test_gradients(self, op_fn, [constant_op.constant(value)], order=3)
def testFusedBatchNormGradsInference(self):
if test.is_built_with_rocm():
# This test was addeded recently and has been failing on the ROCm
# platform, since it was added.
# TODO(rocm): do root cause analysis of test failure and fix it.
self.skipTest("Test fails on ROCm platform, needs further analysis")
x_shape = [4, 10, 10, 2]
increment = 3. / math_ops.reduce_prod(
constant_op.constant(x_shape, dtype=dtypes.float32))
x = array_ops.reshape(math_ops.range(-2., 1., increment), x_shape)
scale = constant_op.constant([1., 1.1])
offset = constant_op.constant([-0.5, -0.6])
mean = constant_op.constant([-1.3, 1.4])
variance = constant_op.constant([0.7, 0.9])
epsilon = 0.001
def _bn_fused(x_arg, scale_arg, offset_arg):
return nn_impl.fused_batch_norm(x_arg, scale_arg, offset_arg,
mean, variance,
epsilon=epsilon, is_training=False)[0]
_test_gradients(self, _bn_fused, [x, scale, offset],
order=2, atol=1e-2)
def testPushPopAccumulatorState(self):
# Note that this example is somewhat contrived. push_forwardprop_state is
# probably only useful in practice for building functions that compute jvps
# alongside their usual outputs.
c = constant_op.constant(1.)
d = constant_op.constant(2.)
with forwardprop.ForwardAccumulator(c, d) as acc:
@custom_gradient.custom_gradient
def f(x):
y = math_ops.sin(x.numpy())
def grad(dy):
with forwardprop_util.push_forwardprop_state():
x_copy = constant_op.constant(x.numpy())
acc._watch(x_copy, dy)
y_copy = math_ops.sin(x_copy)
return dy * acc.jvp(y_copy)
return y, grad
output = f(c)
self.assertAllClose(d * math_ops.cos(c), acc.jvp(output))
@parameterized.named_parameters(
[("Order{}".format(order), order, expected)
for order, expected in enumerate(_X11_35_DERIVATIVES)])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testHigherOrderPureForward(self, order, expected):
def _forwardgrad(f):
def _compute_forwardgrad(primal):
tangent = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(primal, tangent) as acc:
primal_out = f(primal)
return acc.jvp(primal_out)
return _compute_forwardgrad
def _forward(x):
return x ** 3.5
f = _forward
primal = constant_op.constant(1.1)
for _ in range(order):
f = _forwardgrad(f)
self.assertAllClose(expected, f(primal))
@parameterized.named_parameters(
[("Function", def_function.function),
("NoFunction", lambda f: f)])
def testGradPureForward(self, decorator):
@decorator
def f(x):
return x ** 3.5
primal = constant_op.constant(1.1)
with forwardprop.ForwardAccumulator(
primal, constant_op.constant(1.)) as outer_acc:
with forwardprop.ForwardAccumulator(
primal, constant_op.constant(1.)) as acc:
primal_out = f(primal)
inner_jvp = acc.jvp(primal_out)
outer_jvp = outer_acc.jvp(inner_jvp)
self.assertAllClose(1.1 ** 3.5, primal_out)
self.assertAllClose(3.5 * 1.1 ** 2.5, inner_jvp)
self.assertAllClose(3.5 * 2.5 * 1.1 ** 1.5, outer_jvp)
self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out)))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testJVPPacking(self):
two = constant_op.constant(2.)
primal_in = constant_op.constant(1.)
inner_jvp = constant_op.constant(3.)
with forwardprop.ForwardAccumulator(
[primal_in, inner_jvp],
[constant_op.constant(2.), constant_op.constant(4.)]) as outer_acc:
with forwardprop.ForwardAccumulator(
primal_in, inner_jvp) as inner_acc:
packed_input_indices, packed_input_tangents = (
forwardprop_util.pack_tangents([primal_in]))
self.assertAllClose([3., 2., 4.], packed_input_tangents)
expected_indices = (
# inner_acc watches primal_in
((0, 1),),
# outer_acc watches primal_in and inner_jvp
((0, 2),
(1, 3)))
self.assertAllEqual(expected_indices, packed_input_indices)
primal_out = primal_in * two
self.assertAllClose(6., inner_acc.jvp(primal_out))
self.assertAllClose(4., outer_acc.jvp(primal_out))
self.assertAllClose(8., outer_acc.jvp(inner_acc.jvp(primal_out)))
packed_output_indices, packed_output_tangents = (
forwardprop_util.pack_tangents([primal_out]))
self.assertAllClose([6., 4., 8.], packed_output_tangents)
self.assertAllEqual(expected_indices, packed_output_indices)
def testFunctionGradInFunctionPureForward(self):
@def_function.function
def take_gradients():
@def_function.function
def f(x):
return x ** 3.5
primal = constant_op.constant(1.1)
with forwardprop.ForwardAccumulator(
primal, constant_op.constant(1.)) as outer_acc:
with forwardprop.ForwardAccumulator(
primal, constant_op.constant(1.)) as acc:
primal_out = f(primal)
inner_jvp = acc.jvp(primal_out)
outer_jvp = outer_acc.jvp(inner_jvp)
self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out)))
return primal_out, inner_jvp, outer_jvp
primal_out, inner_jvp, outer_jvp = take_gradients()
self.assertAllClose(1.1 ** 3.5, primal_out)
self.assertAllClose(3.5 * 1.1 ** 2.5, inner_jvp)
self.assertAllClose(3.5 * 2.5 * 1.1 ** 1.5, outer_jvp)
def testFunctionGrad(self):
@def_function.function
def f(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
_test_gradients(
self,
f,
[constant_op.constant([1., 2.])],
order=3)
def testReusingJVP(self):
m1 = random_ops.random_uniform((256, 2096))
m2 = array_ops.identity(m1)
tangent1 = random_ops.random_uniform((256, 2096))
tangent2 = random_ops.random_uniform((256, 2096))
matmul = def_function.function(math_ops.matmul)
with forwardprop.ForwardAccumulator(
primals=[m1, m2], tangents=[tangent1, tangent2]) as acc:
result1 = matmul(m1, m1, transpose_b=True)
result2 = matmul(m2, m2, transpose_b=True)
def _expected(mat, tangent):
return (math_ops.matmul(tangent, mat, transpose_b=True)
+ math_ops.matmul(mat, tangent, transpose_b=True))
self.assertAllClose(result1, result2)
self.assertAllClose(_expected(m1, tangent1), acc.jvp(result1))
self.assertAllClose(_expected(m2, tangent2), acc.jvp(result2))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testHVPMemory(self):
def fun(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
primals = constant_op.constant([1., 2., 3.])
tangents = constant_op.constant([3., 4., 5.])
_hvp(fun, (primals,), (tangents,))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testHVPCorrectness(self):
def fun(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
primals = constant_op.constant([1., 2., 3.])
tangents = constant_op.constant([3., 4., 5.])
forwardback_hvp_eager, = _hvp(fun, (primals,), (tangents,))
forwardback_hvp_function, = def_function.function(_hvp)(fun, (primals,),
(tangents,))
with backprop.GradientTape(persistent=True) as g:
g.watch(primals)
with backprop.GradientTape() as gg:
gg.watch(primals)
out = fun(primals)
grad = array_ops.unstack(gg.gradient(out, primals))
hessian = []
for i in range(3):
hessian.append(g.gradient(grad[i], primals))
hessian = array_ops.stack(hessian, axis=0)
backback_hvp = math_ops.tensordot(hessian, tangents, axes=1)
self.assertAllClose(backback_hvp, forwardback_hvp_eager)
self.assertAllClose(backback_hvp, forwardback_hvp_function)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testShouldRecordAndStopRecord(self):
c = constant_op.constant(1.)
c_tangent = constant_op.constant(2.)
with forwardprop.ForwardAccumulator(c, c_tangent) as acc:
with backprop.GradientTape() as tape:
self.assertFalse(tape_lib.should_record_backprop([c]))
self.assertEqual(1,
pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
tape.watch(c)
self.assertEqual(2,
pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
self.assertTrue(tape_lib.should_record_backprop([c]))
with tape_lib.stop_recording():
self.assertEqual(0,
pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
self.assertFalse(tape_lib.should_record_backprop([c]))
d = c * 2.
self.assertEqual(2,
pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
self.assertTrue(tape_lib.should_record_backprop([c]))
self.assertFalse(tape_lib.should_record_backprop([d]))
self.assertIsNone(acc.jvp(d))
self.assertIsNone(tape.gradient(d, c))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testRecordingSelectively(self):
c = constant_op.constant(1.)
c_tangent = constant_op.constant(2.)
with forwardprop.ForwardAccumulator(c, c_tangent) as acc:
with backprop.GradientTape(persistent=True) as tape:
tape.watch(c)
with tape_lib.stop_recording():
two = constant_op.constant(2.)
d = c * two
three = constant_op.constant(3.)
e = c * three
self.assertIsNone(acc.jvp(d))
self.assertIsNone(acc.jvp(e))
self.assertIsNone(tape.gradient(d, c))
self.assertIsNone(tape.gradient(e, c))
tape_lib.record_operation_forwardprop_only(
"CustomForwardMul", [d], [c, two],
lambda dd: (two * dd, c * dd), None)
tape_lib.record_operation_backprop_only(
"CustomBackwardMul", [e], [c, three],
lambda de: (three * de, c * de))
self.assertAllClose(4., acc.jvp(d))
self.assertIsNone(acc.jvp(e))
self.assertIsNone(tape.gradient(d, c))
self.assertAllClose(3., tape.gradient(e, c))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testOpWithNoTrainableOutputs(self):
v = variables.Variable(1.)
with forwardprop.ForwardAccumulator(v, 11.):
v.assign_sub(0.5)
self.assertAllClose(0.5, self.evaluate(v))
# TODO(b/141025187): Add a no_new_pyobjects decorator.
def testVariableReadInFunction(self):
v = variables.Variable(1.)
with forwardprop.ForwardAccumulator(v, 11.) as acc:
@def_function.function
def f():
return v.read_value(), 2. * v.read_value()
result = f()
self.assertAllClose((1.0, 2.), result)
self.assertAllClose((11., 22.), acc.jvp(result))
@parameterized.named_parameters(
[("ForwardPropFirst", True),
("TapeFirst", False)])
def testForwardOverBackwardMemoryEfficiency(self, forward_prop_first):
# Watching depends depends on nesting, not creation order
c = constant_op.constant(1.)
if forward_prop_first:
forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
gradient_tape = backprop.GradientTape()
else:
gradient_tape = backprop.GradientTape()
forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
try:
gc.disable()
with gradient_tape as tape:
# Adding and removing the tape multiple times in different nesting
# patterns does not affect watch ordering.
pass
with forward_accumulator as acc:
with gradient_tape as tape:
tape.watch(c)
d = math_ops.cos(c)
self.assertFalse(tape_lib.should_record_backprop((acc.jvp(d),)))
e = math_ops.cos(acc.jvp(d))
math_ops.cos(e)
weak_e = weakref.ref(e)
del e
self.assertIsNone(weak_e())
self.assertIsNone(tape.gradient(acc.jvp(d), c))
finally:
gc.enable()
@parameterized.named_parameters(
[("ForwardPropFirst", True),
("TapeFirst", False)])
def testBackwardOverForward(self, forward_prop_first):
c = constant_op.constant(1.)
# Watching depends depends on nesting, not creation order
if forward_prop_first:
forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
gradient_tape = backprop.GradientTape()
else:
gradient_tape = backprop.GradientTape()
forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
with gradient_tape as tape:
with forward_accumulator as acc:
tape.watch(c)
d = math_ops.cos(c)
self.assertTrue(tape_lib.should_record_backprop((acc.jvp(d),)))
self.assertAllClose(-.1 * math_ops.cos(1.),
tape.gradient(acc.jvp(d), c))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testRecordingWithJVPIndices(self):
c = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(c, 10.) as acc:
packed_input_tangents = forwardprop_util.pack_tangents([c]).tangents
self.assertAllClose([10.], packed_input_tangents)
d = constant_op.constant(2.)
d_tangent = constant_op.constant(3.)
tape_lib.record_operation_forwardprop_only(
"FunctionWithInlineJVPs",
[d] + [d_tangent],
[c] + packed_input_tangents,
None, (((0, 1),),))
self.assertAllClose(3., acc.jvp(d))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testSpecialForwardFunctionUsed(self):
c = constant_op.constant(1.)
d = constant_op.constant(2.)
e = constant_op.constant(3.)
with forwardprop.ForwardAccumulator(c, 10.) as acc:
tape_lib.record_operation(
"ForwardIsSpecial",
[d], [c],
None, lambda jvp: [-2. * jvp])
self.assertAllClose(-20., acc.jvp(d))
tape_lib.record_operation(
"ForwardIsSpecial2",
[], [],
None, lambda: [])
tape_lib.record_operation(
"ForwardIsSpecial3",
[e], [d],
None, lambda x: [x])
self.assertAllClose(-20., acc.jvp(e))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testVariableWatched(self):
v = variables.Variable([1., 2., 3.])
with forwardprop.ForwardAccumulator(
v, constant_op.constant([.1, -.2, .3])) as acc:
self.assertAllClose([.1, -.2, .3], acc.jvp(v))
x = v * 2.
self.assertAllClose([.2, -.4, .6], acc.jvp(x))
x2 = v + .1
self.assertAllClose([.1, -.2, .3], acc.jvp(x2))
def testUnconnectedGradients(self):
x = constant_op.constant(-1.)
with forwardprop.ForwardAccumulator(x, 0.1) as acc:
self.assertAllClose(0.1, acc.jvp(x, unconnected_gradients="zero"))
self.assertAllClose(0.1, acc.jvp(x, unconnected_gradients="none"))
y = constant_op.constant(-2.)
self.assertAllClose(0.0, acc.jvp(y, unconnected_gradients="zero"))
self.assertIsNone(acc.jvp(y, unconnected_gradients="none"))
  # TODO(kkb): One weakref instance is created with warmup_iters=2, investigate.
  @test_util.assert_no_new_pyobjects_executing_eagerly(warmup_iters=3)
  def testVariableWatchedFunction(self):
    """Accumulator inside a tf.function watches a lazily created variable."""
    class _Model(module.Module):
      def __init__(self):
        # Variable is created on first trace, not at construction time.
        self._v = None
      @def_function.function
      def compute_jvps(self):
        if self._v is None:
          self._v = variables.Variable([1., 2., 3.])
        with forwardprop.ForwardAccumulator(
            self._v, constant_op.constant([.1, -.2, .3])) as acc:
          x = self._v * 2.
          x2 = self._v + .1
          return acc.jvp((self._v, x, x2))
    model = _Model()
    v_jvp, x_jvp, x2_jvp = model.compute_jvps()
    self.assertAllClose([.1, -.2, .3], v_jvp)
    self.assertAllClose([.2, -.4, .6], x_jvp)
    self.assertAllClose([.1, -.2, .3], x2_jvp)
  # NOTE: assert_no_new_pyobjects_executing_eagerly fails flakily on this
  # test... could be something wrong with the test decorator, or some sort of
  # nondeterministic caching.
  def testMirroredVariableWatched(self):
    """JVPs propagate through a MirroredStrategy-distributed variable."""
    def _replicated(input_tangent):
      # Runs once per replica; assertions check per-replica JVPs.
      with forwardprop.ForwardAccumulator(v, input_tangent) as acc:
        self.assertAllClose([.1, -.2, .3], acc.jvp(v))
        x = v * 2.
        self.assertAllClose([.2, -.4, .6], acc.jvp(x))
        x2 = v + .1
        self.assertAllClose([.1, -.2, .3], acc.jvp(x2))
    strategy = mirrored_strategy.MirroredStrategy()
    with strategy.scope():
      v = variables.Variable([1., 2., 3.])
    strategy.run(_replicated, args=(constant_op.constant([.1, -.2, .3]),))
  # TODO(b/141025187): Add a no_new_pyobjects decorator.
  def testArgumentUnused(self):
    """A function that ignores its watched argument yields no JVP."""
    v = constant_op.constant(1.)
    with forwardprop.ForwardAccumulator(v, 11.) as acc:
      @def_function.function
      def _f(x):
        # Output does not depend on x, so the result carries no tangent.
        del x
        return constant_op.constant(1.)
      result = _f(v)
      self.assertAllClose(1.0, result)
      self.assertIsNone(acc.jvp(result))
@def_function.function
def _has_loop(iters, y):
  """Return sum over i in [0, iters) of y * i, via an autograph while loop."""
  ret = 0.
  for i in math_ops.range(iters):
    ret += y * math_ops.cast(i, dtypes.float32)
  return ret
@def_function.function
def _has_cond(k, y):
  """Return 3*y when k > 1 else 0, via an autograph cond."""
  if k > 1:
    ret = 3. * y
  else:
    ret = 0.
  return ret
@def_function.function
def _fprop_while(iters, y):
  """JVP of a while loop computed with a ForwardAccumulator inside the function."""
  with forwardprop.ForwardAccumulator(y, 1.) as acc:
    ret = 0.
    for i in math_ops.range(iters):
      ret += y * math_ops.cast(i, dtypes.float32)
  return acc.jvp(ret)
@def_function.function
def _fprop_cond(k, y):
  """JVP of a cond computed with a ForwardAccumulator inside the function."""
  with forwardprop.ForwardAccumulator(y, 1.) as acc:
    if k > 1:
      ret = 3. * y
    else:
      ret = 0.
  return acc.jvp(ret)
class ControlFlowTests(test.TestCase):
  """Forward-mode autodiff interacting with tf.function control flow."""
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testOfFunctionWhile(self):
    """Accumulator outside the function; loop inside (_has_loop)."""
    y = constant_op.constant(1.)
    with forwardprop.ForwardAccumulator(y, 1.) as acc:
      # d/dy sum_{i<5} y*i = 0+1+2+3+4 = 10.
      self.assertAllClose(
          10., acc.jvp(_has_loop(constant_op.constant(5), y)))
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testOfFunctionCond(self):
    """Accumulator outside the function; cond inside (_has_cond)."""
    y = constant_op.constant(1.)
    with forwardprop.ForwardAccumulator(y, 1.) as acc:
      self.assertAllClose(
          3., acc.jvp(_has_cond(constant_op.constant(5), y)))
      self.assertAllClose(
          0., acc.jvp(_has_cond(constant_op.constant(0), y)))
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testInFunctionWhile(self):
    """Accumulator and loop both inside the function (_fprop_while)."""
    self.assertAllClose(
        10., _fprop_while(constant_op.constant(5), constant_op.constant(1.)))
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testInFunctionCond(self):
    """Accumulator and cond both inside the function (_fprop_cond)."""
    self.assertAllClose(
        3., _fprop_cond(constant_op.constant(5), constant_op.constant(1.)))
    self.assertAllClose(
        0., _fprop_cond(constant_op.constant(0), constant_op.constant(1.)))
class HessianTests(test.TestCase, parameterized.TestCase):
  """Forward-over-backward Hessian computation checks."""
  def testHessian1D(self):
    """Hessian of x^T M x should be M + M^T; eager, traced, and pfor paths."""
    # Note: stolen from ops/gradients_test.py
    m = 4
    rng = np.random.RandomState([1, 2, 3])
    mat_value = rng.randn(m, m).astype("float32")
    x_value = rng.randn(m).astype("float32")
    hess_value = mat_value + mat_value.T
    mat = variables.Variable(mat_value)
    def _f(x):
      # Quadratic form x^T mat x, written with broadcasting.
      return math_ops.reduce_sum(x[:, None] * mat * x[None, :])
    hessian_eager, = _forward_over_back_hessian(
        _f, [constant_op.constant(x_value)],
        use_pfor=False, dtype=[dtypes.float32])
    self.assertAllClose(hess_value, hessian_eager)
    hessian_function, = def_function.function(_forward_over_back_hessian)(
        _f, [constant_op.constant(x_value)],
        use_pfor=False, dtype=[dtypes.float32])
    self.assertAllClose(hess_value, hessian_function)
    hessian_pfor, = def_function.function(_forward_over_back_hessian)(
        _f, [constant_op.constant(x_value)],
        use_pfor=True, dtype=[dtypes.float32])
    self.assertAllClose(hess_value, hessian_pfor)
class JacobianTests(test.TestCase, parameterized.TestCase):
  """Batched-JVP helpers must agree with the matmul reference implementation."""
  @parameterized.parameters([(math_ops.sin, (2, 3), 5),
                             (math_ops.sin, (2, 3, 4), 10)])
  def testJVPBatchCorrectness(self, f, primal_shape, batch_size):
    """Compare _jvp_batch against _jvp_batch_matmul on random tangents."""
    primals = [random_ops.random_uniform(primal_shape)]
    # Leading axis of the tangents is the batch dimension.
    tangent_batch = [random_ops.random_uniform([batch_size, *primal_shape])]
    self.assertAllClose(
        _jvp_batch(f, primals, tangent_batch)[1],
        _jvp_batch_matmul(f, primals, *tangent_batch))
if __name__ == "__main__":
  # TODO(allenl): Also test with 1.x-style graph mode.
  # ForwardAccumulator requires eager execution; enable it before running.
  ops.enable_eager_execution()
  test.main()
| 36.830612
| 99
| 0.674572
|
4a115762be40b04fc530319cd6dc838932529d3c
| 9,792
|
py
|
Python
|
homeassistant/data_entry_flow.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 4
|
2016-12-23T10:36:36.000Z
|
2021-04-22T12:38:16.000Z
|
homeassistant/data_entry_flow.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 9
|
2022-01-27T06:32:10.000Z
|
2022-03-31T07:07:51.000Z
|
homeassistant/data_entry_flow.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 1
|
2020-02-17T07:44:24.000Z
|
2020-02-17T07:44:24.000Z
|
"""Classes to help gather user submissions."""
import abc
import logging
from typing import Any, Dict, List, Optional, cast
import uuid
import voluptuous as vol
from .core import HomeAssistant, callback
from .exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
RESULT_TYPE_FORM = "form"
RESULT_TYPE_CREATE_ENTRY = "create_entry"
RESULT_TYPE_ABORT = "abort"
RESULT_TYPE_EXTERNAL_STEP = "external"
RESULT_TYPE_EXTERNAL_STEP_DONE = "external_done"
# Event that is fired when a flow is progressed via external source.
EVENT_DATA_ENTRY_FLOW_PROGRESSED = "data_entry_flow_progressed"
class FlowError(HomeAssistantError):
    """Base error for data entry flows."""
class UnknownHandler(FlowError):
    """Raised when an unknown handler is specified."""
class UnknownFlow(FlowError):
    """Raised when a flow id does not match a flow in progress."""
class UnknownStep(FlowError):
    """Raised when a handler has no async_step_<step_id> method."""
class AbortFlow(FlowError):
    """Exception to indicate a flow needs to be aborted.

    Raised from inside a step handler; FlowManager._async_handle_step
    converts it into a RESULT_TYPE_ABORT result.
    """
    def __init__(self, reason: str, description_placeholders: Optional[Dict] = None) -> None:
        """Initialize an abort flow exception.

        reason: short identifier for why the flow was aborted.
        description_placeholders: optional values passed through to the
        abort result unchanged.
        """
        super().__init__(f"Flow aborted: {reason}")
        self.reason = reason
        self.description_placeholders = description_placeholders
class FlowManager(abc.ABC):
    """Manage all the flows that are in progress."""
    def __init__(self, hass: HomeAssistant,) -> None:
        """Initialize the flow manager."""
        self.hass = hass
        # Flows in progress, keyed by flow_id (assigned in async_init).
        self._progress: Dict[str, Any] = {}
    @abc.abstractmethod
    async def async_create_flow(
        self,
        handler_key: Any,
        *,
        context: Optional[Dict[str, Any]] = None,
        data: Optional[Dict[str, Any]] = None,
    ) -> "FlowHandler":
        """Create a flow for specified handler.
        Handler key is the domain of the component that we want to set up.
        """
        pass
    @abc.abstractmethod
    async def async_finish_flow(
        self, flow: "FlowHandler", result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Finish a config flow and add an entry."""
        pass
    async def async_post_init(
        self, flow: "FlowHandler", result: Dict[str, Any]
    ) -> None:
        """Entry has finished executing its first step asynchronously."""
        pass
    @callback
    def async_progress(self) -> List[Dict]:
        """Return the flows in progress."""
        # Flows that have not produced a step result yet are hidden.
        return [
            {"flow_id": flow.flow_id, "handler": flow.handler, "context": flow.context}
            for flow in self._progress.values()
            if flow.cur_step is not None
        ]
    async def async_init(
        self, handler: str, *, context: Optional[Dict] = None, data: Any = None
    ) -> Any:
        """Start a configuration flow."""
        if context is None:
            context = {}
        flow = await self.async_create_flow(handler, context=context, data=data)
        if not flow:
            raise UnknownFlow("Flow was not created")
        # Attach manager-owned state before the first step runs.
        flow.hass = self.hass
        flow.handler = handler
        flow.flow_id = uuid.uuid4().hex
        flow.context = context
        self._progress[flow.flow_id] = flow
        result = await self._async_handle_step(flow, flow.init_step, data)
        if result["type"] != RESULT_TYPE_ABORT:
            await self.async_post_init(flow, result)
        return result
    async def async_configure(
        self, flow_id: str, user_input: Optional[Dict] = None
    ) -> Any:
        """Continue a configuration flow."""
        flow = self._progress.get(flow_id)
        if flow is None:
            raise UnknownFlow
        cur_step = flow.cur_step
        # Run user input through the step's schema (a callable) before
        # dispatching it to the step handler.
        if cur_step.get("data_schema") is not None and user_input is not None:
            user_input = cur_step["data_schema"](user_input)
        result = await self._async_handle_step(flow, cur_step["step_id"], user_input)
        if cur_step["type"] == RESULT_TYPE_EXTERNAL_STEP:
            if result["type"] not in (
                RESULT_TYPE_EXTERNAL_STEP,
                RESULT_TYPE_EXTERNAL_STEP_DONE,
            ):
                raise ValueError(
                    "External step can only transition to "
                    "external step or external step done."
                )
            # If the result has changed from last result, fire event to update
            # the frontend.
            if cur_step["step_id"] != result.get("step_id"):
                # Tell frontend to reload the flow state.
                self.hass.bus.async_fire(
                    EVENT_DATA_ENTRY_FLOW_PROGRESSED,
                    {"handler": flow.handler, "flow_id": flow_id, "refresh": True},
                )
        return result
    @callback
    def async_abort(self, flow_id: str) -> None:
        """Abort a flow."""
        if self._progress.pop(flow_id, None) is None:
            raise UnknownFlow
    async def _async_handle_step(
        self, flow: Any, step_id: str, user_input: Optional[Dict]
    ) -> Dict:
        """Handle a step of a flow."""
        method = f"async_step_{step_id}"
        if not hasattr(flow, method):
            # Unknown step: drop the flow entirely, then report the error.
            self._progress.pop(flow.flow_id)
            raise UnknownStep(
                f"Handler {flow.__class__.__name__} doesn't support step {step_id}"
            )
        try:
            result: Dict = await getattr(flow, method)(user_input)
        except AbortFlow as err:
            # Step requested an abort; convert into an abort result dict.
            result = _create_abort_data(
                flow.flow_id, flow.handler, err.reason, err.description_placeholders
            )
        if result["type"] not in (
            RESULT_TYPE_FORM,
            RESULT_TYPE_EXTERNAL_STEP,
            RESULT_TYPE_CREATE_ENTRY,
            RESULT_TYPE_ABORT,
            RESULT_TYPE_EXTERNAL_STEP_DONE,
        ):
            raise ValueError(
                "Handler returned incorrect type: {}".format(result["type"])
            )
        if result["type"] in (
            RESULT_TYPE_FORM,
            RESULT_TYPE_EXTERNAL_STEP,
            RESULT_TYPE_EXTERNAL_STEP_DONE,
        ):
            # Flow is still in progress; remember the step for async_configure.
            flow.cur_step = result
            return result
        # We pass a copy of the result because we're mutating our version
        result = await self.async_finish_flow(flow, dict(result))
        # _async_finish_flow may change result type, check it again
        if result["type"] == RESULT_TYPE_FORM:
            flow.cur_step = result
            return result
        # Abort and Success results both finish the flow
        self._progress.pop(flow.flow_id)
        return result
class FlowHandler:
    """Handle the configuration flow of a component.

    Step handlers return one of the result dicts produced by the helper
    methods below; FlowManager routes on the "type" key.
    """
    # Set by flow manager
    flow_id: str = None  # type: ignore
    hass: Optional[HomeAssistant] = None
    handler: Optional[str] = None
    cur_step: Optional[Dict[str, str]] = None
    context: Dict
    # Set by _async_create_flow callback
    init_step = "init"
    # Set by developer
    VERSION = 1
    @callback
    def async_show_form(
        self,
        *,
        step_id: str,
        data_schema: vol.Schema = None,
        errors: Optional[Dict] = None,
        description_placeholders: Optional[Dict] = None,
    ) -> Dict[str, Any]:
        """Return the definition of a form to gather user input."""
        return {
            "type": RESULT_TYPE_FORM,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "data_schema": data_schema,
            "errors": errors,
            "description_placeholders": description_placeholders,
        }
    @callback
    def async_create_entry(
        self,
        *,
        title: str,
        data: Dict,
        description: Optional[str] = None,
        description_placeholders: Optional[Dict] = None,
    ) -> Dict[str, Any]:
        """Finish config flow and create a config entry."""
        return {
            "version": self.VERSION,
            "type": RESULT_TYPE_CREATE_ENTRY,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "title": title,
            "data": data,
            "description": description,
            "description_placeholders": description_placeholders,
        }
    @callback
    def async_abort(
        self, *, reason: str, description_placeholders: Optional[Dict] = None
    ) -> Dict[str, Any]:
        """Abort the config flow."""
        return _create_abort_data(
            self.flow_id, cast(str, self.handler), reason, description_placeholders
        )
    @callback
    def async_external_step(
        self, *, step_id: str, url: str, description_placeholders: Optional[Dict] = None
    ) -> Dict[str, Any]:
        """Return the definition of an external step for the user to take."""
        return {
            "type": RESULT_TYPE_EXTERNAL_STEP,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "url": url,
            "description_placeholders": description_placeholders,
        }
    @callback
    def async_external_step_done(self, *, next_step_id: str) -> Dict[str, Any]:
        """Mark the external step as done and point at the next step."""
        return {
            "type": RESULT_TYPE_EXTERNAL_STEP_DONE,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": next_step_id,
        }
@callback
def _create_abort_data(
    flow_id: str,
    handler: str,
    reason: str,
    description_placeholders: Optional[Dict] = None,
) -> Dict[str, Any]:
    """Return the result payload describing an aborted flow.

    Shared by FlowHandler.async_abort and FlowManager's AbortFlow
    handling so both produce an identical RESULT_TYPE_ABORT dict.
    (The previous docstring was a copy-paste of the external-step one.)
    """
    return {
        "type": RESULT_TYPE_ABORT,
        "flow_id": flow_id,
        "handler": handler,
        "reason": reason,
        "description_placeholders": description_placeholders,
    }
| 30.88959
| 88
| 0.59998
|
4a1157b3a29586ba74c7327a5c058745401e9ad0
| 1,554
|
py
|
Python
|
bonus_top_interview_questions/395. Longest Substring with At Least K Repeating Characters.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
bonus_top_interview_questions/395. Longest Substring with At Least K Repeating Characters.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
bonus_top_interview_questions/395. Longest Substring with At Least K Repeating Characters.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
"""
Runtime: 86 ms, faster than 53.12% of Python3 online submissions for Longest Substring with At Least K Repeating Characters.
Memory Usage: 14.4 MB, less than 16.35% of Python3 online submissions for Longest Substring with At Least K Repeating Characters.
"""
from typing import List
from typing import Optional
class Solution:
    def longestSubstring(self, s: str, k: int) -> int:
        """Return the length of the longest substring of ``s`` in which
        every character occurs at least ``k`` times.

        Divide and conquer: a character that occurs fewer than ``k``
        times in ``s`` can never belong to a valid substring, so the
        answer lies entirely within one of the pieces obtained by
        splitting on those characters. Recurse on each piece.

        Fixes over the original: the trailing (possibly empty) piece is
        only recursed on when non-empty, and counting avoids the
        ``in dict.keys()`` anti-idiom.
        """
        if not s:
            return 0
        # Character frequencies in s.
        counts: dict = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        # Base case: the whole string already satisfies the constraint.
        if all(c >= k for c in counts.values()):
            return len(s)
        # Characters that can never appear in a valid substring.
        rare = {ch for ch, c in counts.items() if c < k}
        best = 0
        piece: list = []
        for ch in s:
            if ch in rare:
                if piece:
                    best = max(best, self.longestSubstring(''.join(piece), k))
                    piece = []
            else:
                piece.append(ch)
        if piece:  # trailing piece, if any
            best = max(best, self.longestSubstring(''.join(piece), k))
        return best
def main():
    """Run the sample case from the problem statement."""
    solver = Solution()
    answer = solver.longestSubstring("bbaaacbd", 3)
    print('Output:', answer)
    print('Expected:', 3)


if __name__ == "__main__":
    main()
| 32.375
| 129
| 0.517375
|
4a1158870bacfce849fe91319126d6f16ec5cec2
| 5,276
|
py
|
Python
|
src/mpi4py/__init__.py
|
dmitry-kabanov/mpi4py
|
91711bd7e7d72df7a68137080ace15595c681ef8
|
[
"BSD-2-Clause"
] | null | null | null |
src/mpi4py/__init__.py
|
dmitry-kabanov/mpi4py
|
91711bd7e7d72df7a68137080ace15595c681ef8
|
[
"BSD-2-Clause"
] | null | null | null |
src/mpi4py/__init__.py
|
dmitry-kabanov/mpi4py
|
91711bd7e7d72df7a68137080ace15595c681ef8
|
[
"BSD-2-Clause"
] | null | null | null |
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
"""
This is the **MPI for Python** package.
What is *MPI*?
==============
The *Message Passing Interface*, is a standardized and portable
message-passing system designed to function on a wide variety of
parallel computers. The standard defines the syntax and semantics of
library routines and allows users to write portable programs in the
main scientific programming languages (Fortran, C, or C++). Since
its release, the MPI specification has become the leading standard
for message-passing libraries for parallel computers.
What is *MPI for Python*?
=========================
*MPI for Python* provides MPI bindings for the Python programming
language, allowing any Python program to exploit multiple processors.
This package is constructed on top of the MPI-1/2 specifications and
provides an object oriented interface which closely follows MPI-2 C++
bindings.
"""
__version__ = '3.1.0a0'
__author__ = 'Lisandro Dalcin'
__credits__ = 'MPI Forum, MPICH Team, Open MPI Team'
__all__ = ['MPI']
def get_include():
    """Return the directory in the package that contains header files.

    Extension modules that need to compile against mpi4py should use
    this function to locate the appropriate include directory. Using
    Python distutils (or perhaps NumPy distutils)::

      import mpi4py
      Extension('extension_name', ...
                include_dirs=[..., mpi4py.get_include()])

    """
    # pylint: disable=import-outside-toplevel
    import os.path
    # Headers are shipped in the 'include' directory next to this module.
    return os.path.join(os.path.dirname(__file__), 'include')
def get_config():
    """Return a dictionary with information about MPI."""
    # pylint: disable=import-outside-toplevel
    import os.path
    try:
        from configparser import ConfigParser
    except ImportError:  # pragma: no cover
        # Python 2 fallback.
        from ConfigParser import ConfigParser
    cfg_path = os.path.join(os.path.dirname(__file__), 'mpi.cfg')
    parser = ConfigParser()
    parser.read(cfg_path)
    # The [mpi] section holds the build-time MPI configuration.
    return dict(parser.items('mpi'))
def rc(**kargs):  # pylint: disable=invalid-name
    """Set runtime configuration options.

    Parameters
    ----------
    initialize : bool
        Automatic MPI initialization at import (default: True).
    threads : bool
        Request for thread support (default: True).
    thread_level : {'multiple', 'serialized', 'funneled', 'single'}
        Level of thread support to request (default: 'multiple').
    finalize : None or bool
        Automatic MPI finalization at exit (default: None).
    fast_reduce : bool
        Use tree-based reductions for objects (default: True).
    recv_mprobe : bool
        Use matched probes to receive objects (default: True).
    errors : {'exception', 'default', 'fatal'}
        Error handling policy (default: 'exception').
    """
    # Validate every option name before mutating any state, so a bad
    # call leaves the configuration untouched.
    unknown = [key for key in kargs if not hasattr(rc, key)]
    if unknown:
        raise TypeError("unexpected argument '{0}'".format(unknown[0]))
    for key, value in kargs.items():
        setattr(rc, key, value)


# Default option values, stored as attributes of the function itself.
rc.initialize = True
rc.threads = True
rc.thread_level = 'multiple'
rc.finalize = None
rc.fast_reduce = True
rc.recv_mprobe = True
rc.errors = 'exception'
# Register the callable as a pseudo-submodule: ``from mpi4py import rc``.
__import__('sys').modules[__name__ + '.rc'] = rc
def profile(name, **kargs):
    """Support for the MPI profiling interface.

    Locates the named profiler shared library (searching user-supplied
    paths, then the bundled 'lib-pmpi' directory), optionally points the
    profiler's logfile environment variable at *logfile*, and dlopens
    the library globally so its PMPI symbols take effect.

    Parameters
    ----------
    name : str
        Name of the profiler library to load.
    path : list of str, optional
        Additional paths to search for the profiler.
    logfile : str, optional
        Filename prefix for dumping profiler output.
    """
    # pylint: disable=import-outside-toplevel
    import os
    import sys
    from .dl import dlopen, dlerror, RTLD_NOW, RTLD_GLOBAL
    def lookup_dylib(name, path):
        # Search each directory for the library under platform-specific
        # prefix/suffix combinations; fall back to the bare name.
        # pylint: disable=missing-docstring
        pattern = []
        if sys.platform.startswith('win'):  # pragma: no cover
            pattern.append(('', '.dll'))
        elif sys.platform == 'darwin':  # pragma: no cover
            pattern.append(('lib', '.dylib'))
        elif os.name == 'posix':  # pragma: no cover
            pattern.append(('lib', '.so'))
        pattern.append(('', ''))
        for pth in path:
            for (lib, dso) in pattern:
                filename = os.path.join(pth, lib + name + dso)
                if os.path.isfile(filename):
                    return os.path.abspath(filename)
        return None
    logfile = kargs.pop('logfile', None)
    if logfile:
        # Only set the profiler's env var if the user has not already.
        if name in ('mpe',):
            if 'MPE_LOGFILE_PREFIX' not in os.environ:
                os.environ['MPE_LOGFILE_PREFIX'] = logfile
        if name in ('vt', 'vt-mpi', 'vt-hyb'):
            if 'VT_FILE_PREFIX' not in os.environ:
                os.environ['VT_FILE_PREFIX'] = logfile
    path = kargs.pop('path', [])
    if isinstance(path, str):
        path = [path]
    else:
        path = list(path)
    # Always search the profiler libraries bundled with the package.
    prefix = os.path.dirname(__file__)
    path.append(os.path.join(prefix, 'lib-pmpi'))
    filename = lookup_dylib(name, path)
    if filename is None:
        raise ValueError("profiler '{0}' not found".format(name))
    handle = dlopen(filename, RTLD_NOW | RTLD_GLOBAL)
    if handle:
        # Keep the handle alive for the lifetime of the process.
        profile.registry.append((name, (handle, filename)))
    else:
        from warnings import warn
        warn(dlerror())
profile.registry = []
| 31.975758
| 70
| 0.646513
|
4a11592dbe02d6e3c4b32e1fa32f9e5c83af7335
| 13,608
|
py
|
Python
|
ui_games/gui_games.py
|
ikathuria/PythonGames
|
2f3df15c495ddf4ab7d61dbe9bd7f79432a9ac0d
|
[
"MIT"
] | null | null | null |
ui_games/gui_games.py
|
ikathuria/PythonGames
|
2f3df15c495ddf4ab7d61dbe9bd7f79432a9ac0d
|
[
"MIT"
] | null | null | null |
ui_games/gui_games.py
|
ikathuria/PythonGames
|
2f3df15c495ddf4ab7d61dbe9bd7f79432a9ac0d
|
[
"MIT"
] | null | null | null |
"""Different one player games made with python.
The user interface is designed with PyQt5.
"""
import os
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
# personal modules
import rulebook
from blackjack import Ui_blackjack
from hangman import Ui_hangman
from num_guess import Ui_num_guess
from rps import Ui_rps
from ttt import Ui_tic_tac_toe
def resource_path(relative_path):
    """Get absolute path to resource, works for dev and for PyInstaller.

    In a PyInstaller bundle, resources are unpacked under sys._MEIPASS;
    during development they live relative to the current directory.
    """
    if hasattr(sys, "_MEIPASS"):
        base_path = sys._MEIPASS
    else:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
# for stylesheet
button_style = """QPushButton { background-color: rgb(0, 0, 0);
color: rgb(255, 255, 255);
border-radius: 10px; }
QPushButton::pressed { background-color: rgb(255, 255, 255);
color: rgb(0, 0, 0);
border: 1px solid rgb(0, 0, 0);
border-radius: 10px; }
QPushButton::disabled { background-color: rgba(0, 0, 0, 0.2);
color: rgb(75, 75, 75); }"""
class Ui_PythonGames(object):
    """Main-window UI: a game/rules sidebar plus a frame hosting the
    currently selected game's widget."""
    def setupUi(self, PythonGames):
        """Setting up the User Interface."""
        PythonGames.setObjectName("PythonGames")
        # icon
        PythonGames.setWindowIcon(QtGui.QIcon(resource_path("static/images/icon.ico")))
        # setting size at 800x600
        PythonGames.resize(800, 600)
        # expanding
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            PythonGames.sizePolicy().hasHeightForWidth())
        PythonGames.setSizePolicy(sizePolicy)
        # font
        font = QtGui.QFont()
        font.setFamily("Microsoft JhengHei UI")
        PythonGames.setFont(font)
        # stylesheet
        PythonGames.setStyleSheet("background-color: rgb(255, 255, 255);")
        # CENTRAL WIDGET ########################################################
        self.centralwidget = QtWidgets.QWidget(PythonGames)
        self.centralwidget.setObjectName("centralwidget")
        # the main grid layout
        self.main_layout = QtWidgets.QGridLayout(self.centralwidget)
        self.main_layout.setContentsMargins(10, 10, 10, 10)
        self.main_layout.setSpacing(5)
        self.main_layout.setObjectName("main_layout")
        # GAMES BOX ############################################################
        self.games_box = QtWidgets.QGroupBox(self.centralwidget)
        # size
        self.games_box.setMaximumSize(QtCore.QSize(300, 16777215))
        # stylesheet
        self.games_box.setStyleSheet(button_style)
        # allignment
        self.games_box.setAlignment(QtCore.Qt.AlignCenter)
        # object name
        self.games_box.setObjectName("games_box")
        # layout
        self.games_box_layout = QtWidgets.QGridLayout(self.games_box)
        self.games_box_layout.setContentsMargins(10, 10, 10, 10)
        self.games_box_layout.setSpacing(5)
        self.games_box_layout.setObjectName("games_box_layout")
        # game buttons #####
        # blackjack
        self.blackjack_button = QtWidgets.QPushButton(self.games_box)
        self.blackjack_button.setMinimumSize(QtCore.QSize(0, 30))
        self.blackjack_button.setObjectName("blackjack_button")
        self.games_box_layout.addWidget(self.blackjack_button, 0, 0, 1, 1)
        # hangman
        self.hangman_button = QtWidgets.QPushButton(self.games_box)
        self.hangman_button.setMinimumSize(QtCore.QSize(0, 30))
        self.hangman_button.setObjectName("hangman_button")
        self.games_box_layout.addWidget(self.hangman_button, 1, 0, 1, 1)
        # number guessing
        self.num_guess_button = QtWidgets.QPushButton(self.games_box)
        self.num_guess_button.setMinimumSize(QtCore.QSize(0, 30))
        self.num_guess_button.setObjectName("num_guess_button")
        self.games_box_layout.addWidget(self.num_guess_button, 2, 0, 1, 1)
        # rock paper scissors
        self.rps_button = QtWidgets.QPushButton(self.games_box)
        self.rps_button.setMinimumSize(QtCore.QSize(0, 30))
        self.rps_button.setObjectName("rps_button")
        self.games_box_layout.addWidget(self.rps_button, 3, 0, 1, 1)
        # tic tac toe
        self.ttt_button = QtWidgets.QPushButton(self.games_box)
        self.ttt_button.setMinimumSize(QtCore.QSize(0, 30))
        self.ttt_button.setObjectName("ttt_button")
        self.games_box_layout.addWidget(self.ttt_button, 4, 0, 1, 1)
        # adding to main layout
        self.main_layout.addWidget(self.games_box, 0, 0, 1, 1)
        # THE RULES ######################################################
        self.rules_scrollArea = QtWidgets.QScrollArea(self.centralwidget)
        # size
        self.rules_scrollArea.setMaximumSize(QtCore.QSize(300, 16777215))
        self.rules_scrollArea.setWidgetResizable(True)
        # stylesheet
        self.rules_scrollArea.setStyleSheet(
            "background-color: rgb(217, 221, 255);")
        # object name
        self.rules_scrollArea.setObjectName("rules_scrollArea")
        # the content
        self.rules_content = QtWidgets.QWidget()
        # geometry
        self.rules_content.setGeometry(QtCore.QRect(0, 0, 222, 674))
        # object name
        self.rules_content.setObjectName("rules_content")
        # layout
        self.rules_layout = QtWidgets.QGridLayout(self.rules_content)
        self.rules_layout.setObjectName("rules_layout")
        # rules label
        self.rules_label = QtWidgets.QLabel(self.rules_content)
        # size
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.rules_label.sizePolicy().hasHeightForWidth())
        self.rules_label.setSizePolicy(sizePolicy)
        self.rules_label.setMaximumSize(QtCore.QSize(300, 16777215))
        # allignment
        self.rules_label.setAlignment(QtCore.Qt.AlignCenter)
        # word wrap
        self.rules_label.setWordWrap(True)
        # object name
        self.rules_label.setObjectName("rules_label")
        # adding to layout
        self.rules_layout.addWidget(self.rules_label)
        # adding to scroll area
        self.rules_scrollArea.setWidget(self.rules_content)
        # adding to main layout
        self.main_layout.addWidget(self.rules_scrollArea, 1, 0, 1, 1)
        # EXIT BUTTON #########################################################
        self.exit_button = QtWidgets.QPushButton(self.centralwidget)
        # size
        self.exit_button.setMinimumSize(QtCore.QSize(0, 50))
        # stylesheet
        self.exit_button.setStyleSheet(button_style)
        # object name
        self.exit_button.setObjectName("exit_button")
        # adding to main layout
        self.main_layout.addWidget(self.exit_button, 2, 0, 1, 1)
        # GAMES FRAME ##########################################################
        self.games_frame = QtWidgets.QFrame(self.centralwidget)
        # size
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(True)
        self.games_frame.setSizePolicy(sizePolicy)
        self.games_frame.setMinimumSize(QtCore.QSize(530, 530))
        # stylesheet
        self.games_frame.setStyleSheet("background-color: rgb(217, 221, 255);")
        # object name
        self.games_frame.setObjectName("games_frame")
        # layout
        self.games_frame_layout = QtWidgets.QGridLayout(self.games_frame)
        self.games_frame_layout.setContentsMargins(0, 0, 0, 0)
        self.games_frame_layout.setObjectName("games_frame_layout")
        # adding to main layout
        self.main_layout.addWidget(self.games_frame, 0, 1, 3, 1)
        # setting centralwidget
        PythonGames.setCentralWidget(self.centralwidget)
        # MENUBAR ###############################################################
        self.menubar = QtWidgets.QMenuBar(PythonGames)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        PythonGames.setMenuBar(self.menubar)
        # STATUSBAR ############################################################
        self.statusbar = QtWidgets.QStatusBar(PythonGames)
        self.statusbar.setObjectName("statusbar")
        PythonGames.setStatusBar(self.statusbar)
        #########################################################################
        self.retranslateUi(PythonGames)
        QtCore.QMetaObject.connectSlotsByName(PythonGames)
    def retranslateUi(self, PythonGames):
        """Set all user-visible text and connect button signals."""
        _translate = QtCore.QCoreApplication.translate
        # WINDOW TITLE
        PythonGames.setWindowTitle(_translate("PythonGames", "Python Games"))
        # GAMES BOX TITLE
        self.games_box.setTitle(_translate("PythonGames", "The Games"))
        # GAME BUTTONS
        # blackjack
        self.blackjack_button.setText(_translate("PythonGames", "Blackjack"))
        # rules
        self.blackjack_button.clicked.connect(
            lambda: self.show_rules('Blackjack'))
        # game
        self.blackjack_button.clicked.connect(
            lambda: self.start_game('Blackjack'))
        # hangman
        self.hangman_button.setText(_translate("PythonGames", "Hangman"))
        # rules
        self.hangman_button.clicked.connect(lambda: self.show_rules('Hangman'))
        # game
        self.hangman_button.clicked.connect(lambda: self.start_game('Hangman'))
        # number guessing
        self.num_guess_button.setText(
            _translate("PythonGames", "Number Guessing"))
        # rules
        self.num_guess_button.clicked.connect(
            lambda: self.show_rules('Number guessing'))
        # game
        self.num_guess_button.clicked.connect(
            lambda: self.start_game('Number guessing'))
        # rock paper scissors
        self.rps_button.setText(_translate(
            "PythonGames", "Rock Paper Scissors"))
        # rules
        self.rps_button.clicked.connect(
            lambda: self.show_rules('Rock Paper Scissors'))
        # game
        self.rps_button.clicked.connect(
            lambda: self.start_game('Rock Paper Scissors'))
        # tic tac toe
        self.ttt_button.setText(_translate("PythonGames", "Tic Tac Toe"))
        # rules
        self.ttt_button.clicked.connect(lambda: self.show_rules('Tic Tac Toe'))
        # game
        self.ttt_button.clicked.connect(lambda: self.start_game('Tic Tac Toe'))
        # RULES TEXT
        self.rules_label.setText(_translate(
            "PythonGames", "Click on a game to see the rules"))
        # EXIT BUTTON TEXT
        self.exit_button.setText(_translate("PythonGames", "Exit"))
        # on click
        self.exit_button.clicked.connect(self.exit_application)
    def show_rules(self, game):
        """Display the rulebook text for *game* in the sidebar label."""
        self.rules_label.setText(game + '\n\n' + rulebook.rule_book(game))
        self.rules_label.setAlignment(QtCore.Qt.AlignJustify)
    def start_game(self, game):
        """Build the selected game's widget and swap it into the games frame."""
        # Placeholder widget that the game widget replaces below.
        self.replace = QtWidgets.QWidget()
        # size
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.replace.sizePolicy().hasHeightForWidth())
        self.replace.setSizePolicy(sizePolicy)
        self.replace.setMinimumSize(QtCore.QSize(530, 530))
        # adding to games frame
        self.games_frame_layout.addWidget(self.replace, 0, 0, 1, 1)
        game = game.lower()
        the_game = False
        if game == 'blackjack':
            blackjack = QtWidgets.QWidget()
            ui = Ui_blackjack()
            ui.setupUi(blackjack)
            the_game = blackjack
        elif game == 'hangman':
            hangman = QtWidgets.QWidget()
            ui = Ui_hangman()
            ui.setupUi(hangman)
            the_game = hangman
        elif game == 'number guessing':
            num_guess = QtWidgets.QWidget()
            ui = Ui_num_guess()
            ui.setupUi(num_guess)
            the_game = num_guess
        elif game == 'rock paper scissors':
            rps = QtWidgets.QWidget()
            ui = Ui_rps()
            ui.setupUi(rps)
            the_game = rps
        elif game == 'tic tac toe':
            tic_tac_toe = QtWidgets.QWidget()
            ui = Ui_tic_tac_toe()
            ui.setupUi(tic_tac_toe)
            the_game = tic_tac_toe
        if the_game:
            self.games_frame_layout.replaceWidget(self.replace, the_game)
    def exit_application(self):
        """Terminate the application process."""
        sys.exit()
def main():
    """Build the main window and run the Qt event loop until exit."""
    qt_app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    Ui_PythonGames().setupUi(window)
    window.show()
    sys.exit(qt_app.exec_())
if __name__ == "__main__":
main()
| 35.810526
| 87
| 0.618533
|
4a115961ba66d8cbd4f0cd1bdf81dcadaad6a11c
| 104
|
py
|
Python
|
_/0349_08_Code/04.py
|
paullewallencom/javascript-978-1-8495-1034-9
|
7e539d042c644931a9ef2418f66d260a1c6892eb
|
[
"Apache-2.0"
] | null | null | null |
_/0349_08_Code/04.py
|
paullewallencom/javascript-978-1-8495-1034-9
|
7e539d042c644931a9ef2418f66d260a1c6892eb
|
[
"Apache-2.0"
] | null | null | null |
_/0349_08_Code/04.py
|
paullewallencom/javascript-978-1-8495-1034-9
|
7e539d042c644931a9ef2418f66d260a1c6892eb
|
[
"Apache-2.0"
] | null | null | null |
(ur'^create/Entity', views.modelform_Entity),
(ur'^create/Location', views.modelform_Location),
| 34.666667
| 53
| 0.711538
|
4a115a1568e651c02c6d4ae512ddd33a4b18907d
| 2,433
|
py
|
Python
|
var/spack/repos/builtin/packages/fbgemm/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/fbgemm/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/fbgemm/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fbgemm(CMakePackage):
    """FBGEMM (Facebook GEneral Matrix Multiplication) is a low-precision,
    high-performance matrix-matrix multiplications and convolution library
    for server-side inference."""

    homepage = "https://github.com/pytorch/FBGEMM"
    git = "https://github.com/pytorch/FBGEMM.git"

    maintainers = ['dskhudia']

    # Each dated version pins the commit shipped with the matching py-torch
    # release (see trailing comments). Submodules are required for the build.
    version('master', branch='master', submodules=True)
    version('2021-05-10', commit='7794b2950b35ddfa7426091e7fb2f991b1407557', submodules=True)  # py-torch@1.9
    version('2020-11-13', commit='9b0131179f293a645bfd3409cd66fa5eecc393b0', submodules=True)  # py-torch@1.8
    version('2020-09-14', commit='1d710393d5b7588f5de3b83f51c22bbddf095229', submodules=True)  # py-torch@1.7
    version('2020-05-31', commit='7d673046a6a3ad1fa0d15dfb189cd06ffa217041', submodules=True)  # py-torch@1.6
    version('2020-05-21', commit='e526aadd058f2a0b8ce738be022e0e4ab4233a2d', submodules=True)  # py-torch@1.5.1
    version('2020-03-22', commit='58c002d1593f32aa420ab56b5c344e60d3fb6d05', submodules=True)  # py-torch@1.5.0
    version('2019-11-20', commit='399ea148f1403c100e6d601ec3587a621de96a84', submodules=True)  # py-torch@1.4
    version('2019-09-26', commit='7dfeddb5ba976f47471275b2468909dfd9b577e1', submodules=True)  # py-torch@1.3
    version('2019-07-22', commit='f712cb2328a2b29424bdaeecb9c0731da2cd997b', submodules=True)  # py-torch@1.2
    version('2019-04-18', commit='6ec218e6ed5dcb9b5397a608a3b5b8027b236819', submodules=True)  # py-torch@1.1
    version('2019-01-23', commit='79333308f5e2fc242727879dcd3de3536b6ffc39', submodules=True)  # py-torch@1.0.1
    version('2018-12-04', commit='0d5a159b944252e70a677236b570f291943e0543', submodules=True)  # py-torch@1.0.0

    depends_on('cmake@3.5:', type='build')
    depends_on('ninja', type='build')
    depends_on('python', type='build')
    # Apple clang ships without an OpenMP runtime; use LLVM's.
    depends_on('llvm-openmp', when='%apple-clang')

    conflicts('%gcc@:4', msg='FBGEMM requires GCC 5+')

    generator = 'Ninja'

    @run_before('cmake')
    def check_requirements(self):
        # Fail early: the library is written against AVX2 intrinsics, so a
        # target without AVX2 cannot build or run it.
        if 'avx2' not in self.spec.target:
            raise RuntimeError(
                'FBGEMM requires a CPU with support for AVX2 instruction set or higher')
| 51.765957
| 111
| 0.726675
|
4a115a80a20c6f9a952e8a8e371ed8074f8e8312
| 1,431
|
py
|
Python
|
ykdl/extractors/acfun/acbase.py
|
panda-mute/ykdl
|
56cea24f1513f21aedbe80b75c25f7c3b1e07704
|
[
"MIT"
] | null | null | null |
ykdl/extractors/acfun/acbase.py
|
panda-mute/ykdl
|
56cea24f1513f21aedbe80b75c25f7c3b1e07704
|
[
"MIT"
] | null | null | null |
ykdl/extractors/acfun/acbase.py
|
panda-mute/ykdl
|
56cea24f1513f21aedbe80b75c25f7c3b1e07704
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .._common import *
class AcBase(VideoExtractor):
    # Map video height (pixels) to the site's quality-id string.
    quality_2_id = {
        2160: '4K',
        1080: 'BD',
        720: 'TD',
        540: 'HD',
        360: 'SD',
        270: 'LD'
    }

    def prepare(self):
        """Fetch the page and build a VideoInfo with one m3u8 stream per quality."""
        info = VideoInfo(self.name)
        html = get_content(self.url)
        # get_page_info() is implemented by the concrete subclass; m3u8Info
        # arrives as a JSON string describing the available representations.
        info.title, info.artist, sourceVid, m3u8Info = self.get_page_info(html)
        m3u8Info = json.loads(m3u8Info)['adaptationSet'][0]['representation']
        self.logger.debug('m3u8Info:\n%s', m3u8Info)
        # Randomly pick the primary or backup CDN URL field for all streams.
        url = random.choice(['url', 'backupUrl'])
        for q in m3u8Info:
            if q['frameRate'] > 30:
                # drop 60 FPS
                continue
            # qualityType looks like e.g. '1080p'; extract the numeric height.
            quality = int(match1(q['qualityType'], '(\d+)'))
            stream_type = self.quality_2_id[quality]
            stream_profile = q['qualityLabel']
            urls = q[url]
            if not isinstance(urls, list):
                urls = [urls]
            if stream_type not in info.streams:
                info.stream_types.append(stream_type)
            else:
                # Keep only the first variant seen for each quality level.
                continue
            info.streams[stream_type] = {
                'container': 'm3u8',
                'video_profile': stream_profile,
                'src': urls,
                'size': 0
            }
        return info

    def prepare_list(self):
        """Return absolute URLs for every part of a multi-part video page."""
        return ['https://www.acfun.cn' + p for p in self.get_path_list()]
| 28.62
| 79
| 0.502446
|
4a115b310f561ef8bd8657550fe933583c4778d5
| 107
|
py
|
Python
|
output/models/ms_data/additional/isdefault076_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/additional/isdefault076_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/additional/isdefault076_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.ms_data.additional.isdefault076_xsd.isdefault076 import Root
__all__ = [
"Root",
]
| 17.833333
| 79
| 0.766355
|
4a115bd1073d52ce0fd7b4fc4218bc5e3bdea887
| 2,064
|
py
|
Python
|
discordCommandRegistry.py
|
adassimo25/audio-wizzard-service
|
88b3b5e73a7a8a20f5a405811551c9124f1a63d9
|
[
"MIT"
] | null | null | null |
discordCommandRegistry.py
|
adassimo25/audio-wizzard-service
|
88b3b5e73a7a8a20f5a405811551c9124f1a63d9
|
[
"MIT"
] | 17
|
2021-05-01T18:35:47.000Z
|
2021-05-24T12:00:44.000Z
|
discordCommandRegistry.py
|
adassimo25/audio-wizzard-service
|
88b3b5e73a7a8a20f5a405811551c9124f1a63d9
|
[
"MIT"
] | 2
|
2021-05-15T07:40:38.000Z
|
2021-05-23T14:17:30.000Z
|
import requests
import os
from dotenv import load_dotenv
load_dotenv()
APP_ID = os.getenv('APP_ID')
BOT_TOKEN = os.getenv('BOT_TOKEN')
url = f"https://discord.com/api/v8/applications/{APP_ID}/commands"
def register_command(reg_url, reg_headers, reg_json):
    """POST a slash-command definition to the Discord API and report the result.

    Args:
        reg_url: Discord application-commands endpoint URL.
        reg_headers: Request headers (must carry the bot Authorization token).
        reg_json: The command definition payload.
    """
    response = requests.post(reg_url, headers=reg_headers, json=reg_json)
    if response.ok:
        print("Registered!")
    else:
        # Include status code and body so a rejected payload can be diagnosed;
        # the previous bare "Failed!" gave no clue why registration failed.
        print("Failed! ({}): {}".format(response.status_code, response.text))
json_account = {
"name": "account",
"description": "Manage your account",
"options": [
{
"name": "delete",
"description": "Delete information about you",
"type": 1,
"required": False
},
{
"name": "set",
"description": "set params value",
"type": 1,
"required": False,
"options": [
{
"name": "age",
"description": "set age",
"type": 4,
"required": False,
},
{
"name": "gender",
"description": "set gender",
"type": 3,
"required": False,
}
],
}
]
}
json_recommend = {
"name": "recommend",
"description": "Recommend a song",
"options": [
{
"name": "number-of-songs",
"description": "Number of songs to recommend",
"type": 4,
"required": False
},
]
}
json_review = {
"name": "review",
"description": "Review a song",
"options": [
{
"name": "content",
"description": "Your review with title like *song title*",
"type": 3,
"required": True
},
]
}
# For authorization, you can use either your bot token
headers = {
"Authorization": f"Bot {BOT_TOKEN}"
}
register_command(url, headers, json_account)
register_command(url, headers, json_recommend)
register_command(url, headers, json_review)
| 23.191011
| 73
| 0.489341
|
4a115c7fb6227693c955216edff084845b49b0ee
| 6,083
|
py
|
Python
|
scripts/relative_planner.py
|
COHRINT/minau
|
52ce74792c2df806c3456c81578e91a5225696a6
|
[
"MIT"
] | null | null | null |
scripts/relative_planner.py
|
COHRINT/minau
|
52ce74792c2df806c3456c81578e91a5225696a6
|
[
"MIT"
] | 1
|
2019-04-22T19:37:55.000Z
|
2019-04-24T15:39:12.000Z
|
scripts/relative_planner.py
|
COHRINT/minau
|
52ce74792c2df806c3456c81578e91a5225696a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division
"""
STILL IN DEVELOPMENT, NOT USABLE YETx
"""
import rospy
import random
from geometry_msgs.msg import TwistStamped, Vector3, PoseStamped
import numpy as np
from minau.srv import ArmControl, SetHeadingVelocity
from nav_msgs.msg import Odometry
import tf
class Planner:
def __init__(self, name, update_period):
self.name = name
self.pub = rospy.Publisher('new_twist', TwistStamped, queue_size=10)
self.planner = rospy.get_param('planner')
start_twist = rospy.get_param('start_twist')
if start_twist == 'random':
self.twist = self.get_random_twist()
else:
self.twist = self.load_twist_dict(start_twist)
rospy.Subscriber('/' + name + '/pose_gt', Odometry, self.pose_callback)
self.listener = tf.TransformListener()
rospy.wait_for_message('/' + name + '/pose_gt', Odometry)
rospy.Timer(rospy.Duration(update_period), self.update_auv_callback)
rospy.loginfo(name + " Planner Initialized.")
self.seq = 0
if self.planner != 'stopped':
self.arm_uav()
else:
rospy.logwarn("Not arming: " + name)
# Probably make a first timer to help us navigate to the starting position & then begin the 2nd timer
def pose_callback(self, msg):
self.pose = msg.pose.pose
def arm_uav(self):
rospy.loginfo("Arming " + self.name)
srv_name = '/' + self.name + '/uuv_control/arm_control'
rospy.wait_for_service(srv_name)
req = rospy.ServiceProxy(srv_name, ArmControl)
try:
res = req()
# rospy.logfatal(res)
except rospy.ServiceException as exc:
print("No response to " + srv_name)
def load_twist_dict(self, twist_dict):
dot_x = float(twist_dict['x'])
dot_y = float(twist_dict['y'])
dot_z = float(twist_dict['z'])
dot_psi = float(twist_dict['psi'])
twist = TwistStamped()
twist.twist.linear.x = dot_x
twist.twist.linear.y = dot_y
twist.twist.linear.z = dot_z
twist.twist.angular.z = dot_psi
return twist
def get_random_twist(self):
[min_twist, max_twist] = rospy.get_param('/planners/random_linear_vel_range')
[min_ang, max_ang] = rospy.get_param('/planners/random_angular_vel_range')
size = max_twist - min_twist
twist = TwistStamped()
twist.twist.linear.x = random.random() * size + min_twist
twist.twist.linear.y = random.random() * size + min_twist
twist.twist.linear.z = random.random() * size + min_twist
size_ang = max_ang - min_ang
twist.twist.angular.z = random.random() * size_ang + min_ang
return twist
def set_hv(self, heading, velocity):
""" Sends a BlueROV along the desired velocity
Parameters
----------
heading : float
velocity : geometry_msgs/Vector3
Returns
-------
None
"""
srv_name = '/' + self.name + '/uuv_control/set_heading_velocity'
rospy.wait_for_service(srv_name)
req = rospy.ServiceProxy(srv_name, SetHeadingVelocity)
try:
req(heading, velocity)
except rospy.ServiceException as exc:
print("No response to " + srv_name)
def update_auv_callback(self, msg):
if self.planner == "stopped": # Nothing to do if we're stopped
return
# Need to transform self.twist.twist.linear to world ned frame
pose = PoseStamped()
pose.pose.position.x = self.twist.twist.linear.x - self.pose.position.x
pose.pose.position.y = self.twist.twist.linear.y - self.pose.position.y
pose.pose.position.z = self.twist.twist.linear.z - self.pose.position.z
pose.pose.orientation = self.pose.orientation
pose.header.frame_id = self.name + '/base_link'
new_pose = self.listener.transformPose('/world_ned', pose)
rospy.loginfo("sub pose: " + str(self.pose))
rospy.loginfo("new pose: " + str(new_pose))
return
v = Vector3()
v.x = new_pose.pose.position.x
v.y = new_pose.pose.position.y
v.z = new_pose.pose.position.z
quat_list = [ self.pose.orientation.x, \
self.pose.orientation.y, \
self.pose.orientation.z, \
self.pose.orientation.w ]
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(quat_list)
yaw = (yaw * 180) / ( 2 * np.pi)
rospy.loginfo("yaw: " + str(yaw))
heading = yaw # + self.twist.twist.angular.z
self.set_hv(heading, v)
rospy.loginfo("Updating AUV")
rospy.loginfo("New yaw: " + str(yaw))
def pub_cmd(self, msg):
new_twist = self.get_new_twist()
if new_twist == None: # publish the old velocity
new_twist = self.twist
else:
self.twist = new_twist
new_twist.header.seq = self.seq
new_twist.header.stamp = rospy.Time.now()
new_twist.header.frame_id = self.name + "/base_link"
self.pub.publish(new_twist)
self.seq += 1
def get_new_twist(self):
""" This function provides an easy place to add more complex planners that actively change the velocity """
if self.planner == "linear":
return None
def main():
rospy.init_node('point_planner', anonymous=True)
name = rospy.get_namespace().split('/')[1]
rospy.loginfo("waiting for params to become available")
while not rospy.has_param('/planners/update_freq') and not rospy.is_shutdown(): # wait for the param server to load
pass
rospy.loginfo("params found")
param_name = rospy.search_param('planners/update_freq')
update_period = 1 / int(rospy.get_param(param_name))
p = Planner(name, update_period)
rospy.spin()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException:
pass
| 37.091463
| 119
| 0.617294
|
4a115db308e5adfff1d930b60c0e8b526f88b5e6
| 5,434
|
py
|
Python
|
compile.py
|
jasperla/slae64
|
d564ac7b5dc85a17c141abd0978a22d5b0a04ec0
|
[
"MIT"
] | null | null | null |
compile.py
|
jasperla/slae64
|
d564ac7b5dc85a17c141abd0978a22d5b0a04ec0
|
[
"MIT"
] | null | null | null |
compile.py
|
jasperla/slae64
|
d564ac7b5dc85a17c141abd0978a22d5b0a04ec0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2019 Jasper Lievisse Adriaanse <j@jasper.la>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import argparse
import os.path
import re
import subprocess
import sys
class Compile():
def __init__(self, file, define, linker, compiler, wrapper):
self.sourcefile = file
self.basename = os.path.basename(self.sourcefile.replace('.nasm', ''))
self.objectfile = '{}.o'.format(self.basename)
self.define = define
self.linker = linker
self.compiler = compiler
self.bytecode = []
if wrapper:
self.wrapper = 'shellcode-{}.c'.format(self.basename)
self.wrapper_output = 'shellcode-{}'.format(self.basename)
else:
self.wrapper = None
self.check_progs()
def check_progs(self):
# Ensure the required binaries are available:
progs = ['nasm', self.linker]
if self.wrapper:
progs.append(self.compiler)
for p in progs:
try:
subprocess.call([p, '-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
err('Required binary for {} not found'.format(p))
def assemble(self):
info('Assembling {}'.format(self.sourcefile))
cmd = ['nasm', '-felf64', '-o', self.objectfile, self.sourcefile]
if self.define:
cmd.append('-D{}'.format(self.define))
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
err('Invoked command "{}" failed!\n Captured output: {}\n '.format(' '.join(cmd), str(e.output.strip())))
def link(self):
info('Linking {}'.format(self.objectfile))
cmd = [self.linker, '-o', self.basename, self.objectfile]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
err('Invoked command "{}" failed!\n Captured output: {}\n '.format(' '.join(cmd), str(e.output.strip())))
def dumpcode(self):
info('Extracting and analyzing byte code')
nulls_found = False
try:
p = subprocess.Popen('objdump -D -M intel {}'.format(self.objectfile).split(), stdout=subprocess.PIPE)
for line in p.stdout:
line = line.decode()
m = re.match('^\s+\w+:\t(.*?)\s+(\t|\n)', line)
if m:
code = m.groups()[0]
[self.bytecode.append(x) for x in code.split()]
except Exception as e:
err('Failed to extract bytecode from {}: {}'.format(self.objectfile, e))
if '00' in self.bytecode:
warn('NULL bytes were found, are you sure this is ok?')
else:
ok('No NULL bytes found')
info('Shellcode length: {}'.format(len(self.bytecode)))
def compilec(self):
if not self.wrapper:
err('You called the wrapper compile function but the wrapper is disabled.')
info('Compiling {}'.format(self.wrapper))
# Turn the bytecode list into a string such as '\x90\x90'
shellcode = ''.join(['\\x{}'.format(x) for x in self.bytecode])
wrapper_template = f"""
#include <stdio.h>
#include <string.h>
char code[] = "{shellcode}";
int
main(int argc, int argv[]) {{
printf("Shellcode length: %ld\\n", strlen(code));
(*(void (*)()) code)();
return 0;
}}
"""
fh = open(self.wrapper, 'w')
fh.write(wrapper_template)
fh.close()
# Compile the wrapper
cmd = [self.compiler, '-o', self.wrapper_output, '-fno-stack-protector', '-z', 'execstack', self.wrapper]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
err('Invoked command "{}" failed!\n Captured output: {}\n '.format(' '.join(cmd), str(e.output.strip())))
def compile(self):
self.assemble()
self.link()
self.dumpcode()
if self.wrapper:
self.compilec()
def err(msg, return_code=1):
    """Print an error message and exit with *return_code*.

    A falsy ``return_code`` (e.g. ``None``) prints without exiting, which is
    how ``warn`` reuses this function.
    """
    # Error output belongs on stderr so it is not mixed into piped stdout.
    print('[-] {}'.format(msg), file=sys.stderr)
    if return_code:
        sys.exit(return_code)
def warn(msg):
    """Print *msg* in error style without terminating the program."""
    err(msg, None)


def info(msg):
    """Print an informational status line."""
    print('[*] {}'.format(msg))


def ok(msg):
    """Print a success status line."""
    print('[+] {}'.format(msg))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-D', '--define', required=False,
help='Define to pass to NASM when assembling code for specific operating system')
parser.add_argument('-l', '--linker', default='ld')
parser.add_argument('-c', '--compiler', default='cc')
parser.add_argument('-w', '--enable-wrapper', action='store_true', default=True,
help='Compile a shellcode wrapper')
parser.add_argument('file', nargs=1)
args = parser.parse_args()
file = args.file[0]
if not os.path.exists(file):
err('Provided file "{}" does not exist'.format(file))
c = Compile(file, args.define, args.linker, args.compiler, args.enable_wrapper)
c.compile()
if __name__ == '__main__':
main()
| 31.051429
| 116
| 0.653846
|
4a115f71ab72f264013a4ab7abe313ef98d79972
| 1,403
|
py
|
Python
|
models.py
|
agavgavi/py4web-blog-app
|
8b1301a23fe109a738489ae427973dc2b34f6eec
|
[
"MIT"
] | 9
|
2020-09-18T03:30:33.000Z
|
2021-07-13T17:47:24.000Z
|
models.py
|
agavgavi/py4web-blog-app
|
8b1301a23fe109a738489ae427973dc2b34f6eec
|
[
"MIT"
] | 1
|
2021-12-27T15:09:44.000Z
|
2021-12-27T18:32:36.000Z
|
models.py
|
agavgavi/py4web-blog-app
|
8b1301a23fe109a738489ae427973dc2b34f6eec
|
[
"MIT"
] | 4
|
2020-09-18T05:50:50.000Z
|
2021-03-12T07:44:46.000Z
|
"""
This file defines the database models
"""
from .common import db, Field, auth
from py4web import URL
from pydal.validators import IS_NOT_EMPTY
import datetime
from . import settings
# Define your table below
#
# db.define_table('thing', Field('name'))
#
# always commit your models to avoid problems later
#
# db.commit()
#
def get_time():
    """Return the current UTC time as a naive datetime.

    Used as the default for ``post.date_posted``.
    NOTE(review): ``utcnow()`` is naive (no tzinfo) and deprecated in newer
    Pythons — confirm whether timezone-aware UTC is wanted here.
    """
    return datetime.datetime.utcnow()
def get_download_url(picture):
    """Build the relative URL under which an uploaded picture is served."""
    return "images/{}".format(picture)
def get_user():
    """Return the id of the signed-in user, or None when nobody is logged in."""
    current = auth.current_user
    if not current:
        return None
    return current.get("id")
db.define_table(
"post",
Field("title", "string", requires=IS_NOT_EMPTY()),
Field("content", "text", requires=IS_NOT_EMPTY()),
Field("date_posted", "datetime", default=get_time, readable=False, writable=False),
Field(
"author",
"reference auth_user",
default=get_user,
readable=False,
writable=False,
),
)
db.define_table(
"profile",
Field("user", "reference auth_user", readable=False, writable=False),
Field(
"image",
"upload",
default="default.jpg",
uploadfolder=settings.UPLOAD_PATH,
download_url=get_download_url, label="Profile Picture",
),
)
# We do not want these fields to appear in forms by default.
db.post.id.readable = False
db.post.id.writable = False
db.profile.id.readable = False
db.profile.id.writable = False
db.commit()
| 21.257576
| 87
| 0.675695
|
4a1160e8bdb82e280507107a4a511b7c69a783f7
| 7,977
|
py
|
Python
|
pysmartthings/subscription.py
|
north3221/pysmartthings
|
2707d18709cd966a9d4ea51abdb3dea3daeaccc4
|
[
"Apache-2.0"
] | 82
|
2018-12-28T17:54:21.000Z
|
2022-02-18T17:31:23.000Z
|
pysmartthings/subscription.py
|
north3221/pysmartthings
|
2707d18709cd966a9d4ea51abdb3dea3daeaccc4
|
[
"Apache-2.0"
] | 38
|
2019-01-30T15:42:13.000Z
|
2022-01-23T13:27:43.000Z
|
pysmartthings/subscription.py
|
north3221/pysmartthings
|
2707d18709cd966a9d4ea51abdb3dea3daeaccc4
|
[
"Apache-2.0"
] | 28
|
2019-02-17T16:47:41.000Z
|
2022-03-18T03:05:45.000Z
|
"""Define the subscription module."""
from enum import Enum
from typing import Any, Optional
from .api import Api
from .entity import Entity
class SourceType(Enum):
"""Define the source type of a subscription."""
UNKNOWN = "UNKNOWN"
DEVICE = "DEVICE"
CAPABILITY = "CAPABILITY"
class Subscription:
    """A SmartThings event subscription.

    Holds the fields common to both subscription sources; depending on
    ``source_type`` either the capability-specific attributes
    (``location_id``) or the device-specific attributes (``device_id``,
    ``component_id``) are meaningful.  ``apply_data``/``to_data`` convert
    between this object and the API's dict representation.
    """

    def __init__(self):
        """Initialize a new instance of the subscription class."""
        self._subscription_id = None
        self._installed_app_id = None
        self._source_type = SourceType.UNKNOWN
        # "*" is the API wildcard meaning "any value".
        self._capability = "*"
        self._attribute = "*"
        self._value = "*"
        self._state_change_only = True
        self._subscription_name = None
        # Capability-specific attributes
        self._location_id = None
        # Device-specific attributes
        self._device_id = None
        self._component_id = None

    def apply_data(self, data: dict):
        """Populate this subscription from an API response dict."""
        self._subscription_id = data["id"]
        self._installed_app_id = data["installedAppId"]
        self._source_type = SourceType(data["sourceType"])
        if self._source_type is SourceType.CAPABILITY:
            capability = data["capability"]
            self._location_id = capability["locationId"]
            self._capability = capability["capability"]
            # Optional keys fall back to the API defaults.
            self._attribute = capability.get("attribute", "*")
            self._value = capability.get("value", "*")
            self._state_change_only = capability.get("stateChangeOnly", True)
            self._subscription_name = capability.get("subscriptionName", None)
        if self._source_type is SourceType.DEVICE:
            device = data["device"]
            self._device_id = device["deviceId"]
            self._component_id = device.get("componentId", "*")
            self._capability = device.get("capability", "*")
            self._attribute = device.get("attribute", "*")
            self._value = device.get("value", "*")
            self._state_change_only = device.get("stateChangeOnly", True)
            self._subscription_name = device.get("subscriptionName", None)

    def to_data(self) -> dict:
        """Get a data structure representing this entity.

        Fields still at their wildcard/default value are omitted from the
        capability payload; the device payload always carries
        ``stateChangeOnly``.
        """
        data = {"sourceType": self._source_type.value}
        if self._source_type is SourceType.CAPABILITY:
            capability = {
                "locationId": self._location_id,
                "capability": self._capability,
            }
            if self._attribute and self._attribute != "*":
                capability["attribute"] = self._attribute
            if self._value and self._value != "*":
                capability["value"] = self._value
            if not self._state_change_only:
                capability["stateChangeOnly"] = False
            if self._subscription_name:
                capability["subscriptionName"] = self._subscription_name
            data["capability"] = capability
        if self._source_type is SourceType.DEVICE:
            device = {
                "deviceId": self._device_id,
                "stateChangeOnly": self._state_change_only,
            }
            if self._component_id and self._component_id != "*":
                device["componentId"] = self._component_id
            if self._capability and self._capability != "*":
                device["capability"] = self._capability
            if self._attribute and self._attribute != "*":
                device["attribute"] = self._attribute
            if self._value and self._value != "*":
                device["value"] = self._value
            if self._subscription_name:
                device["subscriptionName"] = self._subscription_name
            data["device"] = device
        return data

    @property
    def subscription_id(self) -> str:
        """Get the id of the subscription."""
        return self._subscription_id

    @property
    def installed_app_id(self) -> str:
        """Get the id of the subscribing app."""
        return self._installed_app_id

    @installed_app_id.setter
    def installed_app_id(self, value: str):
        """Set the id of the subscribing app."""
        self._installed_app_id = value

    @property
    def source_type(self) -> SourceType:
        """Get the type of the event that is being subscribed to."""
        return self._source_type

    @source_type.setter
    def source_type(self, value: Any):
        """Set the type of event that is being subscribed to."""
        self._source_type = SourceType(value)

    @property
    def capability(self) -> str:
        """Get the name of the capability that is subscribed."""
        return self._capability

    @capability.setter
    def capability(self, value: str):
        """Set the name of the capability that is subscribed."""
        self._capability = value

    @property
    def attribute(self) -> str:
        """Get the name of the capabilities attribute or * for all."""
        return self._attribute

    @attribute.setter
    def attribute(self, value: str):
        """Set the name of the capabilities attribute or * for all."""
        self._attribute = value

    @property
    def value(self) -> str:
        """Get the value that will trigger the subscription."""
        return self._value

    @value.setter
    def value(self, value: str):
        """Set the value that will trigger the subscription."""
        self._value = value

    @property
    def state_change_only(self) -> bool:
        """Get to execute only on a state change."""
        return self._state_change_only

    @state_change_only.setter
    def state_change_only(self, value: bool):
        """Set to execute only on a state change."""
        self._state_change_only = value

    @property
    def subscription_name(self) -> str:
        """Get a name for the subscription."""
        return self._subscription_name

    @subscription_name.setter
    def subscription_name(self, value: str):
        """Set a name for the subscription."""
        self._subscription_name = value

    @property
    def location_id(self) -> str:
        """Get the location id that both the app and source device are in."""
        return self._location_id

    @location_id.setter
    def location_id(self, value: str):
        """Set the location id that both the app and source device are in."""
        self._location_id = value

    @property
    def device_id(self):
        """Get the GUID of the device that is subscribed to."""
        return self._device_id

    @device_id.setter
    def device_id(self, value: str):
        """Set the GUID of the device that is subscribed to."""
        self._device_id = value

    @property
    def component_id(self) -> str:
        """Get the component ID on the device that is subscribed to."""
        return self._component_id

    @component_id.setter
    def component_id(self, value: str):
        """Set the component ID on the device that is subscribed to."""
        self._component_id = value
class SubscriptionEntity(Entity, Subscription):
    """A Subscription backed by the SmartThings API via an Api client."""

    def __init__(self, api: Api, data: Optional[dict] = None):
        """Create a new instance of the SubscriptionEntity class.

        Args:
            api: The API client used for refresh operations.
            data: Optional API response dict to initialize the fields from.
        """
        Entity.__init__(self, api)
        Subscription.__init__(self)
        if data:
            self.apply_data(data)

    async def refresh(self):
        """Refresh the subscription information using the API."""
        data = await self._api.get_subscription(
            self._installed_app_id, self._subscription_id
        )
        self.apply_data(data)

    async def save(self):
        """Subscriptions cannot be updated."""
        raise NotImplementedError
| 36.095023
| 79
| 0.600727
|
4a11619112b5167a1961128a08df47c8c7e5aa31
| 119,144
|
py
|
Python
|
tensorflow/python/data/ops/dataset_ops.py
|
wenming2014/tensorflow
|
a102a6a71844e194f3946f6318768c5367f1f16b
|
[
"Apache-2.0"
] | 5
|
2018-07-04T22:14:02.000Z
|
2018-07-04T22:21:43.000Z
|
tensorflow/python/data/ops/dataset_ops.py
|
wenming2014/tensorflow
|
a102a6a71844e194f3946f6318768c5367f1f16b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/ops/dataset_ops.py
|
wenming2014/tensorflow
|
a102a6a71844e194f3946f6318768c5367f1f16b
|
[
"Apache-2.0"
] | 1
|
2018-11-30T01:35:01.000Z
|
2018-11-30T01:35:01.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import threading
import warnings
import numpy as np
import six
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(object):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements (nested structures of tensors) and a "logical
plan" of transformations that act on those elements.
"""
def _as_serialized_graph(self):
"""Produces serialized graph representation of the dataset.
Returns:
A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
serialized graph.
"""
return gen_dataset_ops.dataset_to_graph(self._as_variant_tensor())
@abc.abstractmethod
def _as_variant_tensor(self):
"""Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.
Returns:
A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
"""
raise NotImplementedError("Dataset._as_variant_tensor")
@abc.abstractmethod
def _inputs(self):
"""Returns a list of the input datasets of the dataset."""
raise NotImplementedError("Dataset._inputs")
def options(self):
"""Returns the options for this dataset and its inputs.
Returns:
A `tf.data.Options` object representing the dataset options.
"""
options = Options()
for input_dataset in self._inputs():
input_options = input_dataset.options()
if input_options is not None:
options = options.merge(input_options)
return options
  def _apply_options(self):
    """Apply options, such as optimization configuration, to the dataset.

    Returns:
      A `Dataset` wrapping `self` with the transformations implied by
      `self.options()` applied (static optimizations, autotuning model,
      stats aggregation).
    """
    dataset = self
    options = self.options()
    # Static optimizations rewrite the dataset graph itself, so apply them
    # first and let the wrappers below see the optimized pipeline.
    static_optimizations = options._static_optimizations() # pylint: disable=protected-access
    if static_optimizations:
      dataset = _OptimizeDataset(dataset, static_optimizations)
    # Autotuning is on by default: it is applied unless explicitly disabled
    # (i.e. `experimental_autotune` is `None` or `True`, but not `False`).
    if options.experimental_autotune is not False:
      dataset = _ModelDataset(dataset)
    if options.experimental_stats and options.experimental_stats.aggregator: # pylint: disable=line-too-long
      dataset = _SetStatsAggregatorDataset( # pylint: disable=protected-access
          dataset, options.experimental_stats.aggregator,
          options.experimental_stats.prefix,
          options.experimental_stats.counter_prefix)
    return dataset
  def make_initializable_iterator(self, shared_name=None):
    """Creates an `Iterator` for enumerating the elements of this dataset.

    Note: The returned iterator will be in an uninitialized state,
    and you must run the `iterator.initializer` operation before using it:

    ```python
    dataset = ...
    iterator = dataset.make_initializable_iterator()
    # ...
    sess.run(iterator.initializer)
    ```

    Args:
      shared_name: (Optional.) If non-empty, the returned iterator will be
        shared under the given name across multiple sessions that share the
        same devices (e.g. when using a remote server).

    Returns:
      An `Iterator` over the elements of this dataset.

    Raises:
      RuntimeError: If eager execution is enabled.
    """
    if context.executing_eagerly():
      raise RuntimeError(
          "dataset.make_initializable_iterator is not supported when eager "
          "execution is enabled.")
    dataset = self._apply_options()
    if shared_name is None:
      shared_name = ""
    # Prefer the V2 iterator op once its forward-compatibility window has
    # passed; otherwise fall back to the original op.
    if compat.forward_compatible(2018, 8, 3):
      iterator_resource = gen_dataset_ops.iterator_v2(
          container="", shared_name=shared_name, **flat_structure(self))
    else:
      iterator_resource = gen_dataset_ops.iterator(
          container="", shared_name=shared_name, **flat_structure(self))
    # Colocate the initializer with the iterator resource so initialization
    # runs on the device that owns the iterator state.
    with ops.colocate_with(iterator_resource):
      initializer = gen_dataset_ops.make_iterator(
          dataset._as_variant_tensor(),  # pylint: disable=protected-access
          iterator_resource)
    return iterator_ops.Iterator(iterator_resource, initializer,
                                 dataset.output_types, dataset.output_shapes,
                                 dataset.output_classes)
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
The returned iterator implements the Python iterator protocol and therefore
can only be used in eager mode.
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If eager execution is not enabled.
"""
if context.executing_eagerly():
dataset = self._apply_options()
return iterator_ops.EagerIterator(dataset)
else:
raise RuntimeError("dataset.__iter__() is only supported when eager "
"execution is enabled.")
  def make_one_shot_iterator(self):
    """Creates an `Iterator` for enumerating the elements of this dataset.

    Note: The returned iterator will be initialized automatically.
    A "one-shot" iterator does not currently support re-initialization.

    Returns:
      An `Iterator` over the elements of this dataset.
    """
    if context.executing_eagerly():
      dataset = self._apply_options()
      return iterator_ops.EagerIterator(dataset)
    graph_level_seed, op_level_seed = core_random_seed.get_seed(None)
    # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
    # a 0-argument function.
    @function.Defun(capture_by_value=True)
    def _make_dataset():
      """Factory function for a dataset."""
      # NOTE(mrry): `Defun` does not capture the graph-level seed from the
      # enclosing graph, so if a graph-level seed is present we set the local
      # graph seed based on a combination of the graph- and op-level seeds.
      if graph_level_seed is not None:
        assert op_level_seed is not None
        core_random_seed.set_random_seed(
            (graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))
      dataset = self._apply_options()
      return dataset._as_variant_tensor()  # pylint: disable=protected-access
    try:
      _make_dataset.add_to_graph(ops.get_default_graph())
    except ValueError as err:
      # A stateful capture (e.g. a `Variable`) cannot be embedded in the
      # 0-argument `Defun`, so surface a more actionable error message.
      if "Cannot capture a stateful node" in str(err):
        raise ValueError(
            "Failed to create a one-shot iterator for a dataset. "
            "`Dataset.make_one_shot_iterator()` does not support datasets that "
            "capture stateful objects, such as a `Variable` or `LookupTable`. "
            "In these cases, use `Dataset.make_initializable_iterator()`. "
            "(Original error: %s)" % err)
      else:
        six.reraise(ValueError, err)
    return iterator_ops.Iterator(
        gen_dataset_ops.one_shot_iterator(
            dataset_factory=_make_dataset, **flat_structure(self)),
        None, self.output_types, self.output_shapes, self.output_classes)
  @abc.abstractproperty
  def output_classes(self):
    """Returns the class of each component of an element of this dataset.

    The expected values are `tf.Tensor` and `tf.SparseTensor`.

    Returns:
      A nested structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    raise NotImplementedError("Dataset.output_classes")
  @abc.abstractproperty
  def output_shapes(self):
    """Returns the shape of each component of an element of this dataset.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    raise NotImplementedError("Dataset.output_shapes")
  @abc.abstractproperty
  def output_types(self):
    """Returns the type of each component of an element of this dataset.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    raise NotImplementedError("Dataset.output_types")
def __repr__(self):
output_shapes = nest.map_structure(str, self.output_shapes)
output_shapes = str(output_shapes).replace("'", "")
output_types = nest.map_structure(repr, self.output_types)
output_types = str(output_types).replace("'", "")
return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
output_types))
@staticmethod
def from_tensors(tensors):
"""Creates a `Dataset` with a single element, comprising the given tensors.
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If tensors contains
one or more large NumPy arrays, consider the alternative described in
[this guide](https://tensorflow.org/guide/datasets#consuming_numpy_arrays).
Args:
tensors: A nested structure of tensors.
Returns:
Dataset: A `Dataset`.
"""
return TensorDataset(tensors)
@staticmethod
def from_tensor_slices(tensors):
"""Creates a `Dataset` whose elements are slices of the given tensors.
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If tensors contains
one or more large NumPy arrays, consider the alternative described in
[this guide](https://tensorflow.org/guide/datasets#consuming_numpy_arrays).
Args:
tensors: A nested structure of tensors, each having the same size in the
0th dimension.
Returns:
Dataset: A `Dataset`.
"""
return TensorSliceDataset(tensors)
class _GeneratorState(object):
"""Stores outstanding iterators created from a Python generator.
This class keeps track of potentially multiple iterators that may have
been created from a generator, e.g. in the case that the dataset is
repeated, or nested within a parallel computation.
"""
def __init__(self, generator):
self._generator = generator
self._lock = threading.Lock()
self._next_id = 0 # GUARDED_BY(self._lock)
self._args = {}
self._iterators = {}
def get_next_id(self, *args):
with self._lock:
ret = self._next_id
self._next_id += 1
self._args[ret] = args
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(ret, dtype=np.int64)
def get_iterator(self, iterator_id):
try:
return self._iterators[iterator_id]
except KeyError:
iterator = iter(self._generator(*self._args.pop(iterator_id)))
self._iterators[iterator_id] = iterator
return iterator
def iterator_completed(self, iterator_id):
del self._iterators[iterator_id]
  @staticmethod
  def from_generator(generator, output_types, output_shapes=None, args=None):
    """Creates a `Dataset` whose elements are generated by `generator`.

    The `generator` argument must be a callable object that returns
    an object that supports the `iter()` protocol (e.g. a generator function).
    The elements generated by `generator` must be compatible with the given
    `output_types` and (optional) `output_shapes` arguments.

    For example:

    ```python
    import itertools

    def gen():
      for i in itertools.count(1):
        yield (i, [1] * i)

    ds = Dataset.from_generator(
        gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))
    value = ds.make_one_shot_iterator().get_next()

    sess.run(value)  # (1, array([1]))
    sess.run(value)  # (2, array([1, 1]))
    ```

    NOTE: The current implementation of `Dataset.from_generator()` uses
    `tf.py_func` and inherits the same constraints. In particular, it
    requires the `Dataset`- and `Iterator`-related operations to be placed
    on a device in the same process as the Python program that called
    `Dataset.from_generator()`. The body of `generator` will not be
    serialized in a `GraphDef`, and you should not use this method if you
    need to serialize your model and restore it in a different environment.

    NOTE: If `generator` depends on mutable global variables or other external
    state, be aware that the runtime may invoke `generator` multiple times
    (in order to support repeating the `Dataset`) and at any time
    between the call to `Dataset.from_generator()` and the production of the
    first element from the generator. Mutating global variables or external
    state can cause undefined behavior, and we recommend that you explicitly
    cache any external state in `generator` before calling
    `Dataset.from_generator()`.

    Args:
      generator: A callable object that returns an object that supports the
        `iter()` protocol. If `args` is not specified, `generator` must take
        no arguments; otherwise it must take as many arguments as there are
        values in `args`.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element yielded by `generator`.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape`
        objects corresponding to each component of an element yielded by
        `generator`.
      args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
        and passed to `generator` as NumPy-array arguments.

    Returns:
      Dataset: A `Dataset`.
    """
    if not callable(generator):
      raise TypeError("`generator` must be callable.")
    if output_shapes is None:
      # Default to fully-unknown shapes when none are given.
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)
    if args is None:
      args = ()
    else:
      args = tuple(ops.convert_n_to_tensor(args, name="args"))
    flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
    flattened_shapes = nest.flatten(output_shapes)
    # Per-dataset host-side registry mapping iterator IDs to live iterators.
    generator_state = DatasetV2._GeneratorState(generator)
    def get_iterator_id_fn(unused_dummy):
      """Creates a unique `iterator_id` for each pass over the dataset.

      The returned `iterator_id` disambiguates between multiple concurrently
      existing iterators.

      Args:
        unused_dummy: Ignored value.

      Returns:
        A `tf.int64` tensor whose value uniquely identifies an iterator in
        `generator_state`.
      """
      return script_ops.py_func(
          generator_state.get_next_id, args, dtypes.int64, stateful=True)
    def generator_next_fn(iterator_id_t):
      """Generates the next element from iterator with ID `iterator_id_t`.

      We map this function across an infinite repetition of the
      `iterator_id_t`, and raise `StopIteration` to terminate the iteration.

      Args:
        iterator_id_t: A `tf.int64` tensor whose value uniquely identifies
          the iterator in `generator_state` from which to generate an element.

      Returns:
        A nested structure of tensors representing an element from the iterator.
      """
      def generator_py_func(iterator_id):
        """A `py_func` that will be called to invoke the iterator."""
        # `next()` raises `StopIteration` when there are no more
        # elements remaining to be generated.
        values = next(generator_state.get_iterator(iterator_id))
        # Use the same _convert function from the py_func() implementation to
        # convert the returned values to arrays early, so that we can inspect
        # their values.
        try:
          flattened_values = nest.flatten_up_to(output_types, values)
        except (TypeError, ValueError):
          raise TypeError(
              "`generator` yielded an element that did not match the expected "
              "structure. The expected structure was %s, but the yielded "
              "element was %s." % (output_types, values))
        ret_arrays = []
        for ret, dtype in zip(flattened_values, flattened_types):
          try:
            ret_arrays.append(script_ops.FuncRegistry._convert(  # pylint: disable=protected-access
                ret, dtype=dtype.as_numpy_dtype))
          except (TypeError, ValueError):
            raise TypeError(
                "`generator` yielded an element that could not be converted to "
                "the expected type. The expected type was %s, but the yielded "
                "element was %s." % (dtype.name, ret))
        # Additional type and shape checking to ensure that the components
        # of the generated element match the `output_types` and `output_shapes`
        # arguments.
        for (ret_array, expected_dtype, expected_shape) in zip(
            ret_arrays, flattened_types, flattened_shapes):
          if ret_array.dtype != expected_dtype.as_numpy_dtype:
            raise TypeError(
                "`generator` yielded an element of type %s where an element "
                "of type %s was expected." % (ret_array.dtype,
                                              expected_dtype.as_numpy_dtype))
          if not expected_shape.is_compatible_with(ret_array.shape):
            raise ValueError(
                "`generator` yielded an element of shape %s where an element "
                "of shape %s was expected." % (ret_array.shape, expected_shape))
        return ret_arrays
      flat_values = script_ops.py_func(
          generator_py_func, [iterator_id_t], flattened_types, stateful=True)
      # The `py_func()` op drops the inferred shapes, so we add them back in
      # here.
      if output_shapes is not None:
        for ret_t, shape in zip(flat_values, flattened_shapes):
          ret_t.set_shape(shape)
      return nest.pack_sequence_as(output_types, flat_values)
    def finalize_fn(iterator_id_t):
      """Releases host-side state for the iterator with ID `iterator_id_t`."""
      def finalize_py_func(iterator_id):
        generator_state.iterator_completed(iterator_id)
        # We return a dummy value so that the `finalize_fn` has a valid
        # signature.
        # NOTE(mrry): Explicitly create an array of `np.int64` because implicit
        # casting in `py_func()` will create an array of `np.int32` on Windows,
        # leading to a runtime error.
        return np.array(0, dtype=np.int64)
      return script_ops.py_func(
          finalize_py_func, [iterator_id_t], dtypes.int64, stateful=True)
    # This function associates each traversal of `generator` with a unique
    # iterator ID.
    def flat_map_fn(dummy_arg):
      # The `get_iterator_id_fn` gets a unique ID for the current instance of
      # the generator.
      # The `generator_next_fn` gets the next element from the iterator with the
      # given ID, and raises StopIteration when that iterator contains no
      # more elements.
      return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,
                               finalize_fn)
    # A single-element dataset that, each time it is evaluated, contains a
    # freshly-generated and unique (for the returned dataset) int64
    # ID that will be used to identify the appropriate Python state, which
    # is encapsulated in `generator_state`, and captured in
    # `get_iterator_id_map_fn`.
    dummy = 0
    id_dataset = Dataset.from_tensors(dummy)
    # A dataset that contains all of the elements generated by a
    # single iterator created from `generator`, identified by the
    # iterator ID contained in `id_dataset`. Lifting the iteration
    # into a flat_map here enables multiple repetitions and/or nested
    # versions of the returned dataset to be created, because it forces
    # the generation of a new ID for each version.
    return id_dataset.flat_map(flat_map_fn)
  @staticmethod
  def range(*args):
    """Creates a `Dataset` of a step-separated range of values.

    For example:

    ```python
    Dataset.range(5) == [0, 1, 2, 3, 4]
    Dataset.range(2, 5) == [2, 3, 4]
    Dataset.range(1, 5, 2) == [1, 3]
    Dataset.range(1, 5, -2) == []
    Dataset.range(5, 1) == []
    Dataset.range(5, 1, -2) == [5, 3]
    ```

    Args:
      *args: follow same semantics as python's xrange.
        len(args) == 1 -> start = 0, stop = args[0], step = 1
        len(args) == 2 -> start = args[0], stop = args[1], step = 1
        len(args) == 3 -> start = args[0], stop = args[1], step = args[2]

    Returns:
      Dataset: A `RangeDataset`.

    Raises:
      ValueError: if len(args) == 0.
    """
    return RangeDataset(*args)
@staticmethod
def zip(datasets):
"""Creates a `Dataset` by zipping together the given datasets.
This method has similar semantics to the built-in `zip()` function
in Python, with the main difference being that the `datasets`
argument can be an arbitrary nested structure of `Dataset` objects.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { 4, 5, 6 }
c = { (7, 8), (9, 10), (11, 12) }
d = { 13, 14 }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
Dataset.zip((a, b)) == { (1, 4), (2, 5), (3, 6) }
Dataset.zip((b, a)) == { (4, 1), (5, 2), (6, 3) }
# The `datasets` argument may contain an arbitrary number of
# datasets.
Dataset.zip((a, b, c)) == { (1, 4, (7, 8)),
(2, 5, (9, 10)),
(3, 6, (11, 12)) }
# The number of elements in the resulting dataset is the same as
# the size of the smallest dataset in `datasets`.
Dataset.zip((a, d)) == { (1, 13), (2, 14) }
```
Args:
datasets: A nested structure of datasets.
Returns:
Dataset: A `Dataset`.
"""
return ZipDataset(datasets)
def concatenate(self, dataset):
"""Creates a `Dataset` by concatenating given dataset with this dataset.
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { 4, 5, 6, 7 }
# Input dataset and dataset to be concatenated should have same
# nested structures and output types.
# c = { (8, 9), (10, 11), (12, 13) }
# d = { 14.0, 15.0, 16.0 }
# a.concatenate(c) and a.concatenate(d) would result in error.
a.concatenate(b) == { 1, 2, 3, 4, 5, 6, 7 }
```
Args:
dataset: `Dataset` to be concatenated.
Returns:
Dataset: A `Dataset`.
"""
return ConcatenateDataset(self, dataset)
def prefetch(self, buffer_size):
"""Creates a `Dataset` that prefetches elements from this dataset.
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
maximum number of elements that will be buffered when prefetching.
Returns:
Dataset: A `Dataset`.
"""
return PrefetchDataset(self, buffer_size)
  @staticmethod
  def list_files(file_pattern, shuffle=None, seed=None):
    """A dataset of all files matching one or more glob patterns.

    NOTE: The default behavior of this method is to return filenames in
    a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
    to get results in a deterministic order.

    Example:
      If we had the following files on our filesystem:
        - /path/to/dir/a.txt
        - /path/to/dir/b.py
        - /path/to/dir/c.py
      If we pass "/path/to/dir/*.py" as the directory, the dataset would
      produce:
        - /path/to/dir/b.py
        - /path/to/dir/c.py

    Args:
      file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
        (scalar or vector), representing the filename glob (i.e. shell wildcard)
        pattern(s) that will be matched.
      shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
        Defaults to `True`.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
        seed that will be used to create the distribution. See
        `tf.set_random_seed` for behavior.

    Returns:
      Dataset: A `Dataset` of strings corresponding to file names.
    """
    with ops.name_scope("list_files"):
      if shuffle is None:
        shuffle = True
      file_pattern = ops.convert_to_tensor(
          file_pattern, dtype=dtypes.string, name="file_pattern")
      matching_files = gen_io_ops.matching_files(file_pattern)
      # Raise an exception if `file_pattern` does not match any files.
      condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,
                                   name="match_not_empty")
      message = math_ops.add(
          "No files matched pattern: ",
          string_ops.reduce_join(file_pattern, separator=", "), name="message")
      assert_not_empty = control_flow_ops.Assert(
          condition, [message], summarize=1, name="assert_not_empty")
      # The identity op, gated on the assertion, forces the non-empty check to
      # run before the file list is consumed.
      with ops.control_dependencies([assert_not_empty]):
        matching_files = array_ops.identity(matching_files)
      dataset = Dataset.from_tensor_slices(matching_files)
      if shuffle:
        # NOTE(mrry): The shuffle buffer size must be greater than zero, but the
        # list of files might be empty.
        buffer_size = math_ops.maximum(
            array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
        dataset = dataset.shuffle(buffer_size, seed=seed)
      return dataset
def repeat(self, count=None):
"""Repeats this dataset `count` times.
NOTE: If this dataset is a function of global state (e.g. a random number
generator), then different repetitions may produce different elements.
Args:
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior
(if `count` is `None` or `-1`) is for the dataset be repeated
indefinitely.
Returns:
Dataset: A `Dataset`.
"""
return RepeatDataset(self, count)
def _enumerate(self, start=0):
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return Dataset.zip((Dataset.range(start, max_value), self))
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly
samples elements from this buffer, replacing the selected elements with new
elements. For perfect shuffling, a buffer size greater than or equal to the
full size of the dataset is required.
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
number of elements from this dataset from which the new
dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
Dataset: A `Dataset`.
"""
return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)
def cache(self, filename=""):
"""Caches the elements in this dataset.
Args:
filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
directory on the filesystem to use for caching tensors in this Dataset.
If a filename is not provided, the dataset will be cached in memory.
Returns:
Dataset: A `Dataset`.
"""
return CacheDataset(self, filename)
def take(self, count):
"""Creates a `Dataset` with at most `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
Returns:
Dataset: A `Dataset`.
"""
return TakeDataset(self, count)
def skip(self, count):
"""Creates a `Dataset` that skips `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number
of elements of this dataset that should be skipped to form the
new dataset. If `count` is greater than the size of this
dataset, the new dataset will contain no elements. If `count`
is -1, skips the entire dataset.
Returns:
Dataset: A `Dataset`.
"""
return SkipDataset(self, count)
def shard(self, num_shards, index):
"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
This dataset operator is very useful when running distributed training, as
it allows each worker to read a unique subset.
When reading a single input file, you can skip elements as follows:
```python
d = tf.data.TFRecordDataset(FLAGS.input_file)
d = d.shard(FLAGS.num_workers, FLAGS.worker_index)
d = d.repeat(FLAGS.num_epochs)
d = d.shuffle(FLAGS.shuffle_buffer_size)
d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)
```
Important caveats:
- Be sure to shard before you use any randomizing operator (such as
shuffle).
- Generally it is best if the shard operator is used early in the dataset
pipeline. For example, when reading from a set of TFRecord files, shard
before converting the dataset to input samples. This avoids reading every
file on every worker. The following is an example of an efficient
sharding strategy within a complete pipeline:
```python
d = Dataset.list_files(FLAGS.pattern)
d = d.shard(FLAGS.num_workers, FLAGS.worker_index)
d = d.repeat(FLAGS.num_epochs)
d = d.shuffle(FLAGS.shuffle_buffer_size)
d = d.interleave(tf.data.TFRecordDataset,
cycle_length=FLAGS.num_readers, block_length=1)
d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)
```
Args:
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
Returns:
Dataset: A `Dataset`.
Raises:
ValueError: if `num_shards` or `index` are illegal values. Note: error
checking is done on a best-effort basis, and aren't guaranteed to be
caught upon dataset creation. (e.g. providing in a placeholder tensor
bypasses the early checking, and will instead result in an error during
a session.run call.)
"""
num_shards = ops.convert_to_tensor(
num_shards, name="num_shards", dtype=dtypes.int64)
num_shards_static = tensor_util.constant_value(num_shards)
index = ops.convert_to_tensor(index, name="index", dtype=dtypes.int64)
index_static = tensor_util.constant_value(index)
if num_shards_static is not None and num_shards_static < 1:
raise ValueError("num_shards must be >= 1; got: %s" % num_shards_static)
if index_static is not None and index_static < 0:
raise ValueError("index must be >= 0; got: %s" % index_static)
if (index_static is not None and num_shards_static is not None and
index_static >= num_shards_static):
raise ValueError("index must be <= num_shards; %s is not < %s" %
(index_static, num_shards_static))
def filter_fn(elem_index, _):
mod_result = math_ops.mod(elem_index, num_shards)
return math_ops.equal(mod_result, index)
return self._enumerate().filter(filter_fn).map(lambda _, elem: elem)
def batch(self, batch_size, drop_remainder=False):
"""Combines consecutive elements of this dataset into batches.
The tensors in the resulting element will have an additional outer
dimension, which will be `batch_size` (or `N % batch_size` for the last
element if `batch_size` does not divide the number of input elements `N`
evenly and `drop_remainder` is `False`). If your program depends on the
batches having the same outer dimension, you should set the `drop_remainder`
argument to `True` to prevent the smaller batch from being produced.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case its has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return BatchDataset(self, batch_size, drop_remainder)
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
"""Combines consecutive elements of this dataset into padded batches.
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like `tf.data.Dataset.batch`, the tensors in the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padding_shapes`. The `padding_shapes` argument
determines the resulting shape for each dimension of each component in an
output element:
* If the dimension is a constant (e.g. `tf.Dimension(37)`), the component
will be padded out to that length in that dimension.
* If the dimension is unknown (e.g. `tf.Dimension(None)`), the component
will be padded out to the maximum length of all elements in that
dimension.
See also `tf.data.experimental.dense_to_sparse_batch`, which combines
elements that may have different shapes into a `tf.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or
`tf.int64` vector tensor-like objects representing the shape
to which the respective component of each input element should
be padded prior to batching. Any unknown dimensions
(e.g. `tf.Dimension(None)` in a `tf.TensorShape` or `-1` in a
tensor-like object) will be padded to the maximum size of that
dimension in each batch.
padding_values: (Optional.) A nested structure of scalar-shaped
`tf.Tensor`, representing the padding values to use for the
respective components. Defaults are `0` for numeric types and
the empty string for string types.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case its has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
drop_remainder)
  def map(self, map_func, num_parallel_calls=None):
    """Maps `map_func` across the elements of this dataset.

    This transformation applies `map_func` to each element of this dataset, and
    returns a new dataset containing the transformed elements, in the same
    order as they appeared in the input.

    For example:

    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { 1, 2, 3, 4, 5 }

    a.map(lambda x: x + 1) = { 2, 3, 4, 5, 6 }
    ```

    The input signature of `map_func` is determined by the structure of each
    element in this dataset. For example:

    ```python
    # Each element is a `tf.Tensor` object.
    a = { 1, 2, 3, 4, 5 }
    # `map_func` takes a single argument of type `tf.Tensor` with the same
    # shape and dtype.
    result = a.map(lambda x: ...)

    # Each element is a tuple containing two `tf.Tensor` objects.
    b = { (1, "foo"), (2, "bar"), (3, "baz") }
    # `map_func` takes two arguments of type `tf.Tensor`.
    result = b.map(lambda x_int, y_str: ...)

    # Each element is a dictionary mapping strings to `tf.Tensor` objects.
    c = { {"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}, {"a": 3, "b": "baz"} }
    # `map_func` takes a single argument of type `dict` with the same keys as
    # the elements.
    result = c.map(lambda d: ...)
    ```

    The value or values returned by `map_func` determine the structure of each
    element in the returned dataset.

    ```python
    # `map_func` returns a scalar `tf.Tensor` of type `tf.float32`.
    def f(...):
      return tf.constant(37.0)
    result = dataset.map(f)
    result.output_classes == tf.Tensor
    result.output_types == tf.float32
    result.output_shapes == []  # scalar

    # `map_func` returns two `tf.Tensor` objects.
    def g(...):
      return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
    result = dataset.map(g)
    result.output_classes == (tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string)
    result.output_shapes == ([], [3])

    # Python primitives, lists, and NumPy arrays are implicitly converted to
    # `tf.Tensor`.
    def h(...):
      return 37.0, ["Foo", "Bar", "Baz"], np.array([1.0, 2.0], dtype=np.float64)
    result = dataset.map(h)
    result.output_classes == (tf.Tensor, tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string, tf.float64)
    result.output_shapes == ([], [3], [2])

    # `map_func` can return nested structures.
    def i(...):
      return {"a": 37.0, "b": [42, 16]}, "foo"
    result.output_classes == ({"a": tf.Tensor, "b": tf.Tensor}, tf.Tensor)
    result.output_types == ({"a": tf.float32, "b": tf.int32}, tf.string)
    result.output_shapes == ({"a": [], "b": [2]}, [])
    ```

    In addition to `tf.Tensor` objects, `map_func` can accept as arguments and
    return `tf.SparseTensor` objects.

    Args:
      map_func: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to another nested structure of tensors.
      num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
        representing the number elements to process in parallel. If not
        specified, elements will be processed sequentially.

    Returns:
      Dataset: A `Dataset`.
    """
    # Sequential and parallel mapping are implemented by distinct dataset ops.
    if num_parallel_calls is None:
      return MapDataset(self, map_func)
    else:
      return ParallelMapDataset(self, map_func, num_parallel_calls)
def flat_map(self, map_func):
"""Maps `map_func` across this dataset and flattens the result.
Use `flat_map` if you want to make sure that the order of your dataset
stays the same. For example, to flatten a dataset of batches into a
dataset of their elements:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset. '[...]' represents a tensor.
a = {[1,2,3,4,5], [6,7,8,9], [10]}
a.flat_map(lambda x: Dataset.from_tensor_slices(x)) ==
{[1,2,3,4,5,6,7,8,9,10]}
```
`tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
`flat_map` produces the same output as
`tf.data.Dataset.interleave(cycle_length=1)`
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
`Dataset`.
Returns:
Dataset: A `Dataset`.
"""
return FlatMapDataset(self, map_func)
def interleave(self,
map_func,
cycle_length,
block_length=1,
num_parallel_calls=None):
"""Maps `map_func` across this dataset, and interleaves the results.
For example, you can use `Dataset.interleave()` to process many input files
concurrently:
```python
# Preprocess 4 files concurrently, and interleave blocks of 16 records from
# each file.
filenames = ["/var/data/file1.txt", "/var/data/file2.txt", ...]
dataset = (Dataset.from_tensor_slices(filenames)
.interleave(lambda x:
TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
cycle_length=4, block_length=16))
```
The `cycle_length` and `block_length` arguments control the order in which
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
identical results to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them
producing `block_length` consecutive elements from each iterator, and
consuming the next input element each time it reaches the end of an
iterator.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3, 4, 5 }
# NOTE: New lines indicate "block" boundaries.
a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),
cycle_length=2, block_length=4) == {
1, 1, 1, 1,
2, 2, 2, 2,
1, 1,
2, 2,
3, 3, 3, 3,
4, 4, 4, 4,
3, 3,
4, 4,
5, 5, 5, 5,
5, 5,
}
```
NOTE: The order of elements yielded by this transformation is
deterministic, as long as `map_func` is a pure function. If
`map_func` contains any stateful operations, the order in which
that state is accessed is undefined.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
`Dataset`.
cycle_length: The number of elements from this dataset that will be
processed concurrently.
block_length: The number of consecutive elements to produce from each
input element before cycling to another input element.
num_parallel_calls: (Optional.) If specified, the implementation creates
a threadpool, which is used to fetch inputs from cycle elements
asynchronously and in parallel. The default behavior is to fetch inputs
from cycle elements synchronously with no parallelism.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return InterleaveDataset(self, map_func, cycle_length, block_length)
else:
return ParallelInterleaveDataset(self, map_func, cycle_length,
block_length, num_parallel_calls)
def filter(self, predicate):
"""Filters this dataset according to `predicate`.
Args:
predicate: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate)
def apply(self, transformation_func):
"""Applies a transformation function to this dataset.
`apply` enables chaining of custom `Dataset` transformations, which are
represented as functions that take one `Dataset` argument and return a
transformed `Dataset`.
For example:
```
dataset = (dataset.map(lambda x: x ** 2)
.apply(group_by_window(key_func, reduce_func, window_size))
.map(lambda x: x ** 3))
```
Args:
transformation_func: A function that takes one `Dataset` argument and
returns a `Dataset`.
Returns:
Dataset: The `Dataset` returned by applying `transformation_func` to this
dataset.
"""
dataset = transformation_func(self)
if not isinstance(dataset, DatasetV2):
raise TypeError("`transformation_func` must return a Dataset.")
dataset._input_datasets = [self] # pylint: disable=protected-access
return dataset
def window(self, size, shift=None, stride=1, drop_remainder=False):
"""Combines input elements into a dataset of windows.
Each window is a dataset itself and contains `size` elements (or
possibly fewer if there are not enough input elements to fill the window
and `drop_remainder` evaluates to false).
The `stride` argument determines the stride of the input elements,
and the `shift` argument determines the shift of the window.
For example:
- `tf.data.Dataset.range(7).window(2)` produces
`{{0, 1}, {2, 3}, {4, 5}, {6}}`
- `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
`{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
- `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
`{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
Args:
size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
of the input dataset to combine into a window.
shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. Defaults to
`size`.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
stride of the input elements in the sliding window.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether a window should be dropped in case its size is smaller than
`window_size`.
Returns:
Dataset: A `Dataset` of windows, each of which is a nested `Dataset` with
the same structure as this dataset, but a finite subsequence of its
elements.
"""
if shift is None:
shift = size
return WindowDataset(self, size, shift, stride, drop_remainder)
  def reduce(self, initial_state, reduce_func):
    """Reduces the input dataset to a single element.

    The transformation calls `reduce_func` successively on every element of
    the input dataset until the dataset is exhausted, aggregating information
    in its internal state. The `initial_state` argument is used for the
    initial state and the final state is returned as the result.

    For example:
    - `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1)`
      produces `5`
    - `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y)`
      produces `10`

    Args:
      initial_state: A nested structure of tensors, representing the initial
        state of the transformation.
      reduce_func: A function that maps `(old_state, input_element)` to
        `new_state`. It must take two arguments and return a nested structure
        of tensors. The structure of `new_state` must match the structure of
        `initial_state`.

    Returns:
      A nested structure of `tf.Tensor` objects, corresponding to the final
      state of the transformation.

    Raises:
      TypeError: if the classes or types of the state returned by
        `reduce_func` do not match those of `initial_state`.
    """
    with ops.name_scope("initial_state"):
      # Convert any `SparseTensorValue`s to `SparseTensor`s and all other
      # values to tensors.
      initial_state = nest.pack_sequence_as(initial_state, [
          sparse_tensor_lib.SparseTensor.from_value(t)
          if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor(
              t, name="component_%d" % i)
          for i, t in enumerate(nest.flatten(initial_state))
      ])
    # Compute initial values for the state classes, shapes and types based on
    # the initial state.
    state_classes = sparse.get_classes(initial_state)
    state_shapes = nest.pack_sequence_as(
        initial_state, [t.get_shape() for t in nest.flatten(initial_state)])
    state_types = nest.pack_sequence_as(
        initial_state, [t.dtype for t in nest.flatten(initial_state)])
    # Iteratively rerun the reduce function until reaching a fixed point on
    # `self._state_shapes`.
    need_to_rerun = True
    while need_to_rerun:
      # Trace `reduce_func` against the current estimate of the state
      # structure; the traced outputs reveal whether the estimate holds.
      wrapped_func = StructuredFunctionWrapper(
          reduce_func,
          "reduce()",
          input_classes=(state_classes, self.output_classes),
          input_shapes=(state_shapes, self.output_shapes),
          input_types=(state_types, self.output_types),
          add_to_graph=False)
      # Extract and validate class information from the returned values.
      output_classes = wrapped_func.output_classes
      for new_state_class, state_class in zip(
          nest.flatten(output_classes), nest.flatten(state_classes)):
        if not issubclass(new_state_class, state_class):
          raise TypeError(
              "The element classes for the new state must match the initial "
              "state. Expected %s; got %s." % (state_classes,
                                               wrapped_func.output_classes))
      # Extract and validate type information from the returned values.
      output_types = wrapped_func.output_types
      for new_state_type, state_type in zip(
          nest.flatten(output_types), nest.flatten(state_types)):
        if new_state_type != state_type:
          raise TypeError(
              "The element types for the new state must match the initial "
              "state. Expected %s; got %s." % (state_types,
                                               wrapped_func.output_types))
      # Extract shape information from the returned values.
      output_shapes = wrapped_func.output_shapes
      flat_state_shapes = nest.flatten(state_shapes)
      flat_new_state_shapes = nest.flatten(output_shapes)
      # Relax each state shape to the most specific shape compatible with
      # both the current estimate and what `reduce_func` produced.
      weakened_state_shapes = [
          original.most_specific_compatible_shape(new)
          for original, new in zip(flat_state_shapes, flat_new_state_shapes)
      ]
      need_to_rerun = False
      for original_shape, weakened_shape in zip(flat_state_shapes,
                                                weakened_state_shapes):
        # A shape was genuinely weakened, so `reduce_func` must be retraced
        # with the less specific state shapes.
        if original_shape.ndims is not None and (
            weakened_shape.ndims is None or
            original_shape.as_list() != weakened_shape.as_list()):
          need_to_rerun = True
          break
      if need_to_rerun:
        state_shapes = nest.pack_sequence_as(state_shapes,
                                             weakened_state_shapes)
    # Use the function traced at the fixed point.
    reduce_func = wrapped_func.function
    reduce_func.add_to_graph(ops.get_default_graph())
    # Sparse components are serialized across the op boundary and
    # deserialized on the way out.
    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(
            output_types,
            gen_dataset_ops.reduce_dataset(
                self._as_variant_tensor(),  # pylint: disable=protected-access
                nest.flatten(sparse.serialize_sparse_tensors(initial_state)),
                reduce_func.captured_inputs,
                f=reduce_func,
                output_shapes=nest.flatten(
                    sparse.as_dense_shapes(output_shapes, output_classes)),
                output_types=nest.flatten(
                    sparse.as_dense_types(output_types, output_classes)))),
        output_types,
        output_shapes,
        output_classes)
def with_options(self, options):
"""Returns a new `tf.data.Dataset` with the given options set.
The options are "global" in the sense they apply to the entire input
pipeline in which the `with_options` transformation is used. If options are
set multiple times, they are merged if possible (see
`tf.data.Options.merge()` for details).
Args:
options: A `tf.data.Options` that identifies the options the use.
Returns:
Dataset: A `Dataset` with the given options.
Raises:
ValueError: if options are set more than once
"""
return _OptionsDataset(self, options)
@tf_export(v1=["data.Dataset"])
class DatasetV1(DatasetV2):
  """Represents a potentially large set of elements.

  A `Dataset` can be used to represent an input pipeline as a
  collection of elements (nested structures of tensors) and a "logical
  plan" of transformations that act on those elements.
  """

  # Every constructor/transformation below delegates to the `DatasetV2`
  # implementation and wraps the result in a `DatasetV1Adapter`, so that
  # V1-only APIs (e.g. `make_one_shot_iterator`) remain available on the
  # returned object. `functools.wraps` copies the V2 docstring/signature.

  def __init__(self):
    pass

  @staticmethod
  @functools.wraps(DatasetV2.from_tensors)
  def from_tensors(tensors):
    return DatasetV1Adapter(DatasetV2.from_tensors(tensors))

  @staticmethod
  @functools.wraps(DatasetV2.from_tensor_slices)
  def from_tensor_slices(tensors):
    return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))

  @staticmethod
  @deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
  def from_sparse_tensor_slices(sparse_tensor):
    """Splits each rank-N `tf.SparseTensor` in this dataset row-wise.

    Args:
      sparse_tensor: A `tf.SparseTensor`.

    Returns:
      Dataset: A `Dataset` of rank-(N-1) sparse tensors.
    """
    return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))

  @staticmethod
  @functools.wraps(DatasetV2.from_generator)
  def from_generator(generator, output_types, output_shapes=None, args=None):
    return DatasetV1Adapter(DatasetV2.from_generator(
        generator, output_types, output_shapes, args))

  @staticmethod
  @functools.wraps(DatasetV2.range)
  def range(*args):
    return DatasetV1Adapter(DatasetV2.range(*args))

  @staticmethod
  @functools.wraps(DatasetV2.zip)
  def zip(datasets):
    return DatasetV1Adapter(DatasetV2.zip(datasets))

  @functools.wraps(DatasetV2.concatenate)
  def concatenate(self, dataset):
    return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))

  @functools.wraps(DatasetV2.prefetch)
  def prefetch(self, buffer_size):
    return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))

  @staticmethod
  @functools.wraps(DatasetV2.list_files)
  def list_files(file_pattern, shuffle=None, seed=None):
    return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))

  @functools.wraps(DatasetV2.repeat)
  def repeat(self, count=None):
    return DatasetV1Adapter(super(DatasetV1, self).repeat(count))

  @functools.wraps(DatasetV2.shuffle)
  def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
    return DatasetV1Adapter(super(DatasetV1, self).shuffle(
        buffer_size, seed, reshuffle_each_iteration))

  @functools.wraps(DatasetV2.cache)
  def cache(self, filename=""):
    return DatasetV1Adapter(super(DatasetV1, self).cache(filename))

  @functools.wraps(DatasetV2.take)
  def take(self, count):
    return DatasetV1Adapter(super(DatasetV1, self).take(count))

  @functools.wraps(DatasetV2.skip)
  def skip(self, count):
    return DatasetV1Adapter(super(DatasetV1, self).skip(count))

  @functools.wraps(DatasetV2.shard)
  def shard(self, num_shards, index):
    return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))

  @functools.wraps(DatasetV2.batch)
  def batch(self, batch_size, drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).batch(
        batch_size, drop_remainder))

  @functools.wraps(DatasetV2.padded_batch)
  def padded_batch(self,
                   batch_size,
                   padded_shapes,
                   padding_values=None,
                   drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).padded_batch(
        batch_size, padded_shapes, padding_values, drop_remainder))

  @functools.wraps(DatasetV2.map)
  def map(self, map_func, num_parallel_calls=None):
    return DatasetV1Adapter(super(DatasetV1, self).map(
        map_func, num_parallel_calls))

  @functools.wraps(DatasetV2.flat_map)
  def flat_map(self, map_func):
    return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))

  @functools.wraps(DatasetV2.interleave)
  def interleave(self,
                 map_func,
                 cycle_length,
                 block_length=1,
                 num_parallel_calls=None):
    return DatasetV1Adapter(super(DatasetV1, self).interleave(
        map_func, cycle_length, block_length, num_parallel_calls))

  @functools.wraps(DatasetV2.filter)
  def filter(self, predicate):
    return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))

  @functools.wraps(DatasetV2.apply)
  def apply(self, transformation_func):
    return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))

  @functools.wraps(DatasetV2.window)
  def window(self, size, shift=None, stride=1, drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).window(
        size, shift, stride, drop_remainder))

  @functools.wraps(DatasetV2.with_options)
  def with_options(self, options):
    return DatasetV1Adapter(super(DatasetV1, self).with_options(options))
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place. `Dataset` refers to the V1 class for now.
Dataset = DatasetV1
class DatasetV1Adapter(DatasetV1):
  """Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API.

  Every accessor simply forwards to the wrapped V2 dataset; the adapter adds
  no behavior of its own beyond inheriting V1-only entry points from
  `DatasetV1`.
  """

  def __init__(self, dataset):
    super(DatasetV1Adapter, self).__init__()
    # The wrapped `DatasetV2` instance that all calls delegate to.
    self._dataset = dataset

  def _as_variant_tensor(self):
    return self._dataset._as_variant_tensor()  # pylint: disable=protected-access

  def _inputs(self):
    return self._dataset._inputs()  # pylint: disable=protected-access

  def options(self):
    return self._dataset.options()

  @property
  def output_classes(self):
    return self._dataset.output_classes

  @property
  def output_shapes(self):
    return self._dataset.output_shapes

  @property
  def output_types(self):
    return self._dataset.output_types

  def make_initializable_iterator(self, shared_name=None):
    return self._dataset.make_initializable_iterator(shared_name)

  def __iter__(self):
    return iter(self._dataset)

  def make_one_shot_iterator(self):
    return self._dataset.make_one_shot_iterator()
@tf_export("data.Options")
class Options(object):
  """Represents options for tf.data.Dataset.

  An `Options` object can be for instance used to control which static
  optimizations to apply or whether to use performance modeling to dynamically
  tune the parallelism of operations such as `tf.data.Dataset.map` or
  `tf.data.Dataset.interleave`.
  """

  # Generate a `_<name>` storage attribute (defaulting to None, i.e. "unset")
  # plus a type-checked `<name>` property for each option, instead of
  # spelling out a dozen near-identical properties by hand.
  for _name, _ty, _docstring in [
      ("experimental_autotune", bool,
       "Whether to dynamically adjust the values of tunable parameters (e.g. "
       "degrees of parallelism)."),
      ("experimental_deterministic", bool,
       "Whether the outputs need to be produced in deterministic order."),
      ("experimental_filter_fusion", bool,
       "Whether to fuse filter transformations."),
      ("experimental_hoist_random_uniform", bool,
       "Whether to hoist `tf.random_uniform()` ops out of map transformations."
      ),
      ("experimental_stats", stats_options.StatsOptions,
       "Associate the given statistics options with the dataset pipeline."),
      ("experimental_map_and_batch_fusion", bool,
       "Whether to fuse map and batch transformations."),
      ("experimental_map_and_filter_fusion", bool,
       "Whether to fuse map and filter transformations."),
      ("experimental_map_fusion", bool, "Whether to fuse map transformations."),
      ("experimental_map_parallelization", bool,
       "Whether to parallelize stateless map transformations."),
      ("experimental_map_vectorization", bool,
       "Whether to vectorize map transformations."),
      ("experimental_noop_elimination", bool,
       "Whether to eliminate no-op transformations."),
      ("experimental_shuffle_and_repeat_fusion", bool,
       "Whether to fuse shuffle and repeat transformations."),
      ("experimental_numa_aware", bool,
       "Whether to use NUMA-aware operations."),
  ]:

    def _make_getter(name):  # pylint: disable=no-self-argument
      # Factory binds `name` now, so the getter is not affected by the loop
      # variable's final value (late-binding closure pitfall).
      def getter(self):
        return getattr(self, "_" + name)

      return getter

    def _make_setter(name, ty):  # pylint: disable=no-self-argument
      def setter(self, value):
        # Reject values of the wrong type (note: this also rejects None, so
        # an option cannot be "unset" again once set).
        if not isinstance(value, ty):
          raise TypeError(
              "Attempting to set the option %s to incompatible value: %r when "
              "it expects %r" % (name, value, ty))
        setattr(self, "_" + name, value)

      return setter

    # Install the default and the property on the class under construction.
    vars()["_" + _name] = None
    vars()[_name] = property(
        _make_getter(_name), _make_setter(_name, _ty), None, _docstring)

  def __init__(self):
    pass

  def __eq__(self, other):
    # NOTE(review): set options land in the instance `__dict__` while unset
    # ones remain class-level defaults, so this compares only explicitly-set
    # options — appears intended. Also, defining `__eq__` without `__hash__`
    # makes instances unhashable on Python 3 — confirm that is acceptable.
    if isinstance(other, self.__class__):
      return self.__dict__ == other.__dict__
    else:
      return False

  def __ne__(self, other):
    return not self.__eq__(other)

  def _static_optimizations(self):
    """Produces the list of enabled static optimizations."""
    # Each `experimental_<opt>` boolean maps to a graph-rewrite of the same
    # name.
    experimental_optimizations = [
        "filter_fusion",
        "hoist_random_uniform",
        "map_and_batch_fusion",
        "map_and_filter_fusion",
        "map_fusion",
        "map_parallelization",
        "map_vectorization",
        "noop_elimination",
        "shuffle_and_repeat_fusion",
    ]
    result = []
    for exp_opt in experimental_optimizations:
      if getattr(self, "experimental_" + exp_opt):
        result.append(exp_opt)
    # Options whose rewrite name differs from the option name.
    if getattr(self, "experimental_numa_aware"):
      result.append("make_numa_aware")
    # `experimental_deterministic` explicitly set to False (not merely unset)
    # enables the sloppy rewrite.
    if getattr(self, "experimental_deterministic") is False:
      result.append("make_sloppy")
    experimental_stats_options = getattr(self, "experimental_stats")
    if experimental_stats_options and getattr(experimental_stats_options,
                                              "latency_all_edges"):
      result.append("latency_all_edges")
    return result

  def merge(self, options):
    """Merges itself with the given `tf.data.Options`.

    The given `tf.data.Options` can be merged as long as there does not exist
    an attribute that is set to different values in `self` and `options`.

    Args:
      options: a `tf.data.Options` to merge with

    Raises:
      ValueError: if the given `tf.data.Options` cannot be merged

    Returns:
      New `tf.data.Options()` object which is the result of merging self with
      the input `tf.data.Options`.
    """
    result = Options()
    # Copy set options from both operands into `result`, failing on conflict.
    for other in [self, options]:
      for name in [
          "experimental_autotune",
          "experimental_deterministic",
          "experimental_filter_fusion",
          "experimental_hoist_random_uniform",
          "experimental_map_and_batch_fusion",
          "experimental_map_and_filter_fusion",
          "experimental_map_fusion",
          "experimental_map_parallelization",
          "experimental_map_vectorization",
          "experimental_noop_elimination",
          "experimental_numa_aware",
          "experimental_shuffle_and_repeat_fusion",
          "experimental_stats",
      ]:
        this = getattr(result, name)
        that = getattr(other, name)
        if that is not None:
          if this is None:
            setattr(result, name, that)
          elif this != that:
            raise ValueError(
                "Cannot merge incompatible values of option: %s" % (name))
    return result
class DatasetSource(DatasetV2):
  """Abstract class representing a dataset with no inputs.

  Source datasets (e.g. `TensorDataset`, `TensorSliceDataset`) originate
  data rather than transform another dataset, so their input list is empty.
  """

  def _inputs(self):
    return []
class UnaryDataset(DatasetV2):
  """Abstract class representing a dataset with one input.

  Subclasses pass their upstream dataset to `__init__`; `_inputs()` then
  reports it as the single input edge.
  """

  def __init__(self, input_dataset):
    super(UnaryDataset, self).__init__()
    # The single upstream dataset this transformation consumes.
    self._input_dataset = input_dataset

  def _inputs(self):
    return [self._input_dataset]
class UnaryUnchangedStructureDataset(UnaryDataset):
  """Represents a unary dataset with the same input and output structure.

  Structure accessors simply forward to the input dataset, so subclasses
  only need to implement the op construction.
  """

  @property
  def output_classes(self):
    return self._input_dataset.output_classes  # pylint: disable=protected-access

  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes  # pylint: disable=protected-access

  @property
  def output_types(self):
    return self._input_dataset.output_types  # pylint: disable=protected-access
class TensorDataset(DatasetSource):
  """A `Dataset` with a single element, viz. a nested structure of tensors."""

  def __init__(self, tensors):
    """See `Dataset.from_tensors()` for details."""
    super(TensorDataset, self).__init__()
    with ops.name_scope("tensors"):
      # Normalize every leaf: sparse values become `SparseTensor`s and all
      # other values are converted to dense `tf.Tensor`s.
      components = []
      for i, component in enumerate(nest.flatten(tensors)):
        if sparse_tensor_lib.is_sparse(component):
          components.append(
              sparse_tensor_lib.SparseTensor.from_value(component))
        else:
          components.append(
              ops.convert_to_tensor(component, name="component_%d" % i))
      tensors = nest.pack_sequence_as(tensors, components)
    self._tensors = sparse.serialize_sparse_tensors(tensors)
    self._output_classes = sparse.get_classes(tensors)
    flat = nest.flatten(tensors)
    self._output_shapes = nest.pack_sequence_as(
        tensors, [component.get_shape() for component in flat])
    self._output_types = nest.pack_sequence_as(
        tensors, [component.dtype for component in flat])

  def _as_variant_tensor(self):
    return gen_dataset_ops.tensor_dataset(
        nest.flatten(self._tensors),
        output_shapes=nest.flatten(
            sparse.as_dense_shapes(self.output_shapes, self.output_classes)))

  @property
  def output_classes(self):
    return self._output_classes

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types
class TensorSliceDataset(DatasetSource):
  """A `Dataset` of slices from a nested structure of tensors."""

  def __init__(self, tensors):
    """See `Dataset.from_tensor_slices()` for details."""
    super(TensorSliceDataset, self).__init__()
    with ops.name_scope("tensors"):
      # Normalize every leaf: sparse values become `SparseTensor`s and all
      # other values are converted to dense `tf.Tensor`s.
      components = []
      for i, component in enumerate(nest.flatten(tensors)):
        if sparse_tensor_lib.is_sparse(component):
          components.append(
              sparse_tensor_lib.SparseTensor.from_value(component))
        else:
          components.append(
              ops.convert_to_tensor(component, name="component_%d" % i))
      tensors = nest.pack_sequence_as(tensors, components)
    # All components are sliced along their leading ("batch") dimension, so
    # that dimension must be compatible across all of them.
    flat_tensors = nest.flatten(tensors)
    batch_dim = tensor_shape.Dimension(
        tensor_shape.dimension_value(flat_tensors[0].get_shape()[0]))
    for component in flat_tensors[1:]:
      batch_dim.assert_is_compatible_with(
          tensor_shape.Dimension(
              tensor_shape.dimension_value(component.get_shape()[0])))
    self._tensors = sparse.serialize_many_sparse_tensors(tensors)
    self._output_classes = sparse.get_classes(tensors)
    # Per-element structure: the leading (sliced) dimension is dropped.
    self._output_shapes = nest.pack_sequence_as(
        tensors, [component.get_shape()[1:] for component in flat_tensors])
    self._output_types = nest.pack_sequence_as(
        tensors, [component.dtype for component in flat_tensors])

  def _as_variant_tensor(self):
    return gen_dataset_ops.tensor_slice_dataset(
        nest.flatten(self._tensors),
        output_shapes=nest.flatten(
            sparse.as_dense_shapes(self.output_shapes, self.output_classes)))

  @property
  def output_classes(self):
    return self._output_classes

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types
class SparseTensorSliceDataset(DatasetSource):
  """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""

  def __init__(self, sparse_tensor):
    """See `Dataset.from_sparse_tensor_slices()` for details."""
    super(SparseTensorSliceDataset, self).__init__()
    if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
      raise TypeError("`sparse_tensor` must be a `tf.SparseTensor` object.")
    self._sparse_tensor = sparse_tensor

  def _as_variant_tensor(self):
    st = self._sparse_tensor
    return gen_dataset_ops.sparse_tensor_slice_dataset(
        st.indices, st.values, st.dense_shape)

  @property
  def output_classes(self):
    # Each element is the (indices, values, dense_shape) triple of a
    # rank-(N-1) sparse tensor, exposed as three dense tensors.
    return (ops.Tensor, ops.Tensor, ops.Tensor)

  @property
  def output_shapes(self):
    indices_shape = self._sparse_tensor.indices.get_shape()
    shape_shape = self._sparse_tensor.dense_shape.get_shape()
    # Slicing off the leading dimension reduces the rank by one; merge the
    # two static sources of that rank.
    rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
    num_values = tensor_shape.Dimension(None)
    indices = tensor_shape.TensorShape([num_values, rank])
    values = tensor_shape.TensorShape([num_values])
    dense_shape = tensor_shape.TensorShape([rank])
    return (indices, values, dense_shape)

  @property
  def output_types(self):
    return (dtypes.int64, self._sparse_tensor.dtype, dtypes.int64)
class _NestedDatasetComponent(object):
  """The structure of a `Dataset` nested in a component of another `Dataset`.

  A `StructuredFunctionWrapper` around a function that returns a `Dataset` as
  one of its components will have a `NestedDatasetComponent` in the
  corresponding position in the `output_classes`, `output_shapes`, and
  `output_types` properties.

  TODO(b/110122868): Add this class, or something equivalent, to the public
  API. We are considering revising the public API for accessing Dataset
  structure (`output_classes` etc.) based on experience with nested datasets
  and other custom component types.
  """

  def __init__(self,
               dataset=None,
               output_shapes=None,
               output_types=None,
               output_classes=None):
    # Exactly one of the two specification styles must be used: either a
    # `dataset` whose structure is copied, or all three explicit components.
    explicit = (output_classes, output_shapes, output_types)
    if dataset is None:
      if any(component is None for component in explicit):
        raise ValueError(
            "Either `dataset`, or all of `output_classes`, "
            "`output_shapes`, and `output_types` must be specified.")
      self._output_classes = output_classes
      self._output_shapes = output_shapes
      self._output_types = output_types
    else:
      if any(component is not None for component in explicit):
        raise ValueError(
            "Either `dataset`, or all of `output_classes`, "
            "`output_shapes`, and `output_types` must be specified.")
      self._output_classes = dataset.output_classes
      self._output_shapes = dataset.output_shapes
      self._output_types = dataset.output_types

  @property
  def output_classes(self):
    return self._output_classes

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types
class _VariantDataset(DatasetV2):
  """A Dataset wrapper around a `tf.variant`-typed function argument."""

  def __init__(self, dataset_variant, structure):
    super(_VariantDataset, self).__init__()
    # The `tf.variant` tensor that represents the wrapped dataset.
    self._dataset_variant = dataset_variant
    # Carries the output_classes/shapes/types of the wrapped dataset
    # (e.g. a `_NestedDatasetComponent`).
    self._structure = structure

  def _as_variant_tensor(self):
    return self._dataset_variant

  def _inputs(self):
    # The wrapped dataset's provenance is not visible across the function
    # boundary, so report no inputs.
    return []

  @property
  def output_classes(self):
    return self._structure.output_classes

  @property
  def output_shapes(self):
    return self._structure.output_shapes

  @property
  def output_types(self):
    return self._structure.output_types
class StructuredFunctionWrapper(object):
"""A wrapper for `Defun` that supports structured arguments and return values.
"""
def __init__(self,
func,
transformation_name,
dataset=None,
input_classes=None,
input_shapes=None,
input_types=None,
add_to_graph=True,
defun_kwargs=None):
"""Creates a new `StructuredFunctionWrapper` for the given function.
Args:
func: A function from a nested structure to another nested structure.
transformation_name: Human-readable name of the transformation in which
this function is being instantiated, for error messages.
dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
dataset will be assumed as the structure for `func` arguments; otherwise
`input_classes`, `input_shapes`, and `input_types` must be defined.
input_classes: (Optional.) A nested structure of `type`. If given, this
argument defines the Python types for `func` arguments.
input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
given, this argument defines the shapes and structure for `func`
arguments.
input_types: (Optional.) A nested structure of `tf.DType`. If given, this
argument defines the element types and structure for `func` arguments.
add_to_graph: (Optional.) If `True`, the function will be added to the
default graph.
defun_kwargs: (Optional.) A dictionary mapping string argument names to
values. If supplied, will be passed to `function.Defun()` as keyword
arguments.
Raises:
ValueError: If an invalid combination of `dataset`, `input_classes`,
`input_shapes`, and `input_types` is passed.
"""
if dataset is None:
if input_classes is None or input_shapes is None or input_types is None:
raise ValueError("Either `dataset`, or all of `input_classes`, "
"`input_shapes`, and `input_types` must be specified.")
self._input_shapes = input_shapes
self._input_types = input_types
self._input_classes = input_classes
else:
if not (input_classes is None and input_shapes is None and
input_types is None):
raise ValueError("Either `dataset`, or all of `input_classes`, "
"`input_shapes`, and `input_types` must be specified.")
self._input_shapes = dataset.output_shapes
self._input_types = dataset.output_types
self._input_classes = dataset.output_classes
self._transformation_name = transformation_name
readable_transformation_name = transformation_name.replace(
".", "_")[:-2] if len(transformation_name) > 2 else ""
self._func_name = "_".join([
readable_transformation_name,
function_utils.get_func_name(func),
str(ops.uid())
])
if defun_kwargs is None:
defun_kwargs = {}
@function.Defun(
*self._defun_args(), func_name=self._func_name, **defun_kwargs)
def tf_data_structured_function_wrapper(*args):
"""Wrapper for passing nested structures to and from tf.data functions."""
flat_args = []
for arg, arg_class, arg_shape, arg_type in zip(
args,
nest.flatten(self._input_classes),
nest.flatten(self._input_shapes),
nest.flatten(self._input_types)):
# TODO(b/110122868): Add a registration mechanism for new component
# types.
if arg_class is sparse_tensor_lib.SparseTensor:
arg = sparse.deserialize_sparse_tensors(
arg, arg_type, arg_shape, arg_class)
arg.indices.set_shape([None, arg_shape.ndims])
arg.dense_shape.set_shape([arg_shape.ndims])
elif isinstance(arg_class, _NestedDatasetComponent):
arg = _VariantDataset(arg, arg_class)
else:
arg.set_shape(arg_shape)
flat_args.append(arg)
nested_args = nest.pack_sequence_as(self._input_classes, flat_args)
if not _should_unpack_args(nested_args):
nested_args = (nested_args,)
ret = func(*nested_args)
# If `func` returns a list of tensors, `nest.flatten()` and
# `ops.convert_to_tensor()` would conspire to attempt to stack
# those tensors into a single tensor, because the customized
# version of `nest.flatten()` does not recurse into lists. Since
# it is more likely that the list arose from returning the
# result of an operation (such as `tf.py_func()`) that returns a
# list of not-necessarily-stackable tensors, we treat the
# returned value is a `tuple` instead. A user wishing to pack
# the return value into a single tensor can use an explicit
# `tf.stack()` before returning.
if isinstance(ret, list):
ret = tuple(ret)
# Convert any `SparseTensorValue`s to `SparseTensor`s and all other
# values to tensors.
flat_ret = []
flat_classes = []
flat_shapes = []
flat_types = []
for t in nest.flatten(ret):
# TODO(b/110122868): Add a registration mechanism for new component
# types.
if sparse_tensor_lib.is_sparse(t):
t = sparse_tensor_lib.SparseTensor.from_value(t)
flat_ret.append(sparse.serialize_sparse_tensors(t))
flat_classes.append(sparse_tensor_lib.SparseTensor)
flat_shapes.append(t.get_shape())
flat_types.append(t.dtype)
elif isinstance(t, DatasetV2):
flat_ret.append(t._as_variant_tensor()) # pylint: disable=protected-access
component = _NestedDatasetComponent(t)
flat_classes.append(component)
flat_shapes.append(component)
flat_types.append(component)
if t.options() != Options():
warnings.warn("Encountered a nested dataset with non-default "
"options. These options will not be propagated to "
"the outer dataset.")
else:
try:
t = ops.convert_to_tensor(t)
except (ValueError, TypeError):
raise TypeError("Unsupported return value from function passed to "
"%s: %s." % (transformation_name, t))
flat_ret.append(t)
flat_classes.append(ops.Tensor)
flat_shapes.append(t.get_shape())
flat_types.append(t.dtype)
ret = nest.pack_sequence_as(ret, flat_ret)
self._output_classes = nest.pack_sequence_as(ret, flat_classes)
self._output_shapes = nest.pack_sequence_as(ret, flat_shapes)
self._output_types = nest.pack_sequence_as(ret, flat_types)
_warn_if_collections(transformation_name)
return flat_ret
self._function = tf_data_structured_function_wrapper
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
else:
# Use the private method that will execute
# `tf_data_structured_function_wrapper` but delay adding it to the graph
# in case (e.g.) we need to rerun the function.
self._function._create_definition_if_needed() # pylint: disable=protected-access
def _defun_args(self):
"""Returns a flat list of `tf.DType` for the input element structure."""
ret = []
for input_type, input_class in zip(nest.flatten(self._input_types),
nest.flatten(self._input_classes)):
# TODO(b/110122868): Add a registration mechanism for new component types.
if input_class is sparse_tensor_lib.SparseTensor:
ret.append(dtypes.variant)
elif isinstance(input_class, _NestedDatasetComponent):
ret.append(dtypes.variant)
else:
assert isinstance(input_type, dtypes.DType)
ret.append(input_type)
return ret
  @property
  def output_classes(self):
    """The nested structure of component classes of the function's result."""
    return self._output_classes
  @property
  def output_shapes(self):
    """The nested structure of `tf.TensorShape`s of the function's result."""
    return self._output_shapes
  @property
  def output_types(self):
    """The nested structure of `tf.DType`s of the function's result."""
    return self._output_types
  @property
  def function(self):
    """The `Defun`-decorated function that wraps the user-supplied function."""
    return self._function
def flat_structure(dataset):
  """Helper for setting `output_shapes` and `output_types` attrs of Dataset ops.

  Most Dataset op constructors expect `output_shapes` and `output_types`
  arguments that represent the flattened structure of an element. This helper
  function generates these attrs as a keyword argument dictionary, allowing
  `Dataset._as_variant_tensor()` implementations to pass
  `**flat_structure(self)` to the op constructor.

  Args:
    dataset: A `tf.data.Dataset`.

  Returns:
    A dictionary of keyword arguments that can be passed to many Dataset op
    constructors.
  """
  flat_classes = []
  flat_shapes = []
  flat_types = []
  for cls, shape, typ in zip(
      nest.flatten(dataset.output_classes), nest.flatten(dataset.output_shapes),
      nest.flatten(dataset.output_types)):
    if isinstance(cls, _NestedDatasetComponent):
      # A nested dataset component stores its own structure; substitute the
      # inner dataset's structure for the component placeholder.
      flat_classes.append(cls.output_classes)
      flat_shapes.append(shape.output_shapes)
      flat_types.append(typ.output_types)
    else:
      flat_classes.append(cls)
      flat_shapes.append(shape)
      flat_types.append(typ)
  packed_classes = nest.pack_sequence_as(dataset.output_classes, flat_classes)
  packed_shapes = nest.pack_sequence_as(dataset.output_shapes, flat_shapes)
  packed_types = nest.pack_sequence_as(dataset.output_types, flat_types)
  return {
      "output_shapes":
          nest.flatten(sparse.as_dense_shapes(packed_shapes, packed_classes)),
      "output_types":
          nest.flatten(sparse.as_dense_types(packed_types, packed_classes)),
  }
class _GeneratorDataset(DatasetSource):
  """A `Dataset` that generates elements by invoking a function."""
  def __init__(self, init_args, init_func, next_func, finalize_func):
    """Constructs a `_GeneratorDataset`.
    Args:
      init_args: A nested structure representing the arguments to `init_func`.
      init_func: A TensorFlow function that will be called on `init_args` each
        time a C++ iterator over this dataset is constructed. Returns a nested
        structure representing the "state" of the dataset.
      next_func: A TensorFlow function that will be called on the result of
        `init_func` to produce each element, and that raises `OutOfRangeError`
        to terminate iteration.
      finalize_func: A TensorFlow function that will be called on the result of
        `init_func` immediately before a C++ iterator over this dataset is
        destroyed. The return value is ignored.
    """
    super(_GeneratorDataset, self).__init__()
    # These members will be initialized below, once `init_func` is wrapped.
    self._state_classes = None
    self._state_shapes = None
    self._state_types = None
    self._init_args = init_args
    # Infer the structure (classes, shapes, and types) of `init_args` so that
    # `init_func` can be wrapped with a matching input signature.
    init_args_classes = sparse.get_classes(init_args)
    init_args_shapes = nest.pack_sequence_as(
        init_args, [t.get_shape() for t in nest.flatten(init_args)])
    init_args_types = nest.pack_sequence_as(
        init_args, [t.dtype for t in nest.flatten(init_args)])
    wrapped_init_func = StructuredFunctionWrapper(
        init_func,
        self._transformation_name(),
        input_classes=init_args_classes,
        input_shapes=init_args_shapes,
        input_types=init_args_types)
    # The iterator "state" has whatever structure `init_func` returned.
    self._state_classes = wrapped_init_func.output_classes
    self._state_shapes = wrapped_init_func.output_shapes
    self._state_types = wrapped_init_func.output_types
    self._init_func = wrapped_init_func.function
    # `next_func` consumes the state and produces this dataset's elements, so
    # its outputs define the dataset's element structure.
    wrapped_next_func = StructuredFunctionWrapper(
        next_func,
        self._transformation_name(),
        input_classes=self._state_classes,
        input_shapes=self._state_shapes,
        input_types=self._state_types)
    self._output_classes = wrapped_next_func.output_classes
    self._output_shapes = wrapped_next_func.output_shapes
    self._output_types = wrapped_next_func.output_types
    self._next_func = wrapped_next_func.function
    # `finalize_func` also consumes the state; its outputs are ignored.
    wrapped_finalize_func = StructuredFunctionWrapper(
        finalize_func,
        self._transformation_name(),
        input_classes=self._state_classes,
        input_shapes=self._state_shapes,
        input_types=self._state_types)
    self._finalize_func = wrapped_finalize_func.function
  def _as_variant_tensor(self):
    # The op receives the flattened `init_args` followed by the tensors
    # captured by `init_func`, then the captures of the other two functions.
    return gen_dataset_ops.generator_dataset(
        nest.flatten(self._init_args) + self._init_func.captured_inputs,
        self._next_func.captured_inputs,
        self._finalize_func.captured_inputs,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **flat_structure(self))
  @property
  def output_classes(self):
    return self._output_classes
  @property
  def output_shapes(self):
    return self._output_shapes
  @property
  def output_types(self):
    return self._output_types
  def _transformation_name(self):
    return "Dataset.from_generator()"
class ZipDataset(DatasetV2):
  """A `Dataset` that zips its inputs together."""

  def __init__(self, datasets):
    """See `Dataset.zip()` for details."""
    super(ZipDataset, self).__init__()
    # Validate eagerly so the user gets a clear error at construction time.
    for ds in nest.flatten(datasets):
      if isinstance(ds, DatasetV2):
        continue
      if isinstance(ds, list):
        raise TypeError(
            "The argument to `Dataset.zip()` must be a nested structure of "
            "`Dataset` objects. Nested structures do not support Python "
            "lists; please use a tuple instead.")
      raise TypeError("The argument to `Dataset.zip()` must be a nested "
                      "structure of `Dataset` objects.")
    self._datasets = datasets

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    flat_variants = [
        ds._as_variant_tensor() for ds in nest.flatten(self._datasets)
    ]
    # pylint: enable=protected-access
    return gen_dataset_ops.zip_dataset(flat_variants, **flat_structure(self))

  def _inputs(self):
    return nest.flatten(self._datasets)

  @property
  def output_classes(self):
    flat = [ds.output_classes for ds in nest.flatten(self._datasets)]
    return nest.pack_sequence_as(self._datasets, flat)

  @property
  def output_shapes(self):
    flat = [ds.output_shapes for ds in nest.flatten(self._datasets)]
    return nest.pack_sequence_as(self._datasets, flat)

  @property
  def output_types(self):
    flat = [ds.output_types for ds in nest.flatten(self._datasets)]
    return nest.pack_sequence_as(self._datasets, flat)
class ConcatenateDataset(DatasetV2):
  """A `Dataset` that concatenates its input with given dataset."""
  def __init__(self, input_dataset, dataset_to_concatenate):
    """See `Dataset.concatenate()` for details."""
    super(ConcatenateDataset, self).__init__()
    self._input_dataset = input_dataset
    self._dataset_to_concatenate = dataset_to_concatenate
    # Element types must match exactly across the two datasets.
    self._output_types = input_dataset.output_types
    if self._output_types != dataset_to_concatenate.output_types:
      raise TypeError(
          "Two datasets to concatenate have different types %s and %s" %
          (self._output_types, dataset_to_concatenate.output_types))
    # Component classes (e.g. dense tensor vs. sparse tensor) must also match.
    self._output_classes = input_dataset.output_classes
    if self._output_classes != dataset_to_concatenate.output_classes:
      raise TypeError(
          "Two datasets to concatenate have different classes %s and %s" %
          (self._output_classes, dataset_to_concatenate.output_classes))
    # Shapes need not match exactly: each output shape is relaxed to the most
    # specific shape compatible with both inputs.
    input_shapes = self._input_dataset.output_shapes
    self._output_shapes = nest.pack_sequence_as(input_shapes, [
        ts1.most_specific_compatible_shape(ts2)
        for (ts1, ts2) in zip(
            nest.flatten(input_shapes),
            nest.flatten(self._dataset_to_concatenate.output_shapes))
    ])
    # NOTE(review): `_input_datasets` is not read anywhere in this class
    # (`_inputs()` below builds its own list); presumably consumed elsewhere
    # -- verify before removing.
    self._input_datasets = [input_dataset, dataset_to_concatenate]
  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_dataset_ops.concatenate_dataset(
        self._input_dataset._as_variant_tensor(),
        self._dataset_to_concatenate._as_variant_tensor(),
        **flat_structure(self))
    # pylint: enable=protected-access
  def _inputs(self):
    return [self._input_dataset, self._dataset_to_concatenate]
  @property
  def output_classes(self):
    return self._output_classes
  @property
  def output_shapes(self):
    return self._output_shapes
  @property
  def output_types(self):
    return self._output_types
class RepeatDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that repeats its input several times."""

  def __init__(self, input_dataset, count):
    """See `Dataset.repeat()` for details."""
    super(RepeatDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    if count is None:
      # NOTE(review): -1 appears to be the kernel's sentinel for "repeat
      # indefinitely" -- confirm against the RepeatDataset op definition.
      self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
    else:
      self._count = ops.convert_to_tensor(
          count, dtype=dtypes.int64, name="count")

  def _as_variant_tensor(self):
    input_variant = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.repeat_dataset(
        input_variant, count=self._count, **flat_structure(self))
class RangeDataset(DatasetSource):
  """A `Dataset` of a step separated range of values."""

  def __init__(self, *args):
    """See `Dataset.range()` for details."""
    super(RangeDataset, self).__init__()
    self._parse_args(*args)

  def _parse_args(self, *args):
    """Parse arguments according to the same rules as the `range()` builtin."""
    if len(args) == 1:
      start, stop, step = 0, args[0], 1
    elif len(args) == 2:
      start, stop, step = args[0], args[1], 1
    elif len(args) == 3:
      start, stop, step = args
    else:
      raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
    self._start = self._build_tensor(start, "start")
    self._stop = self._build_tensor(stop, "stop")
    self._step = self._build_tensor(step, "step")

  def _build_tensor(self, int64_value, name):
    """Converts `int64_value` to a `tf.int64` tensor named `name`."""
    return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)

  def _as_variant_tensor(self):
    return gen_dataset_ops.range_dataset(
        start=self._start,
        stop=self._stop,
        step=self._step,
        **flat_structure(self))

  @property
  def output_classes(self):
    return ops.Tensor

  @property
  def output_shapes(self):
    # Each element is a scalar value from the range.
    return tensor_shape.scalar()

  @property
  def output_types(self):
    return dtypes.int64
class CacheDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that caches elements of its input."""

  def __init__(self, input_dataset, filename):
    """See `Dataset.cache()` for details."""
    super(CacheDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    self._filename = ops.convert_to_tensor(
        filename, dtype=dtypes.string, name="filename")

  def _as_variant_tensor(self):
    input_variant = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.cache_dataset(
        input_variant, filename=self._filename, **flat_structure(self))
class ShuffleDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that randomly shuffles the elements of its input."""

  def __init__(self,
               input_dataset,
               buffer_size,
               seed=None,
               reshuffle_each_iteration=None):
    """Randomly shuffles the elements of this dataset.

    Args:
      input_dataset: The input dataset.
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
        number of elements from this dataset from which the new
        dataset will sample.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        random seed that will be used to create the distribution. See
        `tf.set_random_seed` for behavior.
      reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
        that the dataset should be pseudorandomly reshuffled each time it is
        iterated over. (Defaults to `True`.)

    Returns:
      A `Dataset`.

    Raises:
      ValueError: if invalid arguments are provided.
    """
    super(ShuffleDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    # `get_seed` derives a pair of seeds from the (possibly `None`) user seed.
    self._seed, self._seed2 = random_seed.get_seed(seed)
    self._reshuffle_each_iteration = (
        True if reshuffle_each_iteration is None else reshuffle_each_iteration)

  def _as_variant_tensor(self):
    input_variant = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.shuffle_dataset(
        input_variant,
        buffer_size=self._buffer_size,
        seed=self._seed,
        seed2=self._seed2,
        reshuffle_each_iteration=self._reshuffle_each_iteration,
        **flat_structure(self))
class TakeDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` containing the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.take()` for details."""
    super(TakeDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")

  def _as_variant_tensor(self):
    input_variant = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.take_dataset(
        input_variant, count=self._count, **flat_structure(self))
class SkipDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` skipping the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.skip()` for details."""
    super(SkipDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")

  def _as_variant_tensor(self):
    input_variant = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.skip_dataset(
        input_variant, count=self._count, **flat_structure(self))
class BatchDataset(UnaryDataset):
  """A `Dataset` that batches contiguous elements from its input."""
  def __init__(self, input_dataset, batch_size, drop_remainder):
    """See `Dataset.batch()` for details."""
    super(BatchDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
  def _as_variant_tensor(self):
    # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
    # NOTE(review): `is False` (rather than `not ...`) looks deliberate:
    # `smart_constant_value` presumably returns `None` when the value is not
    # statically known, and only a statically-False `drop_remainder` may use
    # the v1 op -- confirm against `smart_cond` documentation.
    if smart_cond.smart_constant_value(self._drop_remainder) is False:
      return gen_dataset_ops.batch_dataset(
          self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
          batch_size=self._batch_size,
          **flat_structure(self))
    else:
      return gen_dataset_ops.batch_dataset_v2(
          self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
          batch_size=self._batch_size,
          drop_remainder=self._drop_remainder,
          **flat_structure(self))
  @property
  def output_classes(self):
    return self._input_dataset.output_classes
  @property
  def output_shapes(self):
    # The leading (batch) dimension is statically known only when
    # `drop_remainder` is statically `True`; otherwise the final batch may be
    # smaller, so the dimension is left unknown (`None`).
    input_shapes = self._input_dataset.output_shapes
    return nest.pack_sequence_as(input_shapes, [
        tensor_shape.vector(
            tensor_util.constant_value(self._batch_size) if smart_cond.
            smart_constant_value(self._drop_remainder) else None).concatenate(s)
        for s in nest.flatten(self._input_dataset.output_shapes)
    ])
  @property
  def output_types(self):
    return self._input_dataset.output_types
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
"""Returns `True` if `input_component_shape` can be padded to `padded_shape`.
Args:
padded_shape: A `tf.TensorShape`.
input_component_shape: A `tf.TensorShape`.
Returns:
`True` if `input_component_shape` can be padded to `padded_shape`, otherwise
`False`.
"""
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(
padded_shape.dims, input_component_shape.dims):
if (padded_dim.value is not None and input_dim.value is not None
and padded_dim.value < input_dim.value):
return False
return True
def _padded_shape_to_tensor(padded_shape, input_component_shape):
  """Converts `padded_shape` to a `tf.Tensor` representing that shape.
  Args:
    padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python
      sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.
    input_component_shape: A `tf.TensorShape`, with which `padded_shape` must
      be compatible.
  Returns:
    A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.
  Raises:
    ValueError: If `padded_shape` is not a shape or not compatible with
      `input_component_shape`.
    TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.
  """
  try:
    # Try to convert the `padded_shape` to a `tf.TensorShape`
    padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
    # We will return the "canonical" tensor representation, which uses
    # `-1` in place of `None`.
    ret = ops.convert_to_tensor(
        [dim if dim is not None else -1
         for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)
  except (TypeError, ValueError):
    # The argument was not trivially convertible to a
    # `tf.TensorShape`, so fall back on the conversion to tensor
    # machinery.
    ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
    # In the fallback path the result must be validated explicitly: it has to
    # be a rank-1 tensor of `tf.int64` values.
    if ret.shape.dims is not None and len(ret.shape.dims) != 1:
      raise ValueError(
          "Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
          "shape was %s." % (padded_shape, ret.shape))
    if ret.dtype != dtypes.int64:
      raise TypeError(
          "Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
          "element type was %s." % (padded_shape, ret.dtype.name))
    # Recover a (possibly partially-known) `TensorShape` from the tensor so
    # that compatibility can be checked below.
    padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)
  if not _is_padded_shape_compatible_with(padded_shape_as_shape,
                                          input_component_shape):
    raise ValueError("The padded shape %s is not compatible with the "
                     "corresponding input component shape %s."
                     % (padded_shape_as_shape, input_component_shape))
  return ret
def _padding_value_to_tensor(value, output_type):
  """Converts the padding value to a tensor.

  Args:
    value: The padding value.
    output_type: Its expected dtype.

  Returns:
    A scalar `Tensor`.

  Raises:
    ValueError: if the padding value is not a scalar.
    TypeError: if the padding value's type does not match `output_type`.
  """
  padding_value = ops.convert_to_tensor(value, name="padding_value")
  # Padding values are broadcast, so only scalars are accepted.
  if not padding_value.shape.is_compatible_with(tensor_shape.scalar()):
    raise ValueError(
        "Padding value should be a scalar, but is not: %s" % padding_value)
  if padding_value.dtype != output_type:
    raise TypeError(
        "Padding value tensor (%s) does not match output type: %s" %
        (padding_value, output_type))
  return padding_value
def _default_padding(input_dataset):
  """Returns default padding tensors in a structure matching `input_dataset`."""

  def make_zero(t):
    # Strings pad with the empty string; numeric types pad with a zero of the
    # matching numpy dtype. Variants have no meaningful zero value.
    if t.base_dtype == dtypes.string:
      return ""
    if t.base_dtype == dtypes.variant:
      raise TypeError("Unable to create padding for field of type 'variant'")
    return np.zeros_like(t.as_numpy_dtype())

  return nest.map_structure(make_zero, input_dataset.output_types)
class PaddedBatchDataset(UnaryDataset):
  """A `Dataset` that batches and pads contiguous elements from its input."""
  def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,
               drop_remainder):
    """See `Dataset.batch()` for details."""
    super(PaddedBatchDataset, self).__init__(input_dataset)
    if sparse.any_sparse(input_dataset.output_classes):
      # TODO(b/63669786): support batching of sparse tensors
      raise TypeError(
          "Batching of padded sparse tensors is not currently supported")
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    # When no padding values are given, fall back to type-appropriate zeros
    # (see `_default_padding`).
    padding_values = (
        padding_values
        if padding_values is not None else _default_padding(input_dataset))
    # Normalize the user-provided `padded_shapes` to one 1-D int64 tensor per
    # flattened input component, validating compatibility along the way.
    flat_padded_shapes = nest.flatten_up_to(input_dataset.output_shapes,
                                            padded_shapes)
    flat_padded_shapes_as_tensors = []
    for input_component_shape, padded_shape in zip(
        nest.flatten(input_dataset.output_shapes), flat_padded_shapes):
      flat_padded_shapes_as_tensors.append(
          _padded_shape_to_tensor(padded_shape, input_component_shape))
    self._padded_shapes = nest.pack_sequence_as(input_dataset.output_shapes,
                                                flat_padded_shapes_as_tensors)
    # Convert each padding value to a scalar tensor of the matching type.
    self._padding_values = nest.map_structure_up_to(
        input_dataset.output_shapes, _padding_value_to_tensor, padding_values,
        input_dataset.output_types)
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
  def _as_variant_tensor(self):
    # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
    # NOTE(review): `is False` looks deliberate: `smart_constant_value`
    # presumably returns `None` when `drop_remainder` is not statically known,
    # in which case the v2 op (which takes `drop_remainder` as an input) must
    # be used -- confirm against `smart_cond` documentation.
    if smart_cond.smart_constant_value(self._drop_remainder) is False:
      return gen_dataset_ops.padded_batch_dataset(
          self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          output_shapes=nest.flatten(
              sparse.as_dense_shapes(self.output_shapes, self.output_classes)))
    else:
      return gen_dataset_ops.padded_batch_dataset_v2(
          self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          drop_remainder=self._drop_remainder,
          output_shapes=nest.flatten(
              sparse.as_dense_shapes(self.output_shapes, self.output_classes)))
  @property
  def output_classes(self):
    return self._input_dataset.output_classes
  @property
  def output_shapes(self):
    def _padded_shape_to_batch_shape(s):
      # Prepend the batch dimension, which is statically known only when
      # `drop_remainder` is statically `True`.
      return tensor_shape.vector(
          tensor_util.constant_value(self._batch_size) if smart_cond.
          smart_constant_value(self._drop_remainder) else None).concatenate(
              tensor_util.constant_value_as_shape(s))
    return nest.map_structure(_padded_shape_to_batch_shape, self._padded_shapes)
  @property
  def output_types(self):
    return self._input_dataset.output_types
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
def _warn_if_collections(transformation_name):
  """Prints warning message if the current graph uses common graph collections.

  NOTE(mrry): Currently a warning is only generated for lookup tables. Any
  variables created will be automatically hoisted out to the outermost scope
  using `init_scope()`. Some collections (such as for control-flow contexts)
  are benign and should not generate a warning.

  Args:
    transformation_name: A human-readable name for the transformation.
  """
  graph = ops.get_default_graph()
  if graph.get_collection(ops.GraphKeys.TABLE_INITIALIZERS):
    warnings.warn(
        "Creating lookup tables inside a function passed to %s is not "
        "supported. Create each table outside the function, and capture "
        "it inside the function to use it." % transformation_name)
class MapDataset(UnaryDataset):
  """A `Dataset` that maps a function over elements in its input."""

  def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):
    """See `Dataset.map()` for details."""
    super(MapDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism
    wrapped = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # The element structure of this dataset is whatever `map_func` returns.
    self._output_classes = wrapped.output_classes
    self._output_shapes = wrapped.output_shapes
    self._output_types = wrapped.output_types
    self._map_func = wrapped.function

  def _as_variant_tensor(self):
    input_t = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.map_dataset(
        input_t,
        self._map_func.captured_inputs,
        f=self._map_func,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        **flat_structure(self))

  @property
  def output_classes(self):
    return self._output_classes

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types

  def _transformation_name(self):
    return "Dataset.map()"
class MatchingFilesDataset(DatasetSource):
  """A `Dataset` that list the files according to the input patterns."""
  def __init__(self, patterns):
    """Constructs a dataset of filenames matching `patterns`.
    Args:
      patterns: A string or nested structure of strings, converted to a
        `tf.string` tensor of file patterns (presumably glob patterns --
        confirm against the `MatchingFilesDataset` kernel).
    """
    super(MatchingFilesDataset, self).__init__()
    self._patterns = ops.convert_to_tensor(
        patterns, dtype=dtypes.string, name="patterns")
  def _as_variant_tensor(self):
    return gen_dataset_ops.matching_files_dataset(self._patterns)
  @property
  def output_classes(self):
    # Each element is a plain dense tensor.
    return ops.Tensor
  @property
  def output_shapes(self):
    # Each element is a scalar (a single filename).
    return tensor_shape.scalar()
  @property
  def output_types(self):
    return dtypes.string
class ParallelMapDataset(MapDataset):
  """A `Dataset` that maps a function over elements in its input in parallel."""

  def __init__(self,
               input_dataset,
               map_func,
               num_parallel_calls,
               use_inter_op_parallelism=True):
    """See `Dataset.map()` for details."""
    super(ParallelMapDataset, self).__init__(input_dataset, map_func,
                                             use_inter_op_parallelism)
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")

  def _as_variant_tensor(self):
    input_t = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.parallel_map_dataset(
        input_t,
        self._map_func.captured_inputs,
        f=self._map_func,
        num_parallel_calls=self._num_parallel_calls,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        **flat_structure(self))
class FlatMapDataset(UnaryDataset):
  """A `Dataset` that maps a function over its input and flattens the result."""

  def __init__(self, input_dataset, map_func):
    """See `Dataset.flat_map()` for details."""
    super(FlatMapDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    wrapped = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    if not isinstance(wrapped.output_classes, _NestedDatasetComponent):
      raise TypeError("`map_func` must return a `Dataset` object.")
    # Unwrap the nested dataset component: this dataset's element structure is
    # that of the datasets returned by `map_func`.
    self._output_classes = wrapped.output_classes.output_classes
    self._output_types = wrapped.output_types.output_types
    self._output_shapes = wrapped.output_shapes.output_shapes
    self._map_func = wrapped.function

  def _as_variant_tensor(self):
    input_variant = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.flat_map_dataset(
        input_variant,
        self._map_func.captured_inputs,
        f=self._map_func,
        **flat_structure(self))

  @property
  def output_classes(self):
    return self._output_classes

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types

  def _transformation_name(self):
    return "Dataset.flat_map()"
class InterleaveDataset(FlatMapDataset):
  """A `Dataset` that maps a function over its input and interleaves the result.
  """

  def __init__(self, input_dataset, map_func, cycle_length, block_length):
    """See `Dataset.interleave()` for details."""
    super(InterleaveDataset, self).__init__(input_dataset, map_func)
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    input_variant = self._input_dataset._as_variant_tensor()
    captured = self._map_func.captured_inputs
    # pylint: enable=protected-access
    return gen_dataset_ops.interleave_dataset(
        input_variant,
        captured,
        self._cycle_length,
        self._block_length,
        f=self._map_func,
        **flat_structure(self))

  def _transformation_name(self):
    return "Dataset.interleave()"
class ParallelInterleaveDataset(FlatMapDataset):
  """A `Dataset` that maps a function over its input and interleaves the result.
  """

  def __init__(self, input_dataset, map_func, cycle_length, block_length,
               num_parallel_calls):
    """See `Dataset.interleave()` for details."""
    super(ParallelInterleaveDataset, self).__init__(input_dataset, map_func)
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    input_variant = self._input_dataset._as_variant_tensor()
    captured = self._map_func.captured_inputs
    # pylint: enable=protected-access
    return gen_dataset_ops.parallel_interleave_dataset_v2(
        input_variant,
        captured,
        self._cycle_length,
        self._block_length,
        self._num_parallel_calls,
        f=self._map_func,
        **flat_structure(self))

  def _transformation_name(self):
    return "Dataset.interleave()"
class FilterDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that filters its input according to a predicate function."""

  def __init__(self, input_dataset, predicate):
    """See `Dataset.filter()` for details."""
    super(FilterDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    wrapped = StructuredFunctionWrapper(
        predicate, self._transformation_name(), dataset=input_dataset)
    # The predicate must produce a single scalar boolean per element.
    if wrapped.output_types != dtypes.bool or not (
        wrapped.output_shapes.is_compatible_with(tensor_shape.scalar())):
      raise ValueError("`predicate` must return a scalar boolean tensor.")
    self._predicate = wrapped.function

  def _as_variant_tensor(self):
    input_variant = self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access
    return gen_dataset_ops.filter_dataset(
        input_variant,
        other_arguments=self._predicate.captured_inputs,
        predicate=self._predicate,
        **flat_structure(self))

  def _transformation_name(self):
    return "Dataset.filter()"
class PrefetchDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that asynchronously prefetches elements from its input."""

  def __init__(self, input_dataset, buffer_size):
    """See `Dataset.prefetch()` for details."""
    super(PrefetchDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    # A buffer size of -1 is the runtime's sentinel for auto-tuning.
    effective_size = -1 if buffer_size is None else buffer_size
    self._buffer_size = ops.convert_to_tensor(
        effective_size, dtype=dtypes.int64, name="buffer_size")

  def _as_variant_tensor(self):
    return gen_dataset_ops.prefetch_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        buffer_size=self._buffer_size,
        **flat_structure(self))
class WindowDataset(UnaryDataset):
  """A dataset that creates window datasets from the input elements."""

  def __init__(self, input_dataset, size, shift, stride, drop_remainder):
    """See `window_dataset()` for more details."""
    super(WindowDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    # Window geometry, captured as scalar tensors for the graph op.
    self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size")
    self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift")
    self._stride = ops.convert_to_tensor(
        stride, dtype=dtypes.int64, name="stride")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
    # Each output component is itself a dataset, so every leaf of the input
    # structure is wrapped in a _NestedDatasetComponent carrying that leaf's
    # class/shape/type. The nesting layout of the input is preserved.
    self._output_classes = nest.pack_sequence_as(
        input_dataset.output_classes,
        [
            _NestedDatasetComponent(  # pylint: disable=protected-access
                output_classes=output_class,
                output_shapes=output_shape,
                output_types=output_type)
            for output_class, output_shape, output_type in zip(
                nest.flatten(input_dataset.output_classes),
                nest.flatten(input_dataset.output_shapes),
                nest.flatten(input_dataset.output_types))
        ])
    # For nested-dataset outputs the component object itself stands in for
    # classes, shapes and types alike.
    self._output_shapes = self._output_classes
    self._output_types = self._output_classes

  def _as_variant_tensor(self):
    return gen_dataset_ops.window_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        self._size,
        self._shift,
        self._stride,
        self._drop_remainder,
        **flat_structure(self))

  @property
  def output_classes(self):
    return self._output_classes

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types
class _OptionsDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that stores options."""

  def __init__(self, input_dataset, options):
    super(_OptionsDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    # Merge with options already attached upstream, if any; otherwise adopt
    # the new options wholesale.
    inherited = input_dataset.options()
    self._options = inherited.merge(options) if inherited else options

  def _as_variant_tensor(self):
    # Pure identity: forward the input's variant tensor unchanged.
    return self._input_dataset._as_variant_tensor()  # pylint: disable=protected-access

  def options(self):
    return self._options
class _ModelDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and models performance."""

  def __init__(self, input_dataset):
    """See `optimize()` for details."""
    super(_ModelDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset

  def _as_variant_tensor(self):
    # Elements pass through unchanged; the op only instruments the pipeline.
    return gen_dataset_ops.model_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        **flat_structure(self))
class _OptimizeDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and applies optimizations."""

  def __init__(self, input_dataset, optimizations):
    """See `optimize()` for details."""
    super(_OptimizeDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    # A missing optimization list is treated as the empty list.
    requested = [] if optimizations is None else optimizations
    self._optimizations = ops.convert_to_tensor(
        requested, dtype=dtypes.string, name="optimizations")

  def _as_variant_tensor(self):
    return gen_dataset_ops.optimize_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        self._optimizations,
        **flat_structure(self))
class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and sets stats aggregator."""

  def __init__(self, input_dataset, aggregator, prefix, counter_prefix):
    super(_SetStatsAggregatorDataset, self).__init__(input_dataset)
    self._input_dataset = input_dataset
    # Aggregator resource plus the prefixes used to namespace emitted stats.
    self._stats_aggregator = aggregator
    self._prefix = prefix
    self._counter_prefix = counter_prefix

  def _as_variant_tensor(self):
    return gen_dataset_ops.set_stats_aggregator_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        self._stats_aggregator._resource,  # pylint: disable=protected-access
        self._prefix,
        self._counter_prefix,
        **flat_structure(self))
| 37.775523
| 109
| 0.690148
|
4a116191c21392d76edf277406ec4c7d5774d028
| 600
|
py
|
Python
|
Assingments/module03/week02/monday/sulfaroa.inclass.py
|
tonysulfaro/MI-250
|
e746fd261107531e100a5cb98d6b6c90bd9234d8
|
[
"MIT"
] | null | null | null |
Assingments/module03/week02/monday/sulfaroa.inclass.py
|
tonysulfaro/MI-250
|
e746fd261107531e100a5cb98d6b6c90bd9234d8
|
[
"MIT"
] | null | null | null |
Assingments/module03/week02/monday/sulfaroa.inclass.py
|
tonysulfaro/MI-250
|
e746fd261107531e100a5cb98d6b6c90bd9234d8
|
[
"MIT"
] | null | null | null |
# use the imgur API and get the most popular cat picture
"""
sulfaroa
9/10/18
In-class activity
"""
from imgurpython import ImgurClient
import requests

# NOTE(review): credentials are hard-coded; move them to environment variables
# or a config file before sharing this script (the secret is now exposed).
client_id = '82d55c3dec952aa'
client_secret = '3e2e0865723e9a6b94100702980b58ca95a0fd25'

client = ImgurClient(client_id, client_secret)

# Search the gallery for images matching 'cat', sorted by popularity.
tag_collection = client.gallery_search('cat', advanced=None, sort='top', page=0)

# The first result is the top match; take its direct link.
image_url = tag_collection[0].link

# Download the image bytes, then write them to disk.
img_data = requests.get(image_url).content
with open('cat_picture.jpg', 'wb') as handler:
    # Bug fix: the original wrote `image_url` (a str) to a binary file, which
    # raises TypeError and would have saved the URL, not the picture.
    handler.write(img_data)
| 24
| 80
| 0.775
|
4a11631ee3b04428f0166309f92fd9cdb264257a
| 187
|
py
|
Python
|
project_template/submodule1/example_submodule.py
|
tomaskala/python-template
|
0b4ea38425fa432092d388530224240dc9c46854
|
[
"Unlicense"
] | null | null | null |
project_template/submodule1/example_submodule.py
|
tomaskala/python-template
|
0b4ea38425fa432092d388530224240dc9c46854
|
[
"Unlicense"
] | null | null | null |
project_template/submodule1/example_submodule.py
|
tomaskala/python-template
|
0b4ea38425fa432092d388530224240dc9c46854
|
[
"Unlicense"
] | null | null | null |
from project_template.submodule1.subsubmodule.example_subsubmodule import (
example as sub_example,
)
def example():
    """Print this submodule's banner, then run the subsubmodule example."""
    banner = "Example submodule 1"
    print(banner)
    sub_example("submodule 1")
| 20.777778
| 75
| 0.754011
|
4a1163218f0de99a21635c413e2a0f025eb0746c
| 2,137
|
py
|
Python
|
tests/test_app/models.py
|
crazyscientist/django-admin-view-permission
|
e6385622e2a708f23c5041ec031e32891e7e267e
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_app/models.py
|
crazyscientist/django-admin-view-permission
|
e6385622e2a708f23c5041ec031e32891e7e267e
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_app/models.py
|
crazyscientist/django-admin-view-permission
|
e6385622e2a708f23c5041ec031e32891e7e267e
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from parler.models import TranslatableModel, TranslatedFields
class TestModel0(models.Model):
    """Standalone fixture model: plain char/text/int fields, default auto PK."""
    var1 = models.CharField(max_length=200)
    var2 = models.TextField()
    var3 = models.IntegerField()
class TestModel1(models.Model):
    """Fixture model with an M2M, a non-editable field and a read-only property."""
    var1 = models.CharField(max_length=200)
    var2 = models.TextField()
    var3 = models.IntegerField()
    var4 = models.ManyToManyField(TestModel0)
    var5 = models.TextField(editable=False)  # excluded from ModelForms/admin forms

    @property
    def var6(self):
        # Computed attribute — presumably exercises read-only handling in the
        # admin; confirm against the test suite.
        return 'readonly_field'
class TestModel2(models.Model):
    """Fixture model with a FK to TestModel1 plus char/text/int fields."""
    var1 = models.ForeignKey(TestModel1, on_delete=models.CASCADE)
    var2 = models.CharField(max_length=200)
    var3 = models.TextField()
    # Bug fix: the original read `var4 = models.IntegerField` (no parentheses),
    # assigning the field *class* instead of a field instance, so Django never
    # created a `var4` column. Instantiate it like TestModel3/TestModel4 do.
    # NOTE(review): requires a new migration; confirm no test relies on the
    # attribute deliberately being a bare class.
    var4 = models.IntegerField()
class TestModel3(models.Model):
    """Fixture model with a FK to TestModel1; mirrors TestModel4's fields."""
    var1 = models.ForeignKey(TestModel1, on_delete=models.CASCADE)
    var2 = models.CharField(max_length=200)
    var3 = models.TextField()
    var4 = models.IntegerField()
class TestModel4(models.Model):
    """Fixture model with a FK to TestModel1; deliberate duplicate of TestModel3."""
    var1 = models.ForeignKey(TestModel1, on_delete=models.CASCADE)
    var2 = models.CharField(max_length=200)
    var3 = models.TextField()
    var4 = models.IntegerField()
# Copy of the TestModel1 to exam model with different key
class TestModel5(models.Model):
    """Copy of TestModel1 but with an explicit AutoField primary key (var0)."""
    var0 = models.AutoField(primary_key=True)
    var1 = models.CharField(max_length=200)
    var2 = models.TextField()
    var3 = models.IntegerField()
    var4 = models.ManyToManyField(TestModel0)
# Copy of the TestModel4 to exam model with different key
class TestModel6(models.Model):
    """Copy of TestModel4 but with an explicit AutoField primary key (var0)."""
    var0 = models.AutoField(primary_key=True)
    var1 = models.ForeignKey(TestModel1, on_delete=models.CASCADE)
    var2 = models.CharField(max_length=200)
    var3 = models.TextField()
    var4 = models.IntegerField()
class TestModelParler(TranslatableModel):
    """django-parler fixture: mixes untranslated fields with translated ones."""
    var1 = models.CharField(max_length=200)
    var2 = models.TextField()
    var3 = models.IntegerField()
    # var4/var5 live on the parler translation model, one row per language.
    translations = TranslatedFields(
        var4=models.CharField(max_length=20),
        var5=models.TextField(),
    )

    @property
    def var6(self):
        # Computed read-only attribute, matching TestModel1.var6.
        return 'readonly_field'
| 27.753247
| 66
| 0.722508
|
4a116353f68e1495d84e07ad19415a1580342eee
| 101
|
py
|
Python
|
allennlp_models/tokenizers/spacy_tokenizer.py
|
alvinwatner/allennlp-models
|
7e767f00da836c5af59e39dc74852b484b45ef46
|
[
"Apache-2.0"
] | null | null | null |
allennlp_models/tokenizers/spacy_tokenizer.py
|
alvinwatner/allennlp-models
|
7e767f00da836c5af59e39dc74852b484b45ef46
|
[
"Apache-2.0"
] | null | null | null |
allennlp_models/tokenizers/spacy_tokenizer.py
|
alvinwatner/allennlp-models
|
7e767f00da836c5af59e39dc74852b484b45ef46
|
[
"Apache-2.0"
] | null | null | null |
# from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
#
# SpacyTokenizer(language=)
| 33.666667
| 71
| 0.811881
|
4a1164f1cd085474cf62d8182e7ba6eb37e9b95f
| 26,135
|
py
|
Python
|
msgraph/cli/command_modules/groups/azext_groups/vendored_sdks/groups/aio/operations/_groups_events_calendar_calendar_view_operations.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
msgraph/cli/command_modules/groups/azext_groups/vendored_sdks/groups/aio/operations/_groups_events_calendar_calendar_view_operations.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph/cli/command_modules/groups/azext_groups/vendored_sdks/groups/aio/operations/_groups_events_calendar_calendar_view_operations.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GroupsEventsCalendarCalendarViewOperations:
"""GroupsEventsCalendarCalendarViewOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~groups.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def accept(
self,
group_id: str,
event_id: str,
event_id1: str,
body: "models.Paths1Knpax0GroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphAcceptPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> None:
"""Invoke action accept.
Invoke action accept.
:param group_id: key: id of group.
:type group_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: Action parameters.
:type body: ~groups.models.Paths1Knpax0GroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphAcceptPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.accept.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1Knpax0GroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphAcceptPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
accept.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/{event-id1}/microsoft.graph.accept'} # type: ignore
async def cancel(
self,
group_id: str,
event_id: str,
event_id1: str,
body: "models.PathsMo4WjzGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> None:
"""Invoke action cancel.
Invoke action cancel.
:param group_id: key: id of group.
:type group_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: Action parameters.
:type body: ~groups.models.PathsMo4WjzGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.cancel.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PathsMo4WjzGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/{event-id1}/microsoft.graph.cancel'} # type: ignore
async def decline(
self,
group_id: str,
event_id: str,
event_id1: str,
body: "models.PathsUsenpeGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphDeclinePostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> None:
"""Invoke action decline.
Invoke action decline.
:param group_id: key: id of group.
:type group_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: Action parameters.
:type body: ~groups.models.PathsUsenpeGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphDeclinePostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.decline.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PathsUsenpeGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphDeclinePostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
decline.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/{event-id1}/microsoft.graph.decline'} # type: ignore
async def dismiss_reminder(
self,
group_id: str,
event_id: str,
event_id1: str,
**kwargs
) -> None:
"""Invoke action dismissReminder.
Invoke action dismissReminder.
:param group_id: key: id of group.
:type group_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.dismiss_reminder.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
dismiss_reminder.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/{event-id1}/microsoft.graph.dismissReminder'} # type: ignore
async def forward(
self,
group_id: str,
event_id: str,
event_id1: str,
body: "models.Paths1164OkqGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphForwardPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> None:
"""Invoke action forward.
Invoke action forward.
:param group_id: key: id of group.
:type group_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: Action parameters.
:type body: ~groups.models.Paths1164OkqGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphForwardPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.forward.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1164OkqGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphForwardPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
forward.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/{event-id1}/microsoft.graph.forward'} # type: ignore
async def snooze_reminder(
self,
group_id: str,
event_id: str,
event_id1: str,
body: "models.PathsMzzc3KGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphSnoozereminderPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> None:
"""Invoke action snoozeReminder.
Invoke action snoozeReminder.
:param group_id: key: id of group.
:type group_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: Action parameters.
:type body: ~groups.models.PathsMzzc3KGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphSnoozereminderPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.snooze_reminder.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PathsMzzc3KGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphSnoozereminderPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
snooze_reminder.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/{event-id1}/microsoft.graph.snoozeReminder'} # type: ignore
async def tentatively_accept(
self,
group_id: str,
event_id: str,
event_id1: str,
body: "models.Paths1Ndx18XGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphTentativelyacceptPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> None:
"""Invoke action tentativelyAccept.
Invoke action tentativelyAccept.
:param group_id: key: id of group.
:type group_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: Action parameters.
:type body: ~groups.models.Paths1Ndx18XGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphTentativelyacceptPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.tentatively_accept.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1Ndx18XGroupsGroupIdEventsEventIdCalendarCalendarviewEventId1MicrosoftGraphTentativelyacceptPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
tentatively_accept.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/{event-id1}/microsoft.graph.tentativelyAccept'} # type: ignore
async def delta(
    self,
    group_id: str,
    event_id: str,
    **kwargs
) -> List["models.MicrosoftGraphEvent"]:
    """Invoke function delta.

    Invoke function delta.

    :param group_id: key: id of group.
    :type group_id: str
    :param event_id: key: id of event.
    :type event_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: list of MicrosoftGraphEvent, or the result of cls(response)
    :rtype: list[~groups.models.MicrosoftGraphEvent]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[List["models.MicrosoftGraphEvent"]]
    # Map auth/not-found/conflict statuses to azure-core exception types for map_error.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"
    # Construct URL
    url = self.delta.metadata['url']  # type: ignore
    path_format_arguments = {
        'group-id': self._serialize.url("group_id", group_id, 'str'),
        'event-id': self._serialize.url("event_id", event_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Any status other than 200 is surfaced as HttpResponseError carrying the OData error body.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    deserialized = self._deserialize('[MicrosoftGraphEvent]', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
delta.metadata = {'url': '/groups/{group-id}/events/{event-id}/calendar/calendarView/microsoft.graph.delta()'}  # type: ignore
| 46.586453
| 193
| 0.670901
|
4a116616f1dc8b47b1cfbd310e858127e6794149
| 898
|
py
|
Python
|
mythirdpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
mythirdpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
mythirdpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'tangcheng'
__mtime__ = '12/12/2017'
"""
import urllib.request
import re
from db.DBUtil import persist
def getContent(url):
    """Download *url*, extract every <title> text and persist each one.

    :param url: page URL to fetch; a desktop-browser User-Agent header is
        sent because some sites reject urllib's default UA.
    """
    headers = ("User-Agent",
               "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36")
    opener = urllib.request.build_opener()
    opener.addheaders = [headers]
    # Fix: use the opener directly instead of installing it process-wide, and
    # close the HTTP response deterministically (the original leaked it).
    with opener.open(url) as response:
        data = response.read().decode("utf-8")
    # print(data)
    userpat = '<title>(.*?)<'
    titleList = re.compile(userpat, re.S).findall(data)
    print("titleList-->", titleList)
    for title in titleList:
        print("data:", title)
        persist(url, title)
if __name__ == '__main__':
    # Crawl the two sample sites when executed as a script.
    for target in ("http://jd.com", "http://chaojihao.net"):
        getContent(target)
| 24.944444
| 132
| 0.640312
|
4a1166498ba471b211847412d2dd18859e9d4b82
| 3,944
|
py
|
Python
|
Datasets/mat2csv.py
|
fyancy/Meta-Learning-in-Fault-Diagnosis
|
8ac907fe67984552c2ca18db760338a29d30a11c
|
[
"MIT"
] | 20
|
2021-09-22T08:31:04.000Z
|
2022-03-28T12:28:52.000Z
|
Datasets/mat2csv.py
|
fyancy/Meta-Learning-in-Fault-Diagnosis
|
8ac907fe67984552c2ca18db760338a29d30a11c
|
[
"MIT"
] | 2
|
2021-11-26T02:55:01.000Z
|
2022-03-28T04:56:55.000Z
|
Datasets/mat2csv.py
|
fyancy/Meta-Learning-in-Fault-Diagnosis
|
8ac907fe67984552c2ca18db760338a29d30a11c
|
[
"MIT"
] | 8
|
2021-10-31T19:50:57.000Z
|
2022-03-17T00:54:51.000Z
|
import csv
import numpy as np
import pandas as pd
import scipy
from scipy import io
import os
def get_filename(file_dir):
    """Return the full path of the single file inside *file_dir*.

    Prints a diagnostic and aborts the process (SystemExit via exit()) when
    the directory does not contain exactly one entry.

    :param file_dir: directory expected to hold exactly one file
    """
    entries = os.listdir(file_dir)
    if len(entries) == 1:
        return os.path.join(file_dir, entries[-1])
    print('===========!!!===========!!!===========')
    print('There are {} files in [{}]'.format(len(entries), file_dir))
    print(entries)
    exit()
def add_csv(file_dir):
    """Return *file_dir* with its extension replaced by '.csv'.

    Fix: the original sliced a hard-coded 4 characters off the file name,
    silently corrupting names whose extension is not exactly 3 characters
    plus the dot (e.g. '.data'); os.path.splitext handles any extension.

    :param file_dir: path of the source file
    :return: path of the sibling .csv file
    """
    root, _ext = os.path.splitext(file_dir)
    return root + '.csv'
def mat2csv_sa(file_dir, name='Data', channel=4):  # for SA
    """Export one channel of an SA .mat recording to a sibling .csv file.

    :param file_dir: path of the .mat file
    :param name: variable name expected inside the .mat file
    :param channel: 1-based column index of the channel to export
    """
    outputFile = add_csv(file_dir)
    mat_file = scipy.io.loadmat(file_dir)
    if name in mat_file:
        # Select the requested channel (columns are 0-based in the DataFrame).
        column = pd.DataFrame(mat_file[name])[channel - 1]
        column.to_csv(outputFile, header=0, index=False)
        print('oooooooooooooooooooooooooooooooooooooo')
        print('Transform the file to csv format at: \n', outputFile)
def mat2csv_cw(file_dir, name='DE_time'):  # for CW data
    """Export the first variable matching *name* from a CWRU .mat file to .csv.

    Aborts (exit()) when more than one variable name contains *name*.

    :param file_dir: path of the .mat file
    :param name: substring the target variable name must contain
    """
    mat_file = scipy.io.loadmat(file_dir)
    name_list = list(mat_file.keys())
    print(name_list)
    outputFile = add_csv(file_dir)
    name_new = [key for key in name_list if name in key]
    if len(name_new) > 1:
        print("More than 1 file named {}:\n {}".format(name, name_new))
        exit()
    print(name_new)
    data = mat_file[name_new[0]]
    # print(data[:5])
    # Only the first column is exported.
    data = pd.DataFrame(data)[0]
    data.to_csv(outputFile, header=0, index=False)
    print('oooooooooooooooooooooooooooooooooooooo')
    print('Transform the file to csv format at: \n', outputFile)
def get_data_csv(file_dir, num=100000, header=0, shift_step=200):
    """Read a single-column csv and return exactly *num* samples as a 1-D array.

    When the file holds fewer than *num* values, it is re-read with the
    header row shifted forward by *shift_step* and the (overlapping) reads
    are concatenated until enough samples are available.

    :param file_dir: csv file path
    :param num: number of samples to return
    :param header: row index pandas treats as the header row
    :param shift_step: header offset added on each additional read
    """
    samples = pd.read_csv(file_dir, header=header).values.reshape(-1)
    while samples.shape[0] < num:
        header += shift_step
        more = pd.read_csv(file_dir, header=header).values.reshape(-1)
        samples = np.concatenate((samples, more), axis=0)
    return samples[:num]
if __name__ == "__main__":
    # Ad-hoc conversion/verification driver; every path below is a
    # machine-specific Windows path — adjust before running elsewhere.
    # for SA data
    # file_dir = r'F:\dataset\何水龙天线试验台数据\保持架故障\保持架故障\20121130保持架故障\第二次\正转\1400'
    # file_ = get_filename(file_dir)
    # mat2csv_sa(file_, channel=2)  # planetary reducer output shaft, x direction
    # file_ = r'F:\dataset\何水龙天线试验台数据\外圈严重故障\外圈严重故障\20121123外圈严重测点变换\第一次\正转\1500\20121123 正转1500 18.MAT'
    # mat2csv_sa(file_, channel=2)  # planetary reducer output shaft, x direction
    # for CW data
    # file_ = r'F:\dataset\casedata_12khz\ball\ball_021\ball021_0.mat'
    # mat2csv_cw(file_, name='DE_time')
    # file_ = r'F:\dataset\casedata_12khz\ball\ball_021\ball021_1.mat'
    # mat2csv_cw(file_, name='DE_time')
    # file_ = r'F:\dataset\casedata_12khz\ball\ball_021\ball021_2.mat'
    # mat2csv_cw(file_, name='DE_time')
    # file_ = r'F:\dataset\casedata_12khz\ball\ball_021\ball021_3.mat'
    # mat2csv_cw(file_, name='DE_time')
    # check SA: read back a converted csv and spot-check the first values
    file_ = r'F:\dataset\何水龙天线试验台数据\外圈点焊故障\外圈点焊故障\20121124外圈电焊测点变换\第二次\正转\800\2012112—点焊故障— 5K—正转800 3.csv'
    data = get_data_csv(file_)
    print(data[:5])
    # check CW: request a fixed number of samples and verify the shape
    n = 1024 * 200
    file_ = r'F:\dataset\casedata_12khz\ball\ball_021\ball021_3.csv'
    data = get_data_csv(file_, num=n)
    print(data.shape)
    print(data[:5])
| 29.878788
| 107
| 0.629817
|
4a1166ee51226b95a296db3864794065fc084d65
| 1,803
|
py
|
Python
|
week_11_DS_N_Algorithm/02_Data_Structure/03_DS_LinkedList_Q_Stack/실습2_연결 리스트에서 노드 삭제하기.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | 1
|
2021-11-03T18:27:37.000Z
|
2021-11-03T18:27:37.000Z
|
week_11_DS_N_Algorithm/02_Data_Structure/03_DS_LinkedList_Q_Stack/실습2_연결 리스트에서 노드 삭제하기.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | null | null | null |
week_11_DS_N_Algorithm/02_Data_Structure/03_DS_LinkedList_Q_Stack/실습2_연결 리스트에서 노드 삭제하기.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | 1
|
2021-02-10T15:21:53.000Z
|
2021-02-10T15:21:53.000Z
|
# 연결 리스트의 노드. 단일 연결 리스트의 경우입니다.
class Node:
    """Single node of a singly linked list."""

    def __init__(self, val):
        # Payload plus a pointer to the successor (None terminates the list).
        self.val, self.next = val, None

    def __str__(self):
        return str(self.val)
# 연결 리스트 클래스. head 와 tail을 가지고 있으며, 가장 뒤에 새로운 노드를 추가하는 addToEnd 함수가 있습니다.
class LinkedList:
    """Singly linked list that tracks head and tail for O(1) appends."""

    def __init__(self, head):
        self.head = self.tail = head

    def addToEnd(self, node):
        # Link after the current tail, then advance the tail pointer.
        self.tail.next = node
        self.tail = node

    def __str__(self):
        parts = []
        cur = self.head
        while cur:
            parts.append(str(cur.val))
            cur = cur.next
        return "->".join(parts)
# 주어진 배열을 linkedlist로 변환해서 돌려줍니다. 실습 3-1을 참조하세요
def toLinkedList(lst):
    """Build a LinkedList holding the values of *lst* (must be non-empty)."""
    linked = LinkedList(Node(lst[0]))
    for value in lst[1:]:
        linked.addToEnd(Node(value))
    return linked
####################################################################################################################################
def deleteNode(ll, valToDelete):
    """Remove the first node whose ``val`` equals *valToDelete* from *ll*.

    Bug fix: when the deleted node was both head and tail, the original left
    ``ll.tail`` pointing at the removed node; the tail is now updated too.

    :param ll: LinkedList (may be empty, i.e. ll.head is None)
    :param valToDelete: value to remove; no-op if absent
    """
    # Empty list: nothing to do.
    if ll.head is None:
        return None
    # Deleting the head node.
    if ll.head.val == valToDelete:
        if ll.tail is ll.head:
            ll.tail = ll.head.next  # list becomes empty
        ll.head = ll.head.next
        return None
    # Deleting an interior or tail node.
    prev = ll.head
    node = prev.next
    while node:
        if node.val == valToDelete:
            prev.next = node.next
            if node is ll.tail:
                ll.tail = prev
            break
        prev = node
        node = prev.next
    return None
def main():
    """Demo: build 2->8->19->37->4->5 and try two deletions."""
    ll = toLinkedList([2, 8, 19, 37, 4, 5])
    print(ll)
    deleteNode(ll, 5)
    print(ll)  # 5 removed: 2->8->19->37->4
    deleteNode(ll, 3)
    print(ll)  # 3 is absent, list unchanged: 2->8->19->37->4
if __name__ == "__main__":
    main()
| 24.04
| 132
| 0.49695
|
4a11670c8314150cff3688cb0347e0e67441bcb7
| 3,868
|
py
|
Python
|
scripts/val_pseudo.py
|
IssamLaradji/affinity_lcfcn
|
2c38500c83dc9c063ea2c910aadc94f14a18f3a5
|
[
"Apache-2.0"
] | 15
|
2020-09-30T14:28:29.000Z
|
2022-03-16T11:53:15.000Z
|
scripts/val_pseudo.py
|
IssamLaradji/affinity_lcfcn
|
2c38500c83dc9c063ea2c910aadc94f14a18f3a5
|
[
"Apache-2.0"
] | 2
|
2020-11-25T00:57:55.000Z
|
2021-06-16T20:07:08.000Z
|
scripts/val_pseudo.py
|
IssamLaradji/affinity_lcfcn
|
2c38500c83dc9c063ea2c910aadc94f14a18f3a5
|
[
"Apache-2.0"
] | 2
|
2020-11-06T22:38:56.000Z
|
2020-12-02T04:45:05.000Z
|
import sys, os
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, path)
from haven import haven_chk as hc
from haven import haven_results as hr
from haven import haven_utils as hu
import torch
import torchvision
import tqdm
import pandas as pd
import pprint
import itertools
import os
import pylab as plt
import time
import numpy as np
from src import models
from src import datasets
from src import utils as ut
import argparse
from torch.utils.data import sampler
from torch.utils.data.sampler import RandomSampler
from torch.backends import cudnn
from torch.nn import functional as F
from torch.utils.data import DataLoader
cudnn.benchmark = True
if __name__ == "__main__":
    # Offline evaluation driver: for each experiment hash, rebuild the
    # dataset/model from the saved exp_dict, load the best checkpoint, and
    # score it on the train split (used to inspect pseudo-mask quality).
    savedir_base = '/mnt/public/results/toolkit/weak_supervision'
    # hash_list = []
    # # lcfcn loss with affinity+shared+pseudo mask on seg dataset
    # hash_list += ['37bc7b4aa55e77592f10d60d2a9ebdc3']
    # # lcfcn loss with affinity+shared on seg dataset
    # hash_list += ['66ffec29b63f1ade0e7c79b23997d0b3']
    hash_list = ['a55d2c5dda331b1a0e191b104406dd1c',
                 '13b0f4e395b6dc5368f7965c20e75612',
                 'fcc1acac9ff5c2fa776d65ac76c3892b']
    # # lcfcn loss wiith shared on localization dataset
    # hash_list += ['a55d2c5dda331b1a0e191b104406dd1c']
    # # use this one a55d2c5dda331b1a0e191b104406dd1c for all the lcfcn results
    # hash_list += ['a55d2c5dda331b1a0e191b104406dd1c']
    # # lcfcn loss wiith affinity+shared on segmentation dataset
    # hash_list += ['9c7533a7c61f72919b9afd749dbb88e1']
    # lcfcn loss with_affinity=True
    # hash_id = '84ced18cf5c1fb3ad5820cc1b55a38fa'
    # point level
    # hash_id = 'd7040c9534b08e765f48c6cb034b26b2'
    # LCFCN
    # hash_id = 'bcba046296675e9e3af5cd9f353d217b'
    for hash_id in hash_list:
        exp_dict = hu.load_json(os.path.join(savedir_base, hash_id, 'exp_dict.json'))
        fname = '.tmp/train_dict_%s.pkl' % hash_id
        datadir = '/mnt/public/datasets/DeepFish/'
        # NOTE(review): the 'and 0' deliberately disables the pickle cache so
        # results are always recomputed; drop it to re-enable caching.
        if os.path.exists(fname) and 0:
            train_dict = hu.load_pkl(fname)
        else:
            split = 'train'
            exp_dict['model']['count_mode'] = 0
            train_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
                                             split=split,
                                             datadir=datadir,
                                             exp_dict=exp_dict,
                                             dataset_size=exp_dict['dataset_size'])
            train_loader = DataLoader(train_set,
                                      # sampler=val_sampler,
                                      batch_size=1,
                                      collate_fn=ut.collate_fn,
                                      num_workers=0)
            # Model
            # ==================
            model = models.get_model(model_dict=exp_dict['model'],
                                     exp_dict=exp_dict,
                                     train_set=train_set).cuda()
            model_path = os.path.join(savedir_base, hash_id, 'model_best.pth')
            # load best model
            model.load_state_dict(hu.torch_load(model_path))
            train_dict = model.val_on_loader(train_loader)
            hu.save_pkl(fname, train_dict)
        print('results for hash: %s' % hash_id)
        pprint.pprint(train_dict)
        # loop over the val_loader and saves image
        # for i, batch in enumerate(train_loader):
        #     image_name = batch['meta'][0]['name']
        #     savedir_image = os.path.join('/mnt/public/predictions', "pseudo_masks", hash_id, split, "%s.png" % (image_name))
        #     img_pred = model.predict_on_batch(batch)
        #     hu.save_image(savedir_image, img_pred)
        #     print('saved: %d/%d' %(i, len(train_loader)))
| 35.486239
| 126
| 0.613237
|
4a1167176648517b5437e89201bdbf56157a2c8e
| 789
|
py
|
Python
|
setup.py
|
ezeportela/pydoge-ds
|
842bb5d4e242565fde484965da23eae91b38973f
|
[
"MIT"
] | null | null | null |
setup.py
|
ezeportela/pydoge-ds
|
842bb5d4e242565fde484965da23eae91b38973f
|
[
"MIT"
] | null | null | null |
setup.py
|
ezeportela/pydoge-ds
|
842bb5d4e242565fde484965da23eae91b38973f
|
[
"MIT"
] | null | null | null |
"""Packaging script for PyDoge-ds."""
import setuptools

__version__ = '0.1.1'

# Long description and dependency pins come straight from the repo files.
with open('README.md', 'r') as readme:
    long_description = readme.read()
with open('requirements.txt', 'r') as reqs:
    install_requires = reqs.read().splitlines()

setuptools.setup(
    name='PyDoge-ds',
    version=__version__,
    author='Ezequiel Portela',
    author_email='eportelab@gmail.com',
    description='A driver library',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/ezeportela/pydoge-ds',
    packages=setuptools.find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    install_requires=install_requires,
    python_requires='>=3.6',
)
| 27.206897
| 50
| 0.674271
|
4a11681d8927ead4525bb5251c024b3abe0dd90d
| 1,090
|
py
|
Python
|
pyghelpers_test/Dodger/Main_Dodger.py
|
IrvKalb/pyghelpers
|
0c1f11f8f779fe3b1a249ac6704fd4689a45fc4f
|
[
"BSD-2-Clause"
] | 38
|
2021-11-16T03:04:42.000Z
|
2022-03-27T05:57:50.000Z
|
pyghelpers_test/Dodger/Main_Dodger.py
|
IrvKalb/pyghelpers
|
0c1f11f8f779fe3b1a249ac6704fd4689a45fc4f
|
[
"BSD-2-Clause"
] | null | null | null |
pyghelpers_test/Dodger/Main_Dodger.py
|
IrvKalb/pyghelpers
|
0c1f11f8f779fe3b1a249ac6704fd4689a45fc4f
|
[
"BSD-2-Clause"
] | 22
|
2021-11-11T15:57:58.000Z
|
2022-03-18T12:58:07.000Z
|
# Dodger main program
#
# Instantiates 3 scenes, creates and starts the scene manager
#
# Original version by Al Sweigart from his book "Invent With Python"
# (concept, graphics, and sounds used by permission from Al Sweigart)
# 1 - Import packages
import os
# The next line is here just in case you are running from the command line
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import pygame
import pyghelpers
from SceneSplash import *
from ScenePlay import *
from SceneHighScores import *
# 2 - Define constants
FRAMES_PER_SECOND = 40
# 3 - Initialize the world
pygame.init()
# NOTE(review): WINDOW_WIDTH / WINDOW_HEIGHT come from one of the
# star-imported scene modules — presumably shared constants; verify there.
window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
# 4 - Load assets: image(s), sounds, etc.
# 5 - Initialize variables
# Instantiate all scenes and store them in a list
# (the scene listed first, SceneSplash, is the one shown at startup — confirm
# against pyghelpers.SceneMgr documentation)
scenesList = [SceneSplash(window),
              SceneHighScores(window),
              ScenePlay(window)]
# Create the scene manager, passing in the scenes list and the FPS
oSceneMgr = pyghelpers.SceneMgr(scenesList, FRAMES_PER_SECOND)
# Tell the Scene Manager to start running
oSceneMgr.run()
| 27.948718
| 74
| 0.738532
|
4a1168bfa013c83977d4d4d5af1433d8eab87ad1
| 30,752
|
py
|
Python
|
dragonchain/lib/authorization_utest.py
|
deanshelton913/dragonchain
|
36f93192b6679b703edbf643f6913292df9be953
|
[
"Apache-2.0"
] | null | null | null |
dragonchain/lib/authorization_utest.py
|
deanshelton913/dragonchain
|
36f93192b6679b703edbf643f6913292df9be953
|
[
"Apache-2.0"
] | null | null | null |
dragonchain/lib/authorization_utest.py
|
deanshelton913/dragonchain
|
36f93192b6679b703edbf643f6913292df9be953
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import json
import datetime
import unittest
from unittest.mock import patch, MagicMock
from dragonchain import test_env # noqa: F401
from dragonchain.lib import authorization
from dragonchain import exceptions
class TestAuthorization(unittest.TestCase):
def assertRaisesWithMessage(self, exception, msg, func, *args, **kwargs):  # noqa N802
    """
    Helper to assert a particular exception with a certain message is raised via our UserException

    Bug fix: the original called self.assertFail(), which does not exist on
    unittest.TestCase (the failure surfaced as an AttributeError); the
    no-raise case now reports a clean failure via self.fail(), outside the
    try so the AssertionError cannot be swallowed by the except clause.
    """
    try:
        func(*args, **kwargs)
    except exception as e:
        self.assertEqual(str(e), msg)
        return
    self.fail("{} was not raised".format(exception.__name__))
def test_datetime(self):
    """get_now_datetime must return a datetime instance."""
    self.assertIsInstance(authorization.get_now_datetime(), datetime.datetime)
def test_gen_auth_key(self):
    """Generated auth keys are 43 alphanumeric characters."""
    auth_key = authorization.gen_auth_key()
    self.assertRegex(auth_key, r"[a-zA-Z0-9]{43}")
def test_gen_auth_key_id(self):
    """Key ids are 12 uppercase letters; passing True adds an 'SC_' prefix."""
    auth_key_id = authorization.gen_auth_key_id()
    self.assertRegex(auth_key_id, r"[A-Z]{12}")
    auth_key_id = authorization.gen_auth_key_id(True)
    self.assertRegex(auth_key_id, r"SC_[A-Z]{12}")
def test_get_hmac_string(self):
    """HMAC message string joins verb, path, dcid, timestamp, content type and a body digest with newlines."""
    http_verb = "TEST"
    full_path = "/somepath"
    dcid = "test_dcid"
    timestamp = "timestamp_str"
    content_type = "mimetype"
    content = b"some content"
    hmac_hash_type = "SHA256"
    hash_type = authorization.get_supported_hmac_hash(hmac_hash_type)
    hmac_str = authorization.get_hmac_message_string(http_verb, full_path, dcid, timestamp, content_type, content, hash_type)
    self.assertEqual(hmac_str, "TEST\n/somepath\ntest_dcid\ntimestamp_str\nmimetype\nKQ9JPET11j0Gs3TQpavSkvrji5LKsvrl7+/hsOk0f1Y=")
@patch("dragonchain.lib.authorization.get_hmac_message_string", return_value="hmac_string")
def test_get_authorization(self, mock_hmac_string):
    """Authorization header has the form 'DC1-HMAC-<hash> <key_id>:<base64 signature>'."""
    self.assertEqual(
        authorization.get_authorization("id", "key", "TEST", "/path", "dcid", "timestamp", "mimetype", b"content", "SHA256"),
        "DC1-HMAC-SHA256 id:G0ufeozs9/jOZCvIAkEfWhwCxx0NBDrvapnqdqShxWA=",
    )
@patch("dragonchain.lib.authorization.storage.get_json_from_object", return_value={"key": "thing"})
def test_get_auth_key(self, mock_storage):
    """Non-interchain keys are read from storage at KEYS/<key_id>."""
    self.assertEqual(authorization.get_auth_key("test", False), "thing")
    mock_storage.assert_called_with("KEYS/test")
@patch("dragonchain.lib.authorization.storage.get_json_from_object", return_value={"key": "thing"})
def test_get_auth_key_interchain(self, mock_storage):
    """Interchain keys are read from the KEYS/INTERCHAIN/ prefix."""
    self.assertEqual(authorization.get_auth_key("test", True), "thing")
    mock_storage.assert_called_with("KEYS/INTERCHAIN/test")
@patch("dragonchain.lib.authorization.storage.get_json_from_object", side_effect=exceptions.NotFound)
def test_get_auth_key_returns_none_on_not_found(self, mock_storage):
    """A storage NotFound is translated to a None return, not an exception."""
    self.assertIsNone(authorization.get_auth_key("test", False))
@patch("dragonchain.lib.authorization.storage.get_json_from_object", return_value=None)
def test_get_auth_key_returns_none_on_empty_storage_get(self, mock_storage):
    """An empty storage payload also yields None."""
    self.assertIsNone(authorization.get_auth_key("test", False))
@patch("dragonchain.lib.authorization.storage.delete", return_value=True)
def test_remove_auth_key(self, mock_storage):
    """Removing a non-interchain key deletes KEYS/<key_id> and reports success."""
    self.assertTrue(authorization.remove_auth_key("test"))
    mock_storage.assert_called_with("KEYS/test")
@patch("dragonchain.lib.authorization.storage.delete", return_value=True)
def test_remove_auth_key_interchain(self, mock_storage):
    """Removing an interchain key deletes from the KEYS/INTERCHAIN/ prefix."""
    self.assertTrue(authorization.remove_auth_key("test", True))
    mock_storage.assert_called_with("KEYS/INTERCHAIN/test")
@patch("dragonchain.lib.authorization.storage.delete", return_value=True)
def test_remove_auth_key_returns_false_on_error(self, mock_storage):
    """Storage errors during delete are swallowed and reported as False."""
    mock_storage.side_effect = RuntimeError
    self.assertFalse(authorization.remove_auth_key("test"))
@patch("dragonchain.lib.authorization.gen_auth_key", return_value="test_key")
@patch("dragonchain.lib.authorization.gen_auth_key_id", return_value="test_key_id")
@patch("dragonchain.lib.authorization.storage.put_object_as_json")
@patch("dragonchain.lib.authorization.get_auth_key", return_value=False)
def test_register_new_auth_key_with_valid_data(self, mock_get_auth_key, mock_storage, mock_gen_key_id, mock_gen_key):
    """With no args, an id/key pair is generated and stored under KEYS/<id>; an id without a key is rejected."""
    self.assertRaises(ValueError, authorization.register_new_auth_key, False, None, "id")
    result = authorization.register_new_auth_key()
    mock_storage.assert_called_with("KEYS/test_key_id", result)
    self.assertEqual(result["key"], "test_key")
    self.assertEqual(result["id"], "test_key_id")
@patch("dragonchain.lib.authorization.storage.put_object_as_json")
def test_register_new_auth_key_supplying_both_key_and_id(self, mock_storage):
    """Caller-supplied key and id are stored as-is under KEYS/<id>."""
    result = authorization.register_new_auth_key(auth_key="test", auth_key_id="yes")
    mock_storage.assert_called_with("KEYS/yes", result)
    self.assertEqual(result["key"], "test")
    self.assertEqual(result["id"], "yes")
@patch("dragonchain.lib.authorization.storage.put_object_as_json")
def test_register_new_interchain_key_returns_true_on_success(self, mock_storage):
self.assertTrue(authorization.save_interchain_auth_key("test", "key"))
mock_storage.assert_called_once()
@patch("dragonchain.lib.authorization.storage.put_object_as_json", side_effect=Exception)
def test_register_new_interchain_key_returns_false_on_error(self, mock_storage):
self.assertFalse(authorization.save_interchain_auth_key("test", "key"))
@patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
@patch("dragonchain.lib.authorization.keys.get_my_keys", return_value=MagicMock(make_signature=MagicMock(return_value="sig")))
@patch("dragonchain.lib.authorization.save_interchain_auth_key", return_value=True)
@patch("dragonchain.lib.authorization.requests.post", return_value=MagicMock(status_code=201))
@patch("dragonchain.lib.authorization.gen_auth_key", return_value="key")
@patch("dragonchain.lib.authorization.matchmaking.get_dragonchain_address", return_value="https://someurl")
def test_register_interchain_key_with_remote_returns_valid(self, mock_get_address, mock_gen_auth, mock_post, mock_save, mock_keys, mock_dcid):
remote_dcid = "remote"
url = "https://someurl/v1/interchain-auth-register"
expected_key = {"dcid": "test_dcid", "key": "key", "signature": "sig"}
self.assertEqual(authorization.register_new_interchain_key_with_remote(remote_dcid), "key")
mock_post.assert_called_with(url, json=expected_key, timeout=30)
@patch("dragonchain.lib.keys.get_public_id", return_value="z7S3WADvnjCyFkUmL48cPGqrSHDrQghNxLFMwBEwwtMa")
@patch("dragonchain.lib.authorization.keys.get_my_keys")
@patch("dragonchain.lib.authorization.save_interchain_auth_key", return_value=True)
@patch("dragonchain.lib.authorization.requests.post", return_value=MagicMock(status_code=100))
@patch("dragonchain.lib.authorization.gen_auth_key", return_value="key")
@patch("dragonchain.lib.authorization.matchmaking.get_dragonchain_address", return_value="https://someurl")
def test_register_interchain_key_raises_with_bad_status_code(self, mock_get_address, mock_gen_auth, mock_post, mock_save, mock_keys, mock_get_id):
self.assertRaises(RuntimeError, authorization.register_new_interchain_key_with_remote, "thing")
@patch("dragonchain.lib.keys.get_public_id", return_value="z7S3WADvnjCyFkUmL48cPGqrSHDrQghNxLFMwBEwwtMa")
@patch("dragonchain.lib.authorization.keys.get_my_keys")
@patch("dragonchain.lib.authorization.save_interchain_auth_key", return_value=False)
@patch("dragonchain.lib.authorization.requests.post", return_value=MagicMock(status_code=201))
@patch("dragonchain.lib.authorization.gen_auth_key", return_value="key")
@patch("dragonchain.lib.authorization.matchmaking.get_dragonchain_address", return_value="https://someurl")
def test_register_interchain_key_raises_with_failure_to_register_interchain_key(
self, mock_get_address, mock_gen_auth, mock_post, mock_save, mock_keys, mock_get_id
):
self.assertRaises(RuntimeError, authorization.register_new_interchain_key_with_remote, "thing")
@patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
@patch("dragonchain.lib.authorization.register_new_interchain_key_with_remote", return_value="key")
@patch("dragonchain.lib.authorization.get_now_datetime", return_value=MagicMock(isoformat=MagicMock(return_value="timestamp")))
@patch("dragonchain.lib.authorization.get_auth_key", return_value=None)
def test_gen_interchain_request_dcid(self, mock_get_auth_key, date_mock, mock_register, mock_dcid):
dcid = "adcid"
full_path = "/path"
json_content = {"thing": "test"}
json_str = json.dumps(json_content, separators=(",", ":")).encode("utf-8")
expected_headers = {
"Content-Type": "application/json",
"timestamp": "timestampZ",
"dragonchain": dcid,
"Authorization": "DC1-HMAC-SHA256 test_dcid:1oJseWBqbZokioWGWjb2jq1iq493MkgUyc3FkQND5XM=",
}
# Test valid SHA256
headers, content = authorization.generate_authenticated_request("POST", dcid, full_path, json_content, "SHA256")
self.assertEqual(content, json_str)
self.assertDictEqual(headers, expected_headers)
# Test valid BLAKE2b512
headers, content = authorization.generate_authenticated_request("POST", dcid, full_path, json_content, "BLAKE2b512")
expected_headers[
"Authorization"
] = "DC1-HMAC-BLAKE2b512 test_dcid:JJiXbVuTjJ03/hNW8fZipw5DUiktO2lJSyml824eWS++mmilth7/BABgDYPvprAa99PHzFzYPA41iL45bI4p1w=="
self.assertEqual(content, json_str)
self.assertDictEqual(headers, expected_headers)
# Test valid SHA3-256
headers, content = authorization.generate_authenticated_request("POST", dcid, full_path, json_content, "SHA3-256")
expected_headers["Authorization"] = "DC1-HMAC-SHA3-256 test_dcid:ANsT9nToNzhWbxtoank/oLMDZoish5tFVuhAMzF/obo="
self.assertEqual(content, json_str)
self.assertDictEqual(headers, expected_headers)
@patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
@patch("dragonchain.lib.authorization.get_matchmaking_key", return_value=None)
@patch("dragonchain.lib.authorization.register_new_key_with_matchmaking", return_value="key")
@patch("dragonchain.lib.authorization.get_now_datetime", return_value=MagicMock(isoformat=MagicMock(return_value="timestamp")))
def test_gen_interchain_request_matchmaking(self, date_mock, mock_register, mock_get, mock_dcid):
full_path = "/path"
json_content = {"thing": "test"}
json_str = json.dumps(json_content, separators=(",", ":")).encode("utf-8")
expected_headers = {
"Content-Type": "application/json",
"timestamp": "timestampZ",
"Authorization": "DC1-HMAC-SHA256 test_dcid:ab+hEQC0NNJB7mHwpqsfQqEcOyolNOmDEQe9gvUZTYI=",
}
headers, content = authorization.generate_authenticated_request("POST", "matchmaking", full_path, json_content, "SHA256")
self.assertEqual(content, json_str)
self.assertDictEqual(headers, expected_headers)
@patch("dragonchain.lib.authorization.redis.get_sync", return_value=True)
def test_sig_replay_returns_true_with_existing_replay(self, mock_get_sync):
self.assertTrue(authorization.signature_is_replay("thing"))
@patch("dragonchain.lib.authorization.redis.get_sync", return_value=False)
@patch("dragonchain.lib.authorization.redis.set_sync")
def test_sig_replay_returns_false_when_valid(self, mock_set, mock_get):
self.assertFalse(authorization.signature_is_replay("thing"))
mock_set.assert_called_once()
@patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
@patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
@patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
@patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
@patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
def test_verify_req_auth_raises_with_wrong_dc_id(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
auth_str = "DC1-HMAC-SHA256 id:gr1FvIvTe1oOmFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ="
http_verb = "GET"
full_path = "/path"
timestamp = "2018-11-14T09:05:25.128176Z"
self.assertRaisesWithMessage(
exceptions.UnauthorizedException,
"Incorrect Dragonchain ID",
authorization.verify_request_authorization,
auth_str,
http_verb,
full_path,
"not_matching",
timestamp,
"",
b"",
False,
False,
)
@patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
@patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
@patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
@patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
@patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
def test_verify_req_auth_raises_with_unsupported_auth_version(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
http_verb = "GET"
full_path = "/path"
dcid = "test_dcid"
timestamp = "2018-11-14T09:05:25.128176Z"
self.assertRaisesWithMessage(
exceptions.UnauthorizedException,
"Unsupported DC Authorization Version",
authorization.verify_request_authorization,
"DC9-HMAC",
http_verb,
full_path,
dcid,
timestamp,
"",
b"",
False,
False,
)
@patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
@patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
@patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
@patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
@patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
def test_verify_req_auth_raises_with_unsupported_hmac_hash(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
http_verb = "GET"
full_path = "/path"
dcid = "test_dcid"
timestamp = "2018-11-14T09:05:25.128176Z"
self.assertRaisesWithMessage(
exceptions.UnauthorizedException,
"Unsupported HMAC Hash Type",
authorization.verify_request_authorization,
"DC1-HMAC-INVALID thing",
http_verb,
full_path,
dcid,
timestamp,
"",
b"",
False,
False,
)
@patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
@patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
@patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
@patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
@patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
def test_verify_req_auth_raises_with_old_timestamp(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
auth_str = "DC1-HMAC-SHA256 id:gr1FvIvTe1oOmFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ="
http_verb = "GET"
full_path = "/path"
dcid = "test_dcid"
self.assertRaisesWithMessage(
exceptions.UnauthorizedException,
"Timestamp of request too skewed",
authorization.verify_request_authorization,
auth_str,
http_verb,
full_path,
dcid,
"2019-11-14T09:05:25Z",
"",
b"",
False,
False,
)
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
    def test_verify_req_auth_raises_with_malformed_authorization(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
        """Headers missing the 'id:signature' pair, or missing the scheme entirely, must be rejected."""
        http_verb = "GET"
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        # Valid scheme but credential part is not 'id:signature'.
        self.assertRaisesWithMessage(
            exceptions.UnauthorizedException,
            "Malformed Authorization Header",
            authorization.verify_request_authorization,
            "DC1-HMAC-SHA256 thing",
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
        # No scheme/credential separation at all.
        self.assertRaisesWithMessage(
            exceptions.UnauthorizedException,
            "Malformed Authorization Header",
            authorization.verify_request_authorization,
            "bad_auth",
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
    def test_verify_req_auth_raises_with_invalid_hmac(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
        """A well-formed header whose signature does not match the request must be rejected."""
        http_verb = "GET"
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        self.assertRaisesWithMessage(
            exceptions.UnauthorizedException,
            "Invalid HMAC Authentication",
            authorization.verify_request_authorization,
            "DC1-HMAC-SHA256 id:badsignaturemFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ=",
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
    def test_verify_req_auth_passes_when_valid(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
        """Correctly-signed requests pass verification for each supported HMAC hash type."""
        auth_str = "DC1-HMAC-SHA256 id:gr1FvIvTe1oOmFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ="
        http_verb = "GET"
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        # Test valid SHA256
        authorization.verify_request_authorization(auth_str, http_verb, full_path, dcid, timestamp, "", b"", False, False)
        # Test valid BLAKE2b512
        authorization.verify_request_authorization(
            "DC1-HMAC-BLAKE2b512 id:x1PrKtbs51CR1X6/NTIxyjwOPmZF3rxIXdtJARDialRV+H3FbmUxLmqDuCQvPKEOLN9rNUFhsZa3QZVf8+kXkA==",
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
        # Test valid SHA3-256
        authorization.verify_request_authorization(
            "DC1-HMAC-SHA3-256 id:IjPhj3dzTyj0VhcI5oUl5vcFapX8/GpJaO5M82SD3dE=", http_verb, full_path, dcid, timestamp, "", b"", False, False
        )
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=True)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
    def test_verify_req_auth_raises_on_replay(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
        """When signature_is_replay reports True, an otherwise valid request must be rejected."""
        auth_str = "DC1-HMAC-SHA256 id:gr1FvIvTe1oOmFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ="
        http_verb = "GET"
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        self.assertRaisesWithMessage(
            exceptions.UnauthorizedException,
            "Previous matching request found (no replays allowed)",
            authorization.verify_request_authorization,
            auth_str,
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
    @patch("dragonchain.lib.authorization.should_rate_limit", return_value=True)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
    def test_verify_req_auth_raises_with_rate_limit(self, mock_get_auth_key, mock_date, mock_should_limit, mock_is_replay, mock_dcid):
        """When should_rate_limit reports True, verification must raise APIRateLimitException."""
        auth_str = "DC1-HMAC-SHA256 id:gr1FvIvTe1oOmFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ="
        http_verb = "GET"
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        self.assertRaisesWithMessage(
            exceptions.APIRateLimitException,
            f"API Rate Limit Exceeded. {authorization.RATE_LIMIT} requests allowed per minute.",
            authorization.verify_request_authorization,
            auth_str,
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", return_value=None)
    def test_verify_req_auth_raises_with_no_key(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
        """When no auth key exists for the key id, verification must fail as invalid HMAC."""
        auth_str = "DC1-HMAC-SHA256 id:gr1FvIvTe1oOmFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ="
        http_verb = "GET"
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        self.assertRaisesWithMessage(
            exceptions.UnauthorizedException,
            "Invalid HMAC Authentication",
            authorization.verify_request_authorization,
            auth_str,
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", side_effect=Exception)
    def test_verify_req_auth_raises_on_get_key_error(self, mock_get_auth_key, mock_date, mock_is_replay, mock_dcid):
        """An unexpected error while fetching the auth key must surface as 'Invalid HMAC Format'."""
        auth_str = "DC1-HMAC-SHA256 id:gr1FvIvTe1oOmFZqHgRQUhi6s/EyBvZmJWqH1oWV+UQ="
        http_verb = "GET"
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        self.assertRaisesWithMessage(
            exceptions.UnauthorizedException,
            "Invalid HMAC Format",
            authorization.verify_request_authorization,
            auth_str,
            http_verb,
            full_path,
            dcid,
            timestamp,
            "",
            b"",
            False,
            False,
        )
    @patch("dragonchain.lib.authorization.keys.get_public_id", return_value="test_dcid")
    @patch("dragonchain.lib.authorization.signature_is_replay", return_value=False)
    @patch("dragonchain.lib.authorization.get_now_datetime", return_value=datetime.datetime(2018, 11, 14, 9, 5, 25, 128176))
    @patch("dragonchain.lib.authorization.get_auth_key", return_value="key")
    def test_generated_authenticated_request_with_verifier(self, mock_get_auth_key, mock_date, mock_is_replay, mock_get_id):
        """
        This is more of a pseudo integration test, ensuring that
        the generate_authenticated_request 'POST', will generate things are properly
        validated by verify_request_authorization
        If this test ever fails, it means that interchain communication will be broken
        even if all other tests pass
        """
        full_path = "/path"
        dcid = "test_dcid"
        timestamp = "2018-11-14T09:05:25.128176Z"
        json_content = {"thing": "test"}
        headers, content = authorization.generate_authenticated_request("POST", dcid, full_path, json_content, "SHA256")
        auth_str = headers["Authorization"]
        # Test with SHA256 HMAC Auth
        authorization.verify_request_authorization(auth_str, "POST", full_path, dcid, timestamp, "application/json", content, False, False)
        headers, content = authorization.generate_authenticated_request("POST", dcid, full_path, json_content, "BLAKE2b512")
        auth_str = headers["Authorization"]
        # Test with BLAKE2b512 HMAC Auth
        authorization.verify_request_authorization(auth_str, "POST", full_path, dcid, timestamp, "application/json", content, False, False)
        headers, content = authorization.generate_authenticated_request("POST", dcid, full_path, json_content, "SHA3-256")
        auth_str = headers["Authorization"]
        # Test with SHA3-256 HMAC Auth
        authorization.verify_request_authorization(auth_str, "POST", full_path, dcid, timestamp, "application/json", content, False, False)
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 0)
    @patch("dragonchain.lib.authorization.redis.lindex_sync")
    def test_should_rate_limit_disabled_on_0(self, mock_lindex):
        """A RATE_LIMIT of 0 disables limiting entirely (no redis lookups performed)."""
        self.assertFalse(authorization.should_rate_limit("test"))
        mock_lindex.assert_not_called()
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 1)
    @patch("dragonchain.lib.authorization.redis.lindex_sync")
    @patch("dragonchain.lib.authorization.redis.ltrim_sync")
    @patch("dragonchain.lib.authorization.redis.lpush_sync")
    @patch("dragonchain.lib.authorization.time.time", return_value=1554249099.7634845)
    def test_should_rate_limit_calls_lpush_when_returning_false(self, mock_time, mock_lpush, mock_ltrim, mock_lindex):
        """A non-limited request records its timestamp via lpush under the 'request:<id>' key."""
        self.assertFalse(authorization.should_rate_limit("test"))
        mock_lpush.assert_called_once_with("request:test", "1554249099.7634845")
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 2)
    @patch("dragonchain.lib.authorization.redis.lindex_sync")
    @patch("dragonchain.lib.authorization.redis.lpush_sync")
    @patch("dragonchain.lib.authorization.random.randint", return_value=0)
    @patch("dragonchain.lib.authorization.redis.ltrim_sync")
    def test_should_rate_limit_calls_ltrim(self, mock_ltrim, mock_rand, mock_lpush, mock_lindex):
        """With randint forced to 0, the request list is trimmed to the RATE_LIMIT window size."""
        authorization.should_rate_limit("test")
        mock_ltrim.assert_called_once_with("request:test", 0, 1)
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 2)
    @patch("dragonchain.lib.authorization.redis.lpush_sync")
    @patch("dragonchain.lib.authorization.redis.ltrim_sync")
    @patch("dragonchain.lib.authorization.redis.lindex_sync")
    def test_should_rate_limit_calls_lindex(self, mock_lindex, mock_ltrim, mock_lpush):
        """The oldest in-window timestamp is fetched with lindex at position RATE_LIMIT - 1."""
        authorization.should_rate_limit("test")
        mock_lindex.assert_called_once_with("request:test", 1, decode=False)
    @patch("dragonchain.lib.authorization.RATE_LIMIT", 2)
    @patch("dragonchain.lib.authorization.redis.lindex_sync", return_value=b"1554249095.7634845")
    @patch("dragonchain.lib.authorization.redis.ltrim_sync")
    @patch("dragonchain.lib.authorization.time.time", return_value=1554249099.7634845)
    def test_should_rate_limit_returns_true_when_limited(self, mock_time, mock_ltrim, mock_lindex):
        """When the RATE_LIMIT-th previous request is recent (4s ago here), the caller is limited."""
        self.assertTrue(authorization.should_rate_limit("test"))
| 52.033841
| 150
| 0.7061
|
4a1169ab051b84dcf054319b9638f7c182fc3dcb
| 130
|
py
|
Python
|
application/server/main/config.py
|
Barometre-de-la-Science-Ouverte/bso3-harvest-publication
|
06c729a1e44ed87e8f73b4c2bd456f5e09a73e34
|
[
"MIT"
] | null | null | null |
application/server/main/config.py
|
Barometre-de-la-Science-Ouverte/bso3-harvest-publication
|
06c729a1e44ed87e8f73b4c2bd456f5e09a73e34
|
[
"MIT"
] | 5
|
2021-06-04T07:12:06.000Z
|
2021-08-25T15:17:41.000Z
|
application/server/main/config.py
|
Barometre-de-la-Science-Ouverte/bso3-harvest-publication
|
06c729a1e44ed87e8f73b4c2bd456f5e09a73e34
|
[
"MIT"
] | null | null | null |
import os

# Name of the environment this application was launched in (None when unset).
APP_ENV = os.getenv('APP_ENV')

# Configuration mapping exported for the rest of the application to consume.
config = {'APP_ENV': APP_ENV}
| 13
| 34
| 0.7
|
4a1169b7b24f0298833951ed328b4198a5025ea2
| 87,442
|
py
|
Python
|
src/olympia/addons/models.py
|
zjzh/addons-server
|
bbac7ff70f94443336f5fb0543ab8126a6adf76a
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/addons/models.py
|
zjzh/addons-server
|
bbac7ff70f94443336f5fb0543ab8126a6adf76a
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/addons/models.py
|
zjzh/addons-server
|
bbac7ff70f94443336f5fb0543ab8126a6adf76a
|
[
"BSD-3-Clause"
] | null | null | null |
import hashlib
import itertools
import os
import re
import time
import uuid
from datetime import datetime
from urllib.parse import urlsplit
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.db import models, transaction
from django.db.models import F, Max, Min, Q, signals as dbsignals
from django.db.models.expressions import Func
from django.dispatch import receiver
from django.urls import reverse
from django.utils import translation
from django.utils.functional import cached_property
from django.utils.translation import trans_real, gettext_lazy as _
from django_statsd.clients import statsd
import olympia.core.logger
from olympia import activity, amo, core
from olympia.addons.utils import generate_addon_guid
from olympia.amo.decorators import use_primary_db
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import (
BasePreview,
BaseQuerySet,
FilterableManyToManyField,
LongNameIndex,
ManagerBase,
ModelBase,
OnChangeMixin,
SaveUpdateMixin,
)
from olympia.amo.templatetags import jinja_helpers
from olympia.amo.utils import (
StopWatch,
attach_trans_dict,
find_language,
send_mail,
slugify,
sorted_groupby,
timer,
to_language,
)
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.constants.promoted import NOT_PROMOTED, PRE_REVIEW_GROUPS, RECOMMENDED
from olympia.constants.reviewers import REPUTATION_CHOICES
from olympia.files.models import File
from olympia.files.utils import extract_translations, resolve_i18n_message
from olympia.ratings.models import Rating
from olympia.tags.models import Tag
from olympia.translations.fields import (
LinkifiedField,
PurifiedField,
TranslatedField,
save_signal,
)
from olympia.translations.hold import translation_saved
from olympia.translations.models import Translation
from olympia.users.models import UserProfile
from olympia.users.utils import get_task_user
from olympia.versions.compare import version_int
from olympia.versions.models import (
Version,
VersionPreview,
VersionReviewerFlags,
inherit_nomination,
)
from . import signals
# Module-level logger for add-on related events.
log = olympia.core.logger.getLogger('z.addons')

# Highest numeric suffix clean_slug() will try when deduplicating slugs.
MAX_SLUG_INCREMENT = 999
SLUG_INCREMENT_SUFFIXES = set(range(1, MAX_SLUG_INCREMENT + 1))
# Format used to rewrite the guid of an add-on whose guid gets reused.
GUID_REUSE_FORMAT = 'guid-reused-by-pk-{}'
class GuidAlreadyDeniedError(RuntimeError):
    """Raised when attempting to deny a GUID that is already denied."""
def get_random_slug():
    """Return a 20 character long random string."""
    # A uuid4 renders as five hex groups (8-4-4-4-12); discarding the final
    # 12-char group and the dashes leaves exactly 20 hex characters.
    return str(uuid.uuid4()).rsplit('-', 1)[0].replace('-', '')
def clean_slug(instance, slug_field='slug'):
    """Cleans a model instance slug.
    This strives to be as generic as possible but is only used
    by Add-ons at the moment.
    :param instance: The instance to clean the slug for.
    :param slug_field: The field where to get the currently set slug from.
    :return: The instance, with a unique, non-denied slug set on it.
    """
    slug = getattr(instance, slug_field, None) or instance.name
    if not slug:
        # Initialize the slug with what we have available: a name translation
        # or in last resort a random slug.
        translations = Translation.objects.filter(id=instance.name_id)
        if translations.exists():
            slug = translations[0]
    max_length = instance._meta.get_field(slug_field).max_length
    # We have to account for slug being reduced to '' by slugify
    slug = slugify(slug or '')[:max_length] or get_random_slug()
    if DeniedSlug.blocked(slug):
        # Append '~' so the slug no longer matches the denied entry.
        slug = slug[: max_length - 1] + '~'
    # The following trick makes sure we are using a manager that returns
    # all the objects, as otherwise we could have a slug clash on our hands.
    # Eg with the "Addon.objects" manager, which doesn't list deleted addons,
    # we could have a "clean" slug which is in fact already assigned to an
    # already existing (deleted) addon. Also, make sure we use the base class.
    manager = models.Manager()
    manager.model = instance._meta.proxy_for_model or instance.__class__
    qs = manager.values_list(slug_field, flat=True)  # Get list of all slugs.
    if instance.id:
        qs = qs.exclude(pk=instance.id)  # Can't clash with itself.
    # We first need to make sure there's a clash, before trying to find a
    # suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
    # available.
    clash = qs.filter(**{slug_field: slug})
    if clash.exists():
        max_postfix_length = len(str(MAX_SLUG_INCREMENT))
        slug = slugify(slug)[: max_length - max_postfix_length]
        # There is a clash, so find a suffix that will make this slug unique.
        lookup = {'%s__startswith' % slug_field: slug}
        clashes = qs.filter(**lookup)
        prefix_len = len(slug)
        used_slug_numbers = [value[prefix_len:] for value in clashes]
        # find the next free slug number
        slug_numbers = {int(i) for i in used_slug_numbers if i.isdigit()}
        unused_numbers = SLUG_INCREMENT_SUFFIXES - slug_numbers
        if unused_numbers:
            num = min(unused_numbers)
        elif max_length is None:
            num = max(slug_numbers) + 1
        else:
            # This could happen. The current implementation (using
            # ``[:max_length -2]``) only works for the first 100 clashes in the
            # worst case (if the slug is equal to or longer than
            # ``max_length - 2`` chars).
            # After that, {verylongslug}-100 will be trimmed down to
            # {verylongslug}-10, which is already assigned, but it's the last
            # solution tested.
            raise RuntimeError(f'No suitable slug increment for {slug} found')
        slug = f'{slug}{num}'
    setattr(instance, slug_field, slug)
    return instance
class AddonQuerySet(BaseQuerySet):
    """QuerySet for Addon exposing the common status/visibility filters."""

    def id_or_slug(self, val):
        """Get add-ons by id or slug."""
        looks_like_slug = isinstance(val, str) and not val.isdigit()
        if looks_like_slug:
            return self.filter(slug=val)
        return self.filter(id=val)

    def public(self):
        """Get reviewed add-ons only"""
        return self.filter(self.valid_q(amo.REVIEWED_STATUSES))

    def valid(self):
        """Get valid, enabled add-ons only"""
        return self.filter(self.valid_q(amo.VALID_ADDON_STATUSES))

    def not_disabled_by_mozilla(self):
        """Get all add-ons not disabled by Mozilla."""
        return self.exclude(status=amo.STATUS_DISABLED)

    def valid_q(self, statuses):
        """
        Return a Q object that selects a valid Addon with the given statuses.
        An add-on is valid if not disabled and has a current version.
        """
        return Q(
            disabled_by_user=False,
            status__in=statuses,
            _current_version__isnull=False,
        )
class AddonManager(ManagerBase):
    """Default manager for Addon, wrapping AddonQuerySet and adding the
    reviewer-queue query builders."""

    _queryset_class = AddonQuerySet

    def __init__(self, include_deleted=False):
        # DO NOT change the default value of include_deleted unless you've read
        # through the comment just above the Addon managers
        # declaration/instantiation and understand the consequences.
        ManagerBase.__init__(self)
        self.include_deleted = include_deleted
    def get_queryset(self):
        qs = super().get_queryset()
        if not self.include_deleted:
            qs = qs.exclude(status=amo.STATUS_DELETED)
        # Apply the default transformer (attaches related data in bulk).
        return qs.transform(Addon.transformer)
    def id_or_slug(self, val):
        """Get add-ons by id or slug."""
        return self.get_queryset().id_or_slug(val)
    def public(self):
        """Get public add-ons only"""
        return self.get_queryset().public()
    def valid(self):
        """Get valid, enabled add-ons only"""
        return self.get_queryset().valid()
    def not_disabled_by_mozilla(self):
        """Get all add-ons not disabled by Mozilla."""
        return self.get_queryset().not_disabled_by_mozilla()
    def get_base_queryset_for_queue(
        self,
        admin_reviewer=False,
        content_review=False,
        theme_review=False,
        exclude_listed_pending_rejection=True,
        select_related_fields_for_listed=True,
    ):
        """Build the queryset shared by all reviewer-queue methods below:
        name-only translations, queue-specific select_related joins, and
        admin-review exclusions depending on the queue type."""
        qs = (
            self.get_queryset()
            # We don't want the default transformer, it does too much, and
            # crucially, it prevents the
            # select_related('_current_version__autoapprovalsummary') from
            # working, because it overrides the _current_version with the one
            # it fetches. We want translations though, but only for the name.
            .only_translations().defer(
                *[x.name for x in Addon._meta.translated_fields if x.name != 'name']
            )
        )
        # Useful joins to avoid extra queries.
        select_related_fields = [
            'reviewerflags',
            'addonapprovalscounter',
        ]
        if select_related_fields_for_listed:
            # Most listed queues need these to avoid extra queries because
            # they display the score, flags, promoted status, link to files
            # etc.
            select_related_fields.extend(
                (
                    '_current_version',
                    '_current_version__autoapprovalsummary',
                    '_current_version__file',
                    '_current_version__reviewerflags',
                    'promotedaddon',
                )
            )
        qs = qs.select_related(*select_related_fields)
        if exclude_listed_pending_rejection:
            qs = qs.filter(
                _current_version__reviewerflags__pending_rejection__isnull=True
            )
        if not admin_reviewer:
            # Non-admin reviewers never see add-ons flagged for admin review
            # of the relevant kind (content / theme / code).
            if content_review:
                qs = qs.exclude(reviewerflags__needs_admin_content_review=True)
            elif theme_review:
                qs = qs.exclude(reviewerflags__needs_admin_theme_review=True)
            else:
                qs = qs.exclude(reviewerflags__needs_admin_code_review=True)
        return qs
    def get_listed_pending_manual_approval_queue(
        self,
        admin_reviewer=False,
        recommendable=False,
        statuses=amo.VALID_ADDON_STATUSES,
        types=amo.GROUP_TYPE_ADDON,
    ):
        """Return listed add-ons with a version awaiting manual review,
        annotated with first nomination date and latest version string."""
        if types not in (amo.GROUP_TYPE_ADDON, amo.GROUP_TYPE_THEME):
            raise ImproperlyConfigured(
                'types needs to be either GROUP_TYPE_ADDON or GROUP_TYPE_THEME'
            )
        theme_review = types == amo.GROUP_TYPE_THEME
        qs = self.get_base_queryset_for_queue(
            admin_reviewer=admin_reviewer,
            # The select related needed to avoid extra queries in other queues
            # typically depend on current_version, but we don't care here, as
            # it's a pre-review queue. We'll make the select_related() calls we
            # need ourselves.
            select_related_fields_for_listed=False,
            # We'll filter on pending_rejection below without limiting
            # ourselves to the current_version.
            exclude_listed_pending_rejection=False,
            theme_review=theme_review,
        )
        filters = (
            Q(
                status__in=statuses,
                type__in=types,
                versions__channel=amo.RELEASE_CHANNEL_LISTED,
                versions__file__status=amo.STATUS_AWAITING_REVIEW,
                versions__reviewerflags__pending_rejection=None,
            )
            & ~Q(disabled_by_user=True)
        )
        if recommendable:
            filters &= Q(promotedaddon__group_id=RECOMMENDED.id)
        elif not theme_review:
            # Non-recommended extensions only need manual review when auto
            # approval is disabled/delayed or they belong to a pre-review
            # promoted group.
            filters &= ~Q(promotedaddon__group_id=RECOMMENDED.id) & (
                Q(reviewerflags__auto_approval_disabled=True)
                | Q(reviewerflags__auto_approval_disabled_until_next_approval=True)
                | Q(reviewerflags__auto_approval_delayed_until__gt=datetime.now())
                | Q(
                    promotedaddon__group_id__in=(
                        group.id for group in PRE_REVIEW_GROUPS
                    )
                )
            )
        return (
            # We passed select_related_fields_for_listed=False but there are
            # still base select_related() fields in that queryset that are
            # applied in all cases that we don't want, so reset them away.
            qs.select_related(None)
            .filter(filters)
            .select_related('reviewerflags')
            .annotate(
                first_version_nominated=Min('versions__nomination'),
                # Because of the Min(), a GROUP BY addon.id is created.
                # Unfortunately if we were to annotate with just
                # F('versions__version') Django would add version.version to
                # the GROUP BY, ruining it. To prevent that, we wrap it into
                # a harmless Func() - we need a no-op function to do that,
                # hence the LOWER().
                latest_version=Func(F('versions__version'), function='LOWER'),
            )
        )
    def get_addons_with_unlisted_versions_queue(self, admin_reviewer=False):
        """Return non-disabled add-ons that have at least one unlisted
        version."""
        qs = self.get_base_queryset_for_queue(
            select_related_fields_for_listed=False,
            exclude_listed_pending_rejection=False,
            admin_reviewer=admin_reviewer,
        )
        return (
            qs.filter(versions__channel=amo.RELEASE_CHANNEL_UNLISTED).exclude(
                status=amo.STATUS_DISABLED
            )
            # Reset select_related() made by get_base_queryset_for_queue(), we
            # don't want them for the unlisted queue.
            .select_related(None)
        )
    def get_unlisted_pending_manual_approval_queue(self, admin_reviewer=False):
        """Return add-ons with unlisted versions awaiting review while their
        unlisted auto-approval is disabled or delayed."""
        qs = self.get_base_queryset_for_queue(
            select_related_fields_for_listed=False,
            exclude_listed_pending_rejection=False,
            admin_reviewer=admin_reviewer,
        )
        filters = Q(
            versions__channel=amo.RELEASE_CHANNEL_UNLISTED,
            versions__file__status=amo.STATUS_AWAITING_REVIEW,
            type__in=amo.GROUP_TYPE_ADDON,
            versions__reviewerflags__pending_rejection__isnull=True,
        ) & (
            Q(reviewerflags__auto_approval_disabled_unlisted=True)
            | Q(reviewerflags__auto_approval_disabled_until_next_approval_unlisted=True)
            | Q(reviewerflags__auto_approval_delayed_until__gt=datetime.now())
        )
        return qs.filter(filters).annotate(
            # These annotations should be applied to versions that match the
            # filters above. They'll be used to sort the results or just
            # display the data to reviewers in the queue.
            first_version_created=Min('versions__created'),
            worst_score=Max('versions__autoapprovalsummary__score'),
        )
    def get_auto_approved_queue(self, admin_reviewer=False):
        """Return a queryset of Addon objects that have been auto-approved but
        not confirmed by a human yet."""
        success_verdict = amo.AUTO_APPROVED
        qs = (
            self.get_base_queryset_for_queue(admin_reviewer=admin_reviewer)
            .public()
            .filter(_current_version__autoapprovalsummary__verdict=success_verdict)
            .exclude(_current_version__autoapprovalsummary__confirmed=True)
            .order_by(
                '-_current_version__autoapprovalsummary__score',
                '-_current_version__autoapprovalsummary__weight',
                'addonapprovalscounter__last_human_review',
                'created',
            )
        )
        return qs
    def get_content_review_queue(self, admin_reviewer=False):
        """Return a queryset of Addon objects that need content review."""
        qs = (
            self.get_base_queryset_for_queue(
                admin_reviewer=admin_reviewer, content_review=True
            )
            .valid()
            .filter(
                addonapprovalscounter__last_content_review=None,
                # Only content review extensions and dictionaries. See
                # https://github.com/mozilla/addons-server/issues/11796 &
                # https://github.com/mozilla/addons-server/issues/12065
                type__in=(amo.ADDON_EXTENSION, amo.ADDON_DICT),
            )
            .order_by('created')
        )
        return qs
    def get_scanners_queue(self, admin_reviewer=False):
        """Return a queryset of Addon objects that have been approved but
        contain versions that were automatically flagged as needing human
        review (regardless of channel)."""
        return (
            self.get_base_queryset_for_queue(admin_reviewer=admin_reviewer)
            # All valid statuses, plus incomplete as well because the add-on
            # could be purely unlisted (so we can't use valid_q(), which
            # filters out current_version=None). We know the add-ons are likely
            # to have a version since they got the needs_human_review flag, so
            # returning incomplete ones is acceptable.
            .filter(
                status__in=[amo.STATUS_APPROVED, amo.STATUS_NOMINATED, amo.STATUS_NULL],
                versions__file__status__in=[
                    amo.STATUS_APPROVED,
                    amo.STATUS_AWAITING_REVIEW,
                ],
                versions__needs_human_review=True,
            ).order_by('created')
            # There could be several versions matching for a single add-on so
            # we need a distinct.
            .distinct()
        )
    def get_mad_queue(self, admin_reviewer=False):
        """Return add-ons flagged for human review by MAD (the machine
        learning scanner), on either channel."""
        return (
            self.get_base_queryset_for_queue(admin_reviewer=admin_reviewer)
            # All valid statuses, plus incomplete as well because the add-on
            # could be purely unlisted (so we can't use valid_q(), which
            # filters out current_version=None). We know the add-ons are likely
            # to have a version since they got the needs_human_review_by_mad
            # flag, so returning incomplete ones is acceptable.
            .filter(
                Q(
                    status__in=[
                        amo.STATUS_APPROVED,
                        amo.STATUS_NOMINATED,
                        amo.STATUS_NULL,
                    ]
                ),
                Q(
                    versions__file__status__in=[
                        amo.STATUS_APPROVED,
                        amo.STATUS_AWAITING_REVIEW,
                    ]
                ),
                (
                    Q(
                        versions__reviewerflags__needs_human_review_by_mad=True,
                        versions__channel=amo.RELEASE_CHANNEL_UNLISTED,
                    )
                    | Q(
                        _current_version__reviewerflags__needs_human_review_by_mad=(  # noqa
                            True
                        )
                    )
                ),
            ).order_by('created')
            # There could be several versions matching for a single add-on so
            # we need a distinct.
            .distinct()
        )
    def get_pending_rejection_queue(self, admin_reviewer=False):
        """Return add-ons whose current version is scheduled for rejection,
        soonest deadline first."""
        filter_kwargs = {
            '_current_version__reviewerflags__pending_rejection__isnull': False
        }
        return (
            self.get_base_queryset_for_queue(
                admin_reviewer=admin_reviewer,
                exclude_listed_pending_rejection=False,
            )
            .filter(**filter_kwargs)
            .order_by('_current_version__reviewerflags__pending_rejection')
        )
class Addon(OnChangeMixin, ModelBase):
    id = PositiveAutoField(primary_key=True)
    STATUS_CHOICES = amo.STATUS_CHOICES_ADDON
    # guid/slug are nullable: an add-on can exist before either is assigned.
    guid = models.CharField(max_length=255, unique=True, null=True)
    slug = models.CharField(max_length=30, unique=True, null=True)
    name = TranslatedField()
    # Several db_column values below differ from the field names; they map to
    # legacy column names in the 'addons' table.
    default_locale = models.CharField(
        max_length=10, default=settings.LANGUAGE_CODE, db_column='defaultlocale'
    )
    type = models.PositiveIntegerField(
        choices=amo.ADDON_TYPE.items(),
        db_column='addontype_id',
        default=amo.ADDON_EXTENSION,
    )
    status = models.PositiveIntegerField(
        choices=STATUS_CHOICES.items(), default=amo.STATUS_NULL
    )
    icon_type = models.CharField(max_length=25, blank=True, db_column='icontype')
    icon_hash = models.CharField(max_length=8, blank=True, null=True)
    homepage = TranslatedField()
    support_email = TranslatedField(db_column='supportemail')
    support_url = TranslatedField(db_column='supporturl')
    description = PurifiedField(short=False)
    summary = LinkifiedField()
    developer_comments = PurifiedField(db_column='developercomments')
    eula = PurifiedField()
    privacy_policy = PurifiedField(db_column='privacypolicy')
    # Aggregated rating/usage statistics, denormalized onto the addon row.
    average_rating = models.FloatField(
        max_length=255, default=0, null=True, db_column='averagerating'
    )
    bayesian_rating = models.FloatField(default=0, db_column='bayesianrating')
    total_ratings = models.PositiveIntegerField(default=0, db_column='totalreviews')
    text_ratings_count = models.PositiveIntegerField(
        default=0, db_column='textreviewscount'
    )
    weekly_downloads = models.PositiveIntegerField(
        default=0, db_column='weeklydownloads'
    )
    hotness = models.FloatField(default=0)
    average_daily_users = models.PositiveIntegerField(default=0)
    last_updated = models.DateTimeField(
        null=True, help_text='Last time this add-on had a file/version update'
    )
    disabled_by_user = models.BooleanField(default=False, db_column='inactive')
    target_locale = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        help_text='For dictionaries and language packs. Identifies the '
        'language and, optionally, region that this add-on is '
        'written for. Examples: en-US, fr, and de-AT',
    )
    contributions = models.URLField(max_length=255, blank=True)
    # Deleted authors are filtered out of the relation via q_filter.
    authors = FilterableManyToManyField(
        'users.UserProfile',
        through='AddonUser',
        related_name='addons',
        q_filter=~Q(addonuser__role=amo.AUTHOR_ROLE_DELETED),
    )
    _current_version = models.ForeignKey(
        Version,
        db_column='current_version',
        related_name='+',
        null=True,
        on_delete=models.SET_NULL,
    )
    is_experimental = models.BooleanField(default=False, db_column='experimental')
    reputation = models.SmallIntegerField(
        default=0,
        null=True,
        choices=REPUTATION_CHOICES.items(),
        help_text='The higher the reputation value, the further down the '
        'add-on will be in the auto-approved review queue. '
        'A value of 0 has no impact',
    )
    requires_payment = models.BooleanField(default=False)
    unfiltered = AddonManager(include_deleted=True)
    objects = AddonManager()
    class Meta:
        db_table = 'addons'
        # This is very important:
        # The default base manager will be used for relations like
        # `version.addon`. We thus want one that is NOT filtered in any case,
        # we don't want a 500 if the addon is not found (because it has the
        # status amo.STATUS_DELETED for example).
        # The CLASS of the one configured here will also be used for "many to
        # many relations" like `collection.addons`. In that case, we do want
        # the filtered version by default, to make sure we're not displaying
        # stuff by mistake. You thus want the filtered one configured
        # as `base_manager_name`.
        # We don't control the instantiation, but AddonManager sets
        # include_deleted to False by default, so filtering is enabled by
        # default.
        base_manager_name = 'unfiltered'
        indexes = [
            models.Index(fields=('bayesian_rating',), name='bayesianrating'),
            models.Index(fields=('created',), name='addons_created_idx'),
            models.Index(fields=('_current_version',), name='current_version'),
            models.Index(fields=('disabled_by_user',), name='inactive'),
            models.Index(fields=('hotness',), name='hotness_idx'),
            models.Index(fields=('last_updated',), name='last_updated'),
            models.Index(fields=('modified',), name='modified_idx'),
            models.Index(fields=('status',), name='addons_status_idx'),
            models.Index(fields=('target_locale',), name='target_locale'),
            models.Index(fields=('type',), name='addontype_id'),
            models.Index(fields=('weekly_downloads',), name='weeklydownloads_idx'),
            models.Index(fields=('average_daily_users', 'type'), name='adus_type_idx'),
            models.Index(fields=('bayesian_rating', 'type'), name='rating_type_idx'),
            models.Index(fields=('created', 'type'), name='created_type_idx'),
            models.Index(fields=('last_updated', 'type'), name='last_updated_type_idx'),
            models.Index(fields=('modified', 'type'), name='modified_type_idx'),
            models.Index(
                fields=('type', 'status', 'disabled_by_user'),
                name='type_status_inactive_idx',
            ),
            models.Index(
                fields=('weekly_downloads', 'type'), name='downloads_type_idx'
            ),
            models.Index(
                fields=('type', 'status', 'disabled_by_user', '_current_version'),
                name='visible_idx',
            ),
            models.Index(fields=('name', 'status', 'type'), name='name_2'),
        ]
def __str__(self):
return f'{self.id}: {self.name}'
    def __init__(self, *args, **kw):
        # Overridden without extra behavior (delegates straight to Model).
        super().__init__(*args, **kw)
    def save(self, **kw):
        # Make sure the slug is valid/unique before persisting.
        self.clean_slug()
        super().save(**kw)
    @use_primary_db
    def clean_slug(self, slug_field='slug'):
        # Deleted add-ons have their slug NULLed out (see delete()); never
        # regenerate one for them.
        if self.status == amo.STATUS_DELETED:
            return
        clean_slug(self, slug_field)
    def force_disable(self):
        """Force the add-on status to STATUS_DISABLED, log the action, and
        disable all of its files."""
        activity.log_create(amo.LOG.FORCE_DISABLE, self)
        log.info(
            'Addon "%s" status force-changed to: %s', self.slug, amo.STATUS_DISABLED
        )
        self.update(status=amo.STATUS_DISABLED)
        self.update_version()
        # See: https://github.com/mozilla/addons-server/issues/13194
        self.disable_all_files()
    def force_enable(self):
        """Force the add-on status to STATUS_APPROVED, log the action, then
        let update_status() correct it if approval isn't actually possible."""
        activity.log_create(amo.LOG.FORCE_ENABLE, self)
        log.info(
            'Addon "%s" status force-changed to: %s', self.slug, amo.STATUS_APPROVED
        )
        self.update(status=amo.STATUS_APPROVED)
        # Call update_status() to fix the status if the add-on is not actually
        # in a state that allows it to be public.
        self.update_status()
    def deny_resubmission(self):
        """Add this add-on's guid to DeniedGuid, blocking resubmission.
        Raises RuntimeError if the add-on has no guid, and
        GuidAlreadyDeniedError if the guid is already denied.
        """
        if not self.guid:
            raise RuntimeError('No GUID on this add-on')
        if self.is_guid_denied:
            raise GuidAlreadyDeniedError('GUID already denied')
        activity.log_create(amo.LOG.DENIED_GUID_ADDED, self)
        log.info('Deny resubmission for addon "%s"', self.slug)
        DeniedGuid.objects.create(guid=self.guid)
    def allow_resubmission(self):
        """Remove this add-on's guid from DeniedGuid, allowing resubmission.
        Raises RuntimeError if the guid is not currently denied.
        """
        if not self.is_guid_denied:
            raise RuntimeError('GUID already allowed')
        activity.log_create(amo.LOG.DENIED_GUID_DELETED, self)
        log.info('Allow resubmission for addon "%s"', self.slug)
        DeniedGuid.objects.filter(guid=self.guid).delete()
    def disable_all_files(self):
        # Mark every file of every version (any channel) as DISABLED.
        File.objects.filter(version__addon=self).update(status=amo.STATUS_DISABLED)
    @property
    def is_guid_denied(self):
        # True when this add-on's guid is on the denied list.
        return DeniedGuid.objects.filter(guid=self.guid).exists()
    def is_soft_deleteable(self):
        # Soft-deletion (keeping the row with STATUS_DELETED) applies unless
        # the add-on is incomplete (status 0) AND has no versions at all.
        # Note: returns the truthy status value or a bool, used as a boolean.
        return self.status or Version.unfiltered.filter(addon=self).exists()
    def _prepare_deletion_email(self, msg, reason):
        """Build the (subject, body) of the admin notification email sent
        when an add-on is deleted."""
        user = core.get_user()
        # Don't localize email to admins, use 'en-US' always.
        with translation.override(settings.LANGUAGE_CODE):
            # The types are lazy translated in apps/constants/base.py.
            atype = amo.ADDON_TYPE.get(self.type, 'unknown').upper()
        context = {
            'atype': atype,
            'authors': [u.email for u in self.authors.all()],
            'adu': self.average_daily_users,
            'guid': self.guid,
            'id': self.id,
            'msg': msg,
            'reason': reason,
            'name': self.name,
            'slug': self.slug,
            'weekly_downloads': self.weekly_downloads,
            'url': jinja_helpers.absolutify(self.get_url_path()),
            'user_str': (
                f'{user.name}, {user.email} ({user.id})' if user else 'Unknown'
            ),
        }
        email_msg = (
            """
        The following %(atype)s was deleted.
        %(atype)s: %(name)s
        URL: %(url)s
        DELETED BY: %(user_str)s
        ID: %(id)s
        GUID: %(guid)s
        AUTHORS: %(authors)s
        WEEKLY DOWNLOADS: %(weekly_downloads)s
        AVERAGE DAILY USERS: %(adu)s
        NOTES: %(msg)s
        REASON GIVEN BY USER FOR DELETION: %(reason)s
        """
            % context
        )
        log.info('Sending delete email for %(atype)s %(id)s' % context)
        subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context
        return subject, email_msg
    @transaction.atomic
    def delete(self, msg='', reason='', send_delete_email=True):
        """Delete the add-on.
        Soft-deletes (status -> STATUS_DELETED, slug and current version
        cleared, versions/files marked deleted/disabled) when
        is_soft_deleteable() is true, otherwise performs a real row
        deletion. Optionally notifies admins by email. Returns True, or
        None when the add-on was already soft-deleted.
        """
        # To avoid a circular import
        from . import tasks
        from olympia.versions import tasks as version_tasks
        from olympia.files import tasks as file_tasks
        # Check for soft deletion path. Happens only if the addon status isn't
        # 0 (STATUS_INCOMPLETE) with no versions.
        soft_deletion = self.is_soft_deleteable()
        if soft_deletion and self.status == amo.STATUS_DELETED:
            # We're already done.
            return
        id = self.id
        # Fetch previews before deleting the addon instance, so that we can
        # pass the list of files to delete to the delete_preview_files task
        # after the addon is deleted.
        previews = list(
            Preview.objects.filter(addon__id=id).values_list('id', flat=True)
        )
        version_previews = list(
            VersionPreview.objects.filter(version__addon__id=id).values_list(
                'id', flat=True
            )
        )
        if soft_deletion:
            # /!\ If we ever stop using soft deletion, and remove this code, we
            # need to make sure that the logs created below aren't cascade
            # deleted!
            log.info('Deleting add-on: %s' % self.id)
            if send_delete_email:
                email_to = [settings.DELETION_EMAIL]
                subject, email_msg = self._prepare_deletion_email(msg, reason)
            else:
                email_to, subject, email_msg = [], '', ''
            # If the add-on was disabled by Mozilla, add the guid to
            # DeniedGuids to prevent resubmission after deletion.
            if self.status == amo.STATUS_DISABLED:
                try:
                    with transaction.atomic():
                        self.deny_resubmission()
                except RuntimeError:
                    # If the guid is already in DeniedGuids, we are good.
                    pass
            # Update or NULL out various fields.
            models.signals.pre_delete.send(sender=Addon, instance=self)
            self._ratings.all().delete()
            # We avoid triggering signals for Version & File on purpose to
            # avoid extra work. Files will be moved to the correct storage
            # location with hide_disabled_files task or hide_disabled_files
            # cron as a fallback.
            self.disable_all_files()
            file_tasks.hide_disabled_files.delay(addon_id=self.id)
            self.versions.all().update(deleted=True)
            VersionReviewerFlags.objects.filter(version__addon=self).update(
                pending_rejection=None
            )
            # The last parameter is needed to automagically create an AddonLog.
            activity.log_create(amo.LOG.DELETE_ADDON, self.pk, str(self.guid), self)
            self.update(
                status=amo.STATUS_DELETED,
                slug=None,
                _current_version=None,
                modified=datetime.now(),
            )
            models.signals.post_delete.send(sender=Addon, instance=self)
            if send_delete_email:
                send_mail(subject, email_msg, recipient_list=email_to)
        else:
            # Real deletion path.
            super().delete()
        for preview in previews:
            tasks.delete_preview_files.delay(preview)
        for preview in version_previews:
            version_tasks.delete_preview_files.delay(preview)
        return True
    @classmethod
    def initialize_addon_from_upload(cls, data, upload, channel, user):
        """Create and save an Addon from parsed upload `data`.
        Handles guid generation/reclaiming (from a previously deleted
        add-on), translation resolution, default locale selection, and
        authorship. Returns the saved Addon.
        """
        timer = StopWatch('addons.models.initialize_addon_from_upload.')
        timer.start()
        fields = [field.name for field in cls._meta.get_fields()]
        guid = data.get('guid')
        old_guid_addon = None
        if guid: # It's an extension.
            # Reclaim GUID from deleted add-on.
            try:
                old_guid_addon = Addon.unfiltered.get(guid=guid)
                old_guid_addon.update(guid=None)
            except ObjectDoesNotExist:
                pass
        if not data.get('guid'):
            data['guid'] = guid = generate_addon_guid()
        timer.log_interval('1.guids')
        data = cls.resolve_webext_translations(data, upload)
        timer.log_interval('2.resolve_translations')
        if channel == amo.RELEASE_CHANNEL_UNLISTED:
            data['slug'] = get_random_slug()
        timer.log_interval('3.get_random_slug')
        # Only keep keys that correspond to actual model fields.
        addon = Addon(**{k: v for k, v in data.items() if k in fields})
        timer.log_interval('4.instance_init')
        addon.status = amo.STATUS_NULL
        locale_is_set = (
            addon.default_locale
            and addon.default_locale in settings.AMO_LANGUAGES
            and data.get('default_locale') == addon.default_locale
        )
        if not locale_is_set:
            # Fall back to the currently active language.
            addon.default_locale = to_language(trans_real.get_language())
        timer.log_interval('5.default_locale')
        addon.save()
        timer.log_interval('6.addon_save')
        if guid:
            AddonGUID.objects.create(addon=addon, guid=guid)
        if old_guid_addon:
            old_guid_addon.update(guid=GUID_REUSE_FORMAT.format(addon.pk))
            log.info(
                f'GUID {guid} from addon [{old_guid_addon.pk}] reused '
                f'by addon [{addon.pk}].'
            )
        if user:
            AddonUser(addon=addon, user=user).save()
        activity.log_create(amo.LOG.CREATE_ADDON, addon)
        log.info(f'New addon {addon!r} from {upload!r}')
        timer.log_interval('7.end')
        return addon
    @classmethod
    def from_upload(
        cls,
        upload,
        selected_apps,
        channel=amo.RELEASE_CHANNEL_LISTED,
        parsed_data=None,
        user=None,
    ):
        """
        Create an Addon instance, a Version and corresponding File(s) from a
        FileUpload, a list of compatible app ids, a channel id and the
        parsed_data generated by parse_addon().
        Note that it's the caller's responsibility to ensure the file is
        valid. We can't check for that here because an admin may have
        overridden the validation results.
        """
        assert parsed_data is not None
        addon = cls.initialize_addon_from_upload(parsed_data, upload, channel, user)
        Version.from_upload(
            upload=upload,
            addon=addon,
            channel=channel,
            selected_apps=selected_apps,
            parsed_data=parsed_data,
        )
        return addon
    @classmethod
    def resolve_webext_translations(cls, data, upload):
        """Resolve all possible translations from an add-on.
        This returns a modified `data` dictionary accordingly with proper
        translations filled in.
        """
        # NOTE: mutates and returns the same `data` dict.
        default_locale = find_language(data.get('default_locale'))
        if not default_locale:
            # Don't change anything if we don't meet the requirements
            return data
        # find_language might have expanded short to full locale, so update it.
        data['default_locale'] = default_locale
        fields = ('name', 'homepage', 'summary')
        messages = extract_translations(upload)
        for field in fields:
            # Replace the raw value with a {locale: resolved message} map.
            data[field] = {
                locale: resolve_i18n_message(
                    data[field],
                    locale=locale,
                    default_locale=default_locale,
                    messages=messages,
                )
                for locale in messages
            }
        return data
def get_url_path(self, add_prefix=True):
if not self._current_version_id:
return ''
return reverse('addons.detail', args=[self.slug], add_prefix=add_prefix)
def get_dev_url(self, action='edit', args=None, prefix_only=False):
args = args or []
prefix = 'devhub'
if not prefix_only:
prefix += '.addons'
view_name = f'{prefix}.{action}'
return reverse(view_name, args=[self.slug] + args)
def get_detail_url(self, action='detail', args=None):
if args is None:
args = []
return reverse('addons.%s' % action, args=[self.slug] + args)
    @property
    def ratings_url(self):
        # URL of the ratings list page for this add-on.
        return reverse('addons.ratings.list', args=[self.slug])
    @property
    def versions_url(self):
        # URL of the versions page for this add-on.
        return reverse('addons.versions', args=[self.slug])
    @cached_property
    def listed_authors(self):
        # Authors flagged as listed, ordered by their position on the add-on.
        return self.authors.filter(addons=self, addonuser__listed=True).order_by(
            'addonuser__position'
        )
    @classmethod
    def get_fallback(cls):
        # Translated fields fall back to the add-on's default_locale.
        return cls._meta.get_field('default_locale')
    @property
    def ratings(self):
        # Top-level ratings only (replies have reply_to set).
        return Rating.objects.filter(addon=self, reply_to=None)
    def language_ascii(self):
        """Return the native name of the add-on's default locale, or None."""
        lang = settings.LANGUAGE_URL_MAP.get(
            trans_real.to_language(self.default_locale)
        )
        return settings.AMO_LANGUAGES.get(lang, {}).get('native')
@property
def valid_file_statuses(self):
if self.status == amo.STATUS_APPROVED:
return [amo.STATUS_APPROVED]
return amo.VALID_FILE_STATUSES
    def find_latest_public_listed_version(self):
        """Retrieve the latest public listed version of an addon.
        If the add-on is not public, it can return a listed version awaiting
        review (since non-public add-ons should not have public versions)."""
        return (
            self.versions.filter(
                channel=amo.RELEASE_CHANNEL_LISTED,
                file__status__in=self.valid_file_statuses,
            )
            .order_by('created')
            .last()
        )
    def find_latest_version(self, channel, exclude=((amo.STATUS_DISABLED,))):
        """Retrieve the latest version of an add-on for the specified channel.
        If channel is None either channel is returned.
        Keyword arguments:
        exclude -- exclude versions for which all files have one
        of those statuses (default STATUS_DISABLED)."""
        # If the add-on is deleted or hasn't been saved yet, it should not
        # have a latest version.
        if not self.id or self.status == amo.STATUS_DELETED:
            return None
        # Avoid most transformers - keep translations because they don't
        # get automatically fetched if you just access the field without
        # having made the query beforehand, and we don't know what callers
        # will want ; but for the rest of them, since it's a single
        # instance there is no reason to call the default transformers.
        return (
            self.versions.exclude(file__status__in=exclude)
            .filter(
                **{'channel': channel} if channel is not None else {},
                file__isnull=False,
            )
            .only_translations()
            .order_by('created')
            .last()
        )
    @use_primary_db
    def update_version(self, ignore=None, _signal=True):
        """
        Update the current_version field on this add-on if necessary.
        Returns True if we updated the current_version field.
        The optional ``ignore`` parameter, if present, is a version
        to not consider as part of the update, since it may be in the
        process of being deleted.
        Pass ``_signal=False`` if you want no signals fired at all.
        """
        new_current_version = self.find_latest_public_listed_version()
        updated = {}
        send_signal = False
        if self._current_version != new_current_version:
            updated['_current_version'] = new_current_version
            send_signal = True
        # update_version can be called by a post_delete signal (such
        # as File's) when deleting a version. If so, we should avoid putting
        # that version-being-deleted in any fields.
        if ignore is not None:
            updated = {k: v for k, v in updated.items() if v != ignore}
        if updated:
            diff = [self._current_version, new_current_version]
            # Pass along _signal to the .update() to prevent it from firing
            # signals if we don't want them.
            updated['_signal'] = _signal
            try:
                self.update(**updated)
                if send_signal and _signal:
                    signals.version_changed.send(sender=self.__class__, instance=self)
                log.info(
                    'Version changed from current: %s to %s '
                    'for addon %s' % tuple(diff + [self])
                )
            except Exception as e:
                # Best-effort: log the failure but don't propagate it.
                log.error(
                    'Could not save version changes current: %s to %s '
                    'for addon %s (%s)' % tuple(diff + [self, e])
                )
        return bool(updated)
def increment_theme_version_number(self):
"""Increment theme version number by 1."""
latest_version = self.find_latest_version(None)
version = latest_version or self.current_version
version.version = str(float(version.version) + 1)
# Set the current version.
self.update(_current_version=version.save())
@property
def current_version(self):
"""Return the latest public listed version of an addon.
If the add-on is not public, it can return a listed version awaiting
review (since non-public add-ons should not have public versions).
If the add-on has not been created yet or is deleted, it returns None.
"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._current_version
except ObjectDoesNotExist:
pass
return None
    @cached_property
    def latest_unlisted_version(self):
        """Shortcut property for Addon.find_latest_version(
        channel=RELEASE_CHANNEL_UNLISTED)."""
        return self.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
    def get_icon_dir(self):
        # Icons are sharded into directories of 1000 add-ons each.
        return os.path.join(
            jinja_helpers.user_media_path('addon_icons'), '%s' % (self.id // 1000)
        )
    def get_icon_url(self, size, use_default=True):
        """
        Returns the addon's icon url according to icon_type.
        If it's a theme and there is no icon set, it will return the default
        theme icon.
        If it's something else, it will return the default add-on icon, unless
        use_default is False, in which case it will return None.
        """
        # Get the closest allowed size without going over
        if size not in amo.ADDON_ICON_SIZES and size >= amo.ADDON_ICON_SIZES[0]:
            size = [s for s in amo.ADDON_ICON_SIZES if s < size][-1]
        elif size < amo.ADDON_ICON_SIZES[0]:
            size = amo.ADDON_ICON_SIZES[0]
        # Figure out what to return for an image URL
        if not self.icon_type:
            return self.get_default_icon_url(size) if use_default else None
        else:
            # [1] is the whole ID, [2] is the directory
            split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
            # Use the icon hash if we have one as the cachebusting suffix,
            # otherwise fall back to the add-on modification date.
            suffix = self.icon_hash or str(int(time.mktime(self.modified.timetuple())))
            path = '/'.join(
                [
                    split_id.group(2) or '0',
                    f'{self.id}-{size}.png?modified={suffix}',
                ]
            )
            return jinja_helpers.user_media_url('addon_icons') + path
    def get_default_icon_url(self, size):
        # Static fallback icon shipped with the frontend assets.
        return staticfiles_storage.url(f'img/addon-icons/default-{size}.png')
    @use_primary_db
    def update_status(self, ignore_version=None):
        """Recompute this add-on's status from its listed versions and their
        files, then refresh the current version.
        ``ignore_version`` is passed through to update_version() so a
        version in the process of being deleted is not considered.
        """
        self.reload()
        if self.status in [amo.STATUS_NULL, amo.STATUS_DELETED] or self.is_disabled:
            self.update_version(ignore=ignore_version)
            return
        versions = self.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED)
        status = None
        reason = ''
        if not versions.exists():
            status = amo.STATUS_NULL
            reason = 'no listed versions'
        elif not versions.filter(file__status__in=amo.VALID_FILE_STATUSES).exists():
            status = amo.STATUS_NULL
            reason = 'no listed version with valid file'
        elif (
            self.status == amo.STATUS_APPROVED
            and not versions.filter(file__status=amo.STATUS_APPROVED).exists()
        ):
            if versions.filter(file__status=amo.STATUS_AWAITING_REVIEW).exists():
                status = amo.STATUS_NOMINATED
                reason = 'only an unreviewed file'
            else:
                status = amo.STATUS_NULL
                reason = 'no reviewed files'
        elif self.status == amo.STATUS_APPROVED:
            latest_version = self.find_latest_version(
                channel=amo.RELEASE_CHANNEL_LISTED
            )
            if (
                latest_version
                and latest_version.file.status == amo.STATUS_AWAITING_REVIEW
            ):
                # Addon is public, but its latest file is not (it's the case on
                # a new file upload). So, call update, to trigger watch_status,
                # which takes care of setting nomination time when needed.
                status = self.status
                reason = 'triggering watch_status'
        if status is not None:
            log.info(
                'Changing add-on status [%s]: %s => %s (%s).'
                % (self.id, self.status, status, reason)
            )
            self.update(status=status)
            # If task_user doesn't exist that's no big issue (i.e. in tests)
            try:
                task_user = get_task_user()
            except UserProfile.DoesNotExist:
                task_user = None
            activity.log_create(
                amo.LOG.CHANGE_STATUS, self, self.status, user=task_user
            )
        self.update_version(ignore=ignore_version)
    @staticmethod
    def attach_related_versions(addons, addon_dict=None):
        """Prefetch and attach _current_version objects onto `addons`."""
        if addon_dict is None:
            addon_dict = {addon.id: addon for addon in addons}
        all_ids = set(filter(None, (addon._current_version_id for addon in addons)))
        versions = list(Version.objects.filter(id__in=all_ids).order_by())
        for version in versions:
            try:
                addon = addon_dict[version.addon_id]
            except KeyError:
                log.info('Version %s has an invalid add-on id.' % version.id)
                continue
            if addon._current_version_id == version.id:
                addon._current_version = version
            # Link back so version.addon doesn't trigger another query.
            version.addon = addon
@staticmethod
def _attach_authors(
addons,
addon_dict=None,
manager='objects',
listed=True,
to_attr='listed_authors',
):
# It'd be nice if this could be done with something like
# qs.prefetch_related(
# Prefetch('authors', queryset=UserProfile.objects.annotate(
# role=F('addonuser__role'), listed=F('addonuser__listed'))))
# instead, but that doesn't work because the prefetch queryset is
# making a different join for addonuser than the one used by the
# manytomanyfield, so the results are completely wrong when there are
# more than one add-on. Also this wouldn't let us customize the
# AddonUser manager to include/exclude deleted roles.
# So instead, we do it via AddonUser, copy the properties on the users
# and throw away the AddonUser instances afterwards.
if addon_dict is None:
addon_dict = {addon.id: addon for addon in addons}
filters = {'addon__in': addons}
if listed is not None:
filters['listed'] = listed
addonuser_qs = getattr(AddonUser, manager).all()
addonuser_qs = (
addonuser_qs.filter(**filters)
.order_by('addon_id', 'position')
.select_related('user')
)
seen = set()
groupby = itertools.groupby(addonuser_qs, key=lambda u: u.addon_id)
for addon_id, addonusers in groupby:
authors = []
for addonuser in addonusers:
setattr(addonuser.user, 'role', addonuser.role)
setattr(addonuser.user, 'listed', addonuser.listed)
authors.append(addonuser.user)
setattr(addon_dict[addon_id], to_attr, authors)
seen.add(addon_id)
# set authors to empty list on addons without any.
[
setattr(addon, to_attr, [])
for addon in addon_dict.values()
if addon.id not in seen
]
    @staticmethod
    def attach_listed_authors(addons, addon_dict=None):
        # Default _attach_authors() call: listed authors -> `listed_authors`.
        Addon._attach_authors(addons, addon_dict=addon_dict)
    @staticmethod
    def attach_all_authors(addons, addon_dict=None):
        # All authors (including deleted roles, listed or not) -> `all_authors`.
        Addon._attach_authors(
            addons,
            addon_dict=addon_dict,
            manager='unfiltered',
            listed=None,
            to_attr='all_authors',
        )
@staticmethod
def attach_previews(addons, addon_dict=None, no_transforms=False):
if addon_dict is None:
addon_dict = {a.id: a for a in addons}
qs = Preview.objects.filter(addon__in=addons, position__gte=0).order_by()
if no_transforms:
qs = qs.no_transforms()
qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
seen = set()
for addon_id, previews in itertools.groupby(qs, lambda x: x.addon_id):
addon_dict[addon_id]._all_previews = list(previews)
seen.add(addon_id)
# set _all_previews to empty list on addons without previews.
[
setattr(addon, '_all_previews', [])
for addon in addon_dict.values()
if addon.id not in seen
]
    @staticmethod
    def attach_static_categories(addons, addon_dict=None):
        """Attach `category_ids` and `all_categories` to each add-on."""
        if addon_dict is None:
            addon_dict = {addon.id: addon for addon in addons}
        # NOTE(review): itertools.groupby relies on rows being ordered by
        # addon_id; this queryset has no explicit order_by — presumably the
        # model's default ordering guarantees it. TODO confirm.
        qs = AddonCategory.objects.filter(addon__in=addon_dict.values()).values_list(
            'addon_id', 'category_id'
        )
        for addon_id, cats_iter in itertools.groupby(qs, key=lambda x: x[0]):
            # The second value of each tuple in cats_iter are the category ids
            # we want.
            addon_dict[addon_id].category_ids = sorted(c[1] for c in cats_iter)
            addon_dict[addon_id].all_categories = [
                CATEGORIES_BY_ID[cat_id]
                for cat_id in addon_dict[addon_id].category_ids
                if cat_id in CATEGORIES_BY_ID
            ]
    @staticmethod
    @timer
    def transformer(addons):
        """Default queryset transformer: batch-attach categories, current
        versions, listed authors and previews to `addons`."""
        if not addons:
            return
        addon_dict = {a.id: a for a in addons}
        # Attach categories.
        Addon.attach_static_categories(addons, addon_dict=addon_dict)
        # Set _current_version and attach listed authors.
        Addon.attach_related_versions(addons, addon_dict=addon_dict)
        Addon.attach_listed_authors(addons, addon_dict=addon_dict)
        # Attach previews.
        Addon.attach_previews(addons, addon_dict=addon_dict)
        return addon_dict
@property
def contribution_url(self, lang=settings.LANGUAGE_CODE, app=settings.DEFAULT_APP):
return reverse('addons.contribute', args=[self.slug])
    @property
    def thumbnail_url(self):
        """
        Returns the addon's thumbnail url or a default.
        """
        try:
            # First preview (by position) is the thumbnail.
            preview = self._all_previews[0]
            return preview.thumbnail_url
        except IndexError:
            return staticfiles_storage.url('img/icons/no-preview.png')
    def can_request_review(self):
        """Return whether an add-on can request a review or not."""
        # Not when disabled, already approved/nominated, or deleted.
        if self.is_disabled or self.status in (
            amo.STATUS_APPROVED,
            amo.STATUS_NOMINATED,
            amo.STATUS_DELETED,
        ):
            return False
        # Needs an (even disabled) listed version whose file isn't reviewed.
        latest_version = self.find_latest_version(
            amo.RELEASE_CHANNEL_LISTED, exclude=()
        )
        return latest_version is not None and not latest_version.file.reviewed
    @property
    def is_disabled(self):
        """True if this Addon is disabled.
        It could be disabled by an admin or disabled by the developer
        """
        return self.status == amo.STATUS_DISABLED or self.disabled_by_user
    @property
    def is_deleted(self):
        # True once the add-on has been (soft-)deleted.
        return self.status == amo.STATUS_DELETED
    def is_unreviewed(self):
        # True while the add-on sits in one of the unreviewed statuses.
        return self.status in amo.UNREVIEWED_ADDON_STATUSES
    def is_public(self):
        # Approved and not hidden by its developer.
        return self.status == amo.STATUS_APPROVED and not self.disabled_by_user
    def has_complete_metadata(self, has_listed_versions=None):
        """See get_required_metadata for has_listed_versions details."""
        return all(self.get_required_metadata(has_listed_versions=has_listed_versions))
    def get_required_metadata(self, has_listed_versions=None):
        """If has_listed_versions is not specified this method will return the
        current (required) metadata (truthy values if present) for this Addon.
        If has_listed_versions is specified then the method will act as if
        Addon.has_listed_versions() returns that value. Used to predict if the
        addon will require extra metadata before a version is created."""
        if has_listed_versions is None:
            has_listed_versions = self.has_listed_versions()
        if not has_listed_versions:
            # Add-ons with only unlisted versions have no required metadata.
            return []
        # We need to find out if the add-on has a license set. We prefer to
        # check the current_version first because that's what would be used for
        # public pages, but if there isn't any listed version will do.
        version = self.current_version or self.find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED, exclude=()
        )
        return [
            self.all_categories,
            self.name,
            self.summary,
            (version and version.license_id),
        ]
    def should_redirect_to_submit_flow(self):
        # Incomplete add-ons with a listed version but missing metadata are
        # sent back to the submission flow to finish it.
        return (
            self.status == amo.STATUS_NULL
            and not self.has_complete_metadata()
            and self.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
        )
    def can_be_deleted(self):
        # Only add-ons not already deleted can be deleted.
        return not self.is_deleted
    def has_listed_versions(self, include_deleted=False):
        """Whether the add-on points at, or has, any listed version."""
        if include_deleted:
            manager = self.versions(manager='unfiltered_for_relations')
        else:
            manager = self.versions
        return (
            self._current_version_id
            or manager.filter(channel=amo.RELEASE_CHANNEL_LISTED).exists()
        )
    def has_unlisted_versions(self, include_deleted=False):
        """Whether the add-on has any unlisted version."""
        if include_deleted:
            manager = self.versions(manager='unfiltered_for_relations')
        else:
            manager = self.versions
        return manager.filter(channel=amo.RELEASE_CHANNEL_UNLISTED).exists()
    def _is_recommended_theme(self):
        # A static theme is "recommended" when it belongs to the featured
        # themes collection.
        from olympia.bandwagon.models import CollectionAddon
        return (
            self.type == amo.ADDON_STATICTHEME
            and CollectionAddon.objects.filter(
                collection_id=settings.COLLECTION_FEATURED_THEMES_ID, addon=self
            ).exists()
        )
    def promoted_group(self, *, currently_approved=True):
        """Is the addon currently promoted for the current applications?
        Returns the group constant, or NOT_PROMOTED (which is falsey)
        otherwise.
        `currently_approved=True` means only returns True if
        self.current_version is approved for the current promotion & apps.
        If currently_approved=False then promotions where there isn't approval
        are returned too.
        """
        from olympia.promoted.models import PromotedAddon
        try:
            promoted = self.promotedaddon
        except PromotedAddon.DoesNotExist:
            return NOT_PROMOTED
        is_promoted = not currently_approved or promoted.approved_applications
        return promoted.group if is_promoted else NOT_PROMOTED
    @cached_property
    def promoted(self):
        """The PromotedAddon for this add-on (a virtual PromotedTheme for
        recommended themes), or None when not promoted."""
        promoted_group = self.promoted_group()
        if promoted_group:
            return self.promotedaddon
        else:
            from olympia.promoted.models import PromotedTheme
            if self._is_recommended_theme():
                # Not persisted: built on the fly for featured themes.
                return PromotedTheme(addon=self, group_id=RECOMMENDED.id)
            return None
    @cached_property
    def compatible_apps(self):
        """Shortcut to get compatible apps for the current version."""
        if self.current_version:
            return self.current_version.compatible_apps
        else:
            return {}
    def accepts_compatible_apps(self):
        """True if this add-on lists compatible apps."""
        return self.type not in amo.NO_COMPAT
    def incompatible_latest_apps(self):
        """Returns a list of applications with which this add-on is
        incompatible (based on the latest version of each app).
        """
        apps = []
        for application, version in self.compatible_apps.items():
            if not version:
                continue
            latest_version = version.get_latest_application_version()
            # Compare normalized version ints to handle dotted versions.
            if version_int(version.max.version) < version_int(latest_version):
                apps.append((application, latest_version))
        return apps
    def has_author(self, user):
        """True if ``user`` is an author of the add-on."""
        if user is None or user.is_anonymous:
            return False
        return AddonUser.objects.filter(addon=self, user=user).exists()
    @classmethod
    def _last_updated_queries(cls):
        """
        Get the queries used to calculate addon.last_updated.
        """
        # Public add-ons: date the last file was approved.
        status_change = Max('versions__file__datestatuschanged')
        public = (
            Addon.objects.filter(
                status=amo.STATUS_APPROVED, versions__file__status=amo.STATUS_APPROVED
            )
            .values('id')
            .annotate(last_updated=status_change)
        )
        # Everything else: date the last valid file was created.
        stati = amo.VALID_ADDON_STATUSES
        exp = (
            Addon.objects.exclude(status__in=stati)
            .filter(versions__file__status__in=amo.VALID_FILE_STATUSES)
            .values('id')
            .annotate(last_updated=Max('versions__file__created'))
        )
        return {'public': public, 'exp': exp}
    @cached_property
    def all_categories(self):
        # Static category constants for every AddonCategory row.
        return [addoncat.category for addoncat in self.addoncategory_set.all()]
    def set_categories(self, categories):
        """Replace the add-on's categories with `categories` and re-index."""
        # Add new categories.
        for category in set(categories) - set(self.all_categories):
            AddonCategory.objects.create(addon=self, category=category)
        # Remove old categories.
        for category in set(self.all_categories) - set(categories):
            AddonCategory.objects.filter(addon=self, category_id=category.id).delete()
        # Update categories cache on the model.
        self.all_categories = categories
        # Make sure the add-on is properly re-indexed
        update_search_index(Addon, self)
    @cached_property
    def current_previews(self):
        """Previews for the current version, or all of them if not a
        static theme."""
        if self.has_per_version_previews:
            if self.current_version:
                return self.current_version.previews.all()
            return VersionPreview.objects.none()
        else:
            return self._all_previews
    @cached_property
    def _all_previews(self):
        """Exclude promo graphics."""
        # Promo graphics are stored with position == -1.
        return list(self.previews.exclude(position=-1))
    @property
    def has_per_version_previews(self):
        # Only static themes keep previews per version.
        return self.type == amo.ADDON_STATICTHEME
    @property
    def app_categories(self):
        """Categories grouped by application short name."""
        app_cats = {}
        categories = sorted_groupby(
            sorted(self.all_categories),
            key=lambda x: getattr(amo.APP_IDS.get(x.application), 'short', ''),
        )
        for app, cats in categories:
            app_cats[app] = list(cats)
        return app_cats
    def remove_locale(self, locale):
        """NULLify strings in this locale for the add-on and versions."""
        for o in itertools.chain([self], self.versions.all()):
            Translation.objects.remove_for(o, locale)
    def should_show_permissions(self, version=None):
        # Only extensions with a file declaring (optional) permissions.
        version = version or self.current_version
        return (
            self.type == amo.ADDON_EXTENSION
            and version
            and version.file
            and (version.file.permissions or version.file.optional_permissions)
        )
    # Aliases for reviewerflags below are not just useful in case
    # AddonReviewerFlags does not exist for this add-on: they are also used
    # by reviewer tools get_flags() function to return flags shown to reviewers
    # in both the review queues and the review page.
    # Each alias returns None when no AddonReviewerFlags row exists.
    @property
    def needs_admin_code_review(self):
        try:
            return self.reviewerflags.needs_admin_code_review
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def needs_admin_content_review(self):
        try:
            return self.reviewerflags.needs_admin_content_review
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def needs_admin_theme_review(self):
        try:
            return self.reviewerflags.needs_admin_theme_review
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def auto_approval_disabled(self):
        try:
            return self.reviewerflags.auto_approval_disabled
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def auto_approval_disabled_unlisted(self):
        try:
            return self.reviewerflags.auto_approval_disabled_unlisted
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def auto_approval_disabled_until_next_approval(self):
        try:
            return self.reviewerflags.auto_approval_disabled_until_next_approval
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def auto_approval_disabled_until_next_approval_unlisted(self):
        try:
            return (
                self.reviewerflags.auto_approval_disabled_until_next_approval_unlisted
            )
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def auto_approval_delayed_until(self):
        try:
            return self.reviewerflags.auto_approval_delayed_until
        except AddonReviewerFlags.DoesNotExist:
            return None
    @property
    def auto_approval_delayed_indefinitely(self):
        # datetime.max is the sentinel for "delayed forever".
        return self.auto_approval_delayed_until == datetime.max
    @property
    def auto_approval_delayed_temporarily(self):
        # Delayed until a real (non-sentinel) date in the future.
        return (
            bool(self.auto_approval_delayed_until)
            and self.auto_approval_delayed_until != datetime.max
            and self.auto_approval_delayed_until > datetime.now()
        )
    def reset_notified_about_auto_approval_delay(self):
        """
        Reset notified_about_auto_approval_delay reviewer flag for this addon.
        This doesn't create an AddonReviewerFlags if there wasn't one, just
        resets notified_about_auto_approval_delay to False if there were flags
        for this add-on.
        """
        AddonReviewerFlags.objects.filter(addon=self).update(
            notified_about_auto_approval_delay=False
        )
@classmethod
def get_lookup_field(cls, identifier):
lookup_field = 'pk'
if identifier and not identifier.isdigit():
# If the identifier contains anything other than a digit, it's
# either a slug or a guid. guids need to contain either {} or @,
# which are invalid in a slug.
if amo.ADDON_GUID_PATTERN.match(identifier):
lookup_field = 'guid'
else:
lookup_field = 'slug'
return lookup_field
    @property
    def addonguid_guid(self):
        """Use this function to avoid having to wrap `addon.addonguid.guid` in
        a try...except.
        There *should* be a matching AddonGUID record for every Addon with a
        guid, but the foreign key is from AddonGUID to Addon so there's a
        possiblity of bad data leading to the AddonGUID not existing. Plus we
        don't want this to fail if an upload with guid=None somehow ended up
        getting through.
        """
        # Falls back to `self.guid` when the addonguid relation is missing.
        return getattr(self, 'addonguid', self).guid
@cached_property
def block(self):
from olympia.blocklist.models import Block
# Block.guid is unique so it's either on the list or not.
return Block.objects.filter(guid=self.addonguid_guid).last()
    @cached_property
    def blocklistsubmission(self):
        """Return the active BlocklistSubmission for this guid, or None."""
        from olympia.blocklist.models import BlocklistSubmission

        # GUIDs should only exist in one (active) submission at once.
        return BlocklistSubmission.get_submissions_from_guid(self.addonguid_guid).last()
    @property
    def git_extraction_is_in_progress(self):
        """Whether a git extraction is currently running for this add-on."""
        from olympia.git.models import GitExtractionEntry

        return GitExtractionEntry.objects.filter(addon=self, in_progress=True).exists()
    @cached_property
    def tag_list(self):
        """List of tag_text strings attached to this add-on.

        attach_tags() assigns `self.tag_list` directly on the instance, which
        shadows this cached_property; the subsequent attribute access then
        returns that freshly attached value, which cached_property caches.
        """
        attach_tags([self])
        return self.tag_list
def set_tag_list(self, new_tag_list):
tag_list_to_add = set(new_tag_list) - set(self.tag_list)
tag_list_to_drop = set(self.tag_list) - set(new_tag_list)
tags = Tag.objects.filter(tag_text__in=(*tag_list_to_add, *tag_list_to_drop))
for tag in tags:
if tag.tag_text in tag_list_to_add:
tag.add_tag(self)
elif tag.tag_text in tag_list_to_drop:
tag.remove_tag(self)
self.tag_list = new_tag_list
dbsignals.pre_save.connect(save_signal, sender=Addon, dispatch_uid='addon_translations')
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, instance, **kw):
    """React to a current-version change: sync to basket and run async tasks."""
    from . import tasks
    from olympia.amo.tasks import trigger_sync_objects_to_basket

    # watch_changes() also does a sync when it detects a _current_version change, but it
    # might not have fired, since it depends on on_change() being sent.
    trigger_sync_objects_to_basket('addon', [instance.pk], 'version change')
    tasks.version_changed.delay(instance.pk)
@receiver(dbsignals.post_save, sender=Addon, dispatch_uid='addons.search.index')
def update_search_index(sender, instance, **kw):
    """Queue a search re-index of the add-on after every save.

    Skipped for fixture loads (`raw=True`).
    """
    from . import tasks

    if not kw.get('raw'):
        tasks.index_addons.delay([instance.id])
@Addon.on_change
def watch_status(old_attr=None, new_attr=None, instance=None, sender=None, **kwargs):
    """
    Set nomination date if the addon is new in queue or updating.

    The nomination date cannot be reset, say, when a developer cancels
    their request for review and re-requests review.

    If a version is rejected after nomination, the developer has
    to upload a new version.
    """
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    new_status = new_attr.get('status')
    old_status = old_attr.get('status')
    latest_version = instance.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)

    # Update the author's account profile visibility
    if new_status != old_status:
        [author.update_is_public() for author in instance.authors.all()]

    # Nothing to stamp without a valid new status and a listed version.
    if (
        new_status not in amo.VALID_ADDON_STATUSES
        or not new_status
        or not latest_version
    ):
        return
    if old_status not in amo.UNREVIEWED_ADDON_STATUSES:
        # New: will (re)set nomination only if it's None.
        latest_version.reset_nomination_time()
    else:
        # Updating: inherit nomination from last nominated version.
        # Calls `inherit_nomination` manually given that signals are
        # deactivated to avoid circular calls.
        inherit_nomination(None, latest_version)
@Addon.on_change
def watch_disabled(old_attr=None, new_attr=None, instance=None, sender=None, **kwargs):
    """
    Move files when an add-on is disabled/enabled.

    There is a similar watcher in olympia.files.models that tracks File
    status, but this one is useful for when the Files do not change their
    status.
    """
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    # Rebuild a throwaway Addon carrying only the two fields that determine
    # is_disabled, so we can compute the *previous* disabled state.
    relevant = ('disabled_by_user', 'status')
    previous = Addon(**{key: value for key, value in old_attr.items() if key in relevant})
    was_disabled = previous.is_disabled
    is_disabled = instance.is_disabled
    if was_disabled == is_disabled:
        return
    files = File.objects.filter(version__addon=instance.id)
    if is_disabled:
        for file_ in files:
            file_.hide_disabled_file()
    else:
        for file_ in files:
            file_.unhide_disabled_file()
@Addon.on_change
def watch_changes(old_attr=None, new_attr=None, instance=None, sender=None, **kwargs):
    """Sync the add-on to basket when a basket-relevant field changed."""
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    # Names of attributes whose value differs between old and new snapshots.
    # NOTE(review): keys starting with '_' are filtered out here, yet
    # '_current_version' is listed below as basket-relevant — confirm the
    # snapshot actually exposes it under a non-underscore key, otherwise that
    # entry can never match.
    changes = {
        x for x in new_attr if not x.startswith('_') and new_attr[x] != old_attr.get(x)
    }
    basket_relevant_changes = (
        # Some changes are not tracked here:
        # - Any authors changes (separate model)
        # - Creation/Deletion of unlisted version (separate model)
        # - Name change (separate model/signal, see below)
        # - Categories changes (separate model, ignored for now)
        # - average_rating changes (ignored for now, happens too often)
        # - average_daily_users changes (ignored for now, happens too often)
        '_current_version',
        'default_locale',
        'slug',
        'status',
        'disabled_by_user',
    )
    if any(field in changes for field in basket_relevant_changes):
        from olympia.amo.tasks import trigger_sync_objects_to_basket

        trigger_sync_objects_to_basket('addon', [instance.pk], 'attribute change')
@receiver(translation_saved, sender=Addon, dispatch_uid='watch_addon_name_changes')
def watch_addon_name_changes(sender=None, instance=None, **kw):
    """Sync the add-on to basket when its (translated) name is saved."""
    field_name = kw.get('field_name')
    if instance and field_name == 'name':
        from olympia.amo.tasks import trigger_sync_objects_to_basket

        trigger_sync_objects_to_basket('addon', [instance.pk], 'name change')
def attach_translations_dict(addons):
    """Put all translations into a translations dict."""
    attach_trans_dict(Addon, addons)
def attach_tags(addons):
    """Attach a `tag_list` attribute (list of tag_text strings) to each add-on.

    Add-ons without tags get an empty list. Tags are grouped per add-on with a
    single query over the m2m table.
    """
    addon_dict = {addon.id: addon for addon in addons}
    for addon in addons:
        addon.tag_list = []  # make sure all the addons have the property set
    qs = Tag.objects.filter(addons__in=addon_dict).values_list('addons__id', 'tag_text')
    # Fix: the original reused the loop name `addon` for the grouping key,
    # shadowing the Addon instances iterated above; the key is an id, so name
    # it accordingly.
    for addon_id, tags in sorted_groupby(qs, lambda x: x[0]):
        addon_dict[addon_id].tag_list = [tag_text for _, tag_text in tags]
class AddonReviewerFlags(ModelBase):
    """Per-add-on flags driving reviewer tooling: admin-review requirements,
    auto-approval overrides/delays and notification bookkeeping."""

    addon = models.OneToOneField(
        Addon, primary_key=True, on_delete=models.CASCADE, related_name='reviewerflags'
    )
    needs_admin_code_review = models.BooleanField(default=False)
    needs_admin_content_review = models.BooleanField(default=False)
    needs_admin_theme_review = models.BooleanField(default=False)
    auto_approval_disabled = models.BooleanField(default=False)
    auto_approval_disabled_unlisted = models.BooleanField(default=None, null=True)
    auto_approval_disabled_until_next_approval = models.BooleanField(
        default=None, null=True
    )
    auto_approval_disabled_until_next_approval_unlisted = models.BooleanField(
        default=None, null=True
    )
    # datetime.max is used as the sentinel for "delayed indefinitely" (see
    # Addon.auto_approval_delayed_indefinitely).
    auto_approval_delayed_until = models.DateTimeField(default=None, null=True)
    notified_about_auto_approval_delay = models.BooleanField(default=None, null=True)
    notified_about_expiring_delayed_rejections = models.BooleanField(
        default=None, null=True
    )
class AddonRegionalRestrictions(ModelBase):
    """Region (country) codes for which an add-on must not be served."""

    addon = models.OneToOneField(
        Addon,
        primary_key=True,
        on_delete=models.CASCADE,
        related_name='regional_restrictions',
        help_text='Add-on id this item will point to.',
    )
    excluded_regions = models.JSONField(
        default=list,
        help_text='JSON style list of ISO 3166-1 alpha-2 country (region) '
        'codes. Codes will be uppercased. E.g. `["CN"]`',
    )

    class Meta:
        verbose_name_plural = 'Addon Regional Restrictions'

    def __str__(self):
        return '%s: %d' % (self.addon, len(self.excluded_regions))

    def clean(self):
        """Normalize region codes to uppercase strings on validation."""
        super().clean()
        self.excluded_regions = [str(item).upper() for item in self.excluded_regions]
class MigratedLWT(OnChangeMixin, ModelBase):
    """Mapping from a legacy lightweight theme (persona) to the static theme
    add-on it was migrated to."""

    lightweight_theme_id = models.PositiveIntegerField()
    getpersonas_id = models.PositiveIntegerField()
    # NOTE(review): `unique=True` on a ForeignKey is the legacy spelling of
    # OneToOneField — kept as-is since changing it alters migrations.
    static_theme = models.ForeignKey(
        Addon, unique=True, related_name='migrated_from_lwt', on_delete=models.CASCADE
    )

    class Meta:
        db_table = 'migrated_personas'
        indexes = [
            LongNameIndex(
                fields=('static_theme',),
                name='migrated_personas_static_theme_id_fk_addons_id',
            ),
            LongNameIndex(
                fields=('getpersonas_id',), name='migrated_personas_getpersonas_id'
            ),
        ]
class AddonCategory(models.Model):
    """Through model linking an Addon to a (statically defined) category.

    Categories are constants, not rows: only the category id is stored and the
    full category object is resolved from CATEGORIES_BY_ID.
    """

    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    category_id = models.PositiveIntegerField()

    class Meta:
        db_table = 'addons_categories'
        indexes = [
            models.Index(fields=('category_id', 'addon'), name='category_addon_idx'),
        ]
        constraints = [
            models.UniqueConstraint(
                fields=('addon', 'category_id'),
                name='addons_categories_addon_category_id',
            ),
        ]

    def __init__(self, *args, **kwargs):
        # Accept a `category` object kwarg for convenience and translate it to
        # the stored `category_id`.
        if 'category' in kwargs:
            kwargs['category_id'] = kwargs.pop('category').id
        super().__init__(*args, **kwargs)

    @property
    def category(self):
        # Resolve the static category constant; None for unknown ids.
        return CATEGORIES_BY_ID.get(self.category_id)
class AddonUserManager(ManagerBase):
    """Manager for AddonUser that hides soft-deleted authors by default."""

    def __init__(self, include_deleted=False):
        # DO NOT change the default value of include_deleted unless you've read
        # through the comment just above the Addon managers
        # declaration/instantiation and understand the consequences.
        super().__init__()
        self.include_deleted = include_deleted

    def get_queryset(self):
        qs = super().get_queryset()
        if not self.include_deleted:
            # Soft-deleted authors keep their row with a special role.
            qs = qs.exclude(role=amo.AUTHOR_ROLE_DELETED)
        return qs
class AddonUser(OnChangeMixin, SaveUpdateMixin, models.Model):
    """Through model linking an Addon to one of its author UserProfiles.

    Deletion is soft: `delete()` flips the role to AUTHOR_ROLE_DELETED and the
    default manager hides such rows (the `unfiltered` manager includes them).
    """

    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    # Fix: was `user = user = models.ForeignKey(...)` — a duplicated
    # assignment typo with no effect; collapsed to a single assignment.
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    role = models.SmallIntegerField(
        default=amo.AUTHOR_ROLE_OWNER, choices=amo.AUTHOR_CHOICES_UNFILTERED
    )
    listed = models.BooleanField(_('Listed'), default=True)
    position = models.IntegerField(default=0)

    unfiltered = AddonUserManager(include_deleted=True)
    objects = AddonUserManager()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remember the role at load time so role transitions can be detected.
        self._original_role = self.role

    class Meta:
        # see Addon.Meta for details of why this base_manager_name is important
        base_manager_name = 'unfiltered'
        db_table = 'addons_users'
        indexes = [
            models.Index(fields=('listed',), name='addons_users_listed_idx'),
            LongNameIndex(
                fields=('addon', 'user', 'listed'),
                name='addons_users_addon_user_listed_idx',
            ),
            models.Index(
                fields=('addon', 'listed'), name='addons_users_addon_listed_idx'
            ),
        ]
        constraints = [
            models.UniqueConstraint(
                fields=('addon', 'user'), name='addons_users_addon_user'
            ),
        ]

    def delete(self):
        # soft-delete
        self.update(role=amo.AUTHOR_ROLE_DELETED)

    @property
    def is_deleted(self):
        """Whether this author row has been soft-deleted."""
        return self.role == amo.AUTHOR_ROLE_DELETED
@AddonUser.on_change
def watch_addon_user(
    old_attr=None, new_attr=None, instance=None, sender=None, **kwargs
):
    """Refresh author visibility and the add-on's search index on author change."""
    instance.user.update_is_public()
    # Update ES because authors is included.
    update_search_index(sender=sender, instance=instance.addon, **kwargs)
def addon_user_sync(sender=None, instance=None, **kwargs):
    """Trigger a basket sync when an author is added or (soft-)deleted."""
    # Basket doesn't care what role authors have or whether they are listed
    # or not, it just needs to be updated whenever an author is added/removed.
    # post_delete sends no `created` kwarg, hence the default of True.
    created_or_deleted = kwargs.get('created', True) or instance.is_deleted
    if created_or_deleted and instance.addon.status != amo.STATUS_DELETED:
        from olympia.amo.tasks import trigger_sync_objects_to_basket

        trigger_sync_objects_to_basket('addon', [instance.addon.pk], 'addonuser change')
# Hard deletes must run the same watchers as soft deletes/saves: search index
# refresh and basket sync.
models.signals.post_delete.connect(
    watch_addon_user, sender=AddonUser, dispatch_uid='delete_addon_user'
)
models.signals.post_delete.connect(
    addon_user_sync, sender=AddonUser, dispatch_uid='delete_addon_user_sync'
)
models.signals.post_save.connect(
    addon_user_sync, sender=AddonUser, dispatch_uid='save_addon_user_sync'
)
class AddonUserPendingConfirmation(SaveUpdateMixin, models.Model):
    """An author invitation that has not been confirmed by the user yet."""

    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    # Fix: was `user = user = models.ForeignKey(...)` — a duplicated
    # assignment typo with no effect; collapsed to a single assignment.
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    role = models.SmallIntegerField(
        default=amo.AUTHOR_ROLE_OWNER, choices=amo.AUTHOR_CHOICES
    )
    listed = models.BooleanField(_('Listed'), default=True)
    # Note: we don't bother with position for authors waiting confirmation,
    # because it's impossible to properly reconcile it with the confirmed
    # authors. Instead, authors waiting confirmation are displayed in the order
    # they have been added, and when they are confirmed they end up in the
    # last position by default.

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remember the role at load time so role transitions can be detected.
        self._original_role = self.role

    class Meta:
        db_table = 'addons_users_pending_confirmation'
        constraints = [
            models.UniqueConstraint(
                fields=('addon', 'user'),
                name='addons_users_pending_confirmation_'
                'addon_id_user_id_38e3bb32_uniq',
            ),
        ]
class AddonApprovalsCounter(ModelBase):
    """Model holding a counter of the number of times a listed version
    belonging to an add-on has been approved by a human. Reset everytime a
    listed version is auto-approved for this add-on.

    Holds 2 additional date fields:
    - last_human_review, the date of the last time a human fully reviewed the
      add-on
    - last_content_review, the date of the last time a human fully reviewed the
      add-on content (not code).
    """

    addon = models.OneToOneField(Addon, primary_key=True, on_delete=models.CASCADE)
    counter = models.PositiveIntegerField(default=0)
    last_human_review = models.DateTimeField(null=True)
    last_content_review = models.DateTimeField(null=True, db_index=True)

    def __str__(self):
        return '%s: %d' % (str(self.pk), self.counter) if self.pk else ''

    @classmethod
    def increment_for_addon(cls, addon):
        """
        Increment approval counter for the specified addon, setting the last
        human review date and last content review date to now.

        If an AddonApprovalsCounter already exists, it updates it, otherwise it
        creates and saves a new instance.
        """
        now = datetime.now()
        data = {
            'counter': 1,
            'last_human_review': now,
            'last_content_review': now,
        }
        obj, created = cls.objects.get_or_create(addon=addon, defaults=data)
        if not created:
            # Use an F() expression so the increment is done atomically in SQL
            # rather than read-modify-write in Python.
            data['counter'] = F('counter') + 1
            obj.update(**data)
        return obj

    @classmethod
    def reset_for_addon(cls, addon):
        """
        Reset the approval counter (but not the dates) for the specified addon.
        """
        obj, created = cls.objects.update_or_create(
            addon=addon, defaults={'counter': 0}
        )
        return obj

    @classmethod
    def approve_content_for_addon(cls, addon, now=None):
        """
        Set last_content_review for this addon.
        """
        if now is None:
            now = datetime.now()
        # Approving is just "resetting" to the approval timestamp.
        return cls.reset_content_for_addon(addon, reset_to=now)

    @classmethod
    def reset_content_for_addon(cls, addon, reset_to=None):
        """
        Reset the last_content_review date for this addon so it triggers
        another review.
        """
        obj, created = cls.objects.update_or_create(
            addon=addon, defaults={'last_content_review': reset_to}
        )
        return obj
class DeniedGuid(ModelBase):
    """A guid that may never be (re)used for a new add-on submission."""

    id = PositiveAutoField(primary_key=True)
    guid = models.CharField(max_length=255, unique=True)
    comments = models.TextField(default='', blank=True)

    class Meta:
        db_table = 'denied_guids'

    def __str__(self):
        return self.guid
class Preview(BasePreview, ModelBase):
    """A screenshot/preview image attached to an add-on."""

    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey(Addon, related_name='previews', on_delete=models.CASCADE)
    caption = TranslatedField()
    position = models.IntegerField(default=0)
    # Per-size metadata (dimensions/formats) filled in when images are resized.
    sizes = models.JSONField(default=dict)

    class Meta:
        db_table = 'previews'
        ordering = ('position', 'created')
        indexes = [
            models.Index(fields=('addon',), name='previews_addon_idx'),
            models.Index(
                fields=('addon', 'position', 'created'),
                name='addon_position_created_idx',
            ),
        ]

    def get_format(self, for_size):
        """Return the image format recorded for `for_size` (e.g. 'thumbnail')."""
        return self.sizes.get(
            f'{for_size}_format',
            # If self.sizes doesn't contain the requested format, it's probably
            # because the Preview was just created but not yet resized down.
            # We try to guess the format if it's in ADDON_PREVIEW_SIZES,
            # falling back to `png` like BasePreview does otherwise.
            amo.ADDON_PREVIEW_SIZES.get(f'{for_size}_format', 'png'),
        )
# Keep Preview's translated caption in sync on save, and remove the image
# files from disk when a Preview row is deleted.
dbsignals.pre_save.connect(
    save_signal, sender=Preview, dispatch_uid='preview_translations'
)
models.signals.post_delete.connect(
    Preview.delete_preview_files, sender=Preview, dispatch_uid='delete_preview_files'
)
class DeniedSlug(ModelBase):
    """Slugs that add-ons are not allowed to use."""

    name = models.CharField(max_length=255, unique=True, default='')

    class Meta:
        db_table = 'addons_denied_slug'

    def __str__(self):
        return self.name

    @classmethod
    def blocked(cls, slug):
        """Whether `slug` is denied (purely numeric slugs are always denied)."""
        return slug.isdigit() or cls.objects.filter(name=slug).exists()
class FrozenAddon(models.Model):
    """Add-ons in this table never get a hotness score."""

    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)

    class Meta:
        db_table = 'frozen_addons'

    def __str__(self):
        return 'Frozen: %s' % self.addon_id
@receiver(dbsignals.post_save, sender=FrozenAddon)
def freezer(sender, instance, **kw):
    """Zero out the hotness of an add-on as soon as it is frozen."""
    # Adjust the hotness of the FrozenAddon.
    if instance.addon_id:
        Addon.objects.get(id=instance.addon_id).update(hotness=0)
class ReplacementAddon(ModelBase):
    """Maps a legacy add-on guid to the path (or external URL) of a suggested
    replacement add-on or collection."""

    guid = models.CharField(max_length=255, unique=True, null=True)
    path = models.CharField(
        max_length=255,
        null=True,
        help_text=_('Addon and collection paths need to end with "/"'),
    )

    class Meta:
        db_table = 'replacement_addons'

    @staticmethod
    def path_is_external(path):
        # External means an absolute http(s) URL rather than a site path.
        return urlsplit(path).scheme in ['http', 'https']

    def has_external_url(self):
        # NOTE(review): `path` is nullable; this raises if path is None —
        # presumably callers only use it on rows with a path. Confirm.
        return self.path_is_external(self.path)
def track_new_status(sender, instance, *args, **kw):
    """Record a status metric when a brand-new add-on is created."""
    if kw.get('raw'):
        # The addon is being loaded from a fixture.
        return
    if kw.get('created'):
        track_addon_status_change(instance)


models.signals.post_save.connect(
    track_new_status, sender=Addon, dispatch_uid='track_new_addon_status'
)
@Addon.on_change
def track_status_change(old_attr=None, new_attr=None, **kw):
    """Record a status metric whenever an existing add-on changes status."""
    old_status = (old_attr or {}).get('status')
    new_status = (new_attr or {}).get('status')
    if new_status != old_status:
        track_addon_status_change(kw['instance'])
def track_addon_status_change(addon):
    """Increment the per-status statsd counter for `addon`."""
    statsd.incr('addon_status_change.all.status_{}'.format(addon.status))
class AddonGUID(ModelBase):
    """
    Addons + guids will be added to this table whenever an addon is created.

    For deleted addons it will contain an fk to the Addon instance even after
    Addon.guid has been set to null (i.e. when it's been reuploaded).
    """

    guid = models.CharField(max_length=255, null=False, db_index=True)
    addon = models.OneToOneField(
        Addon, null=False, on_delete=models.CASCADE, unique=True
    )
    # SHA-256 hex digest of the guid, kept in sync in save().
    hashed_guid = models.CharField(max_length=64, null=False)

    class Meta:
        db_table = 'addons_reusedguid'

    def save(self, *args, **kwargs):
        # Recompute the hash on every save so it can never drift from `guid`.
        self.hashed_guid = hashlib.sha256(self.guid.encode()).hexdigest()
        super().save(*args, **kwargs)
| 36.833193
| 92
| 0.637634
|
4a1169f28477173ae5d519367943ff17a57d7bc4
| 51,781
|
py
|
Python
|
dialogflow_v2/gapic/entity_types_client.py
|
onurtimur/dialogflow-python-client-v2
|
94e025232ab10459a844a853e75142a666581668
|
[
"Apache-2.0"
] | 2
|
2019-10-29T14:39:57.000Z
|
2019-11-01T11:49:04.000Z
|
dialogflow_v2/gapic/entity_types_client.py
|
onurtimur/dialogflow-python-client-v2
|
94e025232ab10459a844a853e75142a666581668
|
[
"Apache-2.0"
] | null | null | null |
dialogflow_v2/gapic/entity_types_client.py
|
onurtimur/dialogflow-python-client-v2
|
94e025232ab10459a844a853e75142a666581668
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.dialogflow.v2 EntityTypes API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import google.api_core.protobuf_helpers
import grpc
from dialogflow_v2.gapic import entity_types_client_config
from dialogflow_v2.gapic import enums
from dialogflow_v2.gapic.transports import entity_types_grpc_transport
from dialogflow_v2.proto import agent_pb2
from dialogflow_v2.proto import agent_pb2_grpc
from dialogflow_v2.proto import context_pb2
from dialogflow_v2.proto import context_pb2_grpc
from dialogflow_v2.proto import entity_type_pb2
from dialogflow_v2.proto import entity_type_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import struct_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("dialogflow").version
class EntityTypesClient(object):
"""
Entities are extracted from user input and represent parameters that are
meaningful to your application. For example, a date range, a proper name
such as a geographic location or landmark, and so on. Entities represent
actionable data for your application.
When you define an entity, you can also include synonyms that all map to
that entity. For example, "soft drink", "soda", "pop", and so on.
There are three types of entities:
- **System** - entities that are defined by the Dialogflow API for
common data types such as date, time, currency, and so on. A system
entity is represented by the ``EntityType`` type.
- **Developer** - entities that are defined by you that represent
actionable data that is meaningful to your application. For example,
you could define a ``pizza.sauce`` entity for red or white pizza
sauce, a ``pizza.cheese`` entity for the different types of cheese on
a pizza, a ``pizza.topping`` entity for different toppings, and so
on. A developer entity is represented by the ``EntityType`` type.
- **User** - entities that are built for an individual user such as
favorites, preferences, playlists, and so on. A user entity is
represented by the ``SessionEntityType`` type.
For more information about entity types, see the `Dialogflow
documentation <https://cloud.google.com/dialogflow/docs/entities-overview>`__.
"""
SERVICE_ADDRESS = "dialogflow.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.dialogflow.v2.EntityTypes"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
dialogflow_v2.EntityTypesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def entity_type_path(cls, project, entity_type):
"""Return a fully-qualified entity_type string."""
return google.api_core.path_template.expand(
"projects/{project}/agent/entityTypes/{entity_type}",
project=project,
entity_type=entity_type,
)
@classmethod
def project_agent_path(cls, project):
"""Return a fully-qualified project_agent string."""
return google.api_core.path_template.expand(
"projects/{project}/agent", project=project
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.EntityTypesGrpcTransport,
Callable[[~.Credentials, type], ~.EntityTypesGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = entity_types_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=entity_types_grpc_transport.EntityTypesGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = entity_types_grpc_transport.EntityTypesGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
    def list_entity_types(
        self,
        parent,
        language_code=None,
        page_size=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Returns the list of all entity types in the specified agent.

        Example:
            >>> import dialogflow_v2
            >>>
            >>> client = dialogflow_v2.EntityTypesClient()
            >>>
            >>> parent = client.project_agent_path('[PROJECT]')
            >>>
            >>> # Iterate over all results
            >>> for element in client.list_entity_types(parent):
            ...     # process element
            ...     pass
            >>>
            >>>
            >>> # Alternatively:
            >>>
            >>> # Iterate over results one page at a time
            >>> for page in client.list_entity_types(parent).pages:
            ...     for element in page:
            ...         # process element
            ...         pass

        Args:
            parent (str): Required. The agent to list all entity types from. Format:
                ``projects/<Project ID>/agent``.
            language_code (str): Optional. The language to list entity synonyms for. If not specified,
                the agent's default language is used. `Many
                languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
                are supported. Note: languages must be enabled in the agent before they
                can be used.
            page_size (int): The maximum number of resources contained in the
                underlying API response. If page streaming is performed per-
                resource, this parameter does not affect the return value. If page
                streaming is performed per-page, this determines the maximum number
                of resources in a page.
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will
                be retried using a default configuration.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.api_core.page_iterator.PageIterator` instance.
            An iterable of :class:`~google.cloud.dialogflow_v2.types.EntityType` instances.
            You can also iterate over the pages of the response
            using its `pages` property.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        # The wrapped callable is cached so this cost is paid only once.
        if "list_entity_types" not in self._inner_api_calls:
            self._inner_api_calls[
                "list_entity_types"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.list_entity_types,
                default_retry=self._method_configs["ListEntityTypes"].retry,
                default_timeout=self._method_configs["ListEntityTypes"].timeout,
                client_info=self._client_info,
            )

        request = entity_type_pb2.ListEntityTypesRequest(
            parent=parent, language_code=language_code, page_size=page_size
        )
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        # Attach a routing header built from `parent` so the backend can route
        # the request to the right resource.
        try:
            routing_header = [("parent", parent)]
        except AttributeError:
            pass
        else:
            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
                routing_header
            )
            metadata.append(routing_metadata)

        # Wrap the paginated RPC in a lazy iterator over EntityType items.
        iterator = google.api_core.page_iterator.GRPCIterator(
            client=None,
            method=functools.partial(
                self._inner_api_calls["list_entity_types"],
                retry=retry,
                timeout=timeout,
                metadata=metadata,
            ),
            request=request,
            items_field="entity_types",
            request_token_field="page_token",
            response_token_field="next_page_token",
        )
        return iterator
    def get_entity_type(
        self,
        name,
        language_code=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Retrieves the specified entity type.

        Example:
            >>> import dialogflow_v2
            >>>
            >>> client = dialogflow_v2.EntityTypesClient()
            >>>
            >>> name = client.entity_type_path('[PROJECT]', '[ENTITY_TYPE]')
            >>>
            >>> response = client.get_entity_type(name)

        Args:
            name (str): Required. The name of the entity type. Format:
                ``projects/<Project ID>/agent/entityTypes/<EntityType ID>``.
            language_code (str): Optional. The language to retrieve entity synonyms for. If not
                specified, the agent's default language is used. `Many
                languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
                are supported. Note: languages must be enabled in the agent before they
                can be used.
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will
                be retried using a default configuration.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.cloud.dialogflow_v2.types.EntityType` instance.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        # The wrapped callable is cached so this cost is paid only once.
        if "get_entity_type" not in self._inner_api_calls:
            self._inner_api_calls[
                "get_entity_type"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.get_entity_type,
                default_retry=self._method_configs["GetEntityType"].retry,
                default_timeout=self._method_configs["GetEntityType"].timeout,
                client_info=self._client_info,
            )

        request = entity_type_pb2.GetEntityTypeRequest(
            name=name, language_code=language_code
        )
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        # Attach a routing header built from `name` so the backend can route
        # the request to the right resource.
        try:
            routing_header = [("name", name)]
        except AttributeError:
            pass
        else:
            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
                routing_header
            )
            metadata.append(routing_metadata)

        return self._inner_api_calls["get_entity_type"](
            request, retry=retry, timeout=timeout, metadata=metadata
        )
def create_entity_type(
self,
parent,
entity_type,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates an entity type in the specified agent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize `entity_type`:
>>> entity_type = {}
>>>
>>> response = client.create_entity_type(parent, entity_type)
Args:
parent (str): Required. The agent to create a entity type for. Format:
``projects/<Project ID>/agent``.
entity_type (Union[dict, ~google.cloud.dialogflow_v2.types.EntityType]): Required. The entity type to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.EntityType`
language_code (str): Optional. The language of entity synonyms defined in ``entity_type``. If
not specified, the agent's default language is used. `Many
languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent before they
can be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.EntityType` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_entity_type" not in self._inner_api_calls:
self._inner_api_calls[
"create_entity_type"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_entity_type,
default_retry=self._method_configs["CreateEntityType"].retry,
default_timeout=self._method_configs["CreateEntityType"].timeout,
client_info=self._client_info,
)
request = entity_type_pb2.CreateEntityTypeRequest(
parent=parent, entity_type=entity_type, language_code=language_code
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_entity_type"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_entity_type(
self,
entity_type,
language_code=None,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates the specified entity type.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> # TODO: Initialize `entity_type`:
>>> entity_type = {}
>>>
>>> response = client.update_entity_type(entity_type)
Args:
entity_type (Union[dict, ~google.cloud.dialogflow_v2.types.EntityType]): Required. The entity type to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.EntityType`
language_code (str): Optional. The language of entity synonyms defined in ``entity_type``. If
not specified, the agent's default language is used. `Many
languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent before they
can be used.
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.EntityType` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_entity_type" not in self._inner_api_calls:
self._inner_api_calls[
"update_entity_type"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_entity_type,
default_retry=self._method_configs["UpdateEntityType"].retry,
default_timeout=self._method_configs["UpdateEntityType"].timeout,
client_info=self._client_info,
)
request = entity_type_pb2.UpdateEntityTypeRequest(
entity_type=entity_type,
language_code=language_code,
update_mask=update_mask,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("entity_type.name", entity_type.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_entity_type"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_entity_type(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes the specified entity type.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> name = client.entity_type_path('[PROJECT]', '[ENTITY_TYPE]')
>>>
>>> client.delete_entity_type(name)
Args:
name (str): Required. The name of the entity type to delete. Format:
``projects/<Project ID>/agent/entityTypes/<EntityType ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_entity_type" not in self._inner_api_calls:
self._inner_api_calls[
"delete_entity_type"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_entity_type,
default_retry=self._method_configs["DeleteEntityType"].retry,
default_timeout=self._method_configs["DeleteEntityType"].timeout,
client_info=self._client_info,
)
request = entity_type_pb2.DeleteEntityTypeRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_entity_type"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def batch_update_entity_types(
self,
parent,
entity_type_batch_uri=None,
entity_type_batch_inline=None,
language_code=None,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates/Creates multiple entity types in the specified agent.
Operation <response: ``BatchUpdateEntityTypesResponse``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> response = client.batch_update_entity_types(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the agent to update or create entity types in.
Format: ``projects/<Project ID>/agent``.
entity_type_batch_uri (str): The URI to a Google Cloud Storage file containing entity types to update
or create. The file format can either be a serialized proto (of
EntityBatch type) or a JSON object. Note: The URI must start with
"gs://".
entity_type_batch_inline (Union[dict, ~google.cloud.dialogflow_v2.types.EntityTypeBatch]): The collection of entity types to update or create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.EntityTypeBatch`
language_code (str): Optional. The language of entity synonyms defined in ``entity_types``.
If not specified, the agent's default language is used. `Many
languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent before they
can be used.
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_update_entity_types" not in self._inner_api_calls:
self._inner_api_calls[
"batch_update_entity_types"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_update_entity_types,
default_retry=self._method_configs["BatchUpdateEntityTypes"].retry,
default_timeout=self._method_configs["BatchUpdateEntityTypes"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
entity_type_batch_uri=entity_type_batch_uri,
entity_type_batch_inline=entity_type_batch_inline,
)
request = entity_type_pb2.BatchUpdateEntityTypesRequest(
parent=parent,
entity_type_batch_uri=entity_type_batch_uri,
entity_type_batch_inline=entity_type_batch_inline,
language_code=language_code,
update_mask=update_mask,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_update_entity_types"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
entity_type_pb2.BatchUpdateEntityTypesResponse,
metadata_type=struct_pb2.Struct,
)
def batch_delete_entity_types(
self,
parent,
entity_type_names,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes entity types in the specified agent.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize `entity_type_names`:
>>> entity_type_names = []
>>>
>>> response = client.batch_delete_entity_types(parent, entity_type_names)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the agent to delete all entities types for.
Format: ``projects/<Project ID>/agent``.
entity_type_names (list[str]): Required. The names entity types to delete. All names must point to the
same agent as ``parent``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_delete_entity_types" not in self._inner_api_calls:
self._inner_api_calls[
"batch_delete_entity_types"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_delete_entity_types,
default_retry=self._method_configs["BatchDeleteEntityTypes"].retry,
default_timeout=self._method_configs["BatchDeleteEntityTypes"].timeout,
client_info=self._client_info,
)
request = entity_type_pb2.BatchDeleteEntityTypesRequest(
parent=parent, entity_type_names=entity_type_names
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_delete_entity_types"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
def batch_create_entities(
self,
parent,
entities,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates multiple new entities in the specified entity type.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> parent = client.entity_type_path('[PROJECT]', '[ENTITY_TYPE]')
>>>
>>> # TODO: Initialize `entities`:
>>> entities = []
>>>
>>> response = client.batch_create_entities(parent, entities)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the entity type to create entities in. Format:
``projects/<Project ID>/agent/entityTypes/<Entity Type ID>``.
entities (list[Union[dict, ~google.cloud.dialogflow_v2.types.Entity]]): Required. The entities to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Entity`
language_code (str): Optional. The language of entity synonyms defined in ``entities``. If
not specified, the agent's default language is used. `Many
languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent before they
can be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_create_entities" not in self._inner_api_calls:
self._inner_api_calls[
"batch_create_entities"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_create_entities,
default_retry=self._method_configs["BatchCreateEntities"].retry,
default_timeout=self._method_configs["BatchCreateEntities"].timeout,
client_info=self._client_info,
)
request = entity_type_pb2.BatchCreateEntitiesRequest(
parent=parent, entities=entities, language_code=language_code
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_create_entities"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
def batch_update_entities(
self,
parent,
entities,
language_code=None,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates or creates multiple entities in the specified entity type. This
method does not affect entities in the entity type that aren't
explicitly specified in the request.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> parent = client.entity_type_path('[PROJECT]', '[ENTITY_TYPE]')
>>>
>>> # TODO: Initialize `entities`:
>>> entities = []
>>>
>>> response = client.batch_update_entities(parent, entities)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the entity type to update or create entities in.
Format: ``projects/<Project ID>/agent/entityTypes/<Entity Type ID>``.
entities (list[Union[dict, ~google.cloud.dialogflow_v2.types.Entity]]): Required. The entities to update or create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Entity`
language_code (str): Optional. The language of entity synonyms defined in ``entities``. If
not specified, the agent's default language is used. `Many
languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent before they
can be used.
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_update_entities" not in self._inner_api_calls:
self._inner_api_calls[
"batch_update_entities"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_update_entities,
default_retry=self._method_configs["BatchUpdateEntities"].retry,
default_timeout=self._method_configs["BatchUpdateEntities"].timeout,
client_info=self._client_info,
)
request = entity_type_pb2.BatchUpdateEntitiesRequest(
parent=parent,
entities=entities,
language_code=language_code,
update_mask=update_mask,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_update_entities"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
def batch_delete_entities(
self,
parent,
entity_values,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes entities in the specified entity type.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> parent = client.entity_type_path('[PROJECT]', '[ENTITY_TYPE]')
>>>
>>> # TODO: Initialize `entity_values`:
>>> entity_values = []
>>>
>>> response = client.batch_delete_entities(parent, entity_values)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the entity type to delete entries for. Format:
``projects/<Project ID>/agent/entityTypes/<Entity Type ID>``.
entity_values (list[str]): Required. The canonical ``values`` of the entities to delete. Note that
these are not fully-qualified names, i.e. they don't start with
``projects/<Project ID>``.
language_code (str): Optional. The language of entity synonyms defined in ``entities``. If
not specified, the agent's default language is used. `Many
languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent before they
can be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_delete_entities" not in self._inner_api_calls:
self._inner_api_calls[
"batch_delete_entities"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_delete_entities,
default_retry=self._method_configs["BatchDeleteEntities"].retry,
default_timeout=self._method_configs["BatchDeleteEntities"].timeout,
client_info=self._client_info,
)
request = entity_type_pb2.BatchDeleteEntitiesRequest(
parent=parent, entity_values=entity_values, language_code=language_code
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_delete_entities"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
| 43.007475
| 154
| 0.609644
|
4a116a170640d63f8b19da0b7757b435111b8cc5
| 645
|
py
|
Python
|
code/merge_intervals.py
|
shenhuaze/leetcode-python
|
b81bdb27d0f9da5620e83e2476c9ef585f4a0001
|
[
"MIT"
] | 1
|
2019-06-17T04:37:39.000Z
|
2019-06-17T04:37:39.000Z
|
code/merge_intervals.py
|
shenhuaze/leetcode-python
|
b81bdb27d0f9da5620e83e2476c9ef585f4a0001
|
[
"MIT"
] | null | null | null |
code/merge_intervals.py
|
shenhuaze/leetcode-python
|
b81bdb27d0f9da5620e83e2476c9ef585f4a0001
|
[
"MIT"
] | null | null | null |
"""
@author Huaze Shen
@date 2019-08-07
"""
def merge(intervals):
    """Merge all overlapping intervals (LeetCode 56).

    Args:
        intervals: A list of ``[start, end]`` pairs, in any order. May be
            ``None`` or empty.

    Returns:
        A list of disjoint ``[start, end]`` pairs sorted by start, or ``[]``
        for ``None``/empty/degenerate input. Note: the inner interval lists
        may be mutated in place while merging (same as the original).
    """
    # Treat None, [], [None] and [[]] all as "nothing to merge".
    if not intervals or not intervals[0]:
        return []
    intervals = sorted(intervals, key=lambda pair: pair[0])
    merged = [intervals[0]]
    for current in intervals[1:]:
        last = merged[-1]
        if current[0] > last[1]:
            # Disjoint from everything merged so far: start a new interval.
            merged.append(current)
        else:
            # Overlapping (or touching): extend the previous interval.
            last[1] = max(last[1], current[1])
    return merged
if __name__ == '__main__':
    # Quick demo using the classic LeetCode example input.
    sample = [[1, 3], [2, 6], [8, 10], [15, 18]]
    print(merge(sample))
| 28.043478
| 98
| 0.582946
|
4a116b1a4b2666ba5166ce30f47dbc3afe5d98ba
| 408
|
py
|
Python
|
instagram/notifications/serializers.py
|
bbamsa/instagramCloning
|
199df9f7b3376ef35b91032ceee578bdcc0d0b82
|
[
"MIT"
] | null | null | null |
instagram/notifications/serializers.py
|
bbamsa/instagramCloning
|
199df9f7b3376ef35b91032ceee578bdcc0d0b82
|
[
"MIT"
] | 15
|
2020-06-05T19:05:57.000Z
|
2022-03-08T22:50:35.000Z
|
instagram/notifications/serializers.py
|
shkimroy/instagram-clone
|
f711d82d94b4de92d1bea05fae4706b0ba496d64
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from . import models
from instagram.users import serializers as user_serializers
from instagram.images import serializers as image_serializers
class NotificationSerializer(serializers.ModelSerializer):
    """Serializes ``Notification`` model instances for the API.

    Embeds full nested representations of the related creator (user) and
    image instead of bare primary keys.
    """

    # Nested serializers replace the default PK fields for these relations.
    creator = user_serializers.ListUserSerializer()
    image = image_serializers.ImageSerializer()

    class Meta:
        # Expose every model field; the two declared above are overridden
        # by the nested serializers.
        model = models.Notification
        fields = '__all__'
| 31.384615
| 61
| 0.821078
|
4a116bb2f56fba3dd4f5e73822dffbacaf0a55bc
| 3,182
|
py
|
Python
|
generate_data.py
|
awsalialem/aws-datalake-glue-studio
|
3185b15d4f747a1f4f42a245edb2914486ae362c
|
[
"MIT"
] | null | null | null |
generate_data.py
|
awsalialem/aws-datalake-glue-studio
|
3185b15d4f747a1f4f42a245edb2914486ae362c
|
[
"MIT"
] | null | null | null |
generate_data.py
|
awsalialem/aws-datalake-glue-studio
|
3185b15d4f747a1f4f42a245edb2914486ae362c
|
[
"MIT"
] | null | null | null |
import os
import io
import sys
import json
import random
import boto3
import argparse
import datetime as dt
from faker import *
from faker import Faker, providers
# Create a client with aws service and region
def create_client(service, region):
    """Return a boto3 client for *service* bound to the given AWS *region*."""
    client = boto3.client(service, region_name=region)
    return client
class Ventilator(providers.BaseProvider):
    """Faker provider that hands out entries from a fixed pool of fake ventilators.

    The pool is built once, at class-definition (import) time, which makes
    importing this module noticeably slow by design.
    """

    # Shared Faker instance used only to build the pool below.
    fake = Faker()
    # Pre-generated pool. NOTE(review): the loop runs i = 1..9999, so the
    # pool holds 9999 entries, not 10000 — presumably off-by-one; confirm
    # whether 10000 was intended. ``i`` also remains bound as a class
    # attribute after the loop.
    ventilators = []
    i = 1
    while i < 10000:
        ventilators.append({'id':i, 'serialnumber': fake.uuid4(), 'manufacturer':random.choice(['3M', 'GE', 'Vyaire', 'Getinge']), 'hospitalid': fake.pyint(min_value=1, max_value=500)})
        i += 1

    def ventilator(self):
        # Uniformly pick one of the pre-built fake ventilators.
        return random.choice(self.ventilators)
class RecordGenerator(object):
    """Builds fake ventilator telemetry records for Kinesis ingestion.

    Each record is shaped as a ``put_records`` entry: a JSON-encoded payload
    under ``Data`` plus a constant ``PartitionKey``.
    """

    def __init__(self):
        # Field placeholders kept for interface parity; the generator
        # methods below do not read them.
        self.ventilatorid = 0
        self.hospitalid = 0
        self.eventtime = None
        self.serialnumber = ""
        self.pressurecontrol = 0
        self.o2stats = 0
        self.minutevolume = 0
        self.manufacturer = None

    def get_ventilator_record(self, fake):
        """Build one fake ventilator reading as a Kinesis record entry."""
        ventilator = fake.ventilator()
        payload = {
            'ventilatorid': ventilator['id'],
            'hospitalid': ventilator['hospitalid'],
            'eventtime': fake.date_time_between(start_date='-10m', end_date='now').isoformat(),
            'serialnumber': ventilator['serialnumber'],
            'pressurecontrol': fake.pyint(min_value=3, max_value=40),
            'o2stats': fake.pyint(min_value=90, max_value=100),
            'minutevolume': fake.pyint(min_value=2, max_value=10),
            'manufacturer': ventilator['manufacturer'],
        }
        return {
            'Data': json.dumps(payload).encode('utf-8'),
            'PartitionKey': 'partition_key',
        }

    def get_ventilator_records(self, rate, fake):
        """Return a batch of *rate* freshly generated records."""
        return [self.get_ventilator_record(fake) for _ in range(rate)]
def dumps_lines(objs):
    """Yield each object serialized as one compact, newline-terminated JSON line."""
    for item in objs:
        compact = json.dumps(item, separators=(',', ':'))
        yield compact + '\n'
def main():
    """Parse CLI args and continuously push fake ventilator records to Kinesis.

    Runs forever (until interrupted or an error is raised), pushing batches
    of 500 records per ``put_records`` call.
    """
    parser = argparse.ArgumentParser(description='Faker based streaming data generator')
    parser.add_argument('--streamname', action='store', dest='stream_name',
                        help='Provide Kinesis Data Stream name to stream data')
    parser.add_argument('--region', action='store', dest='region', default='us-east-1')
    args = parser.parse_args()

    # Make sure to set your profile here
    session = boto3.Session(profile_name='default')

    try:
        # Initialize Faker with the custom ventilator provider.
        fake = Faker()
        fake.add_provider(Ventilator)

        kinesis_client = session.client('kinesis', args.region)

        # Records generated per put_records batch.
        rate = 500
        generator = RecordGenerator()

        # Generate and ship ventilator data until interrupted.
        while True:
            fake_ventilator_records = generator.get_ventilator_records(rate, fake)
            kinesis_client.put_records(StreamName=args.stream_name, Records=fake_ventilator_records)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt is not
        # intercepted; report the error type, then re-raise for a traceback.
        print("Error:", sys.exc_info()[0])
        raise
if __name__ == "__main__":
    # Entry point when executed directly as a script.
    main()
| 28.666667
| 182
| 0.691389
|
4a116c11aac46628f2bfb46d7d919eb74311697b
| 876
|
py
|
Python
|
test/test_customer_input.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
test/test_customer_input.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
test/test_customer_input.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Beanie ERP API
An API specification for interacting with the Beanie ERP system # noqa: E501
OpenAPI spec version: 0.2
Contact: dev@bean.ie
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import beanie
from beanie.models.customer_input import CustomerInput # noqa: E501
from beanie.rest import ApiException
class TestCustomerInput(unittest.TestCase):
    """Generated unit-test stubs for the ``CustomerInput`` model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testCustomerInput(self):
        """Placeholder test for CustomerInput construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = beanie.models.customer_input.CustomerInput()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 21.365854
| 81
| 0.694064
|
4a116eb3a1730797224586d8204021779c9708e8
| 19,437
|
py
|
Python
|
camkes/runner/Context.py
|
aisamanra/camkes-tool
|
4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3
|
[
"BSD-2-Clause"
] | null | null | null |
camkes/runner/Context.py
|
aisamanra/camkes-tool
|
4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3
|
[
"BSD-2-Clause"
] | null | null | null |
camkes/runner/Context.py
|
aisamanra/camkes-tool
|
4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
'''
The jinja2 template code runs in a very restricted environment during
rendering. For example, you can't call functions like `map`. To expose certain
functions to the template code we need to explicitly pass these in during
rendering. This module encapsulates extra context elements we want to make
available to the template code.
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from camkes.internal.seven import cmp, filter, map, zip
from functools import partial
import capdl, code, collections, copy, inspect, itertools, functools, numbers, \
orderedset, os, pdb, re, six, sys, textwrap, math
from capdl.Allocator import seL4_TCBObject, seL4_EndpointObject, \
seL4_NotificationObject, seL4_CanRead, seL4_CanWrite, seL4_AllRights, \
seL4_ARM_SmallPageObject, seL4_FrameObject, seL4_IRQControl, \
seL4_UntypedObject, seL4_IA32_IOPort, seL4_IA32_IOSpace, \
seL4_ARM_IOSpace, \
seL4_ARM_SectionObject, seL4_ARM_SuperSectionObject, \
seL4_SchedContextObject, seL4_SchedControl, seL4_RTReplyObject
# Depending on what kernel branch we are on, we may or may not have ASIDs.
# There are separate python-capdl branches for this, but this import allows us
# to easily interoperate with both.
try:
from capdl.Allocator import seL4_ASID_Pool
except ImportError:
seL4_ASID_Pool = None
import camkes.ast as AST
from camkes.internal.Counter import Counter
from camkes.internal.version import version
from camkes.templates import macros, TemplateError
from .NameMangling import TEMPLATES, FILTERS, Perspective
def new_context(entity, assembly, obj_space, cap_space, shmem, kept_symbols, fill_frames, templates, **kwargs):
    '''Create a new default context for rendering.

    Returns the variable environment Jinja template code runs in: kernel
    object/cap allocators, name-mangling helpers, cross-template stashing
    primitives and a curated set of library modules.  Entries in `kwargs`
    are appended last and therefore override the defaults.
    '''
    # NOTE(review): assumes __builtins__ is a dict here (module import
    # context); it is a module when accessed from __main__ -- confirm.
    return dict(list(__builtins__.items()) + list({

        # Kernel object allocator
        'alloc_obj':(lambda name, type, **kwargs:
            alloc_obj((entity.label(), obj_space), obj_space,
                '%s_%s' % (entity.label(), name), type, label=entity.label(), **kwargs))
            if obj_space else None,
        'seL4_EndpointObject':seL4_EndpointObject,
        'seL4_NotificationObject':seL4_NotificationObject,
        'seL4_TCBObject':seL4_TCBObject,
        'seL4_ARM_SmallPageObject':seL4_ARM_SmallPageObject,
        'seL4_ARM_SectionObject':seL4_ARM_SectionObject,
        'seL4_ARM_SuperSectionObject':seL4_ARM_SuperSectionObject,
        'seL4_FrameObject':seL4_FrameObject,
        'seL4_UntypedObject':seL4_UntypedObject,
        'seL4_IA32_IOPort':seL4_IA32_IOPort,
        'seL4_IA32_IOSpace':seL4_IA32_IOSpace,
        'seL4_ARM_IOSpace':seL4_ARM_IOSpace,
        'seL4_SchedContextObject':seL4_SchedContextObject,
        'seL4_SchedControl':seL4_SchedControl,
        'seL4_RTReplyObject':seL4_RTReplyObject,
        'seL4_ASID_Pool':seL4_ASID_Pool,

        # Cap allocator
        'alloc_cap':(lambda name, obj, **kwargs:
            alloc_cap((entity.label(), cap_space), cap_space, name, obj, **kwargs)) \
            if cap_space else None,
        'seL4_CanRead':seL4_CanRead,
        'seL4_CanWrite':seL4_CanWrite,
        'seL4_AllRights':seL4_AllRights,
        'seL4_IRQControl':seL4_IRQControl,

        # The CNode root of your CSpace. Should only be necessary in cases
        # where you need to allocate a cap to it.
        'my_cnode':cap_space.cnode if cap_space is not None else None,

        # Batched object and cap allocation for when you don't need a reference
        # to the object. Probably best not to look directly at this one. When
        # you see `set y = alloc('foo', bar, moo)` in template code, think:
        #  set x = alloc_obj('foo_obj', bar)
        #  set y = alloc_cap('foo_cap', x, moo)
        'alloc':(lambda name, type, **kwargs:
            alloc_cap((entity.label(), cap_space), cap_space, name,
                alloc_obj((entity.label(), obj_space), obj_space,
                    '%s_%s' % (entity.label(), name), type, label=entity.label(),
                    **kwargs),
                **kwargs)) if cap_space else None,

        # Functionality for templates to inform us that they've emitted a C
        # variable that's intended to map to a shared variable. It is
        # (deliberately) left to the template authors to ensure global names
        # (gnames) only collide when intended; i.e. when they should map to the
        # same shared variable. The local name (lname) will later be used by us
        # to locate the relevant ELF frame(s) to remap. Note that we assume
        # address spaces and CSpaces are 1-to-1.
        'register_shared_variable':None if cap_space is None else \
            (lambda gname, lname, perm='RWX', paddr=None, frames=None:
                register_shared_variable(shmem, gname, cap_space.cnode.name,
                    lname, perm, paddr, frames)),

        # Function for templates to inform us that they would like certain
        # 'fill' information to get placed into the provided symbol. Provided
        # symbol should be page size and aligned. The 'fill' parameter is
        # an arbitrary string that will be set as the 'fill' parameter on the
        # capDL frame object. The meaning of fill is completely dependent
        # on the underlying loader
        'register_fill_frame':(lambda symbol, fill:
            register_fill_frame(fill_frames, symbol, fill, entity)),

        # Inform the linker that a C symbol should not be removed, even if
        # it not used by any C code.
        'keep_symbol':(lambda symbol: keep_symbol(kept_symbols, symbol, entity)),

        # Returns an iterator over all the C symbols declared to be kept
        # by a given component instance (specified by name).
        'kept_symbols':(lambda name: iter(kept_symbols[name] if name in kept_symbols else ())),

        # A `self`-like reference to the current AST object. It would be nice
        # to actually call this `self` to lead to more pythonic templates, but
        # `self` inside template blocks refers to the jinja2 parser.
        'me':entity,

        # The AST assembly's configuration.
        'configuration':assembly.configuration,

        # The AST assembly's composition
        'composition':assembly.composition,

        # Shared memory metadata. Templates should only need to modify this if
        # they're doing something cross-component.
        'shmem':shmem if entity is not None else None,

        # Cross-template variable passing helpers. These are quite low-level.
        # Avoid calling them unless necessary.
        'stash':partial(stash, entity.label()),
        'pop':partial(pop, entity.label()),
        'guard':partial(guard, entity.label()),

        # If the previous group of functions are considered harmful, these are
        # to be considered completely off limits. These expose a mechanism for
        # passing data between unrelated templates (_stash and _pop) and a way
        # of running arbitrary Python statements and expressions. They come
        # with significant caveats. E.g. _stash and _pop will likely not behave
        # as expected with the template cache enabled.
        '_stash':partial(stash, ''),
        '_pop':partial(pop, ''),
        'exec':_exec,

        # Helpers for creating unique symbols within templates.
        'c_symbol':partial(symbol, '_camkes_%(tag)s_%(counter)d'),
        'isabelle_symbol':partial(symbol, '%(tag)s%(counter)d\'', 's'),

        # Expose some library functions
        'assert':_assert,
        'itertools':itertools,
        'functools':functools,
        # NOTE: evaluates template-supplied text; acceptable only because
        # templates are trusted input.
        'lambda':lambda s: eval('lambda %s' % s),
        'numbers':numbers,
        'os':os,
        'pdb':pdb,
        'raise':_raise,
        're':re,
        'six':six,
        'set':orderedset.OrderedSet,
        'textwrap':textwrap,
        'copy':copy,
        'zip':zip,
        'math':math,
        'enumerate':enumerate,

        # Allocation pools. In general, do not touch these in templates, but
        # interact with them through the alloc* functions. They are only in the
        # context to allow unanticipated template extensions.
        'obj_space':obj_space,
        'cap_space':cap_space,

        # Debugging functions
        'breakpoint':_breakpoint,
        'sys':sys,

        # Work around for Jinja's bizarre scoping rules.
        'Counter':Counter,

        # Support for name mangling in the templates. See existing usage for
        # examples.
        'Perspective':lambda **kwargs:Perspective(TEMPLATES, **kwargs),

        # Low-level access to name mangling. Should only be required when you
        # need to access both mangling phases.
        'NameMangling':collections.namedtuple('NameMangling',
            ['FILTERS', 'TEMPLATES', 'Perspective'])(FILTERS, TEMPLATES,
                Perspective),

        # Return a list of distinct elements. Normally you would just do this
        # as list(set(xs)), but this turns out to be non-deterministic in the
        # template environment for some reason.
        # Fixed: bare `reduce` is not a builtin on Python 3; qualify it with
        # the already-imported functools module.
        'uniq':lambda xs: functools.reduce(
            lambda ys, z: ys if z in ys else (ys + [z]), xs, []),

        # Functional helpers.
        'flatMap':lambda f, xs: list(itertools.chain.from_iterable(map(f, xs))),
        'flatten':lambda xss: list(itertools.chain.from_iterable(xss)),

        # Macros for common operations.
        'macros':macros,

        # This function abstracts away the differences between the RT kernel's
        # seL4_Recv and the master kernel's seL4_Recv. Namely, the RT kernel's
        # seL4_Recv takes an extra reply object cap.
        #
        # seL4_Recv is distinct from seL4_Wait, in that a seL4_Recv() call
        # expects to potentially get a reply cap from the sender.
        'generate_seL4_Recv': generate_seL4_Recv,

        # This function is similar to generate_seL4_Recv, in that it also
        # abstracts away the differences between the RT and master kernels.
        # This function specifically abstracts away the differences between
        # seL4_SignalRecv (on master) and seL4_NBSendRecv (on RT).
        'generate_seL4_SignalRecv': generate_seL4_SignalRecv,

        # This function is similar to generate_seL4_Recv as well, but it
        # abstracts away the differences between seL4_ReplyRecv between the
        # RT and master branches.
        'generate_seL4_ReplyRecv': generate_seL4_ReplyRecv,

        # Give template authors access to AST types just in case. Templates
        # should never be constructing objects of these types, but they may
        # need to do `isinstance` testing.
        'camkes':collections.namedtuple('camkes', ['ast'])(AST),

        # Expose CapDL module for `isinstance` testing.
        'capdl':capdl,

        # Give the template authors a mechanism for writing C-style include
        # guards. Use the following idiom to guard an include target:
        #  /*- if 'template.filename' not in included' -*/
        #  /*- do included.add('template.filename') -*/
        #  ... my template ...
        #  /*- endif -*/
        'included':set(),

        # Expose an exception class templates can use to throw errors related
        # to invalid input specification.
        'TemplateError':TemplateError,

        # Version information. Templates are unlikely to depend on this, but we
        # emit it to give component instances a runtime-discoverable CAmkES
        # version.
        'camkes_version':version(),

        # Look up a template
        'lookup_template':lambda path, entity: templates.lookup(path, entity),

    }.items()) + list(kwargs.items()))
# For all three of these functions below, for the 'badge_var_name' variable,
# be sure that you pass in an ampersand character prefixed to the argument if
# your badge variable isn't a pointer.
def generate_seL4_Recv(options, ep_cap, badge_var_name, reply_cap):
    '''Emit a C seL4_Recv() call, adding the RT kernel's extra reply cap.

    For 'badge_var_name', pass an '&'-prefixed expression if the badge
    variable is not already a pointer.
    '''
    args = [ep_cap, badge_var_name]
    if options.realtime:
        args.append(reply_cap)
    return 'seL4_Recv(%s)' % ', '.join(args)
def generate_seL4_SignalRecv(options, dest_ntfn_cap, dest_msginfo_var_name, src_ep_cap, badge_var_name, reply_cap):
    '''Emit the signal-then-receive C call: seL4_NBSendRecv on the RT kernel,
    seL4_SignalRecv on master.'''
    if not options.realtime:
        return 'seL4_SignalRecv(%s, %s, %s)' % (dest_ntfn_cap, src_ep_cap, badge_var_name)
    return 'seL4_NBSendRecv(%s, %s, %s, %s, %s)' % (
        dest_ntfn_cap, dest_msginfo_var_name, src_ep_cap, badge_var_name,
        reply_cap)
def generate_seL4_ReplyRecv(options, src_ep_cap, dest_msginfo_var_name, badge_var_name, reply_cap):
    '''Emit a C seL4_ReplyRecv() call, adding the RT kernel's reply cap.'''
    if not options.realtime:
        return 'seL4_ReplyRecv(%s, %s, %s)' % (src_ep_cap, dest_msginfo_var_name, badge_var_name)
    return 'seL4_ReplyRecv(%s, %s, %s, %s)' % (
        src_ep_cap, dest_msginfo_var_name, badge_var_name, reply_cap)
def _assert(condition, msg=None):
'''Hack to reify assert as a callable'''
if msg is not None:
assert condition, msg
else:
assert condition
return ''
def _exec(statement):
    '''Hack to reify exec as a callable.

    Executes `statement` in the caller template's global/local namespaces,
    located by walking the interpreter stack.  Returns '' so invocations
    leave no residue in rendered output.
    '''
    # Jinja seems to invoke this through a variable level of indirection.
    # Search up our stack for the caller's context, identifiable by their 'me'
    # variable. This is a bit fragile, but since _exec should only be a tool of
    # last resort, I think it's acceptable.
    stack_frames = inspect.stack()
    caller = None
    for i, f in enumerate(stack_frames):
        # f[0] is the frame object; 'me' is injected by new_context, so its
        # presence marks a template rendering frame.
        if 'me' in f[0].f_locals:
            # Found it.
            caller = i
            break
    if caller is None:
        raise Exception('_exec: failed to find caller\'s context')
    # Run the statement with the template's own globals and locals so it can
    # read and (within exec's limits) write template state.
    six.exec_(statement, stack_frames[caller][0].f_globals,
        stack_frames[caller][0].f_locals)
    return ''
def _raise(exception):
'''Hack to reify raise as a callable'''
if isinstance(exception, Exception):
raise exception
else:
assert hasattr(exception, '__call__')
raise exception()
return ''
def _breakpoint():
    '''Debugging function to be called from templates. This drops you into the
    Python interpreter with a brief attempt to align your locals() with the
    template's.'''
    # Arguments eventually passed to code.interact().
    kwargs = {
        'banner':'Breakpoint triggered',
    }

    # Try and locate the stack frame containing the template context. This is a
    # bit error prone, but it's nice if we can find it because then we can fake
    # the template context to the interpreter prompt.
    for f in inspect.stack():
        if 'context' in f[0].f_locals:
            # Merge the frame's globals with the template context so names the
            # template can see are also visible at the prompt.
            kwargs['local'] = f[0].f_globals.copy()
            kwargs['local'].update(f[0].f_locals['context'])
            break

    # Blocks on interactive input; only reachable during manual debugging.
    code.interact(**kwargs)

    # It's possible the template called this from inside a /*? ?*/ block, so
    # make sure we don't mess up the output:
    return ''
# Functionality for carrying variables between related templates. The idea is
# that one template performs stash('foo', 'bar') to save 'bar' and the other
# template performs pop('foo') to retrieve 'bar'. This pattern relies on the
# order of instantiation of templates. To avoid this, use the guard function
# below. See the templates for examples.
# Per-client key/value mapping backing the stash/pop/guard helpers below.
store = {}

def stash(client, key, value):
    '''Save `value` under `key` on behalf of `client`.

    Returns '' so invocations leave no residue in rendered output.
    '''
    store.setdefault(client, {})[key] = value
    return ''

def pop(client, key):
    '''Remove and return the value stashed under `key` for `client`, or None
    if nothing is stashed.  Empty client buckets are garbage-collected.'''
    bucket = store.get(client)
    if bucket is None or key not in bucket:
        return None
    value = bucket.pop(key)
    if not bucket:
        del store[client]
    return value

def guard(client, func, key, **kwargs):
    '''Retrieve the value for key from the stash. If it does not exist, call
    func to get a value. In either event re-stash the resulting value under the
    same key and return it.'''
    value = pop(client, key)
    if value is None:
        value = func(**kwargs)
    stash(client, key, value)
    return value
# Monotonic counter backing symbol(); guarantees generated names are unique
# for the lifetime of the process.
symbol_counter = 0

def symbol(pattern, default_tag='', tag=None):
    '''Allocate a fresh symbol for use in a template, avoiding collisions
    with user symbols.

    `pattern` is %-formatted with a `counter` (monotonically increasing) and
    a `tag` (`tag` argument if truthy, else `default_tag`).
    '''
    global symbol_counter
    result = pattern % {
        'counter': symbol_counter,
        'tag': tag if tag else default_tag,
    }
    symbol_counter += 1
    return result
def alloc_obj(client, space, name, type, label=None, **kwargs):
    '''Guarded object allocation: if an object was already allocated under
    this name, return it; otherwise allocate through `space` and remember it
    for next time.'''
    return guard(client, space.alloc, '%s_obj' % name,
        type=type, name=name, label=label, **kwargs)
def alloc_cap(client, space, name, obj, **kwargs):
    '''Guarded cap allocation. Works similarly to alloc_obj above.'''
    cap = guard(client, space.alloc, '%s_cap' % name, name=name, obj=obj,
        **kwargs)

    if obj is None:
        # The caller was allocating a free slot. No rights are relevant.
        return cap

    # Upgrade the cap's rights if required. This can occur in a situation where
    # we have connected an outgoing interface of a component instance to an
    # incoming interface of the same component. alloc will be called twice on
    # the EP with different permissions and we want to take the union of them.
    slot = space.cnode[cap]
    for right in ('read', 'write', 'grant'):
        if kwargs.get(right, False) and not getattr(slot, right):
            setattr(slot, right, True)

    return cap
def register_shared_variable(shmem, global_name, local_context, local_name,
        permissions='RWX', paddr=None, frames=None):
    '''Track a variable that is intended to map to a cross-address-space shared
    variable.

     shmem         - The dictionary to use for tracking
     global_name   - The system-wide name for this variable
     local_context - The owner's CNode name
     local_name    - The name of this variable in the owner's address space
    '''
    record = (local_name, permissions, paddr, frames)
    shmem[global_name][local_context].append(record)
    # Return code to:
    #  1. page-align the shared variable;
    #  2. make it visible in the final ELF; and
    #  3. Check that it is page-sized.
    return 'extern typeof(%(sym)s) %(sym)s ALIGN(PAGE_SIZE_4K) VISIBLE;\n' \
           'static_assert(sizeof(%(sym)s) %% PAGE_SIZE_4K == 0,\n' \
           '    "%(sym)s not page-sized. Template bug in its declaration? ' \
           'Suggested formulation: `char %(sym)s[ROUND_UP_UNSAFE(sizeof(...), ' \
           'PAGE_SIZE_4K)];`");' % {'sym':local_name}
def keep_symbol(kept_symbols, symbol, entity):
    '''Record that `symbol` must survive linking for `entity`'s component
    instance, even if no C code references it.'''
    instance_name = entity.instance.name
    kept_symbols.setdefault(instance_name, set()).add(symbol)
def register_fill_frame(fill_frames, symbol, fill, entity):
    '''Record that `symbol` in `entity`'s component instance should be backed
    by a capDL frame carrying the given 'fill' specification.

    Returns C code that statically asserts the symbol is exactly one page,
    for the template to emit alongside the symbol's declaration.
    '''
    name = entity.instance.name
    if name not in fill_frames:
        fill_frames[name] = set()
    fill_frames[name].add((symbol, fill))
    # Fixed typo in the emitted diagnostic: 'Templage' -> 'Template'
    # (matching the message in register_shared_variable above).
    return 'static_assert(sizeof(%(sym)s) == PAGE_SIZE_4K,\n' \
        '    "%(sym)s not page sized. Template bug in its declaration?");' \
        % {'sym':symbol}
| 42.346405
| 132
| 0.666821
|
4a116ecfd8681b240766859ac4029218d363d6d9
| 5,573
|
py
|
Python
|
handlers/users/new_list.py
|
Ilya-koala/VSL_Bot
|
03399a49a2bc8baa97cc5d6f84a7406dbaafd262
|
[
"MIT"
] | null | null | null |
handlers/users/new_list.py
|
Ilya-koala/VSL_Bot
|
03399a49a2bc8baa97cc5d6f84a7406dbaafd262
|
[
"MIT"
] | 1
|
2021-06-25T18:53:54.000Z
|
2021-06-30T16:15:54.000Z
|
handlers/users/new_list.py
|
Ilya-koala/VSL_Bot
|
03399a49a2bc8baa97cc5d6f84a7406dbaafd262
|
[
"MIT"
] | 3
|
2021-04-18T09:39:59.000Z
|
2021-05-18T16:47:19.000Z
|
from os import path
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from aiogram.types import CallbackQuery, Message
from aiogram.dispatcher import FSMContext
from loader import dp, db
from states.new_list import NewList
from keyboards.inline.lists_markups import start_page_keyboard
from utils.voice_recognition import voice_to_text
@dp.callback_query_handler(text="new_list")
async def new_list(call: CallbackQuery, state: FSMContext):
    """Start the 'new list' dialog: ask the user for a list name.

    Edits the bot's message in place and moves the FSM to the
    NewList.list_name state; the edited message is kept in FSM storage so
    later handlers can keep editing it instead of sending new messages.
    """
    back = InlineKeyboardMarkup(
        inline_keyboard=[
            [InlineKeyboardButton(text="◀️Назад", callback_data="back_to_start_page")]
        ]
    )
    # (Removed leftover debug `print(back)` that dumped the keyboard markup
    # to stdout on every invocation.)
    last_message = await call.message.edit_text(
        "Пожалуйста, введи название твоего списка\nP.S. Ты также можешь просто отправить голосовое сообщение с названием твоего списка",
        reply_markup=back,
    )
    await state.update_data(last_message=last_message)
    await NewList.list_name.set()
@dp.message_handler(state=NewList.list_name)
async def new_values(message: Message, state: FSMContext):
    """Receive the list name as a text message and prompt for list items.

    Stores the name in FSM storage, deletes the user's message to keep the
    chat tidy, edits the bot's tracked prompt message, and advances to the
    NewList.new_values state.
    """
    back = InlineKeyboardMarkup(
        inline_keyboard=[
            [InlineKeyboardButton(text="◀️Назад", callback_data="back_to_list_name")]
        ]
    )
    list_name = message.text
    await state.update_data(list_name=list_name)
    # The bot edits its own earlier prompt rather than sending a new message.
    await message.delete()
    data = await state.get_data()
    last_message = data.get("last_message")
    await last_message.edit_text(
        f"Название списка: <strong>{list_name}</strong>\n\nОтлично! Пожалуйста, введи продукты, которые ты хочешь добавить в свой список.\nПример:\n<code>Кефир, молоко, стейки, вино</code>\nИли можешь отправить голосовое сообщение, в которым ты назовешь продукты, которые ты хотел бы добавить в список",
        reply_markup=back,
    )
    await NewList.new_values.set()
@dp.message_handler(content_types=["voice"], state=NewList.list_name)
async def voice_list_name(message: Message, state: FSMContext):
    """Receive the list name as a voice message.

    Downloads the voice file, runs speech recognition, and on success behaves
    like new_values(); on recognition failure asks the user to retry.
    """
    back = InlineKeyboardMarkup(
        inline_keyboard=[
            [InlineKeyboardButton(text="◀️Назад", callback_data="back_to_list_name")]
        ]
    )
    # Voice files are downloaded into the project-level data/ directory.
    dir_name = path.join(path.dirname(path.realpath(__file__)), "../../data/")
    # dir_name = "/".join(__file__.split("\\")[:-3]) + "/data/"
    voice_name = (await message.voice.download(destination=dir_name)).name
    list_name = voice_to_text(voice_name)
    data = await state.get_data()
    last_message = data.get("last_message")
    try:
        # Best-effort cleanup of a previous "could not recognize" prompt and
        # the user's previous voice message, if any; absence is not an error.
        msg = data.get("msg")
        user_msg = data.get("user_msg")
        await msg.delete()
        await user_msg.delete()
    except Exception as e:
        print(e)
    if not list_name:
        # Recognition failed: ask the user to retry and remember both messages
        # so they can be cleaned up on the next attempt.
        msg = await message.answer(
            "Ничего не понятно =(\nПожалуйста попробуй отправить свое голосовое сообщение еще раз"
        )
        await state.update_data(msg=msg, user_msg=message)
        return
    await state.update_data(list_name=list_name)
    await message.delete()
    await last_message.edit_text(
        f"Название списка: <strong>{list_name}</strong>\n\nОтлично! Пожалуйста, введи продукты, которые ты хочешь добавить в свой список.\nПример:\n<code>Кефир, молоко, стейки, вино</code>\nИли можешь отправить голосовое сообщение, в которым ты назовешь продукты, которые ты хотел бы добавить в список",
        reply_markup=back,
    )
    await NewList.new_values.set()
@dp.message_handler(state=NewList.new_values)
async def get_list_name(message: Message, state: FSMContext):
    """Parse comma-separated items from text, persist the list and finish.

    Items are lowercased, stripped and de-duplicated; note set() makes item
    order arbitrary -- presumably order is not significant for db.new_list
    (TODO confirm).
    """
    data = await state.get_data()
    user_id = str(message.from_user.id)
    new_values = list(set([x.strip() for x in message.text.lower().split(",")]))
    # ':' is stripped from the name -- presumably it is a delimiter in the
    # storage key format; verify against db.new_list.
    list_name = data.get("list_name").replace(":", " ")
    last_message = data.get("last_message")
    db.new_list(user_id, list_name, new_values)
    await message.delete()
    text = (
        f"{message.from_user.full_name}, я сохранил твой список и готов дальше к работе"
    )
    await last_message.edit_text(text, reply_markup=start_page_keyboard)
    await state.finish()
@dp.message_handler(content_types=["voice"], state=NewList.new_values)
async def new_list_values_voice(message: Message, state: FSMContext):
    """Receive the list items as a voice message, persist the list and finish.

    Mirrors get_list_name() but items come from speech recognition and are
    split on whitespace rather than commas.
    """
    dir_name = path.join(
        path.dirname(path.realpath(__file__)), "../../data/"
    )  # for linux
    # dir_name = "/".join(__file__.split("\\")[:-3]) + "/data/"  # for win10
    data = await state.get_data()
    user_id = str(message.from_user.id)
    # ':' is stripped from the name -- presumably a storage-key delimiter.
    list_name = data.get("list_name").replace(":", " ")
    voice_name = (await message.voice.download(destination=dir_name)).name
    new_values = voice_to_text(voice_name)
    last_message = data.get("last_message")
    try:
        # Best-effort cleanup of a previous retry prompt; absence is expected
        # on the first attempt.
        msg = data.get("msg2")
        user_msg = data.get("user_msg2")
        await msg.delete()
        await user_msg.delete()
    except Exception as e:
        print(e)
    if not new_values:
        # Recognition failed: ask for another voice message and remember the
        # prompt/user messages for cleanup on the next attempt.
        msg = await message.answer(
            "Ничего не понятно =(\nПожалуйста попробуй отправить свое голосовое сообщение еще раз"
        )
        await state.update_data(msg2=msg, user_msg2=message)
        return
    # De-duplicate recognized words; set() makes ordering arbitrary.
    new_values = list(set(new_values.split()))
    db.new_list(user_id, list_name, new_values)
    await message.delete()
    text = (
        f"{message.from_user.full_name}, я сохранил твой список и готов дальше к работе"
    )
    await last_message.edit_text(text, reply_markup=start_page_keyboard)
    await state.finish()
@dp.callback_query_handler(text="back_to_list_name", state="*")
async def back_to_list_name(call: CallbackQuery, state: FSMContext):
    """Handle the 'back' button by restarting the new-list dialog."""
    await new_list(call, state)
| 33.981707
| 303
| 0.695317
|
4a116f08198cf67388009caa49a582965bb9e6be
| 2,942
|
py
|
Python
|
proxypool/db.py
|
linearxian/ProxyPool
|
eee64e8a6ca8908d78f428edfc9f8e0b7cbaf90e
|
[
"Apache-2.0"
] | null | null | null |
proxypool/db.py
|
linearxian/ProxyPool
|
eee64e8a6ca8908d78f428edfc9f8e0b7cbaf90e
|
[
"Apache-2.0"
] | null | null | null |
proxypool/db.py
|
linearxian/ProxyPool
|
eee64e8a6ca8908d78f428edfc9f8e0b7cbaf90e
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append("/Users/xianwu1/Documents/crawler/ProxyPool")
import redis
from proxypool.error import PoolEmptyError
from proxypool.setting import REDIS_HOST, REDIS_PORT, REDIS_PASSWORD, REDIS_KEY
from proxypool.setting import MAX_SCORE, MIN_SCORE, INITIAL_SCORE
from random import choice
import re
class RedisClient(object):
    """Proxy pool backed by a Redis sorted set (member=proxy, score=health).

    Proxies enter at INITIAL_SCORE, are promoted to MAX_SCORE when verified
    usable, decremented on failure and evicted below MIN_SCORE.
    """

    def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD):
        """Initialise the Redis connection.

        :param host: Redis host
        :param port: Redis port
        :param password: Redis password
        """
        self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)

    def add(self, proxy, score=INITIAL_SCORE):
        """Add a proxy with the initial score, rejecting malformed entries.

        :param proxy: proxy in ``host:port`` form
        :param score: initial score
        :return: ZADD result, or None if the proxy was rejected or present
        """
        # Raw string fixes the invalid '\d'/'\:' escape sequences of the
        # original literal; ':' needs no escaping in a regex.
        if not re.match(r'\d+\.\d+\.\d+\.\d+:\d+', proxy):
            print('代理不符合规范', proxy, '丢弃')
            return
        if not self.db.zscore(REDIS_KEY, proxy):
            # NOTE(review): positional (key, score, member) matches the
            # redis-py 2.x zadd signature; redis-py 3+ requires a mapping --
            # confirm the pinned client version.
            return self.db.zadd(REDIS_KEY, score, proxy)

    def random(self):
        """Return a random usable proxy.

        Prefers proxies at MAX_SCORE; otherwise falls back to the top 100 by
        score; raises PoolEmptyError if the pool is empty.
        """
        result = self.db.zrangebyscore(REDIS_KEY, MAX_SCORE, MAX_SCORE)
        if len(result):
            return choice(result)
        result = self.db.zrevrange(REDIS_KEY, 0, 100)
        if len(result):
            return choice(result)
        raise PoolEmptyError

    def decrease(self, proxy):
        """Decrement a proxy's score by one; drop it once below MIN_SCORE.

        :param proxy: proxy to penalise
        :return: new score, or ZREM result when removed
        """
        score = self.db.zscore(REDIS_KEY, proxy)
        if score and score > MIN_SCORE:
            print('代理', proxy, '当前分数', score, '减1')
            # NOTE(review): (key, member, delta) matches redis-py 2.x; the
            # 3.x signature is (key, delta, member) -- confirm client version.
            return self.db.zincrby(REDIS_KEY, proxy, -1)
        print('代理', proxy, '当前分数', score, '移除')
        return self.db.zrem(REDIS_KEY, proxy)

    def exists(self, proxy):
        """Return True if the proxy is present in the pool."""
        # `is not None` replaces the original `not ... == None` comparison.
        return self.db.zscore(REDIS_KEY, proxy) is not None

    def max(self, proxy):
        """Promote a verified-usable proxy to MAX_SCORE.

        :param proxy: proxy to promote
        :return: ZADD result
        """
        print('代理', proxy, '可用,设置为', MAX_SCORE)
        return self.db.zadd(REDIS_KEY, MAX_SCORE, proxy)

    def count(self):
        """Return the number of proxies in the pool."""
        return self.db.zcard(REDIS_KEY)

    def all(self):
        """Return every proxy whose score lies in [MIN_SCORE, MAX_SCORE]."""
        return self.db.zrangebyscore(REDIS_KEY, MIN_SCORE, MAX_SCORE)

    def batch(self, start, stop):
        """Return proxies ranked [start, stop) in descending score order.

        :param start: start rank (inclusive)
        :param stop: end rank (exclusive)
        """
        return self.db.zrevrange(REDIS_KEY, start, stop - 1)
if __name__ == '__main__':
    # Smoke test: fetch and print one random proxy from the pool.
    conn = RedisClient()
    result = conn.random()
    print(result)
| 27.240741
| 99
| 0.551666
|
4a116f34c49e66a7c55bc34d425d0b46954c7907
| 1,870
|
py
|
Python
|
displ/build/submit_pw_post.py
|
tflovorn/displ
|
094c194c54f02d463353075c6ca82f457f1247fa
|
[
"MIT"
] | 4
|
2018-04-09T20:39:24.000Z
|
2021-06-19T12:21:52.000Z
|
displ/build/submit_pw_post.py
|
tflovorn/displ
|
094c194c54f02d463353075c6ca82f457f1247fa
|
[
"MIT"
] | null | null | null |
displ/build/submit_pw_post.py
|
tflovorn/displ
|
094c194c54f02d463353075c6ca82f457f1247fa
|
[
"MIT"
] | 4
|
2018-04-09T20:39:41.000Z
|
2021-06-19T12:21:53.000Z
|
import argparse
import os
from copy import deepcopy
from displ.build.util import _global_config
from displ.build.build import get_prefix_groups
from displ.queue.queuefile import mpi_procs_per_node
from displ.queue.internal import enqueue
def submit_pw_post(base_path, config, prefix_groups):
    '''Enqueue one 'pw_post_group' job per prefix group.

    `config` is used as a shared template: `base_path` is written into it,
    then each job gets its own deep copy with its group index as the prefix.
    '''
    config["base_path"] = base_path
    for index, _ in enumerate(prefix_groups):
        job_config = deepcopy(config)
        job_config["calc"] = "pw_post_group"
        job_config["prefix"] = str(index)
        enqueue(job_config)
def _main():
    """Parse CLI arguments and enqueue pw_post jobs for every prefix group.

    Reads global configuration for the work directory and an optional
    explicit bands.x location, then dispatches one queued job per prefix
    group found under the work path.
    """
    parser = argparse.ArgumentParser("Run postprocessing to set up Wannier90 calculation",
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--subdir", type=str, default=None,
            help="Subdirectory under work_base to run calculation")
    parser.add_argument("--global_prefix", type=str, default="WSe2_WSe2_WSe2",
            help="Prefix for calculation")
    args = parser.parse_args()

    gconf = _global_config()
    base_path = os.path.expandvars(gconf["work_base"])
    if args.subdir is not None:
        base_path = os.path.join(base_path, args.subdir)

    # Prefer an explicitly configured bands.x; otherwise rely on PATH lookup.
    if "qe_bands" in gconf:
        qe_bands_dir = os.path.expandvars(gconf["qe_bands"])
        qe_bands_path = os.path.join(qe_bands_dir, "bands.x")
    else:
        qe_bands_path = "bands.x"

    # (Removed unused local `calc = "pw_post"`; submit_pw_post hard-codes the
    # per-group calc value itself.)
    prefix_groups = get_prefix_groups(base_path, args.global_prefix)

    machine = "stampede2"
    cores_per_node = mpi_procs_per_node(machine)

    config = {"machine": machine, "cores": cores_per_node, "nodes": 1, "queue": "normal",
            "hours": 48, "minutes": 0, "wannier": True, "project": "A-ph9",
            "global_prefix": args.global_prefix, "max_jobs": 24,
            "qe_bands": qe_bands_path}

    submit_pw_post(base_path, config, prefix_groups)
if __name__ == "__main__":
    # Command-line entry point.
    _main()
| 35.961538
| 90
| 0.689305
|
4a11700ef6f5275e0dd2b0804ac93ae058787fab
| 2,689
|
py
|
Python
|
src/discurses/ui/member_list.py
|
StarlitGhost/Discurses
|
7ba4882aab9cd8e1de1e089d02877942c9b70a16
|
[
"MIT"
] | 168
|
2016-07-25T17:04:37.000Z
|
2022-01-23T22:49:36.000Z
|
src/discurses/ui/member_list.py
|
StarlitGhost/Discurses
|
7ba4882aab9cd8e1de1e089d02877942c9b70a16
|
[
"MIT"
] | 68
|
2016-07-26T14:00:43.000Z
|
2019-11-05T10:36:16.000Z
|
src/discurses/ui/member_list.py
|
StarlitGhost/Discurses
|
7ba4882aab9cd8e1de1e089d02877942c9b70a16
|
[
"MIT"
] | 29
|
2016-10-06T08:51:18.000Z
|
2021-06-14T17:48:47.000Z
|
import os
import urwid
import discord
import discurses.keymaps as keymaps
class MemberList(urwid.WidgetWrap):
    """Sidebar widget listing the members of the servers behind the open
    channels, grouped online / idle / offline.

    Refreshes itself on the global 'redraw' command and on Discord member
    join/remove/update events.
    """

    def __init__(self, chat_widget):
        self.chat_widget = chat_widget
        self.list_walker = urwid.SimpleListWalker([])
        self.w_listbox = urwid.ListBox(self.list_walker)
        self.update_list()
        # __super is urwid's WidgetWrap initialisation idiom.
        self.__super.__init__(urwid.Padding(self.w_listbox, left=2))
        keymaps.GLOBAL.add_command("redraw", self.update_list)

        def updlst(*args, **kwargs):
            # Event payloads are ignored; any membership change triggers a
            # full rebuild of the list.
            self.update_list()
        self.chat_widget.discord.add_event_handler("on_member_join", updlst)
        self.chat_widget.discord.add_event_handler("on_member_remove", updlst)
        self.chat_widget.discord.add_event_handler("on_member_update", updlst)

    def _get_user_attr(self, member):
        # Map a member's presence to a display attribute name.
        # NOTE(review): statuses other than online/offline/idle (e.g. dnd)
        # fall through and return None -- confirm urwid treats None as the
        # default attribute here.
        if member.status == discord.Status.online:
            return "sidebar_user_on"
        if member.status == discord.Status.offline:
            return "sidebar_user_off"
        if member.status == discord.Status.idle:
            return "sidebar_user_idle"

    def mouse_event(self, size, event, button, col, row, focus):
        # Translate scroll-wheel presses (buttons 4/5) into list navigation;
        # forward everything else to the listbox.
        if event == 'mouse press':
            if button == 4:
                return self.w_listbox.keypress(size, "up") is not None
            if button == 5:
                return self.w_listbox.keypress(size, "down") is not None
        return self.w_listbox.mouse_event(size, event, button, col, row, focus)

    def update_list(self):
        # Rebuilds the member list asynchronously on the Discord client's
        # event loop, then swaps the walker contents in one assignment.
        async def callback():
            servers = set()
            memberset = set()
            # Collect the union of members across all non-private channels.
            for ch in self.chat_widget.channels:
                if not ch.is_private:
                    servers.add(ch.server)
            for serv in servers:
                for member in serv.members:
                    memberset.add(member)
            items = []
            on = []
            idle = []
            off = []
            # Partition by presence so the rendered order is online, idle,
            # then offline.
            for member in memberset:
                if member.status == discord.Status.online:
                    on.append(member)
                if member.status == discord.Status.offline:
                    off.append(member)
                if member.status == discord.Status.idle:
                    idle.append(member)
            members = on + idle + off
            for member in members:
                items.append(
                    urwid.AttrMap(
                        urwid.Padding(
                            urwid.Text(member.display_name), left=1, right=1),
                        self._get_user_attr(member),
                        self._get_user_attr(member)))
            # Slice assignment updates the walker in place so the listbox
            # notices the change.
            self.list_walker[:] = items
        self.chat_widget.discord.async_do(callback())
| 37.347222
| 79
| 0.566753
|
4a117118b3b382f4d488fd4e6208e3f2bf4c240f
| 754
|
py
|
Python
|
test/model/financeiroservicovalor.py
|
robertons/dbmodel
|
628c4d6a2d0b23d9137015e5b0ea4551b20943bd
|
[
"MIT"
] | null | null | null |
test/model/financeiroservicovalor.py
|
robertons/dbmodel
|
628c4d6a2d0b23d9137015e5b0ea4551b20943bd
|
[
"MIT"
] | null | null | null |
test/model/financeiroservicovalor.py
|
robertons/dbmodel
|
628c4d6a2d0b23d9137015e5b0ea4551b20943bd
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
from dbmodel.entity import *
class FinanceiroServicoValor(Entity):
    """Declarative dbmodel entity for a financial-service value record.

    The Int/String/Decimal/Object decorators come from dbmodel.entity and
    presumably turn each stub method into a mapped column descriptor --
    behaviour is defined by the dbmodel package, not visible here.
    """

    # Single-column primary key.
    __primary_key__ = ['id']

    # FIELDS

    # Auto-increment surrogate primary key.
    @Int(pk=True, auto_increment=True, not_null=True, precision = 10, scale=0)
    def id(self): pass

    # Service display name.
    @String(not_null=True, max=45)
    def fin_srv_nome(self): pass

    # Service description.
    @String(not_null=True, max=255)
    def fin_srv_descricao(self): pass

    # Numeric service code.
    @Int(not_null=True, precision = 10, scale=0)
    def fin_srv_codigo(self): pass

    # Monetary value (2 decimal places).
    @Decimal(not_null=True, precision = 19, scale=2)
    def fin_srv_valor(self): pass

    # Foreign key into financeiros_servicos.
    @Int(fk=True, not_null=True, precision = 10, scale=0)
    def id_servico(self): pass

    # One-to-One

    # Related FinanceiroServico row, joined on id_servico -> id.
    @Object(name="FinanceiroServico", key="id", reference="id_servico", table="financeiros_servicos")
    def financeiros_servicos(self):pass
| 24.322581
| 98
| 0.725464
|
4a117119facf8ec83a747be7eaa59f91bb31a1e9
| 158
|
py
|
Python
|
about/admin.py
|
aduuna/edsa-ug
|
70fae3cd10ed0139c8a3523154e52db86fa7bb72
|
[
"MIT"
] | null | null | null |
about/admin.py
|
aduuna/edsa-ug
|
70fae3cd10ed0139c8a3523154e52db86fa7bb72
|
[
"MIT"
] | null | null | null |
about/admin.py
|
aduuna/edsa-ug
|
70fae3cd10ed0139c8a3523154e52db86fa7bb72
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Position, Person
# Register your models here.
admin.site.register(Position)
admin.site.register(Person)
| 22.571429
| 36
| 0.810127
|
4a1172064d7fcc4545c445e392461f6b3018e534
| 1,349
|
py
|
Python
|
var/spack/repos/builtin/packages/voropp/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/voropp/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/voropp/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Voropp(MakefilePackage):
"""Voro++ is a open source software library for the computation of the
Voronoi diagram, a widely-used tessellation that has applications in many
scientific fields."""
homepage = "http://math.lbl.gov/voro++/about.html"
url = "http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz"
variant('pic', default=True,
description='Position independent code')
version('0.4.6', sha256='ef7970071ee2ce3800daa8723649ca069dc4c71cc25f0f7d22552387f3ea437e')
def edit(self, spec, prefix):
filter_file(r'CC=g\+\+',
'CC={0}'.format(self.compiler.cxx),
'config.mk')
filter_file(r'PREFIX=/usr/local',
'PREFIX={0}'.format(self.prefix),
'config.mk')
# We can safely replace the default CFLAGS which are:
# CFLAGS=-Wall -ansi -pedantic -O3
cflags = ''
if '+pic' in spec:
cflags += self.compiler.cc_pic_flag
filter_file(r'CFLAGS=.*',
'CFLAGS={0}'.format(cflags),
'config.mk')
| 36.459459
| 95
| 0.610082
|
4a1172400043038d53cc5722c4902ea20be12c36
| 22,936
|
py
|
Python
|
examples/dmm/dmm.py
|
cweniger/pyro
|
ba104f07ca17865d2600e8765d920d549fcb3fbc
|
[
"MIT"
] | 10
|
2020-03-18T14:41:25.000Z
|
2021-07-04T08:49:57.000Z
|
examples/dmm/dmm.py
|
cweniger/pyro
|
ba104f07ca17865d2600e8765d920d549fcb3fbc
|
[
"MIT"
] | 19
|
2018-10-30T13:45:31.000Z
|
2019-09-27T14:16:57.000Z
|
examples/dmm/dmm.py
|
cweniger/pyro
|
ba104f07ca17865d2600e8765d920d549fcb3fbc
|
[
"MIT"
] | 5
|
2020-06-21T23:40:35.000Z
|
2021-11-09T16:18:42.000Z
|
"""
An implementation of a Deep Markov Model in Pyro based on reference [1].
This is essentially the DKS variant outlined in the paper. The primary difference
between this implementation and theirs is that in our version any KL divergence terms
in the ELBO are estimated via sampling, while they make use of the analytic formulae.
We also illustrate the use of normalizing flows in the variational distribution (in which
case analytic formulae for the KL divergences are in any case unavailable).
Reference:
[1] Structured Inference Networks for Nonlinear State Space Models [arXiv:1609.09869]
Rahul G. Krishnan, Uri Shalit, David Sontag
"""
import argparse
import time
from os.path import exists
import numpy as np
import torch
import torch.nn as nn
import polyphonic_data_loader as poly
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.distributions import InverseAutoregressiveFlow, TransformedDistribution
from pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO
from pyro.nn import AutoRegressiveNN
from pyro.optim import ClippedAdam
from util import get_logger
class Emitter(nn.Module):
"""
Parameterizes the bernoulli observation likelihood `p(x_t | z_t)`
"""
def __init__(self, input_dim, z_dim, emission_dim):
super(Emitter, self).__init__()
# initialize the three linear transformations used in the neural network
self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim)
self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim)
self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim)
# initialize the two non-linearities used in the neural network
self.relu = nn.ReLU()
def forward(self, z_t):
"""
Given the latent z at a particular time step t we return the vector of
probabilities `ps` that parameterizes the bernoulli distribution `p(x_t|z_t)`
"""
h1 = self.relu(self.lin_z_to_hidden(z_t))
h2 = self.relu(self.lin_hidden_to_hidden(h1))
ps = torch.sigmoid(self.lin_hidden_to_input(h2))
return ps
class GatedTransition(nn.Module):
"""
Parameterizes the gaussian latent transition probability `p(z_t | z_{t-1})`
See section 5 in the reference for comparison.
"""
def __init__(self, z_dim, transition_dim):
super(GatedTransition, self).__init__()
# initialize the six linear transformations used in the neural network
self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
self.lin_sig = nn.Linear(z_dim, z_dim)
self.lin_z_to_loc = nn.Linear(z_dim, z_dim)
# modify the default initialization of lin_z_to_loc
# so that it's starts out as the identity function
self.lin_z_to_loc.weight.data = torch.eye(z_dim)
self.lin_z_to_loc.bias.data = torch.zeros(z_dim)
# initialize the three non-linearities used in the neural network
self.relu = nn.ReLU()
self.softplus = nn.Softplus()
def forward(self, z_t_1):
"""
Given the latent `z_{t-1}` corresponding to the time step t-1
we return the mean and scale vectors that parameterize the
(diagonal) gaussian distribution `p(z_t | z_{t-1})`
"""
# compute the gating function
_gate = self.relu(self.lin_gate_z_to_hidden(z_t_1))
gate = torch.sigmoid(self.lin_gate_hidden_to_z(_gate))
# compute the 'proposed mean'
_proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1))
proposed_mean = self.lin_proposed_mean_hidden_to_z(_proposed_mean)
# assemble the actual mean used to sample z_t, which mixes a linear transformation
# of z_{t-1} with the proposed mean modulated by the gating function
loc = (1 - gate) * self.lin_z_to_loc(z_t_1) + gate * proposed_mean
# compute the scale used to sample z_t, using the proposed mean from
# above as input the softplus ensures that scale is positive
scale = self.softplus(self.lin_sig(self.relu(proposed_mean)))
# return loc, scale which can be fed into Normal
return loc, scale
class Combiner(nn.Module):
"""
Parameterizes `q(z_t | z_{t-1}, x_{t:T})`, which is the basic building block
of the guide (i.e. the variational distribution). The dependence on `x_{t:T}` is
through the hidden state of the RNN (see the PyTorch module `rnn` below)
"""
def __init__(self, z_dim, rnn_dim):
super(Combiner, self).__init__()
# initialize the three linear transformations used in the neural network
self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)
self.lin_hidden_to_loc = nn.Linear(rnn_dim, z_dim)
self.lin_hidden_to_scale = nn.Linear(rnn_dim, z_dim)
# initialize the two non-linearities used in the neural network
self.tanh = nn.Tanh()
self.softplus = nn.Softplus()
def forward(self, z_t_1, h_rnn):
"""
Given the latent z at at a particular time step t-1 as well as the hidden
state of the RNN `h(x_{t:T})` we return the mean and scale vectors that
parameterize the (diagonal) gaussian distribution `q(z_t | z_{t-1}, x_{t:T})`
"""
# combine the rnn hidden state with a transformed version of z_t_1
h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)
# use the combined hidden state to compute the mean used to sample z_t
loc = self.lin_hidden_to_loc(h_combined)
# use the combined hidden state to compute the scale used to sample z_t
scale = self.softplus(self.lin_hidden_to_scale(h_combined))
# return loc, scale which can be fed into Normal
return loc, scale
class DMM(nn.Module):
"""
This PyTorch Module encapsulates the model as well as the
variational distribution (the guide) for the Deep Markov Model
"""
def __init__(self, input_dim=88, z_dim=100, emission_dim=100,
transition_dim=200, rnn_dim=600, num_layers=1, rnn_dropout_rate=0.0,
num_iafs=0, iaf_dim=50, use_cuda=False):
super(DMM, self).__init__()
# instantiate PyTorch modules used in the model and guide below
self.emitter = Emitter(input_dim, z_dim, emission_dim)
self.trans = GatedTransition(z_dim, transition_dim)
self.combiner = Combiner(z_dim, rnn_dim)
# dropout just takes effect on inner layers of rnn
rnn_dropout_rate = 0. if num_layers == 1 else rnn_dropout_rate
self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, nonlinearity='relu',
batch_first=True, bidirectional=False, num_layers=num_layers,
dropout=rnn_dropout_rate)
# if we're using normalizing flows, instantiate those too
self.iafs = [InverseAutoregressiveFlow(AutoRegressiveNN(z_dim, [iaf_dim])) for _ in range(num_iafs)]
self.iafs_modules = nn.ModuleList(self.iafs)
# define a (trainable) parameters z_0 and z_q_0 that help define the probability
# distributions p(z_1) and q(z_1)
# (since for t = 1 there are no previous latents to condition on)
self.z_0 = nn.Parameter(torch.zeros(z_dim))
self.z_q_0 = nn.Parameter(torch.zeros(z_dim))
# define a (trainable) parameter for the initial hidden state of the rnn
self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim))
self.use_cuda = use_cuda
# if on gpu cuda-ize all PyTorch (sub)modules
if use_cuda:
self.cuda()
# the model p(x_{1:T} | z_{1:T}) p(z_{1:T})
def model(self, mini_batch, mini_batch_reversed, mini_batch_mask,
mini_batch_seq_lengths, annealing_factor=1.0):
# this is the number of time steps we need to process in the mini-batch
T_max = mini_batch.size(1)
# register all PyTorch (sub)modules with pyro
# this needs to happen in both the model and guide
pyro.module("dmm", self)
# set z_prev = z_0 to setup the recursive conditioning in p(z_t | z_{t-1})
z_prev = self.z_0.expand(mini_batch.size(0), self.z_0.size(0))
# we enclose all the sample statements in the model in a plate.
# this marks that each datapoint is conditionally independent of the others
with pyro.plate("z_minibatch", len(mini_batch)):
# sample the latents z and observed x's one time step at a time
for t in range(1, T_max + 1):
# the next chunk of code samples z_t ~ p(z_t | z_{t-1})
# note that (both here and elsewhere) we use poutine.scale to take care
# of KL annealing. we use the mask() method to deal with raggedness
# in the observed data (i.e. different sequences in the mini-batch
# have different lengths)
# first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1})
z_loc, z_scale = self.trans(z_prev)
# then sample z_t according to dist.Normal(z_loc, z_scale)
# note that we use the reshape method so that the univariate Normal distribution
# is treated as a multivariate Normal distribution with a diagonal covariance.
with poutine.scale(scale=annealing_factor):
z_t = pyro.sample("z_%d" % t,
dist.Normal(z_loc, z_scale)
.mask(mini_batch_mask[:, t - 1:t])
.to_event(1))
# compute the probabilities that parameterize the bernoulli likelihood
emission_probs_t = self.emitter(z_t)
# the next statement instructs pyro to observe x_t according to the
# bernoulli distribution p(x_t|z_t)
pyro.sample("obs_x_%d" % t,
dist.Bernoulli(emission_probs_t)
.mask(mini_batch_mask[:, t - 1:t])
.to_event(1),
obs=mini_batch[:, t - 1, :])
# the latent sampled at this time step will be conditioned upon
# in the next time step so keep track of it
z_prev = z_t
# the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution)
def guide(self, mini_batch, mini_batch_reversed, mini_batch_mask,
mini_batch_seq_lengths, annealing_factor=1.0):
# this is the number of time steps we need to process in the mini-batch
T_max = mini_batch.size(1)
# register all PyTorch (sub)modules with pyro
pyro.module("dmm", self)
# if on gpu we need the fully broadcast view of the rnn initial state
# to be in contiguous gpu memory
h_0_contig = self.h_0.expand(1, mini_batch.size(0), self.rnn.hidden_size).contiguous()
# push the observed x's through the rnn;
# rnn_output contains the hidden state at each time step
rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig)
# reverse the time-ordering in the hidden state and un-pack it
rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths)
# set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...)
z_prev = self.z_q_0.expand(mini_batch.size(0), self.z_q_0.size(0))
# we enclose all the sample statements in the guide in a plate.
# this marks that each datapoint is conditionally independent of the others.
with pyro.plate("z_minibatch", len(mini_batch)):
# sample the latents z one time step at a time
for t in range(1, T_max + 1):
# the next two lines assemble the distribution q(z_t | z_{t-1}, x_{t:T})
z_loc, z_scale = self.combiner(z_prev, rnn_output[:, t - 1, :])
# if we are using normalizing flows, we apply the sequence of transformations
# parameterized by self.iafs to the base distribution defined in the previous line
# to yield a transformed distribution that we use for q(z_t|...)
if len(self.iafs) > 0:
z_dist = TransformedDistribution(dist.Normal(z_loc, z_scale), self.iafs)
assert z_dist.event_shape == (self.z_q_0.size(0),)
assert z_dist.batch_shape == (len(mini_batch),)
else:
z_dist = dist.Normal(z_loc, z_scale)
assert z_dist.event_shape == ()
assert z_dist.batch_shape == (len(mini_batch), self.z_q_0.size(0))
# sample z_t from the distribution z_dist
with pyro.poutine.scale(scale=annealing_factor):
if len(self.iafs) > 0:
# in output of normalizing flow, all dimensions are correlated (event shape is not empty)
z_t = pyro.sample("z_%d" % t,
z_dist.mask(mini_batch_mask[:, t - 1]))
else:
# when no normalizing flow used, ".to_event(1)" indicates latent dimensions are independent
z_t = pyro.sample("z_%d" % t,
z_dist.mask(mini_batch_mask[:, t - 1:t])
.to_event(1))
# the latent sampled at this time step will be conditioned upon in the next time step
# so keep track of it
z_prev = z_t
# setup, training, and evaluation
def main(args):
# setup logging
log = get_logger(args.log)
log(args)
data = poly.load_data(poly.JSB_CHORALES)
training_seq_lengths = data['train']['sequence_lengths']
training_data_sequences = data['train']['sequences']
test_seq_lengths = data['test']['sequence_lengths']
test_data_sequences = data['test']['sequences']
val_seq_lengths = data['valid']['sequence_lengths']
val_data_sequences = data['valid']['sequences']
N_train_data = len(training_seq_lengths)
N_train_time_slices = float(torch.sum(training_seq_lengths))
N_mini_batches = int(N_train_data / args.mini_batch_size +
int(N_train_data % args.mini_batch_size > 0))
log("N_train_data: %d avg. training seq. length: %.2f N_mini_batches: %d" %
(N_train_data, training_seq_lengths.float().mean(), N_mini_batches))
# how often we do validation/test evaluation during training
val_test_frequency = 50
# the number of samples we use to do the evaluation
n_eval_samples = 1
# package repeated copies of val/test data for faster evaluation
# (i.e. set us up for vectorization)
def rep(x):
rep_shape = torch.Size([x.size(0) * n_eval_samples]) + x.size()[1:]
repeat_dims = [1] * len(x.size())
repeat_dims[0] = n_eval_samples
return x.repeat(repeat_dims).reshape(n_eval_samples, -1).transpose(1, 0).reshape(rep_shape)
# get the validation/test data ready for the dmm: pack into sequences, etc.
val_seq_lengths = rep(val_seq_lengths)
test_seq_lengths = rep(test_seq_lengths)
val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch(
torch.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences),
val_seq_lengths, cuda=args.cuda)
test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = poly.get_mini_batch(
torch.arange(n_eval_samples * test_data_sequences.shape[0]), rep(test_data_sequences),
test_seq_lengths, cuda=args.cuda)
# instantiate the dmm
dmm = DMM(rnn_dropout_rate=args.rnn_dropout_rate, num_iafs=args.num_iafs,
iaf_dim=args.iaf_dim, use_cuda=args.cuda)
# setup optimizer
adam_params = {"lr": args.learning_rate, "betas": (args.beta1, args.beta2),
"clip_norm": args.clip_norm, "lrd": args.lr_decay,
"weight_decay": args.weight_decay}
adam = ClippedAdam(adam_params)
# setup inference algorithm
elbo = JitTrace_ELBO() if args.jit else Trace_ELBO()
svi = SVI(dmm.model, dmm.guide, adam, loss=elbo)
# now we're going to define some functions we need to form the main training loop
# saves the model and optimizer states to disk
def save_checkpoint():
log("saving model to %s..." % args.save_model)
torch.save(dmm.state_dict(), args.save_model)
log("saving optimizer states to %s..." % args.save_opt)
adam.save(args.save_opt)
log("done saving model and optimizer checkpoints to disk.")
# loads the model and optimizer states from disk
def load_checkpoint():
assert exists(args.load_opt) and exists(args.load_model), \
"--load-model and/or --load-opt misspecified"
log("loading model from %s..." % args.load_model)
dmm.load_state_dict(torch.load(args.load_model))
log("loading optimizer states from %s..." % args.load_opt)
adam.load(args.load_opt)
log("done loading model and optimizer states.")
# prepare a mini-batch and take a gradient step to minimize -elbo
def process_minibatch(epoch, which_mini_batch, shuffled_indices):
if args.annealing_epochs > 0 and epoch < args.annealing_epochs:
# compute the KL annealing factor approriate for the current mini-batch in the current epoch
min_af = args.minimum_annealing_factor
annealing_factor = min_af + (1.0 - min_af) * \
(float(which_mini_batch + epoch * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
# compute which sequences in the training set we should grab
mini_batch_start = (which_mini_batch * args.mini_batch_size)
mini_batch_end = np.min([(which_mini_batch + 1) * args.mini_batch_size, N_train_data])
mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end]
# grab a fully prepped mini-batch using the helper function in the data loader
mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \
= poly.get_mini_batch(mini_batch_indices, training_data_sequences,
training_seq_lengths, cuda=args.cuda)
# do an actual gradient step
loss = svi.step(mini_batch, mini_batch_reversed, mini_batch_mask,
mini_batch_seq_lengths, annealing_factor)
# keep track of the training loss
return loss
# helper function for doing evaluation
def do_evaluation():
# put the RNN into evaluation mode (i.e. turn off drop-out if applicable)
dmm.rnn.eval()
# compute the validation and test loss n_samples many times
val_nll = svi.evaluate_loss(val_batch, val_batch_reversed, val_batch_mask,
val_seq_lengths) / torch.sum(val_seq_lengths)
test_nll = svi.evaluate_loss(test_batch, test_batch_reversed, test_batch_mask,
test_seq_lengths) / torch.sum(test_seq_lengths)
# put the RNN back into training mode (i.e. turn on drop-out if applicable)
dmm.rnn.train()
return val_nll, test_nll
# if checkpoint files provided, load model and optimizer states from disk before we start training
if args.load_opt != '' and args.load_model != '':
load_checkpoint()
#################
# TRAINING LOOP #
#################
times = [time.time()]
for epoch in range(args.num_epochs):
# if specified, save model and optimizer states to disk every checkpoint_freq epochs
if args.checkpoint_freq > 0 and epoch > 0 and epoch % args.checkpoint_freq == 0:
save_checkpoint()
# accumulator for our estimate of the negative log likelihood (or rather -elbo) for this epoch
epoch_nll = 0.0
# prepare mini-batch subsampling indices for this epoch
shuffled_indices = torch.randperm(N_train_data)
# process each mini-batch; this is where we take gradient steps
for which_mini_batch in range(N_mini_batches):
epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices)
# report training diagnostics
times.append(time.time())
epoch_time = times[-1] - times[-2]
log("[training epoch %04d] %.4f \t\t\t\t(dt = %.3f sec)" %
(epoch, epoch_nll / N_train_time_slices, epoch_time))
# do evaluation on test and validation data and report results
if val_test_frequency > 0 and epoch > 0 and epoch % val_test_frequency == 0:
val_nll, test_nll = do_evaluation()
log("[val/test epoch %04d] %.4f %.4f" % (epoch, val_nll, test_nll))
# parse command-line arguments and execute the main method
if __name__ == '__main__':
assert pyro.__version__.startswith('0.3.4')
parser = argparse.ArgumentParser(description="parse args")
parser.add_argument('-n', '--num-epochs', type=int, default=5000)
parser.add_argument('-lr', '--learning-rate', type=float, default=0.0003)
parser.add_argument('-b1', '--beta1', type=float, default=0.96)
parser.add_argument('-b2', '--beta2', type=float, default=0.999)
parser.add_argument('-cn', '--clip-norm', type=float, default=10.0)
parser.add_argument('-lrd', '--lr-decay', type=float, default=0.99996)
parser.add_argument('-wd', '--weight-decay', type=float, default=2.0)
parser.add_argument('-mbs', '--mini-batch-size', type=int, default=20)
parser.add_argument('-ae', '--annealing-epochs', type=int, default=1000)
parser.add_argument('-maf', '--minimum-annealing-factor', type=float, default=0.2)
parser.add_argument('-rdr', '--rnn-dropout-rate', type=float, default=0.1)
parser.add_argument('-iafs', '--num-iafs', type=int, default=0)
parser.add_argument('-id', '--iaf-dim', type=int, default=100)
parser.add_argument('-cf', '--checkpoint-freq', type=int, default=0)
parser.add_argument('-lopt', '--load-opt', type=str, default='')
parser.add_argument('-lmod', '--load-model', type=str, default='')
parser.add_argument('-sopt', '--save-opt', type=str, default='')
parser.add_argument('-smod', '--save-model', type=str, default='')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--jit', action='store_true')
parser.add_argument('-l', '--log', type=str, default='dmm.log')
args = parser.parse_args()
main(args)
| 49.645022
| 115
| 0.650113
|
4a11737af2ce14dacb94be951b8e3bb38eeeebec
| 252
|
py
|
Python
|
training/mtcnn_config.py
|
thaiph99/MTCNN
|
d6acfcdba972beb47751d63a34f3cf168d0488d2
|
[
"MIT"
] | 50
|
2017-09-18T16:11:01.000Z
|
2022-03-28T15:54:04.000Z
|
training/mtcnn_config.py
|
thaiph99/MTCNN
|
d6acfcdba972beb47751d63a34f3cf168d0488d2
|
[
"MIT"
] | 7
|
2017-09-18T16:11:36.000Z
|
2018-05-05T01:39:24.000Z
|
training/mtcnn_config.py
|
thaiph99/MTCNN
|
d6acfcdba972beb47751d63a34f3cf168d0488d2
|
[
"MIT"
] | 22
|
2018-03-22T06:31:14.000Z
|
2020-03-10T07:20:04.000Z
|
#coding:utf-8
from easydict import EasyDict as edict
config = edict()
config.BATCH_SIZE = 384
config.CLS_OHEM = True
config.CLS_OHEM_RATIO = 0.7
config.BBOX_OHEM = False
config.BBOX_OHEM_RATIO = 0.7
config.EPS = 1e-14
config.LR_EPOCH = [6, 14, 20]
| 16.8
| 38
| 0.746032
|
4a117444d278335cb1e43fb1e5ee405647c2a97a
| 685
|
py
|
Python
|
app/core/migrations/0003_ingredient.py
|
nabeelmehmood/recipe-app-api
|
2bca83f50e291a5c484c5ff8aef2122ba9c6492a
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
nabeelmehmood/recipe-app-api
|
2bca83f50e291a5c484c5ff8aef2122ba9c6492a
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
nabeelmehmood/recipe-app-api
|
2bca83f50e291a5c484c5ff8aef2122ba9c6492a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-02-28 08:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.541667
| 118
| 0.617518
|
4a11759e6d223573d843c45e825ba3d11be7ae10
| 1,776
|
py
|
Python
|
Mundo3/Desafio092.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
Mundo3/Desafio092.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
Mundo3/Desafio092.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
# Desasfio092 esse programa le: o nome, ano de nascimento CTPS. no dicionario vai guardar nome, idade, CTPS, ano de contratação, e ano que ira se aposentar.
from datetime import date
cadastro_unico = []
cadastro_temporario = {}
#atual = date.today().year
while True:
cadastro_temporario['nome'] = str(input('Nome: '))
# ano de nacimento vai calcular e retornar a idade aproximada da pessoa
#cadastro_temporario['ano_nacimento'] = str(input('data de nascimento: '))
ano_temporario = int(input('ano de nascimento: '))
cadastro_temporario['idade'] = date.today().year - ano_temporario
# se o numero for 0, o usuario nao trabalha. se for diferente. peça o ano da contratação e salario. retorna tempo para aposentar.
cadastro_temporario['numero_CTPS'] = int(input('Numero da Carteira de Trabalho ( digite 0 caso nao trabalhe): '))
if cadastro_temporario['numero_CTPS'] != 0:
cadastro_temporario['ano_contratacao'] = int(input('Ano de contratação: '))
# aqui iria o salario, mas optei por nao colocar.
# poderia fazer com que idade ele iria se aposertra, mas prefiri fazer o ano, para ser mais preciso.
ano_aposentadoria_temp = 35 - (date.today().year - cadastro_temporario['ano_contratacao'])
ano_aposentadoria = date.today().year + ano_aposentadoria_temp
if ano_aposentadoria <= date.today().year:
cadastro_temporario['vai_aposentar'] = 'aposentado'
if ano_aposentadoria > date.today().year:
cadastro_temporario['vai_aposentar'] = ano_aposentadoria
print(cadastro_temporario)
break
cadastro_unico.append(cadastro_temporario.copy())
cadastro_temporario.clear()
for c in cadastro_unico:
for k,d in c.items():
print(k,d)
#print(cadastro_unico)
| 45.538462
| 156
| 0.712275
|
4a11762dd8532188555ccf405132f24eb8760d37
| 1,512
|
py
|
Python
|
temboardagent/postgres.py
|
pierrehilbert/temboard-agent
|
40c670dd607c6fea651630a353ebe639f4ba661e
|
[
"PostgreSQL"
] | null | null | null |
temboardagent/postgres.py
|
pierrehilbert/temboard-agent
|
40c670dd607c6fea651630a353ebe639f4ba661e
|
[
"PostgreSQL"
] | null | null | null |
temboardagent/postgres.py
|
pierrehilbert/temboard-agent
|
40c670dd607c6fea651630a353ebe639f4ba661e
|
[
"PostgreSQL"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from .spc import connector
from .spc import error as PostgresError
from .errors import UserError
class ConnectionManager(object):
def __init__(self, postgres):
self.postgres = postgres
def __enter__(self):
self.conn = connector(
host=self.postgres.host,
port=self.postgres.port,
user=self.postgres.user,
password=self.postgres.password,
database=self.postgres.dbname
)
try:
self.conn.connect()
except PostgresError as e:
raise UserError("Failed to connect to Postgres: %s" % (e,))
return self.conn
def __exit__(self, *a):
self.conn.close()
class Postgres(object):
def __init__(
self, host=None, port=5432, user=None, password=None, dbname=None,
**kw):
self.host = host
self.port = port
self.user = user
self.password = password
self.dbname = dbname
self._server_version = None
def __repr__(self):
return '<%s on %s@%s:%s/%s>' % (
self.__class__.__name__,
self.user, self.host, self.port, self.dbname,
)
def connect(self):
return ConnectionManager(self)
def fetch_version(self):
if self._server_version is None:
with self.connect() as conn:
self._server_version = conn.get_pg_version()
return self._server_version
| 26.526316
| 78
| 0.594577
|
4a1176370c3ee01ea5f92526c2405a08b2c25bc6
| 2,793
|
py
|
Python
|
PyQuante/GridPoint.py
|
certik/pyquante
|
f5cae27f519b1c1b70afbebfe8b5c83cb4b3c2a6
|
[
"DOC"
] | null | null | null |
PyQuante/GridPoint.py
|
certik/pyquante
|
f5cae27f519b1c1b70afbebfe8b5c83cb4b3c2a6
|
[
"DOC"
] | null | null | null |
PyQuante/GridPoint.py
|
certik/pyquante
|
f5cae27f519b1c1b70afbebfe8b5c83cb4b3c2a6
|
[
"DOC"
] | 1
|
2022-01-07T19:20:27.000Z
|
2022-01-07T19:20:27.000Z
|
"""\
GridPoint.py: A class to hold grid point data
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
from math import sqrt
from NumWrap import zeros,reshape,dot,array,matrixmultiply
class GridPoint:
def __init__(self,x,y,z,w=1.0,**opts):
self.do_grad_dens = opts.get('do_grad_dens',False)
self._x = float(x)
self._y = float(y)
self._z = float(z)
self._w = float(w)
self.xyz = array((self._x,self._y,self._z),'d')
self._r = sqrt(self._x*self._x+self._y*self._y+self._z*self._z)
self._gamma = None
self._dens = 0
self._dens0 = None
self._grad = None
self.bfs = []
return
def xyzw(self): return (self._x,self._y,self._z,self._w)
def dens(self): return self._dens
def gamma(self): return self._gamma
def grad(self): return self._grad
def weight(self): return self._w
def nbf(self): return len(self.bfs)
def r(self): return self._r
def setweight(self,w): self._w = w
def zeroweight(self): self._w = 0
def set_bf_amps(self,bfs):
x,y,z,w = self.xyzw()
nbf = len(bfs)
self.bfs = zeros(nbf,'d')
for i in range(nbf):
self.bfs[i] = bfs[i].amp(x,y,z)
# This *if* statement is potentially slow. If it becomes
# a factor, pull it up to AtomicGrids and have two
# explicit cases here.
if self.do_grad_dens:
self.bfgrads = zeros((nbf,3),'d')
for i in range(nbf):
self.bfgrads[i,:] = bfs[i].grad(x,y,z)
return
def setdens(self,D):
self._dens = 2*dot(self.bfs,matrixmultiply(D,self.bfs))
# This *if* statement is potentially slow. If it becomes
# a factor, pull it up to AtomicGrids and have two
# explicit cases here.
if self.do_grad_dens:
#gx = 2*dot(self.bfs,matrixmultiply(D,self.bfgrads[:,0])) + \
# 2*dot(self.bfgrads[:,0],matrixmultiply(D,self.bfs))
#gy = 2*dot(self.bfs,matrixmultiply(D,self.bfgrads[:,1])) + \
# 2*dot(self.bfgrads[:,1],matrixmultiply(D,self.bfs))
#gz = 2*dot(self.bfs,matrixmultiply(D,self.bfgrads[:,2])) + \
# 2*dot(self.bfgrads[:,2],matrixmultiply(D,self.bfs))
#self._grad = array((gx,gy,gz))
self._grad = 2*dot(self.bfs.T,matrixmultiply(D,self.bfgrads)) +\
2*dot(self.bfgrads.T,matrixmultiply(D,self.bfs))
self._gamma = dot(self._grad,self._grad)
return
| 36.272727
| 76
| 0.588256
|
4a1176e1a256fe0632b6ab69175271cf738c8fd2
| 12,139
|
py
|
Python
|
simulation-ros/src/turtlebot2i/turtlebot2i_safety/tools/Labeling_tools/3ApplyRulesOnData/label_dataset.py
|
EricssonResearch/scott-eu
|
aad7fd2f767a3c5e7d89223a593fd979ad596db3
|
[
"Apache-2.0"
] | 19
|
2017-06-29T07:41:26.000Z
|
2021-11-03T18:48:48.000Z
|
simulation-ros/src/turtlebot2i/turtlebot2i_safety/tools/Labeling_tools/3ApplyRulesOnData/label_dataset.py
|
EricssonResearch/scott-eu
|
aad7fd2f767a3c5e7d89223a593fd979ad596db3
|
[
"Apache-2.0"
] | 175
|
2017-06-29T09:37:43.000Z
|
2021-07-09T12:55:28.000Z
|
simulation-ros/src/turtlebot2i/turtlebot2i_safety/tools/Labeling_tools/3ApplyRulesOnData/label_dataset.py
|
EricssonResearch/scott-eu
|
aad7fd2f767a3c5e7d89223a593fd979ad596db3
|
[
"Apache-2.0"
] | 8
|
2017-10-31T08:53:12.000Z
|
2021-07-21T06:14:43.000Z
|
#!/usr/bin/env python
'''-------------------------------------------
--We will not label the data set in this way--
-------------------------------------------'''
import os
import re
import csv
''' #Disable images
import matplotlib.pyplot as plt # plt to show img
#%matplotlib inline
plt.ion() # Don't forget this
plt.axis('off')
import matplotlib.image as mpimg # mpimg to read img
'''
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
import time
def init_Var():
    """Initialise the module-level state used throughout the labeling run."""
    print("Init Var")
    global IZW, sample_number
    IZW = 0.4          # safety-zone size in metres (static robot)
    sample_number = 0  # running count of successfully labeled samples
def init_Path(): #open 'labels' folder and create the aggregated result file
    """Locate the unsupervised-data folder and open the output CSV.

    Publishes three globals: ``labels_folder`` (~/labels), ``result_file``
    (the open supervised_data.csv handle, closed later by ``finish``) and
    ``myWriter`` (a csv.writer on that handle). Raises if ~/labels is
    missing, since there would be nothing to label.
    """
    global labels_folder
    #All files will be saved to /home/usr/labels
    labels_folder = os.path.join(os.path.expanduser('~'),'labels')
    global result_file
    if not os.path.exists(labels_folder):
        raise Exception("Label folder doesn't exist! Make sure you already have unsupervised data.")
    else:
        print "Label folder exists"
    # 'wb' is the correct csv mode for this Python 2 script.
    result_file = open(labels_folder+"/supervised_data.csv",'wb')
    global myWriter
    myWriter = csv.writer(result_file)
    # Header row; the object name column is deliberately omitted.
    myWriter.writerow(["Obj Type","Obj Distance","Obj Orientation","Obj Direction","Obj Speed","Obj Risk"])# Remove "Obj Name"
    #result_file.close() # Don't forget to close it in the finishing part
    print "Result csv file created!"
def init_RegEx():
    """Publish the filename patterns used to classify files in each label folder.

    ``data_file_pattern`` matches per-object data CSVs and captures the stem;
    ``img_file_pattern`` matches V-REP screenshots (listed but never opened).
    """
    global data_file_pattern, img_file_pattern
    # Raw strings so the regex escapes (\w, \d, \.) reach the re module
    # verbatim instead of relying on Python passing unknown string escapes
    # through unchanged (a DeprecationWarning on modern interpreters).
    data_file_pattern = r"(\w+\_?\w+#?\d*)\.csv"
    img_file_pattern = r"(Vrep_shot\d*)\.png"
def init_rule_based_system():
    """Build (or load from disk) the fuzzy risk-assessment control system.

    Defines the input universes and membership functions (object type,
    distance, orientation, direction, speed) and the risk output, then
    either unpickles a previously built ControlSystem from ``ra_full.data``
    or constructs one from the ``assessment_rules`` rule base and caches it.
    Publishes the globals ``ra_fls`` and ``risk_assessment_instance``.
    """
    # Antecedent/Consequent objects hold universe variables and membership
    # functions.  If the steps were large, smooth MFs would degrade toward
    # triangular ones, hence the fine granularity.
    step_meter = 0.02
    step_meter_per_second = 0.02
    step_risk = 0.05
    range_type = np.arange(0, 2+1, 1)
    range_degree = np.arange(-180, 180+1, 1.0)  # -180..180 deg for direction and orientation
    range_meter = np.arange(0, 3.0+step_meter, step_meter)  # 0..3 m for distance
    range_meter_per_second = np.arange(0, 2.0+step_meter_per_second, step_meter_per_second)  # 0..2 m/s for speed
    range_risk = np.arange(0, 5+step_risk, step_risk)  # risk levels 0..4

    object_type = ctrl.Antecedent(range_type, 'type')
    object_distance = ctrl.Antecedent(range_meter, 'distance')
    object_orientation = ctrl.Antecedent(range_degree, 'orientation')  # -180..180 deg
    object_direction = ctrl.Antecedent(range_degree, 'direction')      # -180..180 deg
    object_speed = ctrl.Antecedent(range_meter_per_second, 'speed')
    object_risk = ctrl.Consequent(range_risk, 'risk')

    # Type
    object_type['StaObj'] = fuzz.trimf(range_type, [0, 0, 0.1])
    object_type['DynObj'] = fuzz.trimf(range_type, [0.9, 1, 1.1])
    object_type['Human'] = fuzz.trimf(range_type, [1.9, 2, 2])
    # Distance, scaled by the safety-zone size IZW
    object_distance['Near'] = fuzz.trapmf(range_meter, [0, 0, IZW, 2*IZW])
    object_distance['Medium'] = fuzz.trimf(range_meter, [IZW, 2*IZW, 4*IZW])
    object_distance['Far'] = fuzz.trapmf(range_meter, [2*IZW, 4*IZW, 3, 3])
    #object_distance.view()
    # Direction: the rear sector wraps around +/-180, so it is the fuzzy OR
    # of two trapezoids at either end of the universe.
    rear_d_p2 = fuzz.trapmf(range_degree, [-180, -180, -135, -90])
    object_direction['Right'] = fuzz.trimf(range_degree, [-135, -90, -45])
    object_direction['FrontRight'] = fuzz.trimf(range_degree, [-90, -45, 0])
    object_direction['Front'] = fuzz.trimf(range_degree, [-45, 0, 45])
    object_direction['FrontLeft'] = fuzz.trimf(range_degree, [0, 45, 90])
    object_direction['Left'] = fuzz.trimf(range_degree, [45, 90, 135])
    rear_d_p1 = fuzz.trapmf(range_degree, [90, 135, 180, 180])
    _, object_direction['BigRear'] = fuzz.fuzzy_or(range_degree, rear_d_p1, range_degree, rear_d_p2)
    print("init_fls_common_part")
    #object_direction.view()
    # Speed
    object_speed['Slow'] = fuzz.trapmf(range_meter_per_second, [0, 0, 0.5, 1.0])
    object_speed['Medium'] = fuzz.trapmf(range_meter_per_second, [0.5, 1.0, 1.0, 1.5])
    object_speed['Fast'] = fuzz.trimf(range_meter_per_second, [1.0, 1.5, 1.5])
    #object_speed.view()
    # Orientation (rear again wraps around +/-180)
    object_orientation['Front'] = fuzz.trimf(range_degree, [-45, 0, 45])
    object_orientation['FrontLeft'] = fuzz.trimf(range_degree, [0, 45, 90])
    object_orientation['Left'] = fuzz.trimf(range_degree, [45, 90, 135])
    object_orientation['RearLeft'] = fuzz.trimf(range_degree, [90, 135, 180])
    rear_p1 = fuzz.trimf(range_degree, [135, 180, 180])
    rear_p2 = fuzz.trimf(range_degree, [-180, -180, -135])
    _, object_orientation['Rear'] = fuzz.fuzzy_or(range_degree, rear_p1, range_degree, rear_p2)
    object_orientation['RearRight'] = fuzz.trimf(range_degree, [-180, -135, -90])
    object_orientation['Right'] = fuzz.trimf(range_degree, [-135, -90, -45])
    object_orientation['FrontRight'] = fuzz.trimf(range_degree, [-90, -45, 0])
    #object_orientation.view()
    # Risk
    object_risk['VeryLow'] = fuzz.trimf(range_risk, [0, 0, 1])
    object_risk['Low'] = fuzz.trimf(range_risk, [0, 1, 2])
    object_risk['Medium'] = fuzz.trimf(range_risk, [1, 2, 3])
    object_risk['High'] = fuzz.trimf(range_risk, [2, 3, 4])
    object_risk['VeryHigh'] = fuzz.trimf(range_risk, [3, 4, 4])

    # Control-system creation: load the pickled system when available,
    # otherwise build it from the rule base (slow) and cache it to disk.
    global ra_fls
    import cPickle as pickle
    fls_name = "ra_full.data"
    if os.path.exists(fls_name):
        print("FLS exists!")
        with open(fls_name, 'rb') as f:  # BUG fix: handle was never closed
            ra_fls = pickle.load(f)
    else:
        print("Init FLS")
        from assessment_rules import rule_list_generator
        assessment_rule_list = rule_list_generator(object_type, object_distance, object_direction, object_speed, object_orientation, object_risk)
        ra_fls = ctrl.ControlSystem(assessment_rule_list)
        with open(fls_name, 'wb') as f:  # BUG fix: original had `f.close` without (), leaking the handle
            pickle.dump(ra_fls, f)
    # A ControlSystemSimulation applies the controller to one concrete set
    # of inputs; a single shared instance is reused for every sample.
    global risk_assessment_instance
    risk_assessment_instance = ctrl.ControlSystemSimulation(ra_fls)
def add_label_with_complex_rules(object_type,object_distance,object_orientation,object_direction,object_speed): #Add risk label
    """Run the fuzzy risk-assessment system on one object and return the risk.

    Uses the module-global ``risk_assessment_instance`` created by
    ``init_rule_based_system``.  Speed is clamped to 1.5 m/s before being fed
    in; inputs outside their universes make ``compute`` raise ValueError,
    which the caller (``label_dataset``) catches and reports.
    """
    global risk_assessment_instance
    time_previous = time.time()
    """
    We can now simulate our control system by simply specifying the inputs
    and calling the ``compute`` method.
    """
    # Pass inputs to the ControlSystem using Antecedent labels with Pythonic API
    risk_assessment_instance.input['type'] = object_type
    risk_assessment_instance.input['distance'] = object_distance
    risk_assessment_instance.input['direction'] = object_direction
    # Clamp to the top of the speed membership functions (Fast peaks at 1.5).
    if object_speed>1.5:
        object_speed = 1.5
    risk_assessment_instance.input['speed'] = object_speed
    risk_assessment_instance.input['orientation'] = object_orientation
    # Crunch the numbers
    risk_assessment_instance.compute()
    run_time = time.time() - time_previous
    #object_risk.view(sim=risk_assessment_instance) #visualize the result
    print 'calculate a instance=',run_time,'sec'
    """
    Once computed, we can view the result as well as visualize it.
    """
    return risk_assessment_instance.output['risk']
def read_csv(file_path):
    """Parse one unsupervised-sample CSV and return its data row.

    Returns (name, type, distance, orientation, direction, speed) taken
    from the second line of the file; the first line is a header.  The
    trailing fields (hash value, robot speed and the stored label) are
    ignored -- the label in the file is not trusted.
    """
    with open(file_path,'rb') as data_file:
        rows = list(csv.reader(data_file))
    record = rows[1]  # row 0 is the header
    name = record[0]
    numeric = [float(field) for field in record[1:6]]
    obj_type, distance, orientation, direction, speed = numeric
    return name, obj_type, distance, orientation, direction, speed
def add_label_result_to_file(object_name,object_type,object_distance,object_orientation,object_direction,object_speed,risk_label):
    """Append one supervised sample to the global ``myWriter`` CSV writer.

    ``object_name`` is accepted for interface compatibility but is not
    written out, matching the header produced by ``init_Path``.
    """
    row = [int(object_type), object_distance, object_orientation,
           object_direction, object_speed, risk_label]
    myWriter.writerow(row)
    print("One new line is added")
def finish():
    # Flush and close the aggregated CSV opened by init_Path so the
    # supervised labels are actually persisted to disk.
    result_file.close()
    print "Labeling finished!"
def label_dataset():
folder_list = os.listdir(labels_folder)
print "We have",len(folder_list)-1,"folders"
print "===================================================================="
for one_folder in folder_list: #NOTE: Not in order 0-9
#print folder_list
folder_path = os.path.join(labels_folder, one_folder)
print "One folder:",folder_path # Path
if one_folder == "supervised_data.csv":
folder_list.remove(one_folder)
#print folder_list
else:
# Read files
files = os.listdir(folder_path)
#NO need to open image
for one_file in files:
matchObj_img = re.match(img_file_pattern, one_file,re.M|re.I) #It Works
if matchObj_img:
print "The picture ",one_file," is here!"
'''
file_path = os.path.join(folder_path, one_file)
scene_img = mpimg.imread(file_path)
plt.imshow(scene_img)
#plt.show() # Since we have plt.ion, this is useless.
#raw_input()
'''
files.remove(one_file)
for one_file in files:
matchObj_csv = re.match(data_file_pattern, one_file,re.M|re.I)
print "--------------------------------------------------------------------"
print "--------------------------------------------------------------------"
#print one_file
if matchObj_csv:
file_path = os.path.join(folder_path, one_file)
print "We have a data file",one_file,". Now label it! "
object_name,object_type,object_distance,object_orientation,object_direction,object_speed=read_csv(file_path)
try:
risk_label = add_label_with_complex_rules(object_type,object_distance,object_orientation,object_direction,object_speed)
except ValueError:
print "Value error with:",object_type,object_distance,object_orientation,object_direction,object_speed
else:
#print "Risk = ",risk_label
add_label_result_to_file(object_name,object_type,object_distance,object_orientation,object_direction,object_speed,risk_label)
global sample_number
sample_number = sample_number+1
else:
#raise Exception("No match! What is this file?", one_file)
print "No match! What is this file?", one_file
print "--------------------------------------------------------------------"
print "We have ",sample_number,"samples"
""" Main program """
if __name__ == "__main__":
init_Var()
init_Path()
init_RegEx()
init_rule_based_system()
label_dataset()
finish()
| 43.046099
| 149
| 0.629871
|
4a117899f1d841d1e5671a721c66e6d805699216
| 3,047
|
py
|
Python
|
tests/protocol/test_protocol_util.py
|
koifans/WALinuxAgent
|
236c6c12d89757589411651ae015640d371251a4
|
[
"Apache-2.0"
] | 1
|
2020-11-23T10:48:28.000Z
|
2020-11-23T10:48:28.000Z
|
tests/protocol/test_protocol_util.py
|
koifans/WALinuxAgent
|
236c6c12d89757589411651ae015640d371251a4
|
[
"Apache-2.0"
] | 1
|
2019-06-06T13:24:55.000Z
|
2019-06-06T13:24:55.000Z
|
tests/protocol/test_protocol_util.py
|
koifans/WALinuxAgent
|
236c6c12d89757589411651ae015640d371251a4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
from tests.tools import *
from azurelinuxagent.common.exception import *
from azurelinuxagent.common.protocol import get_protocol_util, \
TAG_FILE_NAME
@patch("time.sleep")
class TestProtocolUtil(AgentTestCase):
@patch("azurelinuxagent.common.protocol.util.MetadataProtocol")
@patch("azurelinuxagent.common.protocol.util.WireProtocol")
def test_detect_protocol(self, WireProtocol, MetadataProtocol, _):
WireProtocol.return_value = MagicMock()
MetadataProtocol.return_value = MagicMock()
protocol_util = get_protocol_util()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = "foo.bar"
#Test wire protocol is available
protocol = protocol_util.get_protocol()
self.assertEquals(WireProtocol.return_value, protocol)
#Test wire protocol is not available
protocol_util.clear_protocol()
WireProtocol.return_value.detect.side_effect = ProtocolError()
protocol = protocol_util.get_protocol()
self.assertEquals(MetadataProtocol.return_value, protocol)
#Test no protocol is available
protocol_util.clear_protocol()
WireProtocol.return_value.detect.side_effect = ProtocolError()
MetadataProtocol.return_value.detect.side_effect = ProtocolError()
self.assertRaises(ProtocolError, protocol_util.get_protocol)
def test_detect_protocol_by_file(self, _):
protocol_util = get_protocol_util()
protocol_util._detect_wire_protocol = Mock()
protocol_util._detect_metadata_protocol = Mock()
tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME)
#Test tag file doesn't exist
protocol_util.get_protocol(by_file=True)
protocol_util._detect_wire_protocol.assert_any_call()
protocol_util._detect_metadata_protocol.assert_not_called()
#Test tag file exists
protocol_util.clear_protocol()
protocol_util._detect_wire_protocol.reset_mock()
protocol_util._detect_metadata_protocol.reset_mock()
with open(tag_file, "w+") as tag_fd:
tag_fd.write("")
protocol_util.get_protocol(by_file=True)
protocol_util._detect_metadata_protocol.assert_any_call()
protocol_util._detect_wire_protocol.assert_not_called()
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| 37.158537
| 74
| 0.72235
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.