hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fb75ee533a5364bc88e6a20902d87f866ac6ba | 1,456 | py | Python | datasets.py | bartwojcik/cifar-template | c603d9120f54619863b7418574158572c2d5f659 | [
"MIT"
] | null | null | null | datasets.py | bartwojcik/cifar-template | c603d9120f54619863b7418574158572c2d5f659 | [
"MIT"
] | null | null | null | datasets.py | bartwojcik/cifar-template | c603d9120f54619863b7418574158572c2d5f659 | [
"MIT"
] | null | null | null | from torchvision import datasets, transforms
def get_mnist(dataset_path):
    """Load the MNIST train/eval/test datasets.

    Args:
        dataset_path: A ``pathlib.Path`` to the dataset cache directory;
            it is resolved to an absolute path before use.

    Returns:
        A ``(train_data, train_eval_data, test_data)`` triple.  The eval
        view of the training set is the very same object as the training
        set, since MNIST uses no training-time augmentation here.
    """
    root = str(dataset_path.resolve())
    # Map pixel values from [0, 1] to [-1, 1].
    preprocess = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mean=(0.5, ), std=(0.5, ))]
    )
    train_split = datasets.MNIST(root, train=True, download=True, transform=preprocess)
    test_split = datasets.MNIST(root, train=False, download=True, transform=preprocess)
    return train_split, train_split, test_split
def get_cifar10(dataset_path, proper_normalization=True):
    """Load the CIFAR-10 train/eval/test datasets.

    Args:
        dataset_path: A ``pathlib.Path`` to the dataset cache directory.
        proper_normalization: When True, normalize with per-channel
            CIFAR-10 statistics; otherwise use a plain 0.5 shift/scale.

    Returns:
        ``(train_data, train_eval_data, test_data)``.  The training split
        applies random crop/flip augmentation; the eval copy of the
        training split and the test split only normalize.
    """
    if proper_normalization:
        # Per-channel mean/std of the CIFAR-10 training images.
        mean = (0.4914, 0.4822, 0.4465)
        std = (0.247, 0.243, 0.262)
    else:
        mean = (0.5, 0.5, 0.5)
        std = (0.5, 0.5, 0.5)
    root = str(dataset_path.resolve())
    normalize_only = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mean, std)]
    )
    augment_and_normalize = transforms.Compose(
        [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ]
    )
    train_data = datasets.CIFAR10(
        root, train=True, download=True, transform=augment_and_normalize
    )
    train_eval_data = datasets.CIFAR10(
        root, train=True, download=True, transform=normalize_only
    )
    test_data = datasets.CIFAR10(
        root, train=False, download=True, transform=normalize_only
    )
    return train_data, train_eval_data, test_data
| 40.444444 | 101 | 0.699863 | from torchvision import datasets, transforms
def get_mnist(dataset_path):
path_str = str(dataset_path.resolve())
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, ), std=(0.5, )),
])
train_data = datasets.MNIST(path_str, train=True, download=True, transform=transform)
train_eval_data = train_data
test_data = datasets.MNIST(path_str, train=False, download=True, transform=transform)
return train_data, train_eval_data, test_data
def get_cifar10(dataset_path, proper_normalization=True):
if proper_normalization:
mean, std = (0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.262)
else:
mean, std = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
path_str = str(dataset_path.resolve())
transform_eval = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean, std),
])
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std),
])
train_data = datasets.CIFAR10(path_str, train=True, download=True, transform=transform_train)
train_eval_data = datasets.CIFAR10(path_str, train=True, download=True, transform=transform_eval)
test_data = datasets.CIFAR10(path_str, train=False, download=True, transform=transform_eval)
return train_data, train_eval_data, test_data
| true | true |
f7fb778e596b28cd0f529e0a5ff7b9e43b561a3c | 1,331 | py | Python | brian2/tests/features/monitors.py | CharleeSF/brian2 | d2be1ed33a8ac51b1891f89a2544123a937c43ff | [
"BSD-2-Clause"
] | 2 | 2020-03-20T13:30:19.000Z | 2020-03-20T13:30:57.000Z | brian2/tests/features/monitors.py | CharleeSF/brian2 | d2be1ed33a8ac51b1891f89a2544123a937c43ff | [
"BSD-2-Clause"
] | 13 | 2015-10-05T15:33:42.000Z | 2015-11-18T08:31:42.000Z | brian2/tests/features/monitors.py | moritzaugustin/brian2 | d98ea0cb4af0c9426e71c8ee7659ddb13aea8593 | [
"BSD-2-Clause"
] | null | null | null | '''
Check that various monitors work correctly.
'''
from brian2 import *
from brian2.tests.features import FeatureTest, InaccuracyError
class SpikeMonitorTest(FeatureTest):
    """Check that a SpikeMonitor correctly records spike indices and times."""

    category = "Monitors"
    name = "SpikeMonitor"
    tags = ["NeuronGroup", "run",
            "SpikeMonitor"]

    def run(self):
        # Drive N leaky neurons (dv/dt = (I-v)/tau) with constant input
        # currents spread over [0, 2]; neurons whose I exceeds the v>1
        # threshold will fire repeatedly during the 100 ms simulation.
        N = 100
        tau = 10*ms
        eqs = '''
        dv/dt = (I-v)/tau : 1
        I : 1
        '''
        self.G = G = NeuronGroup(N, eqs, threshold='v>1', reset='v=0')
        G.I = linspace(0, 2, N)
        self.M = M = SpikeMonitor(G)
        run(100*ms)

    def results(self):
        # Copies of the recorded (neuron index, spike time) arrays.
        return {'i': self.M.i[:], 't': self.M.t[:]}

    # Results are dicts of arrays, so compare them element-wise.
    compare = FeatureTest.compare_arrays
class StateMonitorTest(FeatureTest):
    """Check that a StateMonitor correctly records a state variable over time."""

    category = "Monitors"
    name = "StateMonitor"
    tags = ["NeuronGroup", "run",
            "StateMonitor"]

    def run(self):
        # All input currents exceed the v>1 threshold (I in [1.1, 2]),
        # so every neuron fires; v is recorded for all 10 neurons.
        N = 10
        tau = 10*ms
        eqs = '''
        dv/dt = (I-v)/tau : 1
        I : 1
        '''
        self.G = G = NeuronGroup(N, eqs, threshold='v>1', reset='v=0.1')
        G.v = 0.1
        G.I = linspace(1.1, 2, N)
        self.M = M = StateMonitor(G, 'v', record=True)
        run(100*ms)

    def results(self):
        # Copy of the recorded membrane-potential traces (neurons x time).
        return self.M.v[:]

    # Results are arrays, so compare them element-wise.
    compare = FeatureTest.compare_arrays
| 22.948276 | 72 | 0.500376 |
from brian2 import *
from brian2.tests.features import FeatureTest, InaccuracyError
class SpikeMonitorTest(FeatureTest):
category = "Monitors"
name = "SpikeMonitor"
tags = ["NeuronGroup", "run",
"SpikeMonitor"]
def run(self):
N = 100
tau = 10*ms
eqs = '''
dv/dt = (I-v)/tau : 1
I : 1
'''
self.G = G = NeuronGroup(N, eqs, threshold='v>1', reset='v=0')
G.I = linspace(0, 2, N)
self.M = M = SpikeMonitor(G)
run(100*ms)
def results(self):
return {'i': self.M.i[:], 't': self.M.t[:]}
compare = FeatureTest.compare_arrays
class StateMonitorTest(FeatureTest):
category = "Monitors"
name = "StateMonitor"
tags = ["NeuronGroup", "run",
"StateMonitor"]
def run(self):
N = 10
tau = 10*ms
eqs = '''
dv/dt = (I-v)/tau : 1
I : 1
'''
self.G = G = NeuronGroup(N, eqs, threshold='v>1', reset='v=0.1')
G.v = 0.1
G.I = linspace(1.1, 2, N)
self.M = M = StateMonitor(G, 'v', record=True)
run(100*ms)
def results(self):
return self.M.v[:]
compare = FeatureTest.compare_arrays
| true | true |
f7fb78784a38735392a7ce30348e004c69d74ceb | 19,826 | py | Python | uproot4/source/http.py | tamasgal/uproot4 | c94fc036b8e8f35a36478a358203029224654606 | [
"BSD-3-Clause"
] | null | null | null | uproot4/source/http.py | tamasgal/uproot4 | c94fc036b8e8f35a36478a358203029224654606 | [
"BSD-3-Clause"
] | null | null | null | uproot4/source/http.py | tamasgal/uproot4 | c94fc036b8e8f35a36478a358203029224654606 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE
"""
Physical layer for remote files, accessed via HTTP(S).
Defines a :py:class:`~uproot4.source.http.HTTPResource` (stateless) and two sources:
:py:class:`~uproot4.source.http.MultithreadedHTTPSource` and
:py:class:`~uproot4.source.http.HTTPSource`. The multi-threaded source only requires
the server to support byte range requests (code 206), but the general source
requires the server to support multi-part byte range requests. If the server
does not support multi-part GET, :py:class:`~uproot4.source.http.HTTPSource`
automatically falls back to :py:class:`~uproot4.source.http.MultithreadedHTTPSource`.
Despite the name, both sources support secure HTTPS (selected by URL scheme).
"""
from __future__ import absolute_import
import sys
import re
try:
from http.client import HTTPConnection
from http.client import HTTPSConnection
from urllib.parse import urlparse
except ImportError:
from httplib import HTTPConnection
from httplib import HTTPSConnection
from urlparse import urlparse
try:
import queue
except ImportError:
import Queue as queue
import uproot4.source.futures
import uproot4.source.chunk
import uproot4._util
def make_connection(parsed_url, timeout):
    """
    Args:
        parsed_url (``urllib.parse.ParseResult``): The URL to connect to, which
            may be HTTP or HTTPS.
        timeout (None or float): An optional timeout in seconds.

    Creates a ``http.client.HTTPConnection`` or a ``http.client.HTTPSConnection``,
    depending on the URL scheme.

    Raises:
        ValueError: If the URL scheme is neither ``http`` nor ``https``.
    """
    if parsed_url.scheme == "https":
        if uproot4._util.py2:
            # Python 2 signature: (host, port, key_file, cert_file, strict, timeout).
            return HTTPSConnection(
                parsed_url.hostname, parsed_url.port, None, None, False, timeout
            )
        else:
            # Pass timeout by keyword: the deprecated key_file/cert_file
            # positional parameters were removed in Python 3.12, so
            # (host, port, None, None, timeout) would break there.
            return HTTPSConnection(
                parsed_url.hostname, parsed_url.port, timeout=timeout
            )
    elif parsed_url.scheme == "http":
        if uproot4._util.py2:
            # Python 2 signature: (host, port, strict, timeout).
            return HTTPConnection(parsed_url.hostname, parsed_url.port, False, timeout)
        else:
            return HTTPConnection(parsed_url.hostname, parsed_url.port, timeout=timeout)
    else:
        raise ValueError(
            "unrecognized URL scheme for HTTP MultipartSource: {0}".format(
                parsed_url.scheme
            )
        )
def full_path(parsed_url):
    """Return the HTTP request target for ``parsed_url``: the path followed
    by ``"?"`` and the query string when a query is present, otherwise just
    the path."""
    path = parsed_url.path
    query = parsed_url.query
    return path + "?" + query if query else path
def get_num_bytes(file_path, parsed_url, timeout):
    """
    Args:
        file_path (str): The URL to access as a raw string.
        parsed_url (``urllib.parse.ParseResult``): The URL to access.
        timeout (None or float): An optional timeout in seconds.

    Returns the number of bytes in the file by making a HEAD request.

    Raises the exception built by ``uproot4._util._file_not_found`` on a
    404 response, and ``OSError`` if the response is not 200 or carries
    no usable content-length header.
    """
    connection = make_connection(parsed_url, timeout)
    connection.request("HEAD", full_path(parsed_url))
    response = connection.getresponse()

    if response.status == 404:
        connection.close()
        raise uproot4._util._file_not_found(file_path, "HTTP(S) returned 404")
    if response.status != 200:
        connection.close()
        raise OSError(
            """HTTP response was {0}, rather than 200, in attempt to get file size
in file {1}""".format(
                response.status, file_path
            )
        )

    for k, x in response.getheaders():
        # Header names are case-insensitive; a "0" length is treated as
        # unusable (some servers report 0 rather than the true size).
        if k.lower() == "content-length" and x.strip() != "0":
            connection.close()
            return int(x)
    else:
        # for/else: runs only if the loop above never returned, i.e. no
        # usable content-length header was found.
        connection.close()
        raise OSError(
            """response headers did not include content-length: {0}
in file {1}""".format(
                dict(response.getheaders()), file_path
            )
        )
class HTTPResource(uproot4.source.chunk.Resource):
    """
    Args:
        file_path (str): A URL of the file to open.
        timeout (None or float): An optional timeout in seconds.

    A :py:class:`~uproot4.source.chunk.Resource` for HTTP(S) connections.

    For simplicity, this resource does not manage a live
    ``http.client.HTTPConnection`` or ``http.client.HTTPSConnection``, though
    in principle, it could.
    """

    def __init__(self, file_path, timeout):
        self._file_path = file_path
        self._timeout = timeout
        # Parsed once here and reused by every request made through this
        # resource.
        self._parsed_url = urlparse(file_path)

    @property
    def timeout(self):
        """
        The timeout in seconds or None.
        """
        return self._timeout

    @property
    def parsed_url(self):
        """
        A ``urllib.parse.ParseResult`` version of the ``file_path``.
        """
        return self._parsed_url

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # Stateless resource: no live connection is held, nothing to release.
        pass

    def get(self, connection, start, stop):
        """
        Args:
            connection: An open connection on which a ranged GET request
                has already been issued.
            start (int): Seek position of the first byte to include.
            stop (int): Seek position of the first byte to exclude
                (one greater than the last byte to include).

        Returns a Python buffer of data between ``start`` and ``stop``.
        """
        response = connection.getresponse()
        if response.status == 404:
            connection.close()
            raise uproot4._util._file_not_found(self.file_path, "HTTP(S) returned 404")
        # Only 206 "Partial Content" is acceptable: a 200 would mean the
        # server ignored the Range header and sent the whole file.
        if response.status != 206:
            connection.close()
            raise OSError(
                """remote server does not support HTTP range requests
for URL {0}""".format(
                    self._file_path
                )
            )
        try:
            return response.read()
        finally:
            connection.close()

    @staticmethod
    def future(source, start, stop):
        """
        Args:
            source (:py:class:`~uproot4.source.chunk.HTTPSource` or :py:class:`~uproot4.source.chunk.MultithreadedHTTPSource`): The
                data source.
            start (int): Seek position of the first byte to include.
            stop (int): Seek position of the first byte to exclude
                (one greater than the last byte to include).

        Returns a :py:class:`~uproot4.source.futures.ResourceFuture` that calls
        :py:meth:`~uproot4.source.file.HTTPResource.get` with ``start`` and ``stop``.
        """
        connection = make_connection(source.parsed_url, source.timeout)
        # HTTP byte ranges are inclusive on both ends, hence stop - 1.
        connection.request(
            "GET",
            full_path(source.parsed_url),
            headers={"Range": "bytes={0}-{1}".format(start, stop - 1)},
        )

        def task(resource):
            return resource.get(connection, start, stop)

        return uproot4.source.futures.ResourceFuture(task)

    @staticmethod
    def multifuture(source, ranges, futures, results):
        u"""
        Args:
            source (:py:class:`~uproot4.source.chunk.HTTPSource`): The data source.
            ranges (list of (int, int) 2-tuples): Intervals to fetch
                as (start, stop) pairs in a single request, if possible.
            futures (dict of (int, int) \u2192 :py:class:`~uproot4.source.futures.ResourceFuture`): Mapping
                from (start, stop) to a future that is awaiting its result.
            results (dict of (int, int) \u2192 None or ``numpy.ndarray`` of ``numpy.uint8``): Mapping
                from (start, stop) to None or results.

        Returns a :py:class:`~uproot4.source.futures.ResourceFuture` that attempts
        to perform an HTTP(S) multipart GET, filling ``results`` to satisfy
        the individual :py:class:`~uproot4.source.chunk.Chunk`'s ``futures`` with
        its multipart response.

        If the server does not support multipart GET, that same future
        sets :py:attr:`~uproot4.source.chunk.HTTPSource.fallback` and retries the
        request without multipart, using a
        :py:class:`~uproot4.source.http.MultithreadedHTTPSource` to fill the same
        ``results`` and ``futures``. Subsequent attempts would immediately
        use the :py:attr:`~uproot4.source.chunk.HTTPSource.fallback`.
        """
        connection = make_connection(source.parsed_url, source.timeout)

        # All requested intervals are packed into one Range header,
        # e.g. "bytes=0-9, 20-29" (ends are inclusive).
        range_strings = []
        for start, stop in ranges:
            range_strings.append("{0}-{1}".format(start, stop - 1))

        connection.request(
            "GET",
            full_path(source.parsed_url),
            headers={"Range": "bytes=" + ", ".join(range_strings)},
        )

        def task(resource):
            try:
                response = connection.getresponse()

                multipart_supported = resource.is_multipart_supported(ranges, response)

                if not multipart_supported:
                    resource.handle_no_multipart(source, ranges, futures, results)
                else:
                    resource.handle_multipart(source, futures, results, response)

            except Exception:
                # Any failure must be delivered to every waiting chunk,
                # not just one, so fan the exc_info out to all futures.
                excinfo = sys.exc_info()
                for future in futures.values():
                    future._set_excinfo(excinfo)

            finally:
                connection.close()

        return uproot4.source.futures.ResourceFuture(task)

    # Patterns for the per-part "Content-Range" headers in a multipart body
    # (with and without the "/total" suffix); bytes because response.fp
    # yields raw bytes.
    _content_range_size = re.compile(b"Content-Range: bytes ([0-9]+-[0-9]+)/([0-9]+)")
    _content_range = re.compile(b"Content-Range: bytes ([0-9]+-[0-9]+)")

    def is_multipart_supported(self, ranges, response):
        """
        Helper function for :py:meth:`~uproot4.source.http.HTTPResource.multifuture`
        to check for multipart GET support.
        """
        if response.status != 206:
            return False

        for k, x in response.getheaders():
            if k.lower() == "content-length":
                content_length = int(x)
                # If the whole response is exactly the size of one requested
                # interval, the server answered with a plain (single-range)
                # partial response rather than a multipart body.
                for start, stop in ranges:
                    if content_length == stop - start:
                        return False
        else:
            # for/else: reached when no single-range match was found above.
            return True

    def handle_no_multipart(self, source, ranges, futures, results):
        """
        Helper function for :py:meth:`~uproot4.source.http.HTTPResource.multifuture`
        to handle a lack of multipart GET support.
        """
        # Switch the source to its multithreaded fallback and re-request
        # every interval through it, forwarding each chunk's raw data into
        # the original futures/results bookkeeping.
        source._set_fallback()

        notifications = queue.Queue()
        source.fallback.chunks(ranges, notifications)

        for x in uproot4._util.range(len(ranges)):
            chunk = notifications.get()
            results[chunk.start, chunk.stop] = chunk.raw_data
            futures[chunk.start, chunk.stop]._run(self)

    def handle_multipart(self, source, futures, results, response):
        """
        Helper function for :py:meth:`~uproot4.source.http.HTTPResource.multifuture`
        to handle the multipart GET response.
        """
        for i in uproot4._util.range(len(futures)):
            range_string, size = self.next_header(response)
            if range_string is None:
                raise OSError(
                    """found {0} of {1} expected headers in HTTP multipart
for URL {2}""".format(
                        i, len(futures), self._file_path
                    )
                )

            # Content-Range reports an inclusive "start-last" interval;
            # convert to the half-open (start, stop) key used by futures.
            start, last = range_string.split(b"-")
            start, last = int(start), int(last)
            stop = last + 1

            future = futures.get((start, stop))
            if future is None:
                raise OSError(
                    """unrecognized byte range in headers of HTTP multipart: {0}
for URL {1}""".format(
                        repr(range_string.decode()), self._file_path
                    )
                )

            length = stop - start
            results[start, stop] = response.read(length)
            if len(results[start, stop]) != length:
                raise OSError(
                    """wrong chunk length {0} (expected {1}) for byte range {2} "
                    "in HTTP multipart
for URL {3}""".format(
                        len(results[start, stop]),
                        length,
                        repr(range_string.decode()),
                        self._file_path,
                    )
                )
            future._run(self)

    def next_header(self, response):
        """
        Helper function for :py:meth:`~uproot4.source.http.HTTPResource.multifuture`
        to return the next header from the ``response``.

        Returns a ``(range_string, size)`` pair, where ``range_string`` is
        the bytes "start-last" interval (or None if no Content-Range header
        was found before a blank line) and ``size`` is the total size from
        the header, if present.
        """
        # Scan raw header lines from the multipart body until a
        # Content-Range header is matched; a blank line ends the current
        # part's header section.
        line = response.fp.readline()
        range_string, size = None, None
        while range_string is None:
            m = self._content_range_size.match(line)
            if m is not None:
                range_string = m.group(1)
                size = int(m.group(2))
            else:
                m = self._content_range.match(line)
                if m is not None:
                    range_string = m.group(1)
                    size = None
            line = response.fp.readline()
            if len(line.strip()) == 0:
                break
        return range_string, size

    @staticmethod
    def partfuture(results, start, stop):
        """
        Returns a :py:class:`~uproot4.source.futures.ResourceFuture` to simply select
        the ``(start, stop)`` item from the ``results`` dict.

        In :py:meth:`~uproot4.source.http.HTTPSource.chunks`, each chunk has a
        :py:meth:`~uproot4.source.http.HTTPResource.partfuture` that are collectively
        filled by a single :py:meth:`~uproot4.source.http.HTTPResource.multifuture`.
        """

        def task(resource):
            return results[start, stop]

        return uproot4.source.futures.ResourceFuture(task)
class HTTPSource(uproot4.source.chunk.Source):
    """
    Args:
        file_path (str): A URL of the file to open.
        options: Must include ``"num_fallback_workers"`` and ``"timeout"``.

    A :py:class:`~uproot4.source.chunk.Source` that first attempts an HTTP(S)
    multipart GET, but if the server doesn't support it, it falls back to many
    HTTP(S) connections in threads
    (:py:class:`~uproot4.source.http.MultithreadedHTTPSource`).

    Since the multipart GET is a single request and response, it needs only one
    thread, but it is a background thread (a single
    :py:class:`~uproot4.source.futures.ResourceWorker` in a
    :py:class:`~uproot4.source.futures.ResourceThreadPoolExecutor`).
    """

    ResourceClass = HTTPResource

    def __init__(self, file_path, **options):
        num_fallback_workers = options["num_fallback_workers"]
        timeout = options["timeout"]
        # Request-statistics counters (inherited reporting from Source).
        self._num_requests = 0
        self._num_requested_chunks = 0
        self._num_requested_bytes = 0

        self._file_path = file_path
        self._timeout = timeout
        self._num_bytes = None

        # One background worker: a multipart GET is a single request/response.
        self._executor = uproot4.source.futures.ResourceThreadPoolExecutor(
            [HTTPResource(file_path, timeout)]
        )
        # Fallback source is created lazily, only after a server refuses
        # a multipart GET (see _set_fallback).
        self._fallback = None
        self._fallback_options = dict(options)
        self._fallback_options["num_workers"] = num_fallback_workers

    def __repr__(self):
        path = repr(self._file_path)
        if len(self._file_path) > 10:
            path = repr("..." + self._file_path[-10:])
        fallback = ""
        if self._fallback is not None:
            fallback = " with fallback"
        return "<{0} {1}{2} at 0x{3:012x}>".format(
            type(self).__name__, path, fallback, id(self)
        )

    def chunk(self, start, stop):
        # Single-interval request: an ordinary ranged GET, no multipart.
        self._num_requests += 1
        self._num_requested_chunks += 1
        self._num_requested_bytes += stop - start
        future = self.ResourceClass.future(self, start, stop)
        chunk = uproot4.source.chunk.Chunk(self, start, stop, future)
        self._executor.submit(future)
        return chunk

    def chunks(self, ranges, notifications):
        if self._fallback is None:
            self._num_requests += 1
            self._num_requested_chunks += len(ranges)
            self._num_requested_bytes += sum(stop - start for start, stop in ranges)

            # Per-interval placeholder futures/results, collectively filled
            # by one multifuture when the multipart response arrives.
            futures = {}
            results = {}
            chunks = []
            for start, stop in ranges:
                partfuture = self.ResourceClass.partfuture(results, start, stop)
                futures[start, stop] = partfuture
                results[start, stop] = None
                chunk = uproot4.source.chunk.Chunk(self, start, stop, partfuture)
                partfuture._set_notify(
                    uproot4.source.chunk.notifier(chunk, notifications)
                )
                chunks.append(chunk)

            self._executor.submit(
                self.ResourceClass.multifuture(self, ranges, futures, results)
            )
            return chunks

        else:
            # A previous multipart attempt failed: delegate everything to
            # the multithreaded fallback source.
            return self._fallback.chunks(ranges, notifications)

    @property
    def executor(self):
        """
        The :py:class:`~uproot4.source.futures.ResourceThreadPoolExecutor` that
        manages this source's single background thread.
        """
        return self._executor

    @property
    def closed(self):
        return self._executor.closed

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self._executor.shutdown()

    @property
    def timeout(self):
        """
        The timeout in seconds or None.
        """
        return self._timeout

    @property
    def num_bytes(self):
        # Lazily determined by a HEAD request on first access.
        if self._num_bytes is None:
            self._num_bytes = get_num_bytes(
                self._file_path, self.parsed_url, self._timeout
            )
        return self._num_bytes

    @property
    def parsed_url(self):
        """
        A ``urllib.parse.ParseResult`` version of the ``file_path``.
        """
        return self._executor.workers[0].resource.parsed_url

    @property
    def fallback(self):
        """
        If None, the source has not encountered an unsuccessful multipart GET
        and no fallback is needed yet.

        Otherwise, this is a :py:class:`~uproot4.source.http.MultithreadedHTTPSource`
        to which all requests are forwarded.
        """
        return self._fallback

    def _set_fallback(self):
        self._fallback = MultithreadedHTTPSource(
            self._file_path,
            **self._fallback_options  # NOTE: a comma after **fallback_options breaks Python 2
        )
class MultithreadedHTTPSource(uproot4.source.chunk.MultithreadedSource):
    """
    Args:
        file_path (str): A URL of the file to open.
        options: Must include ``"num_workers"`` and ``"timeout"``.

    A :py:class:`~uproot4.source.chunk.MultithreadedSource` that manages many
    :py:class:`~uproot4.source.http.HTTPResource` objects.

    Each worker issues its own plain byte-range GET request, so the server
    only needs to support single-range requests (status 206), not
    multipart GET.
    """

    ResourceClass = HTTPResource

    def __init__(self, file_path, **options):
        num_workers = options["num_workers"]
        timeout = options["timeout"]
        # Request-statistics counters (inherited reporting from
        # MultithreadedSource).
        self._num_requests = 0
        self._num_requested_chunks = 0
        self._num_requested_bytes = 0

        self._file_path = file_path
        self._num_bytes = None
        self._timeout = timeout

        # One stateless HTTPResource per worker thread.
        self._executor = uproot4.source.futures.ResourceThreadPoolExecutor(
            [HTTPResource(file_path, timeout) for x in uproot4._util.range(num_workers)]
        )

    @property
    def timeout(self):
        """
        The timeout in seconds or None.
        """
        return self._timeout

    @property
    def num_bytes(self):
        # Lazily determined by a HEAD request on first access.
        if self._num_bytes is None:
            self._num_bytes = get_num_bytes(
                self._file_path, self.parsed_url, self._timeout
            )
        return self._num_bytes

    @property
    def parsed_url(self):
        """
        A ``urllib.parse.ParseResult`` version of the ``file_path``.
        """
        return self._executor.workers[0].resource.parsed_url
| 34.006861 | 131 | 0.600222 |
from __future__ import absolute_import
import sys
import re
try:
from http.client import HTTPConnection
from http.client import HTTPSConnection
from urllib.parse import urlparse
except ImportError:
from httplib import HTTPConnection
from httplib import HTTPSConnection
from urlparse import urlparse
try:
import queue
except ImportError:
import Queue as queue
import uproot4.source.futures
import uproot4.source.chunk
import uproot4._util
def make_connection(parsed_url, timeout):
if parsed_url.scheme == "https":
if uproot4._util.py2:
return HTTPSConnection(
parsed_url.hostname, parsed_url.port, None, None, False, timeout
)
else:
return HTTPSConnection(
parsed_url.hostname, parsed_url.port, None, None, timeout
)
elif parsed_url.scheme == "http":
if uproot4._util.py2:
return HTTPConnection(parsed_url.hostname, parsed_url.port, False, timeout)
else:
return HTTPConnection(parsed_url.hostname, parsed_url.port, timeout)
else:
raise ValueError(
"unrecognized URL scheme for HTTP MultipartSource: {0}".format(
parsed_url.scheme
)
)
def full_path(parsed_url):
if parsed_url.query:
return parsed_url.path + "?" + parsed_url.query
else:
return parsed_url.path
def get_num_bytes(file_path, parsed_url, timeout):
connection = make_connection(parsed_url, timeout)
connection.request("HEAD", full_path(parsed_url))
response = connection.getresponse()
if response.status == 404:
connection.close()
raise uproot4._util._file_not_found(file_path, "HTTP(S) returned 404")
if response.status != 200:
connection.close()
raise OSError(
"""HTTP response was {0}, rather than 200, in attempt to get file size
in file {1}""".format(
response.status, file_path
)
)
for k, x in response.getheaders():
if k.lower() == "content-length" and x.strip() != "0":
connection.close()
return int(x)
else:
connection.close()
raise OSError(
"""response headers did not include content-length: {0}
in file {1}""".format(
dict(response.getheaders()), file_path
)
)
class HTTPResource(uproot4.source.chunk.Resource):
def __init__(self, file_path, timeout):
self._file_path = file_path
self._timeout = timeout
self._parsed_url = urlparse(file_path)
@property
def timeout(self):
return self._timeout
@property
def parsed_url(self):
return self._parsed_url
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
def get(self, connection, start, stop):
response = connection.getresponse()
if response.status == 404:
connection.close()
raise uproot4._util._file_not_found(self.file_path, "HTTP(S) returned 404")
if response.status != 206:
connection.close()
raise OSError(
"""remote server does not support HTTP range requests
for URL {0}""".format(
self._file_path
)
)
try:
return response.read()
finally:
connection.close()
@staticmethod
def future(source, start, stop):
connection = make_connection(source.parsed_url, source.timeout)
connection.request(
"GET",
full_path(source.parsed_url),
headers={"Range": "bytes={0}-{1}".format(start, stop - 1)},
)
def task(resource):
return resource.get(connection, start, stop)
return uproot4.source.futures.ResourceFuture(task)
@staticmethod
def multifuture(source, ranges, futures, results):
connection = make_connection(source.parsed_url, source.timeout)
range_strings = []
for start, stop in ranges:
range_strings.append("{0}-{1}".format(start, stop - 1))
connection.request(
"GET",
full_path(source.parsed_url),
headers={"Range": "bytes=" + ", ".join(range_strings)},
)
def task(resource):
try:
response = connection.getresponse()
multipart_supported = resource.is_multipart_supported(ranges, response)
if not multipart_supported:
resource.handle_no_multipart(source, ranges, futures, results)
else:
resource.handle_multipart(source, futures, results, response)
except Exception:
excinfo = sys.exc_info()
for future in futures.values():
future._set_excinfo(excinfo)
finally:
connection.close()
return uproot4.source.futures.ResourceFuture(task)
_content_range_size = re.compile(b"Content-Range: bytes ([0-9]+-[0-9]+)/([0-9]+)")
_content_range = re.compile(b"Content-Range: bytes ([0-9]+-[0-9]+)")
def is_multipart_supported(self, ranges, response):
if response.status != 206:
return False
for k, x in response.getheaders():
if k.lower() == "content-length":
content_length = int(x)
for start, stop in ranges:
if content_length == stop - start:
return False
else:
return True
def handle_no_multipart(self, source, ranges, futures, results):
source._set_fallback()
notifications = queue.Queue()
source.fallback.chunks(ranges, notifications)
for x in uproot4._util.range(len(ranges)):
chunk = notifications.get()
results[chunk.start, chunk.stop] = chunk.raw_data
futures[chunk.start, chunk.stop]._run(self)
def handle_multipart(self, source, futures, results, response):
for i in uproot4._util.range(len(futures)):
range_string, size = self.next_header(response)
if range_string is None:
raise OSError(
"""found {0} of {1} expected headers in HTTP multipart
for URL {2}""".format(
i, len(futures), self._file_path
)
)
start, last = range_string.split(b"-")
start, last = int(start), int(last)
stop = last + 1
future = futures.get((start, stop))
if future is None:
raise OSError(
"""unrecognized byte range in headers of HTTP multipart: {0}
for URL {1}""".format(
repr(range_string.decode()), self._file_path
)
)
length = stop - start
results[start, stop] = response.read(length)
if len(results[start, stop]) != length:
raise OSError(
"""wrong chunk length {0} (expected {1}) for byte range {2} "
"in HTTP multipart
for URL {3}""".format(
len(results[start, stop]),
length,
repr(range_string.decode()),
self._file_path,
)
)
future._run(self)
def next_header(self, response):
line = response.fp.readline()
range_string, size = None, None
while range_string is None:
m = self._content_range_size.match(line)
if m is not None:
range_string = m.group(1)
size = int(m.group(2))
else:
m = self._content_range.match(line)
if m is not None:
range_string = m.group(1)
size = None
line = response.fp.readline()
if len(line.strip()) == 0:
break
return range_string, size
@staticmethod
def partfuture(results, start, stop):
def task(resource):
return results[start, stop]
return uproot4.source.futures.ResourceFuture(task)
class HTTPSource(uproot4.source.chunk.Source):
ResourceClass = HTTPResource
def __init__(self, file_path, **options):
num_fallback_workers = options["num_fallback_workers"]
timeout = options["timeout"]
self._num_requests = 0
self._num_requested_chunks = 0
self._num_requested_bytes = 0
self._file_path = file_path
self._timeout = timeout
self._num_bytes = None
self._executor = uproot4.source.futures.ResourceThreadPoolExecutor(
[HTTPResource(file_path, timeout)]
)
self._fallback = None
self._fallback_options = dict(options)
self._fallback_options["num_workers"] = num_fallback_workers
def __repr__(self):
path = repr(self._file_path)
if len(self._file_path) > 10:
path = repr("..." + self._file_path[-10:])
fallback = ""
if self._fallback is not None:
fallback = " with fallback"
return "<{0} {1}{2} at 0x{3:012x}>".format(
type(self).__name__, path, fallback, id(self)
)
def chunk(self, start, stop):
self._num_requests += 1
self._num_requested_chunks += 1
self._num_requested_bytes += stop - start
future = self.ResourceClass.future(self, start, stop)
chunk = uproot4.source.chunk.Chunk(self, start, stop, future)
self._executor.submit(future)
return chunk
def chunks(self, ranges, notifications):
if self._fallback is None:
self._num_requests += 1
self._num_requested_chunks += len(ranges)
self._num_requested_bytes += sum(stop - start for start, stop in ranges)
futures = {}
results = {}
chunks = []
for start, stop in ranges:
partfuture = self.ResourceClass.partfuture(results, start, stop)
futures[start, stop] = partfuture
results[start, stop] = None
chunk = uproot4.source.chunk.Chunk(self, start, stop, partfuture)
partfuture._set_notify(
uproot4.source.chunk.notifier(chunk, notifications)
)
chunks.append(chunk)
self._executor.submit(
self.ResourceClass.multifuture(self, ranges, futures, results)
)
return chunks
else:
return self._fallback.chunks(ranges, notifications)
@property
def executor(self):
return self._executor
@property
def closed(self):
return self._executor.closed
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self._executor.shutdown()
@property
def timeout(self):
return self._timeout
@property
def num_bytes(self):
if self._num_bytes is None:
self._num_bytes = get_num_bytes(
self._file_path, self.parsed_url, self._timeout
)
return self._num_bytes
@property
def parsed_url(self):
return self._executor.workers[0].resource.parsed_url
@property
def fallback(self):
return self._fallback
def _set_fallback(self):
self._fallback = MultithreadedHTTPSource(
self._file_path,
**self._fallback_options
)
class MultithreadedHTTPSource(uproot4.source.chunk.MultithreadedSource):
    """HTTP source that serves each requested byte range with a plain GET.

    One worker thread is created per HTTPResource.  This class is used
    directly, or as the fallback of a multi-part source.
    """

    ResourceClass = HTTPResource

    def __init__(self, file_path, **options):
        """Open ``options["num_workers"]`` HTTP connections to *file_path*.

        Required options: ``num_workers`` and ``timeout``.
        """
        timeout = options["timeout"]
        workers = options["num_workers"]
        self._file_path = file_path
        self._timeout = timeout
        self._num_bytes = None
        self._num_requests = 0
        self._num_requested_chunks = 0
        self._num_requested_bytes = 0
        resources = [
            HTTPResource(file_path, timeout)
            for _ in uproot4._util.range(workers)
        ]
        self._executor = uproot4.source.futures.ResourceThreadPoolExecutor(resources)

    @property
    def timeout(self):
        """Configured timeout for HTTP requests, as given at construction."""
        return self._timeout

    @property
    def num_bytes(self):
        """Total size of the remote file in bytes, fetched lazily and cached."""
        if self._num_bytes is None:
            self._num_bytes = get_num_bytes(
                self._file_path, self.parsed_url, self._timeout
            )
        return self._num_bytes

    @property
    def parsed_url(self):
        """Parsed URL of the file, taken from the first worker's resource."""
        return self._executor.workers[0].resource.parsed_url
| true | true |
f7fb7964179468eaa79b757cd24bf19739e8243d | 7,150 | py | Python | problog/test/test_lfi.py | jselvam11/problog | 35d92989b7b49591963d5076ac5307613ebdf99e | [
"Apache-2.0"
] | 1 | 2021-09-23T11:50:44.000Z | 2021-09-23T11:50:44.000Z | problog/test/test_lfi.py | thuwzy/problog | 35d92989b7b49591963d5076ac5307613ebdf99e | [
"Apache-2.0"
] | null | null | null | problog/test/test_lfi.py | thuwzy/problog | 35d92989b7b49591963d5076ac5307613ebdf99e | [
"Apache-2.0"
] | null | null | null | """
Module name
"""
from __future__ import print_function
from problog import root_path
from problog.util import subprocess_call, subprocess_check_output
import unittest
import os
import sys
import glob
import subprocess, traceback
from problog.learning.lfi import lfi_wrapper, LFIProblem
if __name__ == "__main__":
    # Allow running this file directly from the source tree.
    sys.path.insert(
        0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
    )
try:
    # SDD support is optional; when missing, only the d-DNNF backend is tested.
    from pysdd import sdd

    has_sdd = True
except Exception as err:
    print("SDD library not available due to error: ", err, file=sys.stderr)
    has_sdd = False
class TestLFI(unittest.TestCase):
    """Container class for the dynamically generated LFI system tests."""

    def setUp(self):
        """Alias assertSequenceEqual to the order-insensitive comparison.

        Python 2's unittest named it assertItemsEqual; Python 3 renamed it
        to assertCountEqual.  Use whichever this interpreter provides.
        """
        self.assertSequenceEqual = getattr(
            self, "assertItemsEqual", self.assertCountEqual
        )
def read_result(filename):
    """Extract the expected outcome embedded as comments in a model file.

    Lines after a "%Expected outcome:" marker are collected (with their
    leading "% " stripped) until a non-comment line ends the section.
    If an "% error: <name>" line is encountered instead, the error name
    is returned as a plain string rather than a list.
    """
    expected = []
    collecting = False
    with open(filename) as handle:
        for raw in handle:
            line = raw.strip()
            if line.startswith("%Expected outcome:"):
                collecting = True
            elif collecting:
                if line.lower().startswith("% error: "):
                    return line[len("% error: ") :].strip()
                if line.startswith("% "):
                    expected.append(line[2:])
                else:
                    collecting = False
    return expected
def createTestLFI(filename):
    """Build a test method that runs LFI on *filename* for every backend.

    The returned function is attached to TestLFI via setattr below; the
    model's expected result is read from its "%Expected outcome:" comments.
    """
    def test(self):
        # Run the same check once per available evaluatable backend.
        for eval_name in evaluatables:
            with self.subTest(evaluatable=eval_name):
                test_func(self, evaluatable=eval_name)
    def test_func(self, evaluatable="ddnnf"):
        """Run lfi_wrapper on the model and compare against expectations."""
        model = filename
        examples = filename.replace(".pl", ".ev")
        expectedlines = read_result(model)
        if not os.path.exists(examples):
            raise Exception("Evidence file is missing: {}".format(examples))
        try:
            # Default learning parameters for all system tests.
            d = {
                "max_iter": 10000,
                "min_improv": 1e-10,
                "leakprob": None,
                "propagate_evidence": True,
                "eps": 0.0001,
                "normalize": True,
                "web": False,
                "args": None,
            }
            score, weights, names, iterations, lfi = lfi_wrapper(
                model, [examples], evaluatable, d
            )
            outlines = lfi.get_model()
        except Exception as err:
            # print(expectedlines)
            # print(err)
            # This test is specifically for test/lfi/AD/relatedAD_1 and test/lfi/AD/relatedAD_2:
            # for those models read_result returns the expected error name.
            assert expectedlines == "NonGroundProbabilisticClause"
            return
        outlines = outlines.split("\n")[:-1]
        assert len(expectedlines) == len(outlines)
        # Compare expected program and learned program line by line
        for expectedline, outline in zip(expectedlines, outlines):
            # When there are probabilities
            if "::" in outline:
                # Break the lines into components where each component has exactly one probability
                expectedline_comps = expectedline.split(";")
                outline_comps = outline.split(";")
                new_expectedline_comps = []
                new_outline_comps = []
                assert len(expectedline_comps) == len(outline_comps)
                # Compare one expected probability and one learned probability at a time
                for expectedline_comp, outline_comp in zip(
                    expectedline_comps, outline_comps
                ):
                    outline_comp = outline_comp.strip()
                    expectedline_comp = expectedline_comp.strip()
                    # When the learned prob in outline_component does not matter,
                    # discard the learned probability
                    if "<RAND>" in expectedline_comp:
                        outline_comp = "<RAND>::" + outline_comp.split("::")[1]
                    else:
                        # Round the expected and learned probabilities
                        rounded_outline_comp_prob = "{:.6f}".format(
                            float(outline_comp.split("::")[0])
                        )
                        rounded_expectedline_comp_prob = "{:.6f}".format(
                            float(expectedline_comp.split("::")[0])
                        )
                        # Update the expected component probability
                        expectedline_comp = (
                            rounded_expectedline_comp_prob
                            + "::"
                            + expectedline_comp.split("::")[1]
                        )
                        # If the learned probability is close enough to the expected probability
                        if (
                            abs(
                                float(rounded_outline_comp_prob)
                                - float(rounded_expectedline_comp_prob)
                            )
                            < 0.00001
                        ):
                            # Make the two lines identical
                            outline_comp = (
                                rounded_expectedline_comp_prob
                                + "::"
                                + outline_comp.split("::")[1]
                            )
                    new_outline_comps.append(outline_comp)
                    new_expectedline_comps.append(expectedline_comp)
                new_outline = "; ".join(new_outline_comps)
                new_expectedline = "; ".join(new_expectedline_comps)
                # print(new_expectedline)
                # print(new_outline)
                assert new_expectedline == new_outline
    return test
def ignore_previous_output(path):
    """Delete stale *.out files left behind in *path* by a previous run."""
    for entry in os.listdir(path):
        if entry.endswith(".out"):
            os.remove(os.path.join(path, entry))
if __name__ == "__main__":
    # When run directly, take model files from the command line.
    filenames = sys.argv[1:]
else:
    # When imported (e.g. by a test runner), collect all LFI test models.
    AD_filenames = glob.glob(root_path("test", "lfi", "AD", "*.pl"))
    simple_filenames = glob.glob(root_path("test", "lfi", "Simple", "*.pl"))
    misc_filenames = glob.glob(root_path("test", "lfi", "Misc", "*.pl"))
    evaluatables = ["ddnnf"]
    # evaluatables = []
    if has_sdd:
        evaluatables.append("sdd")
        evaluatables.append("sddx")
    else:
        print("No SDD support - The system tests are not performed with SDDs.")
    # tests for ADs
    for testfile in AD_filenames:
        testname = "test_lfi_AD_" + os.path.splitext(os.path.basename(testfile))[0]
        setattr(TestLFI, testname, createTestLFI(testfile))
    # tests for simple unit tests
    for testfile in simple_filenames:
        testname = "test_lfi_Simple_" + os.path.splitext(os.path.basename(testfile))[0]
        setattr(TestLFI, testname, createTestLFI(testfile))
    # tests for Miscellaneous files
    for testfile in misc_filenames:
        testname = "test_lfi_Misc_" + os.path.splitext(os.path.basename(testfile))[0]
        setattr(TestLFI, testname, createTestLFI(testfile))
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestLFI)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 36.294416 | 98 | 0.54951 |
from __future__ import print_function
from problog import root_path
from problog.util import subprocess_call, subprocess_check_output
import unittest
import os
import sys
import glob
import subprocess, traceback
from problog.learning.lfi import lfi_wrapper, LFIProblem
if __name__ == "__main__":
    # Allow running this file directly from the source tree.
    sys.path.insert(
        0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
    )
try:
    # SDD support is optional; when missing, only the d-DNNF backend is tested.
    from pysdd import sdd

    has_sdd = True
except Exception as err:
    print("SDD library not available due to error: ", err, file=sys.stderr)
    has_sdd = False
class TestLFI(unittest.TestCase):
    """Container class for the dynamically generated LFI system tests."""

    def setUp(self):
        """Alias assertSequenceEqual to the order-insensitive comparison.

        Python 2's unittest named it assertItemsEqual; Python 3 renamed it
        to assertCountEqual.  Use whichever this interpreter provides.
        """
        self.assertSequenceEqual = getattr(
            self, "assertItemsEqual", self.assertCountEqual
        )
def read_result(filename):
    """Extract the expected outcome embedded as comments in a model file.

    Lines after a "%Expected outcome:" marker are collected (with their
    leading "% " stripped) until a non-comment line ends the section.
    If an "% error: <name>" line is encountered instead, the error name
    is returned as a plain string rather than a list.
    """
    expected = []
    collecting = False
    with open(filename) as handle:
        for raw in handle:
            line = raw.strip()
            if line.startswith("%Expected outcome:"):
                collecting = True
            elif collecting:
                if line.lower().startswith("% error: "):
                    return line[len("% error: ") :].strip()
                if line.startswith("% "):
                    expected.append(line[2:])
                else:
                    collecting = False
    return expected
def createTestLFI(filename):
    """Build a test method that runs LFI on *filename* for every backend.

    The returned function is attached to TestLFI via setattr below; the
    model's expected result is read from its "%Expected outcome:" comments.
    """
    def test(self):
        # Run the same check once per available evaluatable backend.
        for eval_name in evaluatables:
            with self.subTest(evaluatable=eval_name):
                test_func(self, evaluatable=eval_name)
    def test_func(self, evaluatable="ddnnf"):
        """Run lfi_wrapper on the model and compare against expectations."""
        model = filename
        examples = filename.replace(".pl", ".ev")
        expectedlines = read_result(model)
        if not os.path.exists(examples):
            raise Exception("Evidence file is missing: {}".format(examples))
        try:
            # Default learning parameters for all system tests.
            d = {
                "max_iter": 10000,
                "min_improv": 1e-10,
                "leakprob": None,
                "propagate_evidence": True,
                "eps": 0.0001,
                "normalize": True,
                "web": False,
                "args": None,
            }
            score, weights, names, iterations, lfi = lfi_wrapper(
                model, [examples], evaluatable, d
            )
            outlines = lfi.get_model()
        except Exception as err:
            # For non-ground models read_result returns the expected error name.
            assert expectedlines == "NonGroundProbabilisticClause"
            return
        outlines = outlines.split("\n")[:-1]
        assert len(expectedlines) == len(outlines)
        # Compare the expected and learned programs line by line.
        for expectedline, outline in zip(expectedlines, outlines):
            if "::" in outline:
                # Split into components so each carries exactly one probability.
                expectedline_comps = expectedline.split(";")
                outline_comps = outline.split(";")
                new_expectedline_comps = []
                new_outline_comps = []
                assert len(expectedline_comps) == len(outline_comps)
                for expectedline_comp, outline_comp in zip(
                    expectedline_comps, outline_comps
                ):
                    outline_comp = outline_comp.strip()
                    expectedline_comp = expectedline_comp.strip()
                    # <RAND> means the learned value does not matter; drop it.
                    if "<RAND>" in expectedline_comp:
                        outline_comp = "<RAND>::" + outline_comp.split("::")[1]
                    else:
                        # Round both probabilities to six decimals before comparing.
                        rounded_outline_comp_prob = "{:.6f}".format(
                            float(outline_comp.split("::")[0])
                        )
                        rounded_expectedline_comp_prob = "{:.6f}".format(
                            float(expectedline_comp.split("::")[0])
                        )
                        expectedline_comp = (
                            rounded_expectedline_comp_prob
                            + "::"
                            + expectedline_comp.split("::")[1]
                        )
                        # If close enough, normalize to identical text.
                        if (
                            abs(
                                float(rounded_outline_comp_prob)
                                - float(rounded_expectedline_comp_prob)
                            )
                            < 0.00001
                        ):
                            outline_comp = (
                                rounded_expectedline_comp_prob
                                + "::"
                                + outline_comp.split("::")[1]
                            )
                    new_outline_comps.append(outline_comp)
                    new_expectedline_comps.append(expectedline_comp)
                new_outline = "; ".join(new_outline_comps)
                new_expectedline = "; ".join(new_expectedline_comps)
                assert new_expectedline == new_outline
    return test
def ignore_previous_output(path):
    """Delete stale *.out files left behind in *path* by a previous run."""
    for entry in os.listdir(path):
        if entry.endswith(".out"):
            os.remove(os.path.join(path, entry))
if __name__ == "__main__":
    # When run directly, take model files from the command line.
    filenames = sys.argv[1:]
else:
    # When imported (e.g. by a test runner), collect all LFI test models.
    AD_filenames = glob.glob(root_path("test", "lfi", "AD", "*.pl"))
    simple_filenames = glob.glob(root_path("test", "lfi", "Simple", "*.pl"))
    misc_filenames = glob.glob(root_path("test", "lfi", "Misc", "*.pl"))
    evaluatables = ["ddnnf"]
    if has_sdd:
        evaluatables.append("sdd")
        evaluatables.append("sddx")
    else:
        print("No SDD support - The system tests are not performed with SDDs.")
    # Attach one generated test per model file, named by category.
    for testfile in AD_filenames:
        testname = "test_lfi_AD_" + os.path.splitext(os.path.basename(testfile))[0]
        setattr(TestLFI, testname, createTestLFI(testfile))
    for testfile in simple_filenames:
        testname = "test_lfi_Simple_" + os.path.splitext(os.path.basename(testfile))[0]
        setattr(TestLFI, testname, createTestLFI(testfile))
    for testfile in misc_filenames:
        testname = "test_lfi_Misc_" + os.path.splitext(os.path.basename(testfile))[0]
        setattr(TestLFI, testname, createTestLFI(testfile))
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestLFI)
    unittest.TextTestRunner(verbosity=2).run(suite)
| true | true |
f7fb7ab1cf810c21bbb095e32187edc28ef8e24e | 12,750 | py | Python | tests/test_device_services.py | deichmab-draeger/sdc11073-1 | 2cbd4daaa32dc8a52723ecb8209f39a7d19b3c1b | [
"MIT"
] | null | null | null | tests/test_device_services.py | deichmab-draeger/sdc11073-1 | 2cbd4daaa32dc8a52723ecb8209f39a7d19b3c1b | [
"MIT"
] | null | null | null | tests/test_device_services.py | deichmab-draeger/sdc11073-1 | 2cbd4daaa32dc8a52723ecb8209f39a7d19b3c1b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import os
import uuid
from lxml import etree as etree_
import logging
import logging.handlers
from sdc11073.wsdiscovery import WSDiscoveryWhitelist
from sdc11073.location import SdcLocation
from sdc11073.namespaces import msgTag, domTag, nsmap
from sdc11073.namespaces import Prefix_Namespace as Prefix
from sdc11073.pysoap.soapenvelope import GenericNode, WsAddress, Soap12Envelope, AddressedSoap12Envelope
from sdc11073.definitions_sdc import SDC_v1_Definitions
from tests import mockstuff
_msg_ns = Prefix.MSG.namespace
_sdc_ns = Prefix.SDC.namespace
class TestDeviceServices(unittest.TestCase):
def setUp(self):
''' validate test data'''
print ('############### setUp {}... ##############'.format(self._testMethodName))
self.wsDiscovery = WSDiscoveryWhitelist(['127.0.0.1'])
self.wsDiscovery.start()
my_uuid = None # let device create one
self.sdcDevice_final = mockstuff.SomeDevice.fromMdibFile(self.wsDiscovery, my_uuid, '70041_MDIB_Final.xml')
self.sdcDevice_final.startAll()
self._alldevices = (self.sdcDevice_final,)
print ('############### setUp done {} ##############'.format(self._testMethodName))
def tearDown(self):
print ('############### tearDown {}... ##############'.format(self._testMethodName))
for d in self._alldevices:
if d:
d.stopAll()
self.wsDiscovery.stop()
print ('############### tearDown {} done ##############'.format(self._testMethodName))
def _mkGetRequest(self, sdcDevice, porttype, method, endpoint_reference):
if sdcDevice is self.sdcDevice_final:
ns = sdcDevice.mdib.sdc_definitions.DPWS_SDCNamespace
else:
ns = sdcDevice.mdib.sdc_definitions.MessageModelNamespace
action = '{}/{}/{}'.format(ns, porttype, method)
bodyNode = etree_.Element(msgTag(method))
soapEnvelope = Soap12Envelope(Prefix.partialMap(Prefix.S12, Prefix.WSA, Prefix.MSG))
identifier = uuid.uuid4().urn
soapEnvelope.addHeaderObject(WsAddress(messageId=identifier,
action=action,
to=endpoint_reference))
soapEnvelope.addBodyObject(GenericNode(bodyNode))
soapEnvelope.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
return soapEnvelope
def test_dispatch_final(self):
self._test_dispatch(self.sdcDevice_final)
def _test_dispatch(self, sdcDevice):
dispatcher = sdcDevice._handler._httpServerThread.devices_dispatcher
endpoint_reference = sdcDevice._handler._GetDispatcher.hostingService.epr
getService = sdcDevice._handler._GetDispatcher
getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdib', endpoint_reference)
httpHeader = {}
response_string = dispatcher.on_post(endpoint_reference, httpHeader, getEnv.as_xml())
self.assertTrue('/{}/GetMdibResponse'.format(getService.port_type_string).encode('utf-8') in response_string)
endpoint_reference = sdcDevice._handler._ContextDispatcher.hostingService.epr
contextService = sdcDevice._handler._ContextDispatcher
getEnv = self._mkGetRequest(sdcDevice, contextService.port_type_string, 'GetContextStates', endpoint_reference)
httpHeader = {}
response_string = dispatcher.on_post(endpoint_reference, httpHeader, getEnv.as_xml())
self.assertTrue('/{}/GetContextStatesResponse'.format(contextService.port_type_string).encode('utf-8') in response_string)
def test_getMdib(self):
for sdcDevice in self._alldevices:
getService = sdcDevice._handler._GetDispatcher
endpoint_reference = '123'
getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdib', endpoint_reference)
receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
httpHeader = {}
response = getService._onGetMdib(httpHeader, receivedEnv)
response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
def test_getMdState(self):
for sdcDevice in self._alldevices:
getService = sdcDevice._handler._GetDispatcher
endpoint_reference = '123'
getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdState', endpoint_reference)
receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
httpHeader = {}
response = getService.dispatchSoapRequest(None, httpHeader, receivedEnv)
response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
def test_getMdDescription(self):
for sdcDevice in self._alldevices:
getService = sdcDevice._handler._GetDispatcher
endpoint_reference = '123'
getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdDescription', endpoint_reference)
receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
httpHeader = {}
response = getService.dispatchSoapRequest(None, httpHeader, receivedEnv)
response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
def test_changeAlarmPrio(self):
''' This is a test for defect SDCSIM-129
The order of children of '''
for sdcDevice in self._alldevices:
getService = sdcDevice._handler._GetDispatcher
endpoint_reference = '123'
with sdcDevice.mdib.mdibUpdateTransaction() as tr:
alarmConditionDescriptor = tr.getDescriptor('0xD3C00109')
alarmConditionDescriptor.Priority='Lo'
getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdDescription', endpoint_reference)
receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
httpHeader = {}
response = getService.dispatchSoapRequest(None, httpHeader, receivedEnv)
response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
def test_getContextStates(self):
facility = 'HOSP42'
poc = 'Care Unit 1'
bed = 'my bed'
loc = SdcLocation(fac=facility, poc=poc, bed=bed)
for sdcDevice in self._alldevices:
sdcDevice.mdib.setLocation(loc)
contextService = sdcDevice._handler._ContextDispatcher
endpoint_reference = '123'
getEnv = self._mkGetRequest(sdcDevice, contextService.port_type_string, 'GetContextStates', endpoint_reference)
receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
httpHeader = {}
response = contextService.dispatchSoapRequest(None, httpHeader, receivedEnv)
print (response.as_xml(pretty=True))
response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
_ns = sdcDevice.mdib.nsmapper # shortcut
query = '*/{}[@{}="{}"]'.format(_ns.docName(Prefix.MSG, 'ContextState'),
_ns.docName(Prefix.XSI,'type'),
_ns.docName(Prefix.PM,'LocationContextState'))
locationContextNodes = response.bodyNode.xpath(query, namespaces=_ns.docNssmap)
self.assertEqual(len(locationContextNodes), 1)
identificationNode = locationContextNodes[0].find(domTag('Identification'))
if sdcDevice is self.sdcDevice_final:
self.assertEqual(identificationNode.get('Extension'), '{}///{}//{}'.format(facility, poc, bed))
else:
self.assertEqual(identificationNode.get('Extension'), '{}/{}/{}'.format(facility, poc, bed))
locationDetailNode = locationContextNodes[0].find(domTag('LocationDetail'))
self.assertEqual(locationDetailNode.get('PoC'), poc)
self.assertEqual(locationDetailNode.get('Bed'), bed)
self.assertEqual(locationDetailNode.get('Facility'), facility)
print (response.as_xml(pretty=True))
def test_wsdl_final(self):
'''
check porttype and action namespaces in wsdl
'''
dev = self.sdcDevice_final
for hosted in dev._handler._hostedServices:
wsdl = etree_.fromstring(hosted._wsdlString)
inputs = wsdl.xpath('//wsdl:input', namespaces=nsmap)#{'wsdl':'http://schemas.xmlsoap.org/wsdl/'})
outputs = wsdl.xpath('//wsdl:output', namespaces=nsmap)#{'wsdl':'http://schemas.xmlsoap.org/wsdl/'})
self.assertGreater(len(inputs), 0)
self.assertGreater(len(outputs), 0)
for src in (inputs, outputs):
for i in inputs:
action_keys = [ k for k in i.attrib.keys() if k.endswith('Action')]
for k in action_keys:
action = i.attrib[k]
self.assertTrue(action.startswith(SDC_v1_Definitions.ActionsNamespace))
def test_metadata_final(self):
'''
verifies that
- 7 hosted services exist ( one per port type)
- every port type has BICEPS Message Model as namespace
'''
dev = self.sdcDevice_final
metaDataNode = dev._handler._mkMetaDataNode()
print (etree_.tostring(metaDataNode))
dpws_hosted = metaDataNode.xpath('//dpws:Hosted', namespaces={'dpws': 'http://docs.oasis-open.org/ws-dd/ns/dpws/2009/01'})
self.assertEqual(len(dpws_hosted), 4) #
for h in dpws_hosted:
dpws_types = h.xpath('dpws:Types', namespaces={'dpws': 'http://docs.oasis-open.org/ws-dd/ns/dpws/2009/01'})
for t in dpws_types:
txt = t.text
port_types = txt.split()
for p in port_types:
ns, value = p.split(':')
self.assertEqual(metaDataNode.nsmap[ns], _sdc_ns)
def suite():
    """Build a TestSuite containing every TestDeviceServices test."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestDeviceServices)
if __name__ == '__main__':
    def mklogger(logFolder):
        """Configure the 'sdc' logger hierarchy for interactive debugging.

        Adds a console handler and a rotating file handler (only once),
        then tunes per-component log levels.
        """
        applog = logging.getLogger('sdc')
        if len(applog.handlers) == 0:
            ch = logging.StreamHandler()
            # create formatter
            formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
            # add formatter to ch
            ch.setFormatter(formatter)
            # add ch to logger
            applog.addHandler(ch)
            ch2 = logging.handlers.RotatingFileHandler(os.path.join(logFolder, 'sdcdevice.log'),
                                                       maxBytes=100000000,
                                                       backupCount=100)
            ch2.setFormatter(formatter)
            # add ch to logger
            applog.addHandler(ch2)
        applog.setLevel(logging.DEBUG)
        # reduce log level for some loggers
        tmp = logging.getLogger('sdc.discover')
        tmp.setLevel(logging.WARN)
        tmp = logging.getLogger('sdc.client.subscr')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.mdib')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.wf')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.Set')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.Get')
        tmp.setLevel(logging.DEBUG)
        tmp = logging.getLogger('sdc.device')
        tmp.setLevel(logging.DEBUG)
        tmp = logging.getLogger('sdc.device.subscrMgr')
        tmp.setLevel(logging.DEBUG)
        logging.getLogger('sdc.device.GetService').setLevel(logging.DEBUG)
        return applog
    # NOTE(review): hard-coded Windows log folder; adjust for other platforms.
    mklogger('c:/tmp')
    # unittest.TextTestRunner(verbosity=2).run(suite())
    # unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromName('test_device_services.TestDeviceServices.test_getMdib'))
    # unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromName('test_device_services.TestDeviceServices.test_getContextStates'))
    # unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromName('test_device_services.TestDeviceServices.test_getMdDescription'))
    unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromName('test_device_services.TestDeviceServices.test_changeAlarmPrio'))
| 49.418605 | 152 | 0.634353 |
import unittest
import os
import uuid
from lxml import etree as etree_
import logging
import logging.handlers
from sdc11073.wsdiscovery import WSDiscoveryWhitelist
from sdc11073.location import SdcLocation
from sdc11073.namespaces import msgTag, domTag, nsmap
from sdc11073.namespaces import Prefix_Namespace as Prefix
from sdc11073.pysoap.soapenvelope import GenericNode, WsAddress, Soap12Envelope, AddressedSoap12Envelope
from sdc11073.definitions_sdc import SDC_v1_Definitions
from tests import mockstuff
# Shorthand namespace URIs used in the assertions below.
_msg_ns = Prefix.MSG.namespace
_sdc_ns = Prefix.SDC.namespace
class TestDeviceServices(unittest.TestCase):
    """System tests for the SDC device's hosted DPWS/BICEPS services.

    Each test builds a schema-validated SOAP request, feeds it directly to
    the device-side dispatchers (no network client involved) and validates
    the response against the BICEPS message-model schema.
    """
    def setUp(self):
        """Start WS-Discovery and a mock device built from the final MDIB."""
        print ('############### setUp {}... ##############'.format(self._testMethodName))
        self.wsDiscovery = WSDiscoveryWhitelist(['127.0.0.1'])
        self.wsDiscovery.start()
        my_uuid = None  # let device create one
        self.sdcDevice_final = mockstuff.SomeDevice.fromMdibFile(self.wsDiscovery, my_uuid, '70041_MDIB_Final.xml')
        self.sdcDevice_final.startAll()
        self._alldevices = (self.sdcDevice_final,)
        print ('############### setUp done {} ##############'.format(self._testMethodName))
    def tearDown(self):
        """Stop all started devices, then shut down WS-Discovery."""
        print ('############### tearDown {}... ##############'.format(self._testMethodName))
        for d in self._alldevices:
            if d:
                d.stopAll()
        self.wsDiscovery.stop()
        print ('############### tearDown {} done ##############'.format(self._testMethodName))
    def _mkGetRequest(self, sdcDevice, porttype, method, endpoint_reference):
        """Build a schema-validated SOAP 1.2 request envelope for *method*."""
        if sdcDevice is self.sdcDevice_final:
            ns = sdcDevice.mdib.sdc_definitions.DPWS_SDCNamespace
        else:
            ns = sdcDevice.mdib.sdc_definitions.MessageModelNamespace
        action = '{}/{}/{}'.format(ns, porttype, method)
        bodyNode = etree_.Element(msgTag(method))
        soapEnvelope = Soap12Envelope(Prefix.partialMap(Prefix.S12, Prefix.WSA, Prefix.MSG))
        identifier = uuid.uuid4().urn
        soapEnvelope.addHeaderObject(WsAddress(messageId=identifier,
                                               action=action,
                                               to=endpoint_reference))
        soapEnvelope.addBodyObject(GenericNode(bodyNode))
        soapEnvelope.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
        return soapEnvelope
    def test_dispatch_final(self):
        self._test_dispatch(self.sdcDevice_final)
    def _test_dispatch(self, sdcDevice):
        """Verify that the HTTP dispatcher routes Get and Context requests."""
        dispatcher = sdcDevice._handler._httpServerThread.devices_dispatcher
        endpoint_reference = sdcDevice._handler._GetDispatcher.hostingService.epr
        getService = sdcDevice._handler._GetDispatcher
        getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdib', endpoint_reference)
        httpHeader = {}
        response_string = dispatcher.on_post(endpoint_reference, httpHeader, getEnv.as_xml())
        self.assertTrue('/{}/GetMdibResponse'.format(getService.port_type_string).encode('utf-8') in response_string)
        endpoint_reference = sdcDevice._handler._ContextDispatcher.hostingService.epr
        contextService = sdcDevice._handler._ContextDispatcher
        getEnv = self._mkGetRequest(sdcDevice, contextService.port_type_string, 'GetContextStates', endpoint_reference)
        httpHeader = {}
        response_string = dispatcher.on_post(endpoint_reference, httpHeader, getEnv.as_xml())
        self.assertTrue('/{}/GetContextStatesResponse'.format(contextService.port_type_string).encode('utf-8') in response_string)
    def test_getMdib(self):
        """GetMdib returns a schema-valid response."""
        for sdcDevice in self._alldevices:
            getService = sdcDevice._handler._GetDispatcher
            endpoint_reference = '123'
            getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdib', endpoint_reference)
            receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
            httpHeader = {}
            response = getService._onGetMdib(httpHeader, receivedEnv)
            response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
    def test_getMdState(self):
        """GetMdState returns a schema-valid response."""
        for sdcDevice in self._alldevices:
            getService = sdcDevice._handler._GetDispatcher
            endpoint_reference = '123'
            getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdState', endpoint_reference)
            receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
            httpHeader = {}
            response = getService.dispatchSoapRequest(None, httpHeader, receivedEnv)
            response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
    def test_getMdDescription(self):
        """GetMdDescription returns a schema-valid response."""
        for sdcDevice in self._alldevices:
            getService = sdcDevice._handler._GetDispatcher
            endpoint_reference = '123'
            getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdDescription', endpoint_reference)
            receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
            httpHeader = {}
            response = getService.dispatchSoapRequest(None, httpHeader, receivedEnv)
            response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
    def test_changeAlarmPrio(self):
        """GetMdDescription stays schema-valid after a descriptor update (defect SDCSIM-129)."""
        for sdcDevice in self._alldevices:
            getService = sdcDevice._handler._GetDispatcher
            endpoint_reference = '123'
            with sdcDevice.mdib.mdibUpdateTransaction() as tr:
                alarmConditionDescriptor = tr.getDescriptor('0xD3C00109')
                alarmConditionDescriptor.Priority = 'Lo'
            getEnv = self._mkGetRequest(sdcDevice, getService.port_type_string, 'GetMdDescription', endpoint_reference)
            receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
            httpHeader = {}
            response = getService.dispatchSoapRequest(None, httpHeader, receivedEnv)
            response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
    def test_getContextStates(self):
        """After setLocation, GetContextStates reports exactly one matching LocationContextState."""
        facility = 'HOSP42'
        poc = 'Care Unit 1'
        bed = 'my bed'
        loc = SdcLocation(fac=facility, poc=poc, bed=bed)
        for sdcDevice in self._alldevices:
            sdcDevice.mdib.setLocation(loc)
            contextService = sdcDevice._handler._ContextDispatcher
            endpoint_reference = '123'
            getEnv = self._mkGetRequest(sdcDevice, contextService.port_type_string, 'GetContextStates', endpoint_reference)
            receivedEnv = AddressedSoap12Envelope.fromXMLString(getEnv.as_xml())
            httpHeader = {}
            response = contextService.dispatchSoapRequest(None, httpHeader, receivedEnv)
            print (response.as_xml(pretty=True))
            response.validateBody(sdcDevice.mdib.bicepsSchema.bmmSchema)
            _ns = sdcDevice.mdib.nsmapper
            query = '*/{}[@{}="{}"]'.format(_ns.docName(Prefix.MSG, 'ContextState'),
                                            _ns.docName(Prefix.XSI, 'type'),
                                            _ns.docName(Prefix.PM, 'LocationContextState'))
            locationContextNodes = response.bodyNode.xpath(query, namespaces=_ns.docNssmap)
            self.assertEqual(len(locationContextNodes), 1)
            identificationNode = locationContextNodes[0].find(domTag('Identification'))
            if sdcDevice is self.sdcDevice_final:
                self.assertEqual(identificationNode.get('Extension'), '{}///{}//{}'.format(facility, poc, bed))
            else:
                self.assertEqual(identificationNode.get('Extension'), '{}/{}/{}'.format(facility, poc, bed))
            locationDetailNode = locationContextNodes[0].find(domTag('LocationDetail'))
            self.assertEqual(locationDetailNode.get('PoC'), poc)
            self.assertEqual(locationDetailNode.get('Bed'), bed)
            self.assertEqual(locationDetailNode.get('Facility'), facility)
            print (response.as_xml(pretty=True))
    def test_wsdl_final(self):
        """Check port type and action namespaces in every hosted WSDL."""
        dev = self.sdcDevice_final
        for hosted in dev._handler._hostedServices:
            wsdl = etree_.fromstring(hosted._wsdlString)
            inputs = wsdl.xpath('//wsdl:input', namespaces=nsmap)
            outputs = wsdl.xpath('//wsdl:output', namespaces=nsmap)
            self.assertGreater(len(inputs), 0)
            self.assertGreater(len(outputs), 0)
            for src in (inputs, outputs):
                # BUGFIX: iterate over 'src' (was 'inputs'), so the actions of
                # the wsdl:output elements are actually checked as well.
                for i in src:
                    action_keys = [k for k in i.attrib.keys() if k.endswith('Action')]
                    for k in action_keys:
                        action = i.attrib[k]
                        self.assertTrue(action.startswith(SDC_v1_Definitions.ActionsNamespace))
    def test_metadata_final(self):
        """Four hosted services exist and all port types use the SDC namespace."""
        dev = self.sdcDevice_final
        metaDataNode = dev._handler._mkMetaDataNode()
        print (etree_.tostring(metaDataNode))
        dpws_hosted = metaDataNode.xpath('//dpws:Hosted', namespaces={'dpws': 'http://docs.oasis-open.org/ws-dd/ns/dpws/2009/01'})
        self.assertEqual(len(dpws_hosted), 4)
        for h in dpws_hosted:
            dpws_types = h.xpath('dpws:Types', namespaces={'dpws': 'http://docs.oasis-open.org/ws-dd/ns/dpws/2009/01'})
            for t in dpws_types:
                txt = t.text
                port_types = txt.split()
                for p in port_types:
                    ns, value = p.split(':')
                    self.assertEqual(metaDataNode.nsmap[ns], _sdc_ns)
def suite():
    """Build a TestSuite containing every TestDeviceServices test."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestDeviceServices)
if __name__ == '__main__':
    def mklogger(logFolder):
        """Configure the 'sdc' logger hierarchy for interactive debugging.

        Adds a console handler and a rotating file handler (only once),
        then tunes per-component log levels.
        """
        applog = logging.getLogger('sdc')
        if len(applog.handlers) == 0:
            ch = logging.StreamHandler()
            formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
            ch.setFormatter(formatter)
            applog.addHandler(ch)
            ch2 = logging.handlers.RotatingFileHandler(os.path.join(logFolder,'sdcdevice.log'),
                                                       maxBytes=100000000,
                                                       backupCount=100)
            ch2.setFormatter(formatter)
            applog.addHandler(ch2)
        applog.setLevel(logging.DEBUG)
        # reduce log level for chatty components
        tmp = logging.getLogger('sdc.discover')
        tmp.setLevel(logging.WARN)
        tmp = logging.getLogger('sdc.client.subscr')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.mdib')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.wf')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.Set')
        tmp.setLevel(logging.INFO)
        tmp = logging.getLogger('sdc.client.Get')
        tmp.setLevel(logging.DEBUG)
        tmp = logging.getLogger('sdc.device')
        tmp.setLevel(logging.DEBUG)
        tmp = logging.getLogger('sdc.device.subscrMgr')
        tmp.setLevel(logging.DEBUG)
        logging.getLogger('sdc.device.GetService').setLevel(logging.DEBUG)
        return applog
    # NOTE(review): hard-coded Windows log folder; adjust for other platforms.
    mklogger('c:/tmp')
    unittest.TextTestRunner(verbosity=2).run(unittest.TestLoader().loadTestsFromName('test_device_services.TestDeviceServices.test_changeAlarmPrio'))
| true | true |
f7fb7cbb7cdf542974c4decb099b005b376b41fe | 1,296 | py | Python | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/RunExecutionPlanRequest.py | xiaozhao1/aliyun-openapi-python-sdk | 7297b69619fbe18a053ce552df9ab378b7c5719f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/RunExecutionPlanRequest.py | xiaozhao1/aliyun-openapi-python-sdk | 7297b69619fbe18a053ce552df9ab378b7c5719f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/RunExecutionPlanRequest.py | xiaozhao1/aliyun-openapi-python-sdk | 7297b69619fbe18a053ce552df9ab378b7c5719f | [
"Apache-2.0"
] | 1 | 2021-01-26T05:01:42.000Z | 2021-01-26T05:01:42.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class RunExecutionPlanRequest(RpcRequest):
	"""RPC request wrapper for the Aliyun EMR ``RunExecutionPlan`` API (version 2016-04-08)."""

	def __init__(self):
		RpcRequest.__init__(self, 'Emr', '2016-04-08', 'RunExecutionPlan')

	def get_ResourceOwnerId(self):
		# Query parameter: ResourceOwnerId (account that owns the resource).
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_Id(self):
		# Query parameter: Id — presumably the execution plan id; confirm against the EMR API docs.
		return self.get_query_params().get('Id')

	def set_Id(self,Id):
		self.add_query_param('Id',Id)
from aliyunsdkcore.request import RpcRequest
class RunExecutionPlanRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'RunExecutionPlan')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id) | true | true |
f7fb7ce58aeccb0059790a80d8d9416427e25e58 | 1,438 | py | Python | pharmrep/reports/views.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | pharmrep/reports/views.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | pharmrep/reports/views.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | from django.shortcuts import render
#from django.db.models.aggregates import Sum
from product.models import Customer, Product, Payment, Rep
from core.views import BaseActivityListView
def last_activity(request):
    """Render the last-activity report listing every sales rep."""
    return render(request, 'reports/last_activity.html',
                  {'reps': Rep.objects.all()})
def balance(request):
    """Render the customer balance report over all customers.

    The template receives every ``Customer``; any balance figure
    (sales minus payments) is expected to be derived in the template or
    on the model.  An earlier annotate(Sum(...))-based implementation
    was left here commented out and has been removed as dead code.
    """
    return render(request, 'reports/balance.html',
                  {'customers': Customer.objects.all()})
def performance(request):
    """Render the product performance report over all products.

    A commented-out annotate(Sum(...)) variant was removed as dead code;
    the template works directly from the ``Product`` queryset.
    """
    return render(request, 'reports/performance.html',
                  {'products': Product.objects.all()})
class CollectionListView(BaseActivityListView):
    """List view of payments, newest receipt first.

    NOTE(review): ``order_by`` is not a standard Django ListView option
    (that attribute is ``ordering``); assumes ``BaseActivityListView``
    consumes ``order_by`` itself — confirm in core.views.
    """
    model = Payment
    order_by = '-receipt_date'
    template_name = 'reports/collection.html'
def collection(request):
    """Render every payment as the collections report."""
    context = {'collections': Payment.objects.all()}
    return render(request, 'reports/collection.html', context)
| 33.44186 | 73 | 0.691933 | from django.shortcuts import render
from product.models import Customer, Product, Payment, Rep
from core.views import BaseActivityListView
def last_activity(request):
return render(request, 'reports/last_activity.html',
{'reps': Rep.objects.all()})
def balance(request):
return render(request, 'reports/balance.html',
{'customers': Customer.objects.all()})
def performance(request):
return render(request, 'reports/performance.html',
{'products': Product.objects.all()})
class CollectionListView(BaseActivityListView):
model = Payment
order_by = '-receipt_date'
template_name = 'reports/collection.html'
def collection(request):
collections = Payment.objects.all()
return render(
request, 'reports/collection.html', {'collections': collections})
| true | true |
f7fb7d3f66ec949e8d72964f2c7bc87a4bdffefe | 475 | py | Python | wafw00f/plugins/secureiis.py | biscuitehh/wafw00f | b1a08122ea3d65e2aaaa5120231cca6c37851c5b | [
"BSD-3-Clause"
] | 1 | 2020-01-17T08:09:48.000Z | 2020-01-17T08:09:48.000Z | wafw00f/plugins/secureiis.py | tlsloves/wafw00f | 9682cdbdffc78150719b58390f8c5552b40a40b6 | [
"BSD-3-Clause"
] | null | null | null | wafw00f/plugins/secureiis.py | tlsloves/wafw00f | 9682cdbdffc78150719b58390f8c5552b40a40b6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Copyright (C) 2019, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'eEye SecureIIS (BeyondTrust)'
def is_waf(self):
    """Fingerprint eEye SecureIIS (BeyondTrust): True if any marker matches."""
    # Evaluate every fingerprint eagerly (same as the original list literal),
    # then report whether at least one matched.
    detections = []
    detections.append(self.matchContent(r'SecureIIS is an internet security application'))
    detections.append(self.matchContent(r'Download SecureIIS Personal Edition'))
    detections.append(self.matchContent(r'https?://www\.eeye\.com/Secure\-?IIS'))
    return any(detections)
NAME = 'eEye SecureIIS (BeyondTrust)'
def is_waf(self):
schemes = [
self.matchContent(r'SecureIIS is an internet security application'),
self.matchContent(r'Download SecureIIS Personal Edition'),
self.matchContent(r'https?://www\.eeye\.com/Secure\-?IIS')
]
if any(i for i in schemes):
return True
return False | true | true |
f7fb7dbcb17db3de7c0030cf3949e04eeea2df2d | 10,102 | py | Python | gluoncv/model_zoo/model_zoo.py | DetectionTeamUCAS/FCOS_GluonCV | 7d032c43bdeb748236d9f46043794eaa97bf3f1b | [
"Apache-2.0"
] | 28 | 2019-05-08T04:52:28.000Z | 2020-04-12T07:22:53.000Z | gluoncv/model_zoo/model_zoo.py | DetectionTeamUCAS/FCOS_GluonCV | 7d032c43bdeb748236d9f46043794eaa97bf3f1b | [
"Apache-2.0"
] | 2 | 2019-05-13T01:46:39.000Z | 2019-05-20T08:04:36.000Z | gluoncv/model_zoo/model_zoo.py | DetectionTeamUCAS/FCOS_GluonCV | 7d032c43bdeb748236d9f46043794eaa97bf3f1b | [
"Apache-2.0"
] | 2 | 2020-07-14T14:50:18.000Z | 2022-01-27T07:07:41.000Z | # pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from .alexnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .deeplabv3 import *
from .densenet import *
from .faster_rcnn import *
from .fcn import *
from .inception import *
from .mask_rcnn import *
from .mobilenet import *
from .nasnet import *
from .pruned_resnet.resnetv1b_pruned import *
from .pspnet import *
from .quantized import *
from .residual_attentionnet import *
from .resnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .simple_pose.simple_pose_resnet import *
from .squeezenet import *
from .ssd import *
from .vgg import *
from .yolo import *
from .fcos import *
__all__ = ['get_model', 'get_model_list']
_models = {
'resnet18_v1': resnet18_v1,
'resnet34_v1': resnet34_v1,
'resnet50_v1': resnet50_v1,
'resnet101_v1': resnet101_v1,
'resnet152_v1': resnet152_v1,
'resnet18_v2': resnet18_v2,
'resnet34_v2': resnet34_v2,
'resnet50_v2': resnet50_v2,
'resnet101_v2': resnet101_v2,
'resnet152_v2': resnet152_v2,
'se_resnet18_v1': se_resnet18_v1,
'se_resnet34_v1': se_resnet34_v1,
'se_resnet50_v1': se_resnet50_v1,
'se_resnet101_v1': se_resnet101_v1,
'se_resnet152_v1': se_resnet152_v1,
'se_resnet18_v2': se_resnet18_v2,
'se_resnet34_v2': se_resnet34_v2,
'se_resnet50_v2': se_resnet50_v2,
'se_resnet101_v2': se_resnet101_v2,
'se_resnet152_v2': se_resnet152_v2,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'vgg11_bn': vgg11_bn,
'vgg13_bn': vgg13_bn,
'vgg16_bn': vgg16_bn,
'vgg19_bn': vgg19_bn,
'alexnet': alexnet,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'squeezenet1.0': squeezenet1_0,
'squeezenet1.1': squeezenet1_1,
'inceptionv3': inception_v3,
'mobilenet1.0': mobilenet1_0,
'mobilenet0.75': mobilenet0_75,
'mobilenet0.5': mobilenet0_5,
'mobilenet0.25': mobilenet0_25,
'mobilenetv2_1.0': mobilenet_v2_1_0,
'mobilenetv2_0.75': mobilenet_v2_0_75,
'mobilenetv2_0.5': mobilenet_v2_0_5,
'mobilenetv2_0.25': mobilenet_v2_0_25,
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_300_vgg16_atrous_custom': ssd_300_vgg16_atrous_custom,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_custom': ssd_512_vgg16_atrous_custom,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet18_v1_coco': ssd_512_resnet18_v1_coco,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet50_v1_custom': ssd_512_resnet50_v1_custom,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1.0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1.0_coco': ssd_512_mobilenet1_0_coco,
'ssd_512_mobilenet1.0_custom': ssd_512_mobilenet1_0_custom,
'faster_rcnn_resnet50_v1b_voc': faster_rcnn_resnet50_v1b_voc,
'faster_rcnn_resnet50_v1b_coco': faster_rcnn_resnet50_v1b_coco,
'faster_rcnn_fpn_resnet50_v1b_coco': faster_rcnn_fpn_resnet50_v1b_coco,
'faster_rcnn_fpn_bn_resnet50_v1b_coco': faster_rcnn_fpn_bn_resnet50_v1b_coco,
'faster_rcnn_resnet50_v1b_custom': faster_rcnn_resnet50_v1b_custom,
'faster_rcnn_resnet101_v1d_voc': faster_rcnn_resnet101_v1d_voc,
'faster_rcnn_resnet101_v1d_coco': faster_rcnn_resnet101_v1d_coco,
'faster_rcnn_fpn_resnet101_v1d_coco': faster_rcnn_fpn_resnet101_v1d_coco,
'faster_rcnn_resnet101_v1d_custom': faster_rcnn_resnet101_v1d_custom,
'mask_rcnn_resnet50_v1b_coco': mask_rcnn_resnet50_v1b_coco,
'mask_rcnn_fpn_resnet50_v1b_coco': mask_rcnn_fpn_resnet50_v1b_coco,
'mask_rcnn_resnet101_v1d_coco': mask_rcnn_resnet101_v1d_coco,
'mask_rcnn_fpn_resnet101_v1d_coco': mask_rcnn_fpn_resnet101_v1d_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc': get_fcn_resnet50_voc,
'fcn_resnet101_coco': get_fcn_resnet101_coco,
'fcn_resnet101_voc': get_fcn_resnet101_voc,
'fcn_resnet50_ade': get_fcn_resnet50_ade,
'fcn_resnet101_ade': get_fcn_resnet101_ade,
'psp_resnet101_coco': get_psp_resnet101_coco,
'psp_resnet101_voc': get_psp_resnet101_voc,
'psp_resnet50_ade': get_psp_resnet50_ade,
'psp_resnet101_ade': get_psp_resnet101_ade,
'psp_resnet101_citys': get_psp_resnet101_citys,
'deeplab_resnet101_coco': get_deeplab_resnet101_coco,
'deeplab_resnet101_voc': get_deeplab_resnet101_voc,
'deeplab_resnet152_coco': get_deeplab_resnet152_coco,
'deeplab_resnet152_voc': get_deeplab_resnet152_voc,
'deeplab_resnet50_ade': get_deeplab_resnet50_ade,
'deeplab_resnet101_ade': get_deeplab_resnet101_ade,
'resnet18_v1b': resnet18_v1b,
'resnet34_v1b': resnet34_v1b,
'resnet50_v1b': resnet50_v1b,
'resnet50_v1b_gn': resnet50_v1b_gn,
'resnet101_v1b_gn': resnet101_v1b_gn,
'resnet101_v1b': resnet101_v1b,
'resnet152_v1b': resnet152_v1b,
'resnet50_v1c': resnet50_v1c,
'resnet101_v1c': resnet101_v1c,
'resnet152_v1c': resnet152_v1c,
'resnet50_v1d': resnet50_v1d,
'resnet101_v1d': resnet101_v1d,
'resnet152_v1d': resnet152_v1d,
'resnet50_v1e': resnet50_v1e,
'resnet101_v1e': resnet101_v1e,
'resnet152_v1e': resnet152_v1e,
'resnet50_v1s': resnet50_v1s,
'resnet101_v1s': resnet101_v1s,
'resnet152_v1s': resnet152_v1s,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'se_resnext50_32x4d': se_resnext50_32x4d,
'se_resnext101_32x4d': se_resnext101_32x4d,
'se_resnext101_64x4d': se_resnext101_64x4d,
'senet_154': senet_154,
'darknet53': darknet53,
'yolo3_darknet53_coco': yolo3_darknet53_coco,
'yolo3_darknet53_voc': yolo3_darknet53_voc,
'yolo3_darknet53_custom': yolo3_darknet53_custom,
'yolo3_mobilenet1.0_coco': yolo3_mobilenet1_0_coco,
'yolo3_mobilenet1.0_voc': yolo3_mobilenet1_0_voc,
'yolo3_mobilenet1.0_custom': yolo3_mobilenet1_0_custom,
'nasnet_4_1056': nasnet_4_1056,
'nasnet_5_1538': nasnet_5_1538,
'nasnet_7_1920': nasnet_7_1920,
'nasnet_6_4032': nasnet_6_4032,
'simple_pose_resnet18_v1b': simple_pose_resnet18_v1b,
'simple_pose_resnet50_v1b': simple_pose_resnet50_v1b,
'simple_pose_resnet101_v1b': simple_pose_resnet101_v1b,
'simple_pose_resnet152_v1b': simple_pose_resnet152_v1b,
'simple_pose_resnet50_v1d': simple_pose_resnet50_v1d,
'simple_pose_resnet101_v1d': simple_pose_resnet101_v1d,
'simple_pose_resnet152_v1d': simple_pose_resnet152_v1d,
'residualattentionnet56': residualattentionnet56,
'residualattentionnet92': residualattentionnet92,
'residualattentionnet128': residualattentionnet128,
'residualattentionnet164': residualattentionnet164,
'residualattentionnet200': residualattentionnet200,
'residualattentionnet236': residualattentionnet236,
'residualattentionnet452': residualattentionnet452,
'cifar_residualattentionnet56': cifar_residualattentionnet56,
'cifar_residualattentionnet92': cifar_residualattentionnet92,
'cifar_residualattentionnet452': cifar_residualattentionnet452,
'resnet18_v1b_0.89': resnet18_v1b_89,
'resnet50_v1d_0.86': resnet50_v1d_86,
'resnet50_v1d_0.48': resnet50_v1d_48,
'resnet50_v1d_0.37': resnet50_v1d_37,
'resnet50_v1d_0.11': resnet50_v1d_11,
'resnet101_v1d_0.76': resnet101_v1d_76,
'resnet101_v1d_0.73': resnet101_v1d_73,
'mobilenet1.0_int8': mobilenet1_0_int8,
'resnet50_v1_int8': resnet50_v1_int8,
'ssd_300_vgg16_atrous_voc_int8': ssd_300_vgg16_atrous_voc_int8,
'ssd_512_mobilenet1.0_voc_int8': ssd_512_mobilenet1_0_voc_int8,
'ssd_512_resnet50_v1_voc_int8': ssd_512_resnet50_v1_voc_int8,
'ssd_512_vgg16_atrous_voc_int8': ssd_512_vgg16_atrous_voc_int8,
'fcos_resnet50_v1b_coco': fcos_resnet50_v1b_coco,
'fcos_resnet101_v1d_coco': fcos_resnet101_v1d_coco,
'fcos_resnet50_v1_coco': fcos_resnet50_v1_coco,
'fcos_se_resnext101_64x4d_coco': fcos_se_resnext101_64x4d_coco,
}
def get_model(name, **kwargs):
    """Returns a pre-defined model by name

    Parameters
    ----------
    name : str
        Name of the model.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    classes : int
        Number of classes for the output layer.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    HybridBlock
        The model.
    """
    name = name.lower()
    try:
        creator = _models[name]
    except KeyError:
        # Unknown name: list every supported model in the error message.
        supported = '\n\t'.join(sorted(_models.keys()))
        raise ValueError(
            '"%s" is not among the following model list:\n\t%s' % (name, supported))
    return creator(**kwargs)
def get_model_list():
    """Get the entire list of model names in model_zoo.

    Returns
    -------
    list of str
        Entire list of model names in model_zoo.
    """
    # Materialize the dict view so the documented "list of str" return type
    # holds and callers can index/sort the result without surprising them
    # with a live view of the registry.
    return list(_models.keys())
| 40.408 | 88 | 0.763017 |
from .alexnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .deeplabv3 import *
from .densenet import *
from .faster_rcnn import *
from .fcn import *
from .inception import *
from .mask_rcnn import *
from .mobilenet import *
from .nasnet import *
from .pruned_resnet.resnetv1b_pruned import *
from .pspnet import *
from .quantized import *
from .residual_attentionnet import *
from .resnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .simple_pose.simple_pose_resnet import *
from .squeezenet import *
from .ssd import *
from .vgg import *
from .yolo import *
from .fcos import *
__all__ = ['get_model', 'get_model_list']
_models = {
'resnet18_v1': resnet18_v1,
'resnet34_v1': resnet34_v1,
'resnet50_v1': resnet50_v1,
'resnet101_v1': resnet101_v1,
'resnet152_v1': resnet152_v1,
'resnet18_v2': resnet18_v2,
'resnet34_v2': resnet34_v2,
'resnet50_v2': resnet50_v2,
'resnet101_v2': resnet101_v2,
'resnet152_v2': resnet152_v2,
'se_resnet18_v1': se_resnet18_v1,
'se_resnet34_v1': se_resnet34_v1,
'se_resnet50_v1': se_resnet50_v1,
'se_resnet101_v1': se_resnet101_v1,
'se_resnet152_v1': se_resnet152_v1,
'se_resnet18_v2': se_resnet18_v2,
'se_resnet34_v2': se_resnet34_v2,
'se_resnet50_v2': se_resnet50_v2,
'se_resnet101_v2': se_resnet101_v2,
'se_resnet152_v2': se_resnet152_v2,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'vgg11_bn': vgg11_bn,
'vgg13_bn': vgg13_bn,
'vgg16_bn': vgg16_bn,
'vgg19_bn': vgg19_bn,
'alexnet': alexnet,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'squeezenet1.0': squeezenet1_0,
'squeezenet1.1': squeezenet1_1,
'inceptionv3': inception_v3,
'mobilenet1.0': mobilenet1_0,
'mobilenet0.75': mobilenet0_75,
'mobilenet0.5': mobilenet0_5,
'mobilenet0.25': mobilenet0_25,
'mobilenetv2_1.0': mobilenet_v2_1_0,
'mobilenetv2_0.75': mobilenet_v2_0_75,
'mobilenetv2_0.5': mobilenet_v2_0_5,
'mobilenetv2_0.25': mobilenet_v2_0_25,
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_300_vgg16_atrous_custom': ssd_300_vgg16_atrous_custom,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_custom': ssd_512_vgg16_atrous_custom,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet18_v1_coco': ssd_512_resnet18_v1_coco,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet50_v1_custom': ssd_512_resnet50_v1_custom,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1.0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1.0_coco': ssd_512_mobilenet1_0_coco,
'ssd_512_mobilenet1.0_custom': ssd_512_mobilenet1_0_custom,
'faster_rcnn_resnet50_v1b_voc': faster_rcnn_resnet50_v1b_voc,
'faster_rcnn_resnet50_v1b_coco': faster_rcnn_resnet50_v1b_coco,
'faster_rcnn_fpn_resnet50_v1b_coco': faster_rcnn_fpn_resnet50_v1b_coco,
'faster_rcnn_fpn_bn_resnet50_v1b_coco': faster_rcnn_fpn_bn_resnet50_v1b_coco,
'faster_rcnn_resnet50_v1b_custom': faster_rcnn_resnet50_v1b_custom,
'faster_rcnn_resnet101_v1d_voc': faster_rcnn_resnet101_v1d_voc,
'faster_rcnn_resnet101_v1d_coco': faster_rcnn_resnet101_v1d_coco,
'faster_rcnn_fpn_resnet101_v1d_coco': faster_rcnn_fpn_resnet101_v1d_coco,
'faster_rcnn_resnet101_v1d_custom': faster_rcnn_resnet101_v1d_custom,
'mask_rcnn_resnet50_v1b_coco': mask_rcnn_resnet50_v1b_coco,
'mask_rcnn_fpn_resnet50_v1b_coco': mask_rcnn_fpn_resnet50_v1b_coco,
'mask_rcnn_resnet101_v1d_coco': mask_rcnn_resnet101_v1d_coco,
'mask_rcnn_fpn_resnet101_v1d_coco': mask_rcnn_fpn_resnet101_v1d_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc': get_fcn_resnet50_voc,
'fcn_resnet101_coco': get_fcn_resnet101_coco,
'fcn_resnet101_voc': get_fcn_resnet101_voc,
'fcn_resnet50_ade': get_fcn_resnet50_ade,
'fcn_resnet101_ade': get_fcn_resnet101_ade,
'psp_resnet101_coco': get_psp_resnet101_coco,
'psp_resnet101_voc': get_psp_resnet101_voc,
'psp_resnet50_ade': get_psp_resnet50_ade,
'psp_resnet101_ade': get_psp_resnet101_ade,
'psp_resnet101_citys': get_psp_resnet101_citys,
'deeplab_resnet101_coco': get_deeplab_resnet101_coco,
'deeplab_resnet101_voc': get_deeplab_resnet101_voc,
'deeplab_resnet152_coco': get_deeplab_resnet152_coco,
'deeplab_resnet152_voc': get_deeplab_resnet152_voc,
'deeplab_resnet50_ade': get_deeplab_resnet50_ade,
'deeplab_resnet101_ade': get_deeplab_resnet101_ade,
'resnet18_v1b': resnet18_v1b,
'resnet34_v1b': resnet34_v1b,
'resnet50_v1b': resnet50_v1b,
'resnet50_v1b_gn': resnet50_v1b_gn,
'resnet101_v1b_gn': resnet101_v1b_gn,
'resnet101_v1b': resnet101_v1b,
'resnet152_v1b': resnet152_v1b,
'resnet50_v1c': resnet50_v1c,
'resnet101_v1c': resnet101_v1c,
'resnet152_v1c': resnet152_v1c,
'resnet50_v1d': resnet50_v1d,
'resnet101_v1d': resnet101_v1d,
'resnet152_v1d': resnet152_v1d,
'resnet50_v1e': resnet50_v1e,
'resnet101_v1e': resnet101_v1e,
'resnet152_v1e': resnet152_v1e,
'resnet50_v1s': resnet50_v1s,
'resnet101_v1s': resnet101_v1s,
'resnet152_v1s': resnet152_v1s,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'se_resnext50_32x4d': se_resnext50_32x4d,
'se_resnext101_32x4d': se_resnext101_32x4d,
'se_resnext101_64x4d': se_resnext101_64x4d,
'senet_154': senet_154,
'darknet53': darknet53,
'yolo3_darknet53_coco': yolo3_darknet53_coco,
'yolo3_darknet53_voc': yolo3_darknet53_voc,
'yolo3_darknet53_custom': yolo3_darknet53_custom,
'yolo3_mobilenet1.0_coco': yolo3_mobilenet1_0_coco,
'yolo3_mobilenet1.0_voc': yolo3_mobilenet1_0_voc,
'yolo3_mobilenet1.0_custom': yolo3_mobilenet1_0_custom,
'nasnet_4_1056': nasnet_4_1056,
'nasnet_5_1538': nasnet_5_1538,
'nasnet_7_1920': nasnet_7_1920,
'nasnet_6_4032': nasnet_6_4032,
'simple_pose_resnet18_v1b': simple_pose_resnet18_v1b,
'simple_pose_resnet50_v1b': simple_pose_resnet50_v1b,
'simple_pose_resnet101_v1b': simple_pose_resnet101_v1b,
'simple_pose_resnet152_v1b': simple_pose_resnet152_v1b,
'simple_pose_resnet50_v1d': simple_pose_resnet50_v1d,
'simple_pose_resnet101_v1d': simple_pose_resnet101_v1d,
'simple_pose_resnet152_v1d': simple_pose_resnet152_v1d,
'residualattentionnet56': residualattentionnet56,
'residualattentionnet92': residualattentionnet92,
'residualattentionnet128': residualattentionnet128,
'residualattentionnet164': residualattentionnet164,
'residualattentionnet200': residualattentionnet200,
'residualattentionnet236': residualattentionnet236,
'residualattentionnet452': residualattentionnet452,
'cifar_residualattentionnet56': cifar_residualattentionnet56,
'cifar_residualattentionnet92': cifar_residualattentionnet92,
'cifar_residualattentionnet452': cifar_residualattentionnet452,
'resnet18_v1b_0.89': resnet18_v1b_89,
'resnet50_v1d_0.86': resnet50_v1d_86,
'resnet50_v1d_0.48': resnet50_v1d_48,
'resnet50_v1d_0.37': resnet50_v1d_37,
'resnet50_v1d_0.11': resnet50_v1d_11,
'resnet101_v1d_0.76': resnet101_v1d_76,
'resnet101_v1d_0.73': resnet101_v1d_73,
'mobilenet1.0_int8': mobilenet1_0_int8,
'resnet50_v1_int8': resnet50_v1_int8,
'ssd_300_vgg16_atrous_voc_int8': ssd_300_vgg16_atrous_voc_int8,
'ssd_512_mobilenet1.0_voc_int8': ssd_512_mobilenet1_0_voc_int8,
'ssd_512_resnet50_v1_voc_int8': ssd_512_resnet50_v1_voc_int8,
'ssd_512_vgg16_atrous_voc_int8': ssd_512_vgg16_atrous_voc_int8,
'fcos_resnet50_v1b_coco': fcos_resnet50_v1b_coco,
'fcos_resnet101_v1d_coco': fcos_resnet101_v1d_coco,
'fcos_resnet50_v1_coco': fcos_resnet50_v1_coco,
'fcos_se_resnext101_64x4d_coco': fcos_se_resnext101_64x4d_coco,
}
def get_model(name, **kwargs):
name = name.lower()
if name not in _models:
err_str = '"%s" is not among the following model list:\n\t' % (name)
err_str += '%s' % ('\n\t'.join(sorted(_models.keys())))
raise ValueError(err_str)
net = _models[name](**kwargs)
return net
def get_model_list():
return _models.keys()
| true | true |
f7fb7e941bb5541cd9711a61fceff96a5672ae33 | 1,713 | py | Python | src/codespaces/azext_codespaces/vendored_sdks/vsonline/models/resource.py | tilnl/azure-cli-extensions | ef9946bbcde34bb51343554a8f2a8dedd1f7d44a | [
"MIT"
] | null | null | null | src/codespaces/azext_codespaces/vendored_sdks/vsonline/models/resource.py | tilnl/azure-cli-extensions | ef9946bbcde34bb51343554a8f2a8dedd1f7d44a | [
"MIT"
] | null | null | null | src/codespaces/azext_codespaces/vendored_sdks/vsonline/models/resource.py | tilnl/azure-cli-extensions | ef9946bbcde34bb51343554a8f2a8dedd1f7d44a | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Resource(Model):
    """An Azure resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    :param tags: Tags for the VS Online Account
    :type tags: dict[str, str]
    :param location: Region where the Azure resource is located.
    :type location: str
    """

    # Server-populated fields; msrest rejects client-supplied values for these.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # msrest (de)serialization map: attribute -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # Read-only fields start empty; the service fills them in responses.
        self.id = None
        self.name = None
        self.type = None
        self.tags = kwargs.get('tags', None)
        self.location = kwargs.get('location', None)
| 31.722222 | 76 | 0.553999 |
from msrest.serialization import Model
class Resource(Model):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.tags = kwargs.get('tags', None)
self.location = kwargs.get('location', None)
| true | true |
f7fb7ff02e1360a4e40c3a6bf1888be3e22bc26d | 875 | py | Python | dJango/detect0/qa.py | GursimranSinghKahlon/EdTech | 5944019ccb3d054dbb1f8eb8a81363cb7bae9de6 | [
"MIT"
] | 1 | 2019-05-25T04:22:00.000Z | 2019-05-25T04:22:00.000Z | dJango/detect0/qa.py | GursimranSinghKahlon/EdTech | 5944019ccb3d054dbb1f8eb8a81363cb7bae9de6 | [
"MIT"
] | null | null | null | dJango/detect0/qa.py | GursimranSinghKahlon/EdTech | 5944019ccb3d054dbb1f8eb8a81363cb7bae9de6 | [
"MIT"
] | null | null | null |
def loadModel():
    """Load the persisted classifier from disk.

    Returns whatever object was pickled into ``finalized_model.sav``.

    Bug fixed: the original body first executed
    ``pickle.dump(modelLR, open(filename, 'wb'))`` — ``modelLR`` is not
    defined in this module (NameError), and even when it existed the dump
    clobbered the saved model on every load.  A loader must only read.
    The file handle is now also closed deterministically via ``with``.
    """
    import pickle
    filename = 'finalized_model.sav'
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
q1 = "what is computer?"
q2 = "what are advantage of computer?"
def predictOutput(q1,q2):
    """Predict whether questions *q1* and *q2* are duplicates.

    Loads the pickled classifier, featurizes the pair and returns
    ``model.predict(...)`` (array-like of class labels).

    NOTE(review): relies on ``extract_features`` and ``pd`` (pandas)
    being importable/defined at module level — neither appears in this
    file; confirm where they come from.
    """
    model = loadModel()
    q=[q1,q2]
    #print("hku")
    # assumes extract_features(q) yields the 8 numeric features named below — TODO confirm
    test_x = [extract_features(q)]
    test_x = pd.DataFrame(test_x, columns = ['nouns', 'adverbs', 'adjectives',
                                             'verbs', 'words', 'possesives',
                                             'digrms', 'questionWords'])
    # Debug output: duplicate-class probability and whether it clears 0.80.
    print(model.predict_proba(test_x)[0][1])
    print(model.predict_proba(test_x)[0][1] >= 0.80)
    return (model.predict(test_x))
#print(predictOutput(q1,q2,modelLR))
'''
print("done")
result = predictOutput(q1,q2,loaded_model)
print(result)
| 19.444444 | 78 | 0.609143 |
def loadModel():
import pickle
filename = 'finalized_model.sav'
pickle.dump(modelLR, open(filename, 'wb'))
loaded_model = pickle.load(open(filename, 'rb'))
return loaded_model
q1 = "what is computer?"
q2 = "what are advantage of computer?"
def predictOutput(q1,q2):
model = loadModel()
q=[q1,q2]
test_x = [extract_features(q)]
test_x = pd.DataFrame(test_x, columns = ['nouns', 'adverbs', 'adjectives',
'verbs', 'words', 'possesives',
'digrms', 'questionWords'])
print(model.predict_proba(test_x)[0][1])
print(model.predict_proba(test_x)[0][1] >= 0.80)
return (model.predict(test_x))
'''
print("done")
result = predictOutput(q1,q2,loaded_model)
print(result)
| false | true |
f7fb8005bb19d399b2dbe1441873e118df38dd73 | 127 | py | Python | utils/__init__.py | cmiras/BSL-segmentation | 35a66d6c758dcf4734adb0ebc5a40ea7238d7a1d | [
"MIT"
] | 17 | 2021-06-08T07:53:36.000Z | 2022-03-27T02:57:50.000Z | utils/__init__.py | cmiras/BSL-segmentation | 35a66d6c758dcf4734adb0ebc5a40ea7238d7a1d | [
"MIT"
] | 5 | 2021-07-15T09:41:08.000Z | 2022-01-13T14:53:10.000Z | utils/__init__.py | cmiras/BSL-segmentation | 35a66d6c758dcf4734adb0ebc5a40ea7238d7a1d | [
"MIT"
] | 18 | 2021-06-08T15:22:09.000Z | 2022-02-21T19:06:52.000Z |
import sys
import os
# Make the vendored "progress" package (sitting next to this file)
# importable before pulling Bar out of it.
sys.path.append(os.path.join(os.path.dirname(__file__), "progress"))
from progress.bar import Bar as Bar
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "progress"))
from progress.bar import Bar as Bar | true | true |
f7fb8103640c452dd3046356dbdbd1def614e983 | 373 | py | Python | cadastro_login/forms.py | alyneperez/cadastro_login | 1b59d5e8df9e080590f84d073e7645df971b7f95 | [
"MIT"
] | 2 | 2021-09-28T19:54:26.000Z | 2021-09-29T21:47:31.000Z | cadastro_login/forms.py | alyneperez/cadastro_login | 1b59d5e8df9e080590f84d073e7645df971b7f95 | [
"MIT"
] | null | null | null | cadastro_login/forms.py | alyneperez/cadastro_login | 1b59d5e8df9e080590f84d073e7645df971b7f95 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, HiddenField
from wtforms.validators import DataRequired, Email
class LoginForm(FlaskForm):
    """Login form: username and password are required; remember_me is optional."""
    username = StringField("username" , validators=[DataRequired()])
    password = PasswordField("password" , validators=[DataRequired()])
    remember_me = BooleanField("remember_me")
| 37.3 | 73 | 0.785523 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, HiddenField
from wtforms.validators import DataRequired, Email
class LoginForm(FlaskForm):
username = StringField("username" , validators=[DataRequired()])
password = PasswordField("password" , validators=[DataRequired()])
remember_me = BooleanField("remember_me")
| true | true |
f7fb8114c6de81e9e8e52c2ebf15cb32444aad14 | 481 | py | Python | folders/chat/client/test.py | angelopassaro/Hacktoberfest-1 | 21f90f5d49efba9b1a27f4d9b923f5017ab43f0e | [
"Apache-2.0"
] | 1 | 2020-10-06T01:20:07.000Z | 2020-10-06T01:20:07.000Z | folders/chat/client/test.py | angelopassaro/Hacktoberfest-1 | 21f90f5d49efba9b1a27f4d9b923f5017ab43f0e | [
"Apache-2.0"
] | null | null | null | folders/chat/client/test.py | angelopassaro/Hacktoberfest-1 | 21f90f5d49efba9b1a27f4d9b923f5017ab43f0e | [
"Apache-2.0"
] | null | null | null | from client import Client
import time
from threading import Thread
# Interactive chat client: ask for a name, connect, then poll for
# incoming messages on a background thread while the main thread sends.
b1 = input("enter your name ")
c1 = Client(b1)

def update():
    """Poll the client every 200 ms and print new messages until "{quit}"."""
    msgs = []
    run = True
    while run:
        time.sleep(0.2)
        new_messages = c1.get()
        msgs.extend(new_messages)
        for msg in new_messages:
            print(msg)
            # "{quit}" is the session-end sentinel from the server.
            if msg == "{quit}":
                run = False
                break

Thread(target=update).start()
# Main loop: forward every typed line to the server.
while True:
    ma = input()
    c1.send(ma)
| 17.178571 | 33 | 0.544699 | from client import Client
import time
from threading import Thread
b1 = input("enter your name ")
c1 = Client(b1)
def update():
msgs = []
run = True
while run:
time.sleep(0.2)
new_messages = c1.get()
msgs.extend(new_messages)
for msg in new_messages:
print(msg)
if msg == "{quit}":
run = False
break
Thread(target=update).start()
while True:
ma = input()
c1.send(ma)
| true | true |
f7fb81ed21611fa63452000da08ce4f754f7e085 | 495 | py | Python | threaded_reader.py | vlaznev/SerialPlotter | 758ba1a5af9b30a53d7d40aedd540bf39441ac0f | [
"MIT"
] | 1 | 2019-04-20T00:56:57.000Z | 2019-04-20T00:56:57.000Z | threaded_reader.py | vlaznev/SerialPlotter | 758ba1a5af9b30a53d7d40aedd540bf39441ac0f | [
"MIT"
] | null | null | null | threaded_reader.py | vlaznev/SerialPlotter | 758ba1a5af9b30a53d7d40aedd540bf39441ac0f | [
"MIT"
] | null | null | null | from Queue import Queue
import threading
class ThreadedReader:
    """Read lines from *file* on a background daemon thread.

    Lines are buffered in a thread-safe queue; ``next()`` returns the
    oldest buffered line, or ``None`` when nothing is waiting yet.
    """

    def __init__(self, file, startImmediately=True):
        # The queue decouples the producer thread from next() callers.
        self.queue = Queue()
        self.file = file
        self.thread = None
        if startImmediately:
            self.start()

    def next(self):
        """Return the next buffered line, or None if the buffer is empty."""
        if self.queue.empty():
            return None
        return self.queue.get()

    def thread_loop(self):
        """Producer loop: push every line of the file onto the queue."""
        enqueue = self.queue.put
        for record in self.file:
            enqueue(record)

    def start(self):
        """Spawn the daemon reader thread and keep its handle on self.thread."""
        worker = threading.Thread(target=self.thread_loop)
        worker.daemon = True
        self.thread = worker
        worker.start()
| 21.521739 | 59 | 0.721212 | from Queue import Queue
import threading
class ThreadedReader:
    """Buffer lines from a file-like object via a background daemon thread."""

    def __init__(self, file, startImmediately=True):
        self.file = file
        self.queue = Queue()
        self.thread = None
        if startImmediately:
            self.start()

    def next(self):
        """Non-blocking fetch: a buffered line, or None if none is ready."""
        if self.queue.empty():
            return None
        return self.queue.get()

    def thread_loop(self):
        # Worker body: push every line of the source onto the queue.
        for record in self.file:
            self.queue.put(record)

    def start(self):
        """Spawn (and remember) a daemon thread running thread_loop()."""
        worker = threading.Thread(target=self.thread_loop)
        worker.daemon = True
        self.thread = worker
        self.thread.start()
| true | true |
f7fb8217a9ba82eb79640006aba8a3e3dfb8a28b | 22,766 | py | Python | django/db/backends/mysql/base.py | dolfly/django | a971d19bab9bfc33d301669b319b4766bf6d94f6 | [
"BSD-3-Clause"
] | null | null | null | django/db/backends/mysql/base.py | dolfly/django | a971d19bab9bfc33d301669b319b4766bf6d94f6 | [
"BSD-3-Clause"
] | null | null | null | django/db/backends/mysql/base.py | dolfly/django | a971d19bab9bfc33d301669b319b4766bf6d94f6 | [
"BSD-3-Clause"
] | null | null | null | """
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper)
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str, force_text
from django.db.backends.mysql.schema import DatabaseSchemaEditor
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
if settings.DEBUG:
    warnings.filterwarnings("error", category=Database.Warning)
# Re-export the driver's exception classes under the names Django expects
# from every backend module.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
    """Parse a MySQL datetime string; attach UTC tzinfo when USE_TZ is on."""
    result = parse_datetime(value)
    if result is None or not settings.USE_TZ:
        return result
    # Only naive values get a tzinfo attached; aware ones pass through as-is.
    if timezone.is_naive(result):
        return result.replace(tzinfo=timezone.utc)
    return result
def adapt_datetime_with_timezone_support(value, conv):
    """Quote *value* as a MySQL datetime literal (naive UTC, second precision).

    Equivalent to DateTimeField.get_db_prep_value; used only by raw SQL.
    """
    if settings.USE_TZ:
        if timezone.is_naive(value):
            # Raw SQL should hand us aware datetimes when USE_TZ is on.
            warnings.warn("MySQL received a naive datetime (%s)"
                          " while time zone support is active." % value,
                          RuntimeWarning)
            value = timezone.make_aware(value, timezone.get_default_timezone())
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
    # Override the driver's type mapping: decode TIME/DECIMAL columns with
    # Django's typecasters and route DATETIME values through the
    # timezone-aware converters defined earlier in this module.
    FIELD_TYPE.TIME: backend_utils.typecast_time,
    FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
    datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    Thin delegating wrapper around MySQLdb's cursor that re-raises selected
    OperationalError instances as Django IntegrityError.

    Implemented by delegation rather than subclassing so it stays independent
    of the concrete cursor class returned by Connection.cursor().
    """
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        self.cursor = cursor

    def _reraise_as_integrity_error(self, exc):
        # MySQL reports some integrity problems as OperationalError; map the
        # known error codes onto Django's IntegrityError.
        if exc.args[0] in self.codes_for_integrityerror:
            six.reraise(utils.IntegrityError,
                        utils.IntegrityError(*tuple(exc.args)),
                        sys.exc_info()[2])

    def execute(self, query, args=None):
        try:
            # args=None disables string interpolation in the driver.
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            self._reraise_as_integrity_error(e)
            raise

    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            self._reraise_as_integrity_error(e)
            raise

    def __getattr__(self, attr):
        try:
            return self.__dict__[attr]
        except KeyError:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what this MySQL backend supports."""
    empty_fetchmany_value = ()
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
    allow_sliced_subqueries = False
    has_bulk_insert = True
    has_select_for_update = True
    has_select_for_update_nowait = False
    supports_forward_references = False
    supports_long_model_names = False
    supports_microsecond_precision = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    allows_primary_key_0 = False
    uses_savepoints = True
    atomic_transactions = False
    supports_check_constraints = False

    def __init__(self, connection):
        super(DatabaseFeatures, self).__init__(connection)

    @cached_property
    def _mysql_storage_engine(self):
        "Internal method used in Django tests. Don't rely on this from your code"
        cursor = self.connection.cursor()
        cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
        # This command is MySQL specific; the second column
        # will tell you the default table type of the created
        # table. Since all Django's test tables will have the same
        # table type, that's enough to evaluate the feature.
        cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
        result = cursor.fetchone()
        cursor.execute('DROP TABLE INTROSPECT_TEST')
        return result[1]

    @cached_property
    def can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        # NOTE(review): assumes every engine except MyISAM exposes usable FK
        # metadata -- confirm if other non-transactional engines are in use.
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def has_zoneinfo_database(self):
        # MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects
        # abbreviations (eg. EAT). When pytz isn't installed and the current
        # time zone is LocalTimezone (the only sensible value in this
        # context), the current time zone name will be an abbreviation. As a
        # consequence, MySQL cannot perform time zone conversions reliably.
        if pytz is None:
            return False
        # Test if the time zone definitions are installed.
        cursor = self.connection.cursor()
        cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
        return cursor.fetchone() is not None
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation and value-adaptation helpers."""
    compiler_module = "django.db.backends.mysql.compiler"

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the *lookup_type* component of a date column."""
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating *field_name* to *lookup_type* precision."""
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: leave the column expression unmodified.
            sql = field_name
        else:
            # Keep the real date parts up to the requested precision; pad the
            # rest with their minimum values.
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Like date_extract_sql, converting UTC to *tzname* first when USE_TZ."""
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            sql = "DAYOFWEEK(%s)" % field_name
        else:
            sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Like date_trunc_sql, converting UTC to *tzname* first when USE_TZ."""
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql, params

    def date_interval_sql(self, sql, connector, timedelta):
        """Render datetime +/- timedelta arithmetic as a DAY_MICROSECOND interval."""
        return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
                timedelta.days, timedelta.seconds, timedelta.microseconds)

    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"

    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return ["NULL"]

    def fulltext_search_sql(self, field_name):
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name

    def last_executed_query(self, cursor, sql, params):
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')

    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615

    def quote_name(self, name):
        if name.startswith("`") and name.endswith("`"):
            return name  # Quoting once is enough.
        return "`%s`" % name

    def quote_parameter(self, value):
        # Inner import to allow module to fail to load gracefully
        import MySQLdb.converters
        return MySQLdb.escape(value, MySQLdb.converters.conversions)

    def random_function_sql(self):
        return 'RAND()'

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return statements emptying *tables*, disabling FK checks around them."""
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def sequence_reset_by_name_sql(self, style, sequences):
        # Truncate already resets the AUTO_INCREMENT field from
        # MySQL version 5.0.13 onwards. Refs #16961.
        if self.connection.mysql_version < (5, 0, 13):
            return ["%s %s %s %s %s;" %
                (style.SQL_KEYWORD('ALTER'),
                 style.SQL_KEYWORD('TABLE'),
                 style.SQL_TABLE(self.quote_name(sequence['table'])),
                 style.SQL_KEYWORD('AUTO_INCREMENT'),
                 style.SQL_FIELD('= 1'),
                ) for sequence in sequences]
        else:
            return []

    def validate_autopk_value(self, value):
        # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value

    def value_to_db_datetime(self, value):
        """Adapt a datetime for storage: naive, second precision, UTC if aware."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))

    def value_to_db_time(self, value):
        """Adapt a time for storage: naive only, second precision."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")
        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))

    def year_lookup_bounds_for_datetime_field(self, value):
        # Again, no microseconds
        first, second = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
        return [first.replace(microsecond=0), second.replace(microsecond=0)]

    def max_name_length(self):
        # MySQL's identifier length limit.
        return 64

    def bulk_insert_sql(self, fields, num_values):
        """Build the "VALUES (...), (...)" clause for a multi-row INSERT."""
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database connection wrapper for MySQL (via MySQLdb)."""
    vendor = 'mysql'
    # Lookup-name -> SQL operator map. BINARY forces case-sensitive matching
    # for the case-sensitive lookups.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    Database = Database

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)

    def get_connection_params(self):
        """Build the kwargs dict passed to MySQLdb.connect() from settings."""
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
        }
        if six.PY2:
            kwargs['use_unicode'] = True
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
        if settings_dict['HOST'].startswith('/'):
            # A leading slash means a Unix socket path instead of a hostname.
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # We need the number of potentially affected rows after an
        # "UPDATE", not the number of changed rows.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        kwargs.update(settings_dict['OPTIONS'])
        return kwargs

    def get_new_connection(self, conn_params):
        """Open a raw MySQLdb connection and register safe-string encoders."""
        conn = Database.connect(**conn_params)
        conn.encoders[SafeText] = conn.encoders[six.text_type]
        conn.encoders[SafeBytes] = conn.encoders[bytes]
        return conn

    def init_connection_state(self):
        cursor = self.connection.cursor()
        # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
        # on a recently-inserted row will return when the field is tested for
        # NULL. Disabling this value brings this aspect of MySQL in line with
        # SQL standards.
        cursor.execute('SET SQL_AUTO_IS_NULL = 0')
        cursor.close()

    def create_cursor(self):
        cursor = self.connection.cursor()
        return CursorWrapper(cursor)

    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # The driver raises NotSupportedError when rollback is not
            # available; treat that as a no-op.
            pass

    def _set_autocommit(self, autocommit):
        self.connection.autocommit(autocommit)

    def disable_constraint_checking(self):
        """
        Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
        to indicate constraint checks need to be re-enabled.
        """
        self.cursor().execute('SET foreign_key_checks=0')
        return True

    def enable_constraint_checking(self):
        """
        Re-enable foreign key checks after they have been disabled.
        """
        # Override needs_rollback in case constraint_checks_disabled is
        # nested inside transaction.atomic.
        self.needs_rollback, needs_rollback = False, self.needs_rollback
        try:
            self.cursor().execute('SET foreign_key_checks=1')
        finally:
            self.needs_rollback = needs_rollback

    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign key references. This method is
        intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint checks were off.

        Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
        detailed information about the invalid reference in the error message.

        Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
        ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # LEFT JOIN finds referring rows whose FK value has no match
                # in the referenced table.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0],
                        table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))

    def schema_editor(self, *args, **kwargs):
        "Returns a new instance of this backend's SchemaEditor"
        return DatabaseSchemaEditor(self, *args, **kwargs)

    def is_usable(self):
        # ping() raises if the server connection has gone away.
        try:
            self.connection.ping()
        except DatabaseError:
            return False
        else:
            return True

    @cached_property
    def mysql_version(self):
        # Parsed (major, minor, patch) tuple of the connected server version.
        with self.temporary_connection():
            server_info = self.connection.get_server_info()
        match = server_version_re.match(server_info)
        if not match:
            raise Exception('Unable to determine MySQL version from version string %r' % server_info)
        return tuple(int(x) for x in match.groups())
| 41.317604 | 120 | 0.650663 | from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper)
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str, force_text
from django.db.backends.mysql.schema import DatabaseSchemaEditor
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
if settings.DEBUG:
    warnings.filterwarnings("error", category=Database.Warning)
# Re-export the driver's exception classes under the names Django expects
# from every backend module.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
    """Parse a MySQL datetime string; mark naive results as UTC under USE_TZ."""
    parsed = parse_datetime(value)
    if parsed is not None and settings.USE_TZ and timezone.is_naive(parsed):
        # Only naive values get tzinfo attached; aware ones are left alone.
        return parsed.replace(tzinfo=timezone.utc)
    return parsed
def adapt_datetime_with_timezone_support(value, conv):
    """Render *value* as a quoted MySQL datetime literal (UTC, second precision)."""
    if settings.USE_TZ:
        if timezone.is_naive(value):
            # Callers should supply aware datetimes when USE_TZ is on.
            warnings.warn("MySQL received a naive datetime (%s)"
                          " while time zone support is active." % value,
                          RuntimeWarning)
            tz = timezone.get_default_timezone()
            value = timezone.make_aware(value, tz)
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
    # Override the driver's type mapping: decode TIME/DECIMAL columns with
    # Django's typecasters and route DATETIME values through the
    # timezone-aware converters defined earlier in this module.
    FIELD_TYPE.TIME: backend_utils.typecast_time,
    FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
    datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    Delegating wrapper around MySQLdb's cursor.

    Re-raises OperationalError instances whose error code denotes an
    integrity violation (see ``codes_for_integrityerror``) as Django's
    IntegrityError, preserving the original traceback.
    """
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        self.cursor = cursor

    def execute(self, query, args=None):
        """Execute *query*; ``args is None`` disables driver interpolation."""
        try:
            # args is None means no string interpolation
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

    def executemany(self, query, args):
        """Execute *query* once per parameter sequence in *args*."""
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

    def __getattr__(self, attr):
        # Delegate every other attribute to the underlying cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what this MySQL backend supports."""
    empty_fetchmany_value = ()
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
    allow_sliced_subqueries = False
    has_bulk_insert = True
    has_select_for_update = True
    has_select_for_update_nowait = False
    supports_forward_references = False
    supports_long_model_names = False
    supports_microsecond_precision = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    allows_primary_key_0 = False
    uses_savepoints = True
    atomic_transactions = False
    supports_check_constraints = False

    def __init__(self, connection):
        super(DatabaseFeatures, self).__init__(connection)

    @cached_property
    def _mysql_storage_engine(self):
        # Internal helper used by Django's test suite: create a throwaway
        # table and read its engine from SHOW TABLE STATUS (second column).
        cursor = self.connection.cursor()
        cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
        # This command is MySQL specific; the second column
        # will tell you the default table type of the created
        # table. Since all Django's test tables will have the same
        # engine, checking one table is enough to evaluate the feature.
        cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
        result = cursor.fetchone()
        cursor.execute('DROP TABLE INTROSPECT_TEST')
        return result[1]

    @cached_property
    def can_introspect_foreign_keys(self):
        # NOTE(review): assumes every engine except MyISAM exposes usable FK
        # metadata -- confirm if other non-transactional engines are in use.
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def has_zoneinfo_database(self):
        # MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects
        # abbreviations (eg. EAT). When pytz isn't installed and the current
        # time zone name is an abbreviation, conversions are unreliable.
        if pytz is None:
            return False
        # Probe whether the server's time zone tables are populated.
        cursor = self.connection.cursor()
        cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
        return cursor.fetchone() is not None
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation and value-adaptation helpers."""
    compiler_module = "django.db.backends.mysql.compiler"

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the *lookup_type* component of a date column."""
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns 1-7 with Sunday=1 (WEEKDAY() would be 0-6).
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating *field_name* to *lookup_type* precision."""
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # doubled %% to escape
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: leave the column expression unmodified.
            sql = field_name
        else:
            # Keep the real date parts up to the requested precision; pad the
            # rest with their minimum values.
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Like date_extract_sql, converting UTC to *tzname* first when USE_TZ."""
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns 1-7 with Sunday=1.
            sql = "DAYOFWEEK(%s)" % field_name
        else:
            sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Like date_trunc_sql, converting UTC to *tzname* first when USE_TZ."""
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # doubled %% to escape
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql, params

    def date_interval_sql(self, sql, connector, timedelta):
        """Render datetime +/- timedelta arithmetic as a DAY_MICROSECOND interval."""
        return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
                timedelta.days, timedelta.seconds, timedelta.microseconds)

    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"

    def force_no_ordering(self):
        # "ORDER BY NULL" prevents MySQL from implicitly ordering by the
        # grouped columns when no explicit ordering is requested.
        return ["NULL"]

    def fulltext_search_sql(self, field_name):
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name

    def last_executed_query(self, cursor, sql, params):
        # MySQLdb cursors keep the exact query sent to the server in the
        # undocumented "_last_executed" attribute.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')

    def no_limit_value(self):
        # 2**64 - 1, the value MySQL documents for "no LIMIT".
        return 18446744073709551615

    def quote_name(self, name):
        if name.startswith("`") and name.endswith("`"):
            # Already quoted; quoting once is enough.
            return name
        return "`%s`" % name

    def quote_parameter(self, value):
        # Inner import so the module can still load when MySQLdb is broken.
        import MySQLdb.converters
        return MySQLdb.escape(value, MySQLdb.converters.conversions)

    def random_function_sql(self):
        return 'RAND()'

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return TRUNCATE statements emptying *tables*, FK checks disabled."""
        if tables:
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def sequence_reset_by_name_sql(self, style, sequences):
        # TRUNCATE resets AUTO_INCREMENT itself from MySQL 5.0.13 onwards, so
        # explicit ALTER TABLE statements are only needed for older servers.
        if self.connection.mysql_version < (5, 0, 13):
            return ["%s %s %s %s %s;" %
                (style.SQL_KEYWORD('ALTER'),
                 style.SQL_KEYWORD('TABLE'),
                 style.SQL_TABLE(self.quote_name(sequence['table'])),
                 style.SQL_KEYWORD('AUTO_INCREMENT'),
                 style.SQL_FIELD('= 1'),
                ) for sequence in sequences]
        else:
            return []

    def validate_autopk_value(self, value):
        # MySQLism: zero in an AUTO_INCREMENT column does not work.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value

    def value_to_db_datetime(self, value):
        """Adapt a datetime for storage: naive, second precision, UTC if aware."""
        if value is None:
            return None
        # MySQL doesn't store tz-aware datetimes.
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))

    def value_to_db_time(self, value):
        """Adapt a time for storage: naive only, second precision."""
        if value is None:
            return None
        # MySQL doesn't store tz-aware times.
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")
        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))

    def year_lookup_bounds_for_datetime_field(self, value):
        # Strip microseconds from the bounds too; see the adapters above.
        first, second = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
        return [first.replace(microsecond=0), second.replace(microsecond=0)]

    def max_name_length(self):
        # MySQL's identifier length limit.
        return 64

    def bulk_insert_sql(self, fields, num_values):
        """Build the "VALUES (...), (...)" clause for a multi-row INSERT."""
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for MySQL (DB-API 2.0 driver in ``Database``)."""
    vendor = 'mysql'
    # Django lookup name -> MySQL operator template.  The "BINARY" variants
    # request case-sensitive matching; the plain LIKE/REGEXP forms are the
    # case-insensitive counterparts used by the "i" lookups.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    # Expose the driver module on the wrapper (Django backend convention).
    Database = Database

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # One helper instance per backend facet (features, SQL ops, shell
        # client, test-database creation, introspection, validation).
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)

    def get_connection_params(self):
        """Translate settings_dict into kwargs for ``Database.connect()``."""
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
        }
        if six.PY2:
            # Ask the driver to return unicode strings under Python 2.
            kwargs['use_unicode'] = True
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
        # A HOST beginning with '/' is treated as a Unix socket path.
        if settings_dict['HOST'].startswith('/'):
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # Request the FOUND_ROWS client flag from the driver.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        # User-supplied OPTIONS override everything computed above.
        kwargs.update(settings_dict['OPTIONS'])
        return kwargs

    def get_new_connection(self, conn_params):
        conn = Database.connect(**conn_params)
        # Encode Django's "safe" string types exactly like their plain
        # text/bytes counterparts so the driver can serialize them.
        conn.encoders[SafeText] = conn.encoders[six.text_type]
        conn.encoders[SafeBytes] = conn.encoders[bytes]
        return conn

    def init_connection_state(self):
        # Disable MySQL's SQL_AUTO_IS_NULL session behaviour for every new
        # connection.
        cursor = self.connection.cursor()
        cursor.execute('SET SQL_AUTO_IS_NULL = 0')
        cursor.close()

    def create_cursor(self):
        cursor = self.connection.cursor()
        return CursorWrapper(cursor)

    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # The server/driver may refuse to roll back; treat as a no-op.
            pass

    def _set_autocommit(self, autocommit):
        self.connection.autocommit(autocommit)

    def disable_constraint_checking(self):
        """Turn foreign key checks off; returns True so callers re-enable them."""
        self.cursor().execute('SET foreign_key_checks=0')
        return True

    def enable_constraint_checking(self):
        """Re-enable foreign key checks.

        ``needs_rollback`` is parked and restored around the query so it can
        run even while the transaction is marked for rollback.
        """
        self.needs_rollback, needs_rollback = False, self.needs_rollback
        try:
            self.cursor().execute('SET foreign_key_checks=1')
        finally:
            self.needs_rollback = needs_rollback

    def check_constraints(self, table_names=None):
        """Raise IntegrityError for foreign key values with no referenced row.

        For each table (all tables when ``table_names`` is None), every key
        column is LEFT JOINed against its referenced table; a referring row
        whose non-NULL key finds no referenced row is reported.
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                # Without a primary key there is no way to identify the
                # offending row in the error message; skip the table.
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # Identifiers cannot be bound as query parameters, hence the
                # %-interpolation of table/column names into the SQL.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0],
                        table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))

    def schema_editor(self, *args, **kwargs):
        """Return a new SchemaEditor bound to this connection."""
        return DatabaseSchemaEditor(self, *args, **kwargs)

    def is_usable(self):
        # ping() raises when the server connection has gone away.
        try:
            self.connection.ping()
        except DatabaseError:
            return False
        else:
            return True

    @cached_property
    def mysql_version(self):
        """Server version as a tuple of ints, parsed from get_server_info()."""
        with self.temporary_connection():
            server_info = self.connection.get_server_info()
        match = server_version_re.match(server_info)
        if not match:
            raise Exception('Unable to determine MySQL version from version string %r' % server_info)
        return tuple(int(x) for x in match.groups())
| true | true |
f7fb8227f80fc3a082c5948a8aaf08a803d07707 | 528 | py | Python | face_detect/sample/learning_opencv3_with_python_sample/Chapter 6_Code/brief.py | minatuyang/RASP-ATTA | e182248da2f9f131e4e1aca5a2198b6ae910424e | [
"MIT"
] | 1 | 2018-11-14T02:54:24.000Z | 2018-11-14T02:54:24.000Z | face_detect/sample/learning_opencv3_with_python_sample/Chapter 6_Code/brief.py | minatuyang/RASP-ATTA | e182248da2f9f131e4e1aca5a2198b6ae910424e | [
"MIT"
] | null | null | null | face_detect/sample/learning_opencv3_with_python_sample/Chapter 6_Code/brief.py | minatuyang/RASP-ATTA | e182248da2f9f131e4e1aca5a2198b6ae910424e | [
"MIT"
] | null | null | null | import numpy as np
import cv2
from matplotlib import pyplot as plt

# NOTE: cv2.FeatureDetector_create and cv2.DescriptorExtractor_create do not
# exist in OpenCV 3.0.0-rc1 and later; this sample requires OpenCV 2.4.x.

# Load the query image as grayscale (flag 0).
img = cv2.imread('images/coat_of_arms_single.jpg', 0)

# Initiate STAR detector
star = cv2.FeatureDetector_create("STAR")

# Initiate BRIEF extractor
brief = cv2.DescriptorExtractor_create("BRIEF")

# find the keypoints with STAR
kp = star.detect(img, None)

# compute the descriptors with BRIEF
kp, des = brief.compute(img, kp)

# Use print() calls so the script is valid under Python 3 as well
# (the original used Python 2 print statements, a SyntaxError on 3.x).
print(brief.getInt('bytes'))
print(des.shape)
| 22.956522 | 86 | 0.774621 | import numpy as np
import cv2
from matplotlib import pyplot as plt

# NOTE: cv2.FeatureDetector_create and cv2.DescriptorExtractor_create do not
# exist in OpenCV 3.0.0-rc1 and later; this sample requires OpenCV 2.4.x.

# Load the query image as grayscale (flag 0).
img = cv2.imread('images/coat_of_arms_single.jpg', 0)
star = cv2.FeatureDetector_create("STAR")
brief = cv2.DescriptorExtractor_create("BRIEF")
kp = star.detect(img, None)
kp, des = brief.compute(img, kp)
# Use print() calls so the script is valid under Python 3 as well
# (the original used Python 2 print statements, a SyntaxError on 3.x).
print(brief.getInt('bytes'))
print(des.shape)
| false | true |
f7fb82465e215c5a4d9e443f86b84e3a95dc0d61 | 3,900 | py | Python | cupy/_core/__init__.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | null | null | null | cupy/_core/__init__.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | null | null | null | cupy/_core/__init__.py | prkhrsrvstv1/cupy | ea86c8225b575af9d2855fb77a306cf86fd098ea | [
"MIT"
] | null | null | null | from cupy._core import core # NOQA
from cupy._core import fusion # NOQA
from cupy._core import internal # NOQA
# internal APIs for testing and developement
from cupy._core._accelerator import set_reduction_accelerators # NOQA
from cupy._core._accelerator import set_routine_accelerators # NOQA
from cupy._core._accelerator import get_reduction_accelerators # NOQA
from cupy._core._accelerator import get_routine_accelerators # NOQA
# import class and function
from cupy._core._kernel import create_ufunc # NOQA
from cupy._core._kernel import ElementwiseKernel # NOQA
from cupy._core._kernel import ufunc # NOQA
from cupy._core._reduction import create_reduction_func # NOQA
from cupy._core._reduction import ReductionKernel # NOQA
from cupy._core._routines_binary import bitwise_and # NOQA
from cupy._core._routines_binary import bitwise_or # NOQA
from cupy._core._routines_binary import bitwise_xor # NOQA
from cupy._core._routines_binary import invert # NOQA
from cupy._core._routines_binary import left_shift # NOQA
from cupy._core._routines_binary import right_shift # NOQA
from cupy._core._routines_linalg import _mat_ptrs # NOQA
from cupy._core._routines_linalg import dot # NOQA
from cupy._core._routines_linalg import get_compute_type # NOQA
from cupy._core._routines_linalg import matmul # NOQA
from cupy._core._routines_linalg import set_compute_type # NOQA
from cupy._core._routines_linalg import tensordot_core # NOQA
from cupy._core._routines_logic import create_comparison # NOQA
from cupy._core._routines_logic import equal # NOQA
from cupy._core._routines_logic import greater # NOQA
from cupy._core._routines_logic import greater_equal # NOQA
from cupy._core._routines_logic import less # NOQA
from cupy._core._routines_logic import less_equal # NOQA
from cupy._core._routines_logic import not_equal # NOQA
from cupy._core._routines_manipulation import array_split # NOQA
from cupy._core._routines_manipulation import broadcast # NOQA
from cupy._core._routines_manipulation import broadcast_to # NOQA
from cupy._core._routines_manipulation import concatenate_method # NOQA
from cupy._core._routines_manipulation import moveaxis # NOQA
from cupy._core._routines_manipulation import rollaxis # NOQA
from cupy._core._routines_manipulation import size # NOQA'
from cupy._core._routines_math import absolute # NOQA
from cupy._core._routines_math import add # NOQA
from cupy._core._routines_math import angle # NOQA
from cupy._core._routines_math import conjugate # NOQA
from cupy._core._routines_math import divide # NOQA
from cupy._core._routines_math import floor_divide # NOQA
from cupy._core._routines_math import multiply # NOQA
from cupy._core._routines_math import negative # NOQA
from cupy._core._routines_math import positive # NOQA
from cupy._core._routines_math import power # NOQA
from cupy._core._routines_math import remainder # NOQA
from cupy._core._routines_math import sqrt # NOQA
from cupy._core._routines_math import subtract # NOQA
from cupy._core._routines_math import true_divide # NOQA
from cupy._core._routines_statistics import nanmax # NOQA
from cupy._core._routines_statistics import nanmin # NOQA
from cupy._core.core import _internal_ascontiguousarray # NOQA
from cupy._core.core import _internal_asfortranarray # NOQA
from cupy._core.core import array # NOQA
from cupy._core.core import ascontiguousarray # NOQA
from cupy._core.core import asfortranarray # NOQA
from cupy._core.core import divmod # NOQA
from cupy._core.core import elementwise_copy # NOQA
from cupy._core.core import ndarray # NOQA
from cupy._core.dlpack import fromDlpack # NOQA
from cupy._core.dlpack import from_dlpack # NOQA
from cupy._core.internal import complete_slice # NOQA
from cupy._core.internal import get_size # NOQA
from cupy._core.raw import RawKernel # NOQA
from cupy._core.raw import RawModule # NOQA
| 52 | 72 | 0.821538 | from cupy._core import core
from cupy._core import fusion
from cupy._core import internal
from cupy._core._accelerator import set_reduction_accelerators
from cupy._core._accelerator import set_routine_accelerators
from cupy._core._accelerator import get_reduction_accelerators
from cupy._core._accelerator import get_routine_accelerators
from cupy._core._kernel import create_ufunc
from cupy._core._kernel import ElementwiseKernel
from cupy._core._kernel import ufunc
from cupy._core._reduction import create_reduction_func
from cupy._core._reduction import ReductionKernel
from cupy._core._routines_binary import bitwise_and
from cupy._core._routines_binary import bitwise_or
from cupy._core._routines_binary import bitwise_xor
from cupy._core._routines_binary import invert
from cupy._core._routines_binary import left_shift
from cupy._core._routines_binary import right_shift
from cupy._core._routines_linalg import _mat_ptrs
from cupy._core._routines_linalg import dot
from cupy._core._routines_linalg import get_compute_type
from cupy._core._routines_linalg import matmul
from cupy._core._routines_linalg import set_compute_type
from cupy._core._routines_linalg import tensordot_core
from cupy._core._routines_logic import create_comparison
from cupy._core._routines_logic import equal
from cupy._core._routines_logic import greater
from cupy._core._routines_logic import greater_equal
from cupy._core._routines_logic import less
from cupy._core._routines_logic import less_equal
from cupy._core._routines_logic import not_equal
from cupy._core._routines_manipulation import array_split
from cupy._core._routines_manipulation import broadcast
from cupy._core._routines_manipulation import broadcast_to
from cupy._core._routines_manipulation import concatenate_method
from cupy._core._routines_manipulation import moveaxis
from cupy._core._routines_manipulation import rollaxis
from cupy._core._routines_manipulation import size
from cupy._core._routines_math import absolute # NOQA
from cupy._core._routines_math import add # NOQA
from cupy._core._routines_math import angle # NOQA
from cupy._core._routines_math import conjugate # NOQA
from cupy._core._routines_math import divide # NOQA
from cupy._core._routines_math import floor_divide # NOQA
from cupy._core._routines_math import multiply # NOQA
from cupy._core._routines_math import negative # NOQA
from cupy._core._routines_math import positive # NOQA
from cupy._core._routines_math import power # NOQA
from cupy._core._routines_math import remainder # NOQA
from cupy._core._routines_math import sqrt # NOQA
from cupy._core._routines_math import subtract # NOQA
from cupy._core._routines_math import true_divide # NOQA
from cupy._core._routines_statistics import nanmax # NOQA
from cupy._core._routines_statistics import nanmin # NOQA
from cupy._core.core import _internal_ascontiguousarray # NOQA
from cupy._core.core import _internal_asfortranarray # NOQA
from cupy._core.core import array # NOQA
from cupy._core.core import ascontiguousarray # NOQA
from cupy._core.core import asfortranarray # NOQA
from cupy._core.core import divmod # NOQA
from cupy._core.core import elementwise_copy # NOQA
from cupy._core.core import ndarray # NOQA
from cupy._core.dlpack import fromDlpack # NOQA
from cupy._core.dlpack import from_dlpack # NOQA
from cupy._core.internal import complete_slice # NOQA
from cupy._core.internal import get_size # NOQA
from cupy._core.raw import RawKernel # NOQA
from cupy._core.raw import RawModule # NOQA
| true | true |
f7fb82c079a5dc8f4b64c83bc2b5743d67714eb1 | 4,573 | py | Python | riptide/tests/integration/testcase_engine.py | theCapypara/riptide-lib | 560106d4196cdc5a5b84235f32ac44c80bc3994e | [
"MIT"
] | 4 | 2019-04-23T17:14:00.000Z | 2019-12-22T11:55:31.000Z | riptide/tests/integration/testcase_engine.py | theCapypara/riptide-lib | 560106d4196cdc5a5b84235f32ac44c80bc3994e | [
"MIT"
] | 15 | 2021-09-22T09:40:42.000Z | 2022-03-07T05:01:07.000Z | riptide/tests/integration/testcase_engine.py | theCapypara/riptide-lib | 560106d4196cdc5a5b84235f32ac44c80bc3994e | [
"MIT"
] | 1 | 2019-11-24T18:08:14.000Z | 2019-11-24T18:08:14.000Z | import asyncio
import unittest
import requests
from typing import re, Union, AnyStr, Pattern
from urllib import request
class EngineTest(unittest.TestCase):
    """Shared integration-test helpers for exercising a container engine.

    Test cases call :meth:`run_start_test` / :meth:`run_stop_test` with an
    engine implementation, a loaded project, the service names to act on,
    and an engine-specific ``engine_tester`` that performs detail checks.
    """

    def run_start_test(self, engine, project, services, engine_tester):
        """Start ``services`` of ``project`` on ``engine`` and assert they run."""
        # Run async test code
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self._start_async_test(engine, project, services, engine_tester))

    def run_stop_test(self, engine, project, services, engine_tester):
        """Stop ``services`` of ``project`` on ``engine`` and assert they stopped."""
        # Run async test code
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self._stop_async_test(engine, project, services, engine_tester))

    def assert_running(self, engine, project, services, engine_tester):
        """Assert each named service is up: address resolvable and HTTP reachable."""
        for service_name in services:
            service = project["app"]["services"][service_name]
            if "port" in service:
                # 2. Check if services with port can be resolved to an ip address
                address = engine.address_for(project, service_name)
                self.assertIsNotNone(address,
                                     f'After starting a service with a port configured, '
                                     f'it has to be resolvable. Service: {service_name}')
                # 3. Check if these services can be reached via HTTP
                # address is an (ip, port) pair of strings.
                http_address = 'http://' + address[0] + ':' + address[1]
                try:
                    request.urlopen(http_address)
                except OSError as err:
                    raise AssertionError(
                        f"A service must be reachable on it's address after start. "
                        f"Service: {service_name}, address: {http_address}"
                    ) from err
        # 4. Let engine tester check details
        service_objects = [project["app"]["services"][name] for name in services]
        engine_tester.assert_running(engine, project, service_objects)

    def assert_not_running(self, engine, project, services, engine_tester):
        """Assert each named service is down: no address, engine-specific checks pass."""
        for service in services:
            # 2. Check if all services can no longer be resolved to an ip address
            address = engine.address_for(project, service)
            self.assertIsNone(address,
                              'After stopping a service it must not be resolvable to an ip address + port.')
        # 3. Let engine tester check details
        service_objects = [project["app"]["services"][name] for name in services]
        engine_tester.assert_not_running(engine, project, service_objects)

    def assert_response(self, rsp_message: bytes, engine, project, service_name, sub_path="", msg=None):
        """Assert an HTTP GET to the service (plus ``sub_path``) returns 200 and ``rsp_message``."""
        (ip, port) = engine.address_for(project, service_name)
        response = requests.get('http://' + ip + ':' + port + sub_path)
        self.assertEqual(200, response.status_code)
        self.assertEqual(rsp_message, response.content, msg)

    def assert_response_matches_regex(self, regex: Union[AnyStr, Pattern[AnyStr]], engine, project, service_name):
        """Assert an HTTP GET to the service returns 200 and a body matching ``regex``."""
        (ip, port) = engine.address_for(project, service_name)
        response = requests.get('http://' + ip + ':' + port)
        self.assertEqual(200, response.status_code)
        self.assertRegex(response.content.decode('utf-8'), regex)

    async def _start_async_test(self, engine, project, services, engine_tester):
        """Start a project with the given services and run all assertions on it"""
        failures = {}
        async for service_name, status, finished in engine.start_project(project, services):
            # We are only interested in failed starts, we collect them and throw them together as errors
            if status and finished:
                failures[service_name] = str(status)
        # 1. No services must fail start
        self.maxDiff = 99999
        self.assertDictEqual({}, failures, 'No service must fail starting')
        self.assert_running(engine, project, services, engine_tester)

    async def _stop_async_test(self, engine, project, services, engine_tester):
        """Stop a project with the given services and run all assertions on it"""
        failures = []
        async for service_name, status, finished in engine.stop_project(project, services):
            # We are only interested in failed starts, we collect them and throw them together as errors
            if status and finished:
                failures.append(str(status))
        # 1. No services must fail start
        self.assertListEqual([], failures, 'No service must fail stoping')
        self.assert_not_running(engine, project, services, engine_tester)
| 47.635417 | 114 | 0.645309 | import asyncio
import unittest
import requests
from typing import re, Union, AnyStr, Pattern
from urllib import request
class EngineTest(unittest.TestCase):
    """Shared integration-test helpers for exercising a container engine."""

    def run_start_test(self, engine, project, services, engine_tester):
        """Start ``services`` of ``project`` on ``engine`` and assert they run."""
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self._start_async_test(engine, project, services, engine_tester))

    def run_stop_test(self, engine, project, services, engine_tester):
        """Stop ``services`` of ``project`` on ``engine`` and assert they stopped."""
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self._stop_async_test(engine, project, services, engine_tester))

    def assert_running(self, engine, project, services, engine_tester):
        """Assert each named service is up: address resolvable and HTTP reachable."""
        for service_name in services:
            service = project["app"]["services"][service_name]
            if "port" in service:
                # Services with a configured port must resolve to an address...
                address = engine.address_for(project, service_name)
                self.assertIsNotNone(address,
                                     f'After starting a service with a port configured, '
                                     f'it has to be resolvable. Service: {service_name}')
                # ...and be reachable over HTTP (address is an (ip, port) pair).
                http_address = 'http://' + address[0] + ':' + address[1]
                try:
                    request.urlopen(http_address)
                except OSError as err:
                    raise AssertionError(
                        f"A service must be reachable on it's address after start. "
                        f"Service: {service_name}, address: {http_address}"
                    ) from err
        # 4. Let engine tester check details
        service_objects = [project["app"]["services"][name] for name in services]
        engine_tester.assert_running(engine, project, service_objects)

    def assert_not_running(self, engine, project, services, engine_tester):
        """Assert each named service is down: no address, engine-specific checks pass."""
        for service in services:
            # 2. Check if all services can no longer be resolved to an ip address
            address = engine.address_for(project, service)
            self.assertIsNone(address,
                              'After stopping a service it must not be resolvable to an ip address + port.')
        # 3. Let engine tester check details
        service_objects = [project["app"]["services"][name] for name in services]
        engine_tester.assert_not_running(engine, project, service_objects)

    def assert_response(self, rsp_message: bytes, engine, project, service_name, sub_path="", msg=None):
        """Assert an HTTP GET to the service (plus ``sub_path``) returns 200 and ``rsp_message``."""
        (ip, port) = engine.address_for(project, service_name)
        response = requests.get('http://' + ip + ':' + port + sub_path)
        self.assertEqual(200, response.status_code)
        self.assertEqual(rsp_message, response.content, msg)

    def assert_response_matches_regex(self, regex: Union[AnyStr, Pattern[AnyStr]], engine, project, service_name):
        """Assert an HTTP GET to the service returns 200 and a body matching ``regex``."""
        (ip, port) = engine.address_for(project, service_name)
        response = requests.get('http://' + ip + ':' + port)
        self.assertEqual(200, response.status_code)
        self.assertRegex(response.content.decode('utf-8'), regex)

    async def _start_async_test(self, engine, project, services, engine_tester):
        """Start the services and assert none failed and all are running."""
        failures = {}
        async for service_name, status, finished in engine.start_project(project, services):
            # We are only interested in failed starts, we collect them and throw them together as errors
            if status and finished:
                failures[service_name] = str(status)
        # 1. No services must fail start
        self.maxDiff = 99999
        self.assertDictEqual({}, failures, 'No service must fail starting')
        self.assert_running(engine, project, services, engine_tester)

    async def _stop_async_test(self, engine, project, services, engine_tester):
        """Stop the services and assert none failed and all are stopped."""
        failures = []
        async for service_name, status, finished in engine.stop_project(project, services):
            # We are only interested in failed starts, we collect them and throw them together as errors
            if status and finished:
                failures.append(str(status))
        # 1. No services must fail start
        self.assertListEqual([], failures, 'No service must fail stoping')
        self.assert_not_running(engine, project, services, engine_tester)
| true | true |
f7fb8302e0952238b808eb1d98c853e1f04ba2ca | 7,214 | py | Python | examples/optimization/layout_opt/hybrid_run.py | Matthew-Boyd/HOPP | de4e40efda5bfb28361dc3d9d68d13aa465dcc52 | [
"BSD-3-Clause"
] | null | null | null | examples/optimization/layout_opt/hybrid_run.py | Matthew-Boyd/HOPP | de4e40efda5bfb28361dc3d9d68d13aa465dcc52 | [
"BSD-3-Clause"
] | null | null | null | examples/optimization/layout_opt/hybrid_run.py | Matthew-Boyd/HOPP | de4e40efda5bfb28361dc3d9d68d13aa465dcc52 | [
"BSD-3-Clause"
] | null | null | null | """
A prototype application of the distributed cross-entropy method to the wind optimization problem.
In this basic implementation, the number of turbines is fixed and the generative distribution is uncorrelated.
TODO:
+ Add boundary constraints / penalties
+ Add proximity constraints
+ Better order turbine locations
+ Investigate turbine number as an attribute
+ Investigate modeling parameter covariances
+ Investigate other distribution types
+ Investigate parameter transformations
+ Add solar
+ Add storage
+ Add cabling, etc
 + Investigate organic approach
"""
import matplotlib as mpl
mpl.use('Agg')
import os
from dotenv import load_dotenv
import numpy as np
from matplotlib.animation import (
PillowWriter,
)
from matplotlib.lines import Line2D
from tools.optimization import (
setup_run,
DataRecorder
)
from hybrid.sites import make_circular_site, make_irregular_site, SiteInfo
from hybrid.log import opt_logger as logger
from hybrid.sites import locations
from hybrid.keys import set_developer_nrel_gov_key
from hybrid.layout.plot_tools import *
from parametrized_optimization_driver import ParametrizedOptimizationDriver
from hybrid_optimization_problem import HybridOptimizationProblem
from hybrid_parametrization import HybridParametrization
# Compact console printing for the large numpy arrays logged below.
np.set_printoptions(precision=2, threshold=10000, linewidth=240)
# Set API key: read NREL_API_KEY from a .env file / the process environment.
load_dotenv()
NREL_API_KEY = os.getenv("NREL_API_KEY")
set_developer_nrel_gov_key(NREL_API_KEY)  # Set this key manually here if you are not setting it using the .env
def run(default_config: dict) -> None:
    """Run a hybrid wind+solar layout optimization described by ``default_config``.

    Builds the site and optimization problem, then steps the optimizer until
    ``max_evaluations`` evaluations have been spent, writing recorded data and
    a ``trajectory.gif`` animation of the search to the run's output directory.
    """
    config, output_path, run_name = setup_run(default_config)
    recorder = DataRecorder.make_data_recorder(output_path)

    max_evaluations = config['max_evaluations']

    location_index = config['location']
    location = locations[location_index]

    # Build the site boundary from the configured site type.
    site = config['site']
    site_data = None
    if site == 'circular':
        site_data = make_circular_site(lat=location[0], lon=location[1], elev=location[2])
    elif site == 'irregular':
        site_data = make_irregular_site(lat=location[0], lon=location[1], elev=location[2])
    else:
        raise Exception("Unknown site '" + site + "'")

    site_info = SiteInfo(site_data)
    inner_problem = HybridOptimizationProblem(site_info, config['num_turbines'], config['solar_capacity'])
    problem = HybridParametrization(inner_problem)

    optimizer = ParametrizedOptimizationDriver(problem, recorder=recorder, **config['optimizer_config'])

    # Overview plot of the site boundary.
    figure = plt.figure(1)
    axes = figure.add_subplot(111)
    axes.set_aspect('equal')
    plt.grid()
    plt.tick_params(which='both', labelsize=15)
    plt.xlabel('x (m)', fontsize=15)
    plt.ylabel('y (m)', fontsize=15)
    site_info.plot()

    score, evaluation, best_solution = optimizer.central_solution()
    # NOTE(review): when ``score`` is not None this unpacks it into
    # (score, evaluation), which only works if ``score`` is a 2-tuple --
    # verify against ParametrizedOptimizationDriver.central_solution().
    score, evaluation = problem.objective(best_solution) if score is None else score
    print(-1, ' ', score, evaluation)
    print('setup 1')

    # Animation writer for the search-trajectory GIF (2 fps per substep).
    num_substeps = 1
    figure, axes = plt.subplots(dpi=200)  # rebinds the handles used by plot_candidate
    axes.set_aspect(1)
    animation_writer = PillowWriter(2 * num_substeps)
    animation_writer.setup(figure, os.path.join(output_path, 'trajectory.gif'), dpi=200)
    print('setup 2')

    _, _, central_solution = optimizer.central_solution()
    print('setup 3')

    # Square plotting window centered on the site, padded by 30%.
    bounds = problem.inner_problem.site_info.polygon.bounds
    site_sw_bound = np.array([bounds[0], bounds[1]])
    site_ne_bound = np.array([bounds[2], bounds[3]])
    site_center = .5 * (site_sw_bound + site_ne_bound)
    max_delta = max(bounds[2] - bounds[0], bounds[3] - bounds[1])
    reach = (max_delta / 2) * 1.3
    min_plot_bound = site_center - reach
    max_plot_bound = site_center + reach
    print('setup 4')

    best_score, best_evaluation, best_solution = 0.0, 0.0, None

    def plot_candidate(candidate):
        # Draw the current mean search vector (grey) plus the best layout so
        # far (wind purple / solar orange), then grab a frame for the GIF.
        nonlocal best_score, best_evaluation, best_solution
        axes.cla()
        axes.set(xlim=(min_plot_bound[0], max_plot_bound[0]), ylim=(min_plot_bound[1], max_plot_bound[1]))
        wind_color = (153 / 255, 142 / 255, 195 / 255)
        solar_color = (241 / 255, 163 / 255, 64 / 255)
        central_color = (.5, .5, .5)
        conforming_candidate, _, __ = problem.make_conforming_candidate_and_get_penalty(candidate)
        problem.plot_candidate(conforming_candidate, figure, axes, central_color, central_color, alpha=.7)
        if best_solution is not None:
            conforming_best, _, __ = problem.make_conforming_candidate_and_get_penalty(best_solution)
            problem.plot_candidate(conforming_best, figure, axes, wind_color, solar_color, alpha=1.0)
            axes.set_xlabel('Best Solution AEP: {}'.format(best_evaluation))
        else:
            axes.set_xlabel('')
        axes.legend([
            Line2D([0], [0], color=wind_color, lw=8),
            Line2D([0], [0], color=solar_color, lw=8),
            Line2D([0], [0], color=central_color, lw=8),
        ],
            ['Wind Layout', 'Solar Layout', 'Mean Search Vector'],
            loc='lower left')
        animation_writer.grab_frame()

    print('plot candidate')
    plot_candidate(central_solution)
    central_prev = central_solution
    # TODO: make a smooth transition between points
    # TODO: plot exclusion zones
    print('begin')
    while optimizer.num_evaluations() < max_evaluations:
        print('step start')
        logger.info("Starting step, num evals {}".format(optimizer.num_evaluations()))
        optimizer.step()
        print('step end')

        proportion = min(1.0, optimizer.num_evaluations() / max_evaluations)
        g = 1.0 * proportion
        b = 1.0 - g
        a = .5
        color = (b, g, b)  # NOTE(review): unused; ``a``/``b`` are rebound as arrays below
        best_score, best_evaluation, best_solution = optimizer.best_solution()
        central_score, central_evaluation, central_solution = optimizer.central_solution()

        # Interpolate between the previous and new central solutions in
        # parameter space so the animation moves smoothly.
        a1 = optimizer.converter.convert_from(central_prev)
        b1 = optimizer.converter.convert_from(central_solution)
        a = np.array(a1, dtype=np.float64)
        b = np.array(b1, dtype=np.float64)
        for i in range(num_substeps):
            p = (i + 1) / num_substeps
            c = (1 - p) * a + p * b
            candidate = optimizer.converter.convert_to(c)
            plot_candidate(candidate)
        central_prev = central_solution
        print(optimizer.num_iterations(), ' ', optimizer.num_evaluations(), best_score, best_evaluation)

    animation_writer.finish()

    optimizer.close()
    print("Results and animation written to " + os.path.abspath(output_path))
# Default run configuration consumed by run() / setup_run() above.
default_config = {
    'name': 't2',
    'location': 1,  # index into hybrid.sites.locations
    'site': 'irregular',  # 'circular' or 'irregular' (see run())
    'solar_capacity': 50000,  # kW
    'num_turbines': 50,  # number of wind turbines in the layout
    'max_evaluations': 20,
    'optimizer_config': {
        'method': 'CMA-ES',
        'nprocs': 1,
        'generation_size': 5,
        'selection_proportion': .33,
        'prior_scale': 1.0,
        'prior_params': {
            # "grid_angle": {
            #     "mu": 0.1
            # }
        }
    }
}

run(default_config)
| 34.352381 | 111 | 0.659689 |
import matplotlib as mpl
mpl.use('Agg')
import os
from dotenv import load_dotenv
import numpy as np
from matplotlib.animation import (
PillowWriter,
)
from matplotlib.lines import Line2D
from tools.optimization import (
setup_run,
DataRecorder
)
from hybrid.sites import make_circular_site, make_irregular_site, SiteInfo
from hybrid.log import opt_logger as logger
from hybrid.sites import locations
from hybrid.keys import set_developer_nrel_gov_key
from hybrid.layout.plot_tools import *
from parametrized_optimization_driver import ParametrizedOptimizationDriver
from hybrid_optimization_problem import HybridOptimizationProblem
from hybrid_parametrization import HybridParametrization
np.set_printoptions(precision=2, threshold=10000, linewidth=240)
load_dotenv()
NREL_API_KEY = os.getenv("NREL_API_KEY")
set_developer_nrel_gov_key(NREL_API_KEY)
def run(default_config: dict) -> None:
    """Run a hybrid wind/solar layout optimization and record an animated GIF.

    ``default_config`` is expanded by ``setup_run``; the keys read here are
    'max_evaluations', 'location' (index into the module-level ``locations``
    table), 'site' ('circular' or 'irregular'), 'num_turbines',
    'solar_capacity' and 'optimizer_config' (kwargs for the driver).
    Results and 'trajectory.gif' are written to the run's output directory.
    """
    config, output_path, run_name = setup_run(default_config)
    recorder = DataRecorder.make_data_recorder(output_path)
    max_evaluations = config['max_evaluations']
    location_index = config['location']
    location = locations[location_index]
    site = config['site']
    site_data = None
    # Build the site boundary from the configured shape.
    if site == 'circular':
        site_data = make_circular_site(lat=location[0], lon=location[1], elev=location[2])
    elif site == 'irregular':
        site_data = make_irregular_site(lat=location[0], lon=location[1], elev=location[2])
    else:
        raise Exception("Unknown site '" + site + "'")
    site_info = SiteInfo(site_data)
    inner_problem = HybridOptimizationProblem(site_info, config['num_turbines'], config['solar_capacity'])
    problem = HybridParametrization(inner_problem)
    optimizer = ParametrizedOptimizationDriver(problem, recorder=recorder, **config['optimizer_config'])
    # Static figure used only for the initial site plot.
    figure = plt.figure(1)
    axes = figure.add_subplot(111)
    axes.set_aspect('equal')
    plt.grid()
    plt.tick_params(which='both', labelsize=15)
    plt.xlabel('x (m)', fontsize=15)
    plt.ylabel('y (m)', fontsize=15)
    site_info.plot()
    score, evaluation, best_solution = optimizer.central_solution()
    # NOTE(review): when score is not None this unpacks `score` itself into
    # (score, evaluation); that only works if central_solution() returned a
    # 2-tuple in the first slot -- confirm against the driver's contract.
    score, evaluation = problem.objective(best_solution) if score is None else score
    print(-1, ' ', score, evaluation)
    print('setup 1')
    num_substeps = 1
    # Fresh figure for the animation frames.
    figure, axes = plt.subplots(dpi=200)
    axes.set_aspect(1)
    animation_writer = PillowWriter(2 * num_substeps)
    animation_writer.setup(figure, os.path.join(output_path, 'trajectory.gif'), dpi=200)
    print('setup 2')
    _, _, central_solution = optimizer.central_solution()
    print('setup 3')
    # Fixed plot window: site bounding box padded by 30%.
    bounds = problem.inner_problem.site_info.polygon.bounds
    site_sw_bound = np.array([bounds[0], bounds[1]])
    site_ne_bound = np.array([bounds[2], bounds[3]])
    site_center = .5 * (site_sw_bound + site_ne_bound)
    max_delta = max(bounds[2] - bounds[0], bounds[3] - bounds[1])
    reach = (max_delta / 2) * 1.3
    min_plot_bound = site_center - reach
    max_plot_bound = site_center + reach
    print('setup 4')
    best_score, best_evaluation, best_solution = 0.0, 0.0, None

    def plot_candidate(candidate):
        # Draws one animation frame: the (grey) conforming candidate plus, if
        # known, the best solution so far in wind/solar colors.
        nonlocal best_score, best_evaluation, best_solution
        axes.cla()
        axes.set(xlim=(min_plot_bound[0], max_plot_bound[0]), ylim=(min_plot_bound[1], max_plot_bound[1]))
        wind_color = (153 / 255, 142 / 255, 195 / 255)
        solar_color = (241 / 255, 163 / 255, 64 / 255)
        central_color = (.5, .5, .5)
        conforming_candidate, _, __ = problem.make_conforming_candidate_and_get_penalty(candidate)
        problem.plot_candidate(conforming_candidate, figure, axes, central_color, central_color, alpha=.7)
        if best_solution is not None:
            conforming_best, _, __ = problem.make_conforming_candidate_and_get_penalty(best_solution)
            problem.plot_candidate(conforming_best, figure, axes, wind_color, solar_color, alpha=1.0)
            axes.set_xlabel('Best Solution AEP: {}'.format(best_evaluation))
        else:
            axes.set_xlabel('')
        axes.legend([
            Line2D([0], [0], color=wind_color, lw=8),
            Line2D([0], [0], color=solar_color, lw=8),
            Line2D([0], [0], color=central_color, lw=8),
        ],
            ['Wind Layout', 'Solar Layout', 'Mean Search Vector'],
            loc='lower left')
        animation_writer.grab_frame()

    print('plot candidate')
    plot_candidate(central_solution)
    central_prev = central_solution
    print('begin')
    while optimizer.num_evaluations() < max_evaluations:
        print('step start')
        logger.info("Starting step, num evals {}".format(optimizer.num_evaluations()))
        optimizer.step()
        print('step end')
        proportion = min(1.0, optimizer.num_evaluations() / max_evaluations)
        # NOTE(review): `color` is computed but never used, and `a`/`b` are
        # immediately rebound to arrays below -- this color ramp looks vestigial.
        g = 1.0 * proportion
        b = 1.0 - g
        a = .5
        color = (b, g, b)
        best_score, best_evaluation, best_solution = optimizer.best_solution()
        central_score, central_evaluation, central_solution = optimizer.central_solution()
        # Interpolate between the previous and current central solution in
        # converter space so the animation moves smoothly.
        a1 = optimizer.converter.convert_from(central_prev)
        b1 = optimizer.converter.convert_from(central_solution)
        a = np.array(a1, dtype=np.float64)
        b = np.array(b1, dtype=np.float64)
        for i in range(num_substeps):
            p = (i + 1) / num_substeps
            c = (1 - p) * a + p * b
            candidate = optimizer.converter.convert_to(c)
            plot_candidate(candidate)
        central_prev = central_solution
        print(optimizer.num_iterations(), ' ', optimizer.num_evaluations(), best_score, best_evaluation)
    animation_writer.finish()
    optimizer.close()
    print("Results and animation written to " + os.path.abspath(output_path))
# Default experiment configuration; see run() for the meaning of each key.
default_config = {
    'name': 't2',
    'location': 1,  # index into the module-level `locations` table
    'site': 'irregular',  # site boundary shape: 'circular' or 'irregular'
    'solar_capacity': 50000,  # units per HybridOptimizationProblem -- confirm (likely kW)
    'num_turbines': 50,
    'max_evaluations': 20,  # total objective-evaluation budget
    'optimizer_config': {  # forwarded as kwargs to ParametrizedOptimizationDriver
        'method': 'CMA-ES',
        'nprocs': 1,
        'generation_size': 5,
        'selection_proportion': .33,
        'prior_scale': 1.0,
        'prior_params': {
        }
    }
}
run(default_config)
| true | true |
f7fb8354bcbce6b0b82cf820584c5ff4d9710fe0 | 1,838 | py | Python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/file_server_location_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/file_server_location_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2020-03-06T05:57:16.000Z | 2020-03-06T05:57:16.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/file_server_location_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .dataset_location_py3 import DatasetLocation
class FileServerLocation(DatasetLocation):
    """Dataset storage location backed by a file server.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message,
     deserialized into this collection
    :type additional_properties: dict[str, object]
    :param type: Required. Type of dataset storage location.
    :type type: str
    :param folder_path: Folder path of the dataset. Type: string (or
     Expression with resultType string)
    :type folder_path: object
    :param file_name: File name of the dataset. Type: string (or
     Expression with resultType string).
    :type file_name: object
    """

    # Attributes that must be set before serialization.
    _validation = {'type': {'required': True}}

    # Maps Python attribute names to wire-format keys and serialized types.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'folder_path': {'key': 'folderPath', 'type': 'object'},
        'file_name': {'key': 'fileName', 'type': 'object'},
    }

    def __init__(self, *, type: str, additional_properties=None, folder_path=None, file_name=None, **kwargs) -> None:
        super(FileServerLocation, self).__init__(
            additional_properties=additional_properties,
            type=type,
            folder_path=folder_path,
            file_name=file_name,
            **kwargs,
        )
| 39.956522 | 160 | 0.642002 |
from .dataset_location_py3 import DatasetLocation
class FileServerLocation(DatasetLocation):
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'folder_path': {'key': 'folderPath', 'type': 'object'},
'file_name': {'key': 'fileName', 'type': 'object'},
}
def __init__(self, *, type: str, additional_properties=None, folder_path=None, file_name=None, **kwargs) -> None:
super(FileServerLocation, self).__init__(additional_properties=additional_properties, type=type, folder_path=folder_path, file_name=file_name, **kwargs)
| true | true |
f7fb83b3bf0ea0e468a7eaf0fc266fc06350eeb9 | 3,436 | py | Python | cupyimg/version.py | haesleinhuepf/cupyimg | 1fbe5d5ed53a030eb0dfbf618a0b194af1cac2ae | [
"BSD-3-Clause"
] | 39 | 2020-03-28T14:36:45.000Z | 2022-02-26T20:39:24.000Z | cupyimg/version.py | haesleinhuepf/cupyimg | 1fbe5d5ed53a030eb0dfbf618a0b194af1cac2ae | [
"BSD-3-Clause"
] | 10 | 2020-09-02T18:19:37.000Z | 2022-03-11T08:48:29.000Z | cupyimg/version.py | haesleinhuepf/cupyimg | 1fbe5d5ed53a030eb0dfbf618a0b194af1cac2ae | [
"BSD-3-Clause"
] | 4 | 2020-04-13T21:24:14.000Z | 2021-06-17T18:07:22.000Z | from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = ""  # use "" for first of series, number for 1 and above
_version_extra = "dev0"
# _version_extra = ""  # Uncomment this for full releases

# Construct full version string from these.
# Empty micro/extra components are simply omitted from the dotted string.
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = ".".join(map(str, _ver))

# PyPI trove classifiers for this package.
CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: BSD License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Topic :: Scientific/Engineering",
]

# Description should be a one-liner:
description = "cupyimg: CuPy-based subset of the skimage, scipy, etc. APIs"
# Long description will go up on the pypi page
long_description = """
CuPy Extensions
===============
This project contains CuPy-based implementations of functions from NumPy,
SciPy and Scikit-image that are not currently available in CuPy itself.
Ideally, much of the NumPy and SciPy-based functionality in this package will
be submitted upstream to the core CuPy project. This will allow more regular
continuous integration on a wider range of hardware.
For now these functions are provided in a separate, standalone package to allow
for rapid implementation / revision.
To get started using cupyimg with your own software, please go to the
repository README_.
.. _README: https://github.com/mritools/cupyimg/blob/master/README.md
License
=======
``cupyimg`` is licensed under the terms of the BSD 3-clause license. See the
file "LICENSE" for information on the history of this software, terms &
conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2019-2020,
Gregory R. Lee, Cincinnati Children's Hospital Medical Center.
"""

# Package metadata consumed by setup.py.
NAME = "cupyimg"
MAINTAINER = "Gregory R. Lee"
MAINTAINER_EMAIL = "grlee77@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/mritools/cupyimg"
DOWNLOAD_URL = ""
LICENSE = "BSD"
AUTHOR = "Gregory R. Lee"
AUTHOR_EMAIL = "grlee77@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
# Per-subpackage test directories to ship as package data.
PACKAGE_DATA = {
    "cupyimg": [
        pjoin("numpy", "core", "tests"),
        pjoin("numpy", "lib", "tests"),
        pjoin("scipy", "interpolate", "tests"),
        pjoin("scipy", "ndimage", "tests"),
        pjoin("scipy", "signal", "tests"),
        pjoin("scipy", "special", "tests"),
        pjoin("scipy", "stats", "tests"),
        pjoin("skimage", "color", "tests"),
        pjoin("skimage", "exposure", "tests"),
        pjoin("skimage", "feature", "tests"),
        pjoin("skimage", "filters", "tests"),
        pjoin("skimage", "measure", "tests"),
        pjoin("skimage", "metrics", "tests"),
        pjoin("skimage", "morphology", "tests"),
        pjoin("skimage", "registration", "tests"),
        pjoin("skimage", "restoration", "tests"),
        pjoin("skimage", "_shared", "tests"),
        pjoin("skimage", "transform", "tests"),
        pjoin("skimage", "util", "tests"),
    ]
}
REQUIRES = ["numpy"]
PYTHON_REQUIRES = ">= 3.6"
| 33.359223 | 79 | 0.686554 | from os.path import join as pjoin
_version_major = 0
_version_minor = 1
_version_micro = ""
_version_extra = "dev0"
n_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = ".".join(map(str, _ver))
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering",
]
description = "cupyimg: CuPy-based subset of the skimage, scipy, etc. APIs"
long_description = """
CuPy Extensions
===============
This project contains CuPy-based implementations of functions from NumPy,
SciPy and Scikit-image that are not currently available in CuPy itself.
Ideally, much of the NumPy and SciPy-based functionality in this package will
be submitted upstream to the core CuPy project. This will allow more regular
continuous integration on a wider range of hardware.
For now these functions are provided in a separate, standalone package to allow
for rapid implementation / revision.
To get started using cupyimg with your own software, please go to the
repository README_.
.. _README: https://github.com/mritools/cupyimg/blob/master/README.md
License
=======
``cupyimg`` is licensed under the terms of the BSD 3-clause license. See the
file "LICENSE" for information on the history of this software, terms &
conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2019-2020,
Gregory R. Lee, Cincinnati Children's Hospital Medical Center.
"""
NAME = "cupyimg"
MAINTAINER = "Gregory R. Lee"
MAINTAINER_EMAIL = "grlee77@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/mritools/cupyimg"
DOWNLOAD_URL = ""
LICENSE = "BSD"
AUTHOR = "Gregory R. Lee"
AUTHOR_EMAIL = "grlee77@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {
"cupyimg": [
pjoin("numpy", "core", "tests"),
pjoin("numpy", "lib", "tests"),
pjoin("scipy", "interpolate", "tests"),
pjoin("scipy", "ndimage", "tests"),
pjoin("scipy", "signal", "tests"),
pjoin("scipy", "special", "tests"),
pjoin("scipy", "stats", "tests"),
pjoin("skimage", "color", "tests"),
pjoin("skimage", "exposure", "tests"),
pjoin("skimage", "feature", "tests"),
pjoin("skimage", "filters", "tests"),
pjoin("skimage", "measure", "tests"),
pjoin("skimage", "metrics", "tests"),
pjoin("skimage", "morphology", "tests"),
pjoin("skimage", "registration", "tests"),
pjoin("skimage", "restoration", "tests"),
pjoin("skimage", "_shared", "tests"),
pjoin("skimage", "transform", "tests"),
pjoin("skimage", "util", "tests"),
]
}
REQUIRES = ["numpy"]
PYTHON_REQUIRES = ">= 3.6"
| true | true |
f7fb842514fa110335d5a59b7f8680010eb0cdef | 1,352 | py | Python | nilearn/tests/test_testing.py | agramfort/nilearn | f075440e6d97b5bf359bb25e9197dbcbbc26e5f2 | [
"BSD-2-Clause"
] | 1 | 2020-12-20T00:22:14.000Z | 2020-12-20T00:22:14.000Z | nilearn/tests/test_testing.py | agramfort/nilearn | f075440e6d97b5bf359bb25e9197dbcbbc26e5f2 | [
"BSD-2-Clause"
] | 3 | 2016-02-23T09:47:05.000Z | 2018-10-12T16:54:38.000Z | nilearn/tests/test_testing.py | agramfort/nilearn | f075440e6d97b5bf359bb25e9197dbcbbc26e5f2 | [
"BSD-2-Clause"
] | 3 | 2017-01-06T09:54:00.000Z | 2020-02-17T12:57:35.000Z | import itertools
import numpy as np
from nose.tools import assert_equal, assert_raises
from nilearn._utils.testing import generate_fake_fmri
def test_generate_fake_fmri():
    """Smoke-test generate_fake_fmri over a grid of argument combinations.

    For every combination, the returned fMRI volume must match the requested
    spatial shape and length; when blocks are requested, the target must have
    one label per time point. Finally, an impossible block layout must raise.
    """
    rand_gen = np.random.RandomState(3)
    combinations = itertools.product(
        [(6, 6, 7), (10, 11, 12)],        # shapes
        [16, 20],                         # lengths
        ['noise', 'step'],                # kinds
        [None, 1, 4],                     # n_blocks
        [None, 4],                        # block_size
        ['classification', 'regression'],  # block_type
    )
    for shape, length, kind, n_block, bsize, btype in combinations:
        outputs = generate_fake_fmri(
            shape=shape, length=length, kind=kind,
            n_blocks=n_block, block_size=bsize,
            block_type=btype,
            rand_gen=rand_gen)
        # Without blocks only (fmri, mask) is returned; with blocks a target
        # vector is appended.
        if n_block is None:
            fmri, mask = outputs
            target = None
        else:
            fmri, mask, target = outputs
        assert_equal(fmri.shape[:-1], shape)
        assert_equal(fmri.shape[-1], length)
        if n_block is not None:
            assert_equal(target.size, length)

    # Ten blocks cannot fit into ten time points with automatic sizing.
    assert_raises(ValueError, generate_fake_fmri, length=10, n_blocks=10,
                  block_size=None, rand_gen=rand_gen)
| 30.727273 | 73 | 0.611686 | import itertools
import numpy as np
from nose.tools import assert_equal, assert_raises
from nilearn._utils.testing import generate_fake_fmri
def test_generate_fake_fmri():
shapes = [(6, 6, 7), (10, 11, 12)]
lengths = [16, 20]
kinds = ['noise', 'step']
n_blocks = [None, 1, 4]
block_size = [None, 4]
block_type = ['classification', 'regression']
rand_gen = np.random.RandomState(3)
for shape, length, kind, n_block, bsize, btype in itertools.product(
shapes, lengths, kinds, n_blocks, block_size, block_type):
if n_block is None:
fmri, mask = generate_fake_fmri(
shape=shape, length=length, kind=kind,
n_blocks=n_block, block_size=bsize,
block_type=btype,
rand_gen=rand_gen)
else:
fmri, mask, target = generate_fake_fmri(
shape=shape, length=length, kind=kind,
n_blocks=n_block, block_size=bsize,
block_type=btype,
rand_gen=rand_gen)
assert_equal(fmri.shape[:-1], shape)
assert_equal(fmri.shape[-1], length)
if n_block is not None:
assert_equal(target.size, length)
assert_raises(ValueError, generate_fake_fmri, length=10, n_blocks=10,
block_size=None, rand_gen=rand_gen)
| true | true |
f7fb84ff9e639e6e038d329eec5eb7f65a8d7883 | 1,718 | py | Python | tekstovni_vmesnik.py | milaneztim/Racunanje-z-matrikami | 2803f936dc67db1ec308cc1e9be35f434e4b58ef | [
"MIT"
] | null | null | null | tekstovni_vmesnik.py | milaneztim/Racunanje-z-matrikami | 2803f936dc67db1ec308cc1e9be35f434e4b58ef | [
"MIT"
] | null | null | null | tekstovni_vmesnik.py | milaneztim/Racunanje-z-matrikami | 2803f936dc67db1ec308cc1e9be35f434e4b58ef | [
"MIT"
] | null | null | null | import model
def dolzina_maksimalnega_clena(sez):
    """Return the character width of the widest entry in matrix ``sez``.

    Fix: the original computed ``len(str(max(...)))`` -- the width of the
    *largest value* -- which under-reports the field width whenever the
    widest entry is not the largest one (e.g. -100 vs. 5). We now take the
    maximum of the string lengths of all entries instead.
    """
    return max(len(str(clen)) for vrstica in sez for clen in vrstica)
def prikaz_matrike(sez):
    """Print matrix ``sez`` to stdout, one '|'-delimited row per line.

    Each entry is centred in a field sized by dolzina_maksimalnega_clena;
    when the padding is odd, the extra space goes to the right.
    """
    sirina = dolzina_maksimalnega_clena(sez)
    stolpcev = len(sez[0])
    for vrstica_podatkov in sez:
        deli = ['|']
        for j in range(stolpcev):
            besedilo = str(vrstica_podatkov[j])
            presezek = sirina - len(besedilo)
            levo = presezek // 2 + 1
            desno = presezek - presezek // 2  # == presezek//2, +1 when odd
            deli.append(' ' * levo + besedilo + ' ' * desno)
        print(''.join(deli) + ' |')
def zahtevaj_velikost():
    """Prompt the user for matrix dimensions; return (rows, cols) as strings."""
    vrstice = input("Stevilo vrstic: ")
    stolpci = input("Stevilo stolpcev: ")
    return vrstice, stolpci
def zahtevaj_vnos(m, n):
    """Read an m-by-n integer matrix from the user, entry by entry.

    Prompts are 1-indexed ("a_1,1 = ", ...); returns a list of row lists.
    """
    matrika = []
    for i in range(1, m + 1):
        vrstica = []
        for j in range(1, n + 1):
            vrstica.append(int(input("a_{0},{1} = ".format(i, j))))
        matrika.append(vrstica)
    return matrika
def pozeni_vmesnik():
    """Interactively read a matrix from the user and return it.

    Asks for the number of rows and columns, then for each entry.
    Fix: the original wrapped this in a ``while True`` loop that returned
    unconditionally on the first iteration -- a dead construct, removed here
    (behavior is unchanged).
    """
    m, n = zahtevaj_velikost()
    return zahtevaj_vnos(int(m), int(n))
def prikazi_matriko(sez):
    """Return matrix ``sez`` rendered as text.

    Produces one '|'-delimited, newline-terminated line per row; entries are
    centred in a common field width, with odd padding biased to the right.
    """
    sirina = dolzina_maksimalnega_clena(sez)
    stolpcev = len(sez[0])
    izpis = []
    for vrstica_podatkov in sez:
        deli = ['|']
        for j in range(stolpcev):
            besedilo = str(vrstica_podatkov[j])
            presezek = sirina - len(besedilo)
            deli.append(' ' * (presezek // 2 + 1)
                        + besedilo
                        + ' ' * (presezek - presezek // 2))
        izpis.append(''.join(deli) + ' |\n')
    return ''.join(izpis)
#a = pozeni_vmesnik()
#print(prikaz_matrike(a))
| 25.264706 | 76 | 0.495343 | import model
def dolzina_maksimalnega_clena(sez):
m = len(sez)
n = len(sez[0])
najvecji = max([sez[i][j] for i in range(m) for j in range(n)])
return len(str(najvecji))
def prikaz_matrike(sez):
m = len(sez)
n = len(sez[0])
razmik = dolzina_maksimalnega_clena(sez)
for i in range(m):
vrstica = '|'
for j in range(n):
clen = sez[i][j]
razlika = razmik - len(str(clen))
for _ in range(razlika // 2 + 1):
vrstica += ' '
vrstica += str(clen)
for _ in range(razlika // 2):
vrstica += ' '
if razlika % 2 == 1:
vrstica += ' '
print(vrstica + ' |')
def zahtevaj_velikost():
return input("Stevilo vrstic: "), input("Stevilo stolpcev: ")
def zahtevaj_vnos(m, n):
return [[int(input("a_{0},{1} = ".format(i, j))) for j in range(1, n+1)]
for i in range(1, m+1)]
def pozeni_vmesnik():
while True:
m, n = zahtevaj_velikost()
sez = zahtevaj_vnos(int(m), int(n))
return sez
def prikazi_matriko(sez):
m = len(sez)
n = len(sez[0])
razmik = dolzina_maksimalnega_clena(sez)
matrika = ''
for i in range(m):
vrstica = '|'
for j in range(n):
clen = sez[i][j]
razlika = razmik - len(str(clen))
for _ in range(razlika // 2 + 1):
vrstica += ' '
vrstica += str(clen)
for _ in range(razlika // 2):
vrstica += ' '
if razlika % 2 == 1:
vrstica += ' '
matrika += (vrstica + ' |\n')
return matrika
| true | true |
f7fb854f0cf77eb371f83668283aa1310a428d43 | 272 | py | Python | paraVerComoFuncionaAlgumasCoisas/pythonParaAnaliseDeDados/capitulo4-basicosobreONumPy-arrayseprocessamentovetorizado/ipython_3_criando_ndarrays.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/pythonParaAnaliseDeDados/capitulo4-basicosobreONumPy-arrayseprocessamentovetorizado/ipython_3_criando_ndarrays.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/pythonParaAnaliseDeDados/capitulo4-basicosobreONumPy-arrayseprocessamentovetorizado/ipython_3_criando_ndarrays.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | # coding: utf-8
# Worked example of creating ndarrays (transcribed from an IPython session).
data1 = [6, 7.5, 8., 0., 1.]
import numpy as np
arr1 = np.array(data1)  # 1-D float array inferred from the mixed int/float list
arr1
data2 = [[1, 2, 3, 4], [5, 6, 7, 8]]
arr2 = np.array(data2)  # 2-D (2, 4) int array
arr2
arr2.ndim
arr2.shape
arr1.dtype
arr2.dtype
# Fix: np.zeros(3, 6) raises TypeError -- the second positional argument of
# np.zeros is `dtype`, not a dimension. The shape must be a single tuple.
np.zeros((3, 6))
np.zeros(10)
np.zeros((3, 6))
np.empty((2, 3, 2))  # uninitialized memory; contents are arbitrary
np.arange(15)
| 15.111111 | 30 | 0.628676 |
data1 = [6, 7.5, 8., 0., 1.]
import numpy as np
arr1 = np.array(data1)
arr1
data2 = [[1,2,3,4], [5,6,7,8]]
arr2 = np.array(data2)
arr2
arr2.ndim
arr2.shape
arr1.dtype
arr2.dtype
np.zeros(3,6)
np.zeros(10)
np.zeros((3, 6))
np.empty((2, 3, 2))
np.arange(15)
| true | true |
f7fb862ec69a2f9eb576e4b18782dadf710db374 | 2,775 | py | Python | pems/originals/analyze.py | start2020/MSGC-Seq2Seq | 70f8db9293c8033a4b4f03f30a0164c360c4bcd0 | [
"MIT"
] | null | null | null | pems/originals/analyze.py | start2020/MSGC-Seq2Seq | 70f8db9293c8033a4b4f03f30a0164c360c4bcd0 | [
"MIT"
] | null | null | null | pems/originals/analyze.py | start2020/MSGC-Seq2Seq | 70f8db9293c8033a4b4f03f30a0164c360c4bcd0 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import datetime
# traffic_file = "PeMS.h5"
# df = pd.read_hdf(traffic_file)
#
# data = df.values
# # tmp_df = df[0]
# # tmf_file_name = "tmp.xlsx" #保存后删除第一列
# # tmp_df.to_excel()
# # print(tmp_df)
#
# new_pf = pd.read_excel('./tmp.xlsx', sheet_name = 0)
# Time = df.index
#
# print(new_pf)
#
# T1 = int(24*60/5)
#
# start_time = "2017-01-01 00:00:00"
# start_time_dt = datetime.datetime.strptime(start_time,"%Y-%m-%d %H:%M:%S")
# new_pf.loc[start_time_dt] = data[0]
# loss_index_num = 0
# for time_index in range(1,df.shape[0]):
# print(time_index/df.shape[0])
# time_delta = Time[time_index] - Time[time_index - 1]
# seconds = time_delta.seconds
# if seconds == 300: # 5分钟
# cur_time = str((start_time_dt + datetime.timedelta(minutes=(time_index+loss_index_num) * 5)).strftime("%Y-%m-%d %H:%M:%S"))
# new_pf.loc[datetime.datetime.strptime(cur_time, "%Y-%m-%d %H:%M:%S")] = data[time_index]
# else:
# err_index = 0
# print(seconds)
# k = seconds//300 #一次补全k个数据
#
# for j in range(k):
# cur_time = str((start_time_dt + datetime.timedelta(minutes=(time_index + loss_index_num + j) * 5)).strftime(
# "%Y-%m-%d %H:%M:%S"))
# res = new_pf.values[(time_index + loss_index_num+ j)-T1*7]#用上一周数据来填补丢失的数据
# new_pf.loc[datetime.datetime.strptime(cur_time, "%Y-%m-%d %H:%M:%S")] = res
# loss_index_num += k
#
# print(new_pf.shape)
#
#
# output_name = "pems_c.h5"
# new_pf.to_hdf(output_name,'obj3',format='table')
# df = pd.read_hdf(output_name)
# print(df.values.shape)
# Load the gap-filled PeMS readings and attach per-interval time features
# (5-minute slot, day of week, day of year), then save as a 4-D tensor.
traffic_file = "pems_c.h5"
new_pf = pd.read_hdf(traffic_file)
T1 = int(24*60/5)  # number of 5-minute intervals per day (288)
print("T1 ",T1)
Time = new_pf.index
data = new_pf.values  # (days*T1, N) raw sensor readings
N = data.shape[-1]  # number of sensors
days = data.shape[0]//T1
dayofweek = np.reshape(Time.weekday, newshape = (-1, 1))
timeofday = (Time.hour * 60 + Time.minute + Time.second / 60) // 5  # 5-minute slot index within the day
timeofday = np.reshape(timeofday, newshape = (-1, 1))
dayofyear = np.reshape(Time.dayofyear-1, newshape = (-1, 1))  # zero-based day of year
new_time = np.concatenate((timeofday,dayofweek,dayofyear), axis = -1)  # (days*T1, 3) time features
new_time = np.expand_dims(new_time, axis=1)  # (days*T1, 1, 3)
new_time = np.tile(new_time,(1,N, 1))  # (days*T1, N, 3): same features replicated per sensor
print(new_time.shape)
data = np.expand_dims(data, axis=-1)  # (days*T1, N, 1)
print(data.shape)
data = np.concatenate([data,new_time ], axis=-1)  # (days*T1, N, 4): reading + 3 time features
print(data.shape)
# data_file = './matrix-gman.npz'
# np.savez_compressed(data_file, data)
data = data.reshape(days,T1,N,4)
data = data.astype(np.float32)
print(data.dtype)
print(data.shape)
data_file = './matrix.npz'
np.savez_compressed(data_file, data)  # saved tensor shape: (days, T1, N, 4)
| 31.896552 | 134 | 0.624505 | import pandas as pd
import numpy as np
import datetime
nt("T1 ",T1)
Time = new_pf.index
data = new_pf.values
N = data.shape[-1]
days = data.shape[0]//T1
dayofweek = np.reshape(Time.weekday, newshape = (-1, 1))
timeofday = (Time.hour * 60 + Time.minute + Time.second / 60) // 5
timeofday = np.reshape(timeofday, newshape = (-1, 1))
dayofyear = np.reshape(Time.dayofyear-1, newshape = (-1, 1))
new_time = np.concatenate((timeofday,dayofweek,dayofyear), axis = -1)
new_time = np.expand_dims(new_time, axis=1)
new_time = np.tile(new_time,(1,N, 1))
print(new_time.shape)
data = np.expand_dims(data, axis=-1)
print(data.shape)
data = np.concatenate([data,new_time ], axis=-1)
print(data.shape)
data = data.reshape(days,T1,N,4)
data = data.astype(np.float32)
print(data.dtype)
print(data.shape)
data_file = './matrix.npz'
np.savez_compressed(data_file, data)
| true | true |
f7fb8659b00606d3e1564c69b93fb9358f73c590 | 11,309 | py | Python | gpytorch/variational/whitened_variational_strategy.py | Xiao-dong-Wang/gpytorch | 92e07cf4dae26083fe0aed926e1dfd483443924e | [
"MIT"
] | 2 | 2020-09-11T12:11:16.000Z | 2020-09-24T03:58:16.000Z | gpytorch/variational/whitened_variational_strategy.py | Xiao-dong-Wang/gpytorch | 92e07cf4dae26083fe0aed926e1dfd483443924e | [
"MIT"
] | null | null | null | gpytorch/variational/whitened_variational_strategy.py | Xiao-dong-Wang/gpytorch | 92e07cf4dae26083fe0aed926e1dfd483443924e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
import warnings
import torch
from .. import settings
from ..distributions import MultivariateNormal
from ..lazy import (
BatchRepeatLazyTensor,
CachedCGLazyTensor,
CholLazyTensor,
DiagLazyTensor,
MatmulLazyTensor,
PsdSumLazyTensor,
RootLazyTensor,
)
from ..module import Module
from ..utils.memoize import cached
from .unwhitened_variational_strategy import UnwhitenedVariationalStrategy
# Remove after 1.0
class WhitenedVariationalStrategy(UnwhitenedVariationalStrategy):
    """Deprecated variational strategy using a whitened variational distribution.

    The variational distribution is parameterized in the whitened coordinate
    system of the prior (the predictive covariance is de-whitened by the prior
    covariance in :meth:`forward`). Emits a DeprecationWarning on construction;
    use ``VariationalStrategy`` instead.
    """

    def __init__(self, model, inducing_points, variational_distribution, learn_inducing_locations=True):
        warnings.warn(
            "WhitenedVariationalStrategy is deprecated. Please use VariationalStrategy instead.", DeprecationWarning
        )
        super().__init__(model, inducing_points, variational_distribution, learn_inducing_locations)

    @cached(name="logdet_memo")
    def prior_covar_logdet(self):
        # -log|K| of the prior covariance over the inducing points (cached).
        return -self.prior_distribution.lazy_covariance_matrix.logdet()

    @cached(name="covar_trace_memo")
    def covar_trace(self):
        # Elementwise product then sum == tr(S K) per batch (cached).
        variational_covar = self.variational_distribution.covariance_matrix
        prior_covar = self.prior_distribution.covariance_matrix
        batch_shape = prior_covar.shape[:-2]
        return (variational_covar * prior_covar).view(*batch_shape, -1).sum(-1)

    @cached(name="mean_diff_inv_quad_memo")
    def mean_diff_inv_quad(self):
        # (m - mu)^T K^-1 (m - mu) between variational and prior means (cached).
        prior_mean = self.prior_distribution.mean
        prior_covar = self.prior_distribution.lazy_covariance_matrix
        variational_mean = self.variational_distribution.mean
        return prior_covar.inv_quad(variational_mean - prior_mean)

    def kl_divergence(self):
        """KL( q(u) || p(u) ) between the (whitened) variational and prior distributions."""
        variational_dist_u = self.variational_distribution
        prior_dist = self.prior_distribution
        kl_divergence = 0.5 * sum(
            [
                # log|k| - log|S|
                # = log|K| - log|K var_dist_covar K|
                # = -log|K| - log|var_dist_covar|
                self.prior_covar_logdet(),
                -variational_dist_u.lazy_covariance_matrix.logdet(),
                # tr(K^-1 S) = tr(K^1 K var_dist_covar K) = tr(K var_dist_covar)
                self.covar_trace(),
                # (m - \mu u)^T K^-1 (m - \mu u)
                # = (K^-1 (m - \mu u)) K (K^1 (m - \mu u))
                # = (var_dist_mean)^T K (var_dist_mean)
                self.mean_diff_inv_quad(),
                # d
                -prior_dist.event_shape.numel(),
            ]
        )
        return kl_divergence

    def initialize_variational_dist(self):
        # Initialize q(u) from the *inverse* of the (jittered) prior covariance,
        # i.e. from the prior expressed in whitened coordinates. The inverse is
        # taken in double precision and cast back for numerical stability.
        prior_dist = self.prior_distribution
        inv_prior_dist = torch.distributions.MultivariateNormal(
            prior_dist.mean,
            prior_dist.lazy_covariance_matrix.add_jitter()
            .evaluate()
            .double()
            .inverse()
            .type_as(prior_dist.covariance_matrix),
        )
        self.variational_distribution.initialize_variational_distribution(inv_prior_dist)

    def forward(self, x):
        r"""
        The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
        inducing point function values. Specifically, forward defines how to transform a variational distribution
        over the inducing point values, :math:`q(u)`, in to a variational distribution over the function values at
        specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`

        :param torch.Tensor x: Locations x to get the variational posterior of the function values at.
        :rtype: ~gpytorch.distributions.MultivariateNormal
        :return: The distribution :math:`q(f|x)`
        """
        variational_dist = self.variational_distribution
        inducing_points = self.inducing_points
        # Broadcast inducing points / variational distribution over x's batch dims.
        if inducing_points.dim() < x.dim():
            inducing_points = inducing_points.expand(*x.shape[:-2], *inducing_points.shape[-2:])
        if len(variational_dist.batch_shape) < x.dim() - 2:
            variational_dist = variational_dist.expand(x.shape[:-2])
        # If our points equal the inducing points, we're done
        if torch.equal(x, inducing_points):
            # De-whiten the prior covar
            prior_covar = self.prior_distribution.lazy_covariance_matrix
            if isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor):
                predictive_covar = RootLazyTensor(prior_covar @ variational_dist.lazy_covariance_matrix.root.evaluate())
            else:
                predictive_covar = MatmulLazyTensor(prior_covar @ variational_dist.covariance_matrix, prior_covar)
            # Cache some values for the KL divergence
            if self.training:
                self._mean_diff_inv_quad_memo, self._logdet_memo = prior_covar.inv_quad_logdet(
                    (variational_dist.mean - self.prior_distribution.mean), logdet=True
                )
            return MultivariateNormal(variational_dist.mean, predictive_covar)
        # Otherwise, we have to marginalize
        else:
            num_induc = inducing_points.size(-2)
            # Evaluate the model jointly over inducing points and test points so
            # the cross-covariances come from a single kernel evaluation.
            full_inputs = torch.cat([inducing_points, x], dim=-2)
            full_output = self.model.forward(full_inputs)
            full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
            # Mean terms
            test_mean = full_mean[..., num_induc:]
            induc_mean = full_mean[..., :num_induc]
            mean_diff = (variational_dist.mean - induc_mean).unsqueeze(-1)
            # Covariance terms
            induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
            induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
            data_data_covar = full_covar[..., num_induc:, num_induc:]
            # If we're less than a certain size, we'll compute the Cholesky decomposition of induc_induc_covar
            cholesky = False
            if settings.fast_computations.log_prob.off() or (num_induc <= settings.max_cholesky_size.value()):
                induc_induc_covar = CholLazyTensor(induc_induc_covar.cholesky())
                cholesky = True
            # Cache the CG results
            # Do not use preconditioning for whitened VI, as it does not seem to improve performance.
            with settings.max_preconditioner_size(0):
                with torch.no_grad():
                    # Solve against [K_uf, m - mu] once and reuse the results.
                    eager_rhs = torch.cat([induc_data_covar, mean_diff], -1)
                    solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
                        induc_induc_covar,
                        eager_rhs.detach(),
                        logdet_terms=(not cholesky),
                        include_tmats=(not settings.skip_logdet_forward.on() and not cholesky),
                    )
                    eager_rhss = [eager_rhs.detach()]
                    solves = [solve.detach()]
                    if settings.skip_logdet_forward.on() and self.training:
                        eager_rhss.append(torch.cat([probe_vecs, eager_rhs], -1))
                        solves.append(torch.cat([probe_vec_solves, solve[..., : eager_rhs.size(-1)]], -1))
                    elif not self.training:
                        # At eval time the mean-difference column is not needed.
                        eager_rhss.append(eager_rhs[..., :-1])
                        solves.append(solve[..., :-1])
                induc_induc_covar = CachedCGLazyTensor(
                    induc_induc_covar,
                    eager_rhss=eager_rhss,
                    solves=solves,
                    probe_vectors=probe_vecs,
                    probe_vector_norms=probe_vec_norms,
                    probe_vector_solves=probe_vec_solves,
                    probe_vector_tmats=tmats,
                )
            # Compute some terms that will be necessary for the predicitve covariance and KL divergence
            if self.training:
                interp_data_data_var_plus_mean_diff_inv_quad, logdet = induc_induc_covar.inv_quad_logdet(
                    torch.cat([induc_data_covar, mean_diff], -1), logdet=True, reduce_inv_quad=False
                )
                interp_data_data_var = interp_data_data_var_plus_mean_diff_inv_quad[..., :-1]
                mean_diff_inv_quad = interp_data_data_var_plus_mean_diff_inv_quad[..., -1]
            # Compute predictive mean
            predictive_mean = torch.add(
                test_mean,
                induc_induc_covar.inv_matmul(mean_diff, left_tensor=induc_data_covar.transpose(-1, -2)).squeeze(-1),
            )
            # Compute the predictive covariance
            is_root_lt = isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor)
            is_repeated_root_lt = isinstance(
                variational_dist.lazy_covariance_matrix, BatchRepeatLazyTensor
            ) and isinstance(variational_dist.lazy_covariance_matrix.base_lazy_tensor, RootLazyTensor)
            if is_root_lt:
                predictive_covar = RootLazyTensor(
                    induc_data_covar.transpose(-1, -2) @ variational_dist.lazy_covariance_matrix.root.evaluate()
                )
            elif is_repeated_root_lt:
                predictive_covar = RootLazyTensor(
                    induc_data_covar.transpose(-1, -2)
                    @ variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate()
                )
            else:
                # NOTE(review): `predictive_covar` is read on the right-hand side
                # before it is ever assigned on this path, so this branch raises
                # UnboundLocalError if reached -- confirm against upstream.
                predictive_covar = MatmulLazyTensor(
                    induc_data_covar.transpose(-1, -2), predictive_covar @ induc_data_covar
                )
            if self.training:
                data_covariance = DiagLazyTensor((data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf))
            else:
                neg_induc_data_data_covar = torch.matmul(
                    induc_data_covar.transpose(-1, -2).mul(-1), induc_induc_covar.inv_matmul(induc_data_covar)
                )
                data_covariance = data_data_covar + neg_induc_data_data_covar
            predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
            # Save the logdet, mean_diff_inv_quad, prior distribution for the ELBO
            if self.training:
                self._memoize_cache["prior_distribution_memo"] = MultivariateNormal(induc_mean, induc_induc_covar)
                self._memoize_cache["logdet_memo"] = -logdet
                self._memoize_cache["mean_diff_inv_quad_memo"] = mean_diff_inv_quad
            return MultivariateNormal(predictive_mean, predictive_covar)

    def __call__(self, x, prior=False):
        # If we're in prior mode, then we're done!
        if prior:
            return self.model.forward(x)
        # Delete previously cached items from the training distribution
        if self.training:
            if hasattr(self, "_memoize_cache"):
                delattr(self, "_memoize_cache")
            self._memoize_cache = dict()
        # (Maybe) initialize variational distribution
        if not self.variational_params_initialized.item():
            prior_dist = self.prior_distribution
            self._variational_distribution.initialize_variational_distribution(prior_dist)
            self.variational_params_initialized.fill_(1)
        return Module.__call__(self, x)
| 47.120833 | 120 | 0.633212 |
import math
import warnings
import torch
from .. import settings
from ..distributions import MultivariateNormal
from ..lazy import (
BatchRepeatLazyTensor,
CachedCGLazyTensor,
CholLazyTensor,
DiagLazyTensor,
MatmulLazyTensor,
PsdSumLazyTensor,
RootLazyTensor,
)
from ..module import Module
from ..utils.memoize import cached
from .unwhitened_variational_strategy import UnwhitenedVariationalStrategy
class WhitenedVariationalStrategy(UnwhitenedVariationalStrategy):
    """Variational strategy whose predictive computations "de-whiten" the
    variational distribution by multiplying the prior covariance back in
    (see the de-whitening step in :meth:`forward`).

    Deprecated: ``__init__`` emits a ``DeprecationWarning`` pointing users at
    ``VariationalStrategy`` instead.
    """
    def __init__(self, model, inducing_points, variational_distribution, learn_inducing_locations=True):
        """Warn about deprecation, then defer to the unwhitened base class."""
        warnings.warn(
            "WhitenedVariationalStrategy is deprecated. Please use VariationalStrategy instead.", DeprecationWarning
        )
        super().__init__(model, inducing_points, variational_distribution, learn_inducing_locations)
    @cached(name="logdet_memo")
    def prior_covar_logdet(self):
        """Negative log-determinant of the prior covariance (memoized)."""
        return -self.prior_distribution.lazy_covariance_matrix.logdet()
    @cached(name="covar_trace_memo")
    def covar_trace(self):
        """Trace term tr(Kp Kq) computed as an elementwise product-sum (memoized)."""
        variational_covar = self.variational_distribution.covariance_matrix
        prior_covar = self.prior_distribution.covariance_matrix
        batch_shape = prior_covar.shape[:-2]
        return (variational_covar * prior_covar).view(*batch_shape, -1).sum(-1)
    @cached(name="mean_diff_inv_quad_memo")
    def mean_diff_inv_quad(self):
        """Inverse quadratic form of the mean difference under the prior covariance (memoized)."""
        prior_mean = self.prior_distribution.mean
        prior_covar = self.prior_distribution.lazy_covariance_matrix
        variational_mean = self.variational_distribution.mean
        return prior_covar.inv_quad(variational_mean - prior_mean)
    def kl_divergence(self):
        """KL divergence between the variational and prior distributions,
        assembled as 0.5 * (logdet terms + trace term + mean term - dim)."""
        variational_dist_u = self.variational_distribution
        prior_dist = self.prior_distribution
        kl_divergence = 0.5 * sum(
            [
                self.prior_covar_logdet(),
                -variational_dist_u.lazy_covariance_matrix.logdet(),
                self.covar_trace(),
                self.mean_diff_inv_quad(),
                -prior_dist.event_shape.numel(),
            ]
        )
        return kl_divergence
    def initialize_variational_dist(self):
        """Initialize the variational distribution from the prior: same mean,
        but with the (jittered) prior covariance inverted.

        The inverse is taken in double precision and cast back to reduce
        numerical error.
        """
        prior_dist = self.prior_distribution
        inv_prior_dist = torch.distributions.MultivariateNormal(
            prior_dist.mean,
            prior_dist.lazy_covariance_matrix.add_jitter()
            .evaluate()
            .double()
            .inverse()
            .type_as(prior_dist.covariance_matrix),
        )
        self.variational_distribution.initialize_variational_distribution(inv_prior_dist)
    def forward(self, x):
        """Return the (marginal) predictive MultivariateNormal at ``x``.

        Two cases: if ``x`` coincides with the inducing points, the
        variational distribution is de-whitened directly; otherwise the
        prediction is obtained by marginalizing over the inducing values.
        """
        variational_dist = self.variational_distribution
        inducing_points = self.inducing_points
        # Broadcast inducing points / variational dist up to x's batch shape.
        if inducing_points.dim() < x.dim():
            inducing_points = inducing_points.expand(*x.shape[:-2], *inducing_points.shape[-2:])
        if len(variational_dist.batch_shape) < x.dim() - 2:
            variational_dist = variational_dist.expand(x.shape[:-2])
        if torch.equal(x, inducing_points):
            # De-whiten the prior covar
            prior_covar = self.prior_distribution.lazy_covariance_matrix
            if isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor):
                predictive_covar = RootLazyTensor(prior_covar @ variational_dist.lazy_covariance_matrix.root.evaluate())
            else:
                predictive_covar = MatmulLazyTensor(prior_covar @ variational_dist.covariance_matrix, prior_covar)
            # Cache some values for the KL divergence
            if self.training:
                self._mean_diff_inv_quad_memo, self._logdet_memo = prior_covar.inv_quad_logdet(
                    (variational_dist.mean - self.prior_distribution.mean), logdet=True
                )
            return MultivariateNormal(variational_dist.mean, predictive_covar)
        # Otherwise, we have to marginalize
        else:
            num_induc = inducing_points.size(-2)
            full_inputs = torch.cat([inducing_points, x], dim=-2)
            full_output = self.model.forward(full_inputs)
            full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
            # Mean terms
            test_mean = full_mean[..., num_induc:]
            induc_mean = full_mean[..., :num_induc]
            mean_diff = (variational_dist.mean - induc_mean).unsqueeze(-1)
            # Covariance terms
            induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
            induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
            data_data_covar = full_covar[..., num_induc:, num_induc:]
            # If we're less than a certain size, we'll compute the Cholesky decomposition of induc_induc_covar
            cholesky = False
            if settings.fast_computations.log_prob.off() or (num_induc <= settings.max_cholesky_size.value()):
                induc_induc_covar = CholLazyTensor(induc_induc_covar.cholesky())
                cholesky = True
            # Cache the CG results
            # Do not use preconditioning for whitened VI, as it does not seem to improve performance.
            with settings.max_preconditioner_size(0):
                with torch.no_grad():
                    eager_rhs = torch.cat([induc_data_covar, mean_diff], -1)
                    solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
                        induc_induc_covar,
                        eager_rhs.detach(),
                        logdet_terms=(not cholesky),
                        include_tmats=(not settings.skip_logdet_forward.on() and not cholesky),
                    )
                    eager_rhss = [eager_rhs.detach()]
                    solves = [solve.detach()]
                    if settings.skip_logdet_forward.on() and self.training:
                        eager_rhss.append(torch.cat([probe_vecs, eager_rhs], -1))
                        solves.append(torch.cat([probe_vec_solves, solve[..., : eager_rhs.size(-1)]], -1))
                    elif not self.training:
                        # Inference: drop the mean_diff column from the cache.
                        eager_rhss.append(eager_rhs[..., :-1])
                        solves.append(solve[..., :-1])
                induc_induc_covar = CachedCGLazyTensor(
                    induc_induc_covar,
                    eager_rhss=eager_rhss,
                    solves=solves,
                    probe_vectors=probe_vecs,
                    probe_vector_norms=probe_vec_norms,
                    probe_vector_solves=probe_vec_solves,
                    probe_vector_tmats=tmats,
                )
            # Compute some terms that will be necessary for the predicitve covariance and KL divergence
            if self.training:
                # Note: logdet / mean_diff_inv_quad / interp_data_data_var are
                # only defined in training mode; all later uses are guarded by
                # the same self.training flag.
                interp_data_data_var_plus_mean_diff_inv_quad, logdet = induc_induc_covar.inv_quad_logdet(
                    torch.cat([induc_data_covar, mean_diff], -1), logdet=True, reduce_inv_quad=False
                )
                interp_data_data_var = interp_data_data_var_plus_mean_diff_inv_quad[..., :-1]
                mean_diff_inv_quad = interp_data_data_var_plus_mean_diff_inv_quad[..., -1]
            # Compute predictive mean
            predictive_mean = torch.add(
                test_mean,
                induc_induc_covar.inv_matmul(mean_diff, left_tensor=induc_data_covar.transpose(-1, -2)).squeeze(-1),
            )
            # Compute the predictive covariance
            is_root_lt = isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor)
            is_repeated_root_lt = isinstance(
                variational_dist.lazy_covariance_matrix, BatchRepeatLazyTensor
            ) and isinstance(variational_dist.lazy_covariance_matrix.base_lazy_tensor, RootLazyTensor)
            if is_root_lt:
                predictive_covar = RootLazyTensor(
                    induc_data_covar.transpose(-1, -2) @ variational_dist.lazy_covariance_matrix.root.evaluate()
                )
            elif is_repeated_root_lt:
                predictive_covar = RootLazyTensor(
                    induc_data_covar.transpose(-1, -2)
                    @ variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate()
                )
            else:
                # NOTE(review): `predictive_covar` is read on the right-hand
                # side before any assignment on this branch -- this raises
                # NameError if neither Root case above matched. Confirm
                # against upstream; likely should reference the variational
                # covariance instead.
                predictive_covar = MatmulLazyTensor(
                    induc_data_covar.transpose(-1, -2), predictive_covar @ induc_data_covar
                )
            if self.training:
                # Clamp the diagonal correction to be non-negative.
                data_covariance = DiagLazyTensor((data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf))
            else:
                neg_induc_data_data_covar = torch.matmul(
                    induc_data_covar.transpose(-1, -2).mul(-1), induc_induc_covar.inv_matmul(induc_data_covar)
                )
                data_covariance = data_data_covar + neg_induc_data_data_covar
            predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
            # Save the logdet, mean_diff_inv_quad, prior distribution for the ELBO
            if self.training:
                self._memoize_cache["prior_distribution_memo"] = MultivariateNormal(induc_mean, induc_induc_covar)
                self._memoize_cache["logdet_memo"] = -logdet
                self._memoize_cache["mean_diff_inv_quad_memo"] = mean_diff_inv_quad
            return MultivariateNormal(predictive_mean, predictive_covar)
    def __call__(self, x, prior=False):
        """Evaluate at ``x``; with ``prior=True`` return the model prior directly."""
        # If we're in prior mode, then we're done!
        if prior:
            return self.model.forward(x)
        # Delete previously cached items from the training distribution
        if self.training:
            if hasattr(self, "_memoize_cache"):
                delattr(self, "_memoize_cache")
            self._memoize_cache = dict()
        # (Maybe) initialize variational distribution
        if not self.variational_params_initialized.item():
            prior_dist = self.prior_distribution
            self._variational_distribution.initialize_variational_distribution(prior_dist)
            self.variational_params_initialized.fill_(1)
        return Module.__call__(self, x)
| true | true |
f7fb86742df7ac32c13221dab28c03b55fcaa9c6 | 648 | py | Python | ciservice/apiv1/resource_services.py | idekerlab/ci-service-template | 5d46f030afe01a959c6afad0af35217347f4483a | [
"MIT"
] | 2 | 2015-10-02T18:41:09.000Z | 2015-10-16T20:57:01.000Z | ciservice/apiv1/resource_services.py | afcarl/ci-service-template | 5d46f030afe01a959c6afad0af35217347f4483a | [
"MIT"
] | 15 | 2015-05-05T22:46:37.000Z | 2021-01-20T22:55:30.000Z | ciservice/apiv1/resource_services.py | afcarl/ci-service-template | 5d46f030afe01a959c6afad0af35217347f4483a | [
"MIT"
] | 7 | 2015-04-20T20:48:36.000Z | 2015-11-13T02:35:36.000Z | # -*- coding: utf-8 -*-
import redis
from flask.ext.restful import Resource
from util_service import ServiceUtil
class ServicesResource(Resource):
    """REST resource returning every service registered in Redis."""

    def __init__(self):
        # Redis runs in a sibling container reachable under hostname "redis".
        self.__redis_conn = redis.Redis('redis', 6379)
        self.__util = ServiceUtil()

    def get(self):
        """List all registered services.

        :return: (list of service detail dicts, HTTP 200)
        """
        registered = self.__redis_conn.hgetall('endpoints')
        details = [self.__util.get_service_details(key) for key in registered]
        return details, 200
| 22.344828 | 68 | 0.623457 |
import redis
from flask.ext.restful import Resource
from util_service import ServiceUtil
class ServicesResource(Resource):
    """REST resource listing the services registered in Redis."""
    def __init__(self):
        # Redis host name "redis" on the default port 6379.
        self.__redis_conn = redis.Redis('redis', 6379)
        self.__util = ServiceUtil()
    def get(self):
        """Return (list of service details, HTTP 200) for all registered services."""
        registered_services = self.__redis_conn.hgetall('endpoints')
        services = []
        for key in registered_services.keys():
            services.append(self.__util.get_service_details(key))
        return services, 200
| true | true |
f7fb8a415c67646d561d00f301e4aab06e437740 | 656 | py | Python | P2/assignment/testing_structures/enumerated_pairs.py | Pierrefha/introduction-to-ai-practials | 55050d710398d82a5358bc04fea5a4cac6f7b74f | [
"MIT"
] | null | null | null | P2/assignment/testing_structures/enumerated_pairs.py | Pierrefha/introduction-to-ai-practials | 55050d710398d82a5358bc04fea5a4cac6f7b74f | [
"MIT"
] | null | null | null | P2/assignment/testing_structures/enumerated_pairs.py | Pierrefha/introduction-to-ai-practials | 55050d710398d82a5358bc04fea5a4cac6f7b74f | [
"MIT"
] | null | null | null | def grouped(iterable, n):
""" s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1),
(s2n,s2n+1,s2n+2,...s3n-1), ...
"""
return zip(*[iter(iterable)]*n)
if __name__ == '__main__':
    # Demo: walk the sequence two items at a time and update it in place.
    # Fix: the original bound this list to the name `list`, shadowing the
    # builtin; renamed to `numbers`.
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    it = iter(numbers)
    for index, pair in enumerate(grouped(it, 2)):
        print(f"index:{index} pair:{pair} ")
        first_num, second_num = pair
        print(f"first num:{first_num}, second num:{second_num}")
        # adapt first number
        numbers[2*index] = first_num+10
        # adapt second number
        numbers[2*index+1] = second_num+100
    for item in numbers:
        print(item)
| 29.818182 | 64 | 0.532012 | def grouped(iterable, n):
return zip(*[iter(iterable)]*n)
if __name__ == '__main__':
list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
it = iter(list)
for index, pair in enumerate(grouped(it, 2)):
print(f"index:{index} pair:{pair} ")
first_num = pair[0]
second_num = pair[1]
print(f"first num:{first_num}, second num:{second_num}")
list[2*index] = first_num+10
list[2*index+1] = second_num+100
for item in list:
print(item)
| true | true |
f7fb8abb999e47f25574fc099649d4000d19f0c7 | 1,431 | py | Python | examples/python/grib_set_missing.py | onyb/eccodes | 15b09889a89ff416c38ac7fb4fa336ffa257b12f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/python/grib_set_missing.py | onyb/eccodes | 15b09889a89ff416c38ac7fb4fa336ffa257b12f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/python/grib_set_missing.py | onyb/eccodes | 15b09889a89ff416c38ac7fb4fa336ffa257b12f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
# Copyright 2005-2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import traceback
import sys
from eccodes import *
INPUT = '../../data/tigge/tigge_ecmf_pl_t.grib'  # sample GRIB file shipped with eccodes
OUTPUT = 'out.p_set_missing.grib'  # file written by example()
VERBOSE = 1 # verbose error reporting
def example():
    """Copy the input GRIB message to OUTPUT, setting the surface level type
    and marking the first-fixed-surface scale factor/value as missing.

    Fixes: GRIB is a binary format, so the files are now opened in 'rb'/'wb'
    mode (text mode breaks under Python 3), and context managers guarantee
    the handles are closed even if an eccodes call raises.
    """
    with open(INPUT, 'rb') as fin, open(OUTPUT, 'wb') as fout:
        gid = codes_grib_new_from_file(fin)
        codes_set_long(gid, "scaledValueOfFirstFixedSurface", 15)
        codes_set_long(gid, "scaleFactorOfFirstFixedSurface", 1)
        level = codes_get_double(gid, "level")
        # 15 * 10^-1 -> the encoded level must read back as 1.5.
        assert (level == 1.5)
        # set type of level to surface
        codes_set(gid, 'typeOfFirstFixedSurface', 'sfc')
        codes_set_missing(gid, 'scaleFactorOfFirstFixedSurface')
        codes_set_missing(gid, 'scaledValueOfFirstFixedSurface')
        codes_write(gid, fout)
        codes_release(gid)
def main():
    """Run example(), mapping eccodes failures to an exit status of 1."""
    try:
        example()
    except CodesInternalError as err:
        # Terse one-line message by default; full traceback when verbose.
        if not VERBOSE:
            sys.stderr.write(err.msg + '\n')
        else:
            traceback.print_exc(file=sys.stderr)
        return 1
# Script entry point: process exit status comes from main().
if __name__ == "__main__":
    sys.exit(main())
| 24.254237 | 78 | 0.691824 |
import traceback
import sys
from eccodes import *
INPUT = '../../data/tigge/tigge_ecmf_pl_t.grib'  # sample GRIB file shipped with eccodes
OUTPUT = 'out.p_set_missing.grib'  # file written by example()
VERBOSE = 1  # non-zero -> print full tracebacks in main()
def example():
    """Copy the input GRIB message to OUTPUT with the first fixed surface set
    to 'sfc' and its scale factor/value marked missing.

    NOTE(review): GRIB data is binary, but the files are opened in text mode
    here and are not closed on error -- confirm 'rb'/'wb' plus a context
    manager would be preferable.
    """
    fin = open(INPUT)
    fout = open(OUTPUT, 'w')
    gid = codes_grib_new_from_file(fin)
    codes_set_long(gid, "scaledValueOfFirstFixedSurface", 15)
    codes_set_long(gid, "scaleFactorOfFirstFixedSurface", 1)
    level = codes_get_double(gid, "level")
    # 15 * 10^-1 must decode back to level 1.5.
    assert (level == 1.5)
    codes_set(gid, 'typeOfFirstFixedSurface', 'sfc')
    codes_set_missing(gid, 'scaleFactorOfFirstFixedSurface')
    codes_set_missing(gid, 'scaledValueOfFirstFixedSurface')
    codes_write(gid, fout)
    codes_release(gid)
    fin.close()
    fout.close()
def main():
    """Run example(); on eccodes failure report the error and return 1."""
    try:
        example()
    except CodesInternalError as err:
        # Full traceback when VERBOSE, terse message otherwise.
        if VERBOSE:
            traceback.print_exc(file=sys.stderr)
        else:
            sys.stderr.write(err.msg + '\n')
        return 1
# Script entry point: process exit status comes from main().
if __name__ == "__main__":
    sys.exit(main())
| true | true |
f7fb8be92f70434194551673bf10388f3f18bab5 | 1,096 | py | Python | setup.py | sotpotatis/Eatery-Python | 8669958739f46a432a98f6fe4541060b6b9ec809 | [
"MIT"
] | null | null | null | setup.py | sotpotatis/Eatery-Python | 8669958739f46a432a98f6fe4541060b6b9ec809 | [
"MIT"
] | null | null | null | setup.py | sotpotatis/Eatery-Python | 8669958739f46a432a98f6fe4541060b6b9ec809 | [
"MIT"
] | null | null | null | """EATERY-NOD
Python library for interacting with the Lunchbot API for getting the menu of eatery Kista Nod.
API-docs are available at https://eatery.nero2k.com/api. This library is made by sotpotatis."""
import pathlib
from setuptools import setup
HERE = pathlib.Path(__file__).parent  # Directory containing this setup.py
# Read the README explicitly as UTF-8 so packaging does not depend on the
# build machine's locale.
README = (HERE / "README.md").read_text(encoding="utf-8")

setup(
    name="lunchbot-python",
    version="0.1.1",
    url="https://github.com/sotpotatis/Lunchbot-Python",
    description="Python interface for the Lunchbot API.",
    long_description=README,
    long_description_content_type="text/markdown",
    author="Albin Seijmer",
    author_email="albinsmejladress@protonmail.com",
    license="MIT",
    classifiers=["Programming Language :: Python :: 3",
                 "Programming Language :: Python :: 3.7",
                 "Development Status :: 3 - Alpha",
                 "Intended Audience :: Developers",
                 "License :: OSI Approved :: MIT License"],
    packages=["eatery_nod"],
    include_package_data=True,
    # Fix: `requires` is inert distutils metadata and never installs
    # anything; `install_requires` is what pip actually resolves.
    install_requires=["requests", "websocket", "pytz"]
)
| 40.592593 | 96 | 0.694343 | import pathlib
from setuptools import setup
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name="lunchbot-python",
version="0.1.1",
url="https://github.com/sotpotatis/Lunchbot-Python",
description="Python interface for the Lunchbot API.",
long_description=README,
long_description_content_type="text/markdown",
author="Albin Seijmer",
author_email="albinsmejladress@protonmail.com",
license="MIT",
classifiers=["Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License"],
packages=["eatery_nod"],
include_package_data=True,
requires=["requests", "websocket", "pytz"]
)
| true | true |
f7fb8c14472c378fd2e6e3881e1f55c47c995a86 | 14,508 | py | Python | src/pyx4_base/test_scripts/pyx4_test_logic.py | jannsta1/pyx4 | b0e72c25b8bb0e3e12d4d9de1af78cee7b13b11f | [
"BSD-2-Clause"
] | 1 | 2020-06-08T08:20:46.000Z | 2020-06-08T08:20:46.000Z | src/pyx4_base/test_scripts/pyx4_test_logic.py | jannsta1/pyx4 | b0e72c25b8bb0e3e12d4d9de1af78cee7b13b11f | [
"BSD-2-Clause"
] | 2 | 2020-07-13T11:23:05.000Z | 2020-07-14T20:50:53.000Z | src/pyx4_base/test_scripts/pyx4_test_logic.py | jannsta1/pyx4 | b0e72c25b8bb0e3e12d4d9de1af78cee7b13b11f | [
"BSD-2-Clause"
] | 2 | 2020-06-26T11:31:40.000Z | 2021-07-06T13:03:43.000Z | #!/usr/bin/env python3
""" ROS node to perform most of the testing logic.
- Manage subscriptions to relevant topics
- Parse all the data needed for testing
- Do the testing
- All the results are published to the /pyx4_test topic
"""
PKG = 'pyx4'  # ROS package name
NAME = 'pyx4_test'  # node name; also prefixes the published /pyx4_test topic
import sys, time, os, csv
import numpy as np
import rospy
from pyx4.msg import pyx4_state as Pyx4_msg
from pyx4.msg import pyx4_test as Pyx4_test_msg
from geometry_msgs.msg import PoseStamped, TwistStamped
from mavros_msgs.msg import PositionTarget
from pyx4_base_classes.definitions_pyx4 import TEST_COMP, MISSION_SPECS
from pyx4_base_classes.setpoint_bitmasks import *
class Pyx4Test():
""" Class to handle the main logic, subscribers and publishers
for Pyx4 unit testing.
"""
def __init__(self, mission_file, comp_file):
# Position for each waypoint
self.wpts = Pyx4Test._parse_comp_file(comp_file)
# Expected timeout, type and velocity for each waypoint
(self.timeouts,
self.types,
self.velocities) = Pyx4Test._parse_mission_file(mission_file)
self.total_wpts = len(self.wpts) + 3
# Type masks for each waypoint
self.type_masks = {}
# Start time of the current waypoint
self.wpt_start_time = 0
# Index of the current waypoint
self.current_wpt = 0
# List of velocities for the current waypoint
self.cb_vels = np.empty((0,2), float)
# Current local position of the drone
self.current_pos = []
# Publisher for pyx4_test
self.pyx4_test_pub = rospy.Publisher(NAME + '/pyx4_test',
Pyx4_test_msg, queue_size=10)
# Test types
self.test_types = {'type': 'target_type',
'wpt_position': 'wpt_position',
'velocity': 'average_velocity',
'timeout': 'timeout'}
@staticmethod
def _parse_comp_file(comp_file):
""" Read the test comparison file and return a dictionary of arrays,
one for each waypoint.
:param comp_file: a CSV file (label, x, y, z, yaw)
:return {index: Array(x, y, z, yaw)}
"""
with open(comp_file, 'r') as f:
reader = csv.DictReader(f)
return {i+3: np.array(list(map(float, [dic['x'],
dic['y'],
dic['z'],
dic['yaw']])))
for i, dic in enumerate(reader)}
@staticmethod
def _parse_mission_file(comp_file):
""" Read the test comparison file and return a dictionary of arrays,
one for each waypoint.
:param comp_file: a CSV file (label, x, y, z, yaw)
:return {index: Array(x, y, z, yaw)}
"""
timeouts, targets, velocities = {}, {}, {}
last_pos = np.array([0, 0])
with open(comp_file, 'r') as f:
reader = csv.DictReader(f)
for i, dic in enumerate(reader):
# Arming and takeoff states not in mission file,
# hence we need to shift everything by 3.
iwpt = i + 3
# Getting the timeout
timeouts[iwpt] = int(dic['timeout'])
xy = dic['xy_type']
z = dic['z_type']
yaw = dic['yaw_type']
# Get the velocity at each waypoint if
# xy_type is velocity.
x = float(dic['x_setpoint'])
y = float(dic['y_setpoint'])
# If type velocity and not still.
if xy == 'vel' and (x > 0 or y > 0):
velocities[iwpt] = np.array([x, y])
else: velocities[iwpt] = None
# Getting the bitmask
if xy == 'pos' and z == 'pos' and yaw == 'pos':
targets[iwpt] = MASK_XY_POS__Z_POS_YAW_POS
elif xy == 'pos' and z == 'pos' and yaw == 'vel':
targets[iwpt] = MASK_XY_POS__Z_POS_YAW_RATE
elif xy == 'vel' and z == 'pos' and yaw == 'pos':
targets[iwpt] = MASK_XY_VEL__Z_POS__YAW_POS
elif xy == 'vel' and z == 'pos' and yaw == 'vel':
targets[iwpt] = MASK_XY_VEL__Z_POS_YAW_RATE
elif xy == 'vel' and z == 'vel' and yaw == 'pos':
targets[iwpt] = MASK_XY_VEL__Z_VEL_YAW_POS
elif xy == 'vel' and z == 'vel' and yaw == 'vel':
targets[iwpt] = MASK_XY_VEL__Z_VEL_YAW_RATE
elif xy == 'pos' and z == 'vel' and yaw == 'pos':
targets[iwpt] = MASK_XY_POS__Z_VEL_YAW_POS
return timeouts, targets, velocities
def perform_test_pred(self):
""" Function to see whether the tests should be performed.
:return Bool
"""
return self.current_wpt < self.total_wpts and self.current_wpt >= 3
def type_test(self):
""" Test to check whether the setpoint types.
Calls send_message to publish the result in the /pyx4_test topic.
"""
if self.perform_test_pred():
# Get the type mask that has been published the most
# for this waypoint
type_mask = max(self.type_masks[self.current_wpt],
key=lambda x: self.type_masks[self.current_wpt][x])
passed = (self.types[self.current_wpt] == type_mask)
self.send_message(self.test_types['type'], passed,
self.types[self.current_wpt],
type_mask)
def wpt_position_test(self):
""" Test to check whether the position of the drone when it
reaches each waypoint is correct.
Calls send_message to publish the result in the /pyx4_test topic.
"""
if self.perform_test_pred():
# Compare all elements of both arrays
passed = np.allclose(self.wpts[self.current_wpt],
self.current_pos,
rtol=2, atol=1)
# Round to 2 decimal places for reporting.
expected = list([round(x, 2) for x in self.wpts[self.current_wpt]])
given = list([round(x, 2) for x in self.current_pos])
self.send_message(self.test_types['wpt_position'], passed,
expected, given)
def velocity_test(self, cb_vels):
""" Test to check whether the x and y velocity for setpoints
of type velocity is more or less constant and as specified.
Calls send_message to publish the result in the /pyx4_test topic.
:param cb_vels: a list of the velocities the drone has flown at.
"""
if (self.perform_test_pred() and
self.velocities[self.current_wpt] is not None):
cb_vels = cb_vels[35:-35]
passed = np.allclose(cb_vels, self.velocities[self.current_wpt],
rtol=1.2, atol=0.1)
self.send_message(self.test_types['velocity'], passed,
True, passed)
def timeout_test(self):
""" Test to check whether all the timeouts are being followed.
Calls send_message to publish the result in the /pyx4_test topic.
"""
if self.current_wpt < self.total_wpts and self.current_wpt >= 3:
expected_to = self.timeouts[self.current_wpt]
# If we have spent more time than timeout, with 10% margin
if time.time() - self.wpt_start_time > expected_to * 1.1:
passed = False
given = 'more'
else: passed, given = True, expected_to
# Send message
self.send_message(self.test_types['timeout'], passed,
expected_to, given)
    def send_message(self, test_type, passed, expected, given):
        """ Construct a report message personalised to the test type and to
        whether the test has passed, publish it on the /pyx4_test topic and
        log it to the console (loginfo on pass, logerr on failure).

        :param test_type (string): one of the self.test_types values,
            e.g. 'wpt_position', 'target_type'
        :param passed (Bool): whether the test has passed; also used as an
            index into ['FAILED', 'PASSED']
        :param expected: the expected test result
        :param given: the actual test result
        """
        # Variables to generate the message
        passed_msg = ['FAILED', 'PASSED']
        expected_msg = {self.test_types['wpt_position']: 'to finish at',
                        self.test_types['type']: 'type mask',
                        self.test_types['velocity']: '',
                        self.test_types['timeout']: 'to finish in'}
        description = """Waypoint {}: {} TEST {}
        Waypoint {} {} the {} test.
        Expected {} {} and got {}
        """.format(self.current_wpt, # Current waypoint
                   test_type.upper(), # Test type
                   passed_msg[passed], # FAILED / PASSED
                   self.current_wpt, # Current wpt
                   passed_msg[passed], # FAILED / PASSED
                   test_type, # Test type
                   expected_msg[test_type], # type mask, to finish at...
                   expected, given) # Expected and given values
        # Create the Pyx4 test message and publish
        msg = Pyx4_test_msg()
        msg.test_type = test_type
        msg.waypoint = str(self.current_wpt)
        msg.passed = passed
        msg.description = description
        self.pyx4_test_pub.publish(msg)
        # Show normally if passed,
        if passed: rospy.loginfo(description)
        # Or as an error otherwise
        else: rospy.logerr(description)
def pyx4_callback(self, data):
""" Callback triggered every time the drone reaches a waypoint.
Calls all the test functions and updates the required attributes.
:param data: pyx4_state message from /pyx4_node/pyx4_state
"""
self.type_test()
self.wpt_position_test()
self.velocity_test(self.cb_vels)
self.timeout_test()
self.wpt_start_time = time.time()
self.cb_vels = np.empty((0,2), float)
self.current_wpt += 1
def local_position_callback(self, data):
""" ROS subscription callback that updates the attribute
current_pos.
:param data: PoseStamped from /mavros/local_position/pose
"""
# Update the current position
pos = data.pose.position
self.current_pos = np.array([pos.x, pos.y, pos.z,
data.pose.orientation.z])
def position_target_callback(self, data):
""" ROS subscription callback that gets a target type and
adds it to a dictionary containing a counter of each type.
for each waypoint.
:param data: PositionTarget from /mavros/setpoint_raw/local
"""
tm = data.type_mask
# If we have not seen the current waypoint yet,
# add it to the data
if self.current_wpt not in list(self.type_masks.keys()):
self.type_masks[self.current_wpt] = {}
if tm in list(self.type_masks[self.current_wpt].keys()):
self.type_masks[self.current_wpt][tm] += 1
else:
self.type_masks[self.current_wpt][tm] = 1
def local_position_vel_callback(self, data):
""" ROS subscription callback that adds the current velocity to
an array of velocities.
:param data: TwistStamped from /mavros/local_position/velocity_local
"""
vel = data.twist.linear
self.cb_vels = np.append(self.cb_vels, np.array([[vel.x, vel.y]]),
axis=0)
def main(self):
""" Method to manage subscriptions:
- /pyx4_node/pyx4_state: to know when a waypoint is reached.
Callback: compare the local position in the test data for that
waypoint to the mavros/local_position data.
- mavros/local_position/pose: receive the local position
Callback: update the attribute self.current_pos
- mavros/setpoint_raw_local: receive the target setpoint
Callback: add the setpoint bitmask to self.type_masks,
which contains a counter of each type mask for each wpt.
- mavros/local_position/velocity_local: receive the velocity
Callbacl: add the velocity to a self.cb_vels that is an array
of velocities
"""
# Subscribe to pyx4_state
rospy.Subscriber("pyx4_node/pyx4_state", Pyx4_msg,
self.pyx4_callback)
# Subscribe to mavros/local_position/pose
rospy.Subscriber("mavros/local_position/pose", PoseStamped,
self.local_position_callback)
# Subscribe to mavros/setpoint_raw/local
rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,
self.position_target_callback)
# Subscribe to mavros/local_position/velocity_local
rospy.Subscriber("mavros/local_position/velocity_local", TwistStamped,
self.local_position_vel_callback)
rospy.init_node(NAME, anonymous=True)
# TODO: Set proper time
timeout_t = time.time() + 10.0*1000 #10 seconds
while not rospy.is_shutdown() and time.time() < timeout_t:
time.sleep(0.1)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="ROS test node")
    parser.add_argument('--mission', type=str, default='basic_test.csv')
    parser.add_argument('--comp', type=str, default='basic_test.csv')
    # rospy.myargv strips ROS remapping arguments before argparse sees them.
    args = parser.parse_args(rospy.myargv(argv=sys.argv)[1:])
    # Mission and comparisson files have the same name by definition
    comp_file = os.path.join(TEST_COMP, args.comp)
    mission_file = os.path.join(MISSION_SPECS, args.mission)
    # Fix: missing files now raise FileNotFoundError instead of the
    # unrelated AttributeError.
    if not os.path.isfile(mission_file):
        raise FileNotFoundError("""Mission file {} not found.
        """.format(mission_file))
    if not os.path.isfile(comp_file):
        raise FileNotFoundError("""file {} does not exist.
        Run test_data to create the test data for the selected mission.
        """.format(comp_file))
    pyx4_test = Pyx4Test(mission_file, comp_file)
    pyx4_test.main()
| 42.79646 | 79 | 0.578164 |
PKG = 'pyx4'  # ROS package name
NAME = 'pyx4_test'  # node name; also prefixes the published /pyx4_test topic
import sys, time, os, csv
import numpy as np
import rospy
from pyx4.msg import pyx4_state as Pyx4_msg
from pyx4.msg import pyx4_test as Pyx4_test_msg
from geometry_msgs.msg import PoseStamped, TwistStamped
from mavros_msgs.msg import PositionTarget
from pyx4_base_classes.definitions_pyx4 import TEST_COMP, MISSION_SPECS
from pyx4_base_classes.setpoint_bitmasks import *
class Pyx4Test():
    def __init__(self, mission_file, comp_file):
        """Load the mission expectations and set up the result publisher.

        :param mission_file: mission spec CSV (timeouts, setpoint types).
        :param comp_file: comparison CSV with the expected pose per waypoint.
        """
        # Expected pose per waypoint.
        self.wpts = Pyx4Test._parse_comp_file(comp_file)
        # Expected timeout, setpoint type mask and commanded velocity.
        (self.timeouts,
         self.types,
         self.velocities) = Pyx4Test._parse_mission_file(mission_file)
        # Arming/takeoff occupy the first three state slots.
        self.total_wpts = len(self.wpts) + 3
        # Per-waypoint counters of observed setpoint type masks.
        self.type_masks = {}
        # Bookkeeping for the waypoint currently being flown.
        self.wpt_start_time = 0
        self.current_wpt = 0
        self.cb_vels = np.empty((0,2), float)
        self.current_pos = []
        # Verdicts are published on <node name>/pyx4_test.
        self.pyx4_test_pub = rospy.Publisher(NAME + '/pyx4_test',
                                             Pyx4_test_msg, queue_size=10)
        # Canonical test-type labels used in published reports.
        self.test_types = {'type': 'target_type',
                           'wpt_position': 'wpt_position',
                           'velocity': 'average_velocity',
                           'timeout': 'timeout'}
    @staticmethod
    def _parse_comp_file(comp_file):
        """Load the expected pose per waypoint from the comparison CSV.

        :param comp_file: CSV with x, y, z, yaw columns.
        :return: {waypoint index: np.array([x, y, z, yaw])}; indices start at
            3 because arming/takeoff occupy the first slots.
        """
        with open(comp_file, 'r') as f:
            reader = csv.DictReader(f)
            return {i+3: np.array(list(map(float, [dic['x'],
                                                   dic['y'],
                                                   dic['z'],
                                                   dic['yaw']])))
                    for i, dic in enumerate(reader)}
    @staticmethod
    def _parse_mission_file(comp_file):
        """Parse the mission CSV into (timeouts, targets, velocities) dicts
        keyed by waypoint index (starting at 3: arming/takeoff are not in
        the file).

        NOTE(review): `last_pos` is never used, and the (pos, vel, vel)
        xy/z/yaw combination has no branch, leaving `targets` without that
        key -- confirm whether that combination is intentionally unsupported.
        """
        timeouts, targets, velocities = {}, {}, {}
        last_pos = np.array([0, 0])
        with open(comp_file, 'r') as f:
            reader = csv.DictReader(f)
            for i, dic in enumerate(reader):
                # Shift by 3: arming/takeoff occupy the first state slots.
                iwpt = i + 3
                timeouts[iwpt] = int(dic['timeout'])
                xy = dic['xy_type']
                z = dic['z_type']
                yaw = dic['yaw_type']
                x = float(dic['x_setpoint'])
                y = float(dic['y_setpoint'])
                # Commanded velocity only for moving velocity-type waypoints.
                if xy == 'vel' and (x > 0 or y > 0):
                    velocities[iwpt] = np.array([x, y])
                else: velocities[iwpt] = None
                # Map the xy/z/yaw type combination to a setpoint bitmask.
                if xy == 'pos' and z == 'pos' and yaw == 'pos':
                    targets[iwpt] = MASK_XY_POS__Z_POS_YAW_POS
                elif xy == 'pos' and z == 'pos' and yaw == 'vel':
                    targets[iwpt] = MASK_XY_POS__Z_POS_YAW_RATE
                elif xy == 'vel' and z == 'pos' and yaw == 'pos':
                    targets[iwpt] = MASK_XY_VEL__Z_POS__YAW_POS
                elif xy == 'vel' and z == 'pos' and yaw == 'vel':
                    targets[iwpt] = MASK_XY_VEL__Z_POS_YAW_RATE
                elif xy == 'vel' and z == 'vel' and yaw == 'pos':
                    targets[iwpt] = MASK_XY_VEL__Z_VEL_YAW_POS
                elif xy == 'vel' and z == 'vel' and yaw == 'vel':
                    targets[iwpt] = MASK_XY_VEL__Z_VEL_YAW_RATE
                elif xy == 'pos' and z == 'vel' and yaw == 'pos':
                    targets[iwpt] = MASK_XY_POS__Z_VEL_YAW_POS
        return timeouts, targets, velocities
    def perform_test_pred(self):
        """Return True when the current waypoint index is in the testable
        range [3, total_wpts); the first three states are arming/takeoff."""
        return self.current_wpt < self.total_wpts and self.current_wpt >= 3
    def type_test(self):
        """Check the most frequently published setpoint type mask against the
        expected mask, publishing the verdict via send_message."""
        if self.perform_test_pred():
            # Most frequently observed mask wins.
            type_mask = max(self.type_masks[self.current_wpt],
                            key=lambda x: self.type_masks[self.current_wpt][x])
            passed = (self.types[self.current_wpt] == type_mask)
            self.send_message(self.test_types['type'], passed,
                              self.types[self.current_wpt],
                              type_mask)
    def wpt_position_test(self):
        """Check the pose at waypoint completion against the expected pose
        (loose tolerances), publishing the verdict via send_message."""
        if self.perform_test_pred():
            passed = np.allclose(self.wpts[self.current_wpt],
                                 self.current_pos,
                                 rtol=2, atol=1)
            # Rounded copies purely for a readable report.
            expected = list([round(x, 2) for x in self.wpts[self.current_wpt]])
            given = list([round(x, 2) for x in self.current_pos])
            self.send_message(self.test_types['wpt_position'], passed,
                              expected, given)
    def velocity_test(self, cb_vels):
        """Check sampled x/y velocities against the commanded velocity for
        velocity-type waypoints, publishing the verdict via send_message.

        :param cb_vels: (N, 2) array of sampled [vx, vy] velocities.
        """
        if (self.perform_test_pred() and
            self.velocities[self.current_wpt] is not None):
            # Trim ramp-up/ramp-down samples at either end of the leg.
            cb_vels = cb_vels[35:-35]
            passed = np.allclose(cb_vels, self.velocities[self.current_wpt],
                                 rtol=1.2, atol=0.1)
            self.send_message(self.test_types['velocity'], passed,
                              True, passed)
    def timeout_test(self):
        """Check the waypoint finished within its timeout (10% grace margin),
        publishing the verdict via send_message."""
        if self.current_wpt < self.total_wpts and self.current_wpt >= 3:
            expected_to = self.timeouts[self.current_wpt]
            # 10% margin over the nominal timeout.
            if time.time() - self.wpt_start_time > expected_to * 1.1:
                passed = False
                given = 'more'
            else: passed, given = True, expected_to
            self.send_message(self.test_types['timeout'], passed,
                              expected_to, given)
    def send_message(self, test_type, passed, expected, given):
        """Publish a Pyx4_test_msg verdict and mirror it to the ROS log.

        Args:
            test_type: one of the ``self.test_types`` values
                ('type', 'wpt_position', 'velocity' or 'timeout').
            passed: bool outcome; also used as a list index below
                (False -> 'FAILED', True -> 'PASSED').
            expected: expected value, interpolated into the report text.
            given: observed value, interpolated into the report text.
        """
        passed_msg = ['FAILED', 'PASSED']
        # Per-test phrasing inserted before the expected value in the report.
        expected_msg = {self.test_types['wpt_position']: 'to finish at',
                        self.test_types['type']: 'type mask',
                        self.test_types['velocity']: '',
                        self.test_types['timeout']: 'to finish in'}
        description = """Waypoint {}: {} TEST {}
        Waypoint {} {} the {} test.
        Expected {} {} and got {}
        """.format(self.current_wpt,
                   test_type.upper(),
                   passed_msg[passed],
                   self.current_wpt,
                   passed_msg[passed],
                   test_type,
                   expected_msg[test_type],
                   expected, given)
        # Publish the structured result for downstream consumers.
        msg = Pyx4_test_msg()
        msg.test_type = test_type
        msg.waypoint = str(self.current_wpt)
        msg.passed = passed
        msg.description = description
        self.pyx4_test_pub.publish(msg)
        # Log at a severity matching the verdict.
        if passed: rospy.loginfo(description)
        else: rospy.logerr(description)
    def pyx4_callback(self, data):
        """Run all per-waypoint checks when pyx4 reports a state change.

        Evaluates the waypoint that just finished, then resets the
        per-waypoint bookkeeping for the next one.

        Args:
            data: Pyx4_msg state message (its fields are not read here; the
                event itself triggers the evaluation).
        """
        # Evaluate the waypoint that just completed.
        self.type_test()
        self.wpt_position_test()
        self.velocity_test(self.cb_vels)
        self.timeout_test()
        # Reset bookkeeping for the next waypoint.
        self.wpt_start_time = time.time()
        self.cb_vels = np.empty((0,2), float)
        self.current_wpt += 1
def local_position_callback(self, data):
pos = data.pose.position
self.current_pos = np.array([pos.x, pos.y, pos.z,
data.pose.orientation.z])
def position_target_callback(self, data):
tm = data.type_mask
if self.current_wpt not in list(self.type_masks.keys()):
self.type_masks[self.current_wpt] = {}
if tm in list(self.type_masks[self.current_wpt].keys()):
self.type_masks[self.current_wpt][tm] += 1
else:
self.type_masks[self.current_wpt][tm] = 1
def local_position_vel_callback(self, data):
vel = data.twist.linear
self.cb_vels = np.append(self.cb_vels, np.array([[vel.x, vel.y]]),
axis=0)
def main(self):
rospy.Subscriber("pyx4_node/pyx4_state", Pyx4_msg,
self.pyx4_callback)
rospy.Subscriber("mavros/local_position/pose", PoseStamped,
self.local_position_callback)
rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,
self.position_target_callback)
rospy.Subscriber("mavros/local_position/velocity_local", TwistStamped,
self.local_position_vel_callback)
rospy.init_node(NAME, anonymous=True)
timeout_t = time.time() + 10.0*1000
while not rospy.is_shutdown() and time.time() < timeout_t:
time.sleep(0.1)
if __name__ == '__main__':
    # Local import: argparse is only needed when run as a script.
    import argparse
    parser = argparse.ArgumentParser(description="ROS test node")
    parser.add_argument('--mission', type=str, default='basic_test.csv')
    parser.add_argument('--comp', type=str, default='basic_test.csv')
    # rospy.myargv strips ROS remapping arguments before argparse sees them.
    args = parser.parse_args(rospy.myargv(argv=sys.argv)[1:])
    comp_file = os.path.join(TEST_COMP, args.comp)
    mission_file = os.path.join(MISSION_SPECS, args.mission)
    # Fail fast if either input file is missing.
    if not os.path.isfile(mission_file):
        raise AttributeError("""Mission file {} not found.
        """.format(mission_file))
    if not os.path.isfile(comp_file):
        # The comparison data must be generated beforehand by test_data.
        raise AttributeError("""file {} does not exist.
        Run test_data to create the test data for the selected mission.
        """.format(comp_file))
    pyx4_test = Pyx4Test(mission_file, comp_file)
    pyx4_test.main()
| true | true |
f7fb8c23b3a011a8d70ddbac4f55bfb860b83333 | 21,067 | py | Python | twilio/rest/numbers/v2/regulatory_compliance/bundle/__init__.py | timgates42/twilio-python | ef29d03a4857b62b616df4a8f4f2b7c294afbb99 | [
"MIT"
] | 2 | 2022-01-13T10:58:03.000Z | 2022-03-16T07:12:17.000Z | venv/Lib/site-packages/twilio/rest/numbers/v2/regulatory_compliance/bundle/__init__.py | syt1209/PythonProjects | 0409dbd3c0b0ddf00debc38875059c828eb31dec | [
"MIT"
] | 9 | 2018-05-07T21:59:44.000Z | 2022-01-29T22:49:29.000Z | venv/Lib/site-packages/twilio/rest/numbers/v2/regulatory_compliance/bundle/__init__.py | syt1209/PythonProjects | 0409dbd3c0b0ddf00debc38875059c828eb31dec | [
"MIT"
] | 4 | 2021-03-25T09:00:08.000Z | 2021-08-05T06:54:23.000Z | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.numbers.v2.regulatory_compliance.bundle.evaluation import EvaluationList
from twilio.rest.numbers.v2.regulatory_compliance.bundle.item_assignment import ItemAssignmentList
class BundleList(ListResource):
    """List resource for regulatory-compliance Bundles."""

    def __init__(self, version):
        """Build the Bundle collection resource.

        :param Version version: Version that contains the resource
        """
        super(BundleList, self).__init__(version)

        # The collection endpoint takes no path parameters.
        self._solution = {}
        self._uri = '/RegulatoryCompliance/Bundles'.format(**self._solution)

    def create(self, friendly_name, email, status_callback=values.unset,
               regulation_sid=values.unset, iso_country=values.unset,
               end_user_type=values.unset, number_type=values.unset):
        """Create a new Bundle.

        :param unicode friendly_name: string describing the resource
        :param unicode email: the email address
        :param unicode status_callback: URL called on status changes
        :param unicode regulation_sid: unique string of a regulation
        :param unicode iso_country: ISO country code of the country
        :param BundleInstance.EndUserType end_user_type: type of End User
        :param unicode number_type: the type of phone number
        :returns: The created BundleInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance
        """
        form = values.of({
            'FriendlyName': friendly_name,
            'Email': email,
            'StatusCallback': status_callback,
            'RegulationSid': regulation_sid,
            'IsoCountry': iso_country,
            'EndUserType': end_user_type,
            'NumberType': number_type,
        })

        created = self._version.create(method='POST', uri=self._uri, data=form, )

        return BundleInstance(self._version, created, )

    def stream(self, status=values.unset, friendly_name=values.unset,
               regulation_sid=values.unset, iso_country=values.unset,
               number_type=values.unset, limit=None, page_size=None):
        """Lazily stream BundleInstance records from the API.

        Records are fetched page by page as the generator is consumed,
        keeping memory usage low.

        :param BundleInstance.Status status: verification-status filter
        :param unicode friendly_name: friendly-name filter
        :param unicode regulation_sid: regulation filter
        :param unicode iso_country: ISO country filter
        :param unicode number_type: phone-number-type filter
        :param int limit: maximum records to yield (None = no limit)
        :param int page_size: records per request (default 50; with only a
            limit set, min(limit, 1000) is used)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance]
        """
        read_limits = self._version.read_limits(limit, page_size)

        first_page = self.page(
            status=status,
            friendly_name=friendly_name,
            regulation_sid=regulation_sid,
            iso_country=iso_country,
            number_type=number_type,
            page_size=read_limits['page_size'],
        )

        return self._version.stream(first_page, read_limits['limit'])

    def list(self, status=values.unset, friendly_name=values.unset,
             regulation_sid=values.unset, iso_country=values.unset,
             number_type=values.unset, limit=None, page_size=None):
        """Eagerly list BundleInstance records from the API.

        Unlike stream(), this loads up to ``limit`` records into memory
        before returning. Filters and paging behave exactly as in stream().

        :returns: list of up to limit results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance]
        """
        return list(self.stream(
            status=status,
            friendly_name=friendly_name,
            regulation_sid=regulation_sid,
            iso_country=iso_country,
            number_type=number_type,
            limit=limit,
            page_size=page_size,
        ))

    def page(self, status=values.unset, friendly_name=values.unset,
             regulation_sid=values.unset, iso_country=values.unset,
             number_type=values.unset, page_token=values.unset,
             page_number=values.unset, page_size=values.unset):
        """Fetch a single page of BundleInstance records immediately.

        :param BundleInstance.Status status: verification-status filter
        :param unicode friendly_name: friendly-name filter
        :param unicode regulation_sid: regulation filter
        :param unicode iso_country: ISO country filter
        :param unicode number_type: phone-number-type filter
        :param str page_token: PageToken provided by the API
        :param int page_number: page number (client-side state only)
        :param int page_size: records per page, defaults to 50
        :returns: Page of BundleInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundlePage
        """
        params = values.of({
            'Status': status,
            'FriendlyName': friendly_name,
            'RegulationSid': regulation_sid,
            'IsoCountry': iso_country,
            'NumberType': number_type,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(method='GET', uri=self._uri, params=params, )

        return BundlePage(self._version, response, self._solution)

    def get_page(self, target_url):
        """Fetch a specific page of results by its API-generated URL.

        :param str target_url: API-generated URL for the requested results page
        :returns: Page of BundleInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundlePage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return BundlePage(self._version, response, self._solution)

    def get(self, sid):
        """Construct a BundleContext addressing one Bundle.

        :param sid: The unique string that identifies the resource.
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleContext
        """
        return BundleContext(self._version, sid=sid, )

    def __call__(self, sid):
        """Alias for get(): construct a BundleContext for the given SID.

        :param sid: The unique string that identifies the resource.
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleContext
        """
        return BundleContext(self._version, sid=sid, )

    def __repr__(self):
        """Machine-friendly representation.

        :rtype: str
        """
        return '<Twilio.Numbers.V2.BundleList>'
class BundlePage(Page):
    """One page of results from the Bundle collection."""

    def __init__(self, version, response, solution):
        """Wrap one page of the Bundle collection.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param dict solution: path solution inherited from the list resource
        """
        super(BundlePage, self).__init__(version, response)

        # Carry over the (empty) path solution from BundleList.
        self._solution = solution

    def get_instance(self, payload):
        """Turn a raw API payload into a BundleInstance.

        :param dict payload: Payload response from the API
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance
        """
        return BundleInstance(self._version, payload, )

    def __repr__(self):
        """Machine-friendly representation.

        :rtype: str
        """
        return '<Twilio.Numbers.V2.BundlePage>'
class BundleContext(InstanceContext):
    """Context addressing a single Bundle resource by SID."""

    def __init__(self, version, sid):
        """Build the context.

        :param Version version: Version that contains the resource
        :param sid: The unique string that identifies the resource.
        """
        super(BundleContext, self).__init__(version)

        # Path solution: the bundle SID selects the resource.
        self._solution = {'sid': sid, }
        self._uri = '/RegulatoryCompliance/Bundles/{sid}'.format(**self._solution)

        # Dependent sub-resources, created lazily on first access.
        self._evaluations = None
        self._item_assignments = None

    def fetch(self):
        """Fetch the Bundle from the API.

        :returns: The fetched BundleInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance
        """
        fetched = self._version.fetch(method='GET', uri=self._uri, )

        return BundleInstance(self._version, fetched, sid=self._solution['sid'], )

    def update(self, status=values.unset, status_callback=values.unset,
               friendly_name=values.unset, email=values.unset):
        """Update the Bundle.

        :param BundleInstance.Status status: verification status
        :param unicode status_callback: URL called on status changes
        :param unicode friendly_name: string describing the resource
        :param unicode email: the email address
        :returns: The updated BundleInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance
        """
        form = values.of({
            'Status': status,
            'StatusCallback': status_callback,
            'FriendlyName': friendly_name,
            'Email': email,
        })

        updated = self._version.update(method='POST', uri=self._uri, data=form, )

        return BundleInstance(self._version, updated, sid=self._solution['sid'], )

    def delete(self):
        """Delete the Bundle.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete(method='DELETE', uri=self._uri, )

    @property
    def evaluations(self):
        """Access the evaluations sub-resource list (built lazily).

        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.evaluation.EvaluationList
        """
        if self._evaluations is None:
            self._evaluations = EvaluationList(self._version, bundle_sid=self._solution['sid'], )
        return self._evaluations

    @property
    def item_assignments(self):
        """Access the item_assignments sub-resource list (built lazily).

        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.item_assignment.ItemAssignmentList
        """
        if self._item_assignments is None:
            self._item_assignments = ItemAssignmentList(self._version, bundle_sid=self._solution['sid'], )
        return self._item_assignments

    def __repr__(self):
        """Machine-friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Numbers.V2.BundleContext {}>'.format(details)
class BundleInstance(InstanceResource):
    """A single Bundle resource marshaled from an API payload."""

    class Status(object):
        DRAFT = "draft"
        PENDING_REVIEW = "pending-review"
        IN_REVIEW = "in-review"
        TWILIO_REJECTED = "twilio-rejected"
        TWILIO_APPROVED = "twilio-approved"
        PROVISIONALLY_APPROVED = "provisionally-approved"

    class EndUserType(object):
        INDIVIDUAL = "individual"
        BUSINESS = "business"

    def __init__(self, version, payload, sid=None):
        """Build a BundleInstance.

        :param Version version: Version that contains the resource
        :param dict payload: raw payload returned by the API
        :param sid: explicit SID (used when constructed through a context)
        """
        super(BundleInstance, self).__init__(version)

        # Marshal the raw payload; datetimes are parsed from ISO 8601.
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'regulation_sid': payload.get('regulation_sid'),
            'friendly_name': payload.get('friendly_name'),
            'status': payload.get('status'),
            'valid_until': deserialize.iso8601_datetime(payload.get('valid_until')),
            'email': payload.get('email'),
            'status_callback': payload.get('status_callback'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'url': payload.get('url'),
            'links': payload.get('links'),
        }

        # The context is built lazily by ``_proxy``.
        self._context = None
        self._solution = {'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """Lazily construct the BundleContext all instance actions proxy to.

        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleContext
        """
        if self._context is None:
            self._context = BundleContext(self._version, sid=self._solution['sid'], )
        return self._context

    @property
    def sid(self):
        """The unique string that identifies the resource.

        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """The SID of the Account that created the resource.

        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def regulation_sid(self):
        """The unique string of a regulation.

        :rtype: unicode
        """
        return self._properties['regulation_sid']

    @property
    def friendly_name(self):
        """The string that you assigned to describe the resource.

        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def status(self):
        """The verification status of the Bundle resource.

        :rtype: BundleInstance.Status
        """
        return self._properties['status']

    @property
    def valid_until(self):
        """ISO 8601 datetime (GMT) until which the resource is valid.

        :rtype: datetime
        """
        return self._properties['valid_until']

    @property
    def email(self):
        """The email address.

        :rtype: unicode
        """
        return self._properties['email']

    @property
    def status_callback(self):
        """The URL called to inform your application of status changes.

        :rtype: unicode
        """
        return self._properties['status_callback']

    @property
    def date_created(self):
        """ISO 8601 datetime (GMT) when the resource was created.

        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """ISO 8601 datetime (GMT) when the resource was last updated.

        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def url(self):
        """The absolute URL of the Bundle resource.

        :rtype: unicode
        """
        return self._properties['url']

    @property
    def links(self):
        """The URLs of the Assigned Items of the Bundle resource.

        :rtype: unicode
        """
        return self._properties['links']

    def fetch(self):
        """Fetch the BundleInstance.

        :returns: The fetched BundleInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance
        """
        return self._proxy.fetch()

    def update(self, status=values.unset, status_callback=values.unset,
               friendly_name=values.unset, email=values.unset):
        """Update the BundleInstance.

        :param BundleInstance.Status status: verification status
        :param unicode status_callback: URL called on status changes
        :param unicode friendly_name: string describing the resource
        :param unicode email: the email address
        :returns: The updated BundleInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.BundleInstance
        """
        return self._proxy.update(
            status=status,
            status_callback=status_callback,
            friendly_name=friendly_name,
            email=email,
        )

    def delete(self):
        """Delete the BundleInstance.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    @property
    def evaluations(self):
        """Access the evaluations sub-resource.

        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.evaluation.EvaluationList
        """
        return self._proxy.evaluations

    @property
    def item_assignments(self):
        """Access the item_assignments sub-resource.

        :rtype: twilio.rest.numbers.v2.regulatory_compliance.bundle.item_assignment.ItemAssignmentList
        """
        return self._proxy.item_assignments

    def __repr__(self):
        """Machine-friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Numbers.V2.BundleInstance {}>'.format(details)
| 36.385147 | 106 | 0.64537 |
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.numbers.v2.regulatory_compliance.bundle.evaluation import EvaluationList
from twilio.rest.numbers.v2.regulatory_compliance.bundle.item_assignment import ItemAssignmentList
class BundleList(ListResource):
def __init__(self, version):
super(BundleList, self).__init__(version)
self._solution = {}
self._uri = '/RegulatoryCompliance/Bundles'.format(**self._solution)
def create(self, friendly_name, email, status_callback=values.unset,
regulation_sid=values.unset, iso_country=values.unset,
end_user_type=values.unset, number_type=values.unset):
data = values.of({
'FriendlyName': friendly_name,
'Email': email,
'StatusCallback': status_callback,
'RegulationSid': regulation_sid,
'IsoCountry': iso_country,
'EndUserType': end_user_type,
'NumberType': number_type,
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return BundleInstance(self._version, payload, )
def stream(self, status=values.unset, friendly_name=values.unset,
regulation_sid=values.unset, iso_country=values.unset,
number_type=values.unset, limit=None, page_size=None):
limits = self._version.read_limits(limit, page_size)
page = self.page(
status=status,
friendly_name=friendly_name,
regulation_sid=regulation_sid,
iso_country=iso_country,
number_type=number_type,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, status=values.unset, friendly_name=values.unset,
regulation_sid=values.unset, iso_country=values.unset,
number_type=values.unset, limit=None, page_size=None):
return list(self.stream(
status=status,
friendly_name=friendly_name,
regulation_sid=regulation_sid,
iso_country=iso_country,
number_type=number_type,
limit=limit,
page_size=page_size,
))
def page(self, status=values.unset, friendly_name=values.unset,
regulation_sid=values.unset, iso_country=values.unset,
number_type=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
data = values.of({
'Status': status,
'FriendlyName': friendly_name,
'RegulationSid': regulation_sid,
'IsoCountry': iso_country,
'NumberType': number_type,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return BundlePage(self._version, response, self._solution)
def get_page(self, target_url):
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return BundlePage(self._version, response, self._solution)
def get(self, sid):
return BundleContext(self._version, sid=sid, )
def __call__(self, sid):
return BundleContext(self._version, sid=sid, )
def __repr__(self):
return '<Twilio.Numbers.V2.BundleList>'
class BundlePage(Page):
def __init__(self, version, response, solution):
super(BundlePage, self).__init__(version, response)
self._solution = solution
def get_instance(self, payload):
return BundleInstance(self._version, payload, )
def __repr__(self):
return '<Twilio.Numbers.V2.BundlePage>'
class BundleContext(InstanceContext):
def __init__(self, version, sid):
super(BundleContext, self).__init__(version)
self._solution = {'sid': sid, }
self._uri = '/RegulatoryCompliance/Bundles/{sid}'.format(**self._solution)
self._evaluations = None
self._item_assignments = None
def fetch(self):
payload = self._version.fetch(method='GET', uri=self._uri, )
return BundleInstance(self._version, payload, sid=self._solution['sid'], )
def update(self, status=values.unset, status_callback=values.unset,
friendly_name=values.unset, email=values.unset):
data = values.of({
'Status': status,
'StatusCallback': status_callback,
'FriendlyName': friendly_name,
'Email': email,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return BundleInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
return self._version.delete(method='DELETE', uri=self._uri, )
@property
def evaluations(self):
if self._evaluations is None:
self._evaluations = EvaluationList(self._version, bundle_sid=self._solution['sid'], )
return self._evaluations
@property
def item_assignments(self):
if self._item_assignments is None:
self._item_assignments = ItemAssignmentList(self._version, bundle_sid=self._solution['sid'], )
return self._item_assignments
def __repr__(self):
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Numbers.V2.BundleContext {}>'.format(context)
class BundleInstance(InstanceResource):
class Status(object):
DRAFT = "draft"
PENDING_REVIEW = "pending-review"
IN_REVIEW = "in-review"
TWILIO_REJECTED = "twilio-rejected"
TWILIO_APPROVED = "twilio-approved"
PROVISIONALLY_APPROVED = "provisionally-approved"
class EndUserType(object):
INDIVIDUAL = "individual"
BUSINESS = "business"
def __init__(self, version, payload, sid=None):
super(BundleInstance, self).__init__(version)
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'regulation_sid': payload.get('regulation_sid'),
'friendly_name': payload.get('friendly_name'),
'status': payload.get('status'),
'valid_until': deserialize.iso8601_datetime(payload.get('valid_until')),
'email': payload.get('email'),
'status_callback': payload.get('status_callback'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
'links': payload.get('links'),
}
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
if self._context is None:
self._context = BundleContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
return self._properties['sid']
@property
def account_sid(self):
return self._properties['account_sid']
@property
def regulation_sid(self):
return self._properties['regulation_sid']
@property
def friendly_name(self):
return self._properties['friendly_name']
@property
def status(self):
return self._properties['status']
@property
def valid_until(self):
return self._properties['valid_until']
@property
def email(self):
return self._properties['email']
@property
def status_callback(self):
return self._properties['status_callback']
@property
def date_created(self):
return self._properties['date_created']
@property
def date_updated(self):
return self._properties['date_updated']
@property
def url(self):
return self._properties['url']
@property
def links(self):
return self._properties['links']
def fetch(self):
return self._proxy.fetch()
def update(self, status=values.unset, status_callback=values.unset,
friendly_name=values.unset, email=values.unset):
return self._proxy.update(
status=status,
status_callback=status_callback,
friendly_name=friendly_name,
email=email,
)
def delete(self):
return self._proxy.delete()
@property
def evaluations(self):
return self._proxy.evaluations
@property
def item_assignments(self):
return self._proxy.item_assignments
def __repr__(self):
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Numbers.V2.BundleInstance {}>'.format(context)
| true | true |
f7fb8c242e97184959fc16022a4706cd55db0ca0 | 998 | py | Python | example/client/healthcheck.py | saloponov/faiss-server | 641aeae0bdc730c5f65b6dbaca7c9434cc7ce855 | [
"MIT"
] | 106 | 2018-07-29T13:08:31.000Z | 2022-02-14T03:29:25.000Z | example/client/healthcheck.py | saloponov/faiss-server | 641aeae0bdc730c5f65b6dbaca7c9434cc7ce855 | [
"MIT"
] | 6 | 2018-07-29T21:39:32.000Z | 2022-03-13T19:09:18.000Z | example/client/healthcheck.py | saloponov/faiss-server | 641aeae0bdc730c5f65b6dbaca7c9434cc7ce855 | [
"MIT"
] | 28 | 2018-08-09T14:51:23.000Z | 2021-12-07T01:20:01.000Z | import argparse
import grpc
import faiss_pb2
import faiss_pb2_grpc
from google.protobuf import empty_pb2
def main(args):
    """Send a single Heartbeat RPC to the faiss server and print the reply.

    Args:
        args: parsed CLI namespace with ``host``, ``port`` and ``timeout``.
    """
    address = '{}:{}'.format(args.host, args.port)
    # Use the channel as a context manager so it is closed deterministically
    # instead of being leaked when main() returns.
    with grpc.insecure_channel(address) as channel:
        stub = faiss_pb2_grpc.FaissServiceStub(channel)
        # Future-based call with a deadline; result() raises grpc.RpcError
        # (DEADLINE_EXCEEDED) if the server does not answer in time.
        response = stub.Heartbeat.future(empty_pb2.Empty(),
                                         args.timeout)
        print(response.result())
if __name__ == '__main__':
    # CLI entry point: parse connection options and run the health check.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--host',
        type=str,
        default='localhost',
        help='host of faiss server (default: localhost)')
    parser.add_argument(
        '--port',
        type=int,
        default=8080,
        help='port of faiss server (default: 8080)')
    parser.add_argument(
        '--timeout',
        type=float,
        default=1,
        # Fixed copy-paste help text: this client talks to the faiss
        # server, not TensorFlow Serving.
        help='seconds to wait for a response from the faiss server '
             '(default: 1)')
    main(parser.parse_args())
| 26.972973 | 79 | 0.62024 | import argparse
import grpc
import faiss_pb2
import faiss_pb2_grpc
from google.protobuf import empty_pb2
def main(args):
address = '{}:{}'.format(args.host, args.port)
channel = grpc.insecure_channel(address)
stub = faiss_pb2_grpc.FaissServiceStub(channel)
response = stub.Heartbeat.future(empty_pb2.Empty(),
args.timeout)
print(response.result())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--host',
type=str,
default='localhost',
help='host of faiss server (default: localhost)')
parser.add_argument(
'--port',
type=int,
default=8080,
help='port of faiss server (default: 8080)')
parser.add_argument(
'--timeout',
type=float,
default=1,
help=
'a duration of second to respond from tensorflow serving (default: 1)')
main(parser.parse_args())
| true | true |
f7fb8c34660b328529140b88cfa2be5c375ac9d2 | 3,131 | py | Python | box_mlc/modules/hierarchy_regularizer.py | iesl/box-mlc | 15439b7e46885458d0c45d530c17f1deac0398f8 | [
"MIT"
] | null | null | null | box_mlc/modules/hierarchy_regularizer.py | iesl/box-mlc | 15439b7e46885458d0c45d530c17f1deac0398f8 | [
"MIT"
] | null | null | null | box_mlc/modules/hierarchy_regularizer.py | iesl/box-mlc | 15439b7e46885458d0c45d530c17f1deac0398f8 | [
"MIT"
] | null | null | null | """Structural Regularization for """
from torch.nn.parameter import Parameter
from allennlp.common import Registrable
from allennlp.data.vocabulary import Vocabulary
from pathlib import Path
from networkx.exception import NetworkXException
from typing import List, Tuple, Union, Dict, Any, Optional
import torch
import networkx as nx
import logging
from box_mlc.dataset_readers.hierarchy_readers.hierarchy_reader import (
HierarchyReader,
)
logger = logging.getLogger(__name__)
class HierarchyRegularizer(torch.nn.Module, Registrable):
    """Base class to satisfy Registrable and to define the common hierarchy initializations"""

    def __init__(
        self,
        alpha: float,
        hierarchy_reader: HierarchyReader,
        debug_level: int = 0,
    ) -> None:
        """
        Args:
            alpha: The regularization parameter that is multiplied with the hierarchy struct loss.
            hierarchy_reader: Creates the adjacency_matrix and the mask.
            debug_level: scale of 0 to 3. 0 meaning no-debug (fastest) and 3 highest debugging possible (slowest).

        Returns: (None)

        """
        super().__init__()  # type:ignore
        self.alpha = alpha
        self.debug_level = debug_level
        # Adj(i, j) = 1  =>  if label j is true then label i is true.
        # Registered as a non-trainable Parameter so state_dict/.to() manage it.
        self.adjacency_matrix = Parameter(
            hierarchy_reader.adjacency_matrix, requires_grad=False
        )
        # Plain tensor attribute (not a Parameter/buffer), hence the manual
        # device move in to() below.
        self.mask: torch.BoolTensor = (  # pylint: disable
            hierarchy_reader.mask  # type:ignore
        )  # noqa

    def to(self, *args, **kwargs):  # type: ignore # noqa
        """Delegate to `torch.nn.Module.to` and also move `self.mask`.

        `self.mask` is not a Parameter or buffer, so `Module.to` does not
        know about it and it must be moved explicitly here.

        Args:
            args: same as super class

            kwargs: same as super class
        """
        module = super().to(*args, **kwargs)
        (
            device,
            dtype,
            non_blocking,
            convert_to_format,
        ) = torch._C._nn._parse_to(*args, **kwargs)
        # Bug fix: Tensor.to is out-of-place, so the result must be rebound;
        # previously the moved tensor was discarded and the mask silently
        # stayed on its original device.
        self.mask = self.mask.to(device=device)
        # Preserve Module.to's chaining contract (it returns self).
        return module

    def get_active_adjacency_matrix_and_mask(
        self, active_mask: torch.BoolTensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            active_mask: 1D Boolean Tensor of shape (adj.shape[0],) indicating which rows and columns to take.

        Returns:
            torch.Tensor: masked adj matrix
            torch.Tensor: masked mask
        """
        assert len(active_mask.shape) == 1
        assert active_mask.shape[0] == self.adjacency_matrix.shape[0]
        num_active = torch.sum(active_mask)
        active_mask_float = active_mask.to(dtype=torch.float)
        # Outer product of the mask with itself selects the
        # (active x active) sub-grid of both matrices.
        active_mask_matrix = torch.ger(
            active_mask_float, active_mask_float
        ).to(dtype=torch.bool)

        return (self.adjacency_matrix[active_mask_matrix]).reshape(
            num_active, num_active
        ), (self.mask[active_mask_matrix]).reshape(num_active, num_active)
| 35.579545 | 114 | 0.649633 | from torch.nn.parameter import Parameter
from allennlp.common import Registrable
from allennlp.data.vocabulary import Vocabulary
from pathlib import Path
from networkx.exception import NetworkXException
from typing import List, Tuple, Union, Dict, Any, Optional
import torch
import networkx as nx
import logging
from box_mlc.dataset_readers.hierarchy_readers.hierarchy_reader import (
HierarchyReader,
)
logger = logging.getLogger(__name__)
class HierarchyRegularizer(torch.nn.Module, Registrable):
def __init__(
self,
alpha: float,
hierarchy_reader: HierarchyReader,
debug_level: int = 0,
) -> None:
super().__init__()
self.alpha = alpha
self.debug_level = debug_level
self.adjacency_matrix = Parameter(
hierarchy_reader.adjacency_matrix, requires_grad=False
)
torch.BoolTensor = (
hierarchy_reader.mask
)
def to(self, *args, **kwargs): super().to(*args, **kwargs)
(
device,
dtype,
non_blocking,
convert_to_format,
) = torch._C._nn._parse_to(*args, **kwargs)
self.mask.to(device=device)
def get_active_adjacency_matrix_and_mask(
self, active_mask: torch.BoolTensor
) -> Tuple[torch.Tensor, torch.Tensor]:
assert len(active_mask.shape) == 1
assert active_mask.shape[0] == self.adjacency_matrix.shape[0]
num_active = torch.sum(active_mask)
active_mask_float = active_mask.to(dtype=torch.float)
active_mask_matrix = torch.ger(
active_mask_float, active_mask_float
).to(dtype=torch.bool)
return (self.adjacency_matrix[active_mask_matrix]).reshape(
num_active, num_active
), (self.mask[active_mask_matrix]).reshape(num_active, num_active)
| true | true |
f7fb8c735eba4766c2da9793488577acce31d7c5 | 7,976 | py | Python | examples/docs_snippets/docs_snippets_tests/intro_tutorial_tests/test_cli_invocations.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/intro_tutorial_tests/test_cli_invocations.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/intro_tutorial_tests/test_cli_invocations.py | withshubh/dagster | ff4a0db53e126f44097a337eecef54988cc718ef | [
"Apache-2.0"
] | null | null | null | import json
import os
import runpy
import pytest
from click.testing import CliRunner
from dagit.app import create_app_from_workspace
from dagster.cli.pipeline import pipeline_execute_command
from dagster.cli.workspace import get_workspace_from_kwargs
from dagster.core.instance import DagsterInstance
from dagster.core.test_utils import instance_for_test
from dagster.utils import check_script, pushd, script_relative_path
PIPELINES_OR_ERROR_QUERY = """
{
repositoriesOrError {
... on PythonError {
message
stack
}
... on RepositoryConnection {
nodes {
pipelines {
name
}
}
}
}
}
"""
cli_args = [
# dirname, filename, fn_name, env_yaml, mode, preset, return_code, exception
(
"basics/single_solid_pipeline/",
"hello_cereal.py",
"hello_cereal_pipeline",
None,
None,
None,
0,
None,
),
(
"basics/configuring_solids/",
"configurable_pipeline.py",
"configurable_pipeline",
"run_config.yaml",
None,
None,
0,
None,
),
(
"basics/connecting_solids/",
"serial_pipeline.py",
"serial_pipeline",
None,
None,
None,
0,
None,
),
(
"basics/connecting_solids/",
"complex_pipeline.py",
"complex_pipeline",
None,
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"inputs_typed.py",
"inputs_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types.py",
"custom_type_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_2.py",
"custom_type_pipeline",
"custom_types_2.yaml",
None,
None,
1,
Exception,
),
(
"basics/e04_quality/",
"custom_types_3.py",
"custom_type_pipeline",
"custom_type_input.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_4.py",
"custom_type_pipeline",
"custom_type_input.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_5.py",
"custom_type_pipeline",
"custom_type_input.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_mypy_verbose.py",
"custom_type_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_mypy_typing_trick.py",
"custom_type_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"advanced/solids/",
"reusable_solids.py",
"reusable_solids_pipeline",
"reusable_solids.yaml",
None,
None,
0,
None,
),
(
"advanced/solids/",
"composite_solids.py",
"composite_solids_pipeline",
"composite_solids.yaml",
None,
None,
0,
None,
),
(
"advanced/pipelines/",
"resources.py",
"resources_pipeline",
"resources.yaml",
None,
None,
0,
None,
),
(
"advanced/pipelines/",
"required_resources.py",
"resources_pipeline",
"resources.yaml",
None,
None,
0,
None,
),
(
"advanced/pipelines/",
"modes.py",
"modes_pipeline",
"resources.yaml",
"unittest",
None,
0,
None,
),
("advanced/pipelines/", "presets.py", "presets_pipeline", None, None, "unittest", 0, None),
(
"advanced/materializations/",
"materializations.py",
"materialization_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"advanced/materializations/",
"output_materialization.py",
"output_materialization_pipeline",
"output_materialization.yaml",
None,
None,
0,
None,
),
(
"advanced/scheduling/",
"scheduler.py",
"hello_cereal_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
]
def path_to_tutorial_file(path):
return script_relative_path(os.path.join("../../docs_snippets/intro_tutorial/", path))
def load_dagit_for_workspace_cli_args(n_pipelines=1, **kwargs):
instance = DagsterInstance.ephemeral()
with get_workspace_from_kwargs(kwargs) as workspace:
app = create_app_from_workspace(workspace, instance)
client = app.test_client()
res = client.get(
"/graphql?query={query_string}".format(query_string=PIPELINES_OR_ERROR_QUERY)
)
json_res = json.loads(res.data.decode("utf-8"))
assert "data" in json_res
assert "repositoriesOrError" in json_res["data"]
assert "nodes" in json_res["data"]["repositoriesOrError"]
assert len(json_res["data"]["repositoriesOrError"]["nodes"][0]["pipelines"]) == n_pipelines
return res
def dagster_pipeline_execute(args, return_code):
with instance_for_test():
runner = CliRunner()
res = runner.invoke(pipeline_execute_command, args)
assert res.exit_code == return_code, res.exception
return res
@pytest.mark.parametrize(
"dirname,filename,fn_name,_env_yaml,_mode,_preset,_return_code,_exception", cli_args
)
# dagit -f filename -n fn_name
def test_load_pipeline(
dirname, filename, fn_name, _env_yaml, _mode, _preset, _return_code, _exception
):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
load_dagit_for_workspace_cli_args(python_file=filepath, fn_name=fn_name)
@pytest.mark.parametrize(
"dirname,filename,fn_name,env_yaml,mode,preset,return_code,_exception", cli_args
)
# dagster pipeline execute -f filename -n fn_name -e env_yaml --preset preset
def test_dagster_pipeline_execute(
dirname, filename, fn_name, env_yaml, mode, preset, return_code, _exception
):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
yamlpath = path_to_tutorial_file(os.path.join(dirname, env_yaml)) if env_yaml else None
dagster_pipeline_execute(
["-f", filepath, "-a", fn_name]
+ (["-c", yamlpath] if yamlpath else [])
+ (["--mode", mode] if mode else [])
+ (["--preset", preset] if preset else []),
return_code,
)
@pytest.mark.parametrize(
"dirname,filename,_fn_name,_env_yaml,_mode,_preset,return_code,_exception", cli_args
)
def test_script(dirname, filename, _fn_name, _env_yaml, _mode, _preset, return_code, _exception):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
check_script(filepath, return_code)
@pytest.mark.parametrize(
"dirname,filename,_fn_name,_env_yaml,_mode,_preset,_return_code,exception", cli_args
)
def test_runpy(dirname, filename, _fn_name, _env_yaml, _mode, _preset, _return_code, exception):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
if exception:
with pytest.raises(exception):
runpy.run_path(filepath, run_name="__main__")
else:
runpy.run_path(filepath, run_name="__main__")
| 24.847352 | 99 | 0.578235 | import json
import os
import runpy
import pytest
from click.testing import CliRunner
from dagit.app import create_app_from_workspace
from dagster.cli.pipeline import pipeline_execute_command
from dagster.cli.workspace import get_workspace_from_kwargs
from dagster.core.instance import DagsterInstance
from dagster.core.test_utils import instance_for_test
from dagster.utils import check_script, pushd, script_relative_path
PIPELINES_OR_ERROR_QUERY = """
{
repositoriesOrError {
... on PythonError {
message
stack
}
... on RepositoryConnection {
nodes {
pipelines {
name
}
}
}
}
}
"""
cli_args = [
(
"basics/single_solid_pipeline/",
"hello_cereal.py",
"hello_cereal_pipeline",
None,
None,
None,
0,
None,
),
(
"basics/configuring_solids/",
"configurable_pipeline.py",
"configurable_pipeline",
"run_config.yaml",
None,
None,
0,
None,
),
(
"basics/connecting_solids/",
"serial_pipeline.py",
"serial_pipeline",
None,
None,
None,
0,
None,
),
(
"basics/connecting_solids/",
"complex_pipeline.py",
"complex_pipeline",
None,
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"inputs_typed.py",
"inputs_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types.py",
"custom_type_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_2.py",
"custom_type_pipeline",
"custom_types_2.yaml",
None,
None,
1,
Exception,
),
(
"basics/e04_quality/",
"custom_types_3.py",
"custom_type_pipeline",
"custom_type_input.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_4.py",
"custom_type_pipeline",
"custom_type_input.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_5.py",
"custom_type_pipeline",
"custom_type_input.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_mypy_verbose.py",
"custom_type_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"basics/e04_quality/",
"custom_types_mypy_typing_trick.py",
"custom_type_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"advanced/solids/",
"reusable_solids.py",
"reusable_solids_pipeline",
"reusable_solids.yaml",
None,
None,
0,
None,
),
(
"advanced/solids/",
"composite_solids.py",
"composite_solids_pipeline",
"composite_solids.yaml",
None,
None,
0,
None,
),
(
"advanced/pipelines/",
"resources.py",
"resources_pipeline",
"resources.yaml",
None,
None,
0,
None,
),
(
"advanced/pipelines/",
"required_resources.py",
"resources_pipeline",
"resources.yaml",
None,
None,
0,
None,
),
(
"advanced/pipelines/",
"modes.py",
"modes_pipeline",
"resources.yaml",
"unittest",
None,
0,
None,
),
("advanced/pipelines/", "presets.py", "presets_pipeline", None, None, "unittest", 0, None),
(
"advanced/materializations/",
"materializations.py",
"materialization_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
(
"advanced/materializations/",
"output_materialization.py",
"output_materialization_pipeline",
"output_materialization.yaml",
None,
None,
0,
None,
),
(
"advanced/scheduling/",
"scheduler.py",
"hello_cereal_pipeline",
"inputs_env.yaml",
None,
None,
0,
None,
),
]
def path_to_tutorial_file(path):
return script_relative_path(os.path.join("../../docs_snippets/intro_tutorial/", path))
def load_dagit_for_workspace_cli_args(n_pipelines=1, **kwargs):
instance = DagsterInstance.ephemeral()
with get_workspace_from_kwargs(kwargs) as workspace:
app = create_app_from_workspace(workspace, instance)
client = app.test_client()
res = client.get(
"/graphql?query={query_string}".format(query_string=PIPELINES_OR_ERROR_QUERY)
)
json_res = json.loads(res.data.decode("utf-8"))
assert "data" in json_res
assert "repositoriesOrError" in json_res["data"]
assert "nodes" in json_res["data"]["repositoriesOrError"]
assert len(json_res["data"]["repositoriesOrError"]["nodes"][0]["pipelines"]) == n_pipelines
return res
def dagster_pipeline_execute(args, return_code):
with instance_for_test():
runner = CliRunner()
res = runner.invoke(pipeline_execute_command, args)
assert res.exit_code == return_code, res.exception
return res
@pytest.mark.parametrize(
"dirname,filename,fn_name,_env_yaml,_mode,_preset,_return_code,_exception", cli_args
)
def test_load_pipeline(
dirname, filename, fn_name, _env_yaml, _mode, _preset, _return_code, _exception
):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
load_dagit_for_workspace_cli_args(python_file=filepath, fn_name=fn_name)
@pytest.mark.parametrize(
"dirname,filename,fn_name,env_yaml,mode,preset,return_code,_exception", cli_args
)
def test_dagster_pipeline_execute(
dirname, filename, fn_name, env_yaml, mode, preset, return_code, _exception
):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
yamlpath = path_to_tutorial_file(os.path.join(dirname, env_yaml)) if env_yaml else None
dagster_pipeline_execute(
["-f", filepath, "-a", fn_name]
+ (["-c", yamlpath] if yamlpath else [])
+ (["--mode", mode] if mode else [])
+ (["--preset", preset] if preset else []),
return_code,
)
@pytest.mark.parametrize(
"dirname,filename,_fn_name,_env_yaml,_mode,_preset,return_code,_exception", cli_args
)
def test_script(dirname, filename, _fn_name, _env_yaml, _mode, _preset, return_code, _exception):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
check_script(filepath, return_code)
@pytest.mark.parametrize(
"dirname,filename,_fn_name,_env_yaml,_mode,_preset,_return_code,exception", cli_args
)
def test_runpy(dirname, filename, _fn_name, _env_yaml, _mode, _preset, _return_code, exception):
with pushd(path_to_tutorial_file(dirname)):
filepath = path_to_tutorial_file(os.path.join(dirname, filename))
if exception:
with pytest.raises(exception):
runpy.run_path(filepath, run_name="__main__")
else:
runpy.run_path(filepath, run_name="__main__")
| true | true |
f7fb8d225050c51183c45b2c72d33c3a0f270735 | 3,509 | py | Python | tests/client/sdk/text2text/test_models.py | sakares/rubrix | 791ffb29815b5d24f2bbbb0fa422f85f8b30098f | [
"Apache-2.0"
] | null | null | null | tests/client/sdk/text2text/test_models.py | sakares/rubrix | 791ffb29815b5d24f2bbbb0fa422f85f8b30098f | [
"Apache-2.0"
] | null | null | null | tests/client/sdk/text2text/test_models.py | sakares/rubrix | 791ffb29815b5d24f2bbbb0fa422f85f8b30098f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from datetime import datetime
import pytest
from rubrix.client.models import Text2TextRecord
from rubrix.client.sdk.text2text.models import (
CreationText2TextRecord,
Text2TextAnnotation,
Text2TextBulkData,
Text2TextPrediction,
Text2TextQuery,
)
from rubrix.client.sdk.text2text.models import Text2TextRecord as SdkText2TextRecord
from rubrix.server.tasks.text2text.api.model import (
Text2TextBulkData as ServerText2TextBulkData,
)
from rubrix.server.tasks.text2text.api.model import (
Text2TextQuery as ServerText2TextQuery,
)
def test_bulk_data_schema(helpers):
client_schema = Text2TextBulkData.schema()
server_schema = ServerText2TextBulkData.schema()
assert helpers.remove_description(client_schema) == helpers.remove_description(
server_schema
)
def test_query_schema(helpers):
client_schema = Text2TextQuery.schema()
server_schema = ServerText2TextQuery.schema()
assert helpers.remove_description(client_schema) == helpers.remove_description(
server_schema
)
@pytest.mark.parametrize(
"prediction,expected",
[
(["texto de prueba para text2text", "texto de test para text2text"], 1.0),
([("texto de prueba para text2text", 0.5)], 0.5),
],
)
def test_from_client_prediction(prediction, expected):
record = Text2TextRecord(
text="Test text for text2text",
prediction=prediction,
annotation="texto de prueba para text2text",
id=1,
)
sdk_record = CreationText2TextRecord.from_client(record)
assert len(sdk_record.prediction.sentences) == len(prediction)
assert all(
[sentence.score == expected for sentence in sdk_record.prediction.sentences]
)
@pytest.mark.parametrize(
"agent,expected", [(None, socket.gethostname()), ("agent", "agent")]
)
def test_from_client_agent(agent, expected):
record = Text2TextRecord(
text="test",
prediction=["prueba"],
annotation="prueba",
prediction_agent=agent,
annotation_agent=agent,
)
sdk_record = CreationText2TextRecord.from_client(record)
assert sdk_record.annotation.agent == expected
assert sdk_record.prediction.agent == expected
def test_to_client():
prediction = Text2TextAnnotation(
sentences=[
Text2TextPrediction(text="prueba", score=0.5),
Text2TextPrediction(text="prueba2", score=0.5),
],
agent="agent",
)
sdk_record = SdkText2TextRecord(
text="test",
prediction=prediction,
annotation=prediction,
event_timestamp=datetime(2000, 1, 1),
)
record = sdk_record.to_client()
assert record.prediction == [("prueba", 0.5), ("prueba2", 0.5)]
assert record.prediction_agent == "agent"
assert record.annotation == "prueba"
assert record.annotation_agent == "agent"
| 30.25 | 84 | 0.707324 |
import socket
from datetime import datetime
import pytest
from rubrix.client.models import Text2TextRecord
from rubrix.client.sdk.text2text.models import (
CreationText2TextRecord,
Text2TextAnnotation,
Text2TextBulkData,
Text2TextPrediction,
Text2TextQuery,
)
from rubrix.client.sdk.text2text.models import Text2TextRecord as SdkText2TextRecord
from rubrix.server.tasks.text2text.api.model import (
Text2TextBulkData as ServerText2TextBulkData,
)
from rubrix.server.tasks.text2text.api.model import (
Text2TextQuery as ServerText2TextQuery,
)
def test_bulk_data_schema(helpers):
client_schema = Text2TextBulkData.schema()
server_schema = ServerText2TextBulkData.schema()
assert helpers.remove_description(client_schema) == helpers.remove_description(
server_schema
)
def test_query_schema(helpers):
client_schema = Text2TextQuery.schema()
server_schema = ServerText2TextQuery.schema()
assert helpers.remove_description(client_schema) == helpers.remove_description(
server_schema
)
@pytest.mark.parametrize(
"prediction,expected",
[
(["texto de prueba para text2text", "texto de test para text2text"], 1.0),
([("texto de prueba para text2text", 0.5)], 0.5),
],
)
def test_from_client_prediction(prediction, expected):
record = Text2TextRecord(
text="Test text for text2text",
prediction=prediction,
annotation="texto de prueba para text2text",
id=1,
)
sdk_record = CreationText2TextRecord.from_client(record)
assert len(sdk_record.prediction.sentences) == len(prediction)
assert all(
[sentence.score == expected for sentence in sdk_record.prediction.sentences]
)
@pytest.mark.parametrize(
"agent,expected", [(None, socket.gethostname()), ("agent", "agent")]
)
def test_from_client_agent(agent, expected):
record = Text2TextRecord(
text="test",
prediction=["prueba"],
annotation="prueba",
prediction_agent=agent,
annotation_agent=agent,
)
sdk_record = CreationText2TextRecord.from_client(record)
assert sdk_record.annotation.agent == expected
assert sdk_record.prediction.agent == expected
def test_to_client():
prediction = Text2TextAnnotation(
sentences=[
Text2TextPrediction(text="prueba", score=0.5),
Text2TextPrediction(text="prueba2", score=0.5),
],
agent="agent",
)
sdk_record = SdkText2TextRecord(
text="test",
prediction=prediction,
annotation=prediction,
event_timestamp=datetime(2000, 1, 1),
)
record = sdk_record.to_client()
assert record.prediction == [("prueba", 0.5), ("prueba2", 0.5)]
assert record.prediction_agent == "agent"
assert record.annotation == "prueba"
assert record.annotation_agent == "agent"
| true | true |
f7fb8e38121387cd150b1394c53c7edd1ff3d166 | 4,408 | py | Python | surveillancestation/api.py | k20human/py-surveillance-station | 94861da2dbf496352aabba6b6a3a2ab40bfd94f9 | [
"MIT"
] | 7 | 2017-10-27T06:25:48.000Z | 2020-12-12T15:36:21.000Z | surveillancestation/api.py | k20human/py-surveillance-station | 94861da2dbf496352aabba6b6a3a2ab40bfd94f9 | [
"MIT"
] | null | null | null | surveillancestation/api.py | k20human/py-surveillance-station | 94861da2dbf496352aabba6b6a3a2ab40bfd94f9 | [
"MIT"
] | 2 | 2021-11-25T03:07:30.000Z | 2021-12-14T05:16:28.000Z | import json
import logging
import requests
import urllib3
from .errors import errors
class Api:
def __init__(self, host, user, passwd):
self._host = host
self._user = user
self._passwd = passwd
self._sid = ''
self._logged_in = False
self._session_name = 'SurveillanceStation'
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self._login()
def _login(self):
data = self.req('SYNO.API.Info', self.endpoint('SYNO.API.Info',
query='SYNO.API.Auth,SYNO.SurveillanceStation.'))
login_endpoint = self.endpoint(
'SYNO.API.Auth',
version=str(data['SYNO.API.Auth']['maxVersion']),
cgi=data['SYNO.API.Auth']['path'],
method='login',
extra={
'account': self._user,
'passwd': self._passwd,
'session': self._session_name,
'format': 'cookie'
}
)
data2 = self.req('SYNO.API.Auth', login_endpoint)
if not 'code' in data2:
self._sid = data2['sid']
self._logged_in = True
def _get_response_data(self, api_name, response):
if response.status_code != 200:
logging.error('HTTP status: ' + str(response.status_code))
# Status 500
if response.status_code == 500:
return {'Error': errors[500]}
try:
response_json = response.json()
except json.JSONDecodeError:
return response.content
if response_json['success'] == True:
if 'data' in response_json.keys():
return response_json['data']
return ''
# Get error message
code = response_json['error']['code']
if code in errors:
error_message = errors[code]
elif api_name in errors and code in errors[api_name]:
error_message = errors[api_name][code]
else:
error_message = str(response_json['error']['code'])
logging.error('failure - ' + str(response_json['error']['code']) + ' - ' + error_message)
raise Exception(response_json['error'])
def _is_response_binary(self, response):
return 'text/plain' not in response.headers['content-type']
def get_max_version(self, api):
data = self.req('SYNO.API.Info', self.endpoint('SYNO.API.Info',
query=api))
return str(data[api]['maxVersion'])
def logout(self):
logout_endpoint = self.endpoint(
'SYNO.API.Auth',
cgi='auth.cgi',
method='logout',
extra={'session': self._session_name}
)
self.req('SYNO.API.Auth', logout_endpoint)
def base_endpoint(self, cgi):
ret = self._host + '/webapi/' + cgi
return ret
def endpoint(self, api, query='', cgi='query.cgi', version='1', method='query', extra={}):
ret = self.base_endpoint(cgi) + '?api=' + api + '&version=' + str(version) + '&method=' + method
if query:
ret += '&query=' + query
for key, value in extra.items():
if value:
if isinstance(value, dict) or isinstance(value, list):
value = json.dumps(value)
else:
value = str(value)
ret += '&' + key + '=' + str(value)
if self._sid:
ret += '&_sid=' + self._sid
return ret
def req(self, api_name, endpoint):
logging.info('GET: ' + endpoint)
r = requests.get(endpoint, verify=False)
return self._get_response_data(api_name, r)
def req_binary(self, api_name, endpoint, **kw):
logging.info('GET: ' + endpoint)
r = requests.get(endpoint, **kw)
if self._is_response_binary(r):
if "stream" in kw:
return r
else:
return r.content
self._get_response_data(api_name, r)
return None
def req_post(self, api_name, endpoint, data, files=None):
logging.info('POST: ' + endpoint)
try:
r = requests.post(endpoint, verify=False, data=data, files=files)
except:
return None
return self._get_response_data(api_name, r)
def getSid(self):
return self._sid
| 30.825175 | 104 | 0.547187 | import json
import logging
import requests
import urllib3
from .errors import errors
class Api:
def __init__(self, host, user, passwd):
self._host = host
self._user = user
self._passwd = passwd
self._sid = ''
self._logged_in = False
self._session_name = 'SurveillanceStation'
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self._login()
def _login(self):
data = self.req('SYNO.API.Info', self.endpoint('SYNO.API.Info',
query='SYNO.API.Auth,SYNO.SurveillanceStation.'))
login_endpoint = self.endpoint(
'SYNO.API.Auth',
version=str(data['SYNO.API.Auth']['maxVersion']),
cgi=data['SYNO.API.Auth']['path'],
method='login',
extra={
'account': self._user,
'passwd': self._passwd,
'session': self._session_name,
'format': 'cookie'
}
)
data2 = self.req('SYNO.API.Auth', login_endpoint)
if not 'code' in data2:
self._sid = data2['sid']
self._logged_in = True
def _get_response_data(self, api_name, response):
if response.status_code != 200:
logging.error('HTTP status: ' + str(response.status_code))
if response.status_code == 500:
return {'Error': errors[500]}
try:
response_json = response.json()
except json.JSONDecodeError:
return response.content
if response_json['success'] == True:
if 'data' in response_json.keys():
return response_json['data']
return ''
code = response_json['error']['code']
if code in errors:
error_message = errors[code]
elif api_name in errors and code in errors[api_name]:
error_message = errors[api_name][code]
else:
error_message = str(response_json['error']['code'])
logging.error('failure - ' + str(response_json['error']['code']) + ' - ' + error_message)
raise Exception(response_json['error'])
def _is_response_binary(self, response):
return 'text/plain' not in response.headers['content-type']
def get_max_version(self, api):
data = self.req('SYNO.API.Info', self.endpoint('SYNO.API.Info',
query=api))
return str(data[api]['maxVersion'])
def logout(self):
logout_endpoint = self.endpoint(
'SYNO.API.Auth',
cgi='auth.cgi',
method='logout',
extra={'session': self._session_name}
)
self.req('SYNO.API.Auth', logout_endpoint)
def base_endpoint(self, cgi):
ret = self._host + '/webapi/' + cgi
return ret
def endpoint(self, api, query='', cgi='query.cgi', version='1', method='query', extra={}):
ret = self.base_endpoint(cgi) + '?api=' + api + '&version=' + str(version) + '&method=' + method
if query:
ret += '&query=' + query
for key, value in extra.items():
if value:
if isinstance(value, dict) or isinstance(value, list):
value = json.dumps(value)
else:
value = str(value)
ret += '&' + key + '=' + str(value)
if self._sid:
ret += '&_sid=' + self._sid
return ret
def req(self, api_name, endpoint):
logging.info('GET: ' + endpoint)
r = requests.get(endpoint, verify=False)
return self._get_response_data(api_name, r)
def req_binary(self, api_name, endpoint, **kw):
logging.info('GET: ' + endpoint)
r = requests.get(endpoint, **kw)
if self._is_response_binary(r):
if "stream" in kw:
return r
else:
return r.content
self._get_response_data(api_name, r)
return None
def req_post(self, api_name, endpoint, data, files=None):
logging.info('POST: ' + endpoint)
try:
r = requests.post(endpoint, verify=False, data=data, files=files)
except:
return None
return self._get_response_data(api_name, r)
def getSid(self):
return self._sid
| true | true |
f7fb8e5b88c512b73143e15a8110e12e8a049a7f | 233 | py | Python | diff.py | DHDaniel/git-diff-clone | a0b06d4947948b45fbd8f4c70cb0d6032c4ab3d7 | [
"MIT"
] | 2 | 2020-09-13T13:10:41.000Z | 2021-08-14T23:28:42.000Z | diff.py | DHDaniel/git-diff-clone | a0b06d4947948b45fbd8f4c70cb0d6032c4ab3d7 | [
"MIT"
] | 3 | 2020-04-15T15:46:56.000Z | 2020-09-13T13:10:28.000Z | diff.py | DHDaniel/git-diff-clone | a0b06d4947948b45fbd8f4c70cb0d6032c4ab3d7 | [
"MIT"
] | 1 | 2020-09-13T12:38:04.000Z | 2020-09-13T12:38:04.000Z | #!/usr/bin/env python
from src.Differ import Differ
from src.utilities import Document
import sys
if __name__ == '__main__':
doc1 = Document(sys.argv[1])
doc2 = Document(sys.argv[2])
Differ.diff(doc1, doc2)
| 19.416667 | 35 | 0.665236 |
from src.Differ import Differ
from src.utilities import Document
import sys
if __name__ == '__main__':
doc1 = Document(sys.argv[1])
doc2 = Document(sys.argv[2])
Differ.diff(doc1, doc2)
| true | true |
f7fb8f153ed0fc749dd36febca997186eae0584b | 1,478 | py | Python | nimble_iot_bc/app.py | nimble-platform/tracking-iot-blockchain-service | 47917c9a1ea29982e3d88585a084764569e20d3a | [
"MIT"
] | 4 | 2019-12-13T19:19:20.000Z | 2021-08-15T16:28:53.000Z | nimble_iot_bc/app.py | nimble-platform/tracking-iot-blockchain-service | 47917c9a1ea29982e3d88585a084764569e20d3a | [
"MIT"
] | 1 | 2020-01-06T14:24:51.000Z | 2020-01-06T17:26:07.000Z | nimble_iot_bc/app.py | nimble-platform/tracking-iot-blockchain-service | 47917c9a1ea29982e3d88585a084764569e20d3a | [
"MIT"
] | 2 | 2019-11-20T08:53:28.000Z | 2021-03-18T03:17:28.000Z | import logging
from flask import Flask
from flask_cors import CORS
from nimble_iot_bc.apis import blueprint as api
from nimble_iot_bc.databases import mongo, influx
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
main_app = Flask(__name__)
def configure_app(flask_app):
'''Configure the complete Flask App using a `.cfg` file
as an environment variable
'''
logger.info('Configuring Flask App')
try:
flask_app.config.from_envvar('APP_CONFIG')
except Exception as e:
logger.error('No APP_CONFIG variable found. No configuration found.')
raise(e)
def initialize_app(flask_app):
'''Initialize the complete Flask App with a Blueprint
and Database initializations
'''
configure_app(flask_app)
# blueprint = Blueprint('api', __name__, url_prefix='/api')
# api.init_app(blueprint)
flask_app.register_blueprint(api, url_prefix='/api')
# Databases (Mongo, InfluxDB)
mongo.init_app(flask_app)
influx.init_app(flask_app)
def main():
'''Main function that returns the CORS-configured Flask App
'''
logger.info('Starting Server for IoT-Blockchain API')
CORS(main_app)
initialize_app(main_app)
return main_app
def request_context():
''' Request the main App's context
'''
return main_app.app_context()
if __name__ == "__main__":
# Always in Production Mode.
entrypoint = main()
entrypoint.run(debug=False)
| 24.633333 | 77 | 0.70636 | import logging
from flask import Flask
from flask_cors import CORS
from nimble_iot_bc.apis import blueprint as api
from nimble_iot_bc.databases import mongo, influx
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
main_app = Flask(__name__)
def configure_app(flask_app):
logger.info('Configuring Flask App')
try:
flask_app.config.from_envvar('APP_CONFIG')
except Exception as e:
logger.error('No APP_CONFIG variable found. No configuration found.')
raise(e)
def initialize_app(flask_app):
configure_app(flask_app)
flask_app.register_blueprint(api, url_prefix='/api')
mongo.init_app(flask_app)
influx.init_app(flask_app)
def main():
logger.info('Starting Server for IoT-Blockchain API')
CORS(main_app)
initialize_app(main_app)
return main_app
def request_context():
return main_app.app_context()
if __name__ == "__main__":
entrypoint = main()
entrypoint.run(debug=False)
| true | true |
f7fb8f574ca6ebca6871d9c9439eb183a2420dd6 | 13,829 | py | Python | salt/proxy/panos.py | dmyerscough/salt | d7b19ab64f0695568f78c12b4ba209e033903804 | [
"Apache-2.0"
] | 1 | 2021-08-14T13:48:38.000Z | 2021-08-14T13:48:38.000Z | salt/proxy/panos.py | dmyerscough/salt | d7b19ab64f0695568f78c12b4ba209e033903804 | [
"Apache-2.0"
] | null | null | null | salt/proxy/panos.py | dmyerscough/salt | d7b19ab64f0695568f78c12b4ba209e033903804 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Proxy Minion interface module for managing Palo Alto firewall devices.
:codeauthor: :email:`Spencer Ervin <spencer_ervin@hotmail.com>`
:maturity: new
:depends: none
:platform: unix
This proxy minion enables Palo Alto firewalls (hereafter referred to
as simply 'panos') to be treated individually like a Salt Minion.
The panos proxy leverages the XML API functionality on the Palo Alto
firewall. The Salt proxy must have access to the Palo Alto firewall on
HTTPS (tcp/443).
More in-depth conceptual reading on Proxy Minions can be found in the
:ref:`Proxy Minion <proxy-minion>` section of Salt's
documentation.
Configuration
=============
To use this integration proxy module, please configure the following:
Pillar
------
Proxy minions get their configuration from Salt's Pillar. Every proxy must
have a stanza in Pillar and a reference in the Pillar top-file that matches
the ID. There are four connection options available for the panos proxy module.
- Direct Device (Password)
- Direct Device (API Key)
- Panorama Pass-Through (Password)
- Panorama Pass-Through (API Key)
Direct Device (Password)
------------------------
The direct device configuration configures the proxy to connect directly to
the device with username and password.
.. code-block:: yaml
proxy:
proxytype: panos
host: <ip or dns name of panos host>
username: <panos username>
password: <panos password>
proxytype
^^^^^^^^^
The ``proxytype`` key and value pair is critical, as it tells Salt which
interface to load from the ``proxy`` directory in Salt's install hierarchy,
or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your
own proxy module, for example). To use this panos Proxy Module, set this to
``panos``.
host
^^^^
The location, or ip/dns, of the panos host. Required.
username
^^^^^^^^
The username used to login to the panos host. Required.
password
^^^^^^^^
The password used to login to the panos host. Required.
Direct Device (API Key)
------------------------
Palo Alto devices allow for access to the XML API with a generated 'API key'_
instead of username and password.
.. _API key: https://www.paloaltonetworks.com/documentation/71/pan-os/xml-api/get-started-with-the-pan-os-xml-api/get-your-api-key
.. code-block:: yaml
proxy:
proxytype: panos
host: <ip or dns name of panos host>
apikey: <panos generated api key>
proxytype
^^^^^^^^^
The ``proxytype`` key and value pair is critical, as it tells Salt which
interface to load from the ``proxy`` directory in Salt's install hierarchy,
or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your
own proxy module, for example). To use this panos Proxy Module, set this to
``panos``.
host
^^^^
The location, or ip/dns, of the panos host. Required.
apikey
^^^^^^^^
The generated XML API key for the panos host. Required.
Panorama Pass-Through (Password)
------------------------
The Panorama pass-through method sends all connections through the Panorama
management system. It passes the connections to the appropriate device using
the serial number of the Palo Alto firewall.
This option will reduce the number of connections that must be present for the
proxy server. It will only require a connection to the Panorama server.
The username and password will be for authentication to the Panorama server,
not the panos device.
.. code-block:: yaml
proxy:
proxytype: panos
serial: <serial number of panos host>
host: <ip or dns name of the panorama server>
username: <panorama server username>
password: <panorama server password>
proxytype
^^^^^^^^^
The ``proxytype`` key and value pair is critical, as it tells Salt which
interface to load from the ``proxy`` directory in Salt's install hierarchy,
or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your
own proxy module, for example). To use this panos Proxy Module, set this to
``panos``.
serial
^^^^^^
The serial number of the panos host. Required.
host
^^^^
The location, or ip/dns, of the Panorama server. Required.
username
^^^^^^^^
The username used to login to the Panorama server. Required.
password
^^^^^^^^
The password used to login to the Panorama server. Required.
Panorama Pass-Through (API Key)
------------------------
The Panorama server can also utilize a generated 'API key'_ for authentication.
.. _API key: https://www.paloaltonetworks.com/documentation/71/pan-os/xml-api/get-started-with-the-pan-os-xml-api/get-your-api-key
.. code-block:: yaml
proxy:
proxytype: panos
serial: <serial number of panos host>
host: <ip or dns name of the panorama server>
apikey: <panos generated api key>
proxytype
^^^^^^^^^
The ``proxytype`` key and value pair is critical, as it tells Salt which
interface to load from the ``proxy`` directory in Salt's install hierarchy,
or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your
own proxy module, for example). To use this panos Proxy Module, set this to
``panos``.
serial
^^^^^^
The serial number of the panos host. Required.
host
^^^^
The location, or ip/dns, of the Panorama server. Required.
apikey
^^^^^^^^
The generated XML API key for the Panorama server. Required.
'''
from __future__ import absolute_import
# Import Python Libs
import logging
# Import Salt Libs
import salt.exceptions
# This must be present or the Salt loader won't load this module.
__proxyenabled__ = ['panos']
# Variables are scoped to this module so we can have persistent data.
GRAINS_CACHE = {'vendor': 'Palo Alto'}
DETAILS = {}
# Set up logging
log = logging.getLogger(__file__)
# Define the module's virtual name
__virtualname__ = 'panos'
def __virtual__():
'''
Only return if all the modules are available.
'''
return __virtualname__
def init(opts):
'''
This function gets called when the proxy starts up. For
panos devices, a determination is made on the connection type
and the appropriate connection details that must be cached.
'''
if 'host' not in opts['proxy']:
log.critical('No \'host\' key found in pillar for this proxy.')
return False
if 'apikey' not in opts['proxy']:
# If we do not have an apikey, we must have both a username and password
if 'username' not in opts['proxy']:
log.critical('No \'username\' key found in pillar for this proxy.')
return False
if 'password' not in opts['proxy']:
log.critical('No \'passwords\' key found in pillar for this proxy.')
return False
DETAILS['url'] = 'https://{0}/api/'.format(opts['proxy']['host'])
# Set configuration details
DETAILS['host'] = opts['proxy']['host']
if 'serial' in opts['proxy']:
DETAILS['serial'] = opts['proxy'].get('serial')
if 'apikey' in opts['proxy']:
log.debug("Selected pan_key method for panos proxy module.")
DETAILS['method'] = 'pan_key'
DETAILS['apikey'] = opts['proxy'].get('apikey')
else:
log.debug("Selected pan_pass method for panos proxy module.")
DETAILS['method'] = 'pan_pass'
DETAILS['username'] = opts['proxy'].get('username')
DETAILS['password'] = opts['proxy'].get('password')
else:
if 'apikey' in opts['proxy']:
log.debug("Selected dev_key method for panos proxy module.")
DETAILS['method'] = 'dev_key'
DETAILS['apikey'] = opts['proxy'].get('apikey')
else:
log.debug("Selected dev_pass method for panos proxy module.")
DETAILS['method'] = 'dev_pass'
DETAILS['username'] = opts['proxy'].get('username')
DETAILS['password'] = opts['proxy'].get('password')
# Ensure connectivity to the device
log.debug("Attempting to connect to panos proxy host.")
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
call(query)
log.debug("Successfully connected to panos proxy host.")
DETAILS['initialized'] = True
def call(payload=None):
'''
This function captures the query string and sends it to the Palo Alto device.
'''
ret = {}
try:
if DETAILS['method'] == 'dev_key':
# Pass the api key without the target declaration
conditional_payload = {'key': DETAILS['apikey']}
payload.update(conditional_payload)
r = __utils__['http.query'](DETAILS['url'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
elif DETAILS['method'] == 'dev_pass':
# Pass credentials without the target declaration
r = __utils__['http.query'](DETAILS['url'],
username=DETAILS['username'],
password=DETAILS['password'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
elif DETAILS['method'] == 'pan_key':
# Pass the api key with the target declaration
conditional_payload = {'key': DETAILS['apikey'],
'target': DETAILS['serial']}
payload.update(conditional_payload)
r = __utils__['http.query'](DETAILS['url'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
elif DETAILS['method'] == 'pan_pass':
# Pass credentials with the target declaration
conditional_payload = {'target': DETAILS['serial']}
payload.update(conditional_payload)
r = __utils__['http.query'](DETAILS['url'],
username=DETAILS['username'],
password=DETAILS['password'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
except KeyError as err:
raise salt.exceptions.CommandExecutionError("Did not receive a valid response from host.")
return ret
def is_required_version(required_version='0.0.0'):
'''
Because different versions of Palo Alto support different command sets, this function
will return true if the current version of Palo Alto supports the required command.
'''
if 'sw-version' in DETAILS['grains_cache']:
current_version = DETAILS['grains_cache']['sw-version']
else:
# If we do not have the current sw-version cached, we cannot check version requirements.
return False
required_version_split = required_version.split(".")
current_version_split = current_version.split(".")
try:
if int(current_version_split[0]) > int(required_version_split[0]):
return True
elif int(current_version_split[0]) < int(required_version_split[0]):
return False
if int(current_version_split[1]) > int(required_version_split[1]):
return True
elif int(current_version_split[1]) < int(required_version_split[1]):
return False
if int(current_version_split[2]) > int(required_version_split[2]):
return True
elif int(current_version_split[2]) < int(required_version_split[2]):
return False
# We have an exact match
return True
except Exception as err:
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
DETAILS['grains_cache'] = GRAINS_CACHE
try:
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
DETAILS['grains_cache'] = call(query)['system']
except Exception as err:
pass
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def ping():
'''
Returns true if the device is reachable, else false.
'''
try:
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
if 'system' in call(query):
return True
else:
return False
except Exception as err:
return False
def shutdown():
'''
Shutdown the connection to the proxy device. For this proxy,
shutdown is a no-op.
'''
log.debug('Panos proxy shutdown() called.')
| 33.004773 | 130 | 0.606551 |
from __future__ import absolute_import
import logging
import salt.exceptions
__proxyenabled__ = ['panos']
# Variables are scoped to this module so we can have persistent data.
GRAINS_CACHE = {'vendor': 'Palo Alto'}
DETAILS = {}
# Set up logging
log = logging.getLogger(__file__)
# Define the module's virtual name
__virtualname__ = 'panos'
def __virtual__():
return __virtualname__
def init(opts):
if 'host' not in opts['proxy']:
log.critical('No \'host\' key found in pillar for this proxy.')
return False
if 'apikey' not in opts['proxy']:
if 'username' not in opts['proxy']:
log.critical('No \'username\' key found in pillar for this proxy.')
return False
if 'password' not in opts['proxy']:
log.critical('No \'passwords\' key found in pillar for this proxy.')
return False
DETAILS['url'] = 'https://{0}/api/'.format(opts['proxy']['host'])
DETAILS['host'] = opts['proxy']['host']
if 'serial' in opts['proxy']:
DETAILS['serial'] = opts['proxy'].get('serial')
if 'apikey' in opts['proxy']:
log.debug("Selected pan_key method for panos proxy module.")
DETAILS['method'] = 'pan_key'
DETAILS['apikey'] = opts['proxy'].get('apikey')
else:
log.debug("Selected pan_pass method for panos proxy module.")
DETAILS['method'] = 'pan_pass'
DETAILS['username'] = opts['proxy'].get('username')
DETAILS['password'] = opts['proxy'].get('password')
else:
if 'apikey' in opts['proxy']:
log.debug("Selected dev_key method for panos proxy module.")
DETAILS['method'] = 'dev_key'
DETAILS['apikey'] = opts['proxy'].get('apikey')
else:
log.debug("Selected dev_pass method for panos proxy module.")
DETAILS['method'] = 'dev_pass'
DETAILS['username'] = opts['proxy'].get('username')
DETAILS['password'] = opts['proxy'].get('password')
log.debug("Attempting to connect to panos proxy host.")
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
call(query)
log.debug("Successfully connected to panos proxy host.")
DETAILS['initialized'] = True
def call(payload=None):
ret = {}
try:
if DETAILS['method'] == 'dev_key':
conditional_payload = {'key': DETAILS['apikey']}
payload.update(conditional_payload)
r = __utils__['http.query'](DETAILS['url'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
elif DETAILS['method'] == 'dev_pass':
r = __utils__['http.query'](DETAILS['url'],
username=DETAILS['username'],
password=DETAILS['password'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
elif DETAILS['method'] == 'pan_key':
conditional_payload = {'key': DETAILS['apikey'],
'target': DETAILS['serial']}
payload.update(conditional_payload)
r = __utils__['http.query'](DETAILS['url'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
elif DETAILS['method'] == 'pan_pass':
conditional_payload = {'target': DETAILS['serial']}
payload.update(conditional_payload)
r = __utils__['http.query'](DETAILS['url'],
username=DETAILS['username'],
password=DETAILS['password'],
data=payload,
method='POST',
decode_type='xml',
decode=True,
verify_ssl=False,
raise_error=True)
ret = r['dict'][0]
except KeyError as err:
raise salt.exceptions.CommandExecutionError("Did not receive a valid response from host.")
return ret
def is_required_version(required_version='0.0.0'):
if 'sw-version' in DETAILS['grains_cache']:
current_version = DETAILS['grains_cache']['sw-version']
else:
return False
required_version_split = required_version.split(".")
current_version_split = current_version.split(".")
try:
if int(current_version_split[0]) > int(required_version_split[0]):
return True
elif int(current_version_split[0]) < int(required_version_split[0]):
return False
if int(current_version_split[1]) > int(required_version_split[1]):
return True
elif int(current_version_split[1]) < int(required_version_split[1]):
return False
if int(current_version_split[2]) > int(required_version_split[2]):
return True
elif int(current_version_split[2]) < int(required_version_split[2]):
return False
return True
except Exception as err:
return False
def initialized():
return DETAILS.get('initialized', False)
def grains():
if not DETAILS.get('grains_cache', {}):
DETAILS['grains_cache'] = GRAINS_CACHE
try:
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
DETAILS['grains_cache'] = call(query)['system']
except Exception as err:
pass
return DETAILS['grains_cache']
def grains_refresh():
DETAILS['grains_cache'] = None
return grains()
def ping():
try:
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
if 'system' in call(query):
return True
else:
return False
except Exception as err:
return False
def shutdown():
log.debug('Panos proxy shutdown() called.')
| true | true |
f7fb8f7c89b788e21fd08b45e00aa759617eaa13 | 67,639 | py | Python | src/amuse/community/mesa/interface.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 131 | 2015-06-04T09:06:57.000Z | 2022-02-01T12:11:29.000Z | src/amuse/community/mesa/interface.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 690 | 2015-10-17T12:18:08.000Z | 2022-03-31T16:15:58.000Z | src/amuse/community/mesa/interface.py | rieder/amuse | 3ac3b6b8f922643657279ddee5c8ab3fc0440d5e | [
"Apache-2.0"
] | 102 | 2015-01-22T10:00:29.000Z | 2022-02-09T13:29:43.000Z | import os
import numpy
from operator import itemgetter
from amuse.community import *
from amuse.community.interface.se import StellarEvolution, StellarEvolutionInterface, \
InternalStellarStructure, InternalStellarStructureInterface
from amuse.units.quantities import VectorQuantity
from amuse.support.interface import InCodeComponentImplementation
from amuse.support.options import option
class MESAInterface(CodeInterface, LiteratureReferencesMixIn, StellarEvolutionInterface,
InternalStellarStructureInterface, CodeWithDataDirectories):
"""
The software project MESA (Modules for Experiments in Stellar Astrophysics,
http://mesa.sourceforge.net/), aims to provide state-of-the-art, robust,
and efficient open source modules, usable singly or in combination for a
wide range of applications in stellar astrophysics. The AMUSE interface to
MESA can create and evolve stars using the MESA/STAR module. If you order a
metallicity you haven't used before, starting models will be computed
automatically and saved in the `mesa/src/data/star_data/starting_models`
directory (please be patient...). All metallicities are supported, even the
interesting case of Z=0. The supported stellar mass range is from
about 0.1 to 100 Msun.
References:
.. [#] Paxton, Bildsten, Dotter, Herwig, Lesaffre & Timmes 2011, ApJS, arXiv:1009.1622 [2011ApJS..192....3P]
.. [#] http://mesa.sourceforge.net/
"""
def __init__(self, **options):
CodeInterface.__init__(self, name_of_the_worker="mesa_worker", **options)
LiteratureReferencesMixIn.__init__(self)
CodeWithDataDirectories.__init__(self)
@property
def default_path_to_inlist(self):
return os.path.join(self.get_data_directory(), 'AMUSE_inlist')
@option(type="string", sections=('data'))
def default_path_to_MESA_data(self):
return os.path.join(self.amuse_root_directory, 'src', 'amuse', 'community', 'mesa', 'src', 'mesa', 'data')
@legacy_function
def set_MESA_paths():
"""
Set the paths to the MESA inlist and data directories.
"""
function = LegacyFunctionSpecification()
function.addParameter('inlist_path', dtype='string', direction=function.IN,
description = "Path to the inlist file.")
function.addParameter('MESA_data_path', dtype='string', direction=function.IN,
description = "Path to the data directory.")
function.addParameter('local_data_path', dtype='string', direction=function.IN,
description = "Path to the data directory.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was set
-1 - ERROR
Directory does not exist
"""
return function
@legacy_function
def get_maximum_number_of_stars():
"""
Retrieve the maximum number of stars that can be
handled by this instance.
"""
function = LegacyFunctionSpecification()
function.addParameter('maximum_number_of_stars', dtype='int32', direction=function.OUT,
description = "The current value of the maximum number of stars")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value of was retrieved
"""
return function
@legacy_function
def new_zams_model():
function = LegacyFunctionSpecification()
function.addParameter('status', dtype='int32', direction=function.OUT)
return function
@legacy_function
def new_pre_ms_particle():
"""
Define a new pre-main-sequence star in the code. The star will start with the given mass.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.OUT
, description="The new index for the star. This index can be used to refer to this star in other functions")
function.addParameter('mass', dtype='float64', direction=function.IN
, description="The initial mass of the star")
function.result_type = 'int32'
return function
@legacy_function
def set_time_step():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('time_step', dtype='float64', direction=function.IN
, description="The next timestep for the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
A star with the given index was not found.
"""
return function
@legacy_function
def get_core_mass():
"""
Retrieve the current core mass of the star, where hydrogen abundance is <= h1_boundary_limit
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('core_mass', dtype='float64', direction=function.OUT
, description="The current core mass of the star, where hydrogen abundance is <= h1_boundary_limit")
function.result_type = 'int32'
return function
@legacy_function
def get_mass_loss_rate():
"""
Retrieve the current mass loss rate of the star. (positive for winds, negative for accretion)
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('mass_loss_rate', dtype='float64', direction=function.OUT
, description="The current mass loss rate of the star. (positive for winds, negative for accretion)")
function.result_type = 'int32'
return function
@legacy_function
def get_manual_mass_transfer_rate():
"""
Retrieve the current user-specified mass transfer rate of the star. (negative for winds, positive for accretion)
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('mass_change', dtype='float64', direction=function.OUT
, description="The current user-specified mass transfer rate of the star. (negative for winds, positive for accretion)")
function.result_type = 'int32'
return function
@legacy_function
def set_manual_mass_transfer_rate():
"""
Set a new user-specified mass transfer rate of the star. (negative for winds, positive for accretion)
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('mass_change', dtype='float64', direction=function.IN
, description="The new user-specified mass transfer rate of the star. (negative for winds, positive for accretion)")
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_same_as_surface():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_same_as_surface_flag', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_same_as_surface():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_same_as_surface_flag', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_composition_non_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('h1', dtype='float64', direction=function.OUT)
function.addParameter('h2', dtype='float64', direction=function.OUT)
function.addParameter('he3', dtype='float64', direction=function.OUT)
function.addParameter('he4', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_composition_non_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('h1', dtype='float64', direction=function.IN)
function.addParameter('h2', dtype='float64', direction=function.IN)
function.addParameter('he3', dtype='float64', direction=function.IN)
function.addParameter('he4', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_composition_metals_identifier():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_composition_metals_identifier', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_composition_metals_identifier():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_composition_metals_identifier', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_composition_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('li', dtype='float64', direction=function.OUT)
function.addParameter('be', dtype='float64', direction=function.OUT)
function.addParameter('b', dtype='float64', direction=function.OUT)
function.addParameter('c', dtype='float64', direction=function.OUT)
function.addParameter('n', dtype='float64', direction=function.OUT)
function.addParameter('o', dtype='float64', direction=function.OUT)
function.addParameter('f', dtype='float64', direction=function.OUT)
function.addParameter('ne', dtype='float64', direction=function.OUT)
function.addParameter('na', dtype='float64', direction=function.OUT)
function.addParameter('mg', dtype='float64', direction=function.OUT)
function.addParameter('al', dtype='float64', direction=function.OUT)
function.addParameter('si', dtype='float64', direction=function.OUT)
function.addParameter('p', dtype='float64', direction=function.OUT)
function.addParameter('s', dtype='float64', direction=function.OUT)
function.addParameter('cl', dtype='float64', direction=function.OUT)
function.addParameter('ar', dtype='float64', direction=function.OUT)
function.addParameter('k', dtype='float64', direction=function.OUT)
function.addParameter('ca', dtype='float64', direction=function.OUT)
function.addParameter('sc', dtype='float64', direction=function.OUT)
function.addParameter('ti', dtype='float64', direction=function.OUT)
function.addParameter('v', dtype='float64', direction=function.OUT)
function.addParameter('cr', dtype='float64', direction=function.OUT)
function.addParameter('mn', dtype='float64', direction=function.OUT)
function.addParameter('fe', dtype='float64', direction=function.OUT)
function.addParameter('co', dtype='float64', direction=function.OUT)
function.addParameter('ni', dtype='float64', direction=function.OUT)
function.addParameter('cu', dtype='float64', direction=function.OUT)
function.addParameter('zn', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_composition_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('li', dtype='float64', direction=function.IN)
function.addParameter('be', dtype='float64', direction=function.IN)
function.addParameter('b', dtype='float64', direction=function.IN)
function.addParameter('c', dtype='float64', direction=function.IN)
function.addParameter('n', dtype='float64', direction=function.IN)
function.addParameter('o', dtype='float64', direction=function.IN)
function.addParameter('f', dtype='float64', direction=function.IN)
function.addParameter('ne', dtype='float64', direction=function.IN)
function.addParameter('na', dtype='float64', direction=function.IN)
function.addParameter('mg', dtype='float64', direction=function.IN)
function.addParameter('al', dtype='float64', direction=function.IN)
function.addParameter('si', dtype='float64', direction=function.IN)
function.addParameter('p', dtype='float64', direction=function.IN)
function.addParameter('s', dtype='float64', direction=function.IN)
function.addParameter('cl', dtype='float64', direction=function.IN)
function.addParameter('ar', dtype='float64', direction=function.IN)
function.addParameter('k', dtype='float64', direction=function.IN)
function.addParameter('ca', dtype='float64', direction=function.IN)
function.addParameter('sc', dtype='float64', direction=function.IN)
function.addParameter('ti', dtype='float64', direction=function.IN)
function.addParameter('v', dtype='float64', direction=function.IN)
function.addParameter('cr', dtype='float64', direction=function.IN)
function.addParameter('mn', dtype='float64', direction=function.IN)
function.addParameter('fe', dtype='float64', direction=function.IN)
function.addParameter('co', dtype='float64', direction=function.IN)
function.addParameter('ni', dtype='float64', direction=function.IN)
function.addParameter('cu', dtype='float64', direction=function.IN)
function.addParameter('zn', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_number_of_backups_in_a_row():
"""
Retrieve the number_of_backups_in_a_row of the star.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of number_of_backups_in_a_row")
function.addParameter('n_backup', dtype='int32', direction=function.OUT
, description="The current number_of_backups_in_a_row of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The number_of_backups_in_a_row was retrieved.
-1 - ERROR
A star with the given index was not found.
"""
return function
@legacy_function
def reset_number_of_backups_in_a_row():
    """
    Reset number_of_backups_in_a_row of the star.

    Companion to get_number_of_backups_in_a_row; sets the counter back to zero.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of star indices
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to reset the value of number_of_backups_in_a_row")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The number_of_backups_in_a_row was reset.
    -1 - ERROR
        A star with the given index was not found.
    """
    return function
@legacy_function
def get_mass_fraction_at_zone():
    """
    Retrieve the mass fraction at the specified zone/mesh-cell of the star.

    Bound to dimensionless values by define_methods; see also get_mass_profile,
    which queries all zones at once.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, zone) pairs
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to get the value of")
    function.addParameter('zone', dtype='int32', direction=function.IN
        , description="The zone/mesh-cell of the star to get the value of")
    function.addParameter('dq_i', dtype='float64', direction=function.OUT
        , description="The mass fraction at the specified zone/mesh-cell of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was retrieved.
    -1 - ERROR
        A star with the given index was not found.
    -2 - ERROR
        A zone with the given index was not found.
    """
    return function
@legacy_function
def set_mass_fraction_at_zone():
    """
    Set the mass fraction at the specified zone/mesh-cell of the star.

    Inverse of get_mass_fraction_at_zone; see also set_mass_profile.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, zone, value) triples
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to set the value of")
    function.addParameter('zone', dtype='int32', direction=function.IN
        , description="The zone/mesh-cell of the star to set the value of")
    function.addParameter('dq_i', dtype='float64', direction=function.IN
        , description="The mass fraction at the specified zone/mesh-cell of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was set.
    -1 - ERROR
        A star with the given index was not found.
    -2 - ERROR
        A zone with the given index was not found.
    """
    return function
@legacy_function
def get_luminosity_at_zone():
    """
    Retrieve the luminosity at the specified zone/mesh-cell of the star.

    Bound to erg/s by define_methods.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, zone) pairs
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to get the value of")
    function.addParameter('zone', dtype='int32', direction=function.IN
        , description="The zone/mesh-cell of the star to get the value of")
    function.addParameter('lum_i', dtype='float64', direction=function.OUT
        , description="The luminosity at the specified zone/mesh-cell of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was retrieved.
    -1 - ERROR
        A star with the given index was not found.
    -2 - ERROR
        A zone with the given index was not found.
    """
    return function
@legacy_function
def set_luminosity_at_zone():
    """
    Set the luminosity at the specified zone/mesh-cell of the star.

    Inverse of get_luminosity_at_zone; bound to erg/s by define_methods.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, zone, value) triples
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to set the value of")
    function.addParameter('zone', dtype='int32', direction=function.IN
        , description="The zone/mesh-cell of the star to set the value of")
    function.addParameter('lum_i', dtype='float64', direction=function.IN
        , description="The luminosity at the specified zone/mesh-cell of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was set.
    -1 - ERROR
        A star with the given index was not found.
    -2 - ERROR
        A zone with the given index was not found.
    """
    return function
@legacy_function
def get_entropy_at_zone():
    """
    Retrieve the entropy at the specified zone/mesh-cell of the star.

    Read-only: there is no matching setter.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, zone) pairs
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to get the value of")
    function.addParameter('zone', dtype='int32', direction=function.IN
        , description="The zone/mesh-cell of the star to get the value of")
    function.addParameter('S_i', dtype='float64', direction=function.OUT
        , description="The specific entropy at the specified zone/mesh-cell of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was retrieved.
    -1 - ERROR
        A star with the given index was not found.
    -2 - ERROR
        A zone with the given index was not found.
    """
    return function
@legacy_function
def get_thermal_energy_at_zone():
    """
    Retrieve the specific thermal energy at the specified zone/mesh-cell
    of the star.
    """
    # NOTE(review): the original docstring said "entropy" -- a copy-paste
    # error from get_entropy_at_zone; the parameter description below shows
    # this returns the specific thermal energy (bound to erg/g by define_methods).
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, zone) pairs
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to get the value of")
    function.addParameter('zone', dtype='int32', direction=function.IN
        , description="The zone/mesh-cell of the star to get the value of")
    function.addParameter('E_i', dtype='float64', direction=function.OUT
        , description="The specific thermal energy at the specified zone/mesh-cell of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was retrieved.
    -1 - ERROR
        A star with the given index was not found.
    -2 - ERROR
        A zone with the given index was not found.
    """
    return function
@legacy_function
def get_brunt_vaisala_frequency_squared_at_zone():
    """
    Retrieve the Brunt-Vaisala frequency squared at the specified zone/mesh-cell of the star.

    Unlike the older spec builders above, this one attaches units directly
    to the parameters (N^2 in s^-2) instead of relying on define_methods.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, zone) pairs
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN, unit=INDEX)
    function.addParameter('zone', dtype='int32', direction=function.IN, unit=NO_UNIT)
    function.addParameter('brunt_N2', dtype='float64', direction=function.OUT, unit=units.s**-2)
    function.result_type = 'int32'
    return function
@legacy_function
def get_id_of_species():
    """
    Retrieve the chem_ID of the chemical abundance variable of the star.

    'species' is the per-star species slot; 'species_id' is the code's
    internal chemical identifier for that slot.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, species) pairs
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to get the value of")
    function.addParameter('species', dtype='int32', direction=function.IN
        , description="The species of the star to get the name of")
    function.addParameter('species_id', dtype='int32', direction=function.OUT
        , description="The chem_ID of the chemical abundance variable of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was retrieved.
    -1 - ERROR
        A star with the given index was not found.
    """
    return function
@legacy_function
def get_mass_of_species():
    """
    Retrieve the mass number of the chemical abundance variable of the star.

    Bound to atomic mass units (amu) by define_methods.
    """
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of (star, species) pairs
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to get the value of")
    function.addParameter('species', dtype='int32', direction=function.IN
        , description="The species of the star to get the mass number of")
    function.addParameter('species_mass', dtype='float64', direction=function.OUT
        , description="The mass number of the chemical abundance variable of the star.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value was retrieved.
    -1 - ERROR
        A star with the given index was not found.
    """
    return function
@legacy_function
def erase_memory():
    """
    Erase memory of the star, i.e. copy the current structure over the memory of
    the structure of the previous steps. Useful after setting the structure of
    the star, to prevent backup steps to undo changes
    """
    # Exposed to Python as "_erase_memory" (see define_methods); the profile
    # setters call it automatically after writing a structure.
    function = LegacyFunctionSpecification()
    function.can_handle_array = True  # accepts arrays of star indices
    function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
        , description="The index of the star to get the value of")
    function.result_type = 'int32'
    return function
@legacy_function
def get_max_age_stop_condition():
    """
    Retrieve the current maximum age stop condition of this instance (in years).
    Evolution will stop once the star has reached this maximum age.

    Code-wide setting (no star index); surfaced as the
    'max_age_stop_condition' parameter in define_parameters.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('max_age_stop_condition', dtype='float64', direction=function.OUT
        , description="The current maximum age stop condition of this instance (in years).")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        Current value was retrieved
    -1 - ERROR
        The code could not retrieve the value.
    """
    return function
@legacy_function
def set_max_age_stop_condition():
    """
    Set the new maximum age stop condition of this instance (in years).
    Evolution will stop once the star has reached this maximum age.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('max_age_stop_condition', dtype='float64', direction=function.IN
        , description="The new maximum age stop condition of this instance (in years).")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value has been set.
    -1 - ERROR
        The code could not set the value.
    """
    return function
@legacy_function
def get_min_timestep_stop_condition():
    """
    Retrieve the current minimum timestep stop condition of this instance (in seconds).
    Evolution will stop if the timestep required by the solver in order to converge
    has decreased below this minimum timestep.
    """
    # NOTE(review): define_methods binds this value to units.s and the parameter
    # default is 1.0e-6 | units.s, so the original "(in years)" in the docstring
    # was wrong; the runtime description string below still says "years" -- it is
    # left untouched here, but should be corrected on the next interface change.
    function = LegacyFunctionSpecification()
    function.addParameter('min_timestep_stop_condition', dtype='float64', direction=function.OUT
        , description="The current minimum timestep stop condition of this instance (in years).")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        Current value was retrieved
    -1 - ERROR
        The code could not retrieve the value.
    """
    return function
@legacy_function
def set_min_timestep_stop_condition():
    """
    Set the new minimum timestep stop condition of this instance (in seconds).
    Evolution will stop if the timestep required by the solver in order to converge
    has decreased below this minimum timestep.
    """
    # NOTE(review): units.s per define_methods -- the "(in years)" in the runtime
    # description string below is stale; see get_min_timestep_stop_condition.
    function = LegacyFunctionSpecification()
    function.addParameter('min_timestep_stop_condition', dtype='float64', direction=function.IN
        , description="The new minimum timestep stop condition of this instance (in years).")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value has been set.
    -1 - ERROR
        The code could not set the value.
    """
    return function
@legacy_function
def get_max_iter_stop_condition():
    """
    Retrieve the current maximum number of iterations of this instance. (Negative means no maximum)
    Evolution will stop after this number of iterations.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('max_iter_stop_condition', dtype='int32', direction=function.OUT
        , description="The current maximum number of iterations of this instance.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        Current value was retrieved
    -1 - ERROR
        The code could not retrieve the value.
    """
    return function
@legacy_function
def set_max_iter_stop_condition():
    """
    Set the new maximum number of iterations of this instance. (Negative means no maximum)
    Evolution will stop after this number of iterations.
    """
    # Default is -1111 (i.e. no maximum), see define_parameters.
    function = LegacyFunctionSpecification()
    function.addParameter('max_iter_stop_condition', dtype='int32', direction=function.IN
        , description="The new maximum number of iterations of this instance.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value has been set.
    -1 - ERROR
        The code could not set the value.
    """
    return function
@legacy_function
def get_convective_overshoot_parameter():
    """
    Retrieve the current value of the convective overshoot parameter.

    Surfaced as 'herwig_convective_overshoot_parameter' in define_parameters
    (Herwig 2000 prescription; default 0.0, i.e. disabled).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('convective_overshoot_parameter', dtype='float64', direction=function.OUT,
        description="The current value of the convective overshoot parameter.")
    function.result_type = 'int32'
    return function
@legacy_function
def set_convective_overshoot_parameter():
    """
    Set the new value of the convective overshoot parameter.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('convective_overshoot_parameter', dtype='float64', direction=function.IN,
        description="The new value of the convective overshoot parameter.")
    function.result_type = 'int32'
    return function
@legacy_function
def get_mixing_length_ratio():
    """
    Retrieve the current value of the mixing length ratio.

    Code-wide setting (alpha); default 2.0 per define_parameters.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('mixing_length_ratio', dtype='float64', direction=function.OUT
        , description="The current value of the mixing length ratio.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        Current value was retrieved
    -1 - ERROR
        The code could not retrieve the value.
    """
    return function
@legacy_function
def set_mixing_length_ratio():
    """
    Set the value of the mixing length ratio.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('mixing_length_ratio', dtype='float64', direction=function.IN
        , description="The new value of the mixing length ratio.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value has been set.
    -1 - ERROR
        The code could not set the value.
    """
    return function
@legacy_function
def get_semi_convection_efficiency():
    """
    Retrieve the current value of the efficiency of semi-convection,
    after Heger, Langer, & Woosley 2000 (ApJ), which goes back to
    Langer, Sugimoto & Fricke 1983 (A&A).

    Code-wide setting; default 0.0 per define_parameters.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('semi_convection_efficiency', dtype='float64', direction=function.OUT
        , description="The current value of the efficiency of semi-convection.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        Current value was retrieved
    -1 - ERROR
        The code could not retrieve the value.
    """
    return function
@legacy_function
def set_semi_convection_efficiency():
    """
    Set the value of the efficiency of semi-convection,
    after Heger, Langer, & Woosley 2000 (ApJ), which goes back to
    Langer, Sugimoto & Fricke 1983 (A&A).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('semi_convection_efficiency', dtype='float64', direction=function.IN
        , description="The new value of the efficiency of semi-convection.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value has been set.
    -1 - ERROR
        The code could not set the value.
    """
    return function
@legacy_function
def get_RGB_wind_scheme():
    """
    Retrieve the current wind (mass loss) scheme for RGB stars:
        No automatic wind (0)
        Reimers (1): e.g. see: Baschek, Kegel, Traving (eds), Springer, Berlin, 1975, p. 229.
        Blocker (2): T. Blocker, A&A 297, 727-738 (1995)
        de Jager (3): de Jager, C., Nieuwenhuijzen, H., & van der Hucht, K. A. 1988, A&AS, 72, 259
        Dutch (4): Glebbeek et al 2009, Vink et al 2001, Nugis & Lamers 2000, de Jager 1990
        Mattsson (5)

    The per-scheme efficiencies are set via the *_wind_efficiency parameters.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('RGB_wind_scheme', dtype='int32', direction=function.OUT
        , description="The current wind (mass loss) scheme for RGB stars of this instance.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        Current value was retrieved
    -1 - ERROR
        The code could not retrieve the value.
    """
    return function
@legacy_function
def set_RGB_wind_scheme():
    """
    Set the new wind (mass loss) scheme for RGB stars:
        No automatic wind (0)
        Reimers (1): e.g. see: Baschek, Kegel, Traving (eds), Springer, Berlin, 1975, p. 229.
        Blocker (2): T. Blocker, A&A 297, 727-738 (1995)
        de Jager (3): de Jager, C., Nieuwenhuijzen, H., & van der Hucht, K. A. 1988, A&AS, 72, 259
        Dutch (4): Glebbeek et al 2009, Vink et al 2001, Nugis & Lamers 2000, de Jager 1990
        Mattsson (5)
    """
    function = LegacyFunctionSpecification()
    function.addParameter('RGB_wind_scheme', dtype='int32', direction=function.IN
        , description="The new wind (mass loss) scheme for RGB stars of this instance.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value has been set.
    -1 - ERROR
        The code could not set the value.
    """
    return function
@legacy_function
def get_AGB_wind_scheme():
    """
    Retrieve the current wind (mass loss) scheme for AGB stars:
        No automatic wind (0)
        Reimers (1): e.g. see: Baschek, Kegel, Traving (eds), Springer, Berlin, 1975, p. 229.
        Blocker (2): T. Blocker, A&A 297, 727-738 (1995)
        de Jager (3): de Jager, C., Nieuwenhuijzen, H., & van der Hucht, K. A. 1988, A&AS, 72, 259
        Dutch (4): Glebbeek et al 2009, Vink et al 2001, Nugis & Lamers 2000, de Jager 1990
        Mattsson (5)

    Mirrors get_RGB_wind_scheme for the AGB phase.
    """
    function = LegacyFunctionSpecification()
    function.addParameter('AGB_wind_scheme', dtype='int32', direction=function.OUT
        , description="The current wind (mass loss) scheme for AGB stars of this instance.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        Current value was retrieved
    -1 - ERROR
        The code could not retrieve the value.
    """
    return function
@legacy_function
def set_AGB_wind_scheme():
    """
    Set the new wind (mass loss) scheme for AGB stars:
        No automatic wind (0)
        Reimers (1): e.g. see: Baschek, Kegel, Traving (eds), Springer, Berlin, 1975, p. 229.
        Blocker (2): T. Blocker, A&A 297, 727-738 (1995)
        de Jager (3): de Jager, C., Nieuwenhuijzen, H., & van der Hucht, K. A. 1988, A&AS, 72, 259
        Dutch (4): Glebbeek et al 2009, Vink et al 2001, Nugis & Lamers 2000, de Jager 1990
        Mattsson (5)
    """
    function = LegacyFunctionSpecification()
    function.addParameter('AGB_wind_scheme', dtype='int32', direction=function.IN
        , description="The new wind (mass loss) scheme for AGB stars of this instance.")
    function.result_type = 'int32'
    function.result_doc = """
    0 - OK
        The value has been set.
    -1 - ERROR
        The code could not set the value.
    """
    return function
@legacy_function
def get_reimers_wind_efficiency():
    """
    Retrieve the Reimers mass loss efficiency (used when a wind scheme is 1).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('reimers_wind_efficiency', dtype='float64', direction=function.OUT)
    function.result_type = 'int32'
    return function
@legacy_function
def set_reimers_wind_efficiency():
    """
    Set the Reimers mass loss efficiency (used when a wind scheme is 1).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('reimers_wind_efficiency', dtype='float64', direction=function.IN)
    function.result_type = 'int32'
    return function
@legacy_function
def get_blocker_wind_efficiency():
    """
    Retrieve the Blocker mass loss efficiency (used when a wind scheme is 2).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('blocker_wind_efficiency', dtype='float64', direction=function.OUT)
    function.result_type = 'int32'
    return function
@legacy_function
def set_blocker_wind_efficiency():
    """
    Set the Blocker mass loss efficiency (used when a wind scheme is 2).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('blocker_wind_efficiency', dtype='float64', direction=function.IN)
    function.result_type = 'int32'
    return function
@legacy_function
def get_de_jager_wind_efficiency():
    """
    Retrieve the de Jager mass loss efficiency (used when a wind scheme is 3).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('de_jager_wind_efficiency', dtype='float64', direction=function.OUT)
    function.result_type = 'int32'
    return function
@legacy_function
def set_de_jager_wind_efficiency():
    """
    Set the de Jager mass loss efficiency (used when a wind scheme is 3).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('de_jager_wind_efficiency', dtype='float64', direction=function.IN)
    function.result_type = 'int32'
    return function
@legacy_function
def get_dutch_wind_efficiency():
    """
    Retrieve the Dutch mass loss efficiency (used when a wind scheme is 4).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('dutch_wind_efficiency', dtype='float64', direction=function.OUT)
    function.result_type = 'int32'
    return function
@legacy_function
def set_dutch_wind_efficiency():
    """
    Set the Dutch mass loss efficiency (used when a wind scheme is 4).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('dutch_wind_efficiency', dtype='float64', direction=function.IN)
    function.result_type = 'int32'
    return function
@legacy_function
def get_stabilize_new_stellar_model_flag():
    """
    Retrieve the flag controlling whether imported stellar models are
    stabilized before use (int used as boolean; see define_parameters).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('stabilize_new_stellar_model_flag', dtype='int32', direction=function.OUT)
    function.result_type = 'int32'
    return function
@legacy_function
def set_stabilize_new_stellar_model_flag():
    """
    Set the flag controlling whether imported stellar models are
    stabilized before use (int used as boolean; see define_parameters).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('stabilize_new_stellar_model_flag', dtype='int32', direction=function.IN)
    function.result_type = 'int32'
    return function
@legacy_function
def new_stellar_model():
    """
    Define a new star model in the code. The star needs to be finalized
    before it can evolve, see 'finalize_stellar_model'.

    Takes per-zone profile arrays (length parameter 'n' is implicit).
    NOTE(review): the X_* arrays are presumably mass fractions per species
    (they are bound unitless in define_methods) -- confirm against the
    Fortran implementation.
    """
    function = LegacyFunctionSpecification()
    function.must_handle_array = True  # all profile parameters are arrays of length n
    for par in ['d_mass', 'radius', 'rho', 'temperature', 'luminosity',
            'X_H', 'X_He', 'X_C', 'X_N', 'X_O', 'X_Ne', 'X_Mg', 'X_Si', 'X_Fe']:
        function.addParameter(par, dtype='float64', direction=function.IN)
    function.addParameter('n', 'int32', function.LENGTH)
    function.result_type = 'int32'
    return function
@legacy_function
def finalize_stellar_model():
    """
    Finalize the new star model defined by 'new_stellar_model'.

    Returns the index assigned to the imported star; used as the set_new
    function of the 'imported_stars' particle set (see define_particle_sets).
    """
    function = LegacyFunctionSpecification()
    function.addParameter('index_of_the_star', dtype='int32',
        direction=function.OUT, description = "The new index for the star. "
        "This index can be used to refer to this star in other functions")
    function.addParameter('age_tag', dtype='float64', direction=function.IN,
        description = "The initial age of the star")
    function.result_type = 'int32'
    return function
class MESA(StellarEvolution, InternalStellarStructure):
def __init__(self, **options):
    """
    Start a MESA worker process and point it at the inlist, MESA data
    directory, and AMUSE output directory.
    """
    InCodeComponentImplementation.__init__(self, MESAInterface(**options), **options)
    output_dir = self.get_output_directory()
    # A distributed channel runs the worker remotely, so creating the local
    # starting-models directory only makes sense for non-distributed channels.
    if not self.channel_type == 'distributed':
        self.ensure_data_directory_exists(os.path.join(output_dir, 'star_data', 'starting_models'))
    self.set_MESA_paths(
        self.default_path_to_inlist,
        self.default_path_to_MESA_data,
        output_dir
    )
    # Local bookkeeping of the evolved model time, starting at t = 0.
    self.model_time = 0.0 | units.yr
def define_parameters(self, handler):
    """
    Declare the code-wide MESA parameters (metallicity, stop conditions,
    mixing/overshoot settings, wind schemes and efficiencies) with their
    getter/setter pairs and AMUSE-side defaults.
    """
    handler.add_method_parameter(
        "get_metallicity",
        "set_metallicity",
        "metallicity",
        "Metallicity of all stars",
        default_value = 0.02
    )
    handler.add_method_parameter(
        "get_max_age_stop_condition",
        "set_max_age_stop_condition",
        "max_age_stop_condition",
        "The maximum age stop condition of this instance.",
        default_value = 1.0e36 | units.yr
    )
    handler.add_method_parameter(
        "get_min_timestep_stop_condition",
        "set_min_timestep_stop_condition",
        "min_timestep_stop_condition",
        "The minimum timestep stop condition of this instance.",
        default_value = 1.0e-6 | units.s
    )
    handler.add_method_parameter(
        "get_max_iter_stop_condition",
        "set_max_iter_stop_condition",
        "max_iter_stop_condition",
        "The maximum number of iterations of this instance. (Negative means no maximum)",
        default_value = -1111
    )
    handler.add_method_parameter(
        "get_convective_overshoot_parameter",
        "set_convective_overshoot_parameter",
        "herwig_convective_overshoot_parameter",
        "The convective overshoot parameter (Herwig 2000), f=0.016 is argued to be a reasonable value.",
        default_value = 0.0
    )
    handler.add_method_parameter(
        "get_mixing_length_ratio",
        "set_mixing_length_ratio",
        "mixing_length_ratio",
        "The mixing-length ratio (alpha).",
        default_value = 2.0
    )
    handler.add_method_parameter(
        "get_semi_convection_efficiency",
        "set_semi_convection_efficiency",
        "semi_convection_efficiency",
        "The efficiency of semi-convection, after Heger, Langer, & Woosley 2000 (ApJ), "
        "which goes back to Langer, Sugimoto & Fricke 1983 (A&A).",
        default_value = 0.0
    )
    handler.add_method_parameter(
        "get_RGB_wind_scheme",
        "set_RGB_wind_scheme",
        "RGB_wind_scheme",
        "The mass loss scheme for RGB stars: none (0), Reimers (1), "
        "Blocker (2), de Jager (3), Dutch (4), Mattsson (5)",
        default_value = 1
    )
    handler.add_method_parameter(
        "get_AGB_wind_scheme",
        "set_AGB_wind_scheme",
        "AGB_wind_scheme",
        "The mass loss scheme for AGB stars: none (0), Reimers (1), "
        "Blocker (2), de Jager (3), Dutch (4), Mattsson (5)",
        default_value = 1
    )
    handler.add_method_parameter(
        "get_reimers_wind_efficiency",
        "set_reimers_wind_efficiency",
        "reimers_wind_efficiency",
        "The Reimers mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 1).",
        default_value = 0.5
    )
    handler.add_method_parameter(
        "get_blocker_wind_efficiency",
        "set_blocker_wind_efficiency",
        "blocker_wind_efficiency",
        "The Blocker mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 2).",
        default_value = 0.1
    )
    handler.add_method_parameter(
        "get_de_jager_wind_efficiency",
        "set_de_jager_wind_efficiency",
        "de_jager_wind_efficiency",
        "The de Jager mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 3).",
        default_value = 0.8
    )
    handler.add_method_parameter(
        "get_dutch_wind_efficiency",
        "set_dutch_wind_efficiency",
        "dutch_wind_efficiency",
        "The Dutch mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 4).",
        default_value = 0.8
    )
    handler.add_boolean_parameter(
        "get_stabilize_new_stellar_model_flag",
        "set_stabilize_new_stellar_model_flag",
        "stabilize_new_stellar_model_flag",
        "Flag specifying whether to stabilize any loaded stellar models first.",
        default_value = True
    )
def define_particle_sets(self, handler):
    """
    Declare the 'particles' superset and its three subsets -- native_stars
    (ZAMS), imported_stars (from new_stellar_model/finalize_stellar_model)
    and pre_ms_stars -- and attach the per-star getters/setters and
    profile/species methods to each subset.
    """
    handler.define_super_set('particles', ['native_stars', 'imported_stars', 'pre_ms_stars'],
        index_to_default_set = 0)  # new particles default to native_stars
    handler.define_set('imported_stars', 'index_of_the_star')
    handler.set_new('imported_stars', 'finalize_stellar_model')
    handler.set_delete('imported_stars', 'delete_star')
    handler.define_set('native_stars', 'index_of_the_star')
    handler.set_new('native_stars', 'new_particle')
    handler.set_delete('native_stars', 'delete_star')
    handler.define_set('pre_ms_stars', 'index_of_the_star')
    handler.set_new('pre_ms_stars', 'new_pre_ms_particle')
    handler.set_delete('pre_ms_stars', 'delete_star')
    # All three subsets expose the same attributes and methods.
    for particle_set_name in ['native_stars', 'imported_stars', 'pre_ms_stars']:
        handler.add_getter(particle_set_name, 'get_radius', names = ('radius',))
        handler.add_getter(particle_set_name, 'get_stellar_type', names = ('stellar_type',))
        handler.add_getter(particle_set_name, 'get_mass', names = ('mass',))
        handler.add_setter(particle_set_name, 'set_mass', names = ('mass',))
        handler.add_getter(particle_set_name, 'get_core_mass', names = ('core_mass',))
        handler.add_getter(particle_set_name, 'get_mass_loss_rate', names = ('wind',))
        handler.add_getter(particle_set_name, 'get_age', names = ('age',))
        handler.add_getter(particle_set_name, 'get_time_step', names = ('time_step',))
        handler.add_setter(particle_set_name, 'set_time_step', names = ('time_step',))
        handler.add_getter(particle_set_name, 'get_luminosity', names = ('luminosity',))
        handler.add_getter(particle_set_name, 'get_temperature', names = ('temperature',))
        handler.add_getter(particle_set_name, 'get_manual_mass_transfer_rate', names = ('mass_change',))
        handler.add_setter(particle_set_name, 'set_manual_mass_transfer_rate', names = ('mass_change',))
        handler.add_method(particle_set_name, 'get_accrete_same_as_surface')
        handler.add_method(particle_set_name, 'set_accrete_same_as_surface')
        handler.add_method(particle_set_name, 'get_accrete_composition_non_metals')
        handler.add_method(particle_set_name, 'set_accrete_composition_non_metals')
        handler.add_method(particle_set_name, 'get_accrete_composition_metals_identifier')
        handler.add_method(particle_set_name, 'set_accrete_composition_metals_identifier')
        handler.add_method(particle_set_name, 'get_accrete_composition_metals')
        handler.add_method(particle_set_name, 'set_accrete_composition_metals')
        handler.add_method(particle_set_name, 'evolve_one_step')
        handler.add_method(particle_set_name, 'evolve_for')
        InternalStellarStructure.define_particle_sets(
            self,
            handler,
            set_name = particle_set_name
        )
        handler.add_method(particle_set_name, 'get_mass_profile')
        handler.add_method(particle_set_name, 'set_mass_profile')
        handler.add_method(particle_set_name, 'get_cumulative_mass_profile')
        handler.add_method(particle_set_name, 'get_luminosity_profile')
        handler.add_method(particle_set_name, 'set_luminosity_profile')
        handler.add_method(particle_set_name, 'get_entropy_profile')
        handler.add_method(particle_set_name, 'get_thermal_energy_profile')
        handler.add_method(particle_set_name, 'get_brunt_vaisala_frequency_squared_profile')
        handler.add_method(particle_set_name, 'get_IDs_of_species')
        handler.add_method(particle_set_name, 'get_masses_of_species')
        handler.add_method(particle_set_name, 'get_number_of_backups_in_a_row')
        handler.add_method(particle_set_name, 'reset_number_of_backups_in_a_row')
def define_state(self, handler):
    """
    Extend the base stellar-evolution state model: allow creating pre-MS
    particles and importing stellar models while in EDIT/UPDATE, and drop
    back from RUN to UPDATE when either is invoked.
    """
    StellarEvolution.define_state(self, handler)
    for method_name in ('new_pre_ms_particle', 'finalize_stellar_model'):
        handler.add_method('EDIT', method_name)
        handler.add_method('UPDATE', method_name)
        handler.add_transition('RUN', 'UPDATE', method_name, False)
def define_errorcodes(self, handler):
    """
    Register the MESA-specific error codes and their human-readable messages
    on top of the shared stellar-structure error codes.
    """
    InternalStellarStructure.define_errorcodes(self, handler)
    mesa_errorcodes = (
        (-1, 'Something went wrong...'),
        (-4, 'Not implemented.'),
        (-11, 'Evolve terminated: Unspecified stop condition reached.'),
        (-12, 'Evolve terminated: Maximum age reached.'),
        (-13, 'Evolve terminated: Maximum number of iterations reached.'),
        (-14, 'Evolve terminated: Maximum number of backups reached.'),
        (-15, 'Evolve terminated: Minimum timestep limit reached.'),
    )
    for code, message in mesa_errorcodes:
        handler.add_errorcode(code, message)
def define_methods(self, handler):
    """
    Bind AMUSE units and error-code handling to every low-level interface
    function: per-star attributes, per-zone profile accessors, species
    queries, model import, and the code-wide parameter getters/setters.
    """
    InternalStellarStructure.define_methods(self, handler)
    StellarEvolution.define_methods(self, handler)
    handler.add_method(
        "new_pre_ms_particle",
        (units.MSun),
        (handler.INDEX, handler.ERROR_CODE)
    )
    handler.add_method(
        "set_time_step",
        (handler.INDEX, units.yr),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_core_mass",
        (handler.INDEX,),
        (units.MSun, handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_mass_loss_rate",
        (handler.INDEX,),
        (units.g / units.s, handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_manual_mass_transfer_rate",
        (handler.INDEX,),
        (units.MSun / units.yr, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_manual_mass_transfer_rate",
        (handler.INDEX, units.MSun / units.yr),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_number_of_backups_in_a_row",
        (handler.INDEX,),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "reset_number_of_backups_in_a_row",
        (handler.INDEX,),
        (handler.ERROR_CODE,)
    )
    # Per-zone profile accessors: (star index, zone index) -> value.
    handler.add_method(
        "get_mass_fraction_at_zone",
        (handler.INDEX,handler.NO_UNIT,),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_mass_fraction_at_zone",
        (handler.INDEX, handler.NO_UNIT, handler.NO_UNIT,),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_luminosity_at_zone",
        (handler.INDEX,handler.NO_UNIT,),
        (units.erg/units.s, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_luminosity_at_zone",
        (handler.INDEX, handler.NO_UNIT, units.erg/units.s,),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_entropy_at_zone",
        (handler.INDEX,handler.NO_UNIT,),
        (units.erg/units.K, handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_thermal_energy_at_zone",
        (handler.INDEX,handler.NO_UNIT,),
        (units.erg/units.g, handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_id_of_species",
        (handler.INDEX,handler.NO_UNIT,),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_mass_of_species",
        (handler.INDEX,handler.NO_UNIT,),
        (units.amu, handler.ERROR_CODE,)
    )
    # Renamed to a private name: only the profile setters should call it.
    handler.add_method(
        "erase_memory",
        (handler.INDEX,),
        (handler.ERROR_CODE,),
        public_name = "_erase_memory"
    )
    handler.add_method(
        "new_stellar_model",
        (units.MSun, units.cm, units.g / units.cm**3, units.K, units.erg / units.s,
            handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT,
            handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT,),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "finalize_stellar_model",
        (units.yr,),
        (handler.INDEX, handler.ERROR_CODE,)
    )
    # Code-wide parameter getters/setters (see define_parameters).
    handler.add_method(
        "get_max_age_stop_condition",
        (),
        (units.yr, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_max_age_stop_condition",
        (units.yr, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_min_timestep_stop_condition",
        (),
        (units.s, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_min_timestep_stop_condition",
        (units.s, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_max_iter_stop_condition",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_max_iter_stop_condition",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_mixing_length_ratio",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_mixing_length_ratio",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_semi_convection_efficiency",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_semi_convection_efficiency",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_RGB_wind_scheme",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_RGB_wind_scheme",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_AGB_wind_scheme",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_AGB_wind_scheme",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_reimers_wind_efficiency",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_reimers_wind_efficiency",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_blocker_wind_efficiency",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_blocker_wind_efficiency",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_de_jager_wind_efficiency",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_de_jager_wind_efficiency",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    handler.add_method(
        "get_dutch_wind_efficiency",
        (),
        (handler.NO_UNIT, handler.ERROR_CODE,)
    )
    handler.add_method(
        "set_dutch_wind_efficiency",
        (handler.NO_UNIT, ),
        (handler.ERROR_CODE,)
    )
    def initialize_module_with_default_parameters(self):
        """Reset all parameters to their default values, then initialize the code."""
        self.parameters.set_defaults()
        self.initialize_code()
    def initialize_module_with_current_parameters(self):
        """Initialize the code using the parameter values as currently set."""
        self.initialize_code()
    def commit_parameters(self):
        """Push all pending parameter values to the code, then commit.

        Parameters never set explicitly are sent first, then cached values,
        and finally the commit is forwarded via the overridden interface.
        The order of these calls is significant.
        """
        self.parameters.send_not_set_parameters_to_code()
        self.parameters.send_cached_parameters_to_code()
        self.overridden().commit_parameters()
def get_mass_profile(self, indices_of_the_stars, number_of_zones = None):
indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying mass profiles")
if number_of_zones is None:
number_of_zones = self.get_number_of_zones(indices_of_the_stars)
return self.get_mass_fraction_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
def get_cumulative_mass_profile(self, indices_of_the_stars, number_of_zones = None):
frac_profile = self.get_mass_profile(indices_of_the_stars, number_of_zones = number_of_zones)
return frac_profile.cumsum()
    def set_mass_profile(self, indices_of_the_stars, values, number_of_zones = None):
        """Set the mass fraction (dq) of every zone of a single star.

        `values` must contain exactly one entry per zone; the zone count is
        queried from the code when `number_of_zones` is not supplied.
        """
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Setting mass profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        self._check_supplied_values(len(values), number_of_zones)
        self.set_mass_fraction_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none, values)
        # Cached structure data is stale after editing the star; drop it if
        # this interface supports erasing memory.
        if hasattr(self, "_erase_memory"):
            self._erase_memory(indices_of_the_stars)
def get_luminosity_profile(self, indices_of_the_stars, number_of_zones = None):
indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying luminosity profiles")
if number_of_zones is None:
number_of_zones = self.get_number_of_zones(indices_of_the_stars)
return self.get_luminosity_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
    def set_luminosity_profile(self, indices_of_the_stars, values, number_of_zones = None):
        """Set the luminosity of every zone of a single star.

        `values` must contain exactly one entry per zone; the zone count is
        queried from the code when `number_of_zones` is not supplied.
        """
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Setting luminosity profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        self._check_supplied_values(len(values), number_of_zones)
        self.set_luminosity_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none, values)
        # Cached structure data is stale after editing the star; drop it if
        # this interface supports erasing memory.
        if hasattr(self, "_erase_memory"):
            self._erase_memory(indices_of_the_stars)
def get_entropy_profile(self, indices_of_the_stars, number_of_zones = None):
indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying entropy profiles")
if number_of_zones is None:
number_of_zones = self.get_number_of_zones(indices_of_the_stars)
return self.get_entropy_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
def get_thermal_energy_profile(self, indices_of_the_stars, number_of_zones = None):
indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying thermal energy profiles")
if number_of_zones is None:
number_of_zones = self.get_number_of_zones(indices_of_the_stars)
return self.get_thermal_energy_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
    def get_brunt_vaisala_frequency_squared_profile(self, indices_of_the_stars, number_of_zones = None):
        """Return the Brunt-Vaisala frequency squared (N^2) at each zone of one star."""
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying brunt-vaisala-frequency-squared profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        return self.get_brunt_vaisala_frequency_squared_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
def get_IDs_of_species(self, indices_of_the_stars, number_of_species = None):
indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying chemical abundance IDs")
if number_of_species is None:
number_of_species = self.get_number_of_species(indices_of_the_stars)
return list(self.get_id_of_species(
[indices_of_the_stars]*number_of_species,
list(range(1,number_of_species+1))
))
def get_masses_of_species(self, indices_of_the_stars, number_of_species = None):
indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying chemical abundance mass numbers")
if number_of_species is None:
number_of_species = self.get_number_of_species(indices_of_the_stars)
return self.get_mass_of_species(
[indices_of_the_stars]*number_of_species,
list(range(1,number_of_species+1))
)
def new_particle_from_model(self, internal_structure, current_age=0|units.Myr, key=None):
if isinstance(internal_structure, dict):
if "dmass" in internal_structure:
mass_profile = internal_structure['dmass'][::-1]
else:
cumulative_mass_profile = [0.0] | units.MSun
cumulative_mass_profile.extend(internal_structure['mass'])
mass_profile = (cumulative_mass_profile[1:] - cumulative_mass_profile[:-1])[::-1]
self.new_stellar_model(
mass_profile,
internal_structure['radius'][::-1],
internal_structure['rho'][::-1],
internal_structure['temperature'][::-1],
internal_structure['luminosity'][::-1],
internal_structure['X_H'][::-1],
internal_structure['X_He'][::-1],
internal_structure['X_C'][::-1],
internal_structure['X_N'][::-1],
internal_structure['X_O'][::-1],
internal_structure['X_Ne'][::-1],
internal_structure['X_Mg'][::-1],
internal_structure['X_Si'][::-1],
internal_structure['X_Fe'][::-1]
)
else:
if hasattr(internal_structure, "dmass"):
mass_profile = internal_structure.dmass[::-1]
else:
cumulative_mass_profile = [0.0] | units.MSun
cumulative_mass_profile.extend(internal_structure.mass)
mass_profile = (cumulative_mass_profile[1:] - cumulative_mass_profile[:-1])[::-1]
self.new_stellar_model(
mass_profile,
internal_structure.radius[::-1],
internal_structure.rho[::-1],
internal_structure.temperature[::-1],
internal_structure.luminosity[::-1],
internal_structure.X_H[::-1],
internal_structure.X_He[::-1],
internal_structure.X_C[::-1],
internal_structure.X_N[::-1],
internal_structure.X_O[::-1],
internal_structure.X_Ne[::-1],
internal_structure.X_Mg[::-1],
internal_structure.X_Si[::-1],
internal_structure.X_Fe[::-1]
)
tmp_star = datamodel.Particle(key=key)
tmp_star.age_tag = current_age
return self.imported_stars.add_particle(tmp_star)
Mesa = MESA
| 43.75097 | 152 | 0.635122 | import os
import numpy
from operator import itemgetter
from amuse.community import *
from amuse.community.interface.se import StellarEvolution, StellarEvolutionInterface, \
InternalStellarStructure, InternalStellarStructureInterface
from amuse.units.quantities import VectorQuantity
from amuse.support.interface import InCodeComponentImplementation
from amuse.support.options import option
class MESAInterface(CodeInterface, LiteratureReferencesMixIn, StellarEvolutionInterface,
InternalStellarStructureInterface, CodeWithDataDirectories):
    def __init__(self, **options):
        # Start the "mesa_worker" process and initialize the literature
        # reference and data directory mixins.
        CodeInterface.__init__(self, name_of_the_worker="mesa_worker", **options)
        LiteratureReferencesMixIn.__init__(self)
        CodeWithDataDirectories.__init__(self)
    @property
    def default_path_to_inlist(self):
        """Full path of the AMUSE inlist file inside the code's data directory."""
        return os.path.join(self.get_data_directory(), 'AMUSE_inlist')
    # NOTE(review): sections=('data') is a plain string, not a 1-tuple —
    # confirm the `option` descriptor accepts a bare string here.
    @option(type="string", sections=('data'))
    def default_path_to_MESA_data(self):
        """Default location of the MESA data directory in the AMUSE source tree."""
        return os.path.join(self.amuse_root_directory, 'src', 'amuse', 'community', 'mesa', 'src', 'mesa', 'data')
@legacy_function
def set_MESA_paths():
function = LegacyFunctionSpecification()
function.addParameter('inlist_path', dtype='string', direction=function.IN,
description = "Path to the inlist file.")
function.addParameter('MESA_data_path', dtype='string', direction=function.IN,
description = "Path to the data directory.")
function.addParameter('local_data_path', dtype='string', direction=function.IN,
description = "Path to the data directory.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was set
-1 - ERROR
Directory does not exist
"""
return function
@legacy_function
def get_maximum_number_of_stars():
function = LegacyFunctionSpecification()
function.addParameter('maximum_number_of_stars', dtype='int32', direction=function.OUT,
description = "The current value of the maximum number of stars")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value of was retrieved
"""
return function
    @legacy_function
    def new_zams_model():
        """Specification for building a new zero-age-main-sequence model.

        NOTE(review): unlike the other specs in this class, the status code is
        returned through an OUT parameter and no result_type is declared —
        confirm this asymmetry is intentional.
        """
        function = LegacyFunctionSpecification()
        function.addParameter('status', dtype='int32', direction=function.OUT)
        return function
@legacy_function
def new_pre_ms_particle():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.OUT
, description="The new index for the star. This index can be used to refer to this star in other functions")
function.addParameter('mass', dtype='float64', direction=function.IN
, description="The initial mass of the star")
function.result_type = 'int32'
return function
@legacy_function
def set_time_step():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('time_step', dtype='float64', direction=function.IN
, description="The next timestep for the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
A star with the given index was not found.
"""
return function
    @legacy_function
    def get_core_mass():
        """Specification for querying a star's hydrogen-depleted core mass."""
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
            , description="The index of the star to get the value of")
        function.addParameter('core_mass', dtype='float64', direction=function.OUT
            , description="The current core mass of the star, where hydrogen abundance is <= h1_boundary_limit")
        function.result_type = 'int32'
        return function
@legacy_function
def get_mass_loss_rate():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('mass_loss_rate', dtype='float64', direction=function.OUT
, description="The current mass loss rate of the star. (positive for winds, negative for accretion)")
function.result_type = 'int32'
return function
@legacy_function
def get_manual_mass_transfer_rate():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('mass_change', dtype='float64', direction=function.OUT
, description="The current user-specified mass transfer rate of the star. (negative for winds, positive for accretion)")
function.result_type = 'int32'
return function
@legacy_function
def set_manual_mass_transfer_rate():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('mass_change', dtype='float64', direction=function.IN
, description="The new user-specified mass transfer rate of the star. (negative for winds, positive for accretion)")
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_same_as_surface():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_same_as_surface_flag', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_same_as_surface():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_same_as_surface_flag', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_composition_non_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('h1', dtype='float64', direction=function.OUT)
function.addParameter('h2', dtype='float64', direction=function.OUT)
function.addParameter('he3', dtype='float64', direction=function.OUT)
function.addParameter('he4', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_composition_non_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('h1', dtype='float64', direction=function.IN)
function.addParameter('h2', dtype='float64', direction=function.IN)
function.addParameter('he3', dtype='float64', direction=function.IN)
function.addParameter('he4', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_composition_metals_identifier():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_composition_metals_identifier', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_composition_metals_identifier():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('accrete_composition_metals_identifier', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_accrete_composition_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('li', dtype='float64', direction=function.OUT)
function.addParameter('be', dtype='float64', direction=function.OUT)
function.addParameter('b', dtype='float64', direction=function.OUT)
function.addParameter('c', dtype='float64', direction=function.OUT)
function.addParameter('n', dtype='float64', direction=function.OUT)
function.addParameter('o', dtype='float64', direction=function.OUT)
function.addParameter('f', dtype='float64', direction=function.OUT)
function.addParameter('ne', dtype='float64', direction=function.OUT)
function.addParameter('na', dtype='float64', direction=function.OUT)
function.addParameter('mg', dtype='float64', direction=function.OUT)
function.addParameter('al', dtype='float64', direction=function.OUT)
function.addParameter('si', dtype='float64', direction=function.OUT)
function.addParameter('p', dtype='float64', direction=function.OUT)
function.addParameter('s', dtype='float64', direction=function.OUT)
function.addParameter('cl', dtype='float64', direction=function.OUT)
function.addParameter('ar', dtype='float64', direction=function.OUT)
function.addParameter('k', dtype='float64', direction=function.OUT)
function.addParameter('ca', dtype='float64', direction=function.OUT)
function.addParameter('sc', dtype='float64', direction=function.OUT)
function.addParameter('ti', dtype='float64', direction=function.OUT)
function.addParameter('v', dtype='float64', direction=function.OUT)
function.addParameter('cr', dtype='float64', direction=function.OUT)
function.addParameter('mn', dtype='float64', direction=function.OUT)
function.addParameter('fe', dtype='float64', direction=function.OUT)
function.addParameter('co', dtype='float64', direction=function.OUT)
function.addParameter('ni', dtype='float64', direction=function.OUT)
function.addParameter('cu', dtype='float64', direction=function.OUT)
function.addParameter('zn', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_accrete_composition_metals():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN)
function.addParameter('li', dtype='float64', direction=function.IN)
function.addParameter('be', dtype='float64', direction=function.IN)
function.addParameter('b', dtype='float64', direction=function.IN)
function.addParameter('c', dtype='float64', direction=function.IN)
function.addParameter('n', dtype='float64', direction=function.IN)
function.addParameter('o', dtype='float64', direction=function.IN)
function.addParameter('f', dtype='float64', direction=function.IN)
function.addParameter('ne', dtype='float64', direction=function.IN)
function.addParameter('na', dtype='float64', direction=function.IN)
function.addParameter('mg', dtype='float64', direction=function.IN)
function.addParameter('al', dtype='float64', direction=function.IN)
function.addParameter('si', dtype='float64', direction=function.IN)
function.addParameter('p', dtype='float64', direction=function.IN)
function.addParameter('s', dtype='float64', direction=function.IN)
function.addParameter('cl', dtype='float64', direction=function.IN)
function.addParameter('ar', dtype='float64', direction=function.IN)
function.addParameter('k', dtype='float64', direction=function.IN)
function.addParameter('ca', dtype='float64', direction=function.IN)
function.addParameter('sc', dtype='float64', direction=function.IN)
function.addParameter('ti', dtype='float64', direction=function.IN)
function.addParameter('v', dtype='float64', direction=function.IN)
function.addParameter('cr', dtype='float64', direction=function.IN)
function.addParameter('mn', dtype='float64', direction=function.IN)
function.addParameter('fe', dtype='float64', direction=function.IN)
function.addParameter('co', dtype='float64', direction=function.IN)
function.addParameter('ni', dtype='float64', direction=function.IN)
function.addParameter('cu', dtype='float64', direction=function.IN)
function.addParameter('zn', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_number_of_backups_in_a_row():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of number_of_backups_in_a_row")
function.addParameter('n_backup', dtype='int32', direction=function.OUT
, description="The current number_of_backups_in_a_row of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The number_of_backups_in_a_row was retrieved.
-1 - ERROR
A star with the given index was not found.
"""
return function
@legacy_function
def reset_number_of_backups_in_a_row():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to reset the value of number_of_backups_in_a_row")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The number_of_backups_in_a_row was reset.
-1 - ERROR
A star with the given index was not found.
"""
return function
@legacy_function
def get_mass_fraction_at_zone():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('zone', dtype='int32', direction=function.IN
, description="The zone/mesh-cell of the star to get the value of")
function.addParameter('dq_i', dtype='float64', direction=function.OUT
, description="The mass fraction at the specified zone/mesh-cell of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was retrieved.
-1 - ERROR
A star with the given index was not found.
-2 - ERROR
A zone with the given index was not found.
"""
return function
@legacy_function
def set_mass_fraction_at_zone():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to set the value of")
function.addParameter('zone', dtype='int32', direction=function.IN
, description="The zone/mesh-cell of the star to set the value of")
function.addParameter('dq_i', dtype='float64', direction=function.IN
, description="The mass fraction at the specified zone/mesh-cell of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was set.
-1 - ERROR
A star with the given index was not found.
-2 - ERROR
A zone with the given index was not found.
"""
return function
@legacy_function
def get_luminosity_at_zone():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('zone', dtype='int32', direction=function.IN
, description="The zone/mesh-cell of the star to get the value of")
function.addParameter('lum_i', dtype='float64', direction=function.OUT
, description="The luminosity at the specified zone/mesh-cell of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was retrieved.
-1 - ERROR
A star with the given index was not found.
-2 - ERROR
A zone with the given index was not found.
"""
return function
@legacy_function
def set_luminosity_at_zone():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to set the value of")
function.addParameter('zone', dtype='int32', direction=function.IN
, description="The zone/mesh-cell of the star to set the value of")
function.addParameter('lum_i', dtype='float64', direction=function.IN
, description="The luminosity at the specified zone/mesh-cell of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was set.
-1 - ERROR
A star with the given index was not found.
-2 - ERROR
A zone with the given index was not found.
"""
return function
@legacy_function
def get_entropy_at_zone():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('zone', dtype='int32', direction=function.IN
, description="The zone/mesh-cell of the star to get the value of")
function.addParameter('S_i', dtype='float64', direction=function.OUT
, description="The specific entropy at the specified zone/mesh-cell of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was retrieved.
-1 - ERROR
A star with the given index was not found.
-2 - ERROR
A zone with the given index was not found.
"""
return function
@legacy_function
def get_thermal_energy_at_zone():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('zone', dtype='int32', direction=function.IN
, description="The zone/mesh-cell of the star to get the value of")
function.addParameter('E_i', dtype='float64', direction=function.OUT
, description="The specific thermal energy at the specified zone/mesh-cell of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was retrieved.
-1 - ERROR
A star with the given index was not found.
-2 - ERROR
A zone with the given index was not found.
"""
return function
    @legacy_function
    def get_brunt_vaisala_frequency_squared_at_zone():
        """Specification for querying N^2 (Brunt-Vaisala frequency squared, 1/s^2) in one zone."""
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter('index_of_the_star', dtype='int32', direction=function.IN, unit=INDEX)
        function.addParameter('zone', dtype='int32', direction=function.IN, unit=NO_UNIT)
        function.addParameter('brunt_N2', dtype='float64', direction=function.OUT, unit=units.s**-2)
        function.result_type = 'int32'
        return function
@legacy_function
def get_id_of_species():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('species', dtype='int32', direction=function.IN
, description="The species of the star to get the name of")
function.addParameter('species_id', dtype='int32', direction=function.OUT
, description="The chem_ID of the chemical abundance variable of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was retrieved.
-1 - ERROR
A star with the given index was not found.
"""
return function
@legacy_function
def get_mass_of_species():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
, description="The index of the star to get the value of")
function.addParameter('species', dtype='int32', direction=function.IN
, description="The species of the star to get the mass number of")
function.addParameter('species_mass', dtype='float64', direction=function.OUT
, description="The mass number of the chemical abundance variable of the star.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value was retrieved.
-1 - ERROR
A star with the given index was not found.
"""
return function
    @legacy_function
    def erase_memory():
        """Specification for dropping the code's cached data for one star."""
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        # NOTE(review): description looks copy-pasted from the getters; this
        # call erases state rather than getting a value.
        function.addParameter('index_of_the_star', dtype='int32', direction=function.IN
            , description="The index of the star to get the value of")
        function.result_type = 'int32'
        return function
@legacy_function
def get_max_age_stop_condition():
function = LegacyFunctionSpecification()
function.addParameter('max_age_stop_condition', dtype='float64', direction=function.OUT
, description="The current maximum age stop condition of this instance (in years).")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was retrieved
-1 - ERROR
The code could not retrieve the value.
"""
return function
@legacy_function
def set_max_age_stop_condition():
function = LegacyFunctionSpecification()
function.addParameter('max_age_stop_condition', dtype='float64', direction=function.IN
, description="The new maximum age stop condition of this instance (in years).")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
The code could not set the value.
"""
return function
@legacy_function
def get_min_timestep_stop_condition():
function = LegacyFunctionSpecification()
function.addParameter('min_timestep_stop_condition', dtype='float64', direction=function.OUT
, description="The current minimum timestep stop condition of this instance (in years).")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was retrieved
-1 - ERROR
The code could not retrieve the value.
"""
return function
@legacy_function
def set_min_timestep_stop_condition():
function = LegacyFunctionSpecification()
function.addParameter('min_timestep_stop_condition', dtype='float64', direction=function.IN
, description="The new minimum timestep stop condition of this instance (in years).")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
The code could not set the value.
"""
return function
@legacy_function
def get_max_iter_stop_condition():
function = LegacyFunctionSpecification()
function.addParameter('max_iter_stop_condition', dtype='int32', direction=function.OUT
, description="The current maximum number of iterations of this instance.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was retrieved
-1 - ERROR
The code could not retrieve the value.
"""
return function
@legacy_function
def set_max_iter_stop_condition():
function = LegacyFunctionSpecification()
function.addParameter('max_iter_stop_condition', dtype='int32', direction=function.IN
, description="The new maximum number of iterations of this instance.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
The code could not set the value.
"""
return function
@legacy_function
def get_convective_overshoot_parameter():
function = LegacyFunctionSpecification()
function.addParameter('convective_overshoot_parameter', dtype='float64', direction=function.OUT,
description="The current value of the convective overshoot parameter.")
function.result_type = 'int32'
return function
@legacy_function
def set_convective_overshoot_parameter():
function = LegacyFunctionSpecification()
function.addParameter('convective_overshoot_parameter', dtype='float64', direction=function.IN,
description="The new value of the convective overshoot parameter.")
function.result_type = 'int32'
return function
@legacy_function
def get_mixing_length_ratio():
function = LegacyFunctionSpecification()
function.addParameter('mixing_length_ratio', dtype='float64', direction=function.OUT
, description="The current value of the mixing length ratio.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was retrieved
-1 - ERROR
The code could not retrieve the value.
"""
return function
@legacy_function
def set_mixing_length_ratio():
function = LegacyFunctionSpecification()
function.addParameter('mixing_length_ratio', dtype='float64', direction=function.IN
, description="The new value of the mixing length ratio.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
The code could not set the value.
"""
return function
@legacy_function
def get_semi_convection_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('semi_convection_efficiency', dtype='float64', direction=function.OUT
, description="The current value of the efficiency of semi-convection.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was retrieved
-1 - ERROR
The code could not retrieve the value.
"""
return function
@legacy_function
def set_semi_convection_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('semi_convection_efficiency', dtype='float64', direction=function.IN
, description="The new value of the efficiency of semi-convection.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
The code could not set the value.
"""
return function
@legacy_function
def get_RGB_wind_scheme():
function = LegacyFunctionSpecification()
function.addParameter('RGB_wind_scheme', dtype='int32', direction=function.OUT
, description="The current wind (mass loss) scheme for RGB stars of this instance.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
Current value was retrieved
-1 - ERROR
The code could not retrieve the value.
"""
return function
@legacy_function
def set_RGB_wind_scheme():
function = LegacyFunctionSpecification()
function.addParameter('RGB_wind_scheme', dtype='int32', direction=function.IN
, description="The new wind (mass loss) scheme for RGB stars of this instance.")
function.result_type = 'int32'
function.result_doc = """
0 - OK
The value has been set.
-1 - ERROR
The code could not set the value.
"""
return function
    @legacy_function
    def get_AGB_wind_scheme():
        """Specification of the remote call that queries which wind (mass
        loss) scheme is used for asymptotic-giant-branch stars."""
        function = LegacyFunctionSpecification()
        function.addParameter('AGB_wind_scheme', dtype='int32', direction=function.OUT,
            description="The current wind (mass loss) scheme for AGB stars of this instance.")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
            Current value was retrieved
        -1 - ERROR
            The code could not retrieve the value.
        """
        return function
    @legacy_function
    def set_AGB_wind_scheme():
        """Specification of the remote call that selects the wind (mass
        loss) scheme used for asymptotic-giant-branch stars."""
        function = LegacyFunctionSpecification()
        function.addParameter('AGB_wind_scheme', dtype='int32', direction=function.IN,
            description="The new wind (mass loss) scheme for AGB stars of this instance.")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
            The value has been set.
        -1 - ERROR
            The code could not set the value.
        """
        return function
@legacy_function
def get_reimers_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('reimers_wind_efficiency', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_reimers_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('reimers_wind_efficiency', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_blocker_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('blocker_wind_efficiency', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_blocker_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('blocker_wind_efficiency', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_de_jager_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('de_jager_wind_efficiency', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_de_jager_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('de_jager_wind_efficiency', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_dutch_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('dutch_wind_efficiency', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_dutch_wind_efficiency():
function = LegacyFunctionSpecification()
function.addParameter('dutch_wind_efficiency', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_stabilize_new_stellar_model_flag():
function = LegacyFunctionSpecification()
function.addParameter('stabilize_new_stellar_model_flag', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_stabilize_new_stellar_model_flag():
function = LegacyFunctionSpecification()
function.addParameter('stabilize_new_stellar_model_flag', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
    @legacy_function
    def new_stellar_model():
        """Specification of the remote call that defines a new star from a
        full structure model: equal-length profile arrays of zone mass,
        radius, density, temperature, luminosity and chemical abundances."""
        function = LegacyFunctionSpecification()
        # All parameters are arrays (one entry per mesh zone).
        function.must_handle_array = True
        for par in ['d_mass', 'radius', 'rho', 'temperature', 'luminosity',
                'X_H', 'X_He', 'X_C', 'X_N', 'X_O', 'X_Ne', 'X_Mg', 'X_Si', 'X_Fe']:
            function.addParameter(par, dtype='float64', direction=function.IN)
        # Implicit length parameter shared by all arrays above.
        function.addParameter('n', 'int32', function.LENGTH)
        function.result_type = 'int32'
        return function
    @legacy_function
    def finalize_stellar_model():
        """Specification of the remote call that commits a previously
        uploaded stellar model and returns the index of the new star."""
        function = LegacyFunctionSpecification()
        function.addParameter('index_of_the_star', dtype='int32',
            direction=function.OUT, description = "The new index for the star. "
            "This index can be used to refer to this star in other functions")
        function.addParameter('age_tag', dtype='float64', direction=function.IN,
            description = "The initial age of the star")
        function.result_type = 'int32'
        return function
class MESA(StellarEvolution, InternalStellarStructure):
    def __init__(self, **options):
        """Create the MESA high-level interface and point the code at its
        inlist, data and output directories."""
        InCodeComponentImplementation.__init__(self, MESAInterface(**options), **options)
        output_dir = self.get_output_directory()
        # For non-distributed channels the directories live on the local
        # filesystem, so create them before handing the paths to MESA.
        if not self.channel_type == 'distributed':
            self.ensure_data_directory_exists(os.path.join(output_dir, 'star_data', 'starting_models'))
            self.set_MESA_paths(
                self.default_path_to_inlist,
                self.default_path_to_MESA_data,
                output_dir
            )
        self.model_time = 0.0 | units.yr
def define_parameters(self, handler):
handler.add_method_parameter(
"get_metallicity",
"set_metallicity",
"metallicity",
"Metallicity of all stars",
default_value = 0.02
)
handler.add_method_parameter(
"get_max_age_stop_condition",
"set_max_age_stop_condition",
"max_age_stop_condition",
"The maximum age stop condition of this instance.",
default_value = 1.0e36 | units.yr
)
handler.add_method_parameter(
"get_min_timestep_stop_condition",
"set_min_timestep_stop_condition",
"min_timestep_stop_condition",
"The minimum timestep stop condition of this instance.",
default_value = 1.0e-6 | units.s
)
handler.add_method_parameter(
"get_max_iter_stop_condition",
"set_max_iter_stop_condition",
"max_iter_stop_condition",
"The maximum number of iterations of this instance. (Negative means no maximum)",
default_value = -1111
)
handler.add_method_parameter(
"get_convective_overshoot_parameter",
"set_convective_overshoot_parameter",
"herwig_convective_overshoot_parameter",
"The convective overshoot parameter (Herwig 2000), f=0.016 is argued to be a reasonable value.",
default_value = 0.0
)
handler.add_method_parameter(
"get_mixing_length_ratio",
"set_mixing_length_ratio",
"mixing_length_ratio",
"The mixing-length ratio (alpha).",
default_value = 2.0
)
handler.add_method_parameter(
"get_semi_convection_efficiency",
"set_semi_convection_efficiency",
"semi_convection_efficiency",
"The efficiency of semi-convection, after Heger, Langer, & Woosley 2000 (ApJ), "
"which goes back to Langer, Sugimoto & Fricke 1983 (A&A).",
default_value = 0.0
)
handler.add_method_parameter(
"get_RGB_wind_scheme",
"set_RGB_wind_scheme",
"RGB_wind_scheme",
"The mass loss scheme for RGB stars: none (0), Reimers (1), "
"Blocker (2), de Jager (3), Dutch (4), Mattsson (5)",
default_value = 1
)
handler.add_method_parameter(
"get_AGB_wind_scheme",
"set_AGB_wind_scheme",
"AGB_wind_scheme",
"The mass loss scheme for AGB stars: none (0), Reimers (1), "
"Blocker (2), de Jager (3), Dutch (4), Mattsson (5)",
default_value = 1
)
handler.add_method_parameter(
"get_reimers_wind_efficiency",
"set_reimers_wind_efficiency",
"reimers_wind_efficiency",
"The Reimers mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 1).",
default_value = 0.5
)
handler.add_method_parameter(
"get_blocker_wind_efficiency",
"set_blocker_wind_efficiency",
"blocker_wind_efficiency",
"The Blocker mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 2).",
default_value = 0.1
)
handler.add_method_parameter(
"get_de_jager_wind_efficiency",
"set_de_jager_wind_efficiency",
"de_jager_wind_efficiency",
"The de Jager mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 3).",
default_value = 0.8
)
handler.add_method_parameter(
"get_dutch_wind_efficiency",
"set_dutch_wind_efficiency",
"dutch_wind_efficiency",
"The Dutch mass loss efficiency. Only used if (RGB/AGB_wind_scheme == 4).",
default_value = 0.8
)
handler.add_boolean_parameter(
"get_stabilize_new_stellar_model_flag",
"set_stabilize_new_stellar_model_flag",
"stabilize_new_stellar_model_flag",
"Flag specifying whether to stabilize any loaded stellar models first.",
default_value = True
)
    def define_particle_sets(self, handler):
        """Declare the particle sets exposed by this code and bind their
        attribute getters/setters and per-particle methods.

        Three sets share one super-set 'particles': natively created stars,
        imported (uploaded) models and pre-main-sequence stars.
        """
        handler.define_super_set('particles', ['native_stars', 'imported_stars', 'pre_ms_stars'],
            index_to_default_set = 0)
        handler.define_set('imported_stars', 'index_of_the_star')
        handler.set_new('imported_stars', 'finalize_stellar_model')
        handler.set_delete('imported_stars', 'delete_star')
        handler.define_set('native_stars', 'index_of_the_star')
        handler.set_new('native_stars', 'new_particle')
        handler.set_delete('native_stars', 'delete_star')
        handler.define_set('pre_ms_stars', 'index_of_the_star')
        handler.set_new('pre_ms_stars', 'new_pre_ms_particle')
        handler.set_delete('pre_ms_stars', 'delete_star')
        # All three sets expose the same attributes and methods.
        for particle_set_name in ['native_stars', 'imported_stars', 'pre_ms_stars']:
            handler.add_getter(particle_set_name, 'get_radius', names = ('radius',))
            handler.add_getter(particle_set_name, 'get_stellar_type', names = ('stellar_type',))
            handler.add_getter(particle_set_name, 'get_mass', names = ('mass',))
            handler.add_setter(particle_set_name, 'set_mass', names = ('mass',))
            handler.add_getter(particle_set_name, 'get_core_mass', names = ('core_mass',))
            handler.add_getter(particle_set_name, 'get_mass_loss_rate', names = ('wind',))
            handler.add_getter(particle_set_name, 'get_age', names = ('age',))
            handler.add_getter(particle_set_name, 'get_time_step', names = ('time_step',))
            handler.add_setter(particle_set_name, 'set_time_step', names = ('time_step',))
            handler.add_getter(particle_set_name, 'get_luminosity', names = ('luminosity',))
            handler.add_getter(particle_set_name, 'get_temperature', names = ('temperature',))
            handler.add_getter(particle_set_name, 'get_manual_mass_transfer_rate', names = ('mass_change',))
            handler.add_setter(particle_set_name, 'set_manual_mass_transfer_rate', names = ('mass_change',))
            handler.add_method(particle_set_name, 'get_accrete_same_as_surface')
            handler.add_method(particle_set_name, 'set_accrete_same_as_surface')
            handler.add_method(particle_set_name, 'get_accrete_composition_non_metals')
            handler.add_method(particle_set_name, 'set_accrete_composition_non_metals')
            handler.add_method(particle_set_name, 'get_accrete_composition_metals_identifier')
            handler.add_method(particle_set_name, 'set_accrete_composition_metals_identifier')
            handler.add_method(particle_set_name, 'get_accrete_composition_metals')
            handler.add_method(particle_set_name, 'set_accrete_composition_metals')
            handler.add_method(particle_set_name, 'evolve_one_step')
            handler.add_method(particle_set_name, 'evolve_for')
            # Inherit the generic internal-structure bindings, then add the
            # MESA-specific profile accessors.
            InternalStellarStructure.define_particle_sets(
                self,
                handler,
                set_name = particle_set_name
            )
            handler.add_method(particle_set_name, 'get_mass_profile')
            handler.add_method(particle_set_name, 'set_mass_profile')
            handler.add_method(particle_set_name, 'get_cumulative_mass_profile')
            handler.add_method(particle_set_name, 'get_luminosity_profile')
            handler.add_method(particle_set_name, 'set_luminosity_profile')
            handler.add_method(particle_set_name, 'get_entropy_profile')
            handler.add_method(particle_set_name, 'get_thermal_energy_profile')
            handler.add_method(particle_set_name, 'get_brunt_vaisala_frequency_squared_profile')
            handler.add_method(particle_set_name, 'get_IDs_of_species')
            handler.add_method(particle_set_name, 'get_masses_of_species')
            handler.add_method(particle_set_name, 'get_number_of_backups_in_a_row')
            handler.add_method(particle_set_name, 'reset_number_of_backups_in_a_row')
    def define_state(self, handler):
        """Extend the base state machine so pre-main-sequence particles and
        imported stellar models can also be created after evolution started."""
        StellarEvolution.define_state(self, handler)
        handler.add_method('EDIT', 'new_pre_ms_particle')
        handler.add_method('UPDATE', 'new_pre_ms_particle')
        # Final False argument presumably means no code restart is required
        # for the RUN -> UPDATE transition — confirm against the framework.
        handler.add_transition('RUN', 'UPDATE', 'new_pre_ms_particle', False)
        handler.add_method('EDIT', 'finalize_stellar_model')
        handler.add_method('UPDATE', 'finalize_stellar_model')
        handler.add_transition('RUN', 'UPDATE', 'finalize_stellar_model', False)
    def define_errorcodes(self, handler):
        """Map the integer error codes returned by the low-level interface
        to human-readable messages."""
        InternalStellarStructure.define_errorcodes(self, handler)
        handler.add_errorcode(-1, 'Something went wrong...')
        handler.add_errorcode(-4, 'Not implemented.')
        # -11 .. -15: the various reasons evolution can terminate.
        handler.add_errorcode(-11, 'Evolve terminated: Unspecified stop condition reached.')
        handler.add_errorcode(-12, 'Evolve terminated: Maximum age reached.')
        handler.add_errorcode(-13, 'Evolve terminated: Maximum number of iterations reached.')
        handler.add_errorcode(-14, 'Evolve terminated: Maximum number of backups reached.')
        handler.add_errorcode(-15, 'Evolve terminated: Minimum timestep limit reached.')
def define_methods(self, handler):
InternalStellarStructure.define_methods(self, handler)
StellarEvolution.define_methods(self, handler)
handler.add_method(
"new_pre_ms_particle",
(units.MSun),
(handler.INDEX, handler.ERROR_CODE)
)
handler.add_method(
"set_time_step",
(handler.INDEX, units.yr),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_core_mass",
(handler.INDEX,),
(units.MSun, handler.ERROR_CODE,)
)
handler.add_method(
"get_mass_loss_rate",
(handler.INDEX,),
(units.g / units.s, handler.ERROR_CODE,)
)
handler.add_method(
"get_manual_mass_transfer_rate",
(handler.INDEX,),
(units.MSun / units.yr, handler.ERROR_CODE,)
)
handler.add_method(
"set_manual_mass_transfer_rate",
(handler.INDEX, units.MSun / units.yr),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_number_of_backups_in_a_row",
(handler.INDEX,),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"reset_number_of_backups_in_a_row",
(handler.INDEX,),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_mass_fraction_at_zone",
(handler.INDEX,handler.NO_UNIT,),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_mass_fraction_at_zone",
(handler.INDEX, handler.NO_UNIT, handler.NO_UNIT,),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_luminosity_at_zone",
(handler.INDEX,handler.NO_UNIT,),
(units.erg/units.s, handler.ERROR_CODE,)
)
handler.add_method(
"set_luminosity_at_zone",
(handler.INDEX, handler.NO_UNIT, units.erg/units.s,),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_entropy_at_zone",
(handler.INDEX,handler.NO_UNIT,),
(units.erg/units.K, handler.ERROR_CODE,)
)
handler.add_method(
"get_thermal_energy_at_zone",
(handler.INDEX,handler.NO_UNIT,),
(units.erg/units.g, handler.ERROR_CODE,)
)
handler.add_method(
"get_id_of_species",
(handler.INDEX,handler.NO_UNIT,),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"get_mass_of_species",
(handler.INDEX,handler.NO_UNIT,),
(units.amu, handler.ERROR_CODE,)
)
handler.add_method(
"erase_memory",
(handler.INDEX,),
(handler.ERROR_CODE,),
public_name = "_erase_memory"
)
handler.add_method(
"new_stellar_model",
(units.MSun, units.cm, units.g / units.cm**3, units.K, units.erg / units.s,
handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT,
handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT, handler.NO_UNIT,),
(handler.ERROR_CODE,)
)
handler.add_method(
"finalize_stellar_model",
(units.yr,),
(handler.INDEX, handler.ERROR_CODE,)
)
handler.add_method(
"get_max_age_stop_condition",
(),
(units.yr, handler.ERROR_CODE,)
)
handler.add_method(
"set_max_age_stop_condition",
(units.yr, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_min_timestep_stop_condition",
(),
(units.s, handler.ERROR_CODE,)
)
handler.add_method(
"set_min_timestep_stop_condition",
(units.s, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_max_iter_stop_condition",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_max_iter_stop_condition",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_mixing_length_ratio",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_mixing_length_ratio",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_semi_convection_efficiency",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_semi_convection_efficiency",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_RGB_wind_scheme",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_RGB_wind_scheme",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_AGB_wind_scheme",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_AGB_wind_scheme",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_reimers_wind_efficiency",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_reimers_wind_efficiency",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_blocker_wind_efficiency",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_blocker_wind_efficiency",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_de_jager_wind_efficiency",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_de_jager_wind_efficiency",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
handler.add_method(
"get_dutch_wind_efficiency",
(),
(handler.NO_UNIT, handler.ERROR_CODE,)
)
handler.add_method(
"set_dutch_wind_efficiency",
(handler.NO_UNIT, ),
(handler.ERROR_CODE,)
)
    def initialize_module_with_default_parameters(self):
        """Reset every parameter to its default, then initialize the code."""
        self.parameters.set_defaults()
        self.initialize_code()
    def initialize_module_with_current_parameters(self):
        """Initialize the code keeping whatever parameter values are set."""
        self.initialize_code()
    def commit_parameters(self):
        """Flush pending (unset and cached) parameter values to the code,
        then delegate to the wrapped implementation."""
        self.parameters.send_not_set_parameters_to_code()
        self.parameters.send_cached_parameters_to_code()
        self.overridden().commit_parameters()
    def get_mass_profile(self, indices_of_the_stars, number_of_zones = None):
        """Return the per-zone mass fraction profile of a single star.

        ``indices_of_the_stars`` must resolve to exactly one star;
        ``number_of_zones`` is queried from the code when not supplied.
        """
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying mass profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        return self.get_mass_fraction_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
def get_cumulative_mass_profile(self, indices_of_the_stars, number_of_zones = None):
frac_profile = self.get_mass_profile(indices_of_the_stars, number_of_zones = number_of_zones)
return frac_profile.cumsum()
    def set_mass_profile(self, indices_of_the_stars, values, number_of_zones = None):
        """Set the per-zone mass fraction profile of a single star.

        ``values`` must contain one entry per mesh zone; a mismatch raises
        via ``_check_supplied_values``.
        """
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Setting mass profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        self._check_supplied_values(len(values), number_of_zones)
        self.set_mass_fraction_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none, values)
        # Invalidate any cached state the code keeps for this star.
        if hasattr(self, "_erase_memory"):
            self._erase_memory(indices_of_the_stars)
    def get_luminosity_profile(self, indices_of_the_stars, number_of_zones = None):
        """Return the per-zone luminosity profile of a single star."""
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying luminosity profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        return self.get_luminosity_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
    def set_luminosity_profile(self, indices_of_the_stars, values, number_of_zones = None):
        """Set the per-zone luminosity profile of a single star.

        ``values`` must contain one entry per mesh zone.
        """
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Setting luminosity profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        self._check_supplied_values(len(values), number_of_zones)
        self.set_luminosity_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none, values)
        # Invalidate any cached state the code keeps for this star.
        if hasattr(self, "_erase_memory"):
            self._erase_memory(indices_of_the_stars)
    def get_entropy_profile(self, indices_of_the_stars, number_of_zones = None):
        """Return the per-zone entropy profile of a single star."""
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying entropy profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        return self.get_entropy_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
    def get_thermal_energy_profile(self, indices_of_the_stars, number_of_zones = None):
        """Return the per-zone specific thermal energy profile of a star."""
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying thermal energy profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        return self.get_thermal_energy_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
    def get_brunt_vaisala_frequency_squared_profile(self, indices_of_the_stars, number_of_zones = None):
        """Return the per-zone squared Brunt-Vaisala frequency of a star."""
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying brunt-vaisala-frequency-squared profiles")
        if number_of_zones is None:
            number_of_zones = self.get_number_of_zones(indices_of_the_stars)
        return self.get_brunt_vaisala_frequency_squared_at_zone([indices_of_the_stars]*number_of_zones, list(range(number_of_zones)) | units.none)
    def get_IDs_of_species(self, indices_of_the_stars, number_of_species = None):
        """Return the list of chemical species IDs known for one star.

        Note: species indices passed to the code are 1-based.
        """
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying chemical abundance IDs")
        if number_of_species is None:
            number_of_species = self.get_number_of_species(indices_of_the_stars)
        return list(self.get_id_of_species(
            [indices_of_the_stars]*number_of_species,
            list(range(1,number_of_species+1))
        ))
    def get_masses_of_species(self, indices_of_the_stars, number_of_species = None):
        """Return the particle masses (in amu) of every chemical species of
        one star. Species indices passed to the code are 1-based.
        """
        indices_of_the_stars = self._check_number_of_indices(indices_of_the_stars, action_string = "Querying chemical abundance mass numbers")
        if number_of_species is None:
            number_of_species = self.get_number_of_species(indices_of_the_stars)
        return self.get_mass_of_species(
            [indices_of_the_stars]*number_of_species,
            list(range(1,number_of_species+1))
        )
    def new_particle_from_model(self, internal_structure, current_age=0|units.Myr, key=None):
        """Create a star in MESA from an existing structure model.

        ``internal_structure`` is either a dict of profile arrays or an
        object with equivalent attributes. When per-zone masses ('dmass')
        are absent, they are derived by differencing the cumulative 'mass'
        profile. All profiles are reversed (``[::-1]``) before upload —
        presumably to match the zone ordering ``new_stellar_model`` expects;
        confirm against the low-level interface. Returns the new particle
        added to ``self.imported_stars``.
        """
        if isinstance(internal_structure, dict):
            if "dmass" in internal_structure:
                mass_profile = internal_structure['dmass'][::-1]
            else:
                # Differentiate cumulative masses; prepend zero so the
                # innermost zone gets its full mass.
                cumulative_mass_profile = [0.0] | units.MSun
                cumulative_mass_profile.extend(internal_structure['mass'])
                mass_profile = (cumulative_mass_profile[1:] - cumulative_mass_profile[:-1])[::-1]
            self.new_stellar_model(
                mass_profile,
                internal_structure['radius'][::-1],
                internal_structure['rho'][::-1],
                internal_structure['temperature'][::-1],
                internal_structure['luminosity'][::-1],
                internal_structure['X_H'][::-1],
                internal_structure['X_He'][::-1],
                internal_structure['X_C'][::-1],
                internal_structure['X_N'][::-1],
                internal_structure['X_O'][::-1],
                internal_structure['X_Ne'][::-1],
                internal_structure['X_Mg'][::-1],
                internal_structure['X_Si'][::-1],
                internal_structure['X_Fe'][::-1]
            )
        else:
            # Same logic for attribute-style (e.g. particle/grid) models.
            if hasattr(internal_structure, "dmass"):
                mass_profile = internal_structure.dmass[::-1]
            else:
                cumulative_mass_profile = [0.0] | units.MSun
                cumulative_mass_profile.extend(internal_structure.mass)
                mass_profile = (cumulative_mass_profile[1:] - cumulative_mass_profile[:-1])[::-1]
            self.new_stellar_model(
                mass_profile,
                internal_structure.radius[::-1],
                internal_structure.rho[::-1],
                internal_structure.temperature[::-1],
                internal_structure.luminosity[::-1],
                internal_structure.X_H[::-1],
                internal_structure.X_He[::-1],
                internal_structure.X_C[::-1],
                internal_structure.X_N[::-1],
                internal_structure.X_O[::-1],
                internal_structure.X_Ne[::-1],
                internal_structure.X_Mg[::-1],
                internal_structure.X_Si[::-1],
                internal_structure.X_Fe[::-1]
            )
        # The uploaded model is committed by finalize_stellar_model, which
        # is bound as the set_new method of 'imported_stars'.
        tmp_star = datamodel.Particle(key=key)
        tmp_star.age_tag = current_age
        return self.imported_stars.add_particle(tmp_star)
# Alias kept for backward compatibility — confirm before removing.
Mesa = MESA
| true | true |
f7fb8f81600e0b3d8bd12850f84d936d411cae09 | 1,909 | py | Python | tests/st/ops/ascend/vector/test_elemwise_mul_ad_001.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | tests/st/ops/ascend/vector/test_elemwise_mul_ad_001.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | tests/st/ops/ascend/vector/test_elemwise_mul_ad_001.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
mul test cast
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.elemwise_mul_ad_run import elemwise_mul_ad_run
class TestCase(TestBase):
    """Autodiff test for element-wise multiplication (Ascend backend)."""

    def setup(self):
        """Register the test cases: float16 on-device, float32 for cloud."""
        case_name = "test_autodiff_elemwise_mul_ad_001"
        case_path = os.getcwd()
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        self.testarg = [
            # testflag, opfuncname, testRunArgs, dimArgs
            ("elemwise_mul_ad_run_3_3", elemwise_mul_ad_run, ([3, 3], "float16")),
        ]
        self.testarg_cloud = [
            # testflag, opfuncname, testRunArgs, dimArgs
            ("elemwise_mul_ad_run_3_3", elemwise_mul_ad_run, ([3, 3], "float32")),
        ]
        return

    @pytest.mark.level0
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.env_onecard
    def test_run(self):
        """Run the float16 on-device cases."""
        self.common_run(self.testarg)

    def test_run_cloud(self):
        """Run the float32 cloud cases."""
        self.common_run(self.testarg_cloud)

    def teardown(self):
        """
        clean environment
        :return:
        """
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
| 31.816667 | 88 | 0.663698 |
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.elemwise_mul_ad_run import elemwise_mul_ad_run
class TestCase(TestBase):
def setup(self):
case_name = "test_autodiff_elemwise_mul_ad_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
("elemwise_mul_ad_run_3_3", elemwise_mul_ad_run, ([3, 3], "float16")),
]
self.testarg_cloud = [
("elemwise_mul_ad_run_3_3", elemwise_mul_ad_run, ([3, 3], "float32")),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def test_run_cloud(self):
self.common_run(self.testarg_cloud)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
| true | true |
f7fb8fff4b6a671f41bc2286c696128d85e52c59 | 468 | py | Python | _app/comments/models.py | OmarThinks/DRF-Social-Project | e012c0d9e42e07948ef2fd7e391211ecf566a79a | [
"MIT"
] | null | null | null | _app/comments/models.py | OmarThinks/DRF-Social-Project | e012c0d9e42e07948ef2fd7e391211ecf566a79a | [
"MIT"
] | null | null | null | _app/comments/models.py | OmarThinks/DRF-Social-Project | e012c0d9e42e07948ef2fd7e391211ecf566a79a | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from posts.models import Post
class Comment(models.Model):
    """A user comment attached to a post."""

    # Deleting the post cascades to its comments; reverse accessor is
    # post.comments.
    post = models.ForeignKey(Post,
        on_delete=models.CASCADE, related_name='comments')
    content = models.CharField(max_length=1000)
    # Deleting the author removes their comments as well.
    author = models.ForeignKey(settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE)

    def __str__(self):
        # NOTE(review): the output has a closing ')' with no opening '(' —
        # looks unintentional, but kept as-is since it is runtime output.
        return (str(self.id) + " By "+ self.author.username
            + " on post"+ str(self.post_id) +") " + self.content)
from django.conf import settings
from posts.models import Post
class Comment(models.Model):
post = models.ForeignKey(Post,
on_delete=models.CASCADE,related_name='comments')
content = models.CharField(max_length=1000)
author = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
def __str__(self):
return (str(self.id) + " By "+ self.author.username
+ " on post"+ str(self.post_id) +") " + self.content)
| true | true |
f7fb90fb0475755686c72bb5f11a9b5d29b43359 | 395 | py | Python | my_nubank.py | janjitsu/nuflow | 47715218c8ea11407a767fda63a2d06cd1f8031d | [
"MIT"
] | 2 | 2018-03-16T18:11:20.000Z | 2018-03-16T23:14:09.000Z | my_nubank.py | janjitsu/nuflow | 47715218c8ea11407a767fda63a2d06cd1f8031d | [
"MIT"
] | null | null | null | my_nubank.py | janjitsu/nuflow | 47715218c8ea11407a767fda63a2d06cd1f8031d | [
"MIT"
] | null | null | null | # This class will fetch a Pynubank class with user credentials
import json
from pynubank import Nubank
class MyNubank:
    """Factory that returns a pynubank.Nubank client authenticated with
    the credentials stored in credentials/nubank_credentials.json.

    Note: ``__new__`` deliberately returns a ``Nubank`` instance, not a
    ``MyNubank`` one, so ``MyNubank()`` acts as a factory call.
    """

    def __new__(cls):
        # Fix: the original passed an open() handle to json.load without
        # ever closing it; use a context manager so the file is released.
        # Also renamed the first parameter to the conventional 'cls'.
        with open('credentials/nubank_credentials.json') as credentials_file:
            credentials = json.load(credentials_file)
        return Nubank(credentials['cpf'], credentials['password'])
# Manual smoke test: build an authenticated client and print it.
if __name__ == '__main__':
    from pprint import pprint
    my_nubank = MyNubank();
    pprint(my_nubank)
| 26.333333 | 76 | 0.721519 |
import json
from pynubank import Nubank
class MyNubank:
def __new__(self):
credentials = json.load(open('credentials/nubank_credentials.json'))
return Nubank(credentials['cpf'],credentials['password'])
if __name__ == '__main__':
from pprint import pprint
my_nubank = MyNubank();
pprint(my_nubank)
| true | true |
f7fb91bda1f8426ece53df6d0828880601147d94 | 5,794 | py | Python | benchmarks/classification/svm_voc07/extract.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 10 | 2021-12-30T10:22:27.000Z | 2022-03-30T02:31:38.000Z | benchmarks/classification/svm_voc07/extract.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 3 | 2022-01-20T21:02:48.000Z | 2022-03-19T13:49:45.000Z | benchmarks/classification/svm_voc07/extract.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import mmcv
import numpy as np
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openmixup.datasets import build_dataloader, build_dataset
from openmixup.models import build_model
from openmixup.hooks import MultiExtractProcess
from openmixup.utils import get_root_logger
def parse_args():
    """Parse command-line arguments for the feature-extraction script.

    Returns the parsed ``argparse.Namespace``; also propagates
    ``--local_rank`` into the ``LOCAL_RANK`` environment variable for
    torch distributed launchers that rely on it.
    """
    parser = argparse.ArgumentParser(
        description='MMSelfSup extract features of a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
    parser.add_argument(
        '--dataset-config',
        default='configs/benchmarks/classification/svm_voc07.py',
        help='extract dataset config file path')
    parser.add_argument(
        '--layer-ind',
        type=str,
        help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
    parser.add_argument(
        '--work_dir',
        type=str,
        default=None,
        help='the dir to save logs and models')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    # Some launchers pass the rank only via CLI; mirror it to the env var.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Extract multi-layer backbone features and dump per-split ``.npy`` files.

    Pipeline: parse args -> load/patch config -> resolve ``work_dir`` ->
    set up the (optionally distributed) runtime -> build dataset, dataloader
    and model -> run :class:`MultiExtractProcess` -> on rank 0, save one
    ``<split>_<key>.npy`` file per dataset split under
    ``<work_dir>/features/``.

    Raises:
        ValueError: if ``--layer-ind`` was not given on the command line.
    """
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        work_type = args.config.split('/')[1]
        cfg.work_dir = osp.join('./work_dirs', work_type,
                                osp.splitext(osp.basename(args.config))[0])
    # get out_indices from args; fail early with a clear message instead of
    # an AttributeError on None.split(',') when --layer-ind is missing
    if args.layer_ind is None:
        raise ValueError(
            'missing required argument --layer-ind, e.g. --layer-ind "0,1,2"')
    layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
    cfg.model.backbone.out_indices = layer_ind
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'extract_{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # build the dataloader
    dataset_cfg = mmcv.Config.fromfile(args.dataset_config)
    dataset = build_dataset(dataset_cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=dataset_cfg.data.imgs_per_gpu,
        workers_per_gpu=dataset_cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model
    model = build_model(cfg.model)
    model.init_weights()
    # model is determined in this priority: init_cfg > checkpoint > random
    if getattr(cfg.model.backbone.init_cfg, 'type', None) == 'Pretrained':
        logger.info(
            f'Use pretrained model: '
            f'{cfg.model.backbone.init_cfg.checkpoint} to extract features')
    elif args.checkpoint is not None:
        logger.info(f'Use checkpoint: {args.checkpoint} to extract features')
        load_checkpoint(model, args.checkpoint, map_location='cpu')
    else:
        logger.info('No pretrained or checkpoint is given, use random init.')
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
    # build extraction processor
    extractor = MultiExtractProcess(
        pool_type='specified', backbone='resnet50', layer_indices=layer_ind)
    # run
    outputs = extractor.extract(model, data_loader, distributed=distributed)
    rank, _ = get_dist_info()
    # BUG FIX: features must go under the resolved cfg.work_dir. The previous
    # code used args.work_dir, which is None unless --work_dir was passed
    # explicitly, and thus created a literal 'None/features/' directory.
    feature_dir = osp.join(cfg.work_dir, 'features')
    mmcv.mkdir_or_exist(feature_dir)
    if rank == 0:
        for key, val in outputs.items():
            split_num = len(dataset_cfg.split_name)
            split_at = dataset_cfg.split_at
            # slice the concatenated feature array back into dataset splits
            for ss in range(split_num):
                output_file = osp.join(
                    feature_dir, f'{dataset_cfg.split_name[ss]}_{key}.npy')
                if ss == 0:
                    np.save(output_file, val[:split_at[0]])
                elif ss == split_num - 1:
                    np.save(output_file, val[split_at[-1]:])
                else:
                    np.save(output_file, val[split_at[ss - 1]:split_at[ss]])


if __name__ == '__main__':
    main()
| 37.380645 | 79 | 0.651018 |
import argparse
import os
import os.path as osp
import time
import mmcv
import numpy as np
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openmixup.datasets import build_dataloader, build_dataset
from openmixup.models import build_model
from openmixup.hooks import MultiExtractProcess
from openmixup.utils import get_root_logger
def parse_args():
    """Parse the command-line arguments of the feature-extraction script.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable when a distributed launcher has not set it already.
    """
    parser = argparse.ArgumentParser(
        description='MMSelfSup extract features of a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
    parser.add_argument(
        '--dataset-config',
        default='configs/benchmarks/classification/svm_voc07.py',
        help='extract dataset config file path')
    parser.add_argument(
        '--layer-ind',
        type=str,
        help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
    parser.add_argument(
        '--work_dir',
        type=str,
        default=None,
        help='the dir to save logs and models')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    # Propagate --local_rank to the environment for distributed launchers.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Extract backbone features and save per-split ``.npy`` files.

    Parses args, resolves the work directory, sets up the (optionally
    distributed) runtime, builds dataset/model, extracts features and, on
    rank 0, writes one ``<split>_<key>.npy`` per dataset split under
    ``<work_dir>/features/``.

    Raises:
        ValueError: if ``--layer-ind`` was not supplied.
    """
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir priority: CLI > value in config file > derived from filename
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        work_type = args.config.split('/')[1]
        cfg.work_dir = osp.join('./work_dirs', work_type,
                                osp.splitext(osp.basename(args.config))[0])
    # fail early with a clear message instead of AttributeError on None.split
    if args.layer_ind is None:
        raise ValueError(
            'missing required argument --layer-ind, e.g. --layer-ind "0,1,2"')
    layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
    cfg.model.backbone.out_indices = layer_ind
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'extract_{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    dataset_cfg = mmcv.Config.fromfile(args.dataset_config)
    dataset = build_dataset(dataset_cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=dataset_cfg.data.imgs_per_gpu,
        workers_per_gpu=dataset_cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    model = build_model(cfg.model)
    model.init_weights()
    # weight priority: init_cfg 'Pretrained' > --checkpoint > random init
    if getattr(cfg.model.backbone.init_cfg, 'type', None) == 'Pretrained':
        logger.info(
            f'Use pretrained model: '
            f'{cfg.model.backbone.init_cfg.checkpoint} to extract features')
    elif args.checkpoint is not None:
        logger.info(f'Use checkpoint: {args.checkpoint} to extract features')
        load_checkpoint(model, args.checkpoint, map_location='cpu')
    else:
        logger.info('No pretrained or checkpoint is given, use random init.')
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
    extractor = MultiExtractProcess(
        pool_type='specified', backbone='resnet50', layer_indices=layer_ind)
    outputs = extractor.extract(model, data_loader, distributed=distributed)
    rank, _ = get_dist_info()
    # BUG FIX: use the resolved cfg.work_dir; args.work_dir is None unless
    # --work_dir was passed, which produced a literal 'None/features/' dir.
    feature_dir = osp.join(cfg.work_dir, 'features')
    mmcv.mkdir_or_exist(feature_dir)
    if rank == 0:
        for key, val in outputs.items():
            split_num = len(dataset_cfg.split_name)
            split_at = dataset_cfg.split_at
            # slice the concatenated feature array back into dataset splits
            for ss in range(split_num):
                output_file = osp.join(
                    feature_dir, f'{dataset_cfg.split_name[ss]}_{key}.npy')
                if ss == 0:
                    np.save(output_file, val[:split_at[0]])
                elif ss == split_num - 1:
                    np.save(output_file, val[split_at[-1]:])
                else:
                    np.save(output_file, val[split_at[ss - 1]:split_at[ss]])


if __name__ == '__main__':
    main()
| true | true |
f7fb92077bc5cb50669d9a8947dcdd3be5894730 | 5,257 | py | Python | code/mpu6050.py | piNp187/gyroscope | f23b8fb47ab2a05319e4212a85261f3197592daa | [
"CC-BY-4.0"
] | 1 | 2022-03-24T09:43:05.000Z | 2022-03-24T09:43:05.000Z | code/mpu6050.py | piNp187/gyroscope | f23b8fb47ab2a05319e4212a85261f3197592daa | [
"CC-BY-4.0"
] | null | null | null | code/mpu6050.py | piNp187/gyroscope | f23b8fb47ab2a05319e4212a85261f3197592daa | [
"CC-BY-4.0"
] | null | null | null | import machine
from machine import I2C, Pin
import time
# MPU-6050 register map constants.
# NOTE(review): ``const`` is MicroPython's compile-time constant helper and
# normally requires ``from micropython import const`` — confirm the target
# port supplies it; on CPython this line is a NameError.
MPU_ADDR = const(0X68)
# Identification / power management (re-declared with identical values below).
MPU_DEVICE_ID_REG = 0x75
MPU_PWR_MGMT1_REG = 0x6B
MPU_PWR_MGMT2_REG = 0x6C
# Self-test registers.
MPU_SELF_TESTX_REG = 0x0D
MPU_SELF_TESTY_REG = 0x0E
MPU_SELF_TESTZ_REG = 0x0F
MPU_SELF_TESTA_REG = 0x10
# Sampling / filter / full-scale-range configuration.
MPU_SAMPLE_RATE_REG = 0x19
MPU_CFG_REG = 0x1A
MPU_GYRO_CFG_REG = 0x1B
MPU_ACCEL_CFG_REG = 0x1C
MPU_MOTION_DET_REG = 0x1F
MPU_FIFO_EN_REG = 0x23
# Auxiliary I2C master / slave channels (unused by the driver below).
MPU_I2CMST_CTRL_REG = 0x24
MPU_I2CSLV0_ADDR_REG = 0x25
MPU_I2CSLV0_REG = 0x26
MPU_I2CSLV0_CTRL_REG = 0x27
MPU_I2CSLV1_ADDR_REG = 0x28
MPU_I2CSLV1_REG = 0x29
MPU_I2CSLV1_CTRL_REG = 0x2A
MPU_I2CSLV2_ADDR_REG = 0x2B
MPU_I2CSLV2_REG = 0x2C
MPU_I2CSLV2_CTRL_REG = 0x2D
MPU_I2CSLV3_ADDR_REG = 0x2E
MPU_I2CSLV3_REG = 0x2F
MPU_I2CSLV3_CTRL_REG = 0x30
MPU_I2CSLV4_ADDR_REG = 0x31
MPU_I2CSLV4_REG = 0x32
MPU_I2CSLV4_DO_REG = 0x33
MPU_I2CSLV4_CTRL_REG = 0x34
MPU_I2CSLV4_DI_REG = 0x35
MPU_I2CMST_STA_REG = 0x36
# Interrupt configuration and status.
MPU_INTBP_CFG_REG = 0x37
MPU_INT_EN_REG = 0x38
MPU_INT_STA_REG = 0x3A
# Sensor data output registers (high/low byte pairs).
MPU_ACCEL_XOUTH_REG = 0x3B
MPU_ACCEL_XOUTL_REG = 0x3C
MPU_ACCEL_YOUTH_REG = 0x3D
MPU_ACCEL_YOUTL_REG = 0x3E
MPU_ACCEL_ZOUTH_REG = 0x3F
MPU_ACCEL_ZOUTL_REG = 0x40
MPU_TEMP_OUTH_REG = 0x41
MPU_TEMP_OUTL_REG = 0x42
MPU_GYRO_XOUTH_REG = 0x43
MPU_GYRO_XOUTL_REG = 0x44
MPU_GYRO_YOUTH_REG = 0x45
MPU_GYRO_YOUTL_REG = 0x46
MPU_GYRO_ZOUTH_REG = 0x47
MPU_GYRO_ZOUTL_REG = 0x48
# Slave data-out and miscellaneous control registers.
MPU_I2CSLV0_DO_REG = 0x63
MPU_I2CSLV1_DO_REG = 0x64
MPU_I2CSLV2_DO_REG = 0x65
MPU_I2CSLV3_DO_REG = 0x66
MPU_I2CMST_DELAY_REG = 0x67
MPU_SIGPATH_RST_REG = 0x68
MPU_MDETECT_CTRL_REG = 0x69
MPU_USER_CTRL_REG = 0x6A
# Duplicate re-declarations of the same values defined above (harmless).
MPU_PWR_MGMT1_REG = 0x6B
MPU_PWR_MGMT2_REG = 0x6C
# FIFO access.
MPU_FIFO_CNTH_REG = 0x72
MPU_FIFO_CNTL_REG = 0x73
MPU_FIFO_RW_REG = 0x74
MPU_DEVICE_ID_REG = 0x75
# Unused alias of MPU_ADDR.
MPU_ADDR_ADDR = 0x68
class MPU6050(object):
    """Minimal MPU-6050 accelerometer/gyroscope driver over I2C (MicroPython).

    Talks to the sensor at address ``MPU_ADDR`` via ``machine.I2C``.
    """
    def __init__(self, bus, sclpin, sdapin):
        # 400 kHz fast-mode I2C on the given bus and SCL/SDA pins.
        self.i2c = I2C(id=bus, scl=Pin(sclpin), sda=Pin(sdapin), freq=400000)
    def Write_Mpu6050_REG(self, reg, dat):
        """Write the single byte ``dat`` to register ``reg``. Returns None."""
        buf = bytearray(1)
        buf[0] = dat
        self.i2c.writeto_mem(MPU_ADDR, reg, buf)
    def Read_Mpu6050_REG(self, reg):
        """Read one byte from ``reg``, decoded as a BCD-style decimal.

        ``(t>>4)*10 + (t%16)`` maps 0x68 to decimal 68 — this matches the
        ``res == 68`` WHO_AM_I check in :meth:`MPU_Init` but is NOT a raw
        register read; use :meth:`Read_Mpu6050_Len` for raw bytes.
        """
        t = self.i2c.readfrom_mem(MPU_ADDR, reg, 1)[0]
        return (t>>4)*10 + (t%16)
    def Read_Mpu6050_Len(self,reg,len,buffer):
        # NOTE(review): ``len`` shadows the builtin and is unused; the read
        # size is taken from the caller-supplied ``buffer``. Returns None.
        self.i2c.readfrom_mem_into(MPU_ADDR, reg, buffer)
    # fsr: 0 = ±250 dps; 1 = ±500 dps; 2 = ±1000 dps; 3 = ±2000 dps
    def MPU_Set_Gyro_Fsr(self, fsr):
        """Set the gyro full-scale range (fsr shifted into bits 4:3)."""
        return self.Write_Mpu6050_REG(MPU_GYRO_CFG_REG, fsr<<3)
    # fsr: 0 = ±2g; 1 = ±4g; 2 = ±8g; 3 = ±16g
    def MPU_Set_Accel_Fsr(self, fsr):
        """Set the accelerometer full-scale range (fsr shifted into bits 4:3)."""
        return self.Write_Mpu6050_REG(MPU_ACCEL_CFG_REG, fsr<<3)
    def MPU_Set_LPF(self, lpf):
        """Program the digital low-pass filter for a cutoff of ``lpf`` Hz."""
        if(lpf >= 188):
            data = 1
        elif(lpf >= 98):
            data = 2
        elif(lpf >= 42):
            data = 3
        elif(lpf >= 20):
            data = 4
        elif(lpf >= 10):
            data = 5
        else:
            data = 6
        self.Write_Mpu6050_REG(MPU_CFG_REG, data)
    # rate: 4~1000 (Hz)
    def MPU_Set_Rate(self, rate):
        """Set the sample rate (clamped to 4..1000 Hz); LPF tracks rate/2."""
        if(rate > 1000):
            rate = 1000
        if(rate < 4):
            rate = 4
        # sample rate = 1000 / (1 + divider)  =>  divider = 1000/rate - 1
        data = int(1000/rate-1)
        datas = self.Write_Mpu6050_REG(MPU_SAMPLE_RATE_REG, data)
        return self.MPU_Set_LPF(rate / 2)
    def MPU_Init(self):
        """Reset and configure the chip; return 0 on success, 1 if the
        device-ID check fails."""
        self.Write_Mpu6050_REG(MPU_PWR_MGMT1_REG, 0x80)  # 0x80: reset bit per register map
        time.sleep_ms(100)
        self.Write_Mpu6050_REG(MPU_PWR_MGMT1_REG, 0x00)  # wake from sleep
        self.MPU_Set_Gyro_Fsr(3)
        self.MPU_Set_Accel_Fsr(0)
        self.MPU_Set_Rate(50)
        self.Write_Mpu6050_REG(MPU_INT_EN_REG, 0x00)
        self.Write_Mpu6050_REG(MPU_USER_CTRL_REG, 0x00)
        self.Write_Mpu6050_REG(MPU_FIFO_EN_REG, 0x00)
        self.Write_Mpu6050_REG(MPU_INTBP_CFG_REG, 0x80)
        # Read_Mpu6050_REG turns WHO_AM_I 0x68 into decimal 68 (see above).
        res = self.Read_Mpu6050_REG(MPU_DEVICE_ID_REG)
        if(res == 68):
            self.Write_Mpu6050_REG(MPU_PWR_MGMT1_REG, 0x01)
            self.Write_Mpu6050_REG(MPU_PWR_MGMT2_REG, 0x00)
            self.MPU_Set_Rate(50)
        else:
            return 1
        return 0
    # Raw sensor reads below; values are signed 16-bit counts, not scaled.
    def MPU_Get_Gyroscope(self):
        """Return raw (gx, gy, gz) gyroscope readings as signed ints."""
        buf = bytearray(6)
        res = self.Read_Mpu6050_Len(MPU_GYRO_XOUTH_REG, 6, buf)
        gx = (buf[0]<<8) | buf[1]
        gy = (buf[2]<<8) | buf[3]
        gz = (buf[4]<<8) | buf[5]
        # two's-complement sign correction for 16-bit values
        if gx >= 0x8000:
            gx = -((65535-gx)+1)
        if gy >= 0x8000:
            gy = -((65535-gy)+1)
        if gz >= 0x8000:
            gz = -((65535-gz)+1)
        return gx, gy, gz
    def MPU_Get_Accelerometer(self):
        """Return raw (ax, ay, az) accelerometer readings as signed ints."""
        buf = bytearray(6)
        res = self.Read_Mpu6050_Len(MPU_ACCEL_XOUTH_REG, 6, buf)
        ax = (buf[0]<<8) | buf[1]
        ay = (buf[2]<<8) | buf[3]
        az = (buf[4]<<8) | buf[5]
        # two's-complement sign correction for 16-bit values
        if ax >= 0x8000:
            ax = -((65535-ax)+1)
        if ay >= 0x8000:
            ay = -((65535-ay)+1)
        if az >= 0x8000:
            az = -((65535-az)+1)
        return ax, ay, az
| 29.700565 | 77 | 0.604718 | import machine
from machine import I2C, Pin
import time
# MPU-6050 register addresses.
# NOTE(review): ``const`` needs ``from micropython import const`` on stock
# MicroPython — confirm the target port; on CPython this is a NameError.
MPU_ADDR = const(0X68)
MPU_DEVICE_ID_REG = 0x75
MPU_PWR_MGMT1_REG = 0x6B
MPU_PWR_MGMT2_REG = 0x6C
# self-test
MPU_SELF_TESTX_REG = 0x0D
MPU_SELF_TESTY_REG = 0x0E
MPU_SELF_TESTZ_REG = 0x0F
MPU_SELF_TESTA_REG = 0x10
# sampling / filter / range configuration
MPU_SAMPLE_RATE_REG = 0x19
MPU_CFG_REG = 0x1A
MPU_GYRO_CFG_REG = 0x1B
MPU_ACCEL_CFG_REG = 0x1C
MPU_MOTION_DET_REG = 0x1F
MPU_FIFO_EN_REG = 0x23
# auxiliary I2C master / slave channels
MPU_I2CMST_CTRL_REG = 0x24
MPU_I2CSLV0_ADDR_REG = 0x25
MPU_I2CSLV0_REG = 0x26
MPU_I2CSLV0_CTRL_REG = 0x27
MPU_I2CSLV1_ADDR_REG = 0x28
MPU_I2CSLV1_REG = 0x29
MPU_I2CSLV1_CTRL_REG = 0x2A
MPU_I2CSLV2_ADDR_REG = 0x2B
MPU_I2CSLV2_REG = 0x2C
MPU_I2CSLV2_CTRL_REG = 0x2D
MPU_I2CSLV3_ADDR_REG = 0x2E
MPU_I2CSLV3_REG = 0x2F
MPU_I2CSLV3_CTRL_REG = 0x30
MPU_I2CSLV4_ADDR_REG = 0x31
MPU_I2CSLV4_REG = 0x32
MPU_I2CSLV4_DO_REG = 0x33
MPU_I2CSLV4_CTRL_REG = 0x34
MPU_I2CSLV4_DI_REG = 0x35
MPU_I2CMST_STA_REG = 0x36
# interrupts
MPU_INTBP_CFG_REG = 0x37
MPU_INT_EN_REG = 0x38
MPU_INT_STA_REG = 0x3A
# sensor data output (high/low byte pairs)
MPU_ACCEL_XOUTH_REG = 0x3B
MPU_ACCEL_XOUTL_REG = 0x3C
MPU_ACCEL_YOUTH_REG = 0x3D
MPU_ACCEL_YOUTL_REG = 0x3E
MPU_ACCEL_ZOUTH_REG = 0x3F
MPU_ACCEL_ZOUTL_REG = 0x40
MPU_TEMP_OUTH_REG = 0x41
MPU_TEMP_OUTL_REG = 0x42
MPU_GYRO_XOUTH_REG = 0x43
MPU_GYRO_XOUTL_REG = 0x44
MPU_GYRO_YOUTH_REG = 0x45
MPU_GYRO_YOUTL_REG = 0x46
MPU_GYRO_ZOUTH_REG = 0x47
MPU_GYRO_ZOUTL_REG = 0x48
# slave data-out / misc control
MPU_I2CSLV0_DO_REG = 0x63
MPU_I2CSLV1_DO_REG = 0x64
MPU_I2CSLV2_DO_REG = 0x65
MPU_I2CSLV3_DO_REG = 0x66
MPU_I2CMST_DELAY_REG = 0x67
MPU_SIGPATH_RST_REG = 0x68
MPU_MDETECT_CTRL_REG = 0x69
MPU_USER_CTRL_REG = 0x6A
# duplicate re-declarations of identical values above (harmless)
MPU_PWR_MGMT1_REG = 0x6B
MPU_PWR_MGMT2_REG = 0x6C
# FIFO
MPU_FIFO_CNTH_REG = 0x72
MPU_FIFO_CNTL_REG = 0x73
MPU_FIFO_RW_REG = 0x74
MPU_DEVICE_ID_REG = 0x75
# unused alias of MPU_ADDR
MPU_ADDR_ADDR = 0x68
class MPU6050(object):
    """Minimal MPU-6050 accel/gyro driver over ``machine.I2C`` (MicroPython)."""
    def __init__(self, bus, sclpin, sdapin):
        # 400 kHz I2C on the given bus and pins.
        self.i2c = I2C(id=bus, scl=Pin(sclpin), sda=Pin(sdapin), freq=400000)
    def Write_Mpu6050_REG(self, reg, dat):
        """Write the single byte ``dat`` to register ``reg``."""
        buf = bytearray(1)
        buf[0] = dat
        self.i2c.writeto_mem(MPU_ADDR, reg, buf)
    def Read_Mpu6050_REG(self, reg):
        """Read one byte, BCD-decoded: 0x68 -> 68 (used by the ID check)."""
        t = self.i2c.readfrom_mem(MPU_ADDR, reg, 1)[0]
        return (t>>4)*10 + (t%16)
    def Read_Mpu6050_Len(self,reg,len,buffer):
        # NOTE(review): ``len`` shadows the builtin and is unused; the read
        # size comes from the caller-supplied ``buffer``.
        self.i2c.readfrom_mem_into(MPU_ADDR, reg, buffer)
    def MPU_Set_Gyro_Fsr(self, fsr):
        """Set gyro full-scale range: 0..3 shifted into bits 4:3."""
        return self.Write_Mpu6050_REG(MPU_GYRO_CFG_REG, fsr<<3)
    def MPU_Set_Accel_Fsr(self, fsr):
        """Set accelerometer full-scale range: 0..3 shifted into bits 4:3."""
        return self.Write_Mpu6050_REG(MPU_ACCEL_CFG_REG, fsr<<3)
    def MPU_Set_LPF(self, lpf):
        """Program the digital low-pass filter for a cutoff of ``lpf`` Hz."""
        if(lpf >= 188):
            data = 1
        elif(lpf >= 98):
            data = 2
        elif(lpf >= 42):
            data = 3
        elif(lpf >= 20):
            data = 4
        elif(lpf >= 10):
            data = 5
        else:
            data = 6
        self.Write_Mpu6050_REG(MPU_CFG_REG, data)
    def MPU_Set_Rate(self, rate):
        """Set the sample rate (clamped to 4..1000 Hz); LPF tracks rate/2."""
        if(rate > 1000):
            rate = 1000
        if(rate < 4):
            rate = 4
        # sample rate = 1000 / (1 + divider)
        data = int(1000/rate-1)
        datas = self.Write_Mpu6050_REG(MPU_SAMPLE_RATE_REG, data)
        return self.MPU_Set_LPF(rate / 2)
    def MPU_Init(self):
        """Reset and configure; return 0 on success, 1 if the ID check fails."""
        self.Write_Mpu6050_REG(MPU_PWR_MGMT1_REG, 0x80)  # reset
        time.sleep_ms(100)
        self.Write_Mpu6050_REG(MPU_PWR_MGMT1_REG, 0x00)  # wake up
        self.MPU_Set_Gyro_Fsr(3)
        self.MPU_Set_Accel_Fsr(0)
        self.MPU_Set_Rate(50)
        self.Write_Mpu6050_REG(MPU_INT_EN_REG, 0x00)
        self.Write_Mpu6050_REG(MPU_USER_CTRL_REG, 0x00)
        self.Write_Mpu6050_REG(MPU_FIFO_EN_REG, 0x00)
        self.Write_Mpu6050_REG(MPU_INTBP_CFG_REG, 0x80)
        # Read_Mpu6050_REG converts WHO_AM_I 0x68 to decimal 68.
        res = self.Read_Mpu6050_REG(MPU_DEVICE_ID_REG)
        if(res == 68):
            self.Write_Mpu6050_REG(MPU_PWR_MGMT1_REG, 0x01)
            self.Write_Mpu6050_REG(MPU_PWR_MGMT2_REG, 0x00)
            self.MPU_Set_Rate(50)
        else:
            return 1
        return 0
    def MPU_Get_Gyroscope(self):
        """Return raw signed 16-bit (gx, gy, gz) gyro readings."""
        buf = bytearray(6)
        res = self.Read_Mpu6050_Len(MPU_GYRO_XOUTH_REG, 6, buf)
        gx = (buf[0]<<8) | buf[1]
        gy = (buf[2]<<8) | buf[3]
        gz = (buf[4]<<8) | buf[5]
        # two's-complement sign correction
        if gx >= 0x8000:
            gx = -((65535-gx)+1)
        if gy >= 0x8000:
            gy = -((65535-gy)+1)
        if gz >= 0x8000:
            gz = -((65535-gz)+1)
        return gx, gy, gz
    def MPU_Get_Accelerometer(self):
        """Return raw signed 16-bit (ax, ay, az) accelerometer readings."""
        buf = bytearray(6)
        res = self.Read_Mpu6050_Len(MPU_ACCEL_XOUTH_REG, 6, buf)
        ax = (buf[0]<<8) | buf[1]
        ay = (buf[2]<<8) | buf[3]
        az = (buf[4]<<8) | buf[5]
        # two's-complement sign correction
        if ax >= 0x8000:
            ax = -((65535-ax)+1)
        if ay >= 0x8000:
            ay = -((65535-ay)+1)
        if az >= 0x8000:
            az = -((65535-az)+1)
        return ax, ay, az
| true | true |
f7fb921e54642b921355f2d5b25983827d741b17 | 3,940 | py | Python | vispy/visuals/filters/clipping_planes.py | Dive576/vispy | 06bedb0e9aa410505dbe283d2c52dc9b168f8ded | [
"BSD-3-Clause"
] | 2,617 | 2015-01-02T07:52:18.000Z | 2022-03-29T19:31:15.000Z | vispy/visuals/filters/clipping_planes.py | Dive576/vispy | 06bedb0e9aa410505dbe283d2c52dc9b168f8ded | [
"BSD-3-Clause"
] | 1,674 | 2015-01-01T00:36:08.000Z | 2022-03-31T19:35:56.000Z | vispy/visuals/filters/clipping_planes.py | Dive576/vispy | 06bedb0e9aa410505dbe283d2c52dc9b168f8ded | [
"BSD-3-Clause"
] | 719 | 2015-01-10T14:25:00.000Z | 2022-03-02T13:24:56.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from functools import lru_cache
import numpy as np
from ..shaders import Function, Varying
from .base_filter import Filter
class PlanesClipper(Filter):
    """Discard visual output on the negative side of arbitrary clipping planes.

    Parameters
    ----------
    clipping_planes : ArrayLike
        Planes given as (position, normal) pairs; the normal's magnitude is
        irrelevant. Shape: (n_planes, 2, 3).
    coord_system : str
        Coordinate system the planes are expressed in (see
        visuals.transforms.transform_system.py).
    """
    VERT_CODE = """
    void clip() {
        // Transform back to visual coordinates and clip based on that
        $v_distance_from_clip = $clip_with_planes($itransform(gl_Position).xyz);
    }
    """
    FRAG_CODE = """
    void clip() {
        if ($v_distance_from_clip < 0.)
            discard;
    }
    """
    def __init__(self, clipping_planes=None, coord_system='scene'):
        valid_systems = ['visual', 'scene', 'document', 'canvas', 'framebuffer', 'render']
        if coord_system not in valid_systems:
            raise ValueError(f'Invalid coordinate system {coord_system}. Must be one of {valid_systems}.')
        self._coord_system = coord_system
        super().__init__(
            vcode=Function(self.VERT_CODE), vhook='post', vpos=1,
            fcode=Function(self.FRAG_CODE), fhook='pre', fpos=1,
        )
        # Share one varying so the vertex-stage distance reaches the fragment stage.
        clip_distance = Varying('v_distance_from_clip', 'float')
        for shader in (self.vshader, self.fshader):
            shader['v_distance_from_clip'] = clip_distance
        self.clipping_planes = clipping_planes
    @property
    def coord_system(self):
        """
        Coordinate system used by the clipping planes (see visuals.transforms.transform_system.py)
        """
        # read-only: the itransform wired in _attach() cannot be swapped later
        return self._coord_system
    def _attach(self, visual):
        super()._attach(visual)
        render_to_planes = visual.get_transform('render', self._coord_system)
        self.vshader['itransform'] = render_to_planes
    @staticmethod
    @lru_cache(maxsize=10)
    def _build_clipping_planes_func(n_planes):
        """Return a GLSL function that clips against ``n_planes`` planes."""
        func_template = '''
            float clip_planes(vec3 loc) {{
                float distance_from_clip = 3.4e38; // max float
                {clips};
                return distance_from_clip;
            }}
        '''
        # the vertex is considered clipped if on the "negative" side of the plane
        clip_template = '''
                vec3 relative_vec{idx} = loc - $clipping_plane_pos{idx};
                float distance_from_clip{idx} = dot(relative_vec{idx}, $clipping_plane_norm{idx});
                distance_from_clip = min(distance_from_clip{idx}, distance_from_clip);
            '''
        plane_snippets = ''.join(
            clip_template.format(idx=i) for i in range(n_planes))
        return Function(func_template.format(clips=plane_snippets))
    @property
    def clipping_planes(self):
        """Get the set of planes used to clip the mesh.
        Each plane is defined by a position and a normal vector (magnitude is irrelevant). Shape: (n_planes, 2, 3)
        """
        return self._clipping_planes
    @clipping_planes.setter
    def clipping_planes(self, value):
        planes = np.empty([0, 2, 3]) if value is None else value
        self._clipping_planes = planes
        clip_func = self._build_clipping_planes_func(len(planes))
        self.vshader['clip_with_planes'] = clip_func
        # bind each plane's position/normal to the generated shader symbols
        for i, (pos, norm) in enumerate(planes):
            clip_func[f'clipping_plane_pos{i}'] = tuple(pos)
            clip_func[f'clipping_plane_norm{i}'] = tuple(norm)
| 35.818182 | 114 | 0.641878 |
from functools import lru_cache
import numpy as np
from ..shaders import Function, Varying
from .base_filter import Filter
class PlanesClipper(Filter):
    """Clip visual output against a set of arbitrary planes.

    ``clipping_planes`` has shape (n_planes, 2, 3): each plane is a
    (position, normal) pair; the normal's magnitude is irrelevant.
    """
    VERT_CODE = """
    void clip() {
        // Transform back to visual coordinates and clip based on that
        $v_distance_from_clip = $clip_with_planes($itransform(gl_Position).xyz);
    }
    """
    FRAG_CODE = """
    void clip() {
        if ($v_distance_from_clip < 0.)
            discard;
    }
    """
    def __init__(self, clipping_planes=None, coord_system='scene'):
        """Create the filter; ``coord_system`` must be a known transform name."""
        tr = ['visual', 'scene', 'document', 'canvas', 'framebuffer', 'render']
        if coord_system not in tr:
            raise ValueError(f'Invalid coordinate system {coord_system}. Must be one of {tr}.')
        self._coord_system = coord_system
        super().__init__(
            vcode=Function(self.VERT_CODE), vhook='post', vpos=1,
            fcode=Function(self.FRAG_CODE), fhook='pre', fpos=1,
        )
        # one shared varying carries the signed clip distance to the fragment stage
        v_distance_from_clip = Varying('v_distance_from_clip', 'float')
        self.vshader['v_distance_from_clip'] = v_distance_from_clip
        self.fshader['v_distance_from_clip'] = v_distance_from_clip
        self.clipping_planes = clipping_planes
    @property
    def coord_system(self):
        """Coordinate system used by the clipping planes (read-only)."""
        return self._coord_system
    def _attach(self, visual):
        """Attach to ``visual`` and wire the render->plane-space transform."""
        super()._attach(visual)
        self.vshader['itransform'] = visual.get_transform('render', self._coord_system)
    @staticmethod
    @lru_cache(maxsize=10)
    def _build_clipping_planes_func(n_planes):
        """Return a GLSL Function that clips against ``n_planes`` planes."""
        func_template = '''
            float clip_planes(vec3 loc) {{
                float distance_from_clip = 3.4e38; // max float
                {clips};
                return distance_from_clip;
            }}
        '''
        # the vertex is considered clipped if on the "negative" side of the plane
        clip_template = '''
                vec3 relative_vec{idx} = loc - $clipping_plane_pos{idx};
                float distance_from_clip{idx} = dot(relative_vec{idx}, $clipping_plane_norm{idx});
                distance_from_clip = min(distance_from_clip{idx}, distance_from_clip);
            '''
        all_clips = []
        for idx in range(n_planes):
            all_clips.append(clip_template.format(idx=idx))
        formatted_code = func_template.format(clips=''.join(all_clips))
        return Function(formatted_code)
    @property
    def clipping_planes(self):
        """Planes used to clip, as (position, normal) pairs; shape (n_planes, 2, 3)."""
        return self._clipping_planes
    @clipping_planes.setter
    def clipping_planes(self, value):
        if value is None:
            value = np.empty([0, 2, 3])
        self._clipping_planes = value
        # regenerate the shader snippet and bind each plane's pos/norm symbols
        clip_func = self._build_clipping_planes_func(len(value))
        self.vshader['clip_with_planes'] = clip_func
        for idx, plane in enumerate(value):
            clip_func[f'clipping_plane_pos{idx}'] = tuple(plane[0])
            clip_func[f'clipping_plane_norm{idx}'] = tuple(plane[1])
| true | true |
f7fb954c6a940503ff310e8f01ba7d7f09a3394c | 12,823 | py | Python | src/sage/geometry/polyhedron/backend_field.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 3 | 2019-07-15T13:48:24.000Z | 2019-11-08T12:31:43.000Z | src/sage/geometry/polyhedron/backend_field.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z | src/sage/geometry/polyhedron/backend_field.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 1 | 2019-06-02T03:16:55.000Z | 2019-06-02T03:16:55.000Z | """
The Python backend
While slower than specialized C/C++ implementations, the
implementation is general and works with any exact field in Sage that
allows you to define polyhedra.
EXAMPLES::
sage: p0 = (0, 0)
sage: p1 = (1, 0)
sage: p2 = (1/2, AA(3).sqrt()/2)
sage: equilateral_triangle = Polyhedron([p0, p1, p2])
sage: equilateral_triangle.vertices()
(A vertex at (0, 0),
A vertex at (1, 0),
A vertex at (0.500000000000000?, 0.866025403784439?))
sage: equilateral_triangle.inequalities()
(An inequality (-1, -0.5773502691896258?) x + 1 >= 0,
An inequality (1, -0.5773502691896258?) x + 0 >= 0,
An inequality (0, 1.154700538379252?) x + 0 >= 0)
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2014 Volker Braun <vbraun.name@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from .base import Polyhedron_base
from sage.structure.element import Element
class Polyhedron_field(Polyhedron_base):
"""
Polyhedra over all fields supported by Sage
INPUT:
- ``Vrep`` -- a list ``[vertices, rays, lines]`` or ``None``.
- ``Hrep`` -- a list ``[ieqs, eqns]`` or ``None``.
EXAMPLES::
sage: p = Polyhedron(vertices=[(0,0),(AA(2).sqrt(),0),(0,AA(3).sqrt())],
....: rays=[(1,1)], lines=[], backend='field', base_ring=AA)
sage: TestSuite(p).run()
TESTS::
sage: K.<sqrt3> = QuadraticField(3)
sage: p = Polyhedron([(0,0), (1,0), (1/2, sqrt3/2)])
sage: TestSuite(p).run()
Check that :trac:`19013` is fixed::
sage: K.<phi> = NumberField(x^2-x-1, embedding=1.618)
sage: P1 = Polyhedron([[0,1],[1,1],[1,-phi+1]])
sage: P2 = Polyhedron(ieqs=[[-1,-phi,0]])
sage: P1.intersection(P2)
The empty polyhedron in (Number Field in phi with defining polynomial
x^2 - x - 1)^2
"""
def _is_zero(self, x):
"""
Test whether ``x`` is zero.
INPUT:
- ``x`` -- a number in the base ring.
OUTPUT:
Boolean.
EXAMPLES::
sage: p = Polyhedron([(sqrt(3),sqrt(2))], base_ring=AA)
sage: p._is_zero(0)
True
sage: p._is_zero(1/100000)
False
"""
return x == 0
def _is_nonneg(self, x):
"""
Test whether ``x`` is nonnegative.
INPUT:
- ``x`` -- a number in the base ring.
OUTPUT:
Boolean.
EXAMPLES::
sage: p = Polyhedron([(sqrt(3),sqrt(2))], base_ring=AA)
sage: p._is_nonneg(1)
True
sage: p._is_nonneg(-1/100000)
False
"""
return x >= 0
def _is_positive(self, x):
"""
Test whether ``x`` is positive.
INPUT:
- ``x`` -- a number in the base ring.
OUTPUT:
Boolean.
EXAMPLES::
sage: p = Polyhedron([(sqrt(3),sqrt(2))], base_ring=AA)
sage: p._is_positive(1)
True
sage: p._is_positive(0)
False
"""
return x > 0
def __init__(self, parent, Vrep, Hrep, Vrep_minimal=None, Hrep_minimal=None, **kwds):
"""
Initializes the polyhedron.
See :class:`Polyhedron_base` for a description of ``Vrep`` and ``Hrep``.
If both ``Vrep`` and ``Hrep`` are provided, then
``Vrep_minimal`` and ``Hrep_minimal`` must be set to ``True``.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra_field
sage: from sage.geometry.polyhedron.backend_field import Polyhedron_field
sage: parent = Polyhedra_field(AA, 1, 'field')
sage: Vrep = [[[0], [1]], [], []]
sage: Hrep = [[[0, 1], [1, -1]], []]
sage: p = Polyhedron_field(parent, Vrep, Hrep,
....: Vrep_minimal=True, Hrep_minimal=True)
sage: p
A 1-dimensional polyhedron in AA^1 defined as the convex hull of 2 vertices
TESTS::
sage: p = Polyhedron() # indirect doctests
sage: Vrep = [[[0], [1/2], [1]], [], []]
sage: Hrep = [[[0, 1], [1, -1]], []]
sage: p = Polyhedron_field(parent, Vrep, Hrep,
....: Vrep_minimal=False, Hrep_minimal=True)
Traceback (most recent call last):
...
ValueError: if both Vrep and Hrep are provided, they must be minimal...
"""
if Vrep is not None and Hrep is not None:
if not (Vrep_minimal and Hrep_minimal):
raise ValueError("if both Vrep and Hrep are provided, they must be minimal"
" and Vrep_minimal and Hrep_minimal must both be True")
Element.__init__(self, parent=parent)
self._init_Vrepresentation(*Vrep)
self._init_Hrepresentation(*Hrep)
else:
super(Polyhedron_field, self).__init__(parent, Vrep, Hrep, **kwds)
def _init_from_Vrepresentation(self, vertices, rays, lines,
minimize=True, verbose=False):
"""
Construct polyhedron from V-representation data.
INPUT:
- ``vertices`` -- list of points. Each point can be specified
as any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements.
- ``rays`` -- list of rays. Each ray can be specified as any
iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements.
- ``lines`` -- list of lines. Each line can be specified as
any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements.
- ``verbose`` -- boolean (default: ``False``). Whether to print
verbose output for debugging purposes.
EXAMPLES::
sage: p = Polyhedron(ambient_dim=2, backend='field')
sage: from sage.geometry.polyhedron.backend_field import Polyhedron_field
sage: Polyhedron_field._init_from_Vrepresentation(p, [(0,0)], [], [])
"""
from sage.geometry.polyhedron.double_description_inhomogeneous import Hrep2Vrep, Vrep2Hrep
H = Vrep2Hrep(self.base_ring(), self.ambient_dim(), vertices, rays, lines)
V = Hrep2Vrep(self.base_ring(), self.ambient_dim(),
H.inequalities, H.equations)
self._init_Vrepresentation_backend(V)
self._init_Hrepresentation_backend(H)
def _init_from_Hrepresentation(self, ieqs, eqns, minimize=True, verbose=False):
"""
Construct polyhedron from H-representation data.
INPUT:
- ``ieqs`` -- list of inequalities. Each line can be specified
as any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements.
- ``eqns`` -- list of equalities. Each line can be specified
as any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements.
- ``verbose`` -- boolean (default: ``False``). Whether to print
verbose output for debugging purposes.
TESTS::
sage: p = Polyhedron(ambient_dim=2, backend='field')
sage: from sage.geometry.polyhedron.backend_field import Polyhedron_field
sage: Polyhedron_field._init_from_Hrepresentation(p, [(1, 2, 3)], [])
"""
from sage.geometry.polyhedron.double_description_inhomogeneous import Hrep2Vrep, Vrep2Hrep
V = Hrep2Vrep(self.base_ring(), self.ambient_dim(), ieqs, eqns)
H = Vrep2Hrep(self.base_ring(), self.ambient_dim(),
V.vertices, V.rays, V.lines)
self._init_Vrepresentation_backend(V)
self._init_Hrepresentation_backend(H)
def _init_Vrepresentation(self, vertices, rays, lines):
"""
Create the Vrepresentation objects from the given minimal data.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra_field
sage: from sage.geometry.polyhedron.backend_field import Polyhedron_field
sage: parent = Polyhedra_field(AA, 1, 'field')
sage: Vrep = [[[0], [1]], [], []]
sage: Hrep = [[[0, 1], [1, -1]], []]
sage: p = Polyhedron_field(parent, Vrep, Hrep, # indirect doctest
....: Vrep_minimal=True, Hrep_minimal=True)
sage: p.vertices_list()
[[0], [1]]
"""
self._Vrepresentation = []
parent = self.parent()
for v in vertices:
parent._make_Vertex(self, v)
for r in rays:
parent._make_Ray(self, r)
for l in lines:
parent._make_Line(self, l)
self._Vrepresentation = tuple(self._Vrepresentation)
def _init_Vrepresentation_backend(self, Vrep):
"""
Create the V-representation objects from the double description.
EXAMPLES::
sage: p = Polyhedron(vertices=[(0,1/sqrt(2)),(sqrt(2),0),(4,sqrt(5)/6)],
....: base_ring=AA, backend='field') # indirect doctest
sage: p.Hrepresentation()
(An inequality (-0.1582178750233332?, 1.097777812326429?) x + 0.2237538646678492? >= 0,
An inequality (-0.1419794359520263?, -1.698172434277148?) x + 1.200789243901438? >= 0,
An inequality (0.3001973109753594?, 0.600394621950719?) x - 0.4245431085692869? >= 0)
sage: p.Vrepresentation()
(A vertex at (0.?e-15, 0.707106781186548?),
A vertex at (1.414213562373095?, 0),
A vertex at (4.000000000000000?, 0.372677996249965?))
"""
self._init_Vrepresentation(Vrep.vertices, Vrep.rays, Vrep.lines)
def _init_Hrepresentation(self, inequalities, equations):
"""
Create the Vrepresentation objects from the given minimal data.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra_field
sage: from sage.geometry.polyhedron.backend_field import Polyhedron_field
sage: parent = Polyhedra_field(AA, 1, 'field')
sage: Vrep = [[[0], [1]], [], []]
sage: Hrep = [[[0, 1], [1, -1]], []]
sage: p = Polyhedron_field(parent, Vrep, Hrep, # indirect doctest
....: Vrep_minimal=True, Hrep_minimal=True)
sage: p.inequalities_list()
[[0, 1], [1, -1]]
"""
self._Hrepresentation = []
parent = self.parent()
for ieq in inequalities:
parent._make_Inequality(self, ieq)
for eqn in equations:
parent._make_Equation(self, eqn)
self._Hrepresentation = tuple(self._Hrepresentation)
    def _init_Hrepresentation_backend(self, Hrep):
        """
        Create the H-representation objects from the double description.

        INPUT:

        - ``Hrep`` -- a double-description object exposing ``inequalities``
          and ``equations`` attributes.

        EXAMPLES::

            sage: p = Polyhedron(vertices=[(0,1/sqrt(2)),(sqrt(2),0),(4,sqrt(5)/6)],
            ....:                base_ring=AA, backend='field')  # indirect doctest
            sage: p.Hrepresentation()
            (An inequality (-0.1582178750233332?, 1.097777812326429?) x + 0.2237538646678492? >= 0,
             An inequality (-0.1419794359520263?, -1.698172434277148?) x + 1.200789243901438? >= 0,
             An inequality (0.3001973109753594?, 0.600394621950719?) x - 0.4245431085692869? >= 0)
            sage: p.Vrepresentation()
            (A vertex at (0.?e-15, 0.707106781186548?),
             A vertex at (1.414213562373095?, 0),
             A vertex at (4.000000000000000?, 0.372677996249965?))
        """
        # Unpack the two H-components and delegate to the generic
        # initialiser above.
        self._init_Hrepresentation(Hrep.inequalities, Hrep.equations)
    def _init_empty_polyhedron(self):
        """
        Initializes an empty polyhedron.

        TESTS::

            sage: empty = Polyhedron(backend='field', base_ring=AA); empty
            The empty polyhedron in AA^0
            sage: empty.Vrepresentation()
            ()
            sage: empty.Hrepresentation()
            (An equation -1 == 0,)

            sage: Polyhedron(vertices = [], backend='field')
            The empty polyhedron in QQ^0
            sage: Polyhedron(backend='field')._init_empty_polyhedron()
        """
        # No backend-specific state is needed for the empty polyhedron;
        # the generic base-class implementation suffices.
        super(Polyhedron_field, self)._init_empty_polyhedron()
| 36.74212 | 99 | 0.567652 | from __future__ import absolute_import
from .base import Polyhedron_base
from sage.structure.element import Element
class Polyhedron_field(Polyhedron_base):
def _is_zero(self, x):
return x == 0
def _is_nonneg(self, x):
return x >= 0
def _is_positive(self, x):
return x > 0
def __init__(self, parent, Vrep, Hrep, Vrep_minimal=None, Hrep_minimal=None, **kwds):
if Vrep is not None and Hrep is not None:
if not (Vrep_minimal and Hrep_minimal):
raise ValueError("if both Vrep and Hrep are provided, they must be minimal"
" and Vrep_minimal and Hrep_minimal must both be True")
Element.__init__(self, parent=parent)
self._init_Vrepresentation(*Vrep)
self._init_Hrepresentation(*Hrep)
else:
super(Polyhedron_field, self).__init__(parent, Vrep, Hrep, **kwds)
def _init_from_Vrepresentation(self, vertices, rays, lines,
minimize=True, verbose=False):
from sage.geometry.polyhedron.double_description_inhomogeneous import Hrep2Vrep, Vrep2Hrep
H = Vrep2Hrep(self.base_ring(), self.ambient_dim(), vertices, rays, lines)
V = Hrep2Vrep(self.base_ring(), self.ambient_dim(),
H.inequalities, H.equations)
self._init_Vrepresentation_backend(V)
self._init_Hrepresentation_backend(H)
def _init_from_Hrepresentation(self, ieqs, eqns, minimize=True, verbose=False):
from sage.geometry.polyhedron.double_description_inhomogeneous import Hrep2Vrep, Vrep2Hrep
V = Hrep2Vrep(self.base_ring(), self.ambient_dim(), ieqs, eqns)
H = Vrep2Hrep(self.base_ring(), self.ambient_dim(),
V.vertices, V.rays, V.lines)
self._init_Vrepresentation_backend(V)
self._init_Hrepresentation_backend(H)
def _init_Vrepresentation(self, vertices, rays, lines):
self._Vrepresentation = []
parent = self.parent()
for v in vertices:
parent._make_Vertex(self, v)
for r in rays:
parent._make_Ray(self, r)
for l in lines:
parent._make_Line(self, l)
self._Vrepresentation = tuple(self._Vrepresentation)
def _init_Vrepresentation_backend(self, Vrep):
self._init_Vrepresentation(Vrep.vertices, Vrep.rays, Vrep.lines)
def _init_Hrepresentation(self, inequalities, equations):
self._Hrepresentation = []
parent = self.parent()
for ieq in inequalities:
parent._make_Inequality(self, ieq)
for eqn in equations:
parent._make_Equation(self, eqn)
self._Hrepresentation = tuple(self._Hrepresentation)
def _init_Hrepresentation_backend(self, Hrep):
self._init_Hrepresentation(Hrep.inequalities, Hrep.equations)
def _init_empty_polyhedron(self):
super(Polyhedron_field, self)._init_empty_polyhedron()
| true | true |
f7fb975231ce929afd221757cd5244aa3ab77027 | 11,618 | py | Python | tech_project/lib/python2.7/site-packages/cms/utils/permissions.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | [
"MIT"
] | 4 | 2019-05-09T02:09:54.000Z | 2021-11-09T11:27:19.000Z | cms/utils/permissions.py | thisisalamin/django-cms | eeb1e4712b3866e243daf800c142e2199e4be9df | [
"BSD-3-Clause"
] | 12 | 2021-05-14T04:40:33.000Z | 2022-01-10T01:54:36.000Z | cms/utils/permissions.py | thisisalamin/django-cms | eeb1e4712b3866e243daf800c142e2199e4be9df | [
"BSD-3-Clause"
] | 4 | 2019-01-26T09:58:37.000Z | 2019-06-24T08:12:43.000Z | # -*- coding: utf-8 -*-
from collections import defaultdict
from contextlib import contextmanager
from functools import wraps
from threading import local
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import Group
from django.db.models import Q
from django.utils.decorators import available_attrs
from django.utils.lru_cache import lru_cache
from cms.constants import ROOT_USER_LEVEL, SCRIPT_USERNAME
from cms.exceptions import NoPermissionsException
from cms.models import GlobalPagePermission, Page, PagePermission
from cms.utils.compat import DJANGO_1_11
from cms.utils.conf import get_cms_setting
from cms.utils.page import get_clean_username
# thread local support
_thread_locals = local()
def set_current_user(user):
    """
    Assigns current user from request to thread_locals, used by
    CurrentUserMiddleware.
    """
    _thread_locals.user = user


def get_current_user():
    """
    Returns current user, or None
    """
    # ``user`` is only present on threads that went through
    # CurrentUserMiddleware (or an explicit set_current_user() call).
    return getattr(_thread_locals, 'user', None)


def get_current_user_name():
    """
    Return a cleaned username for the thread's current user, or
    SCRIPT_USERNAME when no user is bound (e.g. management commands).
    """
    current_user = get_current_user()

    if not current_user:
        return SCRIPT_USERNAME
    return get_clean_username(current_user)
@contextmanager
def current_user(user):
    """
    Changes the current user just within a context.

    The previous user is restored even when the wrapped block raises, so
    an exception can no longer leak the temporary user into the rest of
    the thread's lifetime.
    """
    old_user = get_current_user()
    set_current_user(user)
    try:
        yield
    finally:
        # Restore unconditionally; the original implementation skipped this
        # on exceptions, leaving ``user`` as the thread's current user.
        set_current_user(old_user)
def get_model_permission_codename(model, action):
    """
    Return the ``"<app_label>.<codename>"`` permission string for
    performing *action* (e.g. ``'add'``/``'change'``/``'delete'``)
    on *model*.
    """
    meta = model._meta
    return '{0}.{1}'.format(meta.app_label, get_permission_codename(action, meta))
def _has_global_permission(user, site, action):
    """
    Return True when *user* may perform *action* ('add'/'change'/'delete')
    on GlobalPagePermission records for *site*.
    """
    if not user.is_authenticated:
        return False

    if user.is_superuser:
        return True

    # First check the plain Django model permission for GlobalPagePermission.
    codename = get_model_permission_codename(GlobalPagePermission, action=action)

    if not user.has_perm(codename):
        return False

    if not get_cms_setting('PERMISSION'):
        # CMS permission system disabled: the model permission alone suffices.
        return True

    # With CMS permissions enabled, the user additionally needs a
    # GlobalPagePermission row granting change rights on this site.
    has_perm = (
        GlobalPagePermission
        .objects
        .get_with_change_permissions(user, site.pk)
        .exists()
    )
    return has_perm
# Convenience wrappers over _has_global_permission, one per CRUD action.

def user_can_add_global_permissions(user, site):
    return _has_global_permission(user, site, action='add')


def user_can_change_global_permissions(user, site):
    return _has_global_permission(user, site, action='change')


def user_can_delete_global_permissions(user, site):
    return _has_global_permission(user, site, action='delete')
def get_user_permission_level(user, site):
    """
    Returns highest user level from the page/permission hierarchy on which
    the user has can_change_permission. Also takes a look into user groups.
    Higher level equals a lower number. Users on top of the hierarchy have
    level 0. Level is the same as the page.depth attribute.

    Example:
                              A,W level 0
                            /    \\
                          user    B,GroupE level 1
                        /     \\
                      C,X     D,Y,W level 2

        Users A, W have user level 0. GroupE and all its users have user
        level 1. If user D is a member of GroupE, his user level will be 1,
        otherwise it is 2.

    Raises NoPermissionsException when the user holds neither global nor
    page-level change permissions anywhere on *site*.
    """
    if not user.is_authenticated:
        raise NoPermissionsException

    if user.is_superuser or not get_cms_setting('PERMISSION'):
        # Permission system off, or superuser: treat as root level.
        return ROOT_USER_LEVEL

    has_global_perms = (
        GlobalPagePermission
        .objects
        .get_with_change_permissions(user, site.pk)
        .exists()
    )

    if has_global_perms:
        return ROOT_USER_LEVEL

    try:
        # Ordering by the node path puts the highest page (shortest tree
        # path) the user can change permissions on first.
        permission = (
            PagePermission
            .objects
            .get_with_change_permissions(user, site)
            .select_related('page')
            .order_by('page__node__path')
        )[0]
    except IndexError:
        # user isn't assigned to any node
        raise NoPermissionsException
    return permission.page.node.depth
def cached_func(func):
    """
    Decorator that memoizes *func* per user object.

    The lru_cache instance is stored on the ``user`` object itself
    (attribute ``_djangocms_cached_func_<name>``), so the cache lives and
    dies with that user instance and different users never share entries.
    """
    @wraps(func, assigned=available_attrs(func))
    def cached_func(user, *args, **kwargs):
        func_cache_name = '_djangocms_cached_func_%s' % func.__name__

        if not hasattr(user, func_cache_name):
            # First call for this user: attach a fresh memoized copy.
            cached_func = lru_cache(maxsize=None)(func)
            setattr(user, func_cache_name, cached_func)
        return getattr(user, func_cache_name)(user, *args, **kwargs)
    # Allows us to access the un-cached function
    cached_func.without_cache = func
    return cached_func
@cached_func
def get_global_actions_for_user(user, site):
    """
    Return the set of action names granted to *user* by any
    GlobalPagePermission bound to *site* (memoized per user).
    """
    site_perms = GlobalPagePermission.objects.get_with_site(user, site.pk)
    return {
        action
        for perm in site_perms.iterator()
        for action in perm.get_configured_actions()
    }
@cached_func
def get_page_actions_for_user(user, site):
    """
    Return a mapping ``action name -> set of draft page ids`` on *site*
    for which *user* holds that action via PagePermission rows
    (memoized per user).
    """
    actions = defaultdict(set)
    pages = (
        Page
        .objects
        .drafts()
        .on_site(site)
        .select_related('node')
        .order_by('node__path')
    )

    nodes = [page.node for page in pages]
    pages_by_id = {}

    # Pre-build the tree hierarchy in memory so perm.get_page_ids() below
    # can expand to descendants without extra queries.
    for page in pages:
        if page.node.is_root():
            page.node._set_hierarchy(nodes)
        page.node.__dict__['item'] = page
        pages_by_id[page.pk] = page

    page_permissions = (
        PagePermission
        .objects
        .with_user(user)
        .filter(page__in=pages_by_id)
    )

    for perm in page_permissions.iterator():
        # set internal fk cache to our page with loaded ancestors and descendants
        if DJANGO_1_11:
            perm._page_cache = pages_by_id[perm.page_id]
        else:
            # for django >= 2.0
            PagePermission.page.field.set_cached_value(perm, pages_by_id[perm.page_id])

        page_ids = frozenset(perm.get_page_ids())

        for action in perm.get_configured_actions():
            actions[action].update(page_ids)
    return actions
def has_global_permission(user, site, action, use_cache=True):
    """
    Return True when *action* is among the globally granted actions for
    *user* on *site*; ``use_cache=False`` bypasses the per-user memoization.
    """
    getter = (get_global_actions_for_user if use_cache
              else get_global_actions_for_user.without_cache)
    return action in getter(user, site)
def has_page_permission(user, page, action, use_cache=True):
    """
    Return True when *user* holds *action* specifically for *page* (within
    the draft tree of the page's site); ``use_cache=False`` bypasses the
    per-user memoization.
    """
    getter = (get_page_actions_for_user if use_cache
              else get_page_actions_for_user.without_cache)
    return page.pk in getter(user, page.node.site)[action]
def get_subordinate_users(user, site):
    """
    Returns users queryset, containing all subordinate users to given user
    including users created by given user and not assigned to any page.

    Not assigned users must be returned, because they shouldn't get lost,
    and the user should still have the possibility to see them.

    Only users created_by the given user which are on the same, or lower,
    level are returned.

    If the user has global permissions or is a superuser, then he can see
    all the users.

    This function is currently used in PagePermissionInlineAdminForm to
    limit users in the permission combobox.

    Example:
                              A,W level 0
                            /    \\
                          user    B,GroupE level 1
                  Z       /     \\
                        C,X     D,Y,W level 2

        Rules: W was created by user, Z was created by user, but is not
        assigned to any page.

        Will return [user, C, X, D, Y, Z]. W was created by user, but is
        also assigned to a higher level.
    """
    from cms.utils.page_permissions import get_change_permissions_id_list

    try:
        user_level = get_user_permission_level(user, site)
    except NoPermissionsException:
        # user has no Global or Page permissions.
        # return only staff users created by user
        # whose page permission record has no page attached.
        qs = get_user_model().objects.distinct().filter(
            Q(is_staff=True) &
            Q(pageuser__created_by=user) &
            Q(pagepermission__page=None)
        )
        qs = qs.exclude(pk=user.pk).exclude(groups__user__pk=user.pk)
        return qs

    if user_level == ROOT_USER_LEVEL:
        return get_user_model().objects.all()

    page_id_allow_list = get_change_permissions_id_list(user, site, check_global=False)

    # normal query
    qs = get_user_model().objects.distinct().filter(
        Q(is_staff=True) &
        (Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__node__depth__gte=user_level))
        | (Q(pageuser__created_by=user) & Q(pagepermission__page=None))
    )
    # Never include the requesting user himself, nor members of his groups.
    qs = qs.exclude(pk=user.pk).exclude(groups__user__pk=user.pk)
    return qs
def get_subordinate_groups(user, site):
    """
    Similar to get_subordinate_users, but returns queryset of Groups instead
    of Users.
    """
    from cms.utils.page_permissions import get_change_permissions_id_list

    try:
        user_level = get_user_permission_level(user, site)
    except NoPermissionsException:
        # user has no Global or Page permissions.
        # return only groups created by user
        # whose page permission record has no page attached.
        groups = (
            Group
            .objects
            .filter(
                Q(pageusergroup__created_by=user) &
                Q(pagepermission__page__isnull=True)
            )
            .distinct()
        )
        # no permission no records
        # page_id_allow_list is empty
        return groups

    if user_level == ROOT_USER_LEVEL:
        # Root-level users see every group.
        return Group.objects.all()

    page_id_allow_list = get_change_permissions_id_list(user, site, check_global=False)

    return Group.objects.distinct().filter(
        (Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__node__depth__gte=user_level))
        | (Q(pageusergroup__created_by=user) & Q(pagepermission__page__isnull=True))
    )
def get_view_restrictions(pages):
    """
    Load all view restrictions for the pages.

    Returns a mapping of ``page id -> list of PagePermission rows`` with
    ``can_view`` set that apply to that page (directly or via ancestors).
    """
    restricted_pages = defaultdict(list)

    if not get_cms_setting('PERMISSION'):
        # Permissions are off. There's no concept of page restrictions.
        return restricted_pages

    if not pages:
        return restricted_pages

    nodes = [page.node for page in pages]
    pages_by_id = {}

    # Pre-build the tree hierarchy in memory so perm.get_page_ids() below
    # can expand to descendants without hitting the database again.
    for page in pages:
        if page.node.is_root():
            page.node._set_hierarchy(nodes)
        page.node.__dict__['item'] = page
        pages_by_id[page.pk] = page

    page_permissions = PagePermission.objects.filter(
        page__in=pages_by_id,
        can_view=True,
    )

    for perm in page_permissions:
        # set internal fk cache to our page with loaded ancestors and descendants
        if DJANGO_1_11:
            perm._page_cache = pages_by_id[perm.page_id]
        else:
            # for django >= 2.0
            PagePermission.page.field.set_cached_value(perm, pages_by_id[perm.page_id])

        for page_id in perm.get_page_ids():
            restricted_pages[page_id].append(perm)
    return restricted_pages
def has_plugin_permission(user, plugin_type, permission_type):
    """
    Check that *user* holds the Django model permission named by
    *permission_type* ('add', 'change' or 'delete') for the model behind
    the plugin registered as *plugin_type*.
    """
    from cms.plugin_pool import plugin_pool

    model = plugin_pool.get_plugin(plugin_type).model
    return user.has_perm(
        get_model_permission_codename(model, action=permission_type))
| 30.020672 | 115 | 0.661474 |
from collections import defaultdict
from contextlib import contextmanager
from functools import wraps
from threading import local
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import Group
from django.db.models import Q
from django.utils.decorators import available_attrs
from django.utils.lru_cache import lru_cache
from cms.constants import ROOT_USER_LEVEL, SCRIPT_USERNAME
from cms.exceptions import NoPermissionsException
from cms.models import GlobalPagePermission, Page, PagePermission
from cms.utils.compat import DJANGO_1_11
from cms.utils.conf import get_cms_setting
from cms.utils.page import get_clean_username
_thread_locals = local()
def set_current_user(user):
_thread_locals.user = user
def get_current_user():
return getattr(_thread_locals, 'user', None)
def get_current_user_name():
current_user = get_current_user()
if not current_user:
return SCRIPT_USERNAME
return get_clean_username(current_user)
@contextmanager
def current_user(user):
old_user = get_current_user()
set_current_user(user)
yield
set_current_user(old_user)
def get_model_permission_codename(model, action):
opts = model._meta
return opts.app_label + '.' + get_permission_codename(action, opts)
def _has_global_permission(user, site, action):
if not user.is_authenticated:
return False
if user.is_superuser:
return True
codename = get_model_permission_codename(GlobalPagePermission, action=action)
if not user.has_perm(codename):
return False
if not get_cms_setting('PERMISSION'):
return True
has_perm = (
GlobalPagePermission
.objects
.get_with_change_permissions(user, site.pk)
.exists()
)
return has_perm
def user_can_add_global_permissions(user, site):
return _has_global_permission(user, site, action='add')
def user_can_change_global_permissions(user, site):
return _has_global_permission(user, site, action='change')
def user_can_delete_global_permissions(user, site):
return _has_global_permission(user, site, action='delete')
def get_user_permission_level(user, site):
if not user.is_authenticated:
raise NoPermissionsException
if user.is_superuser or not get_cms_setting('PERMISSION'):
return ROOT_USER_LEVEL
has_global_perms = (
GlobalPagePermission
.objects
.get_with_change_permissions(user, site.pk)
.exists()
)
if has_global_perms:
return ROOT_USER_LEVEL
try:
permission = (
PagePermission
.objects
.get_with_change_permissions(user, site)
.select_related('page')
.order_by('page__node__path')
)[0]
except IndexError:
raise NoPermissionsException
return permission.page.node.depth
def cached_func(func):
@wraps(func, assigned=available_attrs(func))
def cached_func(user, *args, **kwargs):
func_cache_name = '_djangocms_cached_func_%s' % func.__name__
if not hasattr(user, func_cache_name):
cached_func = lru_cache(maxsize=None)(func)
setattr(user, func_cache_name, cached_func)
return getattr(user, func_cache_name)(user, *args, **kwargs)
# Allows us to access the un-cached function
cached_func.without_cache = func
return cached_func
@cached_func
def get_global_actions_for_user(user, site):
actions = set()
global_perms = (
GlobalPagePermission
.objects
.get_with_site(user, site.pk)
)
for global_perm in global_perms.iterator():
actions.update(global_perm.get_configured_actions())
return actions
@cached_func
def get_page_actions_for_user(user, site):
actions = defaultdict(set)
pages = (
Page
.objects
.drafts()
.on_site(site)
.select_related('node')
.order_by('node__path')
)
nodes = [page.node for page in pages]
pages_by_id = {}
for page in pages:
if page.node.is_root():
page.node._set_hierarchy(nodes)
page.node.__dict__['item'] = page
pages_by_id[page.pk] = page
page_permissions = (
PagePermission
.objects
.with_user(user)
.filter(page__in=pages_by_id)
)
for perm in page_permissions.iterator():
# set internal fk cache to our page with loaded ancestors and descendants
if DJANGO_1_11:
perm._page_cache = pages_by_id[perm.page_id]
else:
# for django >= 2.0
PagePermission.page.field.set_cached_value(perm, pages_by_id[perm.page_id])
page_ids = frozenset(perm.get_page_ids())
for action in perm.get_configured_actions():
actions[action].update(page_ids)
return actions
def has_global_permission(user, site, action, use_cache=True):
if use_cache:
actions = get_global_actions_for_user(user, site)
else:
actions = get_global_actions_for_user.without_cache(user, site)
return action in actions
def has_page_permission(user, page, action, use_cache=True):
if use_cache:
actions = get_page_actions_for_user(user, page.node.site)
else:
actions = get_page_actions_for_user.without_cache(user, page.node.site)
return page.pk in actions[action]
def get_subordinate_users(user, site):
from cms.utils.page_permissions import get_change_permissions_id_list
try:
user_level = get_user_permission_level(user, site)
except NoPermissionsException:
# user has no Global or Page permissions.
# return only staff users created by user
# whose page permission record has no page attached.
qs = get_user_model().objects.distinct().filter(
Q(is_staff=True) &
Q(pageuser__created_by=user) &
Q(pagepermission__page=None)
)
qs = qs.exclude(pk=user.pk).exclude(groups__user__pk=user.pk)
return qs
if user_level == ROOT_USER_LEVEL:
return get_user_model().objects.all()
page_id_allow_list = get_change_permissions_id_list(user, site, check_global=False)
# normal query
qs = get_user_model().objects.distinct().filter(
Q(is_staff=True) &
(Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__node__depth__gte=user_level))
| (Q(pageuser__created_by=user) & Q(pagepermission__page=None))
)
qs = qs.exclude(pk=user.pk).exclude(groups__user__pk=user.pk)
return qs
def get_subordinate_groups(user, site):
from cms.utils.page_permissions import get_change_permissions_id_list
try:
user_level = get_user_permission_level(user, site)
except NoPermissionsException:
# user has no Global or Page permissions.
# return only groups created by user
# whose page permission record has no page attached.
groups = (
Group
.objects
.filter(
Q(pageusergroup__created_by=user) &
Q(pagepermission__page__isnull=True)
)
.distinct()
)
# no permission no records
# page_id_allow_list is empty
return groups
if user_level == ROOT_USER_LEVEL:
return Group.objects.all()
page_id_allow_list = get_change_permissions_id_list(user, site, check_global=False)
return Group.objects.distinct().filter(
(Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__node__depth__gte=user_level))
| (Q(pageusergroup__created_by=user) & Q(pagepermission__page__isnull=True))
)
def get_view_restrictions(pages):
restricted_pages = defaultdict(list)
if not get_cms_setting('PERMISSION'):
# Permissions are off. There's no concept of page restrictions.
return restricted_pages
if not pages:
return restricted_pages
nodes = [page.node for page in pages]
pages_by_id = {}
for page in pages:
if page.node.is_root():
page.node._set_hierarchy(nodes)
page.node.__dict__['item'] = page
pages_by_id[page.pk] = page
page_permissions = PagePermission.objects.filter(
page__in=pages_by_id,
can_view=True,
)
for perm in page_permissions:
if DJANGO_1_11:
perm._page_cache = pages_by_id[perm.page_id]
else:
PagePermission.page.field.set_cached_value(perm, pages_by_id[perm.page_id])
for page_id in perm.get_page_ids():
restricted_pages[page_id].append(perm)
return restricted_pages
def has_plugin_permission(user, plugin_type, permission_type):
from cms.plugin_pool import plugin_pool
plugin_class = plugin_pool.get_plugin(plugin_type)
codename = get_model_permission_codename(
plugin_class.model,
action=permission_type,
)
return user.has_perm(codename)
| true | true |
f7fb97a1d5d5389acfdae56a02224b51aa7385f8 | 74 | py | Python | src/square_root.py | anikchatt/IS601_calculator | 9057e3205a689446bc9fae659cd3eed31aea91eb | [
"MIT"
] | null | null | null | src/square_root.py | anikchatt/IS601_calculator | 9057e3205a689446bc9fae659cd3eed31aea91eb | [
"MIT"
] | null | null | null | src/square_root.py | anikchatt/IS601_calculator | 9057e3205a689446bc9fae659cd3eed31aea91eb | [
"MIT"
] | null | null | null | def square_root(a):
a = float(a)
value = a ** .5
return value
| 14.8 | 19 | 0.540541 | def square_root(a):
a = float(a)
value = a ** .5
return value
| true | true |
f7fb97db3220bbc64ca010f0349740e9c79f310a | 178 | py | Python | bus_system/apps/bus_driver/admin.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
] | null | null | null | bus_system/apps/bus_driver/admin.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
] | null | null | null | bus_system/apps/bus_driver/admin.py | pygabo/bus_system | ffb76d3414e058286799f3df1cb551b26286e7c3 | [
"MIT"
] | null | null | null | # Core Django imports
from django.contrib import admin
# Imports from my apps
from bus_system.apps.bus_driver.models import BusDriverModel
admin.site.register(BusDriverModel)
| 22.25 | 60 | 0.825843 |
from django.contrib import admin
from bus_system.apps.bus_driver.models import BusDriverModel
admin.site.register(BusDriverModel)
| true | true |
f7fb98cc1d59d5cda7836c98dff49d525cd406e1 | 6,717 | py | Python | salt/modules/hashutil.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2020-01-02T09:03:21.000Z | 2020-01-02T09:03:21.000Z | salt/modules/hashutil.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | null | null | null | salt/modules/hashutil.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2020-01-02T09:03:24.000Z | 2020-01-02T09:03:24.000Z | # encoding: utf-8
'''
A collection of hashing and encoding functions
'''
from __future__ import absolute_import

# Import python libs
import base64
import hashlib
import hmac
import io

# Import Salt libs
import salt.exceptions
from salt.ext import six
import salt.utils
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils

if six.PY2:
    import StringIO
elif six.PY3:
    from io import StringIO
def digest(instr, checksum='md5'):
    '''
    Return a checksum digest for a string

    instr
        A string
    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Valid options: md5,
        sha256, sha512.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest 'get salted'
    '''
    try:
        hash_func = {
            'md5': __salt__['hashutil.md5_digest'],
            'sha256': __salt__['hashutil.sha256_digest'],
            'sha512': __salt__['hashutil.sha512_digest'],
        }[checksum]
    except KeyError:
        raise salt.exceptions.CommandExecutionError(
            "Hash func '{0}' is not supported.".format(checksum))
    return hash_func(instr)
def digest_file(infile, checksum='md5'):
    '''
    Return a checksum digest for a file

    infile
        A file path
    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Wraps the
        :py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
        function.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest_file /path/to/file
    '''
    # Fail early with a clear error instead of an IOError from fopen.
    if not __salt__['file.file_exists'](infile):
        raise salt.exceptions.CommandExecutionError(
            "File path '{0}' not found.".format(infile))

    with salt.utils.files.fopen(infile, 'rb') as fh_:
        return __salt__['hashutil.digest'](fh_.read(), checksum)
def base64_b64encode(instr):
    '''
    Encode a string as base64 using the "modern" Python interface.

    Among other possible differences, the "modern" encoder does not include
    newline ('\\n') characters in the encoded output.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_b64encode 'get salted'
    '''
    # Thin wrapper; the implementation lives in salt.utils.hashutils so it
    # can be shared outside the execution-module layer.
    return salt.utils.hashutils.base64_b64encode(instr)


def base64_b64decode(instr):
    '''
    Decode a base64-encoded string using the "modern" Python interface

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
    '''
    # Thin wrapper around the shared implementation.
    return salt.utils.hashutils.base64_b64decode(instr)


def base64_encodestring(instr):
    '''
    Encode a string as base64 using the "legacy" Python interface.

    Among other possible differences, the "legacy" encoder includes
    a newline ('\\n') character after every 76 characters and always
    at the end of the encoded string.

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_encodestring 'get salted'
    '''
    # Thin wrapper around the shared implementation.
    return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
    '''
    Read a file from the file system and return as a base64 encoded string

    .. versionadded:: 2016.3.0

    Pillar example:

    .. code-block:: yaml

        path:
          to:
            data: |
              {{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}

    The :py:func:`file.decode <salt.states.file.decode>` state function can be
    used to decode this data and write it to disk.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_encodefile /path/to/binary_file
    '''
    # base64.encode() works on binary streams. The previous implementation
    # used StringIO.StringIO, which is a NameError on Python 3 (only the
    # StringIO *class* is imported there) and cannot hold bytes anyway.
    encoded_f = io.BytesIO()

    with salt.utils.files.fopen(fname, 'rb') as f:
        base64.encode(f, encoded_f)

    # The encoded payload is pure ASCII; return it as a native str.
    return salt.utils.stringutils.to_str(encoded_f.getvalue())
def base64_decodestring(instr):
    '''
    Decode a base64-encoded string using the "legacy" Python interface

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
    '''
    # Thin wrapper around the shared implementation.
    return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
    r'''
    Decode a base64-encoded string and write the result to a file

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
    '''
    # base64.decode() consumes a binary stream, so feed it bytes; the old
    # StringIO.StringIO spelling raised NameError on Python 3 (only the
    # StringIO *class* is imported there).
    encoded_f = io.BytesIO(salt.utils.stringutils.to_bytes(instr))

    with salt.utils.files.fopen(outfile, 'wb') as f:
        base64.decode(encoded_f, f)

    return True
# The four wrappers below all delegate to salt.utils.hashutils so the
# implementations can be shared outside the execution-module layer.

def md5_digest(instr):
    '''
    Generate an md5 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.md5_digest 'get salted'
    '''
    return salt.utils.hashutils.md5_digest(instr)


def sha256_digest(instr):
    '''
    Generate an sha256 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.sha256_digest 'get salted'
    '''
    return salt.utils.hashutils.sha256_digest(instr)


def sha512_digest(instr):
    '''
    Generate an sha512 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.sha512_digest 'get salted'
    '''
    return salt.utils.hashutils.sha512_digest(instr)


def hmac_signature(string, shared_secret, challenge_hmac):
    '''
    Verify a challenging hmac signature against a string / shared-secret

    .. versionadded:: 2014.7.0

    Returns a boolean if the verification succeeded or failed.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
    '''
    return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
def github_signature(string, shared_secret, challenge_hmac):
    '''
    Verify a challenging hmac signature against a string / shared-secret for
    github webhooks.

    .. versionadded:: 2017.7.0

    Returns a boolean if the verification succeeded or failed.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
    '''
    msg = string
    key = shared_secret
    # Header format is '<hashtype>=<hexdigest>'; split only on the first '='
    # so anything after it is kept intact.
    hashtype, challenge = challenge_hmac.split('=', 1)

    if six.PY3:
        msg = salt.utils.stringutils.to_bytes(msg)
        key = salt.utils.stringutils.to_bytes(key)

    hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
    # Constant-time comparison; plain '==' would leak timing information to
    # an attacker probing webhook signatures.
    return hmac.compare_digest(hmac_hash.hexdigest(), challenge)
| 23.242215 | 122 | 0.659223 |
from __future__ import absolute_import
import base64
import hashlib
import hmac
import salt.exceptions
from salt.ext import six
import salt.utils
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
if six.PY2:
import StringIO
elif six.PY3:
from io import StringIO
def digest(instr, checksum='md5'):
hashing_funcs = {
'md5': __salt__['hashutil.md5_digest'],
'sha256': __salt__['hashutil.sha256_digest'],
'sha512': __salt__['hashutil.sha512_digest'],
}
hash_func = hashing_funcs.get(checksum)
if hash_func is None:
raise salt.exceptions.CommandExecutionError(
"Hash func '{0}' is not supported.".format(checksum))
return hash_func(instr)
def digest_file(infile, checksum='md5'):
if not __salt__['file.file_exists'](infile):
raise salt.exceptions.CommandExecutionError(
"File path '{0}' not found.".format(infile))
with salt.utils.files.fopen(infile, 'rb') as f:
file_hash = __salt__['hashutil.digest'](f.read(), checksum)
return file_hash
def base64_b64encode(instr):
return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
encoded_f = StringIO.StringIO()
with salt.utils.files.fopen(fname, 'rb') as f:
base64.encode(f, encoded_f)
encoded_f.seek(0)
return encoded_f.read()
def base64_decodestring(instr):
return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
encoded_f = StringIO.StringIO(instr)
with salt.utils.files.fopen(outfile, 'wb') as f:
base64.decode(encoded_f, f)
return True
def md5_digest(instr):
return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
return salt.utils.hashutils.sha512_digest(instr)
def hmac_signature(string, shared_secret, challenge_hmac):
return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
def github_signature(string, shared_secret, challenge_hmac):
msg = string
key = shared_secret
hashtype, challenge = challenge_hmac.split('=')
if six.PY3:
msg = salt.utils.stringutils.to_bytes(msg)
key = salt.utils.stringutils.to_bytes(key)
hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
return hmac_hash.hexdigest() == challenge
| true | true |
f7fb98df9756aed8d07d6a928edbfde6089a2406 | 8,003 | py | Python | Applications/FlaskApp/flask_app.py | cemac-ccs/Faze-In_App | 5a937360b34357785699e36587d6457c2dd88806 | [
"MIT"
] | null | null | null | Applications/FlaskApp/flask_app.py | cemac-ccs/Faze-In_App | 5a937360b34357785699e36587d6457c2dd88806 | [
"MIT"
] | null | null | null | Applications/FlaskApp/flask_app.py | cemac-ccs/Faze-In_App | 5a937360b34357785699e36587d6457c2dd88806 | [
"MIT"
] | null | null | null | """Routes for core Flask app."""
from flask import current_app as app
from flask import render_template, flash, redirect, url_for, request
from flask import session, abort, Blueprint
#from wtforms import Form, validators, StringField, SelectField, TextAreaField
#from wtforms import IntegerField, PasswordField, SelectMultipleField, widgets
import sqlite3
import pandas as pd
#import numpy as np
import os
#import io
#import json
from passlib.hash import sha256_crypt
# Modules for this site
from .access import ChangePwdForm, AccessForm
from .access import table_list, user_login
from .access import is_logged_in, is_logged_in_as_admin
from .access import InsertUser, DeleteUser, AssignRole
# Connect to database
#DATABASE = '/home/cemacccs/Faze-In_App/FAZEin.db'
DATABASE = 'FAZEin.db'
assert os.path.exists(DATABASE), "Unable to locate database"
# NOTE(review): hard-coded secret key — should come from config/env in
# production; anyone with this value can forge session cookies.
app.secret_key = 'secret'
# check_same_thread=False lets one connection be shared across request
# threads. NOTE(review): confirm write access is serialised — SQLite
# connections are not inherently safe under concurrent writers.
conn = sqlite3.connect(DATABASE, check_same_thread=False)
# In-memory hit counter; resets on every process restart (see hitcounter).
counter = 1
main_bp = Blueprint('main_bp', __name__,
                    template_folder='templates',
                    static_folder='static')
@main_bp.route('/', methods=["GET"])
def index():
    """Render the landing page (home.html.j2)."""
    return render_template('home.html.j2')
@main_bp.route("/")
def hitcounter():
global counter
counter += 1
return str(counter)
# Access ----------------------------------------------------------------------
# Login
@main_bp.route('/login', methods=["GET", "POST"])
def login():
    """Log a user in: GET renders the form, POST checks the credentials."""
    if 'logged_in' in session:
        flash('Already logged in', 'warning')
        return redirect(url_for('main_bp.index'))
    if request.method == 'POST':
        # Get form fields
        username = request.form['username']
        password_candidate = request.form['password']
        # NOTE(review): user_login's result is ignored — presumably it sets
        # the session / flashes on failure itself; verify failed logins are
        # actually reported to the user.
        user_login(username, password_candidate, conn)
        return redirect(url_for('main_bp.index'))
    if request.method == 'GET':
        return render_template('login.html.j2')
# Logout
@main_bp.route('/logout')
@is_logged_in
def logout():
    """Clear the session and redirect to the landing page."""
    session.clear()
    flash('You are now logged out', 'success')
    return redirect(url_for('main_bp.index'))
# Change password
@main_bp.route('/change-pwd', methods=["GET", "POST"])
@is_logged_in
def change_pwd():
    """Let the logged-in user change their own password.

    GET renders the form; POST verifies the current password and, on a
    match, stores a new sha256_crypt hash for the session's user.
    """
    username = session['username']
    form = ChangePwdForm(request.form)
    if request.method == 'POST' and form.validate():
        # Parameterised query — never splice values into SQL strings.
        user = pd.read_sql_query("SELECT * FROM users WHERE username IS ? ;",
                                 conn, params=(username,))
        password = user.password[0]
        current = form.current.data
        if sha256_crypt.verify(current, password):
            new_hash = sha256_crypt.hash(str(form.new.data))
            cur = conn.cursor()
            cur.execute("UPDATE users SET password = ? WHERE username IS ? ;",
                        (new_hash, str(username)))
            conn.commit()
            flash('Password changed', 'success')
            return redirect(url_for('main_bp.change_pwd'))
        else:
            flash('Current password incorrect', 'danger')
            return redirect(url_for('main_bp.change_pwd'))
    return render_template('change-pwd.html.j2', form=form)
# Access settings for a given user
@main_bp.route('/account/<string:username>', methods=['GET', 'POST'])
@is_logged_in
def account(username):
    """Show account details (username and role) for a user."""
    role = session['usertype']
    # NOTE(review): ``username`` comes from the URL while ``role`` comes from
    # the session — any logged-in user can view /account/<other-name> with
    # their own role displayed; confirm this is intended.
    # potential to add affiliations and email to give more bespoke access to
    # who can edit which volcanoes. Eg. Project or Institute
    return render_template('account.html.j2', username=username, Role=role)
# Additional logged in as Admin only pages ------------------------------
@main_bp.route('/admin/information', methods=['GET', 'POST'])
@is_logged_in_as_admin
def admininfo():
    """Render the admin information page (admin only)."""
    return render_template('admininfo.html.j2')
@main_bp.route('/admin/users', methods=['GET', 'POST'])
@is_logged_in_as_admin
def ViewOrAddUsers():
    """Admin view listing all users together with their assigned roles."""
    df = pd.read_sql_query("SELECT * FROM Users ;", conn)
    # Mask password hashes before display.
    df['password'] = '********'
    # Join users to role names via the users_roles link table.
    u2r = pd.read_sql_query("SELECT * FROM users_roles ;", conn)
    roles = pd.read_sql_query("SELECT * FROM roles ;", conn)
    u2r2 = pd.merge(u2r, roles, on='group_id')
    del u2r2['group_id']
    # Outer merge keeps users that have no role; then drop rows whose user
    # record is missing (role entries for deleted users).
    usersandroles = pd.merge(df, u2r2, on='id', how='outer')
    usersandroles.rename(columns={'name': 'Role'}, inplace=True)
    usersandroles = usersandroles.dropna(subset=['username'])
    # Human-readable column headers (skip the leading id column).
    colnames = [s.replace("_", " ").title() for s in usersandroles.columns.values[1:]]
    return render_template('view.html.j2', title='Users', colnames=colnames,
                           tableClass='Users', editLink="edit",
                           data=usersandroles)
# Add entry
@main_bp.route('/add/Users', methods=["GET", "POST"])
@is_logged_in_as_admin
def add():
    """Add a new user (admin only): GET renders the form, POST inserts."""
    # NOTE(review): Users_Form is not imported in this module; it was
    # previously hidden behind eval("Users_Form"). Referencing the name
    # directly keeps the same failure mode (NameError) while removing
    # the eval() code smell.
    form = Users_Form(request.form)
    if request.method == 'POST' and form.validate():
        if len(str(form.password.data)) < 8:
            # Bug fix: the old code returned flash()'s result (None), which
            # is not a valid Flask response. Flash, then redirect back.
            flash('password must be more than 8 characters', 'danger')
            return redirect(url_for('main_bp.add'))
        # Store only the sha256_crypt hash, never the plaintext password.
        form.password.data = sha256_crypt.hash(str(form.password.data))
        formdata = [field.data for field in form]
        # formdata[0]/[1] are the username and password fields in form order.
        InsertUser(formdata[0], formdata[1], conn)
        flash('User Added', 'success')
        return redirect(url_for('main_bp.add', tableClass='Users'))
    return render_template('add.html.j2', title='Add Users', tableClass='Users',
                           form=form)
# Delete entry
@main_bp.route('/delete/<string:tableClass>/<string:id>', methods=['POST'])
@is_logged_in_as_admin
def delete(tableClass, id):
    """Delete the user with the given DB id (admin only).

    ``tableClass`` is part of the URL contract but unused here.
    """
    # Parameterised query: ``id`` comes straight from the URL and must never
    # be spliced into the SQL text (SQL injection).
    user = pd.read_sql_query("SELECT * FROM Users WHERE id = ? ;", conn,
                             params=(id,))
    username = user.username
    DeleteUser(username[0], conn)
    flash('User Deleted', 'success')
    return redirect(url_for('main_bp.ViewOrAddUsers'))
# Access settings for a given user
@main_bp.route('/access/<string:id>', methods=['GET', 'POST'])
@is_logged_in_as_admin
def access(id):
    """View or change the role assigned to the user with DB id ``id``."""
    form = AccessForm(request.form)
    form.Role.choices = table_list('roles', 'name', conn)[1:]
    # Parameterised queries throughout: ``id`` comes straight from the URL
    # and must never be spliced into SQL text (SQL injection).
    user = pd.read_sql_query("SELECT * FROM Users WHERE id = ? ;", conn,
                             params=(id,))
    if user.empty:
        abort(404)
    # Look up the user's current role via the users_roles link table.
    u2r = pd.read_sql_query("SELECT * FROM users_roles WHERE id = ? ;", conn,
                            params=(id,))
    gid = u2r.group_id[0]
    # int() avoids binding a numpy integer into the sqlite driver.
    current_role = pd.read_sql_query("SELECT * FROM roles WHERE group_id = ? ;",
                                     conn, params=(int(gid),))
    # If the admin submits the edit form, assign the newly chosen role.
    # (A stray debug print was removed here.)
    if request.method == 'POST' and form.validate():
        new_role = form.Role.data
        AssignRole(user.username[0], new_role, conn)
        flash('Edits successful', 'success')
        return redirect(url_for('main_bp.ViewOrAddUsers'))
    # Pre-populate form fields with existing data; username is read-only.
    form.username.render_kw = {'readonly': 'readonly'}
    form.username.data = user.username[0]
    form.Role.data = current_role.name[0]
    return render_template('access.html.j2', form=form, id=id)
# static information pages ---------------------------------------------------
@main_bp.route('/copyright', methods=["GET"])
def copyright():
    """Render the static copyright page."""
    # NOTE: shadows the ``copyright`` builtin; harmless for a view function.
    return render_template('copyright.html.j2')
@main_bp.route('/privacy', methods=["GET"])
def privacy():
    """Render the static privacy policy page."""
    return render_template('privacy.html.j2')
@main_bp.route('/contribute', methods=["GET"])
def contribute():
    """Render the contributor guidelines page."""
    return render_template('contributor_guidelines.html.j2')
@main_bp.route('/about', methods=["GET"])
def about():
    """Render the static about page."""
    return render_template('about.html.j2')
@main_bp.route('/contact', methods=["GET"])
def contact():
    """Render the static contact page."""
    return render_template('contact.html.j2')
@main_bp.route('/modelinfo', methods=["GET"])
def infopage1():
    """Render the model information page."""
    return render_template('modelinfo.html.j2')
| 35.411504 | 86 | 0.638386 | from flask import current_app as app
from flask import render_template, flash, redirect, url_for, request
from flask import session, abort, Blueprint
import sqlite3
import pandas as pd
import os
from passlib.hash import sha256_crypt
from .access import ChangePwdForm, AccessForm
from .access import table_list, user_login
from .access import is_logged_in, is_logged_in_as_admin
from .access import InsertUser, DeleteUser, AssignRole
DATABASE = 'FAZEin.db'
assert os.path.exists(DATABASE), "Unable to locate database"
app.secret_key = 'secret'
conn = sqlite3.connect(DATABASE, check_same_thread=False)
counter = 1
main_bp = Blueprint('main_bp', __name__,
template_folder='templates',
static_folder='static')
@main_bp.route('/', methods=["GET"])
def index():
return render_template('home.html.j2')
@main_bp.route("/")
def hitcounter():
global counter
counter += 1
return str(counter)
@main_bp.route('/login', methods=["GET", "POST"])
def login():
if 'logged_in' in session:
flash('Already logged in', 'warning')
return redirect(url_for('main_bp.index'))
if request.method == 'POST':
username = request.form['username']
password_candidate = request.form['password']
user_login(username, password_candidate, conn)
return redirect(url_for('main_bp.index'))
if request.method == 'GET':
return render_template('login.html.j2')
@main_bp.route('/logout')
@is_logged_in
def logout():
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('main_bp.index'))
@main_bp.route('/change-pwd', methods=["GET", "POST"])
@is_logged_in
def change_pwd():
username = session['username']
form = ChangePwdForm(request.form)
if request.method == 'POST' and form.validate():
user = pd.read_sql_query("SELECT * FROM users where username is '"
+ username + "' ;", conn)
password = user.password[0]
current = form.current.data
if sha256_crypt.verify(current, password):
user.password = sha256_crypt.hash(str(form.new.data))
sql = "UPDATE users SET password = ? WHERE username is ? ;"
cur = conn.cursor()
cur.execute(sql, (user.password[0], str(username)))
conn.commit()
flash('Password changed', 'success')
return redirect(url_for('main_bp.change_pwd'))
else:
flash('Current password incorrect', 'danger')
return redirect(url_for('main_bp.change_pwd'))
return render_template('change-pwd.html.j2', form=form)
@main_bp.route('/account/<string:username>', methods=['GET', 'POST'])
@is_logged_in
def account(username):
role = session['usertype']
return render_template('account.html.j2', username=username, Role=role)
@main_bp.route('/admin/information', methods=['GET', 'POST'])
@is_logged_in_as_admin
def admininfo():
return render_template('admininfo.html.j2')
@main_bp.route('/admin/users', methods=['GET', 'POST'])
@is_logged_in_as_admin
def ViewOrAddUsers():
df = pd.read_sql_query("SELECT * FROM Users ;", conn)
df['password'] = '********'
u2r = pd.read_sql_query("SELECT * FROM users_roles ;", conn)
roles = pd.read_sql_query("SELECT * FROM roles ;", conn)
u2r2 = pd.merge(u2r, roles, on='group_id')
del u2r2['group_id']
usersandroles = pd.merge(df, u2r2, on='id', how='outer')
usersandroles.rename(columns={'name': 'Role'}, inplace=True)
usersandroles = usersandroles.dropna(subset=['username'])
colnames = [s.replace("_", " ").title() for s in usersandroles.columns.values[1:]]
return render_template('view.html.j2', title='Users', colnames=colnames,
tableClass='Users', editLink="edit",
data=usersandroles)
@main_bp.route('/add/Users', methods=["GET", "POST"])
@is_logged_in_as_admin
def add():
form = eval("Users_Form")(request.form)
if request.method == 'POST' and form.validate():
if len(str(form.password.data)) < 8:
return flash('password must be more than 8 characters',
'danger')
form.password.data = sha256_crypt.hash(str(form.password.data))
formdata = []
for f, field in enumerate(form):
formdata.append(field.data)
InsertUser(formdata[0], formdata[1], conn)
flash('User Added', 'success')
return redirect(url_for('main_bp.add', tableClass='Users'))
return render_template('add.html.j2', title='Add Users', tableClass='Users',
form=form)
@main_bp.route('/delete/<string:tableClass>/<string:id>', methods=['POST'])
@is_logged_in_as_admin
def delete(tableClass, id):
user = pd.read_sql_query("SELECT * FROM Users where id = " + id + " ;",
conn)
username = user.username
DeleteUser(username[0], conn)
flash('User Deleted', 'success')
return redirect(url_for('main_bp.ViewOrAddUsers'))
@main_bp.route('/access/<string:id>', methods=['GET', 'POST'])
@is_logged_in_as_admin
def access(id):
form = AccessForm(request.form)
form.Role.choices = table_list('roles', 'name', conn)[1:]
user = pd.read_sql_query("SELECT * FROM Users where id = " + id + " ;",
conn)
if user.empty:
abort(404)
u2r = pd.read_sql_query("SELECT * FROM users_roles WHERE id = " + id +
";", conn)
gid = u2r.group_id[0]
current_role = pd.read_sql_query("SELECT * FROM roles WHERE group_id = "
+ str(gid) + ";", conn)
if request.method == 'POST' and form.validate():
new_role = form.Role.data
AssignRole(user.username[0], new_role, conn)
print('test')
flash('Edits successful', 'success')
return redirect(url_for('main_bp.ViewOrAddUsers'))
form.username.render_kw = {'readonly': 'readonly'}
form.username.data = user.username[0]
form.Role.data = current_role.name[0]
return render_template('access.html.j2', form=form, id=id)
@main_bp.route('/copyright', methods=["GET"])
def copyright():
return render_template('copyright.html.j2')
@main_bp.route('/privacy', methods=["GET"])
def privacy():
return render_template('privacy.html.j2')
@main_bp.route('/contribute', methods=["GET"])
def contribute():
return render_template('contributor_guidelines.html.j2')
@main_bp.route('/about', methods=["GET"])
def about():
return render_template('about.html.j2')
@main_bp.route('/contact', methods=["GET"])
def contact():
return render_template('contact.html.j2')
@main_bp.route('/modelinfo', methods=["GET"])
def infopage1():
return render_template('modelinfo.html.j2')
| true | true |
f7fb990918f7ec59ef84bbe86bbf6ef0eab0ef09 | 464 | py | Python | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_backpack_field_03.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_backpack_field_03.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_backpack_field_03.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_backpack_field_03.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 27.294118 | 90 | 0.737069 | true | true | |
f7fb99a703ddf58395a12a45c57820bb259a2284 | 3,766 | py | Python | submitit/core/test_utils.py | jrapin/submitit | dac5c6b76a81401aa7d44a32357872ebf92b968a | [
"MIT"
] | 602 | 2020-05-12T08:06:19.000Z | 2022-03-31T11:44:21.000Z | submitit/core/test_utils.py | mbchang/submitit | e37899bce0c7c58e3cc46ecb5b7fa8ce941fc3d7 | [
"MIT"
] | 1,639 | 2020-06-01T12:06:20.000Z | 2022-03-31T17:06:19.000Z | submitit/core/test_utils.py | mbchang/submitit | e37899bce0c7c58e3cc46ecb5b7fa8ce941fc3d7 | [
"MIT"
] | 55 | 2020-05-20T14:05:52.000Z | 2022-03-11T21:49:53.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import shutil
import sys
from pathlib import Path
from typing import Optional
import pytest
from . import utils
@pytest.mark.parametrize("existing_content", [None, "blublu"])  # type: ignore
def test_temporary_save_path(tmp_path: Path, existing_content: Optional[str]) -> None:
    """temporary_save_path yields a ``*.save_tmp`` sibling path; the target
    keeps any previous content until the context exits, then receives the
    newly written data."""
    filepath = tmp_path / "save_and_move_test.txt"
    if existing_content:
        filepath.write_text(existing_content)
    with utils.temporary_save_path(filepath) as tmp:
        assert str(tmp).endswith(".txt.save_tmp")
        tmp.write_text("12")
        if existing_content:
            # Target must be untouched while the tmp file is being written.
            assert filepath.read_text() == existing_content
    assert filepath.read_text() == "12"
def test_temporary_save_path_error() -> None:
    """Passing a bare filename (no existing parent path) raises FileNotFoundError."""
    with pytest.raises(FileNotFoundError):
        with utils.temporary_save_path("save_and_move_test"):
            pass
def _three_time(x: int) -> int:
return 3 * x
def test_delayed(tmp_path: Path) -> None:
    """DelayedSubmission evaluates lazily, caches its result, and the done
    flag survives a dump/load round-trip."""
    delayed = utils.DelayedSubmission(_three_time, 4)
    assert not delayed.done()
    assert delayed.result() == 12
    assert delayed.done()
    delayed_pkl = tmp_path / "test_delayed.pkl"
    delayed.dump(delayed_pkl)
    delayed2 = utils.DelayedSubmission.load(delayed_pkl)
    assert delayed2.done()
def test_environment_variable_context() -> None:
    """environment_variables() sets variables for the duration of the block
    and restores the previous state on exit, including when nested."""
    name = "ENV_VAR_TEST"
    assert name not in os.environ
    with utils.environment_variables(ENV_VAR_TEST="blublu"):
        assert os.environ[name] == "blublu"
        with utils.environment_variables(ENV_VAR_TEST="blublu2"):
            assert os.environ[name] == "blublu2"
        assert os.environ[name] == "blublu"
    assert name not in os.environ
def test_slurmpaths_id_independent() -> None:
    """The deepest folder whose path contains no %j job-id placeholder is returned."""
    path = "test/truc/machin_%j/name"
    output = utils.JobPaths.get_first_id_independent_folder(path)
    assert output.name == "truc"
def test_sanitize() -> None:
    """sanitize() replaces non-alphanumeric characters with underscores by
    default, or merely quotes the string when only_alphanum=False."""
    assert utils.sanitize("AlreadySanitized") == "AlreadySanitized"
    assert utils.sanitize("Name with space") == "Name_with_space"
    assert utils.sanitize("Name with space", only_alphanum=False) == '"Name with space"'
    assert utils.sanitize("Name with many spaces") == "Name_with_many_spaces"
    assert utils.sanitize(" Non alph@^ Nüm%") == "_Non_alph_Nüm_"
def test_archive_dev_folders(tmp_path: Path) -> None:
    """archive_dev_folders produces a tar.gz whose extraction recreates the
    archived folder (here: this package's ``core`` directory)."""
    utils.archive_dev_folders([Path(__file__).parent], outfile=tmp_path.with_suffix(".tar.gz"))
    shutil.unpack_archive(str(tmp_path.with_suffix(".tar.gz")), extract_dir=tmp_path)
    assert (tmp_path / "core").exists()
def test_command_function() -> None:
    """CommandFunction runs a subprocess, returns its stdout, and raises
    FailedJobError when the command exits with an error."""
    # This will call `submitit.core.test_core.do_nothing`
    command = [sys.executable, "-m", "submitit.core.test_core"]
    word = "testblublu12"
    output = utils.CommandFunction(command)(word)
    assert output is not None
    assert word in output
    with pytest.raises(utils.FailedJobError, match="Too bad"):
        # error=True will make `do_nothing` fail
        utils.CommandFunction(command, verbose=True)(error=True)
def test_command_function_deadlock(executor) -> None:
    """Regression test: CommandFunction must keep draining the child's output
    pipes — a chatty subprocess (~1 MB printed here) would otherwise fill the
    pipe buffer and deadlock the job."""
    code = """
import sys;
print(sys.__stderr__)
# The goal here is to fill up the stderr pipe buffer.
for i in range({n}):
    print("-" * 1024, file=sys.stdout)
print("printed {n} lines to stderr")
    """
    fn1 = utils.CommandFunction([sys.executable, "-c", code.format(n=10)])
    executor.update_parameters(timeout_min=2 / 60)
    j1 = executor.submit(fn1)
    assert "10 lines" in j1.result()
    fn2 = utils.CommandFunction(["python", "-c", code.format(n=1000)])
    j2 = executor.submit(fn2)
    assert "1000 lines" in j2.result()
| 33.625 | 95 | 0.701806 |
import os
import shutil
import sys
from pathlib import Path
from typing import Optional
import pytest
from . import utils
@pytest.mark.parametrize("existing_content", [None, "blublu"])
def test_temporary_save_path(tmp_path: Path, existing_content: Optional[str]) -> None:
filepath = tmp_path / "save_and_move_test.txt"
if existing_content:
filepath.write_text(existing_content)
with utils.temporary_save_path(filepath) as tmp:
assert str(tmp).endswith(".txt.save_tmp")
tmp.write_text("12")
if existing_content:
assert filepath.read_text() == existing_content
assert filepath.read_text() == "12"
def test_temporary_save_path_error() -> None:
with pytest.raises(FileNotFoundError):
with utils.temporary_save_path("save_and_move_test"):
pass
def _three_time(x: int) -> int:
return 3 * x
def test_delayed(tmp_path: Path) -> None:
delayed = utils.DelayedSubmission(_three_time, 4)
assert not delayed.done()
assert delayed.result() == 12
assert delayed.done()
delayed_pkl = tmp_path / "test_delayed.pkl"
delayed.dump(delayed_pkl)
delayed2 = utils.DelayedSubmission.load(delayed_pkl)
assert delayed2.done()
def test_environment_variable_context() -> None:
name = "ENV_VAR_TEST"
assert name not in os.environ
with utils.environment_variables(ENV_VAR_TEST="blublu"):
assert os.environ[name] == "blublu"
with utils.environment_variables(ENV_VAR_TEST="blublu2"):
assert os.environ[name] == "blublu2"
assert os.environ[name] == "blublu"
assert name not in os.environ
def test_slurmpaths_id_independent() -> None:
path = "test/truc/machin_%j/name"
output = utils.JobPaths.get_first_id_independent_folder(path)
assert output.name == "truc"
def test_sanitize() -> None:
assert utils.sanitize("AlreadySanitized") == "AlreadySanitized"
assert utils.sanitize("Name with space") == "Name_with_space"
assert utils.sanitize("Name with space", only_alphanum=False) == '"Name with space"'
assert utils.sanitize("Name with many spaces") == "Name_with_many_spaces"
assert utils.sanitize(" Non alph@^ Nüm%") == "_Non_alph_Nüm_"
def test_archive_dev_folders(tmp_path: Path) -> None:
utils.archive_dev_folders([Path(__file__).parent], outfile=tmp_path.with_suffix(".tar.gz"))
shutil.unpack_archive(str(tmp_path.with_suffix(".tar.gz")), extract_dir=tmp_path)
assert (tmp_path / "core").exists()
def test_command_function() -> None:
command = [sys.executable, "-m", "submitit.core.test_core"]
word = "testblublu12"
output = utils.CommandFunction(command)(word)
assert output is not None
assert word in output
with pytest.raises(utils.FailedJobError, match="Too bad"):
utils.CommandFunction(command, verbose=True)(error=True)
def test_command_function_deadlock(executor) -> None:
code = """
import sys;
print(sys.__stderr__)
# The goal here is to fill up the stderr pipe buffer.
for i in range({n}):
print("-" * 1024, file=sys.stdout)
print("printed {n} lines to stderr")
"""
fn1 = utils.CommandFunction([sys.executable, "-c", code.format(n=10)])
executor.update_parameters(timeout_min=2 / 60)
j1 = executor.submit(fn1)
assert "10 lines" in j1.result()
fn2 = utils.CommandFunction(["python", "-c", code.format(n=1000)])
j2 = executor.submit(fn2)
assert "1000 lines" in j2.result()
| true | true |
f7fb99f2e45a7b9638c12d2cd6ab44c44cbcda9a | 24,358 | py | Python | stable_baselines/deepq_lstm/build_graph.py | kevslinger/stable-baselines | 4bf9f3c1db49f462f5fb35df967d836d92a3dbcd | [
"MIT"
] | null | null | null | stable_baselines/deepq_lstm/build_graph.py | kevslinger/stable-baselines | 4bf9f3c1db49f462f5fb35df967d836d92a3dbcd | [
"MIT"
] | null | null | null | stable_baselines/deepq_lstm/build_graph.py | kevslinger/stable-baselines | 4bf9f3c1db49f462f5fb35df967d836d92a3dbcd | [
"MIT"
] | null | null | null | """Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to chose an action given an observation
:param observation: (Any) Observation that can be feed into the output of make_obs_ph
:param stochastic: (bool) if set to False all the actions are always deterministic (default False)
:param update_eps_ph: (float) update epsilon a new value, if negative not update happens (default: no update)
:return: (TensorFlow Tensor) tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to chose an action given an observation
:param observation: (Any) Observation that can be feed into the output of make_obs_ph
:param stochastic: (bool) if set to False all the actions are always deterministic (default False)
:param update_eps_ph: (float) update epsilon a new value, if negative not update happens
(default: no update)
:param reset_ph: (bool) reset the perturbed policy by sampling a new perturbation
:param update_param_noise_threshold_ph: (float) the desired threshold for the difference between
non-perturbed and perturbed policy
:param update_param_noise_scale_ph: (bool) whether or not to update the scale of the noise for the next time it is
re-perturbed
:return: (TensorFlow Tensor) tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
:param obs_t: (Any) a batch of observations
:param action: (numpy int) actions that were selected upon seeing obs_t. dtype must be int32 and shape must be
(batch_size,)
:param reward: (numpy float) immediate reward attained after executing those actions dtype must be float32 and
shape must be (batch_size,)
:param obs_tp1: (Any) observations that followed obs_t
:param done: (numpy bool) 1 if obs_t was the last observation in the episode and 0 otherwise obs_tp1 gets ignored,
but must be of the valid shape. dtype must be float32 and shape must be (batch_size,)
:param weight: (numpy float) importance weights for every element of the batch (gradient is multiplied by the
importance weight) dtype must be float32 and shape must be (batch_size,)
:return: (numpy float) td_error: a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example for Atari
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
from gym.spaces import MultiDiscrete
from stable_baselines.common import tf_util
def scope_vars(scope, trainable_only=False):
    """
    Get variables inside a scope
    The scope can be specified as a string

    :param scope: (str or VariableScope) scope in which the variables reside.
    :param trainable_only: (bool) whether or not to return only the variables that were marked as trainable.
    :return: ([TensorFlow Tensor]) vars: list of variables in `scope`.
    """
    # Pick the graph collection to query, then normalise the scope to a
    # string before delegating to tf.get_collection.
    if trainable_only:
        collection = tf.GraphKeys.TRAINABLE_VARIABLES
    else:
        collection = tf.GraphKeys.GLOBAL_VARIABLES
    scope_str = scope if isinstance(scope, str) else scope.name
    return tf.get_collection(collection, scope=scope_str)
def scope_name():
    """
    Returns the name of current scope as a string, e.g. deepq/q_func

    :return: (str) the name of current scope
    """
    # Relies on TF graph-mode variable scoping (the currently entered scope).
    return tf.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
    """
    Appends parent scope name to `relative_scope_name`

    :return: (str) the absolute name of the scope
    """
    parent = scope_name()
    return "{}/{}".format(parent, relative_scope_name)
def default_param_noise_filter(var):
    """
    check whether or not a variable is perturbable or not

    :param var: (TensorFlow Tensor) the variable
    :return: (bool) can be perturb
    """
    # Non-trainable variables are never perturbed.
    if var not in tf.trainable_variables():
        return False
    # Only fully-connected layers are perturbed. The remaining layers are
    # likely conv or layer-norm layers, which are left untouched (the former
    # because they only extract features, the latter because they are used
    # for normalization). If you change your network, you will likely want
    # to re-consider which layers to perturb and which to keep untouched.
    return "fully_connected" in var.name
def build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=None):
    """
    Creates the act function:

    :param q_func: (DQNPolicy) the policy
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param stochastic_ph: (TensorFlow Tensor) the stochastic placeholder
    :param update_eps_ph: (TensorFlow Tensor) the update_eps placeholder
    :param sess: (TensorFlow session) The current TensorFlow session
    :param layers: forwarded unchanged to the policy constructor ``q_func``
    :return: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor, (TensorFlow Tensor, TensorFlow Tensor)
        act function to select and action given observation (See the top of the file for details),
        A tuple containing the observation placeholder and the processed observation placeholder respectively.
    """
    # (Leftover debug prints and a tf.all_variables() dump were removed here.)
    eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))

    policy = q_func(sess, ob_space, ac_space, 1, 1, None, layers=layers)
    obs_phs = (policy.obs_ph, policy.processed_obs)
    deterministic_actions = tf.argmax(policy.q_values, axis=1)

    # Epsilon-greedy exploration: with probability ``eps`` pick a uniformly
    # random action, otherwise the argmax of the Q-values.
    batch_size = tf.shape(policy.obs_ph)[0]
    n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
    random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
    chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
    stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

    output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
    # eps is only overwritten when a non-negative update value is supplied.
    update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
    _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph],
                            outputs=output_actions,
                            givens={update_eps_ph: -1.0, stochastic_ph: True},
                            updates=[update_eps_expr])

    def act(obs, stochastic=True, update_eps=-1):
        return _act(obs, stochastic, update_eps)

    return act, obs_phs
def build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
param_noise_filter_func=None):
"""
Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
:param q_func: (DQNPolicy) the policy
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param stochastic_ph: (TensorFlow Tensor) the stochastic placeholder
:param update_eps_ph: (TensorFlow Tensor) the update_eps placeholder
:param sess: (TensorFlow session) The current TensorFlow session
:param param_noise_filter_func: (function (TensorFlow Tensor): bool) function that decides whether or not a
variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter
is used by default.
:return: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor, (TensorFlow Tensor, TensorFlow Tensor)
act function to select and action given observation (See the top of the file for details),
A tuple containing the observation placeholder and the processed observation placeholder respectively.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01),
trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05),
trainable=False)
# Unmodified Q.
policy = q_func(sess, ob_space, ac_space, 1, 1, None)
obs_phs = (policy.obs_ph, policy.processed_obs)
# Perturbable Q used for the actual rollout.
with tf.variable_scope("perturbed_model", reuse=False):
perturbable_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
def perturb_vars(original_scope, perturbed_scope):
"""
We have to wrap this code into a function due to the way tf.cond() works.
See https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for a more detailed
discussion.
:param original_scope: (str or VariableScope) the original scope.
:param perturbed_scope: (str or VariableScope) the perturbed scope.
:return: (TensorFlow Operation)
"""
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
operation = tf.assign(perturbed_var,
var + tf.random_normal(shape=tf.shape(var), mean=0.,
stddev=param_noise_scale))
else:
# Do not perturb, just assign.
operation = tf.assign(perturbed_var, var)
perturb_ops.append(operation)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
with tf.variable_scope("adaptive_model", reuse=False):
adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
perturb_for_adaption = perturb_vars(original_scope="model", perturbed_scope="adaptive_model/model")
kl_loss = tf.reduce_sum(
tf.nn.softmax(policy.q_values) *
(tf.log(tf.nn.softmax(policy.q_values)) - tf.log(tf.nn.softmax(adaptive_policy.q_values))),
axis=-1)
mean_kl = tf.reduce_mean(kl_loss)
def update_scale():
"""
update the scale expression
:return: (TensorFlow Tensor) the updated scale expression
"""
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_thres_expr = param_noise_threshold.assign(
tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph,
lambda: param_noise_threshold))
# Put everything together.
perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
lambda: deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"),
lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_thres_expr,
]
_act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
_perturbed_act = tf_util.function(
inputs=[policy.obs_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph,
update_param_noise_scale_ph],
outputs=perturbed_output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False,
update_param_noise_scale_ph: False},
updates=updates)
def act(obs, reset=None, update_param_noise_threshold=None, update_param_noise_scale=None, stochastic=True,
update_eps=-1):
"""
get the action from the current observation
:param obs: (Any) Observation that can be feed into the output of make_obs_ph
:param reset: (bool) reset the perturbed policy by sampling a new perturbation
:param update_param_noise_threshold: (float) the desired threshold for the difference between
non-perturbed and perturbed policy
:param update_param_noise_scale: (bool) whether or not to update the scale of the noise for the next time
it is re-perturbed
:param stochastic: (bool) if set to False all the actions are always deterministic (default False)
:param update_eps: (float) update epsilon a new value, if negative not update happens
(default: no update)
:return: (TensorFlow Tensor) tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be
performed for every element of the batch.
"""
if reset is None or update_param_noise_threshold is None or update_param_noise_scale is None:
return _act(obs, stochastic, update_eps)
else:
return _perturbed_act(obs, stochastic, update_eps, reset, update_param_noise_threshold,
update_param_noise_scale)
return act, obs_phs
def build_train(q_func, ob_space, ac_space, optimizer, sess, grad_norm_clipping=None,
                gamma=1.0, double_q=True, scope="deepq", reuse=None,
                param_noise=False, param_noise_filter_func=None, full_tensorboard_log=False, layers=None):
    """
    Creates the train function:

    :param q_func: (DQNPolicy) the policy
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param optimizer: (tf.train.Optimizer) optimizer to use for the Q-learning objective.
    :param sess: (TensorFlow session) The current TensorFlow session
    :param grad_norm_clipping: (float) clip gradient norms to this value. If None no clipping is performed.
    :param gamma: (float) discount rate.
    :param double_q: (bool) if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a
        good idea to keep it enabled.
    :param scope: (str or VariableScope) optional scope for variable_scope.
    :param reuse: (bool) whether or not the variables should be reused. To be able to reuse the scope must be given.
    :param param_noise: (bool) whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    :param param_noise_filter_func: (function (TensorFlow Tensor): bool) function that decides whether or not a
        variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter
        is used by default.
    :param full_tensorboard_log: (bool) enable additional logging when using tensorboard
        WARNING: this logging can take a lot of space quickly
    :param layers: ([int] or None) hidden layer sizes forwarded to q_func; None keeps the policy default.
        NOTE(review): not forwarded on the param_noise branch below -- confirm that is intentional.
    :return: (tuple)

        act: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor) function to select and action given
            observation. See the top of the file for details.
        train: (function (Any, numpy float, numpy float, Any, numpy bool, numpy float): numpy float)
            optimize the error in Bellman's equation. See the top of the file for details.
        update_target: (function) copy the parameters from optimized Q function to the target Q function.
            See the top of the file for details.
        step_model: (DQNPolicy) Policy for evaluation
    """
    n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
    # NOTE(review): for MultiDiscrete spaces n_actions is an array (nvec); the tf.one_hot calls
    # below assume a scalar action count -- confirm MultiDiscrete is actually supported here.
    with tf.variable_scope("input", reuse=reuse):
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
    with tf.variable_scope(scope, reuse=reuse):
        if param_noise:
            act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
                                                        param_noise_filter_func=param_noise_filter_func)
        else:
            act_f, obs_phs = build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=layers)
        # q network evaluation
        with tf.variable_scope("step_model", reuse=True, custom_getter=tf_util.outer_scope_getter("step_model")):
            step_model = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, obs_phs=obs_phs, layers=layers)
        q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/model")
        # target q network evaluation
        with tf.variable_scope("target_q_func", reuse=False):
            target_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=False, layers=layers)
        target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                               scope=tf.get_variable_scope().name + "/target_q_func")
        # compute estimate of best possible value starting from state at t + 1
        double_q_values = None
        double_obs_ph = target_policy.obs_ph
        if double_q:
            with tf.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")):
                double_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, layers=layers)
                double_q_values = double_policy.q_values
                double_obs_ph = double_policy.obs_ph
        with tf.variable_scope("loss", reuse=reuse):
            # set up placeholders
            act_t_ph = tf.placeholder(tf.int32, [None], name="action")
            rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
            done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
            importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
            # q scores for actions which we know were selected in the given state.
            q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1)
            # compute estimate of best possible value starting from state at t + 1
            if double_q:
                # Double DQN: select argmax action with the online net, evaluate it with the target net.
                q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1)
                q_tp1_best = tf.reduce_sum(target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1)
            else:
                q_tp1_best = tf.reduce_max(target_policy.q_values, axis=1)
            # zero out the bootstrap term for terminal transitions
            q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
            # compute RHS of bellman equation
            q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
            # compute the error (potentially clipped)
            td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
            errors = tf_util.huber_loss(td_error)
            weighted_error = tf.reduce_mean(importance_weights_ph * errors)
            tf.summary.scalar("td_error", tf.reduce_mean(td_error))
            tf.summary.scalar("loss", weighted_error)
            if full_tensorboard_log:
                tf.summary.histogram("td_error", td_error)
            # update_target_fn will be called periodically to copy Q network to target Q network
            update_target_expr = []
            for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
                                       sorted(target_q_func_vars, key=lambda v: v.name)):
                update_target_expr.append(var_target.assign(var))
            update_target_expr = tf.group(*update_target_expr)
            # compute optimization op (potentially with gradient clipping)
            gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
            if grad_norm_clipping is not None:
                for i, (grad, var) in enumerate(gradients):
                    if grad is not None:
                        gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
        with tf.variable_scope("input_info", reuse=False):
            tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph))
            tf.summary.scalar('importance_weights', tf.reduce_mean(importance_weights_ph))
            if full_tensorboard_log:
                tf.summary.histogram('rewards', rew_t_ph)
                tf.summary.histogram('importance_weights', importance_weights_ph)
                if tf_util.is_image(obs_phs[0]):
                    tf.summary.image('observation', obs_phs[0])
                elif len(obs_phs[0].shape) == 1:
                    tf.summary.histogram('observation', obs_phs[0])
        optimize_expr = optimizer.apply_gradients(gradients)
        summary = tf.summary.merge_all()
        # Create callable functions
        train = tf_util.function(
            inputs=[
                obs_phs[0],
                act_t_ph,
                rew_t_ph,
                target_policy.obs_ph,
                double_obs_ph,
                done_mask_ph,
                importance_weights_ph
            ],
            outputs=[summary, td_error],
            updates=[optimize_expr]
        )
        update_target = tf_util.function([], [], updates=[update_target_expr])
        return act_f, train, update_target, step_model
| 50.53527 | 123 | 0.684539 | import tensorflow as tf
from gym.spaces import MultiDiscrete
from stable_baselines.common import tf_util
def scope_vars(scope, trainable_only=False):
    """
    Get the variables inside a scope.

    :param scope: (str or VariableScope) the scope the variables reside in
    :param trainable_only: (bool) whether or not to return only the trainable variables
    :return: ([TensorFlow Tensor]) the variables in the given scope
    """
    return tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
        scope=scope if isinstance(scope, str) else scope.name
    )
def scope_name():
    """Return the name of the current TensorFlow variable scope as a string (e.g. "deepq/model")."""
    return tf.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
    """Append *relative_scope_name* to the current variable scope's name, yielding an absolute scope name."""
    return scope_name() + "/" + relative_scope_name
def default_param_noise_filter(var):
    """
    Decide whether a variable should be perturbed by parameter-space noise.

    :param var: (TensorFlow Tensor) the variable
    :return: (bool) True if the variable may be perturbed
    """
    if var not in tf.trainable_variables():
        # never perturb non-trainable variables
        return False
    if "fully_connected" in var.name:
        # perturb fully-connected layers
        return True
    # Everything else (e.g. convolutional or normalization variables) is left unperturbed.
    return False
def build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=None):
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
policy = q_func(sess, ob_space, ac_space, 1, 1, None, layers=layers)
obs_phs = (policy.obs_ph, policy.processed_obs)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
bs_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(obs, stochastic=True, update_eps=-1):
return _act(obs, stochastic, update_eps)
return act, obs_phs
def build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
param_noise_filter_func=None):
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01),
trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05),
trainable=False)
policy = q_func(sess, ob_space, ac_space, 1, 1, None)
obs_phs = (policy.obs_ph, policy.processed_obs)
with tf.variable_scope("perturbed_model", reuse=False):
perturbable_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
operation = tf.assign(perturbed_var,
var + tf.random_normal(shape=tf.shape(var), mean=0.,
stddev=param_noise_scale))
else:
operation = tf.assign(perturbed_var, var)
perturb_ops.append(operation)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
with tf.variable_scope("adaptive_model", reuse=False):
adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
perturb_for_adaption = perturb_vars(original_scope="model", perturbed_scope="adaptive_model/model")
kl_loss = tf.reduce_sum(
tf.nn.softmax(policy.q_values) *
(tf.log(tf.nn.softmax(policy.q_values)) - tf.log(tf.nn.softmax(adaptive_policy.q_values))),
axis=-1)
mean_kl = tf.reduce_mean(kl_loss)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
update_param_noise_thres_expr = param_noise_threshold.assign(
tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph,
lambda: param_noise_threshold))
perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
lambda: deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"),
lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_thres_expr,
]
_act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
_perturbed_act = tf_util.function(
inputs=[policy.obs_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph,
update_param_noise_scale_ph],
outputs=perturbed_output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False,
update_param_noise_scale_ph: False},
updates=updates)
def act(obs, reset=None, update_param_noise_threshold=None, update_param_noise_scale=None, stochastic=True,
update_eps=-1):
if reset is None or update_param_noise_threshold is None or update_param_noise_scale is None:
return _act(obs, stochastic, update_eps)
else:
return _perturbed_act(obs, stochastic, update_eps, reset, update_param_noise_threshold,
update_param_noise_scale)
return act, obs_phs
def build_train(q_func, ob_space, ac_space, optimizer, sess, grad_norm_clipping=None,
gamma=1.0, double_q=True, scope="deepq", reuse=None,
param_noise=False, param_noise_filter_func=None, full_tensorboard_log=False, layers=None):
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
with tf.variable_scope("input", reuse=reuse):
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
with tf.variable_scope(scope, reuse=reuse):
if param_noise:
act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
param_noise_filter_func=param_noise_filter_func)
else:
act_f, obs_phs = build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=layers)
with tf.variable_scope("step_model", reuse=True, custom_getter=tf_util.outer_scope_getter("step_model")):
step_model = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, obs_phs=obs_phs, layers=layers)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/model")
with tf.variable_scope("target_q_func", reuse=False):
target_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=False, layers=layers)
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name + "/target_q_func")
double_q_values = None
double_obs_ph = target_policy.obs_ph
if double_q:
with tf.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")):
double_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, layers=layers)
double_q_values = double_policy.q_values
double_obs_ph = double_policy.obs_ph
with tf.variable_scope("loss", reuse=reuse):
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1)
if double_q:
q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1)
q_tp1_best = tf.reduce_sum(target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1)
else:
q_tp1_best = tf.reduce_max(target_policy.q_values, axis=1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = tf_util.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tf.summary.scalar("td_error", tf.reduce_mean(td_error))
tf.summary.scalar("loss", weighted_error)
if full_tensorboard_log:
tf.summary.histogram("td_error", td_error)
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
if grad_norm_clipping is not None:
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph))
tf.summary.scalar('importance_weights', tf.reduce_mean(importance_weights_ph))
if full_tensorboard_log:
tf.summary.histogram('rewards', rew_t_ph)
tf.summary.histogram('importance_weights', importance_weights_ph)
if tf_util.is_image(obs_phs[0]):
tf.summary.image('observation', obs_phs[0])
elif len(obs_phs[0].shape) == 1:
tf.summary.histogram('observation', obs_phs[0])
optimize_expr = optimizer.apply_gradients(gradients)
summary = tf.summary.merge_all()
train = tf_util.function(
inputs=[
obs_phs[0],
act_t_ph,
rew_t_ph,
target_policy.obs_ph,
double_obs_ph,
done_mask_ph,
importance_weights_ph
],
outputs=[summary, td_error],
updates=[optimize_expr]
)
update_target = tf_util.function([], [], updates=[update_target_expr])
return act_f, train, update_target, step_model
| true | true |
f7fb9b1df0ffd7fffa7f50ab711f37db3c8c17ea | 256 | py | Python | example.py | chuxbraganza/Wrapper-Python | 5d46d47da48a6f4e19a974a6308cbae8b303ec47 | [
"MIT"
] | null | null | null | example.py | chuxbraganza/Wrapper-Python | 5d46d47da48a6f4e19a974a6308cbae8b303ec47 | [
"MIT"
] | null | null | null | example.py | chuxbraganza/Wrapper-Python | 5d46d47da48a6f4e19a974a6308cbae8b303ec47 | [
"MIT"
] | null | null | null | from mistyPy import Robot
# TODO: Replace with your IP
misty = Robot("192.168.0.31") # This is the IP of my misty. Replace with your IP
misty.changeLED(0, 0, 255)
misty.moveHeadPosition(0, 0, 0, 100) # center the head
misty.moveArmsDegrees(0, 0, 100, 100) | 36.571429 | 80 | 0.726563 | from mistyPy import Robot
misty = Robot("192.168.0.31")
misty.changeLED(0, 0, 255)
misty.moveHeadPosition(0, 0, 0, 100)
misty.moveArmsDegrees(0, 0, 100, 100) | true | true |
f7fb9be3d99bff340ea4263655787d4e301fa063 | 21,075 | py | Python | test/functional/test_framework/script.py | AlphaconTeam/AlphaconNetwork | c484de626f36c04ecf3f177807fe52ed8d7470c4 | [
"MIT"
] | 1 | 2019-11-18T12:15:51.000Z | 2019-11-18T12:15:51.000Z | test/functional/test_framework/script.py | AlphaconTeam/AlphaconNetwork | c484de626f36c04ecf3f177807fe52ed8d7470c4 | [
"MIT"
] | null | null | null | test/functional/test_framework/script.py | AlphaconTeam/AlphaconNetwork | c484de626f36c04ecf3f177807fe52ed8d7470c4 | [
"MIT"
] | 2 | 2019-06-17T14:47:21.000Z | 2021-01-16T07:25:13.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Alphacon Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
This file is modified from python-alphaconlib.
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
# Python 2/3 compatibility shims: bchr(i) packs an int into a single byte,
# bord(b) is its inverse.  Under Python 2 (where str == bytes) the plain
# builtins chr/ord already behave that way.
bchr = chr
bord = ord
if sys.version > '3':
    # NOTE(review): string comparison holds for any "3.x" version string, but
    # sys.version_info[0] >= 3 would be the robust check.
    long = int
    bchr = lambda x: bytes([x])
    bord = lambda x: x
import struct
from .bignum import bn2vch
# Maximum size in bytes of a single element pushed onto the script stack.
MAX_SCRIPT_ELEMENT_SIZE = 520
# Maps opcode value -> printable name; populated by OPCODE_NAMES.update()
# further down, once the OP_* constants have been defined.
OPCODE_NAMES = {}
def hash160(s):
    """Return ripemd160(sha256(s)) of the byte string *s* (the HASH160 primitive used by OP_HASH160)."""
    return hashlib.new('ripemd160', sha256(s)).digest()
# Cache of interned CScriptOp objects, indexed by opcode value; filled lazily
# by CScriptOp.__new__ and fully populated by the loop below the class.
_opcode_instances = []
class CScriptOp(int):
    """A single script opcode.

    Instances are interned: CScriptOp(n) always returns the same object for a
    given opcode value n (see __new__), so opcodes compare cheaply and carry
    no per-instance state (__slots__ is empty).
    """
    __slots__ = []

    @staticmethod
    def encode_op_pushdata(d):
        """Encode a PUSHDATA op for the byte string *d*, returning bytes.

        Picks the shortest form: a direct push for < 0x4c bytes, otherwise
        OP_PUSHDATA1/2/4 with a little-endian length prefix.
        """
        if len(d) < 0x4c:
            return b'' + bchr(len(d)) + d  # OP_PUSHDATA
        elif len(d) <= 0xff:
            return b'\x4c' + bchr(len(d)) + d  # OP_PUSHDATA1
        elif len(d) <= 0xffff:
            return b'\x4d' + struct.pack(b'<H', len(d)) + d  # OP_PUSHDATA2
        elif len(d) <= 0xffffffff:
            return b'\x4e' + struct.pack(b'<I', len(d)) + d  # OP_PUSHDATA4
        else:
            raise ValueError("Data too long to encode in a PUSHDATA op")

    @staticmethod
    def encode_op_n(n):
        """Encode a small integer 0..16 as an opcode (OP_0 or OP_1..OP_16).

        :raises ValueError: if n is outside 0..16
        """
        if not (0 <= n <= 16):
            raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
        if n == 0:
            return OP_0
        else:
            return CScriptOp(OP_1 + n - 1)

    def decode_op_n(self):
        """Decode a small integer opcode back to its integer value 0..16.

        :raises ValueError: if this opcode is not OP_0 or OP_1..OP_16
        """
        if self == OP_0:
            return 0
        if not (self == OP_0 or OP_1 <= self <= OP_16):
            raise ValueError('op %r is not an OP_N' % self)
        return int(self - OP_1 + 1)

    def is_small_int(self):
        """Return true if the op pushes a small integer to the stack"""
        # 0x51..0x60 are OP_1..OP_16; 0 is OP_0 (pushes the empty vector, i.e. zero).
        return 0x51 <= self <= 0x60 or self == 0

    def __str__(self):
        return repr(self)

    def __repr__(self):
        # Prefer the symbolic name once OPCODE_NAMES has been populated.
        if self in OPCODE_NAMES:
            return OPCODE_NAMES[self]
        else:
            return 'CScriptOp(0x%x)' % self

    def __new__(cls, n):
        try:
            # Return the interned instance when it already exists.
            return _opcode_instances[n]
        except IndexError:
            # Instances are only ever created in ascending order (see the
            # population loop below the class), so a miss is always for the
            # next unseen value.
            assert len(_opcode_instances) == n
            _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
            return _opcode_instances[n]
# Populate opcode instance table: intern every possible opcode value
# 0x00..0xff so that CScriptOp(n) is always a cache hit afterwards.
for n in range(0xff+1):
    CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
OP_ALP_ASSET = CScriptOp(0xc0)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_ALP_ASSET : 'OP_ALP_ASSET',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
    """Base class for CScript exceptions"""
class CScriptTruncatedPushDataError(CScriptInvalidError):
    """Invalid pushdata due to truncation"""
    def __init__(self, msg, data):
        # Keep the bytes that *were* available so callers can inspect the
        # truncated push after catching the exception.
        self.data = data
        super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum():
    """Minimally-encoded script integer (CScriptNum).

    Numbers are serialized little-endian with the sign carried in the high bit
    of the last byte; encode() prefixes the payload with its one-byte push
    length. Zero is the empty push.
    """
    def __init__(self, d=0):
        # d: the plain Python integer this CScriptNum represents.
        self.value = d

    @staticmethod
    def encode(obj):
        """Serialize obj.value as a length-prefixed minimal script number (bytes)."""
        r = bytearray(0)
        if obj.value == 0:
            return bytes(r)  # zero encodes as an empty push
        neg = obj.value < 0
        absvalue = -obj.value if neg else obj.value
        while (absvalue):
            r.append(absvalue & 0xff)
            absvalue >>= 8
        if r[-1] & 0x80:
            # High bit of the top byte is taken by the magnitude: append an
            # explicit sign byte.
            r.append(0x80 if neg else 0)
        elif neg:
            # Fold the sign into the high bit of the last byte.
            r[-1] |= 0x80
        return bytes(bchr(len(r)) + r)

    @staticmethod
    def decode(vch):
        """Inverse of encode(): parse a length-prefixed minimal script number
        back into a plain int.

        Assumes a valid push size and minimal encoding (no validation is done).
        """
        result = 0
        value = vch[1:]  # strip the single-byte push-length prefix
        if len(value) == 0:
            return result
        for i, byte in enumerate(value):
            result |= int(byte) << 8 * i
        if value[-1] >= 0x80:
            # Negative: clear the sign bit and negate.
            num_mask = (2 ** (len(value) * 8) - 1) >> 1
            result &= num_mask
            result *= -1
        return result
class CScript(bytes):
    """Serialized script
    A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work - you'll get an index by
    byte rather than opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOP objects.
    iter(script) however does iterate by opcode.
    """
    @classmethod
    def __coerce_instance(cls, other):
        # Coerce other into bytes: opcodes become single bytes; integers,
        # script numbers and byte strings become push operations.
        if isinstance(other, CScriptOp):
            other = bchr(other)
        elif isinstance(other, CScriptNum):
            if (other.value == 0):
                # Zero is pushed as the OP_0 opcode, not as an empty push.
                other = bchr(CScriptOp(OP_0))
            else:
                other = CScriptNum.encode(other)
        elif isinstance(other, int):
            if 0 <= other <= 16:
                # Small integers map directly onto OP_0..OP_16.
                other = bytes(bchr(CScriptOp.encode_op_n(other)))
            elif other == -1:
                other = bytes(bchr(OP_1NEGATE))
            else:
                other = CScriptOp.encode_op_pushdata(bn2vch(other))
        elif isinstance(other, (bytes, bytearray)):
            other = CScriptOp.encode_op_pushdata(other)
        return other
    def __add__(self, other):
        """Concatenate, coercing *other* into script form first."""
        # Do the coercion outside of the try block so that errors in it are
        # noticed.
        other = self.__coerce_instance(other)
        try:
            # bytes.__add__ always returns bytes instances unfortunately
            return CScript(super(CScript, self).__add__(other))
        except TypeError:
            raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
    def join(self, iterable):
        # join makes no sense for a CScript()
        raise NotImplementedError
    def __new__(cls, value=b''):
        """Create a script from raw bytes or from an iterable of pushable items."""
        if isinstance(value, bytes) or isinstance(value, bytearray):
            return super(CScript, cls).__new__(cls, value)
        else:
            def coerce_iterable(iterable):
                for instance in iterable:
                    yield cls.__coerce_instance(instance)
            # Annoyingly on both python2 and python3 bytes.join() always
            # returns a bytes instance even when subclassed.
            return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
    def raw_iter(self):
        """Raw iteration
        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes. (sop_idx)
        """
        i = 0
        while i < len(self):
            sop_idx = i
            opcode = bord(self[i])
            i += 1
            if opcode > OP_PUSHDATA4:
                # Plain opcode: no payload follows.
                yield (opcode, None, sop_idx)
            else:
                # Push operation: determine how its payload length is encoded.
                datasize = None
                pushdata_type = None
                if opcode < OP_PUSHDATA1:
                    # Direct push: the opcode byte itself is the length.
                    pushdata_type = 'PUSHDATA(%d)' % opcode
                    datasize = opcode
                elif opcode == OP_PUSHDATA1:
                    pushdata_type = 'PUSHDATA1'
                    if i >= len(self):
                        raise CScriptInvalidError('PUSHDATA1: missing data length')
                    datasize = bord(self[i])
                    i += 1
                elif opcode == OP_PUSHDATA2:
                    pushdata_type = 'PUSHDATA2'
                    if i + 1 >= len(self):
                        raise CScriptInvalidError('PUSHDATA2: missing data length')
                    # Length is a 2-byte little-endian integer.
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8)
                    i += 2
                elif opcode == OP_PUSHDATA4:
                    pushdata_type = 'PUSHDATA4'
                    if i + 3 >= len(self):
                        raise CScriptInvalidError('PUSHDATA4: missing data length')
                    # Length is a 4-byte little-endian integer.
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
                    i += 4
                else:
                    assert False # shouldn't happen
                data = bytes(self[i:i+datasize])
                # Check for truncation
                if len(data) < datasize:
                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
                i += datasize
                yield (opcode, data, sop_idx)
    def __iter__(self):
        """'Cooked' iteration
        Returns either a CScriptOP instance, an integer, or bytes, as
        appropriate.
        See raw_iter() if you need to distinguish the different possible
        PUSHDATA encodings.
        """
        for (opcode, data, sop_idx) in self.raw_iter():
            if data is not None:
                yield data
            else:
                opcode = CScriptOp(opcode)
                if opcode.is_small_int():
                    # OP_0..OP_16 come out as their plain integer value.
                    yield opcode.decode_op_n()
                else:
                    yield CScriptOp(opcode)
    def __repr__(self):
        # For Python3 compatibility add b before strings so testcases don't
        # need to change
        def _repr(o):
            # NOTE(review): _repr returns *bytes* for byte items, while the
            # ', '.join below expects str elements — this looks like it would
            # raise TypeError on py3 for any script containing pushdata;
            # confirm the intended repr format.
            if isinstance(o, bytes):
                return b"x('%s')" % hexlify(o).decode('ascii')
            else:
                return repr(o)
        ops = []
        i = iter(self)
        while True:
            op = None
            try:
                op = _repr(next(i))
            except CScriptTruncatedPushDataError as err:
                # Show the partial data alongside the error, then stop.
                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
                break
            except CScriptInvalidError as err:
                op = '<ERROR: %s>' % err
                break
            except StopIteration:
                break
            finally:
                if op is not None:
                    ops.append(op)
        return "CScript([%s])" % ', '.join(ops)
    def GetSigOpCount(self, fAccurate):
        """Get the SigOp count.
        fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
        Note that this is consensus-critical.
        """
        n = 0
        lastOpcode = OP_INVALIDOPCODE
        for (opcode, data, sop_idx) in self.raw_iter():
            if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                n += 1
            elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                # NOTE(review): lastOpcode is only updated inside this branch,
                # so it can never hold the preceding OP_N, and raw_iter yields
                # plain ints which have no decode_op_n — the fAccurate path
                # looks unreachable/broken; confirm against the reference
                # CScript::GetSigOpCount implementation.
                if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    n += opcode.decode_op_n()
                else:
                    # Worst-case: assume the maximum of 20 pubkeys.
                    n += 20
                lastOpcode = opcode
        return n
# Signature hash type flags: the low 5 bits select the base type, 0x80 is a
# modifier OR'd on top.
SIGHASH_ALL = 1  # commit to all inputs and all outputs
SIGHASH_NONE = 2  # commit to no outputs
SIGHASH_SINGLE = 3  # commit only to the output at the same index as the input
SIGHASH_ANYONECANPAY = 0x80  # commit only to the input being signed
def FindAndDelete(script, sig):
    """Consensus critical, see FindAndDelete() in Satoshi codebase

    Returns a copy of *script* with each op dropped when the raw bytes of
    *sig* occur starting exactly at that op's boundary, replicating the
    Satoshi client's behavior byte-for-byte.
    """
    r = b''
    last_sop_idx = sop_idx = 0
    # Start with skip=True so nothing is emitted before the first op is
    # examined.
    skip = True
    for (opcode, data, sop_idx) in script.raw_iter():
        if not skip:
            # The previous op was not a match: emit its bytes.
            r += script[last_sop_idx:sop_idx]
        last_sop_idx = sop_idx
        # Compare sig against the raw bytes beginning at this op boundary.
        if script[sop_idx:sop_idx + len(sig)] == sig:
            skip = True
        else:
            skip = False
    if not skip:
        # Flush the tail from the final (non-matching) op onward.
        r += script[last_sop_idx:]
    return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
    """Consensus-correct SignatureHash
    Returns (hash, err) to precisely match the consensus-critical behavior of
    the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
    """
    # Value legacy signing commits to when SIGHASH_SINGLE refers to a missing
    # output (the "SIGHASH_SINGLE bug"): the number 1 as a 256-bit LE integer.
    HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    if inIdx >= len(txTo.vin):
        return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
    # Work on a copy so the caller's transaction is left untouched.
    txtmp = CTransaction(txTo)
    # Blank all scriptSigs, then put the script being signed (with any
    # OP_CODESEPARATORs removed) into the input we are signing.
    for txin in txtmp.vin:
        txin.scriptSig = b''
    txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
    if (hashtype & 0x1f) == SIGHASH_NONE:
        # SIGHASH_NONE: commit to no outputs; other inputs' sequences zeroed.
        txtmp.vout = []
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    elif (hashtype & 0x1f) == SIGHASH_SINGLE:
        # SIGHASH_SINGLE: commit only to the output with the same index.
        outIdx = inIdx
        if outIdx >= len(txtmp.vout):
            # The infamous bug: "signing" succeeds with hash == 1.
            return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
        tmp = txtmp.vout[outIdx]
        txtmp.vout = []
        # Earlier outputs are replaced by null outputs (value -1).
        for i in range(outIdx):
            txtmp.vout.append(CTxOut(-1))
        txtmp.vout.append(tmp)
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    if hashtype & SIGHASH_ANYONECANPAY:
        # Commit only to the input being signed.
        tmp = txtmp.vin[inIdx]
        txtmp.vin = []
        txtmp.vin.append(tmp)
    # Serialize, append the 4-byte hashtype, then double-SHA256.
    s = txtmp.serialize()
    s += struct.pack(b"<I", hashtype)
    hash = hash256(s)
    return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
    """BIP143 signature hash for version 0 witness programs.

    script -- scriptCode being spent
    txTo -- transaction being signed
    inIdx -- index of the input being signed
    hashtype -- SIGHASH_* flags
    amount -- value of the output being spent (committed to under BIP143)
    """
    hashPrevouts = 0
    hashSequence = 0
    hashOutputs = 0
    if not (hashtype & SIGHASH_ANYONECANPAY):
        # Commit to every outpoint being spent.
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # Commit to every input's nSequence (effectively SIGHASH_ALL only).
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(hash256(serialize_sequence))
    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # Commit to all outputs.
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        # SIGHASH_SINGLE: commit only to the matching output (BIP143 fixes
        # the legacy out-of-range bug; hashOutputs simply stays 0 then).
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    # Assemble the BIP143 preimage and double-SHA256 it.
    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    ss += struct.pack("<q", amount)
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return hash256(ss)
| 30.150215 | 146 | 0.614282 |
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_ELEMENT_SIZE = 520
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
_opcode_instances = []
class CScriptOp(int):
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
for n in range(0xff+1):
CScriptOp(n)
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
OP_ALP_ASSET = CScriptOp(0xc0)
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_ALP_ASSET : 'OP_ALP_ASSET',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
class CScriptNum():
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
class CScript(bytes):
@classmethod
def __coerce_instance(cls, other):
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
other = self.__coerce_instance(other)
try:
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
def _repr(o):
if isinstance(o, bytes):
return b"x('%s')" % hexlify(o).decode('ascii')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
| true | true |
f7fb9c866385126f70956e6bc9f7c3c83521f4b6 | 4,927 | py | Python | docs/examples/adv_motion_correction2.py | jklymak/dolfyn | eea98fe0021886cf654e25293c385c5c3707ff8d | [
"BSD-3-Clause"
] | null | null | null | docs/examples/adv_motion_correction2.py | jklymak/dolfyn | eea98fe0021886cf654e25293c385c5c3707ff8d | [
"BSD-3-Clause"
] | null | null | null | docs/examples/adv_motion_correction2.py | jklymak/dolfyn | eea98fe0021886cf654e25293c385c5c3707ff8d | [
"BSD-3-Clause"
] | null | null | null | import dolfyn as dlfn
import dolfyn.adv.api as api
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mpldt
##############################################################################
# User-input data
fname = '../../dolfyn/example_data/vector_data_imu01.VEC'
accel_filter = .03  # motion correction filter [Hz]
ensemble_size = 32*300  # sampling frequency * 300 seconds
# Read the data in, use the '.userdata.json' file
data_raw = dlfn.read(fname, userdata=True)
# Crop the data for the time range of interest:
t_start = dlfn.time.date2dt64(datetime(2012, 6, 12, 12, 8, 30))
t_end = data_raw.time[-1]
data = data_raw.sel(time=slice(t_start, t_end))
##############################################################################
# Clean the file using the Goring, Nikora 2002 method:
bad = api.clean.GN2002(data.vel)
data['vel'] = api.clean.clean_fill(data.vel, bad, method='cubic')
# To not replace data:
# data.coords['mask'] = (('dir','time'), ~bad)
# data.vel.values = data.vel.where(data.mask)
# plotting raw vs qc'd data
ax = plt.figure(figsize=(20, 10)).add_axes([.14, .14, .8, .74])
ax.plot(data_raw.time, data_raw.velds.u, label='raw data')
ax.plot(data.time, data.velds.u, label='despiked')
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(mpldt.DateFormatter('%D %H:%M'))
ax.set_ylabel('u-dir velocity, (m/s)')
ax.set_title('Raw vs Despiked Data')
plt.legend(loc='upper right')
plt.show()
data_cleaned = data.copy(deep=True)
##############################################################################
# Perform motion correction
data = api.correct_motion(data, accel_filter, to_earth=False)
# For reference, dolfyn defines ‘inst’ as the IMU frame of reference, not
# the ADV sensor head
# After motion correction, the pre- and post-correction datasets coordinates
# may not align. Since here the ADV sensor head and battery body axes are
# aligned, data.u is the same axis as data_cleaned.u
# Plotting corrected vs uncorrect velocity in instrument coordinates
ax = plt.figure(figsize=(20, 10)).add_axes([.14, .14, .8, .74])
ax.plot(data_cleaned.time, data_cleaned.velds.u, 'g-', label='uncorrected')
ax.plot(data.time, data.velds.u, 'b-', label='motion-corrected')
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(mpldt.DateFormatter('%D %H:%M'))
ax.set_ylabel('u velocity, (m/s)')
ax.set_title('Pre- and Post- Motion Corrected Data in XYZ coordinates')
plt.legend(loc='upper right')
plt.show()
# Rotate the uncorrected data into the earth frame for comparison to motion
# correction:
dlfn.rotate2(data, 'earth', inplace=True)
data_uncorrected = dlfn.rotate2(data_cleaned, 'earth', inplace=False)
# Calc principal heading (from earth coordinates) and rotate into the
# principal axes
data.attrs['principal_heading'] = dlfn.calc_principal_heading(data.vel)
data_uncorrected.attrs['principal_heading'] = dlfn.calc_principal_heading(
    data_uncorrected.vel)
# Plotting corrected vs uncorrected velocity in principal coordinates
ax = plt.figure(figsize=(20, 10)).add_axes([.14, .14, .8, .74])
ax.plot(data_uncorrected.time, data_uncorrected.velds.u,
        'g-', label='uncorrected')
ax.plot(data.time, data.velds.u, 'b-', label='motion-corrected')
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(mpldt.DateFormatter('%D %H:%M'))
ax.set_ylabel('streamwise velocity, (m/s)')
ax.set_title('Corrected and Uncorrected Data in Principal Coordinates')
plt.legend(loc='upper right')
plt.show()
##############################################################################
# Create velocity spectra
# Initiate tool to bin data based on the ensemble length. If n_fft is none,
# n_fft is equal to n_bin.
# Derive n_bin/n_fft from ensemble_size (9600 samples = 32 Hz * 300 s) so the
# constant declared in the user-input section above is actually honored.
ensemble_tool = api.ADVBinner(n_bin=ensemble_size, fs=data.fs,
                              n_fft=ensemble_size // 2)
# motion corrected data
mc_spec = ensemble_tool.calc_psd(data.vel, freq_units='Hz')
# not-motion corrected data
unm_spec = ensemble_tool.calc_psd(data_uncorrected.vel, freq_units='Hz')
# Find motion spectra from IMU velocity
uh_spec = ensemble_tool.calc_psd(data['velacc'] + data['velrot'],
                                 freq_units='Hz')
# Plot U, V, W spectra
U = ['u', 'v', 'w']
for i, component in enumerate(U):
    plt.figure(figsize=(15, 13))
    plt.loglog(uh_spec.f, uh_spec[i].mean(axis=0), 'c',
               label=('motion spectra ' + str(accel_filter) + 'Hz filter'))
    plt.loglog(unm_spec.f, unm_spec[i].mean(axis=0), 'r', label='uncorrected')
    plt.loglog(mc_spec.f, mc_spec[i].mean(
        axis=0), 'b', label='motion corrected')
    # plot -5/3 line
    f_tmp = np.logspace(-2, 1)
    plt.plot(f_tmp, 4e-5*f_tmp**(-5/3), 'k--', label='f^-5/3 slope')
    if component == 'u':
        plt.title('Spectra in streamwise dir')
    elif component == 'v':
        plt.title('Spectra in cross-stream dir')
    else:
        plt.title('Spectra in up dir')
    plt.xlabel('Freq [Hz]')
    # Raw string: '\m' is an invalid (deprecated) escape sequence in a plain
    # string literal; the rendered TeX label is unchanged.
    plt.ylabel(r'$\mathrm{[m^2s^{-2}/Hz]}$', size='large')
    plt.legend()
    plt.show()
| 37.9 | 78 | 0.661457 | import dolfyn as dlfn
import dolfyn.adv.api as api
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mpldt
| true | true |
f7fb9d5cd1845c1259c4d4a0eb730b23c778b855 | 3,876 | py | Python | awscliv2/installers.py | vemel/awscliv2 | f9eb8694bca24d1746eeb46a3a57fcbf9f5d70ef | [
"MIT"
] | 18 | 2020-09-03T19:16:36.000Z | 2022-03-31T18:25:52.000Z | awscliv2/installers.py | vemel/awscliv2 | f9eb8694bca24d1746eeb46a3a57fcbf9f5d70ef | [
"MIT"
] | 5 | 2020-09-03T19:22:57.000Z | 2022-03-04T20:16:14.000Z | awscliv2/installers.py | vemel/awscliv2 | f9eb8694bca24d1746eeb46a3a57fcbf9f5d70ef | [
"MIT"
] | 1 | 2021-11-25T16:19:25.000Z | 2021-11-25T16:19:25.000Z | """
AWS CLI v2 installers.
"""
import os
import platform
import shutil
from io import StringIO
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory, mkdtemp
from urllib.request import urlopen
from zipfile import ZipFile

from awscliv2.constants import LINUX_ARM_URL, LINUX_X86_64_URL, MACOS_URL
from awscliv2.exceptions import InstallError
from awscliv2.interactive_process import InteractiveProcess
from awscliv2.logger import get_logger
def download_file(url: str, target: Path) -> None:
    """
    Download file from `url` to `target` path.
    """
    logger = get_logger()
    logger.info(f"Downloading package from {url} to {target}")
    with urlopen(url) as response:
        payload = response.read()
    target.write_bytes(payload)
def install_macos() -> None:
    """
    Install AWS CLI v2 for MacOS.

    Downloads the official ``.pkg`` bundle and runs the system ``installer``
    tool.

    Raises:
        InstallError -- If the installer exits with a non-zero status.
    """
    logger = get_logger()
    logger.info("Installing AWS CLI v2 for MacOS")
    install_path = Path.home() / "aws-cli"
    if install_path.exists():
        logger.info(f"Removing {install_path}")
        shutil.rmtree(install_path)
    with NamedTemporaryFile("w+", suffix=".pkg") as f_obj:
        package_path = Path(f_obj.name)
        download_file(MACOS_URL, package_path)
        logger.info(f"Installing {package_path} to {install_path}")
        output = StringIO()
        # Bug fix: the process was previously only constructed, never run,
        # so the package was downloaded but the installer never executed.
        # Run it and check the exit code, mirroring install_linux.
        return_code = InteractiveProcess(
            [
                "installer",
                "-pkg",
                package_path.as_posix(),
                "-target",
                install_path.as_posix(),
                "-applyChoiceChangesXML",
                "choices.xml",
            ]
        ).run(stdout=output)
        if return_code:
            raise InstallError(f"Installation failed: {output.getvalue()}")
    logger.info("Now awsv2 will use this installed version")
    logger.info("Running now to check installation: awsv2 --version")
def install_linux_x86_64() -> None:
    """
    Install AWS CLI v2 for Linux x86_64.
    """
    # Thin wrapper: delegates to the generic Linux installer with the
    # official x86_64 bundle URL.
    install_linux(LINUX_X86_64_URL)
def install_linux_arm() -> None:
    """
    Install AWS CLI v2 for Linux ARM.
    """
    # Thin wrapper: delegates to the generic Linux installer with the
    # official ARM bundle URL.
    install_linux(LINUX_ARM_URL)
def install_linux(url: str) -> None:
    """
    Install AWS CLI v2 for Linux from `url`.

    Downloads the official zip bundle, extracts it to a scratch directory
    and runs the bundled installer.

    Raises:
        InstallError -- If the bundled installer exits with a non-zero status.
    """
    logger = get_logger()
    logger.info("Installing AWS CLI v2 for Linux")
    # mkdtemp creates the directory and leaves it in place. The previous
    # Path(TemporaryDirectory().name) pattern returned a path whose directory
    # was removed as soon as the TemporaryDirectory object was garbage
    # collected, and only worked because extractall recreated it.
    temp_dir = Path(mkdtemp())
    install_path = Path.home() / ".awscliv2"
    if install_path.exists():
        logger.info(f"Removing {install_path}")
        shutil.rmtree(install_path)
    bin_path = install_path / "binaries"
    try:
        with NamedTemporaryFile("w+", suffix=".zip") as f_obj:
            package_path = Path(f_obj.name)
            download_file(url, package_path)
            logger.info(f"Extracting {package_path} to {temp_dir}")
            with ZipFile(package_path, "r") as zip_obj:
                zip_obj.extractall(temp_dir)
            # ZipFile does not preserve the execute bit, so restore it on the
            # installer script and the bundled `aws` binary.
            installer_path = temp_dir / "aws" / "install"
            os.chmod(installer_path, 0o744)
            os.chmod(temp_dir / "aws" / "dist" / "aws", 0o744)
            logger.info(f"Installing {installer_path} to {install_path}")
            output = StringIO()
            return_code = InteractiveProcess(
                [
                    installer_path.as_posix(),
                    "-i",
                    install_path.as_posix(),
                    "-b",
                    bin_path.as_posix(),
                ]
            ).run(stdout=output)
            if return_code:
                raise InstallError(f"Installation failed: {output.getvalue()}")
    finally:
        # Remove the scratch directory even when the installation fails,
        # so failed runs do not leak extracted bundles.
        shutil.rmtree(temp_dir, ignore_errors=True)
    logger.info("Now awsv2 will use this installed version")
    logger.info("Running now to check installation: awsv2 --version")
def install_multiplatform():
"""
Install AWS CLI v2 for Linux ar MacOS.
"""
os_platform = platform.system()
arch = platform.architecture()[0]
if os_platform == "Darwin":
return install_macos()
if os_platform == "Linux" and arch == "64bit":
return install_linux_x86_64()
if os_platform == "Linux" and arch == "arm":
return install_linux_arm()
raise InstallError(f"{os_platform} {arch} is not supported, use docker version")
| 29.815385 | 84 | 0.635965 | import os
import platform
import shutil
from io import StringIO
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from urllib.request import urlopen
from zipfile import ZipFile
from awscliv2.constants import LINUX_ARM_URL, LINUX_X86_64_URL, MACOS_URL
from awscliv2.exceptions import InstallError
from awscliv2.interactive_process import InteractiveProcess
from awscliv2.logger import get_logger
def download_file(url: str, target: Path) -> None:
get_logger().info(f"Downloading package from {url} to {target}")
with urlopen(url) as f:
target.write_bytes(f.read())
def install_macos() -> None:
logger = get_logger()
logger.info("Installing AWS CLI v2 for MacOS")
install_path = Path.home() / "aws-cli"
if install_path.exists():
logger.info(f"Removing {install_path}")
shutil.rmtree(install_path)
with NamedTemporaryFile("w+", suffix=".pkg") as f_obj:
package_path = Path(f_obj.name)
download_file(MACOS_URL, package_path)
logger.info(f"Installing {package_path} to {install_path}")
InteractiveProcess(
[
"installer",
"-pkg",
package_path.as_posix(),
"-target",
install_path.as_posix(),
"-applyChoiceChangesXML",
"choices.xml",
]
)
logger.info("Now awsv2 will use this installed version")
logger.info("Running now to check installation: awsv2 --version")
def install_linux_x86_64() -> None:
install_linux(LINUX_X86_64_URL)
def install_linux_arm() -> None:
install_linux(LINUX_ARM_URL)
def install_linux(url) -> None:
logger = get_logger()
logger.info("Installing AWS CLI v2 for Linux")
temp_dir = Path(TemporaryDirectory().name)
install_path = Path.home() / ".awscliv2"
if install_path.exists():
logger.info(f"Removing {install_path}")
shutil.rmtree(install_path)
bin_path = install_path / "binaries"
with NamedTemporaryFile("w+", suffix=".zip") as f_obj:
package_path = Path(f_obj.name)
download_file(url, package_path)
logger.info(f"Extracting {package_path} to to {temp_dir}")
with ZipFile(package_path, "r") as zip_obj:
zip_obj.extractall(
temp_dir,
)
installer_path = temp_dir / "aws" / "install"
os.chmod(installer_path, 0o744)
os.chmod(temp_dir / "aws" / "dist" / "aws", 0o744)
logger.info(f"Installing {installer_path} to {install_path}")
output = StringIO()
return_code = InteractiveProcess(
[
installer_path.as_posix(),
"-i",
install_path.as_posix(),
"-b",
bin_path.as_posix(),
]
).run(stdout=output)
if return_code:
raise InstallError(f"Installation failed: {output.getvalue()}")
shutil.rmtree(temp_dir)
logger.info("Now awsv2 will use this installed version")
logger.info("Running now to check installation: awsv2 --version")
def install_multiplatform():
os_platform = platform.system()
arch = platform.architecture()[0]
if os_platform == "Darwin":
return install_macos()
if os_platform == "Linux" and arch == "64bit":
return install_linux_x86_64()
if os_platform == "Linux" and arch == "arm":
return install_linux_arm()
raise InstallError(f"{os_platform} {arch} is not supported, use docker version")
| true | true |
f7fb9e15b3d5cf5d108075e52b71c83afc13300b | 840 | py | Python | repository/migrations/0004_auto_20170109_0937.py | shuang3322/EdmureBlog | e2754157f82dad60bbb5094d45f8b536bcc33f50 | [
"Apache-2.0"
] | 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | repository/migrations/0004_auto_20170109_0937.py | shuang3322/EdmureBlog | e2754157f82dad60bbb5094d45f8b536bcc33f50 | [
"Apache-2.0"
] | 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | repository/migrations/0004_auto_20170109_0937.py | shuang3322/EdmureBlog | e2754157f82dad60bbb5094d45f8b536bcc33f50 | [
"Apache-2.0"
] | 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-09 09:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('repository', '0003_auto_20170109_0935'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('nid', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=32, verbose_name='全部分类')),
],
),
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='repository.Category', verbose_name='分类'),
),
]
| 28.965517 | 137 | 0.609524 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('repository', '0003_auto_20170109_0935'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('nid', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=32, verbose_name='全部分类')),
],
),
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='repository.Category', verbose_name='分类'),
),
]
| true | true |
f7fb9f1cdb67e858c9683e7db6d762e36d90b899 | 2,608 | py | Python | plugins/plugins.py | lucasberti/telegrao-py | e8a275e39691e456b88f0dadcc6a76aa41e1612d | [
"MIT"
] | null | null | null | plugins/plugins.py | lucasberti/telegrao-py | e8a275e39691e456b88f0dadcc6a76aa41e1612d | [
"MIT"
] | null | null | null | plugins/plugins.py | lucasberti/telegrao-py | e8a275e39691e456b88f0dadcc6a76aa41e1612d | [
"MIT"
] | 6 | 2017-07-27T06:04:40.000Z | 2019-08-15T02:11:52.000Z | import config
from api import send_message
from reborn import is_sudoer
def on_msg_received(msg, matches):
# Checa se o usuário tem permissão e tem a quantidade correta de args no comando
command = matches.group(2)
plugin = matches.group(3)
if command is None and plugin is None:
string = ""
string += "atvdos::::: \n"
if len(config.plugins.items()) == 0:
string += "neñum rs"
else:
for query, plu in config.plugins.items():
string += plu + "✔️ \n"
string += "\ndesatvsd::: \n"
if len(config.disabled_plugins.items()) == 0:
string += "neñum rs"
else:
for query, plu in config.disabled_plugins.items():
string += plu + "❌ \n"
send_message(msg["chat"]["id"], string)
elif is_sudoer(msg["from"]["id"]) and command is not None and plugin is not None:
if command == "enable":
for query, plu in config.plugins.items():
# Se o plugin passado for encontrado nos plugins ativos, avisa e sai da função.
if plugin == plu:
send_message(msg["chat"]["id"], "pora o " + plugin + "ja ta ativado burro")
return
for query, plu in config.disabled_plugins.items():
# Se o plugin estiver inativo, troca de posição, salva e recarrega.
if plugin == plu:
config.disabled_plugins.pop(query)
config.plugins[query] = plu
config.save_config()
config.load_config()
send_message(msg["chat"]["id"], "ae carai o " + plugin + "agr ativado carai.....")
return
elif command == "disable":
for query, plu in config.disabled_plugins.items():
# Se o plugin passado for encontrado nos plugins desativados, avisa e sai da função.
if plugin == plu:
send_message(msg["chat"]["id"], "pora o " + plugin + " ja ta disativado burro")
return
for query, plu in config.plugins.items():
# Se o plugin estiver ativo, troca de posição, salva e recarrega.
if plugin == plu:
config.plugins.pop(query)
config.disabled_plugins[query] = plu
config.save_config()
config.load_config()
send_message(msg["chat"]["id"], "ae carai o " + plugin + "agr disisativado carai.....")
return | 39.515152 | 107 | 0.52684 | import config
from api import send_message
from reborn import is_sudoer
def on_msg_received(msg, matches):
command = matches.group(2)
plugin = matches.group(3)
if command is None and plugin is None:
string = ""
string += "atvdos::::: \n"
if len(config.plugins.items()) == 0:
string += "neñum rs"
else:
for query, plu in config.plugins.items():
string += plu + "✔️ \n"
string += "\ndesatvsd::: \n"
if len(config.disabled_plugins.items()) == 0:
string += "neñum rs"
else:
for query, plu in config.disabled_plugins.items():
string += plu + "❌ \n"
send_message(msg["chat"]["id"], string)
elif is_sudoer(msg["from"]["id"]) and command is not None and plugin is not None:
if command == "enable":
for query, plu in config.plugins.items():
if plugin == plu:
send_message(msg["chat"]["id"], "pora o " + plugin + "ja ta ativado burro")
return
for query, plu in config.disabled_plugins.items():
if plugin == plu:
config.disabled_plugins.pop(query)
config.plugins[query] = plu
config.save_config()
config.load_config()
send_message(msg["chat"]["id"], "ae carai o " + plugin + "agr ativado carai.....")
return
elif command == "disable":
for query, plu in config.disabled_plugins.items():
if plugin == plu:
send_message(msg["chat"]["id"], "pora o " + plugin + " ja ta disativado burro")
return
for query, plu in config.plugins.items():
if plugin == plu:
config.plugins.pop(query)
config.disabled_plugins[query] = plu
config.save_config()
config.load_config()
send_message(msg["chat"]["id"], "ae carai o " + plugin + "agr disisativado carai.....")
return | true | true |
f7fb9fde20a922f2167716c435c2799b61a7e7dd | 2,325 | py | Python | scripts/eval/robustness_exp_text.py | alex-kj-chin/prototransformer-public | f6c82ea0e4a1fe57f19f161d4d659db2668f7313 | [
"MIT"
] | null | null | null | scripts/eval/robustness_exp_text.py | alex-kj-chin/prototransformer-public | f6c82ea0e4a1fe57f19f161d4d659db2668f7313 | [
"MIT"
] | null | null | null | scripts/eval/robustness_exp_text.py | alex-kj-chin/prototransformer-public | f6c82ea0e4a1fe57f19f161d4d659db2668f7313 | [
"MIT"
] | null | null | null | """Evaluate accuracy for 1000 episodes on test set."""
import os
import numpy as np
from src.agents.nlp import *
from src.utils.setup import process_config, process_config_from_json
from src.datasets.text import *
def evaluate(args, gpu_device=-1):
config_path = os.path.join(args.exp_dir, 'config.json')
checkpoint_dir = os.path.join(args.exp_dir, 'checkpoints')
analysis_dir = os.path.join(args.exp_dir, 'analysis')
if not os.path.isdir(analysis_dir):
os.makedirs(analysis_dir)
config = process_config(config_path)
if gpu_device >= 0: config.gpu_device = gpu_device
config = process_config_from_json(config)
AgentClass = globals()[config.agent]
agent = AgentClass(config)
agent.load_checkpoint(
args.checkpoint_file,
checkpoint_dir=checkpoint_dir,
load_model=True,
load_optim=False,
load_epoch=True,
)
# turn on eval mode
agent.model.eval()
class_dict = {
'fs_20news': FewShot20News,
'fs_amazon': FewShotAmazon,
'fs_huffpost': FewShotHuffPost,
'fs_rcv1': FewShotRCV1,
'fs_reuters': FewShotReuters,
'fs_fewrel': FewShotFewRel,
}
DatasetClass = class_dict[config.dataset.name]
test_dataset = DatasetClass(
data_root=config.dataset.data_root,
n_ways=config.dataset.test.n_ways,
n_shots=config.dataset.test.n_shots,
n_queries=config.dataset.test.n_queries,
split='test',
)
test_loader, _ = agent._create_test_dataloader(
test_dataset,
config.optim.batch_size,
)
_, accuracies, acc_stdevs = agent.eval_split('Test', test_loader, verbose=True)
print(acc_stdevs)
print(accuracies)
checkpoint_name = args.checkpoint_file.replace('.pth.tar', '')
accuracy_fpath = os.path.join(analysis_dir, f'{checkpoint_name}_accuracies.csv')
np.savez(accuracy_fpath, accuracies=accuracies)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('exp_dir', type=str, help='path to experiment directory')
parser.add_argument('checkpoint_file', type=str, help='name of checkpoint')
parser.add_argument('--gpu-device', type=int, default=-1)
args = parser.parse_args()
evaluate(args, gpu_device=args.gpu_device)
| 31.418919 | 84 | 0.692043 |
import os
import numpy as np
from src.agents.nlp import *
from src.utils.setup import process_config, process_config_from_json
from src.datasets.text import *
def evaluate(args, gpu_device=-1):
config_path = os.path.join(args.exp_dir, 'config.json')
checkpoint_dir = os.path.join(args.exp_dir, 'checkpoints')
analysis_dir = os.path.join(args.exp_dir, 'analysis')
if not os.path.isdir(analysis_dir):
os.makedirs(analysis_dir)
config = process_config(config_path)
if gpu_device >= 0: config.gpu_device = gpu_device
config = process_config_from_json(config)
AgentClass = globals()[config.agent]
agent = AgentClass(config)
agent.load_checkpoint(
args.checkpoint_file,
checkpoint_dir=checkpoint_dir,
load_model=True,
load_optim=False,
load_epoch=True,
)
agent.model.eval()
class_dict = {
'fs_20news': FewShot20News,
'fs_amazon': FewShotAmazon,
'fs_huffpost': FewShotHuffPost,
'fs_rcv1': FewShotRCV1,
'fs_reuters': FewShotReuters,
'fs_fewrel': FewShotFewRel,
}
DatasetClass = class_dict[config.dataset.name]
test_dataset = DatasetClass(
data_root=config.dataset.data_root,
n_ways=config.dataset.test.n_ways,
n_shots=config.dataset.test.n_shots,
n_queries=config.dataset.test.n_queries,
split='test',
)
test_loader, _ = agent._create_test_dataloader(
test_dataset,
config.optim.batch_size,
)
_, accuracies, acc_stdevs = agent.eval_split('Test', test_loader, verbose=True)
print(acc_stdevs)
print(accuracies)
checkpoint_name = args.checkpoint_file.replace('.pth.tar', '')
accuracy_fpath = os.path.join(analysis_dir, f'{checkpoint_name}_accuracies.csv')
np.savez(accuracy_fpath, accuracies=accuracies)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('exp_dir', type=str, help='path to experiment directory')
parser.add_argument('checkpoint_file', type=str, help='name of checkpoint')
parser.add_argument('--gpu-device', type=int, default=-1)
args = parser.parse_args()
evaluate(args, gpu_device=args.gpu_device)
| true | true |
f7fb9ffdde0054952c056de8bc52073bd05e95da | 9,055 | py | Python | examples/sle_periodic/figure_1.py | fkemeth/emergent_pdes | d0501f21c9eb569543a19d4d95d6c91a9ccb11fe | [
"MIT"
] | null | null | null | examples/sle_periodic/figure_1.py | fkemeth/emergent_pdes | d0501f21c9eb569543a19d4d95d6c91a9ccb11fe | [
"MIT"
] | null | null | null | examples/sle_periodic/figure_1.py | fkemeth/emergent_pdes | d0501f21c9eb569543a19d4d95d6c91a9ccb11fe | [
"MIT"
] | null | null | null | """Initial plot for paper."""
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import int.matthews as mint
import fun.dmaps as dmaps
from mayavi import mlab
POINTS_W = 397.48499
plt.set_cmap('plasma')
def plot_1and2():
N = 256
pars = {"gamma": 1.7, "K": 1.2}
gamma_off = 0.2
pars["omega"] = np.linspace(-pars["gamma"], pars["gamma"], N)+gamma_off
np.random.shuffle(pars["omega"])
y0 = np.random.uniform(-.4, .4, N) + 1.0j*np.random.uniform(-.4, .4, N)
Ad = mint.integrate(tmin=0, tmax=80, T=2000, ic='manual', pars=pars, N=N, Ainit=y0,
append_init=True)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.scatter(Ad["init"].real, Ad["init"].imag, c=Ad["init"].real)
# ax.set_xlabel('Re W')
# ax.set_ylabel('Im W')
# # plt.savefig('fig/paper/plot_10.png', dpi=400)
# plt.show()
y0 = np.linspace(-.4, .4, int(np.sqrt(N)))
y1 = np.linspace(-.4, .4, int(np.sqrt(N)))
y0, y1 = np.meshgrid(y0, y1)
Ad = mint.integrate(tmin=0, tmax=80, T=2000, ic='manual', pars=pars, N=N,
Ainit=y0.flatten() + 1.0j*y1.flatten(),
append_init=True)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.scatter(Ad["init"].real, Ad["init"].imag, c=np.arange(N))
# ax.set_xlabel('Re W', fontsize=20, family='sans-serif')
# ax.set_ylabel('Im W', fontsize=20, family='sans-serif')
# ax.set_xticks((-0.4, 0., 0.4))
# ax.set_xticklabels((-0.4, 0., 0.4), family='sans-serif', fontsize=16)
# ax.set_yticks((-0.4, 0., 0.4))
# ax.set_yticklabels((-0.4, 0., 0.4), family='sans-serif', fontsize=16)
# plt.subplots_adjust(bottom=0.15)
# # plt.savefig('fig/paper/plot_10.png', dpi=400)
# plt.show()
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
cmap = plt.get_cmap('plasma')
cmcolors = cmap((Ad["data"][0].real-min(Ad["data"][0].real)) /
(max(Ad["data"][0].real)-min(Ad["data"][0].real)))
cmcolors = cmap(np.linspace(0, 1, N))
f = mlab.figure(fgcolor=(0., 0., 0.), bgcolor=(1, 1, 1), size=(3*POINTS_W, 3*POINTS_W))
for i in range(Ad["N"]):
mlab.plot3d(Ad["data"][:1000, i].real, Ad["data"][:1000, i].imag, Ad["tt"][:1000]/40,
tube_radius=0.005, colormap='Spectral', figure=f,
color=tuple(cmcolors[i, :-1]))
ax = mlab.axes()
ax.axes.label_format = '%.1f'
ax.axes.corner_offset = 0.1
mlab.axes(xlabel='Re W', ylabel='Im W', zlabel='t', figure=f, extent=[-.4, .4, -.4, .4, 0, 1],
nb_labels=3, ranges=[-.4, .4, -.4, .4, Ad["tt"][0], Ad["tt"][1000]])
mlab.points3d(Ad["data"][1000, :].real, Ad["data"][1000, :].imag, np.repeat(Ad["tt"][1000]/40, N),
color=(0, 0, 0),
figure=f, reset_zoom=False)
mlab.view(azimuth=240, elevation=60, distance=5)
mlab.savefig('plot_1b.png', magnification=1)
# mlab.process_ui_events()
# mlab_image = mlab.screenshot(figure=f)
mlab.close()
# mlab.show()
tmin = 800
f = mlab.figure(fgcolor=(0., 0., 0.), bgcolor=(1, 1, 1), size=(3*POINTS_W, 3*POINTS_W))
for i in range(Ad["N"]):
mlab.plot3d(Ad["data"][tmin:1000, i].real, Ad["data"][tmin:1000, i].imag,
Ad["tt"][tmin:1000]/40,
tube_radius=0.005, colormap='Spectral', figure=f,
# color=mpl.colors.to_rgb(colors[np.mod(i, len(colors))]))
color=tuple(cmcolors[i, :-1]))
ax = mlab.axes()
ax.axes.label_format = '%.1f'
ax.axes.corner_offset = 0.1
# ax.axes.fontsize = 8
mlab.axes(xlabel='Re W', ylabel='Im W', zlabel='t', figure=f,
extent=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin]/40, 1],
nb_labels=3, ranges=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin], Ad["tt"][1000]])
mlab.points3d(Ad["data"][1000, :].real, Ad["data"][1000, :].imag, np.repeat(Ad["tt"][1000]/40, N),
color=(0, 0, 0),
figure=f)
cmap2 = plt.get_cmap('jet')
cmcolors2 = cmap2((Ad["omega"]-min(Ad["omega"]))/(max(Ad["omega"])-min(Ad["omega"])))
nodes = mlab.points3d(Ad["data"][1000].real, Ad["data"][1000].imag,
np.repeat(Ad["tt"][1000]/40, N)+.05,
(np.arctan(np.pi*(Ad["omega"]-min(Ad["omega"])) /
(max(Ad["omega"])-min(Ad["omega"]))-np.pi/2)+1)/2,
figure=f, colormap='jet', scale_mode='none')
mlab.view(azimuth=240, elevation=60, distance=1)
# mlab.savefig('fig/paper/plot_1b.png', size=(POINTS_W/72, 0.4*POINTS_W/72), magnification=1)
mlab.savefig('plot_1c.png', magnification=1)
mlab.show()
tmin = 800
cmap2 = plt.get_cmap('jet')
cmcolors2 = cmap2((np.arctan(np.pi*(Ad["omega"]-min(Ad["omega"])) /
(max(Ad["omega"])-min(Ad["omega"]))-np.pi/2)+1)/2)
# (Ad["omega"]-min(Ad["omega"]))/(max(Ad["omega"])-min(Ad["omega"])))
f = mlab.figure(fgcolor=(0., 0., 0.), bgcolor=(1, 1, 1), size=(3*POINTS_W, 3*POINTS_W))
for i in range(Ad["N"]):
mlab.plot3d(Ad["data"][tmin:1000, i].real, Ad["data"][tmin:1000, i].imag,
Ad["tt"][tmin:1000]/40,
tube_radius=0.005, colormap='jet', figure=f,
# color=mpl.colors.to_rgb(colors[np.mod(i, len(colors))]))
color=tuple(cmcolors2[i, :-1]))
ax = mlab.axes()
ax.axes.label_format = '%.1f'
ax.axes.corner_offset = 0.1
# ax.axes.fontsize = 8
mlab.axes(xlabel='Re W', ylabel='Im W', zlabel='t', figure=f,
extent=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin]/40, 1],
nb_labels=3, ranges=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin], Ad["tt"][1000]])
mlab.points3d(Ad["data"][1000, :].real, Ad["data"][1000, :].imag, np.repeat(Ad["tt"][1000]/40, N),
color=(0, 0, 0),
figure=f)
mlab.view(azimuth=240, elevation=60, distance=1)
# mlab.savefig('fig/paper/plot_1b.png', size=(POINTS_W/72, 0.4*POINTS_W/72), magnification=1)
mlab.savefig('plot_1d.png', magnification=1)
mlab.show()
# idxs = np.argsort(y0.real)
idxs = np.arange(N)
D, V = dmaps.dmaps(Ad["data"][1000:].T, eps=1e-2, alpha=1)
V[:, 1] = 2*(V[:, 1]-np.min(V[:, 1])) / \
(np.max(V[:, 1])-np.min(V[:, 1]))-1.
tmin = 500
import matplotlib.image as mpimg
mlab_image = mpimg.imread(r'plot_1b.png')
mlab_image2 = mpimg.imread(r'plot_1c.png')
mlab_image3 = mpimg.imread(r'plot_1d.png')
POINTS_W = 397.48499
fig = plt.figure(figsize=(POINTS_W/72, 1.2*5.5))
ax1 = fig.add_subplot(321)
ax1.scatter(Ad["init"].real, Ad["init"].imag, c=np.arange(N), s=2)
ax1.set_xlabel('Re W')
ax1.set_ylabel('Im W')
ax1.set_xticks((-0.4, 0., 0.4))
ax1.set_xticklabels((-0.4, 0., 0.4))
ax1.set_yticks((-0.4, 0., 0.4))
ax1.set_yticklabels((-0.4, 0., 0.4))
ax2 = fig.add_subplot(322)
crop = 180
ax2.imshow(mlab_image[int(1.5*crop):-crop, int(1.5*crop):-int(2*crop)])
ax2.set_axis_off()
ax3 = fig.add_subplot(323)
crop = 100
ax3.imshow(mlab_image2[crop:-crop, crop:-crop])
ax3.set_axis_off()
ax4 = fig.add_subplot(324)
crop = 100
ax4.imshow(mlab_image3[crop:-crop, crop:-crop])
ax4.set_axis_off()
ax5 = fig.add_subplot(325, projection='3d')
for i in range(int(Ad["data"].shape[1])):
ax5.plot(np.repeat(np.arange(len(idxs))[i], len(Ad["tt"])-tmin), Ad["tt"][tmin:],
Ad["data"][1+tmin:, idxs[i]].real, lw=0.09, color='k')
ax5.set_xlabel(r'$i$', labelpad=2)
ax5.set_ylabel(r'$t$')
ax5.set_zlabel(r'Re W', labelpad=6)
ax6 = fig.add_subplot(326, projection='3d')
for i in range(int(Ad["data"].shape[1])):
ax6.plot(np.repeat(V[i, 1], len(Ad["tt"])-tmin), Ad["tt"][tmin:],
Ad["data"][1+tmin:, i].real, lw=0.09, color='k')
ax6.set_xlabel(r'$\phi_i$', labelpad=2)
ax6.set_ylabel(r'$t$')
ax6.set_zlabel(r'Re W', labelpad=6)
plt.subplots_adjust(top=0.99, wspace=0.2, right=0.95, bottom=0.05, left=0.12, hspace=0.2)
ax5.view_init(elev=60., azim=10)
ax6.view_init(elev=60., azim=10)
ax1.text(-0.25, 0.95, r'$\mathbf{a}$', transform=ax1.transAxes, weight='bold', fontsize=12)
ax2.text(-0.1, 0.95, r'$\mathbf{b}$', transform=ax2.transAxes, weight='bold', fontsize=12)
ax3.text(-0.23, 1., r'$\mathbf{c}$', transform=ax3.transAxes, weight='bold', fontsize=12)
ax4.text(-0.05, 1., r'$\mathbf{d}$', transform=ax4.transAxes, weight='bold', fontsize=12)
ax5.text(40.0, -1.0, 2.0, r'$\mathbf{e}$', transform=ax5.transAxes, weight='bold', fontsize=12)
ax6.text(-9, 1.2, 1.8, r'$\mathbf{f}$', transform=ax6.transAxes, weight='bold', fontsize=12)
plt.savefig('figure_1.pdf', dpi=400)
plt.savefig('figure_1.png', dpi=400)
plt.show()
np.save('Source_Data_Figure_1.npy', Ad["data"])
| 43.325359 | 102 | 0.559801 | import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import int.matthews as mint
import fun.dmaps as dmaps
from mayavi import mlab
POINTS_W = 397.48499
plt.set_cmap('plasma')
def plot_1and2():
N = 256
pars = {"gamma": 1.7, "K": 1.2}
gamma_off = 0.2
pars["omega"] = np.linspace(-pars["gamma"], pars["gamma"], N)+gamma_off
np.random.shuffle(pars["omega"])
y0 = np.random.uniform(-.4, .4, N) + 1.0j*np.random.uniform(-.4, .4, N)
Ad = mint.integrate(tmin=0, tmax=80, T=2000, ic='manual', pars=pars, N=N, Ainit=y0,
append_init=True)
t(N)))
y1 = np.linspace(-.4, .4, int(np.sqrt(N)))
y0, y1 = np.meshgrid(y0, y1)
Ad = mint.integrate(tmin=0, tmax=80, T=2000, ic='manual', pars=pars, N=N,
Ainit=y0.flatten() + 1.0j*y1.flatten(),
append_init=True)
le"].by_key()["color"]
cmap = plt.get_cmap('plasma')
cmcolors = cmap((Ad["data"][0].real-min(Ad["data"][0].real)) /
(max(Ad["data"][0].real)-min(Ad["data"][0].real)))
cmcolors = cmap(np.linspace(0, 1, N))
f = mlab.figure(fgcolor=(0., 0., 0.), bgcolor=(1, 1, 1), size=(3*POINTS_W, 3*POINTS_W))
for i in range(Ad["N"]):
mlab.plot3d(Ad["data"][:1000, i].real, Ad["data"][:1000, i].imag, Ad["tt"][:1000]/40,
tube_radius=0.005, colormap='Spectral', figure=f,
color=tuple(cmcolors[i, :-1]))
ax = mlab.axes()
ax.axes.label_format = '%.1f'
ax.axes.corner_offset = 0.1
mlab.axes(xlabel='Re W', ylabel='Im W', zlabel='t', figure=f, extent=[-.4, .4, -.4, .4, 0, 1],
nb_labels=3, ranges=[-.4, .4, -.4, .4, Ad["tt"][0], Ad["tt"][1000]])
mlab.points3d(Ad["data"][1000, :].real, Ad["data"][1000, :].imag, np.repeat(Ad["tt"][1000]/40, N),
color=(0, 0, 0),
figure=f, reset_zoom=False)
mlab.view(azimuth=240, elevation=60, distance=5)
mlab.savefig('plot_1b.png', magnification=1)
mlab.close()
tmin = 800
f = mlab.figure(fgcolor=(0., 0., 0.), bgcolor=(1, 1, 1), size=(3*POINTS_W, 3*POINTS_W))
for i in range(Ad["N"]):
mlab.plot3d(Ad["data"][tmin:1000, i].real, Ad["data"][tmin:1000, i].imag,
Ad["tt"][tmin:1000]/40,
tube_radius=0.005, colormap='Spectral', figure=f,
color=tuple(cmcolors[i, :-1]))
ax = mlab.axes()
ax.axes.label_format = '%.1f'
ax.axes.corner_offset = 0.1
mlab.axes(xlabel='Re W', ylabel='Im W', zlabel='t', figure=f,
extent=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin]/40, 1],
nb_labels=3, ranges=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin], Ad["tt"][1000]])
mlab.points3d(Ad["data"][1000, :].real, Ad["data"][1000, :].imag, np.repeat(Ad["tt"][1000]/40, N),
color=(0, 0, 0),
figure=f)
cmap2 = plt.get_cmap('jet')
cmcolors2 = cmap2((Ad["omega"]-min(Ad["omega"]))/(max(Ad["omega"])-min(Ad["omega"])))
nodes = mlab.points3d(Ad["data"][1000].real, Ad["data"][1000].imag,
np.repeat(Ad["tt"][1000]/40, N)+.05,
(np.arctan(np.pi*(Ad["omega"]-min(Ad["omega"])) /
(max(Ad["omega"])-min(Ad["omega"]))-np.pi/2)+1)/2,
figure=f, colormap='jet', scale_mode='none')
mlab.view(azimuth=240, elevation=60, distance=1)
mlab.savefig('plot_1c.png', magnification=1)
mlab.show()
tmin = 800
cmap2 = plt.get_cmap('jet')
cmcolors2 = cmap2((np.arctan(np.pi*(Ad["omega"]-min(Ad["omega"])) /
(max(Ad["omega"])-min(Ad["omega"]))-np.pi/2)+1)/2)
f = mlab.figure(fgcolor=(0., 0., 0.), bgcolor=(1, 1, 1), size=(3*POINTS_W, 3*POINTS_W))
for i in range(Ad["N"]):
mlab.plot3d(Ad["data"][tmin:1000, i].real, Ad["data"][tmin:1000, i].imag,
Ad["tt"][tmin:1000]/40,
tube_radius=0.005, colormap='jet', figure=f,
color=tuple(cmcolors2[i, :-1]))
ax = mlab.axes()
ax.axes.label_format = '%.1f'
ax.axes.corner_offset = 0.1
mlab.axes(xlabel='Re W', ylabel='Im W', zlabel='t', figure=f,
extent=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin]/40, 1],
nb_labels=3, ranges=[-.4/4, .4/4, -.4/4, .4/4, Ad["tt"][tmin], Ad["tt"][1000]])
mlab.points3d(Ad["data"][1000, :].real, Ad["data"][1000, :].imag, np.repeat(Ad["tt"][1000]/40, N),
color=(0, 0, 0),
figure=f)
mlab.view(azimuth=240, elevation=60, distance=1)
mlab.savefig('plot_1d.png', magnification=1)
mlab.show()
idxs = np.arange(N)
D, V = dmaps.dmaps(Ad["data"][1000:].T, eps=1e-2, alpha=1)
V[:, 1] = 2*(V[:, 1]-np.min(V[:, 1])) / \
(np.max(V[:, 1])-np.min(V[:, 1]))-1.
tmin = 500
import matplotlib.image as mpimg
mlab_image = mpimg.imread(r'plot_1b.png')
mlab_image2 = mpimg.imread(r'plot_1c.png')
mlab_image3 = mpimg.imread(r'plot_1d.png')
POINTS_W = 397.48499
fig = plt.figure(figsize=(POINTS_W/72, 1.2*5.5))
ax1 = fig.add_subplot(321)
ax1.scatter(Ad["init"].real, Ad["init"].imag, c=np.arange(N), s=2)
ax1.set_xlabel('Re W')
ax1.set_ylabel('Im W')
ax1.set_xticks((-0.4, 0., 0.4))
ax1.set_xticklabels((-0.4, 0., 0.4))
ax1.set_yticks((-0.4, 0., 0.4))
ax1.set_yticklabels((-0.4, 0., 0.4))
ax2 = fig.add_subplot(322)
crop = 180
ax2.imshow(mlab_image[int(1.5*crop):-crop, int(1.5*crop):-int(2*crop)])
ax2.set_axis_off()
ax3 = fig.add_subplot(323)
crop = 100
ax3.imshow(mlab_image2[crop:-crop, crop:-crop])
ax3.set_axis_off()
ax4 = fig.add_subplot(324)
crop = 100
ax4.imshow(mlab_image3[crop:-crop, crop:-crop])
ax4.set_axis_off()
ax5 = fig.add_subplot(325, projection='3d')
for i in range(int(Ad["data"].shape[1])):
ax5.plot(np.repeat(np.arange(len(idxs))[i], len(Ad["tt"])-tmin), Ad["tt"][tmin:],
Ad["data"][1+tmin:, idxs[i]].real, lw=0.09, color='k')
ax5.set_xlabel(r'$i$', labelpad=2)
ax5.set_ylabel(r'$t$')
ax5.set_zlabel(r'Re W', labelpad=6)
ax6 = fig.add_subplot(326, projection='3d')
for i in range(int(Ad["data"].shape[1])):
ax6.plot(np.repeat(V[i, 1], len(Ad["tt"])-tmin), Ad["tt"][tmin:],
Ad["data"][1+tmin:, i].real, lw=0.09, color='k')
ax6.set_xlabel(r'$\phi_i$', labelpad=2)
ax6.set_ylabel(r'$t$')
ax6.set_zlabel(r'Re W', labelpad=6)
plt.subplots_adjust(top=0.99, wspace=0.2, right=0.95, bottom=0.05, left=0.12, hspace=0.2)
ax5.view_init(elev=60., azim=10)
ax6.view_init(elev=60., azim=10)
ax1.text(-0.25, 0.95, r'$\mathbf{a}$', transform=ax1.transAxes, weight='bold', fontsize=12)
ax2.text(-0.1, 0.95, r'$\mathbf{b}$', transform=ax2.transAxes, weight='bold', fontsize=12)
ax3.text(-0.23, 1., r'$\mathbf{c}$', transform=ax3.transAxes, weight='bold', fontsize=12)
ax4.text(-0.05, 1., r'$\mathbf{d}$', transform=ax4.transAxes, weight='bold', fontsize=12)
ax5.text(40.0, -1.0, 2.0, r'$\mathbf{e}$', transform=ax5.transAxes, weight='bold', fontsize=12)
ax6.text(-9, 1.2, 1.8, r'$\mathbf{f}$', transform=ax6.transAxes, weight='bold', fontsize=12)
plt.savefig('figure_1.pdf', dpi=400)
plt.savefig('figure_1.png', dpi=400)
plt.show()
np.save('Source_Data_Figure_1.npy', Ad["data"])
| true | true |
f7fba03d674a6b5572f014da7b92c849f0701810 | 3,778 | py | Python | Python/prover.py | BuserLukas/Logic | cc0447554cfa75b213a10a2db37ce82c42afb91d | [
"MIT"
] | 13 | 2019-10-03T13:25:02.000Z | 2021-12-26T11:49:25.000Z | Python/prover.py | BuserLukas/Logic | cc0447554cfa75b213a10a2db37ce82c42afb91d | [
"MIT"
] | 19 | 2015-01-14T15:36:24.000Z | 2019-04-21T02:13:23.000Z | Python/prover.py | BuserLukas/Logic | cc0447554cfa75b213a10a2db37ce82c42afb91d | [
"MIT"
] | 18 | 2019-10-03T16:05:46.000Z | 2021-12-10T19:44:15.000Z |
import folParser as fp
def parse(s):
return fp.LogicParser(s).parse()
import folCNF as cnf
import unify
from string import ascii_lowercase
def complement(l):
"Compute the complement of the literal l."
if l[0] == '¬':
return l[1]
else:
return ('¬', l)
def collectVariables(C):
if isinstance(C, frozenset):
return { x for literal in C
for x in collectVariables(literal)
}
if isinstance(C, str): # C is a variable
return { C }
if C[0] == '¬':
return collectVariables(C[1])
# C must be a term or atomic formula
args = C[1:]
return { x for t in args for x in collectVariables(t) }
def renameVariables(f, g=frozenset()):
OldVars = collectVariables(f)
NewVars = set(ascii_lowercase) - collectVariables(g)
NewVars = sorted(list(NewVars))
sigma = { x: NewVars[i] for (i, x) in enumerate(OldVars) }
return unify.apply(f, sigma)
def resolve(C1, C2):
C2 = renameVariables(C2, C1)
Result = set()
for L1 in C1:
for L2 in C2:
mu = unify.mgu(L1, complement(L2))
if mu != None:
C1C2 = unify.apply((C1 - { L1 }) | (C2 - { L2 }), mu)
if len(C1C2) <= 3:
Result.add(C1C2)
return Result
def factorize(C):
Result = set()
for L1 in C:
for L2 in C:
if L1 != L2:
mu = unify.mgu(L1, L2)
if mu != None:
Cmu = unify.apply(C, mu)
Result.add(Cmu)
return Result
def infere(Clauses):
Result = { (C, (C1, C2)) for C1 in Clauses
for C2 in Clauses
for C in resolve(C1, C2)
}
Result |= { (C, (C1,)) for C1 in Clauses for C in factorize(C1) }
return Result
def saturate(Cs):
Clauses = Cs.copy()
cnt = 1
Reasons = {}
while frozenset() not in Clauses:
for (C, R) in infere(Clauses):
if C not in Clauses:
Reasons[C] = R
Clauses.add(C)
print(f'cnt = {cnt}, number of clauses: {len(Clauses)}')
cnt += 1
return Reasons
def constructProof(clause, Reasons):
if clause in Reasons:
reason = Reasons[clause]
else:
return [f'Axiom: {set(clause)}']
if len(reason) == 1:
(C,) = reason
Proof = constructProof(C, Reasons)
Proof.append(f'Factorization: {set(C)} \n⊢' + ' ' * 12 + f'{set(clause)}')
if len(reason) == 2:
C1, C2 = reason
ProofC1 = constructProof(C1, Reasons)
ProofC2 = constructProof(C2, Reasons)
Proof = update(ProofC1, ProofC2)
Proof.append(f'Resolution: {set(C1)},\n' + ' ' * 13 +
f'{set(C2)} \n⊢' + ' ' * 12 + f'{set(clause)}')
return Proof
def update(P1, P2):
Result = P1
for line in P2:
if line not in Result:
Result.append(line)
return Result
def prove(Axioms, claim):
Axioms = { parse(s) for s in Axioms }
claim = parse(claim)
Clauses = { C for f in Axioms for C in cnf.normalize(f) }
Clauses |= { C for C in cnf.normalize(('¬', claim)) }
for C in Clauses:
print(set(C))
Reasons = saturate(Clauses)
Proof = constructProof(frozenset(), Reasons)
for line in Proof:
print(line)
if __name__ == "__main__":
s1 = '∀x:(∀y:(Child(y, x) → CanFly(y)) → Happy(x))'
s2 = '∀x:(Red(x) → CanFly(x))'
s3 = '∀x:(Red(x) → ∀y:(Child(y, x) → Red(y)))'
s4 = '∀x:(Red(x) → Happy(x))'
Axioms = { s1, s2, s3 }
claim = s4
prove(Axioms, claim)
Axioms = { '∀x:(In(x, R) ↔ ¬In(x, x))' }
claim = '⊥'
prove(Axioms, claim)
| 28.621212 | 82 | 0.517205 |
import folParser as fp
def parse(s):
    """Parse a first-order-logic formula string into the tuple representation."""
    return fp.LogicParser(s).parse()
import folCNF as cnf
import unify
from string import ascii_lowercase
def complement(l):
    """Return the complement of literal *l*: strip a leading '¬' if present,
    otherwise wrap the literal in a negation."""
    return l[1] if l[0] == '¬' else ('¬', l)
def collectVariables(C):
    """Recursively gather all variable names in a clause (frozenset of
    literals), a literal, or a term; bare strings are variables."""
    if isinstance(C, frozenset):
        gathered = set()
        for literal in C:
            gathered |= collectVariables(literal)
        return gathered
    if isinstance(C, str):
        return {C}
    if C[0] == '¬':
        return collectVariables(C[1])
    # Compound term/atom: C[0] is the symbol, the rest are argument terms.
    gathered = set()
    for arg in C[1:]:
        gathered |= collectVariables(arg)
    return gathered
def renameVariables(f, g=frozenset()):
    """Rename all variables of *f* to single lowercase letters not occurring
    in *g* (used to make two clauses variable-disjoint before resolution).

    NOTE(review): only 26 letters are available — IndexError if f has more
    variables than letters left over after excluding g's; confirm inputs stay small.
    """
    OldVars = collectVariables(f)
    NewVars = set(ascii_lowercase) - collectVariables(g)
    NewVars = sorted(list(NewVars))  # sort for a deterministic renaming
    sigma = { x: NewVars[i] for (i, x) in enumerate(OldVars) }
    return unify.apply(f, sigma)
def resolve(C1, C2):
    """Return all resolvents of clauses C1 and C2.

    C2 is first renamed apart from C1; for every literal pair whose members
    unify with each other's complement, the combined remaining literals
    (with the mgu applied) form a resolvent.
    """
    C2 = renameVariables(C2, C1)
    Result = set()
    for L1 in C1:
        for L2 in C2:
            mu = unify.mgu(L1, complement(L2))
            if mu != None:
                C1C2 = unify.apply((C1 - { L1 }) | (C2 - { L2 }), mu)
                # Heuristic cap: keep only short clauses (≤ 3 literals) to
                # limit the search space.
                if len(C1C2) <= 3:
                    Result.add(C1C2)
    return Result
def factorize(C):
    """Return every factor of clause C: the clause instantiated with the mgu
    of any two distinct literals that unify."""
    Result = set()
    for L1 in C:
        for L2 in C:
            if L1 != L2:
                mu = unify.mgu(L1, L2)
                if mu != None:
                    Cmu = unify.apply(C, mu)
                    Result.add(Cmu)
    return Result
def infere(Clauses):
    """One inference step: all (derived clause, parents) pairs obtainable by
    binary resolution or unary factorization over Clauses."""
    Result = { (C, (C1, C2)) for C1 in Clauses
                             for C2 in Clauses
                             for C in resolve(C1, C2)
             }
    Result |= { (C, (C1,)) for C1 in Clauses for C in factorize(C1) }
    return Result
def saturate(Cs):
    """Saturate the clause set until the empty clause appears; return the
    clause -> parent-tuple map used for proof reconstruction.
    NOTE(review): does not terminate when the empty clause is underivable.
    """
    Clauses = Cs.copy()  # keep the caller's set intact
    cnt = 1
    Reasons = {}
    while frozenset() not in Clauses:
        for (C, R) in infere(Clauses):
            if C not in Clauses:
                Reasons[C] = R
                Clauses.add(C)
        print(f'cnt = {cnt}, number of clauses: {len(Clauses)}')
        cnt += 1
    return Reasons
def constructProof(clause, Reasons):
    """Recursively rebuild the proof (list of text lines) of *clause* from the
    Reasons map; clauses without a recorded reason are axioms."""
    if clause in Reasons:
        reason = Reasons[clause]
    else:
        return [f'Axiom: {set(clause)}']
    if len(reason) == 1:
        # Single parent: factorization step.
        (C,) = reason
        Proof = constructProof(C, Reasons)
        Proof.append(f'Factorization: {set(C)} \n⊢' + ' ' * 12 + f'{set(clause)}')
    if len(reason) == 2:
        # Two parents: resolution step; merge both sub-proofs without duplicates.
        C1, C2 = reason
        ProofC1 = constructProof(C1, Reasons)
        ProofC2 = constructProof(C2, Reasons)
        Proof = update(ProofC1, ProofC2)
        Proof.append(f'Resolution: {set(C1)},\n' + ' ' * 13 +
                     f'{set(C2)} \n⊢' + ' ' * 12 + f'{set(clause)}')
    return Proof
def update(P1, P2):
    """Append to P1 (mutated in place) each line of P2 not already present;
    return P1. Preserves the order of both proofs."""
    Result = P1
    for line in P2:
        if line not in Result:
            Result.append(line)
    return Result
def prove(Axioms, claim):
    """Refutation proof: normalize axioms plus ¬claim to CNF, saturate until
    the empty clause is derived, then print the reconstructed proof."""
    Axioms = { parse(s) for s in Axioms }
    claim = parse(claim)
    Clauses = { C for f in Axioms for C in cnf.normalize(f) }
    Clauses |= { C for C in cnf.normalize(('¬', claim)) }
    for C in Clauses:
        print(set(C))
    Reasons = saturate(Clauses)
    Proof = constructProof(frozenset(), Reasons)
    for line in Proof:
        print(line)
if __name__ == "__main__":
    # Demo 1: s1-s3 entail s4 (happy red birds).
    s1 = '∀x:(∀y:(Child(y, x) → CanFly(y)) → Happy(x))'
    s2 = '∀x:(Red(x) → CanFly(x))'
    s3 = '∀x:(Red(x) → ∀y:(Child(y, x) → Red(y)))'
    s4 = '∀x:(Red(x) → Happy(x))'
    Axioms = { s1, s2, s3 }
    claim = s4
    prove(Axioms, claim)
    # Demo 2: Russell's paradox — inconsistent axiom, so ⊥ follows.
    Axioms = { '∀x:(In(x, R) ↔ ¬In(x, x))' }
    claim = '⊥'
    prove(Axioms, claim)
| true | true |
f7fba056cdb6076e9bf7b924fb1ab04db98f7a08 | 3,001 | py | Python | setup.py | CHIPO-Project/temp | 99a38472bb12a10dcf2460ebfbfd7fa91ec35692 | [
"MIT"
] | null | null | null | setup.py | CHIPO-Project/temp | 99a38472bb12a10dcf2460ebfbfd7fa91ec35692 | [
"MIT"
] | 1 | 2021-06-02T00:43:25.000Z | 2021-06-02T00:43:25.000Z | setup.py | CHIPO-Project/temp | 99a38472bb12a10dcf2460ebfbfd7fa91ec35692 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
import os
import sys
import platform
import importlib.util
import argparse
import subprocess
from setuptools import setup, find_packages
from setuptools.command.install import install
# Minimum interpreter version enforced at install time.
MIN_PYTHON_VERSION = "3.6.1"
_min_python_version_tuple = tuple(map(int, (MIN_PYTHON_VERSION.split("."))))
if sys.version_info[:3] < _min_python_version_tuple:
    sys.exit("Error: Electrum requires Python version >= %s..." % MIN_PYTHON_VERSION)
# Runtime dependencies are maintained in the contrib requirement files.
with open('contrib/requirements/requirements.txt') as f:
    requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
    requirements_hw = f.read().splitlines()
# load version.py; needlessly complicated alternative to "imp.load_source":
version_spec = importlib.util.spec_from_file_location('version', 'electrum/version.py')
version_module = version = importlib.util.module_from_spec(version_spec)
version_spec.loader.exec_module(version_module)
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    # On these platforms, install the .desktop entry and icon under the
    # appropriate share directory (falling back to XDG data home when the
    # system prefix is not writable).
    parser = argparse.ArgumentParser()
    parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
    opts, _ = parser.parse_known_args(sys.argv[1:])
    usr_share = os.path.join(sys.prefix, "share")
    icons_dirname = 'pixmaps'
    if not os.access(opts.root_path + usr_share, os.W_OK) and \
       not os.access(opts.root_path, os.W_OK):
        icons_dirname = 'icons'
        if 'XDG_DATA_HOME' in os.environ.keys():
            usr_share = os.environ['XDG_DATA_HOME']
        else:
            usr_share = os.path.expanduser('~/.local/share')
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, icons_dirname), ['electrum/gui/icons/electrum.png']),
    ]
# Optional feature sets installable via "pip install Electrum[<extra>]".
extras_require = {
    'hardware': requirements_hw,
    'fast': ['pycryptodomex'],
    'gui': ['pyqt5'],
}
# 'full' is the union of every extra above.
extras_require['full'] = [pkg for sublist in list(extras_require.values()) for pkg in sublist]
setup(
    name="Electrum",
    version=version.ELECTRUM_VERSION,
    python_requires='>={}'.format(MIN_PYTHON_VERSION),
    install_requires=requirements,
    extras_require=extras_require,
    packages=[
        'electrum',
        'electrum.gui',
        'electrum.gui.qt',
        'electrum.plugins',
    ] + [('electrum.plugins.'+pkg) for pkg in find_packages('electrum/plugins')],
    package_dir={
        'electrum': 'electrum'
    },
    package_data={
        '': ['*.txt', '*.json', '*.ttf', '*.otf'],
        'electrum': [
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ],
        'electrum.gui': [
            'icons/*',
        ],
    },
    scripts=['electrum/electrum'],
    data_files=data_files,
    description="Lightweight CHIPO Wallet",
    author="chipo dev",
    author_email="dev@chipo.icu",
    license="MIT Licence",
    url="https://www.chipo.icu",
    long_description="""Lightweight CHIPO Wallet""",
)
| 31.589474 | 94 | 0.662113 |
import os
import sys
import platform
import importlib.util
import argparse
import subprocess
from setuptools import setup, find_packages
from setuptools.command.install import install
# Minimum interpreter version enforced at install time.
MIN_PYTHON_VERSION = "3.6.1"
_min_python_version_tuple = tuple(map(int, (MIN_PYTHON_VERSION.split("."))))
if sys.version_info[:3] < _min_python_version_tuple:
    sys.exit("Error: Electrum requires Python version >= %s..." % MIN_PYTHON_VERSION)
# Runtime dependencies live in the contrib requirement files.
with open('contrib/requirements/requirements.txt') as f:
    requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
    requirements_hw = f.read().splitlines()
# Load electrum/version.py as a module to read ELECTRUM_VERSION.
version_spec = importlib.util.spec_from_file_location('version', 'electrum/version.py')
version_module = version = importlib.util.module_from_spec(version_spec)
version_spec.loader.exec_module(version_module)
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    # Install the .desktop entry and icon; fall back to XDG data home when
    # the system prefix is not writable.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
    opts, _ = parser.parse_known_args(sys.argv[1:])
    usr_share = os.path.join(sys.prefix, "share")
    icons_dirname = 'pixmaps'
    if not os.access(opts.root_path + usr_share, os.W_OK) and \
       not os.access(opts.root_path, os.W_OK):
        icons_dirname = 'icons'
        if 'XDG_DATA_HOME' in os.environ.keys():
            usr_share = os.environ['XDG_DATA_HOME']
        else:
            usr_share = os.path.expanduser('~/.local/share')
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, icons_dirname), ['electrum/gui/icons/electrum.png']),
    ]
# Optional feature sets; 'full' is the union of all of them.
extras_require = {
    'hardware': requirements_hw,
    'fast': ['pycryptodomex'],
    'gui': ['pyqt5'],
}
extras_require['full'] = [pkg for sublist in list(extras_require.values()) for pkg in sublist]
setup(
    name="Electrum",
    version=version.ELECTRUM_VERSION,
    python_requires='>={}'.format(MIN_PYTHON_VERSION),
    install_requires=requirements,
    extras_require=extras_require,
    packages=[
        'electrum',
        'electrum.gui',
        'electrum.gui.qt',
        'electrum.plugins',
    ] + [('electrum.plugins.'+pkg) for pkg in find_packages('electrum/plugins')],
    package_dir={
        'electrum': 'electrum'
    },
    package_data={
        '': ['*.txt', '*.json', '*.ttf', '*.otf'],
        'electrum': [
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ],
        'electrum.gui': [
            'icons/*',
        ],
    },
    scripts=['electrum/electrum'],
    data_files=data_files,
    description="Lightweight CHIPO Wallet",
    author="chipo dev",
    author_email="dev@chipo.icu",
    license="MIT Licence",
    url="https://www.chipo.icu",
    long_description="""Lightweight CHIPO Wallet""",
)
| true | true |
f7fba101def49b948a1d90f3fd4d01321a63fcb4 | 67 | py | Python | typings/bl_i18n_utils/merge_po.py | Argmaster/PyR3 | 6786bcb6a101fe4bd4cc50fe43767b8178504b15 | [
"MIT"
] | 2 | 2021-12-12T18:51:52.000Z | 2022-02-23T09:49:16.000Z | src/blender/blender_autocomplete-master/2.92/bl_i18n_utils/merge_po.py | JonasWard/ClayAdventures | a716445ac690e4792e70658319aa1d5299f9c9e9 | [
"MIT"
] | 2 | 2021-11-08T12:09:02.000Z | 2021-12-12T23:01:12.000Z | typings/bl_i18n_utils/merge_po.py | Argmaster/PyR3 | 6786bcb6a101fe4bd4cc50fe43767b8178504b15 | [
"MIT"
] | null | null | null | import sys
import typing
def main():
    '''Placeholder entry point in this typings stub; intentionally does nothing.'''
    pass
| 6.090909 | 13 | 0.477612 | import sys
import typing
def main():
    '''Placeholder entry point in this typings stub; intentionally does nothing.'''
    pass
| true | true |
f7fba481c041dd7f36bebfdebccbbd1bf56935ba | 2,734 | py | Python | core/management/commands/reset.py | timgates42/timestrap | 744ebcb0cd5fc536245c18058236169f4f36cb8b | [
"BSD-2-Clause"
] | 1,758 | 2017-04-21T08:42:59.000Z | 2022-03-09T22:58:53.000Z | core/management/commands/reset.py | timgates42/timestrap | 744ebcb0cd5fc536245c18058236169f4f36cb8b | [
"BSD-2-Clause"
] | 172 | 2017-04-23T21:30:03.000Z | 2022-02-10T20:10:06.000Z | core/management/commands/reset.py | timgates42/timestrap | 744ebcb0cd5fc536245c18058236169f4f36cb8b | [
"BSD-2-Clause"
] | 138 | 2017-04-23T23:02:16.000Z | 2022-03-25T04:44:19.000Z | from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.six.moves import input
class Command(BaseCommand):
    """Irreversibly reset the instance.

    Flushes the selected database, re-runs migrations, and (by default)
    seeds the database with fake data via the ``fake`` management command.
    """

    help = (
        "Deletes all data from this instance and recreates the original "
        "site and default admin account."
    )

    def add_arguments(self, parser):
        """Register the --noinput/--no-input, --database and --fake options."""
        parser.add_argument(
            "--noinput",
            "--no-input",
            action="store_false",
            dest="interactive",
            default=True,
            help="Tells the command to NOT prompt the user for input of any " "kind.",
        )
        parser.add_argument(
            "--database",
            action="store",
            dest="database",
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database to flush. Defaults to the "default" '
            "database.",
        )
        parser.add_argument(
            "--fake",
            dest="iterations",
            default=5,
            # Fixed duplicated wording ("a number a number") in the help text.
            help=(
                "Fill the database with fake data after the reset. Provide "
                "the number of iterations to perform (higher number = "
                "more fake data)."
            ),
        )

    def handle(self, *args, **kwargs):
        """Confirm (unless --noinput), flush the DB, migrate, then seed fake data."""
        interactive = kwargs["interactive"]
        database = kwargs["database"]
        verbosity = kwargs["verbosity"]
        # --fake arrives as a string from the CLI; coerce to int.
        iterations = int(kwargs["iterations"])
        if interactive:
            confirm = input(
                """You have requested a reset of the application.
This will IRREVERSIBLY DESTROY all data currently in the
database, establish the initial site, and create the initial
admin user.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """
            )
        else:
            confirm = "yes"
        if confirm == "yes":
            try:
                call_command(
                    "flush", database=database, interactive=False, verbosity=verbosity
                )
                self.stdout.write(self.style.SUCCESS("Successfully flushed database."))
            except CommandError:
                # Abort early: migrating/seeding a half-flushed DB is unsafe.
                self.stdout.write(
                    self.style.ERROR("Database flush failed. Reset aborted.")
                )
                return
            call_command("migrate", verbosity=verbosity)
            if iterations > 0:
                call_command("fake", verbosity=verbosity, iterations=iterations)
            if verbosity > 0:
                self.stdout.write(self.style.SUCCESS("Reset operation complete."))
        else:
            if verbosity > 0:
                self.stdout.write(self.style.ERROR("Reset operation cancelled."))
| 32.939759 | 87 | 0.561814 | from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.six.moves import input
class Command(BaseCommand):
    """Irreversibly reset the instance: flush the database, re-run migrations,
    and optionally seed fake data via the ``fake`` management command."""

    help = (
        "Deletes all data from this instance and recreates the original "
        "site and default admin account."
    )

    def add_arguments(self, parser):
        """Register the --noinput/--no-input, --database and --fake options."""
        parser.add_argument(
            "--noinput",
            "--no-input",
            action="store_false",
            dest="interactive",
            default=True,
            help="Tells the command to NOT prompt the user for input of any " "kind.",
        )
        parser.add_argument(
            "--database",
            action="store",
            dest="database",
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database to flush. Defaults to the "default" '
            "database.",
        )
        parser.add_argument(
            "--fake",
            dest="iterations",
            default=5,
            help=(
                "Fill the database with fake data after the reset. Provide a "
                "number a number of iterations to perform (higher number = "
                "more fake data)."
            ),
        )

    def handle(self, *args, **kwargs):
        """Confirm (unless --noinput), flush the DB, migrate, then seed fake data."""
        interactive = kwargs["interactive"]
        database = kwargs["database"]
        verbosity = kwargs["verbosity"]
        # --fake arrives as a string from the CLI; coerce to int.
        iterations = int(kwargs["iterations"])
        if interactive:
            confirm = input(
                """You have requested a reset of the application.
This will IRREVERSIBLY DESTROY all data currently in the
database, establish the initial site, and create the initial
admin user.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """
            )
        else:
            confirm = "yes"
        if confirm == "yes":
            try:
                call_command(
                    "flush", database=database, interactive=False, verbosity=verbosity
                )
                self.stdout.write(self.style.SUCCESS("Successfully flushed database."))
            except CommandError:
                # Abort early: continuing after a failed flush is unsafe.
                self.stdout.write(
                    self.style.ERROR("Database flush failed. Reset aborted.")
                )
                return
            call_command("migrate", verbosity=verbosity)
            if iterations > 0:
                call_command("fake", verbosity=verbosity, iterations=iterations)
            if verbosity > 0:
                self.stdout.write(self.style.SUCCESS("Reset operation complete."))
        else:
            if verbosity > 0:
                self.stdout.write(self.style.ERROR("Reset operation cancelled."))
| true | true |
f7fba4936d2b1fb37d77beb19c5971a0fc860471 | 21,320 | py | Python | backend-for-whatsapp/app.py | kenextra/WhatsappMessenger | bde7aa81c41384983d8cd1515db38be7be49d080 | [
"Apache-2.0"
] | 1 | 2021-08-02T01:09:58.000Z | 2021-08-02T01:09:58.000Z | backend-for-whatsapp/app.py | kenextra/WhatsappMessenger | bde7aa81c41384983d8cd1515db38be7be49d080 | [
"Apache-2.0"
] | null | null | null | backend-for-whatsapp/app.py | kenextra/WhatsappMessenger | bde7aa81c41384983d8cd1515db38be7be49d080 | [
"Apache-2.0"
] | null | null | null | """ Import Libraries """
from flask import Flask, render_template, request, jsonify
import requests
import os
import json
import math
import string
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.compose import TransformedTargetRegressor
from twilio.twiml.messaging_response import MessagingResponse
from ibm_watson_machine_learning import APIClient
from twilio.rest import Client
from PIL import Image, ImageDraw, ImageFont
from news_bot import get_news
""" Initialize Flask Variables """
app = Flask(__name__)
# Directory roots used throughout the app for service state, credentials
# files, and the training dataset.
app.config["SERVICES"] = "static/watsonservices/"
app.config["CREDENTIALS"] = "static/watsoncredentials/"
app.config["DATASET"] = "static/datasets/"
# Twilio credentials, populated by getTwilioCredentials().
account_sid = ""
auth_token = ""
# Watson Machine Learning credentials, populated by getWmlCredentials().
wml_credentials = {}
space_id = ""
# Last inbound/outbound WhatsApp message, surfaced by /getMessages.
receivedMsg = ""
sentMsg = ""
@app.route("/getWmlCredentials")
def getWmlCredentials():
    """Load WML credentials from disk into module globals.

    Returns the credentials JSON with a "status" field, or a
    "Not Configured" status when the file is missing or unreadable.
    """
    try:
        global wml_credentials, space_id
        with open(app.config["CREDENTIALS"] + "wmlCredentials.json") as wmlCreds:
            wmlcred = json.loads(wmlCreds.read())
        wml_credentials = {"apikey": wmlcred.get("apikey"), "url": wmlcred.get("url")}
        space_id = wmlcred.get("space_id")
        returnablejson = wml_credentials
        returnablejson.update({"status": "Configured"})
        return jsonify(returnablejson)
    except Exception:
        # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return jsonify({"status": "Not Configured"})
@app.route("/getWatsonCredentials")
def getWatsonCredentials():
    """List the Watson credential files currently configured on disk."""
    try:
        x = scanAvailableFiles(app.config["CREDENTIALS"])
        returnableObj = {"services": x}
        return jsonify(returnableObj)
    except Exception:
        # Narrowed from a bare except: keep the graceful fallback but do
        # not swallow SystemExit/KeyboardInterrupt.
        return jsonify({"services": ["No Service Configured"]})
@app.route("/getTwilioCredentials")
def getTwilioCredentials():
    """Load Twilio account SID / auth token from twiliocredentials.json
    into module globals; report configuration status as JSON."""
    try:
        global account_sid
        global auth_token
        with open("twiliocredentials.json") as creds:
            twiliocred = json.loads(creds.read())
        account_sid = twiliocred.get("account_sid")
        auth_token = twiliocred.get("auth_token")
        return jsonify({"status": "Configured"})
    except Exception:
        # Narrowed from a bare except: missing/invalid file -> Not Configured,
        # but SystemExit/KeyboardInterrupt still propagate.
        return jsonify({"status": "Not Configured"})
@app.route("/getDeploymentState")
def getDeploymentState():
    """Report the deployed WML model's id, name and state from the saved
    deployment record; "Model not Deployed" when the record is absent."""
    try:
        with open(app.config["SERVICES"] + "wmlDeployment.json") as temp:
            cred = json.loads(temp.read())
        # Field paths follow the WML deployment record layout saved by
        # deployWMLModel().
        model_id = cred["entity"]["asset"]["id"]
        model_name = cred["entity"]["name"]
        model_status = cred["entity"]["status"]["state"]
        return jsonify(
            {
                "status": model_status,
                "modelId": model_id,
                "modelName": model_name,
            }
        )
    except Exception:
        return jsonify({"status": "Model not Deployed"})
@app.route("/storeTwilioCredentials", methods=["GET", "POST"])
def storeTwilioCredentials():
    """Persist Twilio credentials posted in the "Credentials" form field
    (JSON string) to twiliocredentials.json."""
    receivedPayload = json.loads(request.form["Credentials"])
    data = {
        "account_sid": receivedPayload.get("account_sid"),
        "auth_token": receivedPayload.get("auth_token"),
    }
    with open("twiliocredentials.json", "w") as fs:
        json.dump(data, fs, indent=2)
    return jsonify({"status": "Configured"})
@app.route("/storeWatsonCredentials", methods=["GET", "POST"])
def storeWatsonCredentials():
    """Persist Watson service credentials posted as a JSON string in the
    "Credentials" form field.

    For type "wml" the payload (minus "type") is written to
    wmlCredentials.json; for other services the "apikey" field itself holds
    a JSON document, which is augmented with the cloud-function URL and the
    app's base URL and written to <type>Credentials.json.
    """
    receivedPayload = json.loads(request.form["Credentials"])
    if receivedPayload.get("type") == "wml":
        data = receivedPayload
        data.pop("type")
        with open(app.config["CREDENTIALS"] + "wmlCredentials.json", "w") as fs:
            json.dump(data, fs, indent=2)
        return jsonify({"status": "Configured"})
    data = json.loads(receivedPayload.get("apikey"))
    data.update({"cloudfunctionurl": receivedPayload.get("cloudfunctionurl") + ".json"})
    data.update({"windowURL": receivedPayload.get("windowURL")})
    with open(
        app.config["CREDENTIALS"] + receivedPayload.get("type") + "Credentials.json",
        "w",
    ) as fs:
        json.dump(data, fs, indent=2)
    return jsonify({"status": "Configured"})
@app.route("/deployWMLModel")
def deployWMLModel():
    """Train a food-price regression model from the bundled dataset and
    deploy it to Watson Machine Learning; returns the new model id."""
    # importing the dataset
    df = pd.read_csv(app.config["DATASET"] + "Data.csv")
    columns_to_use = ["Area", "Item", "Months", "Value", "Year"]
    data = df[columns_to_use]
    # NOTE(review): `data` is a slice of df — these chained assignments may
    # raise SettingWithCopyWarning; consider .copy() upstream.
    data['Item'] = data['Item'].str.replace('Rice, paddy', 'Rice')
    data['Area'] = data.Area.str.lower()
    data['Item'] = data.Item.str.lower()
    data['Months'] = data.Months.str.lower()
    data.query('Value > 0.0', inplace=True)  # drop non-positive prices (log target below)
    X = data.drop(columns=["Value"], axis=1)
    y = data["Value"]
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.05, random_state=42
    )
    # One-hot encode categoricals, scale numerics, then fit a depth-capped
    # decision tree on the log10-transformed target.
    numerical_cols = X_train.select_dtypes(include=['int64', 'float64']).columns
    categorical_cols = X_train.select_dtypes(include=["object", "bool"]).columns
    cat_pipeline = Pipeline([("cat", OneHotEncoder()), ])
    num_pipeline = Pipeline([("num", StandardScaler()), ])
    transformer = ColumnTransformer(
        [
            ("num_pipe", num_pipeline, numerical_cols),
            ("cat_pipe", cat_pipeline, categorical_cols),
        ]
    )
    estimator = DecisionTreeRegressor(max_depth=150, random_state=42)
    tt_model = TransformedTargetRegressor(regressor=estimator,
                                          func=np.log10,
                                          inverse_func=sp.special.exp10
                                          )
    model = Pipeline([("preparation", transformer),
                      ("model", tt_model),
                      ])
    model.fit(X_train, y_train)
    print("Model Built Successfully")
    """ Deploy the Model to Watson Machine Learning """
    getWmlCredentials()  # refresh wml_credentials / space_id globals from disk
    client = APIClient(wml_credentials)
    client.set.default_space(space_id)
    sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.7_opence")
    metadata = {
        client.repository.ModelMetaNames.NAME: "Food Data Price Prediction",
        client.repository.ModelMetaNames.TYPE: "scikit-learn_0.23",
        client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid,
    }
    published_model = client.repository.store_model(model, meta_props=metadata)
    published_model_uid = client.repository.get_model_uid(published_model)
    deploy_meta = {
        client.deployments.ConfigurationMetaNames.NAME: "Deployment of Food Data Price Prediction",
        client.deployments.ConfigurationMetaNames.ONLINE: {},
    }
    created_deployment = client.deployments.create(
        published_model_uid, meta_props=deploy_meta
    )
    # Persist the deployment record; other routes read it to find the model.
    with open(app.config["SERVICES"] + "wmlDeployment.json", "w") as fp:
        json.dump(created_deployment, fp, indent=2)
    print(json.dumps(created_deployment, indent=2))
    print("Model Successfully Deployed..")
    with open(app.config["SERVICES"] + "wmlDeployment.json") as temp:
        cred = json.loads(temp.read())
    model_id = cred["entity"]["asset"]["id"]
    return jsonify({"status": "Deployed, Model ID: " + model_id})
def predict_price_wml(area, item):
    """Score the deployed WML model for (area, item) in the current month/year
    and return the predicted price rounded up to an integer."""
    getWmlCredentials()  # refresh wml_credentials / space_id globals
    cols = ['Area', 'Item', 'Months', 'Year']
    client = APIClient(wml_credentials)
    client.set.default_space(space_id)
    with open(app.config["SERVICES"] + 'wmlDeployment.json', 'r') as wmlDeployment:
        cred = json.loads(wmlDeployment.read())
    # Build a single-row frame matching the training feature layout;
    # the model was trained on lowercased strings.
    x = [area.lower(), item.lower()]
    today = pd.to_datetime("today")
    x.append(today.month_name().lower())
    x.append(today.year)
    x = np.array([x], dtype=object)
    z = pd.DataFrame(x, columns=cols)
    did = client.deployments.get_uid(cred)
    job_payload = {
        client.deployments.ScoringMetaNames.INPUT_DATA: [{'values': z}]
    }
    scoring_response = client.deployments.score(did, job_payload)
    # First prediction of the first (only) input row.
    value = scoring_response['predictions'][0]['values'][0][0]
    return math.ceil(value)
def createImagePrediction(area, item, price, dt_day):
    """Render the price-prediction card to static/images/predicted.png.

    Draws the prediction text onto the DarkOcean background: white labels,
    orange highlights for area/date, green for the price.
    """
    image = Image.open("static/images/DarkOcean.png")
    draw = ImageDraw.Draw(image)
    font = ImageFont.truetype("static/fonts/Roboto.ttf", size=55)
    white = "rgb(255, 255, 255)"
    orange = "rgb(255,165,0)"
    green = "rgb(0, 255, 0)"
    # Table-driven layout replaces seven near-identical draw.text stanzas:
    # (position, text, color) for each rendered fragment.
    fragments = [
        ((115, 300), f"Producer Price for {item}", white),
        ((115, 400), "in ", white),
        ((165, 400), f" {area} ", orange),
        ((115, 500), "on ", white),
        ((165, 500), f" {dt_day} ", orange),
        ((115, 600), "is ", white),
        ((165, 600), f"~{price} LCU/tonne", green),
    ]
    for xy, text, color in fragments:
        draw.text(xy, text, fill=color, font=font)
    image.save("static/images/predicted.png", optimize=True, quality=20)
def checkServices(to_, from_, client):
    """Send a WhatsApp message (via Twilio *client*) listing each configured
    Watson service and its deployment state.

    For every credentials file found, the corresponding deployment file (if
    any) supplies the state string; files list entries are rewritten in place
    into numbered, human-readable lines.
    """
    try:
        files = scanAvailableFiles(app.config["CREDENTIALS"])
        # print(files)
        idx = 0  # position in `files` being rewritten
        inx = 1  # display number shown to the user
        for i in files:
            if i == "wmlCredentials.json":
                x = scanAvailableFiles(app.config["SERVICES"])
                print(x)
                for j in x:
                    if j == "wmlDeployment.json":
                        with open(app.config["SERVICES"] + j) as temp:
                            cred = json.loads(temp.read())
                        files[idx] = "{0}. Watson Machine Learning -> *{1}*".format(
                            inx, cred["entity"]["status"]["state"]
                        )
                        inx += 1
                    else:
                        files[
                            idx
                        ] = "{0}. Watson Machine Learning -> *No Model Deployed*".format(
                            inx
                        )
                        inx += 1
            if i == "waCredentials.json":
                x = scanAvailableFiles(app.config["SERVICES"])
                print(x)
                for j in x:
                    if j == "waDeployment.json":
                        with open(app.config["SERVICES"] + j) as temp:
                            cred = json.loads(temp.read())
                        files[idx] = "{0}. Watson Assistant -> *{1}*".format(
                            inx, cred["entity"]["status"]["state"]
                        )
                        inx += 1
                    else:
                        files[idx] = "{0}. Watson Assistant -> *No Skills*".format(inx)
                        inx += 1
            if i == "wnluCredentials.json":
                x = scanAvailableFiles(app.config["SERVICES"])
                print(x)
                for j in x:
                    # NOTE(review): this branch checks "wmlDeployment.json" for
                    # the NLU service — looks like a copy-paste slip; confirm
                    # whether "wnluDeployment.json" was intended.
                    if j == "wmlDeployment.json":
                        with open(app.config["SERVICES"] + j) as temp:
                            cred = json.loads(temp.read())
                        files[
                            idx
                        ] = "{0}. Watson Natural Language Understanding -> *{1}*".format(
                            inx, cred["entity"]["status"]["state"]
                        )
                        inx += 1
                    else:
                        files[
                            idx
                        ] = "{0}. Watson Natural Language Understanding -> *No Custom Model Deployed*".format(
                            inx
                        )
                        inx += 1
            if i == "wvrCredentials.json":
                x = scanAvailableFiles(app.config["SERVICES"])
                print(x)
                for j in x:
                    if j == "wvrDeployment.json":
                        with open(app.config["SERVICES"] + j) as temp:
                            cred = json.loads(temp.read())
                        files[idx] = "{0}. Watson Visual Recognition -> *{1}*".format(
                            inx, cred["entity"]["status"]["state"]
                        )
                        inx += 1
                    else:
                        files[
                            idx
                        ] = "{0}. Watson Visual Recognition -> *No Custom Model Deployed*".format(
                            inx
                        )
                        inx += 1
            idx += 1
        files.append(f"{idx+1}. Watson Assistant -> *For Agriculture News*")
        services = "\n".join(files)
        msg = (
            f"I found the following services associated to me: \n\n{services}"
            "\n\nEnter the number to know more."
        )
        message = client.messages.create(from_=from_, body=msg, to=to_)
        global sentMsg
        sentMsg = "I am a bot who is connected to watson services on IBM Cloud! \nTry asking *What are the services you are connected to?*"
        return message.sid
    except Exception as e:
        # Fall back to a single "not configured" reply on any failure.
        files = "no service associated, please configure the application on IBM Cloud"
        print(e)
        message = client.messages.create(from_=from_, body=files, to=to_)
        return message.sid
def scanAvailableFiles(path):
    """Return the directory entries under *path*, excluding the '.gitkeep'
    placeholder file if present."""
    entries = os.listdir(path)
    if '.gitkeep' in entries:
        entries.remove('.gitkeep')
    return entries
@app.route("/getMessages")
def getMessages():
    """Expose the last sent/received WhatsApp message pair for the UI."""
    global receivedMsg
    global sentMsg
    return jsonify({"sentMsg": sentMsg, "receivedMsg": receivedMsg})
""" Default Route """
@app.route("/", methods=["GET", "POST"])
def index():
    """Main webhook / landing page.

    GET renders the dashboard; POST is the Twilio WhatsApp webhook: the
    inbound message body is matched (case/punctuation-insensitively) against
    a sequence of commands, each replying via TwiML or the Twilio REST API.
    """
    if request.method == "POST":
        getTwilioCredentials()  # refresh account_sid/auth_token globals
        with open(app.config["CREDENTIALS"] + "wmlCredentials.json") as wmlCreds:
            wmlcred = json.loads(wmlCreds.read())
        ResponseMsg = json.dumps(request.form.to_dict(), indent=2)
        respo = json.loads(ResponseMsg)
        # print(respo)
        global receivedMsg
        global sentMsg
        receivedMsg = respo.get("Body")
        # Translation table that strips all punctuation before matching.
        trans = str.maketrans('', '', string.punctuation)
        # Command: capability overview (replies with the architecture image).
        if str(respo.get("Body")).strip().lower().translate(trans) == "what can you do":
            client = Client(account_sid, auth_token)
            to_ = respo.get("From")
            from_ = respo.get("To")
            message = client.messages.create(
                from_=from_,
                body="I am a bot who is connected to watson services on IBM Cloud! \nTry asking *What are the services you are connected to?*",
                media_url=wmlcred.get("windowURL") + "static/images/architecture.png",
                to=to_,
            )
            sentMsg = "I am a bot who is connected to watson services on IBM Cloud! \nTry asking *What are the services you are connected to?*"
            return message.sid
        # Command: list configured Watson services.
        if str(respo.get("Body")).strip().lower().translate(trans) == "what are the services you are connected to":
            to_ = respo.get("From")
            from_ = respo.get("To")
            client = Client(account_sid, auth_token)
            checkServices(to_, from_, client)
            return str("ok")
        # Command "1": Watson Machine Learning status details.
        if respo.get("Body") == "1":
            message = "Watson Machine Learning Details"
            resp = MessagingResponse()
            resp.message(message)
            sentMsg = message
            x = scanAvailableFiles(app.config["SERVICES"])
            # Returns on the first entry either way, so only the first file
            # in the services directory is ever inspected.
            for j in x:
                if j == "wmlDeployment.json":
                    with open(app.config["SERVICES"] + j) as temp:
                        cred = json.loads(temp.read())
                    model_id = cred["entity"]["asset"]["id"]
                    model_name = cred["entity"]["name"]
                    model_status = cred["entity"]["status"]["state"]
                    if model_status == "ready":
                        message = (
                            f"WML Model id: *{model_id}*"
                            f"\nWML Model Name: *{model_name}*"
                            f"\nWML Model Status: *{model_status}*"
                            "\n\nTry asking *I want to know food prices*"
                        )
                    else:
                        message = (
                            f"Model id: *{model_id}*"
                            f"\nModel Name: *{model_name}*"
                            f"\nModel Status: *{model_status}*"
                        )
                    resp.message(message)
                    sentMsg = message
                    return str(resp)
                else:
                    message = "Service configured, but no model deployed!\nType *Deploy* to deploy a test model"
                    resp.message(message)
                    sentMsg = message
                    return str(resp)
        # Command "2": Watson Assistant hint.
        if respo.get("Body") == "2":
            message = "Watson Assistant"
            resp = MessagingResponse()
            resp.message(message)
            sentMsg = message
            message = "Type *News* for Agriculture News"
            resp.message(message)
            sentMsg = message
            return str(resp)
        # Command: show the Predict usage format.
        if respo.get("Body").strip().lower().translate(trans) == "i want to know food prices":
            message = "Please enter the details with the below format:\n\n*Predict:<Country>,<Item>*\n\nExample: *Predict:Germany,Apples*"
            resp = MessagingResponse()
            resp.message(message)
            sentMsg = message
            return str(resp)
        # Command "Predict:<Country>,<Item>": score the WML model and reply
        # with the rendered prediction card.
        if respo.get("Body")[:7].strip().lower() == "predict":
            temp = respo.get("Body").split(":")[1].split(",")
            area = temp[0].strip()
            item = temp[1].strip()
            price = predict_price_wml(area, item)
            today = pd.to_datetime("today")
            dt_day = today.date().strftime("%A, %d %B %Y")
            messageTxt = f"Item: *{item}*\n\nwill cost you approx: *{price}* LCU/tonne\n\nin *{area}*\n\n on *{dt_day}*"
            createImagePrediction(area, item, price, dt_day)
            client = Client(account_sid, auth_token)
            to_ = respo.get("From")
            from_ = respo.get("To")
            message = client.messages.create(
                from_=from_,
                body=messageTxt,
                media_url=wmlcred.get("windowURL") + "static/images/predicted.png",
                to=to_,
            )
            sentMsg = messageTxt
            return message.sid
        # Command "News": agriculture headlines from the news bot.
        if respo.get("Body").strip().lower().translate(trans) == "news":
            message = get_news()
            resp = MessagingResponse()
            resp.message(message)
            sentMsg = message
            return str(resp)
        # Command containing "google": reply with a Google search URL.
        if "google" in respo.get("Body").strip().lower():
            query = str(respo.get("Body")).lower().replace("google", "")
            query = query.replace(" ", "+")
            message = f"https://www.google.com/search?q={query}"
            resp = MessagingResponse()
            resp.message(message)
            sentMsg = message
            return str(resp)
        # Inbound media: classify the image via the Visual Recognition
        # cloud function and reply with the result.
        if respo.get("MediaUrl0") is not None:
            imageURL = respo.get("MediaUrl0")
            with open(app.config["CREDENTIALS"] + "wvrCredentials.json") as wmlCreds:
                wvrcred = json.loads(wmlCreds.read())
            payload = {
                "apikey": wvrcred.get("apikey"),
                "url": wvrcred.get("url"),
                "imageURL": imageURL,
            }
            r = requests.post(wvrcred.get("cloudfunctionurl"), data=payload)
            response = r.json()
            messageTxt = "Classified as *{0}*\nwith an accuracy of *{1}*".format(
                response.get("class"), response.get("score")
            )
            # createImageVisual(response.get("class"), response.get("score"))
            client = Client(account_sid, auth_token)
            to_ = respo.get("From")
            from_ = respo.get("To")
            message = client.messages.create(
                from_=from_,
                body=messageTxt,
                media_url=wvrcred.get("windowURL") + "static/images/visualclass.png",
                to=to_,
            )
            sentMsg = messageTxt
            return message.sid
        # Fallback: echo an explanation of the round trip.
        msg = "The message,\n'_{0}_'\nthat you typed on your phone, went through\nWhatsapp -> Twilio -> Python App hosted on IBM Cloud and returned back to you from\nPython App hosted on IBM Cloud -> Twilio -> Whatsapp.\n\n*How Cool is that!!*\n\n Try asking *What can you do?*".format(
            respo.get("Body")
        )
        resp = MessagingResponse()
        resp.message(msg)
        sentMsg = msg
        return str(resp)
    return render_template("index.html")
""" Start the Server """
# Cloud Foundry supplies VCAP_APP_PORT; default to 8080 locally.
port = os.getenv("VCAP_APP_PORT", "8080")
if __name__ == "__main__":
    app.secret_key = os.urandom(12)  # fresh random session key per process
    app.run(debug=True, host="0.0.0.0", port=port)
| 35.47421 | 286 | 0.552486 |
from flask import Flask, render_template, request, jsonify
import requests
import os
import json
import math
import string
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.compose import TransformedTargetRegressor
from twilio.twiml.messaging_response import MessagingResponse
from ibm_watson_machine_learning import APIClient
from twilio.rest import Client
from PIL import Image, ImageDraw, ImageFont
from news_bot import get_news
app = Flask(__name__)
# Directory roots for service state, credentials files, and the dataset.
app.config["SERVICES"] = "static/watsonservices/"
app.config["CREDENTIALS"] = "static/watsoncredentials/"
app.config["DATASET"] = "static/datasets/"
# Twilio credentials, populated by getTwilioCredentials().
account_sid = ""
auth_token = ""
# Watson Machine Learning credentials, populated by getWmlCredentials().
wml_credentials = {}
space_id = ""
# Last inbound/outbound WhatsApp message, surfaced by /getMessages.
receivedMsg = ""
sentMsg = ""
@app.route("/getWmlCredentials")
def getWmlCredentials():
    """Load WML credentials from disk into module globals and report status."""
    try:
        global wml_credentials, space_id
        with open(app.config["CREDENTIALS"] + "wmlCredentials.json") as wmlCreds:
            wmlcred = json.loads(wmlCreds.read())
        wml_credentials = {"apikey": wmlcred.get("apikey"), "url": wmlcred.get("url")}
        space_id = wmlcred.get("space_id")
        returnablejson = wml_credentials
        returnablejson.update({"status": "Configured"})
        return jsonify(returnablejson)
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # prefer `except Exception:`.
    except:
        return jsonify({"status": "Not Configured"})
@app.route("/getWatsonCredentials")
def getWatsonCredentials():
try:
x = scanAvailableFiles(app.config["CREDENTIALS"])
returnableObj = {"services": x}
return jsonify(returnableObj)
except:
return jsonify({"services": ["No Service Configured"]})
@app.route("/getTwilioCredentials")
def getTwilioCredentials():
try:
global account_sid
global auth_token
with open("twiliocredentials.json") as creds:
twiliocred = json.loads(creds.read())
account_sid = twiliocred.get("account_sid")
auth_token = twiliocred.get("auth_token")
return jsonify({"status": "Configured"})
except:
return jsonify({"status": "Not Configured"})
@app.route("/getDeploymentState")
def getDeploymentState():
try:
with open(app.config["SERVICES"] + "wmlDeployment.json") as temp:
cred = json.loads(temp.read())
model_id = cred["entity"]["asset"]["id"]
model_name = cred["entity"]["name"]
model_status = cred["entity"]["status"]["state"]
return jsonify(
{
"status": model_status,
"modelId": model_id,
"modelName": model_name,
}
)
except Exception:
return jsonify({"status": "Model not Deployed"})
@app.route("/storeTwilioCredentials", methods=["GET", "POST"])
def storeTwilioCredentials():
receivedPayload = json.loads(request.form["Credentials"])
data = {
"account_sid": receivedPayload.get("account_sid"),
"auth_token": receivedPayload.get("auth_token"),
}
with open("twiliocredentials.json", "w") as fs:
json.dump(data, fs, indent=2)
return jsonify({"status": "Configured"})
@app.route("/storeWatsonCredentials", methods=["GET", "POST"])
def storeWatsonCredentials():
receivedPayload = json.loads(request.form["Credentials"])
if receivedPayload.get("type") == "wml":
data = receivedPayload
data.pop("type")
with open(app.config["CREDENTIALS"] + "wmlCredentials.json", "w") as fs:
json.dump(data, fs, indent=2)
return jsonify({"status": "Configured"})
data = json.loads(receivedPayload.get("apikey"))
data.update({"cloudfunctionurl": receivedPayload.get("cloudfunctionurl") + ".json"})
data.update({"windowURL": receivedPayload.get("windowURL")})
with open(
app.config["CREDENTIALS"] + receivedPayload.get("type") + "Credentials.json",
"w",
) as fs:
json.dump(data, fs, indent=2)
return jsonify({"status": "Configured"})
@app.route("/deployWMLModel")
def deployWMLModel():
df = pd.read_csv(app.config["DATASET"] + "Data.csv")
columns_to_use = ["Area", "Item", "Months", "Value", "Year"]
data = df[columns_to_use]
data['Item'] = data['Item'].str.replace('Rice, paddy', 'Rice')
data['Area'] = data.Area.str.lower()
data['Item'] = data.Item.str.lower()
data['Months'] = data.Months.str.lower()
data.query('Value > 0.0', inplace=True)
X = data.drop(columns=["Value"], axis=1)
y = data["Value"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.05, random_state=42
)
numerical_cols = X_train.select_dtypes(include=['int64', 'float64']).columns
categorical_cols = X_train.select_dtypes(include=["object", "bool"]).columns
cat_pipeline = Pipeline([("cat", OneHotEncoder()), ])
num_pipeline = Pipeline([("num", StandardScaler()), ])
transformer = ColumnTransformer(
[
("num_pipe", num_pipeline, numerical_cols),
("cat_pipe", cat_pipeline, categorical_cols),
]
)
estimator = DecisionTreeRegressor(max_depth=150, random_state=42)
tt_model = TransformedTargetRegressor(regressor=estimator,
func=np.log10,
inverse_func=sp.special.exp10
)
model = Pipeline([("preparation", transformer),
("model", tt_model),
])
model.fit(X_train, y_train)
print("Model Built Successfully")
getWmlCredentials()
client = APIClient(wml_credentials)
client.set.default_space(space_id)
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.7_opence")
metadata = {
client.repository.ModelMetaNames.NAME: "Food Data Price Prediction",
client.repository.ModelMetaNames.TYPE: "scikit-learn_0.23",
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid,
}
published_model = client.repository.store_model(model, meta_props=metadata)
published_model_uid = client.repository.get_model_uid(published_model)
deploy_meta = {
client.deployments.ConfigurationMetaNames.NAME: "Deployment of Food Data Price Prediction",
client.deployments.ConfigurationMetaNames.ONLINE: {},
}
created_deployment = client.deployments.create(
published_model_uid, meta_props=deploy_meta
)
with open(app.config["SERVICES"] + "wmlDeployment.json", "w") as fp:
json.dump(created_deployment, fp, indent=2)
print(json.dumps(created_deployment, indent=2))
print("Model Successfully Deployed..")
with open(app.config["SERVICES"] + "wmlDeployment.json") as temp:
cred = json.loads(temp.read())
model_id = cred["entity"]["asset"]["id"]
return jsonify({"status": "Deployed, Model ID: " + model_id})
def predict_price_wml(area, item):
getWmlCredentials()
cols = ['Area', 'Item', 'Months', 'Year']
client = APIClient(wml_credentials)
client.set.default_space(space_id)
with open(app.config["SERVICES"] + 'wmlDeployment.json', 'r') as wmlDeployment:
cred = json.loads(wmlDeployment.read())
x = [area.lower(), item.lower()]
today = pd.to_datetime("today")
x.append(today.month_name().lower())
x.append(today.year)
x = np.array([x], dtype=object)
z = pd.DataFrame(x, columns=cols)
did = client.deployments.get_uid(cred)
job_payload = {
client.deployments.ScoringMetaNames.INPUT_DATA: [{'values': z}]
}
scoring_response = client.deployments.score(did, job_payload)
value = scoring_response['predictions'][0]['values'][0][0]
return math.ceil(value)
def createImagePrediction(area, item, price, dt_day):
image = Image.open("static/images/DarkOcean.png")
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("static/fonts/Roboto.ttf", size=55)
(x, y) = (115, 300)
message = f"Producer Price for {item}"
color = "rgb(255, 255, 255)"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (115, 400)
message = "in "
color = "rgb(255, 255, 255)"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (165, 400)
message = f" {area} "
color = "rgb(255,165,0)"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (115, 500)
message = f"on "
color = "rgb(255, 255, 255)"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (165, 500)
message = f" {dt_day} "
color = "rgb(255,165,0)"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (115, 600)
message = "is "
color = "rgb(255, 255, 255)"
draw.text((x, y), message, fill=color, font=font)
(x, y) = (165, 600)
name = f"~{price} LCU/tonne"
color = "rgb(0, 255, 0)"
draw.text((x, y), name, fill=color, font=font)
image.save("static/images/predicted.png", optimize=True, quality=20)
def checkServices(to_, from_, client):
try:
files = scanAvailableFiles(app.config["CREDENTIALS"])
idx = 0
inx = 1
for i in files:
if i == "wmlCredentials.json":
x = scanAvailableFiles(app.config["SERVICES"])
print(x)
for j in x:
if j == "wmlDeployment.json":
with open(app.config["SERVICES"] + j) as temp:
cred = json.loads(temp.read())
files[idx] = "{0}. Watson Machine Learning -> *{1}*".format(
inx, cred["entity"]["status"]["state"]
)
inx += 1
else:
files[
idx
] = "{0}. Watson Machine Learning -> *No Model Deployed*".format(
inx
)
inx += 1
if i == "waCredentials.json":
x = scanAvailableFiles(app.config["SERVICES"])
print(x)
for j in x:
if j == "waDeployment.json":
with open(app.config["SERVICES"] + j) as temp:
cred = json.loads(temp.read())
files[idx] = "{0}. Watson Assistant -> *{1}*".format(
inx, cred["entity"]["status"]["state"]
)
inx += 1
else:
files[idx] = "{0}. Watson Assistant -> *No Skills*".format(inx)
inx += 1
if i == "wnluCredentials.json":
x = scanAvailableFiles(app.config["SERVICES"])
print(x)
for j in x:
if j == "wmlDeployment.json":
with open(app.config["SERVICES"] + j) as temp:
cred = json.loads(temp.read())
files[
idx
] = "{0}. Watson Natural Language Understanding -> *{1}*".format(
inx, cred["entity"]["status"]["state"]
)
inx += 1
else:
files[
idx
] = "{0}. Watson Natural Language Understanding -> *No Custom Model Deployed*".format(
inx
)
inx += 1
if i == "wvrCredentials.json":
x = scanAvailableFiles(app.config["SERVICES"])
print(x)
for j in x:
if j == "wvrDeployment.json":
with open(app.config["SERVICES"] + j) as temp:
cred = json.loads(temp.read())
files[idx] = "{0}. Watson Visual Recognition -> *{1}*".format(
inx, cred["entity"]["status"]["state"]
)
inx += 1
else:
files[
idx
] = "{0}. Watson Visual Recognition -> *No Custom Model Deployed*".format(
inx
)
inx += 1
idx += 1
files.append(f"{idx+1}. Watson Assistant -> *For Agriculture News*")
services = "\n".join(files)
msg = (
f"I found the following services associated to me: \n\n{services}"
"\n\nEnter the number to know more."
)
message = client.messages.create(from_=from_, body=msg, to=to_)
global sentMsg
sentMsg = "I am a bot who is connected to watson services on IBM Cloud! \nTry asking *What are the services you are connected to?*"
return message.sid
except Exception as e:
files = "no service associated, please configure the application on IBM Cloud"
print(e)
message = client.messages.create(from_=from_, body=files, to=to_)
return message.sid
def scanAvailableFiles(path):
availableFiles = os.listdir(path)
if '.gitkeep' in availableFiles:
availableFiles.pop(availableFiles.index('.gitkeep'))
return availableFiles
@app.route("/getMessages")
def getMessages():
global receivedMsg
global sentMsg
return jsonify({"sentMsg": sentMsg, "receivedMsg": receivedMsg})
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
getTwilioCredentials()
with open(app.config["CREDENTIALS"] + "wmlCredentials.json") as wmlCreds:
wmlcred = json.loads(wmlCreds.read())
ResponseMsg = json.dumps(request.form.to_dict(), indent=2)
respo = json.loads(ResponseMsg)
global receivedMsg
global sentMsg
receivedMsg = respo.get("Body")
trans = str.maketrans('', '', string.punctuation)
if str(respo.get("Body")).strip().lower().translate(trans) == "what can you do":
client = Client(account_sid, auth_token)
to_ = respo.get("From")
from_ = respo.get("To")
message = client.messages.create(
from_=from_,
body="I am a bot who is connected to watson services on IBM Cloud! \nTry asking *What are the services you are connected to?*",
media_url=wmlcred.get("windowURL") + "static/images/architecture.png",
to=to_,
)
sentMsg = "I am a bot who is connected to watson services on IBM Cloud! \nTry asking *What are the services you are connected to?*"
return message.sid
if str(respo.get("Body")).strip().lower().translate(trans) == "what are the services you are connected to":
to_ = respo.get("From")
from_ = respo.get("To")
client = Client(account_sid, auth_token)
checkServices(to_, from_, client)
return str("ok")
if respo.get("Body") == "1":
message = "Watson Machine Learning Details"
resp = MessagingResponse()
resp.message(message)
sentMsg = message
x = scanAvailableFiles(app.config["SERVICES"])
for j in x:
if j == "wmlDeployment.json":
with open(app.config["SERVICES"] + j) as temp:
cred = json.loads(temp.read())
model_id = cred["entity"]["asset"]["id"]
model_name = cred["entity"]["name"]
model_status = cred["entity"]["status"]["state"]
if model_status == "ready":
message = (
f"WML Model id: *{model_id}*"
f"\nWML Model Name: *{model_name}*"
f"\nWML Model Status: *{model_status}*"
"\n\nTry asking *I want to know food prices*"
)
else:
message = (
f"Model id: *{model_id}*"
f"\nModel Name: *{model_name}*"
f"\nModel Status: *{model_status}*"
)
resp.message(message)
sentMsg = message
return str(resp)
else:
message = "Service configured, but no model deployed!\nType *Deploy* to deploy a test model"
resp.message(message)
sentMsg = message
return str(resp)
if respo.get("Body") == "2":
message = "Watson Assistant"
resp = MessagingResponse()
resp.message(message)
sentMsg = message
message = "Type *News* for Agriculture News"
resp.message(message)
sentMsg = message
return str(resp)
if respo.get("Body").strip().lower().translate(trans) == "i want to know food prices":
message = "Please enter the details with the below format:\n\n*Predict:<Country>,<Item>*\n\nExample: *Predict:Germany,Apples*"
resp = MessagingResponse()
resp.message(message)
sentMsg = message
return str(resp)
if respo.get("Body")[:7].strip().lower() == "predict":
temp = respo.get("Body").split(":")[1].split(",")
area = temp[0].strip()
item = temp[1].strip()
price = predict_price_wml(area, item)
today = pd.to_datetime("today")
dt_day = today.date().strftime("%A, %d %B %Y")
messageTxt = f"Item: *{item}*\n\nwill cost you approx: *{price}* LCU/tonne\n\nin *{area}*\n\n on *{dt_day}*"
createImagePrediction(area, item, price, dt_day)
client = Client(account_sid, auth_token)
to_ = respo.get("From")
from_ = respo.get("To")
message = client.messages.create(
from_=from_,
body=messageTxt,
media_url=wmlcred.get("windowURL") + "static/images/predicted.png",
to=to_,
)
sentMsg = messageTxt
return message.sid
if respo.get("Body").strip().lower().translate(trans) == "news":
message = get_news()
resp = MessagingResponse()
resp.message(message)
sentMsg = message
return str(resp)
if "google" in respo.get("Body").strip().lower():
query = str(respo.get("Body")).lower().replace("google", "")
query = query.replace(" ", "+")
message = f"https://www.google.com/search?q={query}"
resp = MessagingResponse()
resp.message(message)
sentMsg = message
return str(resp)
if respo.get("MediaUrl0") is not None:
imageURL = respo.get("MediaUrl0")
with open(app.config["CREDENTIALS"] + "wvrCredentials.json") as wmlCreds:
wvrcred = json.loads(wmlCreds.read())
payload = {
"apikey": wvrcred.get("apikey"),
"url": wvrcred.get("url"),
"imageURL": imageURL,
}
r = requests.post(wvrcred.get("cloudfunctionurl"), data=payload)
response = r.json()
messageTxt = "Classified as *{0}*\nwith an accuracy of *{1}*".format(
response.get("class"), response.get("score")
)
client = Client(account_sid, auth_token)
to_ = respo.get("From")
from_ = respo.get("To")
message = client.messages.create(
from_=from_,
body=messageTxt,
media_url=wvrcred.get("windowURL") + "static/images/visualclass.png",
to=to_,
)
sentMsg = messageTxt
return message.sid
msg = "The message,\n'_{0}_'\nthat you typed on your phone, went through\nWhatsapp -> Twilio -> Python App hosted on IBM Cloud and returned back to you from\nPython App hosted on IBM Cloud -> Twilio -> Whatsapp.\n\n*How Cool is that!!*\n\n Try asking *What can you do?*".format(
respo.get("Body")
)
resp = MessagingResponse()
resp.message(msg)
sentMsg = msg
return str(resp)
return render_template("index.html")
port = os.getenv("VCAP_APP_PORT", "8080")
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.run(debug=True, host="0.0.0.0", port=port)
| true | true |
f7fba53f0fbb830bd209ab71d4348a1fc352b178 | 2,262 | py | Python | setup.py | itaiw/databricks-cli | fdcb92499da5cda90c4436d5f09cdc697a8f46b9 | [
"Apache-2.0"
] | null | null | null | setup.py | itaiw/databricks-cli | fdcb92499da5cda90c4436d5f09cdc697a8f46b9 | [
"Apache-2.0"
] | null | null | null | setup.py | itaiw/databricks-cli | fdcb92499da5cda90c4436d5f09cdc697a8f46b9 | [
"Apache-2.0"
] | null | null | null | # Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import io
import os
from setuptools import setup, find_packages
version = imp.load_source(
'databricks_cli.version', os.path.join('databricks_cli', 'version.py')).version
setup(
name='databricks-cli',
version=version,
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
'click>=6.7',
'requests>=2.17.3',
'tabulate>=0.7.7',
'six>=1.10.0',
'configparser>=0.3.5;python_version < "3.6"',
],
entry_points='''
[console_scripts]
databricks=databricks_cli.cli:cli
dbfs=databricks_cli.dbfs.cli:dbfs_group
''',
zip_safe=False,
author='Andrew Chen',
author_email='andrewchen@databricks.com',
description='A command line interface for Databricks',
long_description=io.open('README.rst', encoding='utf-8').read(),
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='databricks cli',
url='https://github.com/databricks/databricks-cli'
)
| 35.904762 | 83 | 0.697171 |
import imp
import io
import os
from setuptools import setup, find_packages
version = imp.load_source(
'databricks_cli.version', os.path.join('databricks_cli', 'version.py')).version
setup(
name='databricks-cli',
version=version,
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
'click>=6.7',
'requests>=2.17.3',
'tabulate>=0.7.7',
'six>=1.10.0',
'configparser>=0.3.5;python_version < "3.6"',
],
entry_points='''
[console_scripts]
databricks=databricks_cli.cli:cli
dbfs=databricks_cli.dbfs.cli:dbfs_group
''',
zip_safe=False,
author='Andrew Chen',
author_email='andrewchen@databricks.com',
description='A command line interface for Databricks',
long_description=io.open('README.rst', encoding='utf-8').read(),
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='databricks cli',
url='https://github.com/databricks/databricks-cli'
)
| true | true |
f7fba61f2b5e0c950aed03670c9958ba8e0453be | 9,247 | py | Python | wordle/solve.py | ZLLentz/wordle_calc | 02f654e01793d9d5303638dd84e2377c55436fa5 | [
"0BSD"
] | 1 | 2022-02-02T23:27:41.000Z | 2022-02-02T23:27:41.000Z | wordle/solve.py | ZLLentz/wordle_calc | 02f654e01793d9d5303638dd84e2377c55436fa5 | [
"0BSD"
] | null | null | null | wordle/solve.py | ZLLentz/wordle_calc | 02f654e01793d9d5303638dd84e2377c55436fa5 | [
"0BSD"
] | null | null | null | import logging
import multiprocessing
import time
from functools import cache, partial
from typing import Optional
from .game import SingleGame, WordEval
from .words import WordList
logger = logging.getLogger(__name__)
logger.spam = partial(logger.log, 5)
class Strategy:
hardcoded = ()
def __init__(
self,
valid_guesses: WordList = WordList.ALL,
valid_answers: WordList = WordList.ALL,
strategy_answers: WordList = WordList.ALL,
):
self.valid_guesses = valid_guesses
self.valid_answers = valid_answers
self.strategy_answers = strategy_answers
def guess(self) -> str:
raise NotImplementedError
def default_guess(self) -> Optional[str]:
if len(self.remaining_words) == 0:
return 'fails'
if len(self.remaining_words) <= 2:
return self.remaining_words[0]
try:
return self.hardcoded[len(self.game_instance.clues)]
except IndexError:
return
def simulate_all_games(self) -> dict[int: list[str]]:
start = time.monotonic()
all_words = self.strategy_answers.get()
game_count = len(all_words)
logger.info(
"Simulating all %s games.",
game_count,
)
results = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: []}
count = 0
for num, word in enumerate(all_words):
results[self.simulate_game(word)].append(word)
count += 1
logger.info(
'Simulated %d/%d games, %.1f min elapsed',
count,
game_count,
(time.monotonic() - start) / 60,
)
logger.info("Our score is:")
scores = {val: len(words) for val, words in results.items()}
logger.info(scores)
win_count = game_count - scores[7]
logger.info(
"We won %d out of %d games for a win rate of %.2f%%",
win_count,
game_count,
win_count / game_count * 100,
)
logger.info(
"In our wins, we had an average score of %.2f guesses per game.",
(
sum(key * value for key, value in scores.items() if key != 7)
/ win_count
)
)
logger.info("We got these words in 1 guess:")
logger.info(results[1])
logger.info("We got these words in 2 guesses:")
logger.info(results[2])
logger.info("We got these words in 6 guesses, nearly failed!")
logger.info(results[6])
logger.info("We failed to solve these words:")
logger.info(results[7])
def simulate_game(self, answer: str) -> int:
self.initialize_game(answer)
while self.game_instance.running:
self.simulate_turn()
if self.game_instance.victory:
return len(self.game_instance.clues)
else:
return 7
def initialize_game(self, answer: str):
self.remaining_words = self.strategy_answers.get()
self.game_instance = SingleGame.begin(
answer=answer,
valid_guesses=self.valid_guesses,
valid_answers=self.valid_answers,
)
def simulate_turn(self):
guess = self.default_guess()
if guess is None:
guess = self.guess()
self.game_instance.make_guess(guess)
logger.debug('\n%s', self.game_instance)
self.remaining_words = prune_words(self.remaining_words, self.game_instance.clues)
def prune_words(words: list[str], clues: list[WordEval]) -> list[str]:
new_words = []
for word in words:
if all(clue.allows(word) for clue in clues):
new_words.append(word)
logger.debug('%d words remaining: %s', len(new_words), new_words)
return new_words
class InOrder(Strategy):
"""
Guess the valid words in order.
"""
def guess(self) -> str:
try:
return self.remaining_words[0]
except IndexError:
return 'fails'
class BruteForce(Strategy):
"""
Pick the word with the highest avg possible words removed
for all the possible answers.
"""
hardcoded_map = {
WordList.ALL: ('later',),
WordList.SGB: ('tares',),
WordList.CHEAT: ('roate',),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.hardcoded:
self.hardcoded = self.hardcoded_map[self.strategy_answers]
def guess(self) -> str:
if len(self.remaining_words) > 100:
return self.brute_force(
tuple(self.remaining_words),
tuple(self.get_likely_guesses()),
)
return self.brute_force(
tuple(self.remaining_words),
tuple(self.strategy_answers.get()),
)
def get_likely_guesses(self) -> list[str]:
guessed = ''.join(clue.word for clue in self.game_instance.clues)
likely = [
word for word in self.strategy_answers.get()
if not any(char in guessed for char in word)
]
logger.debug('%d likely guesses: %s', len(likely), likely)
return likely
@staticmethod
@cache
def brute_force(
words: tuple[str],
all_words: tuple[str],
) -> str:
cpus = multiprocessing.cpu_count() - 1
with multiprocessing.Pool(cpus) as pool:
scores = pool.imap_unordered(
partial(
BruteForce._check_proc,
remaining_ok=words,
),
NoisyList(all_words),
)
best_word = all_words[0]
best_score = 0
for word, score in scores:
if score > best_score:
logger.debug(
"New record! Score for %s is %s!",
word,
score,
)
best_word = word
best_score = score
return best_word
@staticmethod
def _check_proc(word: str, remaining_ok: list[str]) -> tuple[str, int]:
return word, BruteForce.check_one(
word,
remaining_ok,
)
@staticmethod
def check_one(word: str, remaining_ok: list[str]) -> int:
word_score = 0
for possibility in remaining_ok:
# If the answer was possibility, how many words are eliminated
eval = WordEval.from_guess(word, possibility)
local_score = 0
for other in remaining_ok:
if not eval.allows(other):
local_score += 1
# If all the words are eliminated, that is bad, not good!
if local_score < len(remaining_ok):
word_score += local_score
return word_score
@staticmethod
def precompute(
guesses: WordList = WordList.ALL,
answers: WordList = WordList.ALL,
) -> str:
logger.info('Running precompute...')
ans = BruteForce.brute_force(
tuple(answers.get()),
tuple(guesses.get()),
)
logger.info('Best word is %s', ans)
return ans
@staticmethod
def precompute_cheat():
BruteForce.precompute(answers=WordList.CHEAT)
@staticmethod
def check_one_for_profile(
guesses: WordList = WordList.ALL,
):
start_time = time.monotonic()
while time.monotonic() - start_time < 10:
BruteForce.check_one('check', guesses.get())
class NoisyList:
def __init__(self, words):
self.remaining_words = words
self.reinit()
def reinit(self):
self._iter_words = iter(self.remaining_words)
self.start = time.monotonic()
self.count = 0
def __iter__(self):
self.reinit()
return self
def __next__(self):
word = next(self._iter_words)
self.count += 1
logger.spam(
'Doing %s, (%d / %d), %.2f mins elapsed',
word,
self.count,
len(self.remaining_words),
(time.monotonic() - self.start) / 60,
)
return word
class SudokuChannel(BruteForce):
"""
Guess the words the Sudoku guys recommend, then brute force it
"""
hardcoded = ('siren', 'octal', 'dumpy')
class BruteForceYolo(BruteForce):
"""
Always guess something that could potentially be an answer.
No further pruning of the guess tree.
"""
hardcoded_map = BruteForce.hardcoded_map.copy()
hardcoded_map[WordList.CHEAT] = ('raise',)
def guess(self) -> str:
yolo_word = self.brute_force(
tuple(self.remaining_words),
tuple(self.remaining_words),
)
yolo_score = self.check_one(yolo_word, self.remaining_words)
if yolo_score / len(self.remaining_words) > len(self.remaining_words) / 2:
return yolo_word
else:
logger.debug('Yolo guess was too risky')
return super().guess()
@staticmethod
def precompute_yolo_cheat():
BruteForce.precompute(
guesses=WordList.CHEAT,
answers=WordList.CHEAT,
) | 30.72093 | 90 | 0.566562 | import logging
import multiprocessing
import time
from functools import cache, partial
from typing import Optional
from .game import SingleGame, WordEval
from .words import WordList
logger = logging.getLogger(__name__)
logger.spam = partial(logger.log, 5)
class Strategy:
hardcoded = ()
def __init__(
self,
valid_guesses: WordList = WordList.ALL,
valid_answers: WordList = WordList.ALL,
strategy_answers: WordList = WordList.ALL,
):
self.valid_guesses = valid_guesses
self.valid_answers = valid_answers
self.strategy_answers = strategy_answers
def guess(self) -> str:
raise NotImplementedError
def default_guess(self) -> Optional[str]:
if len(self.remaining_words) == 0:
return 'fails'
if len(self.remaining_words) <= 2:
return self.remaining_words[0]
try:
return self.hardcoded[len(self.game_instance.clues)]
except IndexError:
return
def simulate_all_games(self) -> dict[int: list[str]]:
start = time.monotonic()
all_words = self.strategy_answers.get()
game_count = len(all_words)
logger.info(
"Simulating all %s games.",
game_count,
)
results = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: []}
count = 0
for num, word in enumerate(all_words):
results[self.simulate_game(word)].append(word)
count += 1
logger.info(
'Simulated %d/%d games, %.1f min elapsed',
count,
game_count,
(time.monotonic() - start) / 60,
)
logger.info("Our score is:")
scores = {val: len(words) for val, words in results.items()}
logger.info(scores)
win_count = game_count - scores[7]
logger.info(
"We won %d out of %d games for a win rate of %.2f%%",
win_count,
game_count,
win_count / game_count * 100,
)
logger.info(
"In our wins, we had an average score of %.2f guesses per game.",
(
sum(key * value for key, value in scores.items() if key != 7)
/ win_count
)
)
logger.info("We got these words in 1 guess:")
logger.info(results[1])
logger.info("We got these words in 2 guesses:")
logger.info(results[2])
logger.info("We got these words in 6 guesses, nearly failed!")
logger.info(results[6])
logger.info("We failed to solve these words:")
logger.info(results[7])
def simulate_game(self, answer: str) -> int:
self.initialize_game(answer)
while self.game_instance.running:
self.simulate_turn()
if self.game_instance.victory:
return len(self.game_instance.clues)
else:
return 7
def initialize_game(self, answer: str):
self.remaining_words = self.strategy_answers.get()
self.game_instance = SingleGame.begin(
answer=answer,
valid_guesses=self.valid_guesses,
valid_answers=self.valid_answers,
)
def simulate_turn(self):
guess = self.default_guess()
if guess is None:
guess = self.guess()
self.game_instance.make_guess(guess)
logger.debug('\n%s', self.game_instance)
self.remaining_words = prune_words(self.remaining_words, self.game_instance.clues)
def prune_words(words: list[str], clues: list[WordEval]) -> list[str]:
new_words = []
for word in words:
if all(clue.allows(word) for clue in clues):
new_words.append(word)
logger.debug('%d words remaining: %s', len(new_words), new_words)
return new_words
class InOrder(Strategy):
def guess(self) -> str:
try:
return self.remaining_words[0]
except IndexError:
return 'fails'
class BruteForce(Strategy):
hardcoded_map = {
WordList.ALL: ('later',),
WordList.SGB: ('tares',),
WordList.CHEAT: ('roate',),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.hardcoded:
self.hardcoded = self.hardcoded_map[self.strategy_answers]
def guess(self) -> str:
if len(self.remaining_words) > 100:
return self.brute_force(
tuple(self.remaining_words),
tuple(self.get_likely_guesses()),
)
return self.brute_force(
tuple(self.remaining_words),
tuple(self.strategy_answers.get()),
)
def get_likely_guesses(self) -> list[str]:
guessed = ''.join(clue.word for clue in self.game_instance.clues)
likely = [
word for word in self.strategy_answers.get()
if not any(char in guessed for char in word)
]
logger.debug('%d likely guesses: %s', len(likely), likely)
return likely
@staticmethod
@cache
def brute_force(
words: tuple[str],
all_words: tuple[str],
) -> str:
cpus = multiprocessing.cpu_count() - 1
with multiprocessing.Pool(cpus) as pool:
scores = pool.imap_unordered(
partial(
BruteForce._check_proc,
remaining_ok=words,
),
NoisyList(all_words),
)
best_word = all_words[0]
best_score = 0
for word, score in scores:
if score > best_score:
logger.debug(
"New record! Score for %s is %s!",
word,
score,
)
best_word = word
best_score = score
return best_word
@staticmethod
def _check_proc(word: str, remaining_ok: list[str]) -> tuple[str, int]:
return word, BruteForce.check_one(
word,
remaining_ok,
)
@staticmethod
def check_one(word: str, remaining_ok: list[str]) -> int:
word_score = 0
for possibility in remaining_ok:
eval = WordEval.from_guess(word, possibility)
local_score = 0
for other in remaining_ok:
if not eval.allows(other):
local_score += 1
if local_score < len(remaining_ok):
word_score += local_score
return word_score
@staticmethod
def precompute(
guesses: WordList = WordList.ALL,
answers: WordList = WordList.ALL,
) -> str:
logger.info('Running precompute...')
ans = BruteForce.brute_force(
tuple(answers.get()),
tuple(guesses.get()),
)
logger.info('Best word is %s', ans)
return ans
@staticmethod
def precompute_cheat():
BruteForce.precompute(answers=WordList.CHEAT)
@staticmethod
def check_one_for_profile(
guesses: WordList = WordList.ALL,
):
start_time = time.monotonic()
while time.monotonic() - start_time < 10:
BruteForce.check_one('check', guesses.get())
class NoisyList:
def __init__(self, words):
self.remaining_words = words
self.reinit()
def reinit(self):
self._iter_words = iter(self.remaining_words)
self.start = time.monotonic()
self.count = 0
def __iter__(self):
self.reinit()
return self
def __next__(self):
word = next(self._iter_words)
self.count += 1
logger.spam(
'Doing %s, (%d / %d), %.2f mins elapsed',
word,
self.count,
len(self.remaining_words),
(time.monotonic() - self.start) / 60,
)
return word
class SudokuChannel(BruteForce):
hardcoded = ('siren', 'octal', 'dumpy')
class BruteForceYolo(BruteForce):
    # Same hardcoded openings as BruteForce, except 'raise' when playing
    # against the reduced CHEAT answer list.
    hardcoded_map = BruteForce.hardcoded_map.copy()
    hardcoded_map[WordList.CHEAT] = ('raise',)
    def guess(self) -> str:
        # "Yolo" gamble: brute-force only within the remaining candidates, so
        # a correct pick wins outright instead of merely narrowing the field.
        yolo_word = self.brute_force(
            tuple(self.remaining_words),
            tuple(self.remaining_words),
        )
        yolo_score = self.check_one(yolo_word, self.remaining_words)
        # Take the gamble only when the average eliminations per assumed
        # answer (score / n) exceed half the field (n / 2); otherwise fall
        # back to the safer parent strategy.
        if yolo_score / len(self.remaining_words) > len(self.remaining_words) / 2:
            return yolo_word
        else:
            logger.debug('Yolo guess was too risky')
            return super().guess()
    @staticmethod
    def precompute_yolo_cheat():
        # Precompute restricting both guesses and answers to the CHEAT list.
        BruteForce.precompute(
            guesses=WordList.CHEAT,
            answers=WordList.CHEAT,
        )
f7fba643daf708683640e5dee531a0e8476b3cf5 | 4,897 | py | Python | molo/globalsite/tests/test_views.py | praekeltfoundation/molo.globalsite | 225d68de81b566aa8511e0326cf09b15fe7530c6 | [
"BSD-2-Clause"
] | null | null | null | molo/globalsite/tests/test_views.py | praekeltfoundation/molo.globalsite | 225d68de81b566aa8511e0326cf09b15fe7530c6 | [
"BSD-2-Clause"
] | 7 | 2018-05-10T14:36:41.000Z | 2018-10-18T08:27:51.000Z | molo/globalsite/tests/test_views.py | praekeltfoundation/molo.globalsite | 225d68de81b566aa8511e0326cf09b15fe7530c6 | [
"BSD-2-Clause"
] | null | null | null | import pytest
from django.test import TestCase, Client
from django.http.request import HttpRequest
from django.core.urlresolvers import reverse
from wagtail.wagtailcore.models import Site
from molo.core.tests.base import MoloTestCaseMixin
from molo.globalsite.models import CountrySite, GlobalSiteSettings, Region
from molo.globalsite import geo
@pytest.mark.django_db
class TestGlobalSiteViews(TestCase, MoloTestCaseMixin):
    """Integration tests for molo.globalsite country selection and redirects.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated alias
    (removed in Python 3.12).
    """

    def setUp(self):
        """Create two molo sites, two regions/country sites, and enable globalsite."""
        self.request = HttpRequest()
        self.mk_main()
        self.mk_main2()
        africa = Region.objects.create(name='Africa')
        asia = Region.objects.create(name='Asia')
        CountrySite.objects.create(
            name='South Africa', code='za',
            site_url='http://za.site.org', region=africa)
        CountrySite.objects.create(
            name='Iran', code='IR',
            site_url='http://ir.site.org', region=asia)
        default_site = Site.objects.get(is_default_site=True)
        self.setting = GlobalSiteSettings.objects.create(site=default_site)
        self.setting.is_globalsite = True
        self.setting.description = 'Welcome To Global Site'
        self.setting.save()

    def test_country_sites(self):
        """Country sites persist; codes come back upper-cased."""
        country = CountrySite.objects.all()
        self.assertEqual(country.count(), 2)
        self.assertEqual(country[0].code, 'ZA')
        self.assertEqual(country[0].name, 'South Africa')
        self.assertEqual(country[1].code, 'IR')
        self.assertEqual(country[1].name, 'Iran')

    def test_global_site_is_activated(self):
        """Root redirects to country selection only while globalsite is on."""
        response = self.client.get('/')
        self.assertRedirects(
            response, reverse('molo.globalsite:country_selection'))
        self.setting.is_globalsite = False
        self.setting.save()
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)

    def test_country_listing(self):
        """Selection page shows the description plus regions and countries."""
        response = self.client.get('/', follow=True)
        self.assertContains(response, 'Welcome To Global Site')
        self.assertContains(response, 'Africa')
        self.assertContains(response, 'South Africa')
        self.assertContains(response, 'Asia')
        self.assertContains(response, 'Iran')

    def test_country_redirect(self):
        """Choosing a country redirects to that country's site URL."""
        response = self.client.get(
            reverse('molo.globalsite:set_country', args=('ZA',)))
        self.assertEqual(response.url, 'http://za.site.org')

    def test_auto_redirect(self):
        """A remembered country auto-redirects only when autoredirect is set."""
        self.client.get(
            reverse('molo.globalsite:set_country', args=('ZA',)))
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.client.get(
            reverse('molo.globalsite:set_country', args=('ZA',)))
        self.setting.autoredirect = True
        self.setting.save()
        response = self.client.get('/')
        self.assertEqual(response.status_code, 302)

    def test_changing_country(self):
        """The change-country view sends users back to the global listing."""
        with self.settings(GLOBAL_SITE_URL=self.site.root_url):
            client = Client(HTTP_HOST=self.site2.hostname)
            url = self.site2.root_url + '/globalsite/changecountry/'
            response = client.get(url)
            self.assertEqual(
                response.url,
                'http://main-1.localhost:8000/globalsite/countries/')
            response = client.get(url, follow=True)
            self.assertContains(response, 'South Africa')

    def test_settings_globalsite_ignore_path(self):
        """Paths listed in GLOBAL_SITE_IGNORE_PATH bypass the redirect."""
        excl = ['/search/']
        response = self.client.get(excl[0])
        self.assertEqual(response.status_code, 302)
        with self.settings(GLOBAL_SITE_IGNORE_PATH=excl):
            response = self.client.get(excl[0])
            self.assertEqual(response.status_code, 200)

    def test_country_detection_using_ip(self):
        """IP lookup maps known IPs to code/site; unknown or junk gives None."""
        self.request.META['HTTP_X_FORWARDED_FOR'] = '41.31.255.255'
        self.assertEqual(geo.get_country_code(
            self.request), 'ZA')
        self.assertEqual(geo.get_country_site(
            self.request), 'http://za.site.org')
        self.request.META['HTTP_X_FORWARDED_FOR'] = '146.185.25.250'
        self.assertEqual(geo.get_country_code(
            self.request), 'GB')
        self.assertEqual(geo.get_country_site(
            self.request), None)
        self.request.META['HTTP_X_FORWARDED_FOR'] = 'http://127.0.0.1'
        self.assertEqual(geo.get_country_code(
            self.request), None)
        self.assertEqual(geo.get_country_site(
            self.request), None)

    def test_geolocation_using_ip(self):
        """With geolocation enabled, a recognised IP skips country selection."""
        client = Client(HTTP_X_FORWARDED_FOR='41.31.255.255')
        response = client.get('/')
        self.assertRedirects(
            response, reverse('molo.globalsite:country_selection'))
        self.setting.geolocation = True
        self.setting.save()
        response = client.get('/')
        self.assertEqual(response.url, 'http://za.site.org')
| 39.176 | 75 | 0.646927 | import pytest
from django.test import TestCase, Client
from django.http.request import HttpRequest
from django.core.urlresolvers import reverse
from wagtail.wagtailcore.models import Site
from molo.core.tests.base import MoloTestCaseMixin
from molo.globalsite.models import CountrySite, GlobalSiteSettings, Region
from molo.globalsite import geo
@pytest.mark.django_db
class TestGlobalSiteViews(TestCase, MoloTestCaseMixin):
    """Integration tests for molo.globalsite country selection and redirects.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated alias
    (removed in Python 3.12).
    """

    def setUp(self):
        """Create two molo sites, two regions/country sites, and enable globalsite."""
        self.request = HttpRequest()
        self.mk_main()
        self.mk_main2()
        africa = Region.objects.create(name='Africa')
        asia = Region.objects.create(name='Asia')
        CountrySite.objects.create(
            name='South Africa', code='za',
            site_url='http://za.site.org', region=africa)
        CountrySite.objects.create(
            name='Iran', code='IR',
            site_url='http://ir.site.org', region=asia)
        default_site = Site.objects.get(is_default_site=True)
        self.setting = GlobalSiteSettings.objects.create(site=default_site)
        self.setting.is_globalsite = True
        self.setting.description = 'Welcome To Global Site'
        self.setting.save()

    def test_country_sites(self):
        """Country sites persist; codes come back upper-cased."""
        country = CountrySite.objects.all()
        self.assertEqual(country.count(), 2)
        self.assertEqual(country[0].code, 'ZA')
        self.assertEqual(country[0].name, 'South Africa')
        self.assertEqual(country[1].code, 'IR')
        self.assertEqual(country[1].name, 'Iran')

    def test_global_site_is_activated(self):
        """Root redirects to country selection only while globalsite is on."""
        response = self.client.get('/')
        self.assertRedirects(
            response, reverse('molo.globalsite:country_selection'))
        self.setting.is_globalsite = False
        self.setting.save()
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)

    def test_country_listing(self):
        """Selection page shows the description plus regions and countries."""
        response = self.client.get('/', follow=True)
        self.assertContains(response, 'Welcome To Global Site')
        self.assertContains(response, 'Africa')
        self.assertContains(response, 'South Africa')
        self.assertContains(response, 'Asia')
        self.assertContains(response, 'Iran')

    def test_country_redirect(self):
        """Choosing a country redirects to that country's site URL."""
        response = self.client.get(
            reverse('molo.globalsite:set_country', args=('ZA',)))
        self.assertEqual(response.url, 'http://za.site.org')

    def test_auto_redirect(self):
        """A remembered country auto-redirects only when autoredirect is set."""
        self.client.get(
            reverse('molo.globalsite:set_country', args=('ZA',)))
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.client.get(
            reverse('molo.globalsite:set_country', args=('ZA',)))
        self.setting.autoredirect = True
        self.setting.save()
        response = self.client.get('/')
        self.assertEqual(response.status_code, 302)

    def test_changing_country(self):
        """The change-country view sends users back to the global listing."""
        with self.settings(GLOBAL_SITE_URL=self.site.root_url):
            client = Client(HTTP_HOST=self.site2.hostname)
            url = self.site2.root_url + '/globalsite/changecountry/'
            response = client.get(url)
            self.assertEqual(
                response.url,
                'http://main-1.localhost:8000/globalsite/countries/')
            response = client.get(url, follow=True)
            self.assertContains(response, 'South Africa')

    def test_settings_globalsite_ignore_path(self):
        """Paths listed in GLOBAL_SITE_IGNORE_PATH bypass the redirect."""
        excl = ['/search/']
        response = self.client.get(excl[0])
        self.assertEqual(response.status_code, 302)
        with self.settings(GLOBAL_SITE_IGNORE_PATH=excl):
            response = self.client.get(excl[0])
            self.assertEqual(response.status_code, 200)

    def test_country_detection_using_ip(self):
        """IP lookup maps known IPs to code/site; unknown or junk gives None."""
        self.request.META['HTTP_X_FORWARDED_FOR'] = '41.31.255.255'
        self.assertEqual(geo.get_country_code(
            self.request), 'ZA')
        self.assertEqual(geo.get_country_site(
            self.request), 'http://za.site.org')
        self.request.META['HTTP_X_FORWARDED_FOR'] = '146.185.25.250'
        self.assertEqual(geo.get_country_code(
            self.request), 'GB')
        self.assertEqual(geo.get_country_site(
            self.request), None)
        self.request.META['HTTP_X_FORWARDED_FOR'] = 'http://127.0.0.1'
        self.assertEqual(geo.get_country_code(
            self.request), None)
        self.assertEqual(geo.get_country_site(
            self.request), None)

    def test_geolocation_using_ip(self):
        """With geolocation enabled, a recognised IP skips country selection."""
        client = Client(HTTP_X_FORWARDED_FOR='41.31.255.255')
        response = client.get('/')
        self.assertRedirects(
            response, reverse('molo.globalsite:country_selection'))
        self.setting.geolocation = True
        self.setting.save()
        response = client.get('/')
        self.assertEqual(response.url, 'http://za.site.org')
| true | true |
f7fba69c4135fc23e640d964279a52b9682f3836 | 1,110 | py | Python | 2-resources/_PYTHON/code-examples-master/aws/python/dynamodb/create_and_tag_table.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | 2-resources/_PYTHON/code-examples-master/aws/python/dynamodb/create_and_tag_table.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | 2-resources/_PYTHON/code-examples-master/aws/python/dynamodb/create_and_tag_table.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | 1 | 2021-11-05T07:48:26.000Z | 2021-11-05T07:48:26.000Z | # Creates a DynamoDB Table: test_20181030 and Tags it
import boto3
import time
# Shared AWS handles: a session pinned to eu-west-1 under the 'test' profile,
# plus the high-level DynamoDB resource and low-level client built from it.
session = boto3.Session(region_name='eu-west-1', profile_name='test')
resource = session.resource('dynamodb')
client = session.client('dynamodb')
def create_table():
    """Create a date-stamped DynamoDB table, tag it, and return its status.

    The table is named ``test_YYYYMMDD`` with a single string hash key
    ``uuid`` and minimal (1/1) provisioned throughput.  Blocks until the
    table exists, then applies Name/Environment/Owner tags.
    """
    table_name = "test_{0}".format(time.strftime("%Y%m%d"))
    # create_table returns a Table resource; previously it was bound to an
    # unused ``response`` variable and a second Table object was built just
    # to wait on.
    table = resource.create_table(
        TableName=table_name,
        KeySchema=[{
            'AttributeName': 'uuid',
            'KeyType': 'HASH'
        }],
        AttributeDefinitions=[{
            'AttributeName': 'uuid',
            'AttributeType': 'S'
        }],
        ProvisionedThroughput={
            'ReadCapacityUnits': 1,
            'WriteCapacityUnits': 1
        }
    )
    table.wait_until_exists()
    arn = client.describe_table(TableName=table_name)['Table']['TableArn']
    client.tag_resource(
        ResourceArn=arn,
        Tags=[
            {'Key': 'Name', 'Value': 'yes'},
            {'Key': 'Environment', 'Value': 'yes'},
            {'Key': 'Owner', 'Value': 'yes'}
        ]
    )
    # Re-instantiate the Table so table_status reflects the current state
    # rather than the cached creation-time response.
    return resource.Table(table_name).table_status
| 27.073171 | 74 | 0.581081 |
import boto3
import time
# Shared AWS handles: a session pinned to eu-west-1 under the 'test' profile,
# plus the high-level DynamoDB resource and low-level client built from it.
session = boto3.Session(region_name='eu-west-1', profile_name='test')
resource = session.resource('dynamodb')
client = session.client('dynamodb')
def create_table():
    """Create a date-stamped DynamoDB table, tag it, and return its status.

    The table is named ``test_YYYYMMDD`` with a single string hash key
    ``uuid`` and minimal (1/1) provisioned throughput.  Blocks until the
    table exists, then applies Name/Environment/Owner tags.
    """
    table_name = "test_{0}".format(time.strftime("%Y%m%d"))
    # create_table returns a Table resource; previously it was bound to an
    # unused ``response`` variable and a second Table object was built just
    # to wait on.
    table = resource.create_table(
        TableName=table_name,
        KeySchema=[{
            'AttributeName': 'uuid',
            'KeyType': 'HASH'
        }],
        AttributeDefinitions=[{
            'AttributeName': 'uuid',
            'AttributeType': 'S'
        }],
        ProvisionedThroughput={
            'ReadCapacityUnits': 1,
            'WriteCapacityUnits': 1
        }
    )
    table.wait_until_exists()
    arn = client.describe_table(TableName=table_name)['Table']['TableArn']
    client.tag_resource(
        ResourceArn=arn,
        Tags=[
            {'Key': 'Name', 'Value': 'yes'},
            {'Key': 'Environment', 'Value': 'yes'},
            {'Key': 'Owner', 'Value': 'yes'}
        ]
    )
    # Re-instantiate the Table so table_status reflects the current state
    # rather than the cached creation-time response.
    return resource.Table(table_name).table_status
| true | true |
f7fba76706ffed832bf27c13995f882ba9655203 | 5,645 | py | Python | nltk/corpus/reader/wordlist.py | RnDevelover/nltk | 87e23ff22fbdd14b5cd3ea44a48bcc46f50be551 | [
"Apache-2.0"
] | null | null | null | nltk/corpus/reader/wordlist.py | RnDevelover/nltk | 87e23ff22fbdd14b5cd3ea44a48bcc46f50be551 | [
"Apache-2.0"
] | null | null | null | nltk/corpus/reader/wordlist.py | RnDevelover/nltk | 87e23ff22fbdd14b5cd3ea44a48bcc46f50be551 | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: Word List Corpus Reader
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *
from nltk.tokenize import line_tokenize
class WordListCorpusReader(CorpusReader):
    """
    List of words, one per line. Blank lines are ignored.
    """

    def words(self, fileids=None, ignore_lines_startswith="\n"):
        """Return the lines of the given fileids, skipping any line that
        starts with *ignore_lines_startswith*."""
        all_lines = line_tokenize(self.raw(fileids))
        return [ln for ln in all_lines if not ln.startswith(ignore_lines_startswith)]
class SwadeshCorpusReader(WordListCorpusReader):
    """Reader for Swadesh word lists: aligns entries across languages."""

    def entries(self, fileids=None):
        """
        :return: a tuple of words for the specified fileids.
        """
        if not fileids:
            fileids = self.fileids()
        per_file_words = (self.words(f) for f in fileids)
        return list(zip(*per_file_words))
class NonbreakingPrefixesCorpusReader(WordListCorpusReader):
    """
    This is a class to read the nonbreaking prefixes textfiles from the
    Moses Machine Translation toolkit. These lists are used in the Python port
    of the Moses' word tokenizer.
    """

    available_langs = {
        "catalan": "ca",
        "czech": "cs",
        "german": "de",
        "greek": "el",
        "english": "en",
        "spanish": "es",
        "finnish": "fi",
        "french": "fr",
        "hungarian": "hu",
        "icelandic": "is",
        "italian": "it",
        "latvian": "lv",
        "dutch": "nl",
        "polish": "pl",
        "portuguese": "pt",
        "romanian": "ro",
        "russian": "ru",
        "slovak": "sk",
        "slovenian": "sl",
        "swedish": "sv",
        "tamil": "ta",
    }
    # Also, add the lang IDs as the keys.
    available_langs.update({v: v for v in available_langs.values()})

    def words(self, lang=None, fileids=None, ignore_lines_startswith="#"):
        """
        This module returns a list of nonbreaking prefixes for the specified
        language(s).

        >>> from nltk.corpus import nonbreaking_prefixes as nbp
        >>> nbp.words('en')[:10] == [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J']
        True
        >>> nbp.words('ta')[:5] == [u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89']
        True

        :return: a list words for the specified language(s).
        """
        # A recognised language (full name or ISO code) selects its prefix
        # file; otherwise *fileids* is left untouched, so fileids=None reads
        # every available file.
        if lang in self.available_langs:
            code = self.available_langs[lang]
            fileids = ["nonbreaking_prefix." + code]
        raw_lines = line_tokenize(self.raw(fileids))
        return [
            prefix for prefix in raw_lines
            if not prefix.startswith(ignore_lines_startswith)
        ]
class UnicharsCorpusReader(WordListCorpusReader):
    """
    This class is used to read lists of characters from the Perl Unicode
    Properties (see https://perldoc.perl.org/perluniprops.html).
    The files in the perluniprop.zip are extracted using the Unicode::Tussle
    module from https://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm
    """

    # These are categories similar to the Perl Unicode Properties
    available_categories = [
        "Close_Punctuation",
        "Currency_Symbol",
        "IsAlnum",
        "IsAlpha",
        "IsLower",
        "IsN",
        "IsSc",
        "IsSo",
        "IsUpper",
        "Line_Separator",
        "Number",
        "Open_Punctuation",
        "Punctuation",
        "Separator",
        "Symbol",
    ]

    def chars(self, category=None, fileids=None):
        """
        This module returns a list of characters from the Perl Unicode Properties.
        They are very useful when porting Perl tokenizers to Python.

        >>> from nltk.corpus import perluniprops as pup
        >>> pup.chars('Open_Punctuation')[:5] == [u'(', u'[', u'{', u'\u0f3a', u'\u0f3c']
        True
        >>> pup.chars('Currency_Symbol')[:5] == [u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5']
        True
        >>> pup.available_categories
        ['Close_Punctuation', 'Currency_Symbol', 'IsAlnum', 'IsAlpha', 'IsLower', 'IsN', 'IsSc', 'IsSo', 'IsUpper', 'Line_Separator', 'Number', 'Open_Punctuation', 'Punctuation', 'Separator', 'Symbol']

        :return: a list of characters given the specific unicode character category
        """
        # A recognised category selects its own file; otherwise fall through
        # with *fileids* unchanged (None reads all files).
        if category in self.available_categories:
            fileids = [category + ".txt"]
        raw_text = self.raw(fileids)
        return list(raw_text.strip())
class MWAPPDBCorpusReader(WordListCorpusReader):
    """
    This class is used to read the list of word pairs from the subset of lexical
    pairs of The Paraphrase Database (PPDB) XXXL used in the Monolingual Word
    Alignment (MWA) algorithm described in Sultan et al. (2014a, 2014b, 2015):
     - http://acl2014.org/acl2014/Q14/pdf/Q14-1017
     - https://www.aclweb.org/anthology/S14-2039
     - https://www.aclweb.org/anthology/S15-2027

    The original source of the full PPDB corpus can be found on
    https://www.cis.upenn.edu/~ccb/ppdb/

    :return: a list of tuples of similar lexical terms.
    """

    mwa_ppdb_xxxl_file = "ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"

    def entries(self, fileids=mwa_ppdb_xxxl_file):
        """
        :return: a tuple of synonym word pairs.
        """
        pair_lines = line_tokenize(self.raw(fileids))
        return [tuple(pair.split("\t")) for pair in pair_lines]
| 34.006024 | 201 | 0.606023 |
from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *
from nltk.tokenize import line_tokenize
class WordListCorpusReader(CorpusReader):
    """List of words, one per line; lines matching the ignore prefix are skipped."""

    def words(self, fileids=None, ignore_lines_startswith="\n"):
        """Return the lines of *fileids*, skipping lines that start with
        *ignore_lines_startswith*."""
        return [
            line
            for line in line_tokenize(self.raw(fileids))
            if not line.startswith(ignore_lines_startswith)
        ]
class SwadeshCorpusReader(WordListCorpusReader):
    """Reader for Swadesh word lists: aligns entries across languages."""

    def entries(self, fileids=None):
        """Return tuples of parallel words, one per line, across *fileids*."""
        if not fileids:
            fileids = self.fileids()
        wordlists = [self.words(f) for f in fileids]
        return list(zip(*wordlists))
class NonbreakingPrefixesCorpusReader(WordListCorpusReader):
    """Reader for the Moses MT toolkit's nonbreaking-prefix text files."""

    # Maps full language names to their ISO codes.
    available_langs = {
        "catalan": "ca",
        "czech": "cs",
        "german": "de",
        "greek": "el",
        "english": "en",
        "spanish": "es",
        "finnish": "fi",
        "french": "fr",
        "hungarian": "hu",
        "icelandic": "is",
        "italian": "it",
        "latvian": "lv",
        "dutch": "nl",
        "polish": "pl",
        "portuguese": "pt",
        "romanian": "ro",
        "russian": "ru",
        "slovak": "sk",
        "slovenian": "sl",
        "swedish": "sv",
        "tamil": "ta",
    }
    # Also register each ISO code as a key so both names and codes resolve.
    available_langs.update({v: v for v in available_langs.values()})

    def words(self, lang=None, fileids=None, ignore_lines_startswith="#"):
        """Return the nonbreaking prefixes for *lang* (name or ISO code);
        with no recognised *lang* and fileids=None, all files are read.
        Lines starting with *ignore_lines_startswith* (comments) are skipped."""
        if lang in self.available_langs:
            lang = self.available_langs[lang]
            fileids = ["nonbreaking_prefix." + lang]
        return [
            line
            for line in line_tokenize(self.raw(fileids))
            if not line.startswith(ignore_lines_startswith)
        ]
class UnicharsCorpusReader(WordListCorpusReader):
    """Reader for character lists derived from the Perl Unicode Properties."""

    # Category names mirroring Perl Unicode Property classes.
    available_categories = [
        "Close_Punctuation",
        "Currency_Symbol",
        "IsAlnum",
        "IsAlpha",
        "IsLower",
        "IsN",
        "IsSc",
        "IsSo",
        "IsUpper",
        "Line_Separator",
        "Number",
        "Open_Punctuation",
        "Punctuation",
        "Separator",
        "Symbol",
    ]

    def chars(self, category=None, fileids=None):
        """Return the list of characters in *category*; an unrecognised
        category leaves *fileids* untouched (None reads all files)."""
        if category in self.available_categories:
            fileids = [category + ".txt"]
        return list(self.raw(fileids).strip())
class MWAPPDBCorpusReader(WordListCorpusReader):
    """Reader for the PPDB XXXL lexical-pair subset used for word alignment."""

    # Default fileid: the unique synonym pairs extracted from PPDB 1.0 XXXL.
    mwa_ppdb_xxxl_file = "ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"

    def entries(self, fileids=mwa_ppdb_xxxl_file):
        """Return tuples of tab-separated synonym word pairs."""
        return [tuple(line.split("\t")) for line in line_tokenize(self.raw(fileids))]
| true | true |
f7fba7928c4013a5d6ab3f05a845ed74d4dbc12f | 969 | py | Python | my_code.py | Athenian-ComputerScience-Fall2020/my-age-Jackypop101 | 587841ee1b65534eb40b10490b7aa8ad36f0362c | [
"Apache-2.0"
] | null | null | null | my_code.py | Athenian-ComputerScience-Fall2020/my-age-Jackypop101 | 587841ee1b65534eb40b10490b7aa8ad36f0362c | [
"Apache-2.0"
] | null | null | null | my_code.py | Athenian-ComputerScience-Fall2020/my-age-Jackypop101 | 587841ee1b65534eb40b10490b7aa8ad36f0362c | [
"Apache-2.0"
] | null | null | null | # Complete the function below calculate your age after different numbers of years.
def age_now(x):
    """Print a sentence stating the current age *x*; also return it for reuse/testing."""
    message = f"I am currently {x} years old.\n"
    print(message)
    return message
def age_1(y):
    """Print a sentence stating the age one year from now; also return it."""
    message = f"Next year I'll be {y+1} years old.\n"
    print(message)
    return message
def age_10(z):
    """Print a sentence stating the age ten years from now; also return it."""
    message = f"In 10 years, I'll be {z+10}!\n"
    print(message)
    return message
def age_50(q):
    """Print a sentence stating the age fifty years from now; also return it."""
    message = f"In 50 years, I'll be {q+50}! Wow!\n"
    print(message)
    return message
if __name__ == '__main__':
    # NOTE(review): the hard-coded value below is immediately overwritten by
    # the input() call, so it only matters if that line is removed.
    age = 15 # change this number to your current age
    '''
    after you have this working by defining "age" above, you can comment that line out and add a new line
    # below it to get the user's age as an input.
    '''
    age = int(input("How old are you?"))
    age_now(age) # run function age_now with argument age
    age_1(age) # run function age_1 with argument age
    age_10(age) # run function age_10 with argument age
    age_50(age) # run function age_50 with argument age
def age_now(x):
    """Print a sentence stating the current age *x*; also return it for reuse/testing."""
    message = f"I am currently {x} years old.\n"
    print(message)
    return message
def age_1(y):
    """Print a sentence stating the age one year from now; also return it."""
    message = f"Next year I'll be {y+1} years old.\n"
    print(message)
    return message
def age_10(z):
    """Print a sentence stating the age ten years from now; also return it."""
    message = f"In 10 years, I'll be {z+10}!\n"
    print(message)
    return message
def age_50(q):
    """Print a sentence stating the age fifty years from now; also return it."""
    message = f"In 50 years, I'll be {q+50}! Wow!\n"
    print(message)
    return message
if __name__ == '__main__':
    # NOTE(review): the hard-coded value below is immediately overwritten by
    # the input() call, so it only matters if that line is removed.
    age = 15 # change this number to your current age
    age = int(input("How old are you?"))
    age_now(age) # run function age_now with argument age
    age_1(age) # run function age_1 with argument age
    age_10(age) # run function age_10 with argument age
    age_50(age) # run function age_50 with argument age
| true | true |
f7fba7cd4afb99a4ea6731070fd8cb5b47cfc793 | 7,836 | py | Python | Frcwp/Frcwp.py | sladesha/Frcwp | 421e8e831343bfeeeb31cb599598f059a563bbf8 | [
"MIT"
] | 47 | 2018-01-02T15:55:44.000Z | 2022-03-22T14:50:31.000Z | Frcwp/Frcwp.py | sladesha/Frcwp | 421e8e831343bfeeeb31cb599598f059a563bbf8 | [
"MIT"
] | 1 | 2018-05-22T01:54:57.000Z | 2018-05-22T01:54:57.000Z | Frcwp/Frcwp.py | sladesha/Frcwp | 421e8e831343bfeeeb31cb599598f059a563bbf8 | [
"MIT"
] | 7 | 2019-04-19T06:03:40.000Z | 2022-03-24T06:46:24.000Z | import pandas as pd
import numpy as np
import sys
from .reshapedata import natest as _natest
from .reshapedata import valuenumber, coltest, standardizeddata, formatcheck
from .distince import distince
from .slicing import grouped, isolationforest, iswhole
class Frcwp():
    '''
    param : na_rate : if na_rate != None , remove some column if the column with too many nan values
    param : single_dealed : if single_dealed > 0 , remove some column if the column with single value
    param : is_scale : if is_scale = 1 , scale the data to improve the calculation speed ,may change the distribution of the initial data , is_scale =0 may be better usually
    param : distince_method : if 'Maha' then Mahalanobis_dist ; if 'Eucl' then euclidean_dist
    param : outlier_rate : the estimated outliers / all cases , the smaller, the better but not smaller than 0
    param : strange_rate : the strange outliers(潜在可能的异常点) / all cases
    param : nestimators : isolation tree number
    param : contamination : actual estimated outliers / all cases
    param : is_whole : if is_whole = 1 , the output features is the same as input ; if is_whole = 0 ,the output features are the features which take part in the training process
    param : output : if None then output all the potentialdata , if 0<output<1 then treated as rate , if output>1 the treated as number

    attribute : useful_index : the feature rank_index used for the model training actually
    attribute : similarity_label : the output data outlier degree the larger the outlier degree higher
    attribute : normalbarycentre : common level of your input data
    attribute : potentialdata_set : the suggested outlier potential data set , you can use your outlier potential data as well
    '''

    def __init__(self):
        # All hyper-parameters/attributes are populated by fit()/predict().
        self.na_rate = None
        self.single_dealed = None
        self.is_scale = None
        self.distince_method = None
        self.outlier_rate = None
        self.strange_rate = None
        self.nestimators = None
        self.contamination = None
        self.is_whole = None
        self.output = None
        self.iforest = None
        self.useful_index = None
        self.original_data = None
        self.similarity_label = None

    def changeformat(self, X, index=0):
        """Return X as a DataFrame indexed by column *index*, with that column dropped."""
        assert isinstance(index, int), '请输入识别列的列序号,以0开始'
        if not isinstance(X, pd.DataFrame):
            X = formatcheck(X)
        X.index = X.iloc[:, index]
        keep_index = [x for x in range(X.shape[1]) if x != index]
        return X.iloc[:, keep_index]

    def fit(self, X=None, na_rate=None, single_dealed=None, is_scale=0, distince_method='Maha', outlier_rate=0.01,
            strange_rate=0.1, nestimators=100, contamination=0.1):
        """Learn the 'normal' profile of X and pre-select potential outliers."""
        self.na_rate = na_rate
        # BUG FIX: the parameter was previously discarded (always reset to
        # None), so the single-value-column cleanup below could never run.
        self.single_dealed = single_dealed
        self.is_scale = is_scale
        self.distince_method = distince_method
        self.outlier_rate = outlier_rate
        self.strange_rate = strange_rate
        self.nestimators = nestimators
        self.contamination = contamination
        self.normalbarycentre = None
        self.Metricset = None
        self.potentialdata_set = None
        self.original_data = X.copy()
        if not isinstance(X, pd.DataFrame):
            print('we will change your data as the format of dataframe~')
        # begin preprocessing the data
        if self.na_rate is not None:
            natt = _natest(X, self.na_rate)
            X = natt.naremove()
        if self.single_dealed:
            vnb = valuenumber(X)
            X = vnb.singlevalueremove()
        if self.is_scale:
            stdd = standardizeddata(X)
            X = stdd.standardstep()
        # begin outliers pre-recognition
        cal_X = X.copy()
        # remove the zero_columns and the collinearity_data
        colt = coltest(cal_X)
        colt_cal_X, colt_col_index = colt.columnstest()
        cov_cal_X = np.cov(colt_cal_X.T)
        # NOTE(review): useful_index is set before the Maha refinement loop
        # below, which may further shrink the column set — TODO confirm.
        self.useful_index = colt_col_index
        if self.distince_method not in ['Maha', 'Eucl']:
            raise NotImplementedError('distince_method should be Maha or Eucl~')
        if self.distince_method == 'Maha':
            # Iteratively drop columns until the covariance matrix is full rank.
            colt1 = coltest(pd.DataFrame(cov_cal_X))
            colt1_cal_X, colt1_col_index = colt1.columnstest()
            if len(colt1_col_index) <= 1:
                raise ValueError(
                    'the outlier among the train data is too small ,PLS turn the is_scale = 0 or add reshape data')
            while cov_cal_X.shape != colt1_cal_X.shape:
                colt = coltest(pd.DataFrame(colt_cal_X).iloc[:, colt1_col_index])
                colt_cal_X, colt_col_index = colt.columnstest()
                cov_cal_X = np.cov(colt_cal_X.T)
                colt1 = coltest(cov_cal_X)
                colt1_cal_X, colt1_col_index = colt1.columnstest()
            cal_X_colt = cal_X.iloc[:, colt1_col_index]
            normalbarycentre = cal_X_colt.mean(axis=0)
            # calculate each case normal degree
            similarity_d = []
            for i in range(cal_X_colt.shape[0]):
                dist = distince(cal_X_colt.iloc[i, :], normalbarycentre)
                similarity_d.append(dist.Mahalanobis_dist(cal_X_colt))
        else:
            normalbarycentre = colt_cal_X.mean(axis=0)
            similarity_d = []
            for i in range(colt_cal_X.shape[0]):
                dist = distince(colt_cal_X.iloc[i, :], normalbarycentre)
                similarity_d.append(dist.euclidean_dist())
        self.normalbarycentre = normalbarycentre
        # spilt all user into outlier,strange and common part
        ggp = grouped(colt_cal_X, similarity_d, self.outlier_rate, self.strange_rate)
        outlierset, _ = ggp.outlier_group()
        strangeset, _ = ggp.strange_group()
        commonset = ggp.common_group()
        traindata = pd.concat([outlierset, commonset], axis=0)
        potentialdata = pd.concat([outlierset, strangeset], axis=0)
        traincol = [x for x in traindata.columns if x != 'simi']
        potentialcol = [x for x in potentialdata.columns if x != 'simi']
        self.Metricset = traindata[traincol]
        self.potentialdata_set = potentialdata[potentialcol]
        # score the cases in outlier and strange part
        ift = isolationforest(self.Metricset, self.nestimators, self.contamination)
        ift.fit()
        self.iforest = ift

    def predict(self, potentialdata, output, is_whole):
        """Score *potentialdata* with the fitted forest and return the top outliers."""
        potentialdata = potentialdata.copy()
        self.is_whole = is_whole
        self.output = output
        score = pd.DataFrame(self.iforest.predict(potentialdata))
        score.index = potentialdata.index
        potentialdata['simi'] = score
        # Min-max normalised outlier degree (larger means more anomalous).
        self.similarity_label = (abs(potentialdata['simi']) - abs(potentialdata['simi']).min()) / (
            abs(potentialdata['simi']).max() - abs(potentialdata['simi']).min())
        if self.output is None:
            potentialdata = potentialdata.sort_values(by='simi')
        elif self.output > 1:
            # output > 1 is an absolute number of cases to keep.
            potentialdata = potentialdata.sort_values(by='simi')
            potentialdata = potentialdata.iloc[:self.output, :]
        elif 0 < self.output < 1:
            # 0 < output < 1 is a fraction of the original data size.
            potentialdata = potentialdata.sort_values(by='simi')
            assert (self.output * self.original_data.shape[0]) < potentialdata.shape[
                0], '你想要产出的异常点超过预估点,请降低异常点数output值'
            potentialdata = potentialdata.iloc[:int(self.output * self.original_data.shape[0]), :]
        assert abs(potentialdata['simi']).max() != abs(potentialdata['simi']).min(), '数据无明显离散异常点'
        # output
        if self.is_whole:
            isw = iswhole(potentialdata)
            out = isw.getoutput(self.original_data)
        else:
            outindex = [x for x in potentialdata.columns if x != 'simi']
            out = potentialdata[outindex]
        return out
| 45.034483 | 177 | 0.643951 | import pandas as pd
import numpy as np
import sys
from .reshapedata import natest as _natest
from .reshapedata import valuenumber, coltest, standardizeddata, formatcheck
from .distince import distince
from .slicing import grouped, isolationforest, iswhole
class Frcwp():
    """Outlier detector: distance-based pre-screening plus an isolation forest.

    fit() learns the 'normal' profile of the data and pre-selects potential
    outliers; predict() scores a potential-outlier set and returns the top
    cases (output=None: all; 0<output<1: fraction; output>1: count).
    """

    def __init__(self):
        # All hyper-parameters/attributes are populated by fit()/predict().
        self.na_rate = None
        self.single_dealed = None
        self.is_scale = None
        self.distince_method = None
        self.outlier_rate = None
        self.strange_rate = None
        self.nestimators = None
        self.contamination = None
        self.is_whole = None
        self.output = None
        self.iforest = None
        self.useful_index = None
        self.original_data = None
        self.similarity_label = None

    def changeformat(self, X, index=0):
        """Return X as a DataFrame indexed by column *index*, with that column dropped."""
        assert isinstance(index, int), '请输入识别列的列序号,以0开始'
        if not isinstance(X, pd.DataFrame):
            X = formatcheck(X)
        X.index = X.iloc[:, index]
        keep_index = [x for x in range(X.shape[1]) if x != index]
        return X.iloc[:, keep_index]

    def fit(self, X=None, na_rate=None, single_dealed=None, is_scale=0, distince_method='Maha', outlier_rate=0.01,
            strange_rate=0.1, nestimators=100, contamination=0.1):
        """Learn the 'normal' profile of X and pre-select potential outliers."""
        self.na_rate = na_rate
        # BUG FIX: the parameter was previously discarded (always reset to
        # None), so the single-value-column cleanup below could never run.
        self.single_dealed = single_dealed
        self.is_scale = is_scale
        self.distince_method = distince_method
        self.outlier_rate = outlier_rate
        self.strange_rate = strange_rate
        self.nestimators = nestimators
        self.contamination = contamination
        self.normalbarycentre = None
        self.Metricset = None
        self.potentialdata_set = None
        self.original_data = X.copy()
        if not isinstance(X, pd.DataFrame):
            print('we will change your data as the format of dataframe~')
        # Optional preprocessing: NaN-heavy columns, single-value columns, scaling.
        if self.na_rate is not None:
            natt = _natest(X, self.na_rate)
            X = natt.naremove()
        if self.single_dealed:
            vnb = valuenumber(X)
            X = vnb.singlevalueremove()
        if self.is_scale:
            stdd = standardizeddata(X)
            X = stdd.standardstep()
        cal_X = X.copy()
        # Remove zero/collinear columns before computing the covariance.
        colt = coltest(cal_X)
        colt_cal_X, colt_col_index = colt.columnstest()
        cov_cal_X = np.cov(colt_cal_X.T)
        # NOTE(review): useful_index is set before the Maha refinement loop
        # below, which may further shrink the column set — TODO confirm.
        self.useful_index = colt_col_index
        if self.distince_method not in ['Maha', 'Eucl']:
            raise NotImplementedError('distince_method should be Maha or Eucl~')
        if self.distince_method == 'Maha':
            # Iteratively drop columns until the covariance matrix is full rank.
            colt1 = coltest(pd.DataFrame(cov_cal_X))
            colt1_cal_X, colt1_col_index = colt1.columnstest()
            if len(colt1_col_index) <= 1:
                raise ValueError(
                    'the outlier among the train data is too small ,PLS turn the is_scale = 0 or add reshape data')
            while cov_cal_X.shape != colt1_cal_X.shape:
                colt = coltest(pd.DataFrame(colt_cal_X).iloc[:, colt1_col_index])
                colt_cal_X, colt_col_index = colt.columnstest()
                cov_cal_X = np.cov(colt_cal_X.T)
                colt1 = coltest(cov_cal_X)
                colt1_cal_X, colt1_col_index = colt1.columnstest()
            cal_X_colt = cal_X.iloc[:, colt1_col_index]
            normalbarycentre = cal_X_colt.mean(axis=0)
            # Distance of each case from the barycentre (normal degree).
            similarity_d = []
            for i in range(cal_X_colt.shape[0]):
                dist = distince(cal_X_colt.iloc[i, :], normalbarycentre)
                similarity_d.append(dist.Mahalanobis_dist(cal_X_colt))
        else:
            normalbarycentre = colt_cal_X.mean(axis=0)
            similarity_d = []
            for i in range(colt_cal_X.shape[0]):
                dist = distince(colt_cal_X.iloc[i, :], normalbarycentre)
                similarity_d.append(dist.euclidean_dist())
        self.normalbarycentre = normalbarycentre
        # Split cases into outlier / strange / common groups by distance.
        ggp = grouped(colt_cal_X, similarity_d, self.outlier_rate, self.strange_rate)
        outlierset, _ = ggp.outlier_group()
        strangeset, _ = ggp.strange_group()
        commonset = ggp.common_group()
        traindata = pd.concat([outlierset, commonset], axis=0)
        potentialdata = pd.concat([outlierset, strangeset], axis=0)
        traincol = [x for x in traindata.columns if x != 'simi']
        potentialcol = [x for x in potentialdata.columns if x != 'simi']
        self.Metricset = traindata[traincol]
        self.potentialdata_set = potentialdata[potentialcol]
        # Train the isolation forest on outlier + common cases.
        ift = isolationforest(self.Metricset, self.nestimators, self.contamination)
        ift.fit()
        self.iforest = ift

    def predict(self, potentialdata, output, is_whole):
        """Score *potentialdata* with the fitted forest and return the top outliers."""
        potentialdata = potentialdata.copy()
        self.is_whole = is_whole
        self.output = output
        score = pd.DataFrame(self.iforest.predict(potentialdata))
        score.index = potentialdata.index
        potentialdata['simi'] = score
        # Min-max normalised outlier degree (larger means more anomalous).
        self.similarity_label = (abs(potentialdata['simi']) - abs(potentialdata['simi']).min()) / (
            abs(potentialdata['simi']).max() - abs(potentialdata['simi']).min())
        if self.output is None:
            potentialdata = potentialdata.sort_values(by='simi')
        elif self.output > 1:
            # output > 1 is an absolute number of cases to keep.
            potentialdata = potentialdata.sort_values(by='simi')
            potentialdata = potentialdata.iloc[:self.output, :]
        elif 0 < self.output < 1:
            # 0 < output < 1 is a fraction of the original data size.
            potentialdata = potentialdata.sort_values(by='simi')
            assert (self.output * self.original_data.shape[0]) < potentialdata.shape[
                0], '你想要产出的异常点超过预估点,请降低异常点数output值'
            potentialdata = potentialdata.iloc[:int(self.output * self.original_data.shape[0]), :]
        assert abs(potentialdata['simi']).max() != abs(potentialdata['simi']).min(), '数据无明显离散异常点'
        if self.is_whole:
            isw = iswhole(potentialdata)
            out = isw.getoutput(self.original_data)
        else:
            outindex = [x for x in potentialdata.columns if x != 'simi']
            out = potentialdata[outindex]
        return out
| true | true |
f7fbaa2a18f1329ee97c281d72bf234ca8b9cc61 | 13 | py | Python | app/tests/__init__.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
] | 2 | 2021-04-25T17:38:03.000Z | 2022-03-20T20:48:50.000Z | app/tests/__init__.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
] | 1 | 2016-11-30T22:25:00.000Z | 2017-01-16T22:43:39.000Z | app/tests/__init__.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
] | null | null | null | """
Tests
""" | 4.333333 | 5 | 0.384615 | true | true | |
f7fbaa39f08144a394f3528eced029a295e4cd57 | 503 | py | Python | env/Lib/site-packages/plotly/validators/choroplethmapbox/colorbar/_dtick.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/Lib/site-packages/plotly/validators/choroplethmapbox/colorbar/_dtick.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/Lib/site-packages/plotly/validators/choroplethmapbox/colorbar/_dtick.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``dtick`` property of ``choroplethmapbox.colorbar``."""

    def __init__(
        self, plotly_name="dtick", parent_name="choroplethmapbox.colorbar", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating, so
        # callers may still pass their own edit_type / implied_edits.
        edit_type = kwargs.pop("edit_type", "colorbars")
        implied_edits = kwargs.pop("implied_edits", {"tickmode": "linear"})
        super(DtickValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs
        )
class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="dtick", parent_name="choroplethmapbox.colorbar", **kwargs
):
super(DtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
**kwargs
)
| true | true |
f7fbaa5cfa1f86677ffd9b5dfd35292a782f7e57 | 560 | py | Python | src/openprocurement/tender/simpledefense/views/cancellation.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 10 | 2020-02-18T01:56:21.000Z | 2022-03-28T00:32:57.000Z | src/openprocurement/tender/simpledefense/views/cancellation.py | quintagroup/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 26 | 2018-07-16T09:30:44.000Z | 2021-02-02T17:51:30.000Z | src/openprocurement/tender/simpledefense/views/cancellation.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 15 | 2019-08-08T10:50:47.000Z | 2022-02-05T14:13:36.000Z | # -*- coding: utf-8 -*-
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openua.views.cancellation import TenderUaCancellationResource as TenderCancellationResource
@optendersresource(
    name="simple.defense:Tender Cancellations",
    collection_path="/tenders/{tender_id}/cancellations",
    path="/tenders/{tender_id}/cancellations/{cancellation_id}",
    procurementMethodType="simple.defense",
    description="Tender cancellations",
)
class TenderUaCancellationResource(TenderCancellationResource):
    """Cancellation endpoints for ``simple.defense`` tenders.

    Reuses the openua cancellation behaviour unchanged; only the route
    registration in the decorator above is specific to this procurement
    method type.
    """
    pass
| 37.333333 | 119 | 0.801786 |
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openua.views.cancellation import TenderUaCancellationResource as TenderCancellationResource
@optendersresource(
name="simple.defense:Tender Cancellations",
collection_path="/tenders/{tender_id}/cancellations",
path="/tenders/{tender_id}/cancellations/{cancellation_id}",
procurementMethodType="simple.defense",
description="Tender cancellations",
)
class TenderUaCancellationResource(TenderCancellationResource):
pass
| true | true |
f7fbac947ff7b60430f8c5943807083ee505dfd9 | 2,899 | py | Python | letsencrypt-apache/letsencrypt_apache/tests/display_ops_test.py | mithrandi/letsencrypt | 8a8711a16ff94b9196868ed37eb22e716c5d8ba9 | [
"Apache-2.0"
] | 2 | 2017-03-13T23:04:40.000Z | 2021-12-11T07:11:01.000Z | letsencrypt-apache/letsencrypt_apache/tests/display_ops_test.py | mithrandi/letsencrypt | 8a8711a16ff94b9196868ed37eb22e716c5d8ba9 | [
"Apache-2.0"
] | null | null | null | letsencrypt-apache/letsencrypt_apache/tests/display_ops_test.py | mithrandi/letsencrypt | 8a8711a16ff94b9196868ed37eb22e716c5d8ba9 | [
"Apache-2.0"
] | 7 | 2017-08-10T21:33:51.000Z | 2021-06-03T15:53:50.000Z | """Test letsencrypt_apache.display_ops."""
import sys
import unittest
import mock
import zope.component
from letsencrypt.display import util as display_util
from letsencrypt import errors
from letsencrypt_apache import obj
from letsencrypt_apache.tests import util
class SelectVhostTest(unittest.TestCase):
    """Tests for letsencrypt_apache.display_ops.select_vhost."""

    def setUp(self):
        # select_vhost talks to the display utility through zope; register
        # a real FileDisplay so non-mocked paths have something to render to.
        zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
        self.base_dir = "/example_path"
        self.vhosts = util.get_vh_truth(
            self.base_dir, "debian_apache_2_4/two_vhost_80")

    @classmethod
    def _call(cls, vhosts):
        """Invoke select_vhost for example.com over *vhosts*."""
        from letsencrypt_apache.display_ops import select_vhost
        return select_vhost("example.com", vhosts)

    @mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
    def test_successful_choice(self, mock_util):
        mock_util().menu.return_value = (display_util.OK, 3)
        self.assertEqual(self.vhosts[3], self._call(self.vhosts))

    @mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
    def test_noninteractive(self, mock_util):
        mock_util().menu.side_effect = errors.MissingCommandlineFlag("no vhost default")
        # The original try/except silently passed when no exception was
        # raised; assertRaises makes the expectation explicit.  str() is
        # used instead of the Python 2-only ``e.message`` attribute.
        with self.assertRaises(errors.MissingCommandlineFlag) as cm:
            self._call(self.vhosts)
        self.assertTrue("VirtualHost directives" in str(cm.exception))

    @mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
    def test_more_info_cancel(self, mock_util):
        # Two help requests followed by a cancel: no vhost is selected and
        # the extra info notification is shown once per help request.
        mock_util().menu.side_effect = [
            (display_util.HELP, 1),
            (display_util.HELP, 0),
            (display_util.CANCEL, -1),
        ]
        self.assertEqual(None, self._call(self.vhosts))
        self.assertEqual(mock_util().notification.call_count, 2)

    def test_no_vhosts(self):
        self.assertEqual(self._call([]), None)

    @mock.patch("letsencrypt_apache.display_ops.display_util")
    @mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
    @mock.patch("letsencrypt_apache.display_ops.logger")
    def test_small_display(self, mock_logger, mock_util, mock_display_util):
        # A narrow terminal should trigger a debug log about truncation.
        mock_display_util.WIDTH = 20
        mock_util().menu.return_value = (display_util.OK, 0)
        self._call(self.vhosts)
        self.assertEqual(mock_logger.debug.call_count, 1)

    @mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
    def test_multiple_names(self, mock_util):
        mock_util().menu.return_value = (display_util.OK, 5)
        self.vhosts.append(
            obj.VirtualHost(
                "path", "aug_path", set([obj.Addr.fromstring("*:80")]),
                False, False,
                "wildcard.com", set(["*.wildcard.com"])))
        self.assertEqual(self.vhosts[5], self._call(self.vhosts))
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 35.353659 | 88 | 0.693343 | import sys
import unittest
import mock
import zope.component
from letsencrypt.display import util as display_util
from letsencrypt import errors
from letsencrypt_apache import obj
from letsencrypt_apache.tests import util
class SelectVhostTest(unittest.TestCase):
def setUp(self):
zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
self.base_dir = "/example_path"
self.vhosts = util.get_vh_truth(
self.base_dir, "debian_apache_2_4/two_vhost_80")
@classmethod
def _call(cls, vhosts):
from letsencrypt_apache.display_ops import select_vhost
return select_vhost("example.com", vhosts)
@mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
def test_successful_choice(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 3)
self.assertEqual(self.vhosts[3], self._call(self.vhosts))
@mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
def test_noninteractive(self, mock_util):
mock_util().menu.side_effect = errors.MissingCommandlineFlag("no vhost default")
try:
self._call(self.vhosts)
except errors.MissingCommandlineFlag as e:
self.assertTrue("VirtualHost directives" in e.message)
@mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
def test_more_info_cancel(self, mock_util):
mock_util().menu.side_effect = [
(display_util.HELP, 1),
(display_util.HELP, 0),
(display_util.CANCEL, -1),
]
self.assertEqual(None, self._call(self.vhosts))
self.assertEqual(mock_util().notification.call_count, 2)
def test_no_vhosts(self):
self.assertEqual(self._call([]), None)
@mock.patch("letsencrypt_apache.display_ops.display_util")
@mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
@mock.patch("letsencrypt_apache.display_ops.logger")
def test_small_display(self, mock_logger, mock_util, mock_display_util):
mock_display_util.WIDTH = 20
mock_util().menu.return_value = (display_util.OK, 0)
self._call(self.vhosts)
self.assertEqual(mock_logger.debug.call_count, 1)
@mock.patch("letsencrypt_apache.display_ops.zope.component.getUtility")
def test_multiple_names(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 5)
self.vhosts.append(
obj.VirtualHost(
"path", "aug_path", set([obj.Addr.fromstring("*:80")]),
False, False,
"wildcard.com", set(["*.wildcard.com"])))
self.assertEqual(self.vhosts[5], self._call(self.vhosts))
if __name__ == "__main__":
unittest.main()
| true | true |
f7fbadad51273535f63cef7042da911a9422fc57 | 1,020 | py | Python | mpa_admin_app/comments/routes.py | norbertorok92/flask_MVC | 134bb6fe9fcaf0550248a906055b17bbb3f31343 | [
"MIT"
] | null | null | null | mpa_admin_app/comments/routes.py | norbertorok92/flask_MVC | 134bb6fe9fcaf0550248a906055b17bbb3f31343 | [
"MIT"
] | null | null | null | mpa_admin_app/comments/routes.py | norbertorok92/flask_MVC | 134bb6fe9fcaf0550248a906055b17bbb3f31343 | [
"MIT"
] | null | null | null | from flask import (render_template, url_for, flash,
redirect, request, abort, Blueprint)
from flask_login import current_user, login_required
from mpa_admin_app import db
from mpa_admin_app.models import Comment
from mpa_admin_app.comments.forms import CommentForm
comments = Blueprint('comments', __name__)
@comments.route("/comment/<int:post_id>", methods=['GET', 'POST'])
@login_required
def new_comment(post_id):
comment = Comment(content=request.form['addComment'], post_id=post_id, author=current_user)
path = request.form['path']
db.session.add(comment)
db.session.commit()
flash('Your comment has been posted!', 'success')
return redirect(path)
@comments.route("/comment/<int:comment_id>/delete", methods=['GET', 'POST'])
@login_required
def delete_comment(comment_id):
    """Delete a comment and redirect back to the submitting page.

    Aborts with 404 if the comment does not exist and with 403 if the
    current user is not its author.
    """
    path = request.form['path']
    comment = Comment.query.get_or_404(comment_id)
    # Only the comment's author may remove it; previously any logged-in
    # user could delete anyone's comment.
    if comment.author != current_user:
        abort(403)
    db.session.delete(comment)
    db.session.commit()
    flash('Your comment has been deleted!', 'success')
    return redirect(path)
| 27.567568 | 92 | 0.748039 | from flask import (render_template, url_for, flash,
redirect, request, abort, Blueprint)
from flask_login import current_user, login_required
from mpa_admin_app import db
from mpa_admin_app.models import Comment
from mpa_admin_app.comments.forms import CommentForm
comments = Blueprint('comments', __name__)
@comments.route("/comment/<int:post_id>", methods=['GET', 'POST'])
@login_required
def new_comment(post_id):
comment = Comment(content=request.form['addComment'], post_id=post_id, author=current_user)
path = request.form['path']
db.session.add(comment)
db.session.commit()
flash('Your comment has been posted!', 'success')
return redirect(path)
@comments.route("/comment/<int:comment_id>/delete", methods=['GET', 'POST'])
@login_required
def delete_comment(comment_id):
path = request.form['path']
comment = Comment.query.get_or_404(comment_id)
db.session.delete(comment)
db.session.commit()
flash('Your comment has been deleted!', 'success')
return redirect(path)
| true | true |
f7fbaff527a618e8f96d582f9b96d647e8d299dc | 431 | py | Python | Institute_Management_System/Institute_Management_System/asgi.py | Amitchawarekar/Institute_Management_System | 89fcf089e6c317647b28368b51304bfd1eda4743 | [
"MIT"
] | null | null | null | Institute_Management_System/Institute_Management_System/asgi.py | Amitchawarekar/Institute_Management_System | 89fcf089e6c317647b28368b51304bfd1eda4743 | [
"MIT"
] | null | null | null | Institute_Management_System/Institute_Management_System/asgi.py | Amitchawarekar/Institute_Management_System | 89fcf089e6c317647b28368b51304bfd1eda4743 | [
"MIT"
] | null | null | null | """
ASGI config for Institute_Management_System project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Institute_Management_System.settings')
application = get_asgi_application()
| 25.352941 | 87 | 0.805104 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Institute_Management_System.settings')
application = get_asgi_application()
| true | true |
f7fbaffca520ad8bdd68aa052c50a385c674284f | 9,328 | py | Python | rootfs/api/models/__init__.py | deiscc/controller | 034be59c2f8860369f2dc8a96a553264aed0ec11 | [
"MIT"
] | null | null | null | rootfs/api/models/__init__.py | deiscc/controller | 034be59c2f8860369f2dc8a96a553264aed0ec11 | [
"MIT"
] | null | null | null | rootfs/api/models/__init__.py | deiscc/controller | 034be59c2f8860369f2dc8a96a553264aed0ec11 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Data models for the Deis API.
"""
import hashlib
import hmac
import importlib
import logging
import morph
import re
import urllib.parse
import uuid
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from rest_framework.exceptions import ValidationError
from rest_framework.authtoken.models import Token
import requests
from requests_toolbelt import user_agent
from api import __version__ as deis_version
from api.exceptions import DeisException, AlreadyExists, ServiceUnavailable, UnprocessableEntity # noqa
from api.utils import dict_merge
from scheduler import KubeException
logger = logging.getLogger(__name__)
session = None
def get_session():
    """Return the lazily-created module-level requests Session.

    The session retries failed connections up to 10 times per scheme and
    identifies the controller via its User-Agent header.
    """
    global session
    if session is not None:
        return session
    session = requests.Session()
    # https://toolbelt.readthedocs.org/en/latest/user-agent.html#user-agent-constructor
    session.headers = {'User-Agent': user_agent('Deis Controller', deis_version)}
    # `mount` a custom adapter that retries failed connections for HTTP and HTTPS requests.
    # http://docs.python-requests.org/en/latest/api/#requests.adapters.HTTPAdapter
    for scheme in ('http://', 'https://'):
        session.mount(scheme, requests.adapters.HTTPAdapter(max_retries=10))
    return session
def validate_label(value):
    """
    Check that the value follows the kubernetes name constraints
    http://kubernetes.io/v1.1/docs/design/identifiers.html
    """
    # Valid names are one or more lowercase letters, digits or hyphens.
    if not re.match(r'^[a-z0-9-]+$', value):
        raise ValidationError("Can only contain a-z (lowercase), 0-9 and hyphens")
class AuditedModel(models.Model):
    """Add created and updated fields to a model."""

    # Timestamps maintained automatically by Django on insert / update.
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        """Mark :class:`AuditedModel` as abstract."""
        abstract = True

    @property
    def _scheduler(self):
        # Resolved on every access so a changed SCHEDULER_MODULE setting
        # (e.g. in tests) takes effect without re-importing this module.
        mod = importlib.import_module(settings.SCHEDULER_MODULE)
        return mod.SchedulerClient(settings.SCHEDULER_URL, settings.K8S_API_VERIFY_TLS)

    def _fetch_service_config(self, app, svc_name=None):
        """Fetch the Kubernetes Service for *app* (or *svc_name*) and make
        sure the metadata.annotations / metadata.labels structure exists.
        Raises ServiceUnavailable when the scheduler call fails."""
        try:
            # Get the service from k8s to attach the domain correctly
            if svc_name is None:
                svc_name = app
            svc = self._scheduler.svc.get(app, svc_name).json()
        except KubeException as e:
            raise ServiceUnavailable('Could not fetch Kubernetes Service {}'.format(app)) from e
        # Get minimum structure going if it is missing on the service
        if 'metadata' not in svc or 'annotations' not in svc['metadata']:
            default = {'metadata': {'annotations': {}}}
            svc = dict_merge(svc, default)
        if 'labels' not in svc['metadata']:
            default = {'metadata': {'labels': {}}}
            svc = dict_merge(svc, default)
        return svc

    def _load_service_config(self, app, component, svc_name=None):
        """Return the service annotations for *component* as a nested dict."""
        # fetch service definition with minimum structure
        svc = self._fetch_service_config(app, svc_name)
        # always assume a .deis.cc/ ending
        component = "%s.deis.cc/" % component
        # Filter to only include values for the component and strip component out of it
        # Processes dots into a nested structure
        config = morph.unflatten(morph.pick(svc['metadata']['annotations'], prefix=component))
        return config

    def _save_service_config(self, app, component, data, svc_name=None):
        """Merge *data* (falsy values dropped) into the service annotations
        under the component prefix and push the Service back to Kubernetes."""
        if svc_name is None:
            svc_name = app
        # fetch service definition with minimum structure
        svc = self._fetch_service_config(app, svc_name)
        # always assume a .deis.cc ending
        component = "%s.deis.cc/" % component
        # add component to data and flatten
        data = {"%s%s" % (component, key): value for key, value in list(data.items()) if value}
        svc['metadata']['annotations'].update(morph.flatten(data))
        # Update the k8s service for the application with new service information
        try:
            self._scheduler.svc.update(app, svc_name, svc)
        except KubeException as e:
            raise ServiceUnavailable('Could not update Kubernetes Service {}'.format(app)) from e
class UuidAuditedModel(AuditedModel):
    """Add a UUID primary key to an :class:`AuditedModel`."""

    # Random UUID4 primary key generated client-side at instance creation.
    uuid = models.UUIDField('UUID',
                            default=uuid.uuid4,
                            primary_key=True,
                            editable=False,
                            auto_created=True,
                            unique=True)

    class Meta:
        """Mark :class:`UuidAuditedModel` as abstract."""
        abstract = True
from .app import App, validate_app_id, validate_reserved_names, validate_app_structure # noqa
from .appsettings import AppSettings # noqa
from .build import Build # noqa
from .certificate import Certificate, validate_certificate # noqa
from .config import Config # noqa
from .domain import Domain # noqa
from .service import Service # noqa
from .key import Key, validate_base64 # noqa
from .release import Release # noqa
from .tls import TLS # noqa
# define update/delete callbacks for synchronizing
# models with the configuration management backend
def _log_instance_created(**kwargs):
if kwargs.get('created'):
instance = kwargs['instance']
message = '{} {} created'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
def _log_instance_added(**kwargs):
if kwargs.get('created'):
instance = kwargs['instance']
message = '{} {} added'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
def _log_instance_updated(**kwargs):
instance = kwargs['instance']
message = '{} {} updated'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
def _log_instance_removed(**kwargs):
instance = kwargs['instance']
message = '{} {} removed'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
# special case: log the release summary and send release info to each deploy hook
def _hook_release_created(**kwargs):
    """On release creation, append the release summary to the app log and
    POST release info to every configured deploy hook URL, signing the
    request with an HMAC-SHA1 Authorization header when a secret key is
    configured."""
    if kwargs.get('created'):
        release = kwargs['instance']
        # append release lifecycle logs to the app
        release.app.log(release.summary)
        for deploy_hook in settings.DEIS_DEPLOY_HOOK_URLS:
            url = deploy_hook
            params = {
                'app': release.app,
                'release': 'v{}'.format(release.version),
                'release_summary': release.summary,
                'sha': '',
                'user': release.owner,
            }
            if release.build is not None:
                params['sha'] = release.build.sha
            # order of the query arguments is important when computing the HMAC auth secret
            params = sorted(params.items())
            url += '?{}'.format(urllib.parse.urlencode(params))
            headers = {}
            if settings.DEIS_DEPLOY_HOOK_SECRET_KEY is not None:
                # Sign the full URL (including the sorted query string) so
                # the receiver can verify authenticity.
                headers['Authorization'] = hmac.new(
                    settings.DEIS_DEPLOY_HOOK_SECRET_KEY.encode('utf-8'),
                    url.encode('utf-8'),
                    hashlib.sha1
                ).hexdigest()
            try:
                get_session().post(url, headers=headers)
                # just notify with the base URL, disregard the added URL query
                release.app.log('Deploy hook sent to {}'.format(deploy_hook))
            except requests.RequestException as e:
                # Hook failures are logged on the app but never block the release.
                release.app.log('An error occurred while sending the deploy hook to {}: {}'.format(
                    deploy_hook, e), logging.ERROR)
# Log significant app-related events
post_save.connect(_hook_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_instance_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_instance_added, sender=Certificate, dispatch_uid='api.models.log')
post_save.connect(_log_instance_added, sender=Domain, dispatch_uid='api.models.log')
post_save.connect(_log_instance_updated, sender=AppSettings, dispatch_uid='api.models.log')
post_save.connect(_log_instance_updated, sender=Config, dispatch_uid='api.models.log')
post_delete.connect(_log_instance_removed, sender=Certificate, dispatch_uid='api.models.log')
post_delete.connect(_log_instance_removed, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_instance_removed, sender=TLS, dispatch_uid='api.models.log')
# automatically generate a new token on creation
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Create a DRF auth token for every newly created user."""
    if created:
        Token.objects.create(user=instance)
| 36.4375 | 104 | 0.66295 |
import hashlib
import hmac
import importlib
import logging
import morph
import re
import urllib.parse
import uuid
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from rest_framework.exceptions import ValidationError
from rest_framework.authtoken.models import Token
import requests
from requests_toolbelt import user_agent
from api import __version__ as deis_version
from api.exceptions import DeisException, AlreadyExists, ServiceUnavailable, UnprocessableEntity
from api.utils import dict_merge
from scheduler import KubeException
logger = logging.getLogger(__name__)
session = None
def get_session():
global session
if session is None:
session = requests.Session()
session.headers = {
t': user_agent('Deis Controller', deis_version),
}
/', requests.adapters.HTTPAdapter(max_retries=10))
session.mount('https://', requests.adapters.HTTPAdapter(max_retries=10))
return session
def validate_label(value):
match = re.match(r'^[a-z0-9-]+$', value)
if not match:
raise ValidationError("Can only contain a-z (lowercase), 0-9 and hyphens")
class AuditedModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@property
def _scheduler(self):
mod = importlib.import_module(settings.SCHEDULER_MODULE)
return mod.SchedulerClient(settings.SCHEDULER_URL, settings.K8S_API_VERIFY_TLS)
def _fetch_service_config(self, app, svc_name=None):
try:
if svc_name is None:
svc_name = app
svc = self._scheduler.svc.get(app, svc_name).json()
except KubeException as e:
raise ServiceUnavailable('Could not fetch Kubernetes Service {}'.format(app)) from e
if 'metadata' not in svc or 'annotations' not in svc['metadata']:
default = {'metadata': {'annotations': {}}}
svc = dict_merge(svc, default)
if 'labels' not in svc['metadata']:
default = {'metadata': {'labels': {}}}
svc = dict_merge(svc, default)
return svc
def _load_service_config(self, app, component, svc_name=None):
svc = self._fetch_service_config(app, svc_name)
component = "%s.deis.cc/" % component
config = morph.unflatten(morph.pick(svc['metadata']['annotations'], prefix=component))
return config
def _save_service_config(self, app, component, data, svc_name=None):
if svc_name is None:
svc_name = app
svc = self._fetch_service_config(app, svc_name)
component = "%s.deis.cc/" % component
data = {"%s%s" % (component, key): value for key, value in list(data.items()) if value}
svc['metadata']['annotations'].update(morph.flatten(data))
try:
self._scheduler.svc.update(app, svc_name, svc)
except KubeException as e:
raise ServiceUnavailable('Could not update Kubernetes Service {}'.format(app)) from e
class UuidAuditedModel(AuditedModel):
uuid = models.UUIDField('UUID',
default=uuid.uuid4,
primary_key=True,
editable=False,
auto_created=True,
unique=True)
class Meta:
abstract = True
from .app import App, validate_app_id, validate_reserved_names, validate_app_structure
from .appsettings import AppSettings
from .build import Build
from .certificate import Certificate, validate_certificate
from .config import Config
from .domain import Domain
from .service import Service
from .key import Key, validate_base64
from .release import Release
from .tls import TLS
def _log_instance_created(**kwargs):
if kwargs.get('created'):
instance = kwargs['instance']
message = '{} {} created'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
def _log_instance_added(**kwargs):
if kwargs.get('created'):
instance = kwargs['instance']
message = '{} {} added'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
def _log_instance_updated(**kwargs):
instance = kwargs['instance']
message = '{} {} updated'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
def _log_instance_removed(**kwargs):
instance = kwargs['instance']
message = '{} {} removed'.format(instance.__class__.__name__.lower(), instance)
if hasattr(instance, 'app'):
instance.app.log(message)
else:
logger.info(message)
def _hook_release_created(**kwargs):
if kwargs.get('created'):
release = kwargs['instance']
release.app.log(release.summary)
for deploy_hook in settings.DEIS_DEPLOY_HOOK_URLS:
url = deploy_hook
params = {
'app': release.app,
'release': 'v{}'.format(release.version),
'release_summary': release.summary,
'sha': '',
'user': release.owner,
}
if release.build is not None:
params['sha'] = release.build.sha
params = sorted(params.items())
url += '?{}'.format(urllib.parse.urlencode(params))
headers = {}
if settings.DEIS_DEPLOY_HOOK_SECRET_KEY is not None:
headers['Authorization'] = hmac.new(
settings.DEIS_DEPLOY_HOOK_SECRET_KEY.encode('utf-8'),
url.encode('utf-8'),
hashlib.sha1
).hexdigest()
try:
get_session().post(url, headers=headers)
release.app.log('Deploy hook sent to {}'.format(deploy_hook))
except requests.RequestException as e:
release.app.log('An error occurred while sending the deploy hook to {}: {}'.format(
deploy_hook, e), logging.ERROR)
post_save.connect(_hook_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_instance_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_instance_added, sender=Certificate, dispatch_uid='api.models.log')
post_save.connect(_log_instance_added, sender=Domain, dispatch_uid='api.models.log')
post_save.connect(_log_instance_updated, sender=AppSettings, dispatch_uid='api.models.log')
post_save.connect(_log_instance_updated, sender=Config, dispatch_uid='api.models.log')
post_delete.connect(_log_instance_removed, sender=Certificate, dispatch_uid='api.models.log')
post_delete.connect(_log_instance_removed, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_instance_removed, sender=TLS, dispatch_uid='api.models.log')
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
| true | true |
f7fbb0e21fd5862d16c0e9f042f42dfc7700ace5 | 1,963 | py | Python | modules/image/semantic_segmentation/humanseg_mobile/data_feed.py | chunzhang-hub/PaddleHub | c5cfd021f77fd59340fb26e223e09a592e6a345f | [
"Apache-2.0"
] | 8,360 | 2019-01-18T10:46:45.000Z | 2022-03-31T14:50:02.000Z | modules/image/semantic_segmentation/humanseg_mobile/data_feed.py | dwuping/PaddleHub | 9a3b23295947e22149cc85c17cb4cf23c03f9e06 | [
"Apache-2.0"
] | 1,158 | 2019-04-11T09:22:43.000Z | 2022-03-31T12:12:09.000Z | modules/image/semantic_segmentation/humanseg_mobile/data_feed.py | dwuping/PaddleHub | 9a3b23295947e22149cc85c17cb4cf23c03f9e06 | [
"Apache-2.0"
] | 1,677 | 2019-04-09T15:07:40.000Z | 2022-03-31T06:41:10.000Z | # -*- coding:utf-8 -*-
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
__all__ = ['reader', 'preprocess_v']
def preprocess_v(img, w, h):
    """Resize *img* to (w, h) and normalise it for network input.

    Returns a float32 CHW array scaled to [0, 1] and standardised with a
    per-channel mean/std of 0.5.
    """
    # BUG FIX: cv2.resize's third positional argument is ``dst``, not the
    # interpolation flag; pass the flag by keyword so bilinear resizing is
    # actually requested instead of raising/ignoring it.
    img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR).astype(np.float32)
    img_mean = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
    img_std = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
    # HWC -> CHW, scale to [0, 1], then standardise.
    img = img.transpose((2, 0, 1)) / 255
    img -= img_mean
    img /= img_std
    return img
def reader(images=None, paths=None):
    """
    Preprocess to yield image.

    Args:
        images (list(numpy.ndarray)): images data, shape of each is [H, W, C]
        paths (list[str]): paths to images.

    Yield:
        each (collections.OrderedDict): info of original image, preprocessed image.
    """
    component = list()
    if paths:
        for im_path in paths:
            each = OrderedDict()
            assert os.path.isfile(im_path), "The {} isn't a valid file path.".format(im_path)
            im = cv2.imread(im_path).astype('float32')
            each['org_im'] = im
            each['org_im_path'] = im_path
            each['org_im_shape'] = im.shape
            component.append(each)
    if images is not None:
        # isinstance (rather than ``type(...) is list``) also accepts
        # list subclasses.
        assert isinstance(images, list), "images should be a list."
        for im in images:
            each = OrderedDict()
            each['org_im'] = im
            # Synthesise a pseudo-path so downstream consumers always have one.
            each['org_im_path'] = 'ndarray_time={}'.format(round(time.time(), 6) * 1e6)
            each['org_im_shape'] = im.shape
            component.append(each)
    for element in component:
        img = element['org_im'].copy()
        # Fixed 192x192 network input; HWC -> CHW, scale to [0, 1], then
        # standardise with per-channel mean/std of 0.5.
        img = cv2.resize(img, (192, 192)).astype(np.float32)
        img_mean = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
        img_std = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
        img = img.transpose((2, 0, 1)) / 255
        img -= img_mean
        img /= img_std
        element['image'] = img
        yield element
| 31.15873 | 93 | 0.564952 |
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
__all__ = ['reader', 'preprocess_v']
def preprocess_v(img, w, h):
img = cv2.resize(img, (w, h), cv2.INTER_LINEAR).astype(np.float32)
img_mean = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
img_std = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
img = img.transpose((2, 0, 1)) / 255
img -= img_mean
img /= img_std
return img
def reader(images=None, paths=None):
component = list()
if paths:
for im_path in paths:
each = OrderedDict()
assert os.path.isfile(im_path), "The {} isn't a valid file path.".format(im_path)
#print(im_path)
im = cv2.imread(im_path).astype('float32')
each['org_im'] = im
each['org_im_path'] = im_path
each['org_im_shape'] = im.shape
component.append(each)
if images is not None:
assert type(images) is list, "images should be a list."
for im in images:
each = OrderedDict()
each['org_im'] = im
each['org_im_path'] = 'ndarray_time={}'.format(round(time.time(), 6) * 1e6)
each['org_im_shape'] = im.shape
component.append(each)
for element in component:
img = element['org_im'].copy()
img = cv2.resize(img, (192, 192)).astype(np.float32)
img_mean = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
img_std = np.array([0.5, 0.5, 0.5]).reshape((3, 1, 1))
img = img.transpose((2, 0, 1)) / 255
img -= img_mean
img /= img_std
element['image'] = img
yield element
| true | true |
f7fbb17ed9afb0e45d4c5b38102a61493754e1be | 1,369 | py | Python | hackerrank/domain/algorithms/implementation/time_in_words/solution.py | spradeepv/dive-into-python | ec27d4686b7b007d21f9ba4f85d042be31ee2639 | [
"MIT"
] | null | null | null | hackerrank/domain/algorithms/implementation/time_in_words/solution.py | spradeepv/dive-into-python | ec27d4686b7b007d21f9ba4f85d042be31ee2639 | [
"MIT"
] | null | null | null | hackerrank/domain/algorithms/implementation/time_in_words/solution.py | spradeepv/dive-into-python | ec27d4686b7b007d21f9ba4f85d042be31ee2639 | [
"MIT"
] | null | null | null | units = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
'nine', 'ten']
teens = ['', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen',
'seventeen', 'eighteen', 'nineteen']
tens = ['', 'ten', 'twenty', 'thirty', 'forty', 'fifty']
h = int(raw_input())
m = int(raw_input())
text = ""
hour = units[h]
if m == 0:
text = units[h] + " o' clock"
elif m > 0 and m <= 30:
if m == 15:
text = "quarter past " + hour
elif m == 30:
text = "half past " + hour
else:
t = m / 10
o = m % 10
if t == 1:
if o >= 1:
text = teens[o]
else:
text = tens[t]
else:
text = tens[t] + " " + units[o]
text = text + " minutes past " + hour
elif m > 30 and m < 60:
hh = h + 1
if hh == 10:
hour = tens[1]
elif hh > 10:
hour = teens[hh - 10]
else:
hour = units[hh]
if m == 45:
text = "quarter to " + hour
else:
m = 60 - m
t = m / 10
o = m % 10
if t == 1:
if o >= 1:
text = teens[o]
else:
text = tens[t]
elif t > 0:
text = tens[t] + " " + units[o]
else:
text = units[o]
text = text + " minutes to " + hour
print (text)
| 25.830189 | 78 | 0.402484 | units = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
'nine', 'ten']
teens = ['', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen',
'seventeen', 'eighteen', 'nineteen']
tens = ['', 'ten', 'twenty', 'thirty', 'forty', 'fifty']
h = int(raw_input())
m = int(raw_input())
text = ""
hour = units[h]
if m == 0:
text = units[h] + " o' clock"
elif m > 0 and m <= 30:
if m == 15:
text = "quarter past " + hour
elif m == 30:
text = "half past " + hour
else:
t = m / 10
o = m % 10
if t == 1:
if o >= 1:
text = teens[o]
else:
text = tens[t]
else:
text = tens[t] + " " + units[o]
text = text + " minutes past " + hour
elif m > 30 and m < 60:
hh = h + 1
if hh == 10:
hour = tens[1]
elif hh > 10:
hour = teens[hh - 10]
else:
hour = units[hh]
if m == 45:
text = "quarter to " + hour
else:
m = 60 - m
t = m / 10
o = m % 10
if t == 1:
if o >= 1:
text = teens[o]
else:
text = tens[t]
elif t > 0:
text = tens[t] + " " + units[o]
else:
text = units[o]
text = text + " minutes to " + hour
print (text)
| true | true |
f7fbb2e83c7db86b9dbc095d2691fdd3e4312bad | 1,425 | py | Python | launchcontainer/browser_tests.py | appsembler/xblock-launchcontainer | 18bf840066efc128571f4b757720e01fc8e89c41 | [
"MIT"
] | 2 | 2016-11-24T04:41:20.000Z | 2017-05-11T14:42:40.000Z | launchcontainer/browser_tests.py | appsembler/xblock-launchcontainer | 18bf840066efc128571f4b757720e01fc8e89c41 | [
"MIT"
] | 34 | 2017-05-23T15:15:42.000Z | 2022-02-03T09:06:58.000Z | launchcontainer/browser_tests.py | appsembler/xblock-launchcontainer | 18bf840066efc128571f4b757720e01fc8e89c41 | [
"MIT"
] | 3 | 2017-04-29T16:12:06.000Z | 2019-08-01T20:41:50.000Z | import unittest
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
BASE_URL = 'http://localhost:8001'
USERNAME = 'YOUR_USERNAME'
PASSWORD = 'YOUR_PASSWORD'
class TestLaunchContainer(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(10)
def tearDown(self):
self.driver.close()
def test_login(self):
"""I can login at /signin."""
self.driver.get('{}/signin'.format(BASE_URL))
assert "Sign In".lower() in self.driver.title.lower()
username_input = self.driver.find_element_by_name('email')
username_input.send_keys(USERNAME)
password_input = self.driver.find_element_by_name('password')
password_input.send_keys(PASSWORD)
login_submit = self.driver.find_element_by_name('submit')
login_submit.click()
# This implicitly asserts that the page has changed.
try:
WebDriverWait(self.driver, 5).until(
EC.title_contains("Studio Home")
)
except TimeoutException:
raise AssertionError(
"The page was not loaded. Current title: {}".format(self.driver.title)
)
finally:
self.driver.quit()
| 30.319149 | 86 | 0.664561 | import unittest
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
BASE_URL = 'http://localhost:8001'
USERNAME = 'YOUR_USERNAME'
PASSWORD = 'YOUR_PASSWORD'
class TestLaunchContainer(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(10)
def tearDown(self):
self.driver.close()
def test_login(self):
self.driver.get('{}/signin'.format(BASE_URL))
assert "Sign In".lower() in self.driver.title.lower()
username_input = self.driver.find_element_by_name('email')
username_input.send_keys(USERNAME)
password_input = self.driver.find_element_by_name('password')
password_input.send_keys(PASSWORD)
login_submit = self.driver.find_element_by_name('submit')
login_submit.click()
try:
WebDriverWait(self.driver, 5).until(
EC.title_contains("Studio Home")
)
except TimeoutException:
raise AssertionError(
"The page was not loaded. Current title: {}".format(self.driver.title)
)
finally:
self.driver.quit()
| true | true |
f7fbb51acebb5d249ad58c45726bc733e35b2963 | 9,695 | py | Python | bin/train_asr.py | voidism/End-to-end-ASR-Pytorch | 509c389fa6ab98c30e227c6f4c8f7474adbc1bb2 | [
"MIT"
] | null | null | null | bin/train_asr.py | voidism/End-to-end-ASR-Pytorch | 509c389fa6ab98c30e227c6f4c8f7474adbc1bb2 | [
"MIT"
] | null | null | null | bin/train_asr.py | voidism/End-to-end-ASR-Pytorch | 509c389fa6ab98c30e227c6f4c8f7474adbc1bb2 | [
"MIT"
] | null | null | null | import torch
from src.solver import BaseSolver
from src.asr import ASR
from src.optim import Optimizer
from src.data import load_dataset
from src.util import human_format, cal_er, feat_to_fig
class Solver(BaseSolver):
''' Solver for training'''
def __init__(self, config, paras, mode):
super().__init__(config, paras, mode)
# Logger settings
self.best_wer = {'att': 3.0, 'ctc': 3.0}
# Curriculum learning affects data loader
self.curriculum = self.config['hparas']['curriculum']
def fetch_data(self, data):
''' Move data to device and compute text seq. length'''
_, feat, feat_len, txt = data
feat = feat.to(self.device)
feat_len = feat_len.to(self.device)
txt = txt.to(self.device)
txt_len = torch.sum(txt != 0, dim=-1)
return feat, feat_len, txt, txt_len
def load_data(self):
''' Load data for training/validation, store tokenizer and input/output shape'''
self.tr_set, self.dv_set, self.feat_dim, self.vocab_size, self.tokenizer, msg = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
self.curriculum > 0, **self.config['data'])
self.verbose(msg)
def set_model(self):
''' Setup ASR model and optimizer '''
# Model
init_adadelta = self.config['hparas']['optimizer'] == 'Adadelta'
self.model = ASR(self.feat_dim, self.vocab_size, init_adadelta, **
self.config['model']).to(self.device)
self.verbose(self.model.create_msg())
model_paras = [{'params': self.model.parameters()}]
# Losses
self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)
# Note: zero_infinity=False is unstable?
self.ctc_loss = torch.nn.CTCLoss(blank=0, zero_infinity=False)
# Plug-ins
self.emb_fuse = False
self.emb_reg = ('emb' in self.config) and (
self.config['emb']['enable'])
if self.emb_reg:
from src.plugin import EmbeddingRegularizer
self.emb_decoder = EmbeddingRegularizer(
self.tokenizer, self.model.dec_dim, **self.config['emb']).to(self.device)
model_paras.append({'params': self.emb_decoder.parameters()})
self.emb_fuse = self.emb_decoder.apply_fuse
if self.emb_fuse:
self.seq_loss = torch.nn.NLLLoss(ignore_index=0)
self.verbose(self.emb_decoder.create_msg())
# Optimizer
self.optimizer = Optimizer(model_paras, **self.config['hparas'])
self.verbose(self.optimizer.create_msg())
# Enable AMP if needed
self.enable_apex()
# Automatically load pre-trained model if self.paras.load is given
self.load_ckpt()
# ToDo: other training methods
def exec(self):
''' Training End-to-end ASR system '''
self.verbose('Total training steps {}.'.format(
human_format(self.max_step)))
ctc_loss, att_loss, emb_loss = None, None, None
n_epochs = 0
self.timer.set()
while self.step < self.max_step:
# Renew dataloader to enable random sampling
if self.curriculum > 0 and n_epochs == self.curriculum:
self.verbose(
'Curriculum learning ends after {} epochs, starting random sampling.'.format(n_epochs))
self.tr_set, _, _, _, _, _ = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
False, **self.config['data'])
for data in self.tr_set:
# Pre-step : update tf_rate/lr_rate and do zero_grad
tf_rate = self.optimizer.pre_step(self.step)
total_loss = 0
# Fetch data
feat, feat_len, txt, txt_len = self.fetch_data(data)
self.timer.cnt('rd')
# Forward model
# Note: txt should NOT start w/ <sos>
ctc_output, encode_len, att_output, att_align, dec_state = \
self.model(feat, feat_len, max(txt_len), tf_rate=tf_rate,
teacher=txt, get_dec_state=self.emb_reg)
# Plugins
if self.emb_reg:
emb_loss, fuse_output = self.emb_decoder(
dec_state, att_output, label=txt)
total_loss += self.emb_decoder.weight*emb_loss
# Compute all objectives
if ctc_output is not None:
if self.paras.cudnn_ctc:
ctc_loss = self.ctc_loss(ctc_output.transpose(0, 1),
txt.to_sparse().values().to(device='cpu', dtype=torch.int32),
[ctc_output.shape[1]] *
len(ctc_output),
txt_len.cpu().tolist())
else:
ctc_loss = self.ctc_loss(ctc_output.transpose(
0, 1), txt, encode_len, txt_len)
total_loss += ctc_loss*self.model.ctc_weight
if att_output is not None:
b, t, _ = att_output.shape
att_output = fuse_output if self.emb_fuse else att_output
att_loss = self.seq_loss(
att_output.contiguous().view(b*t, -1), txt.contiguous().view(-1))
total_loss += att_loss*(1-self.model.ctc_weight)
self.timer.cnt('fw')
# Backprop
grad_norm = self.backward(total_loss)
self.step += 1
# Logger
if (self.step == 1) or (self.step % self.PROGRESS_STEP == 0):
self.progress('Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'
.format(total_loss.cpu().item(), grad_norm, self.timer.show()))
self.write_log(
'loss', {'tr_ctc': ctc_loss, 'tr_att': att_loss})
self.write_log('emb_loss', {'tr': emb_loss})
self.write_log('wer', {'tr_att': cal_er(self.tokenizer, att_output, txt),
'tr_ctc': cal_er(self.tokenizer, ctc_output, txt, ctc=True)})
if self.emb_fuse:
if self.emb_decoder.fuse_learnable:
self.write_log('fuse_lambda', {
'emb': self.emb_decoder.get_weight()})
self.write_log(
'fuse_temp', {'temp': self.emb_decoder.get_temp()})
# Validation
if (self.step == 1) or (self.step % self.valid_step == 0):
self.validate()
# End of step
# https://github.com/pytorch/pytorch/issues/13246#issuecomment-529185354
torch.cuda.empty_cache()
self.timer.set()
if self.step > self.max_step:
break
n_epochs += 1
self.log.close()
def validate(self):
# Eval mode
self.model.eval()
if self.emb_decoder is not None:
self.emb_decoder.eval()
dev_wer = {'att': [], 'ctc': []}
for i, data in enumerate(self.dv_set):
self.progress('Valid step - {}/{}'.format(i+1, len(self.dv_set)))
# Fetch data
feat, feat_len, txt, txt_len = self.fetch_data(data)
# Forward model
with torch.no_grad():
ctc_output, encode_len, att_output, att_align, dec_state = \
self.model(feat, feat_len, int(max(txt_len)*self.DEV_STEP_RATIO),
emb_decoder=self.emb_decoder)
dev_wer['att'].append(cal_er(self.tokenizer, att_output, txt))
dev_wer['ctc'].append(cal_er(self.tokenizer, ctc_output, txt, ctc=True))
# Show some example on tensorboard
if i == len(self.dv_set)//2:
for i in range(min(len(txt), self.DEV_N_EXAMPLE)):
if self.step == 1:
self.write_log('true_text{}'.format(
i), self.tokenizer.decode(txt[i].tolist()))
if att_output is not None:
self.write_log('att_align{}'.format(i), feat_to_fig(
att_align[i, 0, :, :].cpu().detach()))
self.write_log('att_text{}'.format(i), self.tokenizer.decode(
att_output[i].argmax(dim=-1).tolist()))
if ctc_output is not None:
self.write_log('ctc_text{}'.format(i), self.tokenizer.decode(ctc_output[i].argmax(dim=-1).tolist(),
ignore_repeat=True))
# Ckpt if performance improves
for task in ['att', 'ctc']:
dev_wer[task] = sum(dev_wer[task])/len(dev_wer[task])
if dev_wer[task] < self.best_wer[task]:
self.best_wer[task] = dev_wer[task]
self.save_checkpoint('best_{}.pth'.format(task), 'wer', dev_wer[task])
self.write_log('wer', {'dv_'+task: dev_wer[task]})
self.save_checkpoint('latest.pth', 'wer', dev_wer['att'], show_msg=False)
# Resume training
self.model.train()
if self.emb_decoder is not None:
self.emb_decoder.train()
| 44.472477 | 123 | 0.525529 | import torch
from src.solver import BaseSolver
from src.asr import ASR
from src.optim import Optimizer
from src.data import load_dataset
from src.util import human_format, cal_er, feat_to_fig
class Solver(BaseSolver):
def __init__(self, config, paras, mode):
super().__init__(config, paras, mode)
self.best_wer = {'att': 3.0, 'ctc': 3.0}
self.curriculum = self.config['hparas']['curriculum']
def fetch_data(self, data):
_, feat, feat_len, txt = data
feat = feat.to(self.device)
feat_len = feat_len.to(self.device)
txt = txt.to(self.device)
txt_len = torch.sum(txt != 0, dim=-1)
return feat, feat_len, txt, txt_len
def load_data(self):
self.tr_set, self.dv_set, self.feat_dim, self.vocab_size, self.tokenizer, msg = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
self.curriculum > 0, **self.config['data'])
self.verbose(msg)
def set_model(self):
init_adadelta = self.config['hparas']['optimizer'] == 'Adadelta'
self.model = ASR(self.feat_dim, self.vocab_size, init_adadelta, **
self.config['model']).to(self.device)
self.verbose(self.model.create_msg())
model_paras = [{'params': self.model.parameters()}]
self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)
self.ctc_loss = torch.nn.CTCLoss(blank=0, zero_infinity=False)
self.emb_fuse = False
self.emb_reg = ('emb' in self.config) and (
self.config['emb']['enable'])
if self.emb_reg:
from src.plugin import EmbeddingRegularizer
self.emb_decoder = EmbeddingRegularizer(
self.tokenizer, self.model.dec_dim, **self.config['emb']).to(self.device)
model_paras.append({'params': self.emb_decoder.parameters()})
self.emb_fuse = self.emb_decoder.apply_fuse
if self.emb_fuse:
self.seq_loss = torch.nn.NLLLoss(ignore_index=0)
self.verbose(self.emb_decoder.create_msg())
self.optimizer = Optimizer(model_paras, **self.config['hparas'])
self.verbose(self.optimizer.create_msg())
self.enable_apex()
self.load_ckpt()
def exec(self):
self.verbose('Total training steps {}.'.format(
human_format(self.max_step)))
ctc_loss, att_loss, emb_loss = None, None, None
n_epochs = 0
self.timer.set()
while self.step < self.max_step:
if self.curriculum > 0 and n_epochs == self.curriculum:
self.verbose(
'Curriculum learning ends after {} epochs, starting random sampling.'.format(n_epochs))
self.tr_set, _, _, _, _, _ = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
False, **self.config['data'])
for data in self.tr_set:
tf_rate = self.optimizer.pre_step(self.step)
total_loss = 0
feat, feat_len, txt, txt_len = self.fetch_data(data)
self.timer.cnt('rd')
ctc_output, encode_len, att_output, att_align, dec_state = \
self.model(feat, feat_len, max(txt_len), tf_rate=tf_rate,
teacher=txt, get_dec_state=self.emb_reg)
if self.emb_reg:
emb_loss, fuse_output = self.emb_decoder(
dec_state, att_output, label=txt)
total_loss += self.emb_decoder.weight*emb_loss
if ctc_output is not None:
if self.paras.cudnn_ctc:
ctc_loss = self.ctc_loss(ctc_output.transpose(0, 1),
txt.to_sparse().values().to(device='cpu', dtype=torch.int32),
[ctc_output.shape[1]] *
len(ctc_output),
txt_len.cpu().tolist())
else:
ctc_loss = self.ctc_loss(ctc_output.transpose(
0, 1), txt, encode_len, txt_len)
total_loss += ctc_loss*self.model.ctc_weight
if att_output is not None:
b, t, _ = att_output.shape
att_output = fuse_output if self.emb_fuse else att_output
att_loss = self.seq_loss(
att_output.contiguous().view(b*t, -1), txt.contiguous().view(-1))
total_loss += att_loss*(1-self.model.ctc_weight)
self.timer.cnt('fw')
grad_norm = self.backward(total_loss)
self.step += 1
if (self.step == 1) or (self.step % self.PROGRESS_STEP == 0):
self.progress('Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'
.format(total_loss.cpu().item(), grad_norm, self.timer.show()))
self.write_log(
'loss', {'tr_ctc': ctc_loss, 'tr_att': att_loss})
self.write_log('emb_loss', {'tr': emb_loss})
self.write_log('wer', {'tr_att': cal_er(self.tokenizer, att_output, txt),
'tr_ctc': cal_er(self.tokenizer, ctc_output, txt, ctc=True)})
if self.emb_fuse:
if self.emb_decoder.fuse_learnable:
self.write_log('fuse_lambda', {
'emb': self.emb_decoder.get_weight()})
self.write_log(
'fuse_temp', {'temp': self.emb_decoder.get_temp()})
if (self.step == 1) or (self.step % self.valid_step == 0):
self.validate()
cuda.empty_cache()
self.timer.set()
if self.step > self.max_step:
break
n_epochs += 1
self.log.close()
def validate(self):
self.model.eval()
if self.emb_decoder is not None:
self.emb_decoder.eval()
dev_wer = {'att': [], 'ctc': []}
for i, data in enumerate(self.dv_set):
self.progress('Valid step - {}/{}'.format(i+1, len(self.dv_set)))
feat, feat_len, txt, txt_len = self.fetch_data(data)
with torch.no_grad():
ctc_output, encode_len, att_output, att_align, dec_state = \
self.model(feat, feat_len, int(max(txt_len)*self.DEV_STEP_RATIO),
emb_decoder=self.emb_decoder)
dev_wer['att'].append(cal_er(self.tokenizer, att_output, txt))
dev_wer['ctc'].append(cal_er(self.tokenizer, ctc_output, txt, ctc=True))
if i == len(self.dv_set)//2:
for i in range(min(len(txt), self.DEV_N_EXAMPLE)):
if self.step == 1:
self.write_log('true_text{}'.format(
i), self.tokenizer.decode(txt[i].tolist()))
if att_output is not None:
self.write_log('att_align{}'.format(i), feat_to_fig(
att_align[i, 0, :, :].cpu().detach()))
self.write_log('att_text{}'.format(i), self.tokenizer.decode(
att_output[i].argmax(dim=-1).tolist()))
if ctc_output is not None:
self.write_log('ctc_text{}'.format(i), self.tokenizer.decode(ctc_output[i].argmax(dim=-1).tolist(),
ignore_repeat=True))
for task in ['att', 'ctc']:
dev_wer[task] = sum(dev_wer[task])/len(dev_wer[task])
if dev_wer[task] < self.best_wer[task]:
self.best_wer[task] = dev_wer[task]
self.save_checkpoint('best_{}.pth'.format(task), 'wer', dev_wer[task])
self.write_log('wer', {'dv_'+task: dev_wer[task]})
self.save_checkpoint('latest.pth', 'wer', dev_wer['att'], show_msg=False)
self.model.train()
if self.emb_decoder is not None:
self.emb_decoder.train()
| true | true |
f7fbb76b0773caa373dd69f082c9489291ff804d | 1,145 | py | Python | tests/fpga/vector_reduce_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | 227 | 2019-03-15T23:39:06.000Z | 2022-03-30T07:49:08.000Z | tests/fpga/vector_reduce_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | 834 | 2019-07-31T22:49:31.000Z | 2022-03-28T14:01:32.000Z | tests/fpga/vector_reduce_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | 64 | 2019-03-19T05:40:37.000Z | 2022-03-11T15:02:42.000Z | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Sums all the element of the vector with a reduce. """
import dace
import numpy as np
import argparse
from dace.fpga_testing import fpga_test
from dace.transformation.interstate import FPGATransformSDFG
N = dace.symbol('N')
@dace.program
def vector_reduce(x: dace.float32[N], s: dace.scalar(dace.float32)):
#transient
tmp = dace.define_local([N], dtype=x.dtype)
@dace.map
def sum(i: _[0:N]):
in_x << x[i]
out >> tmp[i]
out = in_x
dace.reduce(lambda a, b: a + b, tmp, s, axis=(0), identity=0)
@fpga_test()
def test_vector_reduce():
N.set(24)
# Initialize arrays: X, Y and Z
X = np.random.rand(N.get()).astype(dace.float32.type)
s = dace.scalar(dace.float32)
sdfg = vector_reduce.to_sdfg()
sdfg.apply_transformations(FPGATransformSDFG)
sdfg(x=X, s=s, N=N)
# Compute expected result
s_exp = 0.0
for x in X:
s_exp += x
diff = np.linalg.norm(s_exp - s) / N.get()
assert diff <= 1e-5
return sdfg
if __name__ == "__main__":
test_vector_reduce(None)
| 21.603774 | 75 | 0.647162 |
import dace
import numpy as np
import argparse
from dace.fpga_testing import fpga_test
from dace.transformation.interstate import FPGATransformSDFG
N = dace.symbol('N')
@dace.program
def vector_reduce(x: dace.float32[N], s: dace.scalar(dace.float32)):
tmp = dace.define_local([N], dtype=x.dtype)
@dace.map
def sum(i: _[0:N]):
in_x << x[i]
out >> tmp[i]
out = in_x
dace.reduce(lambda a, b: a + b, tmp, s, axis=(0), identity=0)
@fpga_test()
def test_vector_reduce():
N.set(24)
X = np.random.rand(N.get()).astype(dace.float32.type)
s = dace.scalar(dace.float32)
sdfg = vector_reduce.to_sdfg()
sdfg.apply_transformations(FPGATransformSDFG)
sdfg(x=X, s=s, N=N)
s_exp = 0.0
for x in X:
s_exp += x
diff = np.linalg.norm(s_exp - s) / N.get()
assert diff <= 1e-5
return sdfg
if __name__ == "__main__":
test_vector_reduce(None)
| true | true |
f7fbb7c3fd5f5fe51bd8a265fc65365f8aea2c10 | 210 | py | Python | File Extentions/File Extentions.py | RyanH7279/Code-Quest-Academy | 542d10edaa3b6c87d58df0dc4abeb6568e12237a | [
"Unlicense"
] | null | null | null | File Extentions/File Extentions.py | RyanH7279/Code-Quest-Academy | 542d10edaa3b6c87d58df0dc4abeb6568e12237a | [
"Unlicense"
] | null | null | null | File Extentions/File Extentions.py | RyanH7279/Code-Quest-Academy | 542d10edaa3b6c87d58df0dc4abeb6568e12237a | [
"Unlicense"
] | null | null | null | cases = int(input())
total = []
for case in range(cases):total.append(input().split(".")[1])
p = {}
for type in total: p[type] = total.count(type)
for key, value in p.items(): print(key + " " + str(value)) | 35 | 61 | 0.609524 | cases = int(input())
total = []
for case in range(cases):total.append(input().split(".")[1])
p = {}
for type in total: p[type] = total.count(type)
for key, value in p.items(): print(key + " " + str(value)) | true | true |
f7fbb82de6e8534437a1ad7b6238c51c30bad02d | 68,875 | py | Python | src/_nats/aio/client.py | charbonnierg/jetstream.py | 4d8dc56fc6953d0a28d207b9b162c6f8d0080d37 | [
"Apache-2.0"
] | 8 | 2021-07-26T10:54:10.000Z | 2021-12-06T08:41:02.000Z | src/_nats/aio/client.py | charbonnierg/jetstream.py | 4d8dc56fc6953d0a28d207b9b162c6f8d0080d37 | [
"Apache-2.0"
] | 3 | 2021-08-09T10:25:39.000Z | 2021-12-06T08:40:41.000Z | src/_nats/aio/client.py | charbonnierg/jetstream.py | 4d8dc56fc6953d0a28d207b9b162c6f8d0080d37 | [
"Apache-2.0"
] | 3 | 2021-08-22T01:55:11.000Z | 2021-09-13T13:51:42.000Z | # Copyright 2016-2021 The NATS Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from asyncio.futures import Future
from asyncio.streams import StreamReader, StreamWriter
from asyncio.tasks import Task
import json
import time
import ssl
import ipaddress
import base64
import warnings
from random import shuffle
from urllib.parse import ParseResult, urlparse
import sys
import logging
from typing import (
AsyncIterator,
Awaitable,
Callable,
Dict,
List,
Optional,
Sequence,
Union,
Tuple,
)
from email.parser import BytesParser
from _nats.aio.errors import *
from _nats.aio.nuid import NUID
from _nats.aio.types import ClientStats, ServerInfos
from _nats.protocol.parser import *
from _nats.protocol import command as prot_command
__version__ = "2.0.0-dev"
__lang__ = "python3"
_logger = logging.getLogger(__name__)
PROTOCOL = 1
INFO_OP = b"INFO"
CONNECT_OP = b"CONNECT"
PING_OP = b"PING"
PONG_OP = b"PONG"
OK_OP = b"+OK"
ERR_OP = b"-ERR"
_CRLF_ = b"\r\n"
_SPC_ = b" "
_EMPTY_ = b""
EMPTY = ""
PING_PROTO = PING_OP + _CRLF_
PONG_PROTO = PONG_OP + _CRLF_
INBOX_PREFIX = bytearray(b"_INBOX.")
INBOX_PREFIX_LEN = len(INBOX_PREFIX) + 22 + 1
DEFAULT_PENDING_SIZE = 1024 * 1024
DEFAULT_BUFFER_SIZE = 32768
DEFAULT_RECONNECT_TIME_WAIT = 2 # in seconds
DEFAULT_MAX_RECONNECT_ATTEMPTS = 60
DEFAULT_PING_INTERVAL = 120 # in seconds
DEFAULT_MAX_OUTSTANDING_PINGS = 2
DEFAULT_MAX_PAYLOAD_SIZE = 1048576
DEFAULT_MAX_FLUSHER_QUEUE_SIZE = 1024
DEFAULT_CONNECT_TIMEOUT = 2 # in seconds
DEFAULT_DRAIN_TIMEOUT = 30 # in seconds
MAX_CONTROL_LINE_SIZE = 1024
# Default Pending Limits of Subscriptions
DEFAULT_SUB_PENDING_MSGS_LIMIT = 65536
DEFAULT_SUB_PENDING_BYTES_LIMIT = 65536 * 1024
NATS_HDR_LINE = bytearray(b"NATS/1.0\r\n")
NO_RESPONDERS_STATUS = "503"
STATUS_MSG_LEN = 3 # e.g. 20x, 40x, 50x
CTRL_LEN = len(_CRLF_)
STATUS_HDR = "Status"
DESC_HDR = "Description"
class Subscription:
"""
A subscription represents interest in a particular subject.
A subscription should not be constructed directly, rather
`connection.subscribe()` should be used to get a subscription.
"""
def __init__(
self,
conn: "Client",
id: int = 0,
subject: str = "",
queue: str = "",
cb: Optional[Callable[["Msg"], Awaitable[None]]] = None,
future: Optional['asyncio.Future["Msg"]'] = None,
max_msgs: int = 0,
pending_msgs_limit: int = DEFAULT_SUB_PENDING_MSGS_LIMIT,
pending_bytes_limit: int = DEFAULT_SUB_PENDING_BYTES_LIMIT,
) -> None:
self._conn = conn
self._id = id
self._subject = subject
self._queue = queue
self._max_msgs = max_msgs
self._received = 0
self._cb = cb
self._future = future
# Per subscription message processor.
self._pending_msgs_limit = pending_msgs_limit
self._pending_bytes_limit = pending_bytes_limit
self._pending_queue: asyncio.Queue[Msg] = asyncio.Queue(
maxsize=pending_msgs_limit
)
self._pending_size = 0
self._wait_for_msgs_task: Optional[Task[None]] = None
self._message_iterator: Optional[_SubscriptionMessageIterator] = None
@property
def messages(self) -> AsyncIterator["Msg"]:
"""
Retrieves an async iterator for the messages from the subscription.
This is only available if a callback isn't provided when creating a
subscription.
"""
if not self._message_iterator:
raise NatsError(
"cannot iterate over messages with a non iteration subscription type"
)
return self._message_iterator
async def next_msg(self, timeout: Optional[float] = 1.0) -> "Msg":
"""
next_msg can be used to retrieve the next message
from a stream of messages using await syntax.
"""
future: Future[Msg] = asyncio.Future()
async def _next_msg() -> None:
msg = await self._pending_queue.get()
future.set_result(msg)
task = asyncio.create_task(_next_msg())
try:
msg = await asyncio.wait_for(future, timeout)
return msg
except asyncio.TimeoutError:
future.cancel()
task.cancel()
raise ErrTimeout
def _start(self, error_cb: Callable[[Exception], Awaitable[None]]) -> None:
"""
Creates the resources for the subscription to start processing messages.
"""
if self._cb:
if not asyncio.iscoroutinefunction(self._cb) and not (
hasattr(self._cb, "func") and asyncio.iscoroutinefunction(self._cb.func)
): # type: ignore[attr-defined]
raise NatsError("nats: must use coroutine for subscriptions")
self._wait_for_msgs_task = asyncio.create_task(
self._wait_for_msgs(error_cb)
)
elif self._future:
# Used to handle the single response from a request.
pass
else:
self._message_iterator = _SubscriptionMessageIterator(self._pending_queue)
async def drain(self) -> None:
"""
Removes interest in a subject, but will process remaining messages.
"""
try:
# Announce server that no longer want to receive more
# messages in this sub and just process the ones remaining.
await self._conn._send_unsubscribe(self._id)
# Roundtrip to ensure that the server has sent all messages.
await self._conn.flush()
if self._pending_queue:
# Wait until no more messages are left,
# then cancel the subscription task.
await self._pending_queue.join()
# stop waiting for messages
self._stop_processing()
# Subscription is done and won't be receiving further
# messages so can throw it away now.
self._conn._remove_sub(self._id)
except asyncio.CancelledError:
# In case draining of a connection times out then
# the sub per task will be canceled as well.
pass
async def unsubscribe(self, limit: int = 0) -> None:
"""
Removes interest in a subject, remaining messages will be discarded.
If `limit` is greater than zero, interest is not immediately removed,
rather, interest will be automatically removed after `limit` messages
are received.
"""
if self._conn.is_closed:
raise ErrConnectionClosed
if self._conn.is_draining:
raise ErrConnectionDraining
self._max_msgs = limit
if limit == 0 or self._received >= limit:
self._stop_processing()
self._conn._remove_sub(self._id)
if not self._conn.is_reconnecting:
await self._conn._send_unsubscribe(self._id, limit=limit)
def _stop_processing(self):
"""
Stops the subscription from processing new messages.
"""
if self._wait_for_msgs_task and not self._wait_for_msgs_task.done():
self._wait_for_msgs_task.cancel()
if self._message_iterator:
self._message_iterator._cancel()
async def _wait_for_msgs(self, error_cb: Callable[[Exception], Awaitable[None]]):
"""
A coroutine to read and process messages if a callback is provided.
Should be called as a task.
"""
while True:
try:
msg = await self._pending_queue.get()
self._pending_size -= len(msg.data)
try:
# Invoke depending of type of handler.
await self._cb(msg) # type: ignore[misc]
except asyncio.CancelledError:
# In case the coroutine handler gets cancelled
# then stop task loop and return.
break
except Exception as e:
# All errors from calling a handler
# are async errors.
if error_cb:
await error_cb(e)
finally:
# indicate the message finished processing so drain can continue
self._pending_queue.task_done()
except asyncio.CancelledError:
break
class _SubscriptionMessageIterator:
def __init__(self, queue) -> None:
self._queue: asyncio.Queue["Msg"] = queue
self._unsubscribed_future: Future[bool] = asyncio.Future()
def _cancel(self) -> None:
if not self._unsubscribed_future.done():
self._unsubscribed_future.set_result(True)
def __aiter__(self) -> "_SubscriptionMessageIterator":
return self
async def __anext__(self) -> "Msg":
get_task = asyncio.create_task(self._queue.get())
finished, _ = await asyncio.wait(
[get_task, self._unsubscribed_future], # type: ignore[type-var]
return_when=asyncio.FIRST_COMPLETED,
)
if get_task in finished:
self._queue.task_done()
return get_task.result()
elif self._unsubscribed_future.done():
get_task.cancel()
raise StopAsyncIteration
class Msg:
"""
Msg represents a message delivered by NATS.
"""
__slots__ = ("subject", "reply", "data", "sid", "_client", "headers")
def __init__(
self,
subject: str = "",
reply: str = "",
data: bytes = b"",
sid: int = 0,
client: Optional["Client"] = None,
headers: Dict[str, str] = None,
) -> None:
self.subject = subject
self.reply = reply
self.data = data
self.sid = sid
self._client = client
self.headers = headers
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: subject='{self.subject}' reply='{self.reply}' data='{self.data[:10].decode()}...'>"
async def respond(self, data: bytes) -> None:
if not self.reply:
raise NatsError("no reply subject available")
if not self._client:
raise NatsError("client not set")
await self._client.publish(self.reply, data, headers=self.headers)
class Srv:
    """
    Srv is a helper data structure to hold state of a server.
    """
    def __init__(self, uri: ParseResult) -> None:
        # Parsed server URL.
        self.uri = uri
        # Number of reconnect attempts made against this server.
        self.reconnects = 0
        # Whether a connection to this server ever succeeded.
        self.did_connect = False
        # True when the server was learned from cluster INFO rather than
        # supplied explicitly by the user.
        self.discovered = False
        # Monotonic timestamp of the last connection attempt, if any.
        self.last_attempt: Optional[float] = None
        # Hostname override for TLS verification (used for implicit routes
        # advertised by IP address).
        self.tls_name: Optional[str] = None
async def _default_error_callback(ex: BaseException) -> None:
    """
    Provides a default way to handle async errors if the user
    does not provide one.
    """
    # Log with the exception's traceback attached for diagnosis.
    _logger.error("nats: encountered error", exc_info=ex)
class Client:
    """
    Asyncio based client for NATS.
    """
    # Message factory; subclasses may override to deliver a custom Msg type.
    msg_class = Msg
    # Connection state machine values stored in `self._status`.
    DISCONNECTED = 0
    CONNECTED = 1
    CLOSED = 2
    RECONNECTING = 3
    CONNECTING = 4
    DRAINING_SUBS = 5
    DRAINING_PUBS = 6
def __repr__(self) -> str:
return f"<nats client v{__version__}>"
    def __init__(self) -> None:
        """Initializes all connection state; no I/O happens until `connect`."""
        # Server selection / cluster discovery state.
        self._current_server: Optional[Srv] = None
        self._server_info: ServerInfos = {}
        self._server_pool: List[Srv] = []
        # Background tasks driving the protocol.
        self._reading_task: Optional[Task[None]] = None
        self._ping_interval_task: Optional[Task[None]] = None
        # PING/PONG bookkeeping used for connection liveness and flush().
        self._pings_outstanding = 0
        self._pongs_received = 0
        self._pongs: List[Future[bool]] = []
        # Raw and (possibly TLS-wrapped) stream reader/writer pairs.
        self._bare_io_reader: Optional[StreamReader] = None
        self._io_reader: Optional[StreamReader] = None
        self._bare_io_writer: Optional[StreamWriter] = None
        self._io_writer: Optional[StreamWriter] = None
        # Last error observed (exposed via `last_error`).
        self._err: Optional[Exception] = None
        # User-provided lifecycle callbacks.
        self._error_cb: Optional[Callable[[Exception], Awaitable[None]]] = None
        self._disconnected_cb: Optional[Callable[[], Awaitable[None]]] = None
        self._closed_cb: Optional[Callable[[], Awaitable[None]]] = None
        self._discovered_server_cb: Optional[Callable[[], None]] = None
        self._reconnected_cb: Optional[Callable[[], Awaitable[None]]] = None
        self._reconnection_task: Optional[Task[None]] = None
        self._reconnection_task_future: Optional[Future[bool]] = None
        self._max_payload = DEFAULT_MAX_PAYLOAD_SIZE
        # This is the client id that the NATS server knows
        # about. Useful in debugging application errors
        # when logged with this identifier along
        # with nats server log.
        # This would make more sense if we log the server
        # connected to as well in case of cluster setup.
        self._client_id: Optional[int] = None
        # Monotonically increasing subscription id and live subscriptions.
        self._sid = 0
        self._subs: Dict[int, Subscription] = {}
        self._status = Client.DISCONNECTED
        self._ps = Parser(self)
        # Outgoing commands buffered until the flusher task writes them.
        self._pending: List[bytes] = []
        self._pending_data_size = 0
        self._flush_queue: Optional[asyncio.Queue[None]] = None
        self._flusher_task: Optional[Task[None]] = None
        self._hdr_parser = BytesParser()
        # New style request/response
        self._resp_map: Dict[str, Future[Msg]] = {}
        self._resp_sub_prefix: Optional[bytearray] = None
        self._nuid = NUID()
        # NKEYS support
        #
        # user_jwt_cb is used to fetch and return the account
        # signed JWT for this user.
        self._user_jwt_cb: Optional[Callable[[], bytes]] = None
        # signature_cb is used to sign a nonce from the server while
        # authenticating with nkeys. The user should sign the nonce and
        # return the base64 encoded signature.
        self._signature_cb: Optional[Callable[[str], bytes]] = None
        # user credentials file can be a tuple or single file.
        self._user_credentials: Union[None, str, Tuple[str, str]] = None
        # file that contains the nkeys seed and its public key as a string.
        self._nkeys_seed: Optional[str] = None
        self._public_nkey: Optional[str] = None
        self.options: Dict[str, Any] = {}
        # Running totals exposed for monitoring.
        self.stats: ClientStats = {
            "in_msgs": 0,
            "out_msgs": 0,
            "in_bytes": 0,
            "out_bytes": 0,
            "reconnects": 0,
            "errors_received": 0,
        }
    async def connect(
        self,
        # NOTE(review): mutable default argument; safe here because it is
        # only read (never mutated), but a None sentinel would be cleaner.
        servers: List[str] = ["nats://127.0.0.1:4222"],
        error_cb: Optional[Callable[[Exception], Awaitable[None]]] = None,
        disconnected_cb: Optional[Callable[[], Awaitable[None]]] = None,
        closed_cb: Optional[Callable[[], Awaitable[None]]] = None,
        discovered_server_cb: Optional[Callable[[], None]] = None,
        reconnected_cb: Optional[Callable[[], Awaitable[None]]] = None,
        name: Optional[str] = None,
        pedantic: bool = False,
        verbose: bool = False,
        allow_reconnect: bool = True,
        connect_timeout: int = DEFAULT_CONNECT_TIMEOUT,
        reconnect_time_wait: int = DEFAULT_RECONNECT_TIME_WAIT,
        max_reconnect_attempts: int = DEFAULT_MAX_RECONNECT_ATTEMPTS,
        ping_interval: int = DEFAULT_PING_INTERVAL,
        max_outstanding_pings: int = DEFAULT_MAX_OUTSTANDING_PINGS,
        dont_randomize: bool = False,
        flusher_queue_size: int = DEFAULT_MAX_FLUSHER_QUEUE_SIZE,
        pending_size: int = DEFAULT_PENDING_SIZE,
        no_echo: bool = False,
        tls: Optional[ssl.SSLContext] = None,
        tls_hostname: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        token: Optional[str] = None,
        drain_timeout: int = DEFAULT_DRAIN_TIMEOUT,
        signature_cb: Optional[Callable[[str], bytes]] = None,
        user_jwt_cb: Optional[Callable[[], bytes]] = None,
        user_credentials: Optional[Union[str, Tuple[str, str]]] = None,
        nkeys_seed: Optional[str] = None,
    ) -> None:
        """
        Establishes a connection to one of the given NATS servers.

        Stores the user options, sets up the server pool and auth
        callbacks, then loops over the pool until a connection succeeds.
        Raises ErrNoServers when the pool is exhausted (unless
        max_reconnect_attempts < 0), ErrInvalidCallbackType when a
        callback is not a coroutine function, and re-raises connection
        errors when allow_reconnect is False.
        """
        # All lifecycle callbacks must be coroutine functions.
        for cb in [
            error_cb,
            disconnected_cb,
            closed_cb,
            reconnected_cb,
            discovered_server_cb,
        ]:
            if cb and not asyncio.iscoroutinefunction(cb):
                raise ErrInvalidCallbackType()
        self._setup_server_pool(servers)
        self._error_cb = error_cb or _default_error_callback
        self._closed_cb = closed_cb
        self._discovered_server_cb = discovered_server_cb
        self._reconnected_cb = reconnected_cb
        self._disconnected_cb = disconnected_cb
        # NKEYS support
        self._signature_cb = signature_cb
        self._user_jwt_cb = user_jwt_cb
        self._user_credentials = user_credentials
        self._nkeys_seed = nkeys_seed
        # Customizable options
        self.options["verbose"] = verbose
        self.options["pedantic"] = pedantic
        self.options["name"] = name
        self.options["allow_reconnect"] = allow_reconnect
        self.options["dont_randomize"] = dont_randomize
        self.options["reconnect_time_wait"] = reconnect_time_wait
        self.options["max_reconnect_attempts"] = max_reconnect_attempts
        self.options["ping_interval"] = ping_interval
        self.options["max_outstanding_pings"] = max_outstanding_pings
        self.options["no_echo"] = no_echo
        self.options["user"] = user
        self.options["password"] = password
        self.options["token"] = token
        self.options["connect_timeout"] = connect_timeout
        self.options["drain_timeout"] = drain_timeout
        self.options["pending_size"] = pending_size
        if tls:
            self.options["tls"] = tls
        if tls_hostname:
            self.options["tls_hostname"] = tls_hostname
        # Install JWT/seed-based auth callbacks when credentials were given.
        if self._user_credentials is not None or self._nkeys_seed is not None:
            self._setup_nkeys_connect()
        # Queue used to trigger flushes to the socket
        self._flush_queue = asyncio.Queue(maxsize=flusher_queue_size)
        if self.options["dont_randomize"] is False:
            shuffle(self._server_pool)
        # Keep trying servers from the pool until one connects.
        while True:
            try:
                await self._select_next_server()
                await self._process_connect_init()
                self._current_server.reconnects = 0  # type: ignore[union-attr]
                break
            except ErrNoServers as e:
                if self.options["max_reconnect_attempts"] < 0:
                    # Never stop reconnecting
                    continue
                self._err = e
                raise e
            except (OSError, NatsError, asyncio.TimeoutError) as e:
                self._err = e
                await self._error_cb(e)
                # Bail on first attempt if reconnecting is disallowed.
                if not self.options["allow_reconnect"]:
                    raise e
                # Reset transport state and record the failed attempt.
                await self._close(Client.DISCONNECTED, False)
                self._current_server.last_attempt = (
                    time.monotonic()  # type: ignore[union-attr]
                )
                self._current_server.reconnects += 1  # type: ignore[union-attr]
    async def close(self) -> None:
        """
        Closes the socket to which we are connected and
        sets the client to be in the CLOSED state.
        No further reconnections occur once reaching this point.
        """
        # Delegates to _close with callbacks enabled (the default).
        await self._close(Client.CLOSED)
    async def drain(self) -> None:
        """
        Drain will put a connection into a drain state. All subscriptions will
        immediately be put into a drain state. Upon completion, the publishers
        will be drained and can not publish any additional messages. Upon draining
        of the publishers, the connection will be closed. Use the `closed_cb'
        option to know when the connection has moved from draining to closed.

        Raises ErrConnectionClosed when already closed and
        ErrConnectionReconnecting while (re)connecting; a second call
        during an ongoing drain is a no-op.
        """
        if self.is_draining:
            return
        if self.is_closed:
            raise ErrConnectionClosed
        if self.is_connecting or self.is_reconnecting:
            raise ErrConnectionReconnecting
        # Start draining the subscriptions
        self._status = Client.DRAINING_SUBS
        drain_tasks = []
        for sub in self._subs.values():
            coro = sub.drain()
            task = asyncio.create_task(coro)
            drain_tasks.append(task)
        # Wait for all subscription drains, bounded by drain_timeout.
        drain_is_done = asyncio.gather(*drain_tasks)
        try:
            await asyncio.wait_for(drain_is_done, self.options["drain_timeout"])
        except asyncio.TimeoutError:
            # Retrieve the exception so it is not logged as unhandled.
            drain_is_done.exception()
            drain_is_done.cancel()
            # NOTE(review): passes the ErrDrainTimeout class (not an
            # instance) to the error callback — confirm intended.
            await self._error_cb(ErrDrainTimeout)  # type: ignore[arg-type, misc]
        except asyncio.CancelledError:
            pass
        finally:
            # Regardless of timeout, stop publishers, flush and close.
            self._status = Client.DRAINING_PUBS
            await self.flush()
            await self._close(Client.CLOSED)
async def flush(self, timeout: float = 60) -> None:
"""
Sends a ping to the server expecting a pong back ensuring
what we have written so far has made it to the server and
also enabling measuring of roundtrip time.
In case a pong is not returned within the allowed timeout,
then it will raise ErrTimeout.
"""
if timeout <= 0:
raise ErrBadTimeout
if self.is_closed:
raise ErrConnectionClosed
future: Future[bool] = asyncio.Future()
try:
await self._send_ping(future)
await asyncio.wait_for(future, timeout)
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
async def publish(
self,
subject: str,
payload: bytes = b"",
reply: str = "",
headers: Optional[Dict[str, str]] = None,
) -> None:
"""
Sends a PUB command to the server on the specified subject.
A reply can be used by the recipient to respond to the message.
->> PUB hello <reply> 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 5
"""
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining_pubs:
raise ErrConnectionDraining
payload_size = len(payload)
if payload_size > self._max_payload:
raise ErrMaxPayload
await self._send_publish(subject, reply, payload, payload_size, headers)
    async def publish_request(
        self,
        subject: str,
        reply: str,
        payload: bytes,
    ) -> None:
        """Deprecated. Will be removed in incoming versions.
        Use Client.publish if you want to use headers parameter.
        """
        # Warn once per call site, then delegate to publish() without headers.
        warnings.warn(
            "publish_request method from _nats.aio.client.Client is deprecated. "
            "Use Client.publish(subject, payload, reply, headers) instead."
        )
        await self.publish(subject, payload, reply, None)
    async def request(
        self,
        subject: str,
        payload: bytes = b"",
        timeout: float = 0.5,
        old_style: bool = False,
        headers: Optional[Dict[str, str]] = None,
    ) -> Msg:
        """
        Implements the request/response pattern via pub/sub
        using a single wildcard subscription that handles
        the responses.

        With `old_style=True` a dedicated per-request inbox subscription
        is used instead (note: headers are not passed in that mode).
        """
        if old_style:
            return await self._request_old_style(subject, payload, timeout=timeout)
        return await self._request_new_style(
            subject, payload, timeout=timeout, headers=headers
        )
    async def subscribe(
        self,
        subject: str,
        queue: str = "",
        cb: Optional[Callable[[Msg], Awaitable[None]]] = None,
        future: Optional[asyncio.Future] = None,
        max_msgs: int = 0,
        pending_msgs_limit: int = DEFAULT_SUB_PENDING_MSGS_LIMIT,
        pending_bytes_limit: int = DEFAULT_SUB_PENDING_BYTES_LIMIT,
    ) -> Subscription:
        """
        Expresses interest in a given subject.
        A `Subscription` object will be returned.
        If a callback is provided, messages will be processed asychronously.
        If a callback isn't provided, messages can be retrieved via an
        asynchronous iterator on the returned subscription object.

        Raises ErrBadSubject for an empty subject, ErrConnectionClosed
        when closed, and ErrConnectionDraining while draining.
        """
        if not subject:
            raise ErrBadSubject
        if self.is_closed:
            raise ErrConnectionClosed
        if self.is_draining:
            raise ErrConnectionDraining
        # Allocate the next subscription id.
        self._sid += 1
        sid = self._sid
        sub = Subscription(
            self,
            sid,
            subject,
            queue=queue,
            cb=cb,
            future=future,
            max_msgs=max_msgs,
            pending_msgs_limit=pending_msgs_limit,
            pending_bytes_limit=pending_bytes_limit,
        )
        # Start delivery processing and register BEFORE sending SUB so an
        # immediately delivered message finds the subscription.
        sub._start(self._error_cb)  # type: ignore[arg-type]
        self._subs[sid] = sub
        await self._send_subscribe(sub)
        return sub
@property
def connected_url(self) -> Optional[str]:
if self.is_connected:
return str(self._current_server.uri) # type: ignore[union-attr]
else:
return None
@property
def servers(self) -> List[Srv]:
servers = []
for srv in self._server_pool:
servers.append(srv)
return servers
@property
def discovered_servers(self) -> List[Srv]:
servers = []
for srv in self._server_pool:
if srv.discovered:
servers.append(srv)
return servers
    @property
    def max_payload(self) -> Optional[int]:
        """
        Returns the max payload which we received from the servers INFO
        """
        return self._max_payload  # type: ignore[no-any-return]
    @property
    def client_id(self) -> Optional[int]:
        """
        Returns the client id which we received from the servers INFO
        """
        return self._client_id  # type: ignore[no-any-return]
    @property
    def last_error(self) -> Optional[Exception]:
        """
        Returns the last error which may have occured.
        """
        return self._err  # type: ignore[no-any-return]
    @property
    def pending_data_size(self) -> int:
        """Total bytes currently buffered awaiting a flush to the socket."""
        return self._pending_data_size  # type: ignore[no-any-return]
    @property
    def is_closed(self) -> bool:
        """True once the connection is permanently closed."""
        return self._status == Client.CLOSED  # type: ignore[no-any-return]
    @property
    def is_reconnecting(self) -> bool:
        """True while attempting to reconnect after a dropped connection."""
        return self._status == Client.RECONNECTING  # type: ignore[no-any-return]
    @property
    def is_connected(self) -> bool:
        """True when connected; a draining connection still counts as connected."""
        return (self._status == Client.CONNECTED) or self.is_draining
    @property
    def is_connecting(self) -> bool:
        """True during the initial connection handshake."""
        return self._status == Client.CONNECTING  # type: ignore[no-any-return]
    @property
    def is_draining(self) -> bool:
        """True while draining either subscriptions or publishers."""
        return (  # type: ignore[no-any-return]
            self._status == Client.DRAINING_SUBS or self._status == Client.DRAINING_PUBS
        )
    @property
    def is_draining_pubs(self) -> bool:
        """True during the final (publisher) phase of a drain."""
        return self._status == Client.DRAINING_PUBS  # type: ignore[no-any-return]
def _build_message(
self,
subject: bytes,
reply: bytes,
data: bytes,
headers: Optional[Dict[str, str]],
) -> "Msg":
return self.msg_class(
subject=subject.decode(),
reply=reply.decode(),
data=data,
headers=headers,
client=self,
)
    def _connect_command(self) -> bytes:
        """
        Generates a JSON string with the params to be used
        when sending CONNECT to the server.

          ->> CONNECT {"lang": "python3"}

        Auth fields are chosen in priority order: nkeys signature/JWT,
        explicit user/password, token, then credentials embedded in the
        server URL.
        """
        options = {
            "verbose": self.options["verbose"],
            "pedantic": self.options["pedantic"],
            "lang": __lang__,
            "version": __version__,
            "protocol": PROTOCOL,
        }
        if "headers" in self._server_info:
            options["headers"] = self._server_info["headers"]
            # NOTE(review): copies the "headers" capability flag into
            # no_responders as well — presumably intentional since
            # no_responders requires header support; confirm.
            options["no_responders"] = self._server_info["headers"]
        if "auth_required" in self._server_info:
            if self._server_info["auth_required"]:
                # nkeys: sign the server nonce and attach JWT or public nkey.
                if "nonce" in self._server_info and self._signature_cb is not None:
                    sig = self._signature_cb(self._server_info["nonce"])
                    options["sig"] = sig.decode()
                    if self._user_jwt_cb is not None:
                        jwt = self._user_jwt_cb()
                        options["jwt"] = jwt.decode()
                    elif self._public_nkey is not None:
                        options["nkey"] = self._public_nkey
                # In case there is no password, then consider handle
                # sending a token instead.
                elif (
                    self.options["user"] is not None
                    and self.options["password"] is not None
                ):
                    options["user"] = self.options["user"]
                    options["pass"] = self.options["password"]
                elif self.options["token"] is not None:
                    options["auth_token"] = self.options["token"]
                elif self._current_server.uri.username is not None:  # type: ignore[union-attr]
                    # Credentials embedded in the URL: a lone username is a token.
                    if self._current_server.uri.password is None:  # type: ignore[union-attr]
                        options[
                            "auth_token"
                        ] = self._current_server.uri.username  # type: ignore[union-attr]
                    else:
                        options[
                            "user"
                        ] = self._current_server.uri.username  # type: ignore[union-attr]
                        options[
                            "pass"
                        ] = self._current_server.uri.password  # type: ignore[union-attr]
        if self.options["name"] is not None:
            options["name"] = self.options["name"]
        if self.options["no_echo"] is not None:
            options["echo"] = not self.options["no_echo"]
        # Serialize deterministically (sorted keys) and frame the command.
        connect_opts = json.dumps(options, sort_keys=True)
        return b"".join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_])
def _host_is_ip(self, connect_url: Optional[str]) -> bool:
try:
ipaddress.ip_address(connect_url)
return True
except:
return False
    def _parse_headers(self, headers: Optional[bytes]) -> Dict[str, str]:
        """
        Parses raw NATS header bytes (starting with the `NATS/1.0` line)
        into a dict. Inline status-only headers (e.g. "NATS/1.0 404 No
        Messages") are mapped to the STATUS_HDR/DESC_HDR keys instead.
        """
        hdrs: Dict[str, str] = {}
        if headers is None:
            return hdrs
        # Strip the "NATS/1.0" version prefix before MIME-style parsing.
        raw_headers = headers[len(NATS_HDR_LINE) :]
        parsed_hdrs = self._hdr_parser.parsebytes(raw_headers)
        # Check if it is an inline status message like:
        #
        # NATS/1.0 404 No Messages
        #
        if len(parsed_hdrs.items()) == 0:
            # Slice out the 3-digit status code and the trailing
            # description (minus the CRLF CRLF terminator).
            l = headers[len(NATS_HDR_LINE) - 1 :]
            status = l[:STATUS_MSG_LEN]
            desc = l[STATUS_MSG_LEN + 1 : len(l) - CTRL_LEN - CTRL_LEN]
            hdrs[STATUS_HDR] = status.decode()
            hdrs[DESC_HDR] = desc.decode()
        else:
            for k, v in parsed_hdrs.items():
                hdrs[k] = v
        return hdrs
    def _process_disconnect(self) -> None:
        """
        Process disconnection from the server and set client status
        to DISCONNECTED.
        """
        self._status = Client.DISCONNECTED
    def _process_info(
        self, info: ServerInfos, initial_connection: bool = False
    ) -> None:
        """
        Process INFO lines sent by the server to reconfigure client
        with latest updates from cluster to enable server discovery.

        New servers from `connect_urls` are appended to the pool
        (deduplicated by netloc); the discovered-server callback fires
        only for non-initial INFO updates that added servers.
        """
        if "connect_urls" in info:
            if info["connect_urls"]:
                connect_urls = []
                for connect_url in info["connect_urls"]:
                    # Inherit the scheme of the server we are talking to.
                    scheme = ""
                    if self._current_server.uri.scheme == "tls":  # type: ignore[union-attr]
                        scheme = "tls"
                    else:
                        scheme = "nats"
                    uri = urlparse(f"{scheme}://{connect_url}")
                    srv = Srv(uri)
                    srv.discovered = True
                    # Check whether we should reuse the original hostname.
                    # (TLS verification needs a hostname when the route is
                    # advertised by bare IP.)
                    if (
                        "tls_required" in self._server_info
                        and self._server_info["tls_required"]
                        and self._host_is_ip(uri.hostname)
                    ):
                        srv.tls_name = self._current_server.uri.hostname  # type: ignore[union-attr]
                    # Filter for any similar server in the server pool already.
                    should_add = True
                    for s in self._server_pool:
                        if uri.netloc == s.uri.netloc:
                            should_add = False
                    if should_add:
                        connect_urls.append(srv)
                if self.options["dont_randomize"] is not True:
                    shuffle(connect_urls)
                for srv in connect_urls:
                    self._server_pool.append(srv)
                if (
                    not initial_connection
                    and connect_urls
                    and self._discovered_server_cb
                ):
                    self._discovered_server_cb()
def _remove_sub(self, sid: int) -> None:
self._subs.pop(sid, None)
def _setup_nkeys_connect(self) -> None:
if self._user_credentials is not None:
self._setup_nkeys_jwt_connect()
else:
self._setup_nkeys_seed_connect()
    def _setup_nkeys_jwt_connect(self) -> None:
        """
        Installs `_user_jwt_cb` and `_signature_cb` for credentials-file
        auth. Accepts either a (jwt_file, seed_file) tuple or a single
        .creds file containing both sections. Seeds are read into
        bytearrays and wiped after signing, as a best-effort attempt to
        keep key material out of memory.
        """
        import nkeys
        import os
        creds = self._user_credentials
        if isinstance(creds, tuple) and len(creds) > 1:
            # Separate JWT and seed files.
            def user_cb():
                contents = None
                with open(creds[0], "rb") as f:
                    contents = bytearray(os.fstat(f.fileno()).st_size)
                    f.readinto(contents)
                return contents
            self._user_jwt_cb = user_cb
            def sig_cb(nonce):
                seed = None
                with open(creds[1], "rb") as f:
                    seed = bytearray(os.fstat(f.fileno()).st_size)
                    f.readinto(seed)
                kp = nkeys.from_seed(seed)
                raw_signed = kp.sign(nonce.encode())
                sig = base64.b64encode(raw_signed)
                # Best effort attempt to clear from memory.
                kp.wipe()
                del kp
                del seed
                return sig
            self._signature_cb = sig_cb
        else:
            # Define the functions to be able to sign things using nkeys.
            # Single .creds file: locate the JWT and seed sections by their
            # BEGIN markers.
            def user_cb():
                user_jwt = None
                with open(creds, "rb") as f:
                    while True:
                        line = bytearray(f.readline())
                        if b"BEGIN NATS USER JWT" in line:
                            user_jwt = bytearray(f.readline())
                            break
                # Remove trailing line break but reusing same memory view.
                return user_jwt[: len(user_jwt) - 1]
            self._user_jwt_cb = user_cb
            def sig_cb(nonce):
                user_seed = None
                with open(creds, "rb", buffering=0) as f:
                    for line in f:
                        # Detect line where the NKEY would start and end,
                        # then seek and read into a fixed bytearray that
                        # can be wiped.
                        if b"BEGIN USER NKEY SEED" in line:
                            nkey_start_pos = f.tell()
                            try:
                                next(f)
                            except StopIteration:
                                raise ErrInvalidUserCredentials()
                            nkey_end_pos = f.tell()
                            nkey_size = nkey_end_pos - nkey_start_pos - 1
                            f.seek(nkey_start_pos)
                            # Only gather enough bytes for the user seed
                            # into the pre allocated bytearray.
                            user_seed = bytearray(nkey_size)
                            f.readinto(user_seed)
                kp = nkeys.from_seed(user_seed)
                raw_signed = kp.sign(nonce.encode())
                sig = base64.b64encode(raw_signed)
                # Delete all state related to the keys.
                kp.wipe()
                del user_seed
                del kp
                return sig
            self._signature_cb = sig_cb
    def _setup_nkeys_seed_connect(self) -> None:
        """
        Installs `_public_nkey` and `_signature_cb` for raw seed-file
        auth. The seed is read from disk on each signature request and
        wiped afterwards rather than kept in memory.
        """
        import nkeys
        import os
        seed = None
        creds: str = self._nkeys_seed  # type: ignore[assignment]
        # Derive the public key once so CONNECT can advertise it.
        with open(creds, "rb") as f:
            seed = bytearray(os.fstat(f.fileno()).st_size)
            f.readinto(seed)  # type: ignore[attr-defined]
        kp = nkeys.from_seed(seed)
        self._public_nkey = kp.public_key.decode()
        kp.wipe()
        del kp
        del seed
        def sig_cb(nonce: str) -> bytes:
            seed = None
            # Re-read the seed per signing request instead of caching it.
            with open(creds, "rb") as f:
                seed = bytearray(os.fstat(f.fileno()).st_size)
                f.readinto(seed)  # type: ignore[attr-defined]
            kp = nkeys.from_seed(seed)
            raw_signed = kp.sign(nonce.encode())
            sig = base64.b64encode(raw_signed)
            # Best effort attempt to clear from memory.
            kp.wipe()
            del kp
            del seed
            return sig
        self._signature_cb = sig_cb
def _setup_server_pool(self, connect_url: Union[str, List[str]]) -> None:
if type(connect_url) is str:
try:
if "nats://" in connect_url or "tls://" in connect_url:
# Closer to how the Go client handles this.
# e.g. nats://127.0.0.1:4222
uri = urlparse(connect_url)
elif ":" in connect_url:
# Expand the scheme for the user
# e.g. 127.0.0.1:4222
uri = urlparse(f"nats://{connect_url}")
else:
# Just use the endpoint with the default NATS port.
# e.g. demo.nats.io
uri = urlparse(f"nats://{connect_url}:4222")
# In case only endpoint with scheme was set.
# e.g. nats://demo.nats.io or localhost:
if uri.port is None:
uri = urlparse(f"nats://{uri.hostname}:4222")
except ValueError:
raise NatsError("nats: invalid connect url option")
if uri.hostname is None or uri.hostname == "none":
raise NatsError("nats: invalid hostname in connect url")
self._server_pool.append(Srv(uri))
elif type(connect_url) is list:
try:
for server in connect_url:
uri = urlparse(server)
self._server_pool.append(Srv(uri))
except ValueError:
raise NatsError("nats: invalid connect url option")
else:
raise NatsError("nats: invalid connect url option")
    async def _attempt_reconnect(self) -> None:
        """
        Tears down the failed connection's tasks and socket, then loops
        over the server pool re-establishing the connection, replaying
        subscriptions and flushing pending data. Gives up (closing the
        client) when the pool is exhausted.
        """
        # Stop all background tasks tied to the dead connection.
        if self._reading_task is not None and not self._reading_task.cancelled():
            self._reading_task.cancel()
        if (
            self._ping_interval_task is not None
            and not self._ping_interval_task.cancelled()
        ):
            self._ping_interval_task.cancel()
        if self._flusher_task is not None and not self._flusher_task.cancelled():
            self._flusher_task.cancel()
        if self._io_writer is not None:
            self._io_writer.close()
            try:
                await self._io_writer.wait_closed()
            except Exception as e:
                await self._error_cb(e)  # type: ignore[misc]
        self._err = None
        if self._disconnected_cb is not None:
            await self._disconnected_cb()
        # A concurrent close() may have won the race; nothing to do then.
        if self.is_closed:
            return
        if "dont_randomize" not in self.options or not self.options["dont_randomize"]:
            shuffle(self._server_pool)
        # Create a future that the client can use to control waiting
        # on the reconnection attempts.
        self._reconnection_task_future = asyncio.Future()
        while True:
            try:
                # Try to establish a TCP connection to a server in
                # the cluster then send CONNECT command to it.
                await self._select_next_server()
                await self._process_connect_init()
                # Consider a reconnect to be done once CONNECT was
                # processed by the server successfully.
                self.stats["reconnects"] += 1
                # Reset reconnect attempts for this server
                # since have successfully connected.
                self._current_server.did_connect = True  # type: ignore[union-attr]
                self._current_server.reconnects = 0  # type: ignore[union-attr]
                # Replay all the subscriptions in case there were some.
                subs_to_remove = []
                for sid, sub in self._subs.items():
                    max_msgs = 0
                    if sub._max_msgs > 0:
                        # If we already hit the message limit, remove the subscription and don't resubscribe
                        if sub._received >= sub._max_msgs:
                            subs_to_remove.append(sid)
                            continue
                        # auto unsubscribe the number of messages we have left
                        max_msgs = sub._max_msgs - sub._received
                    sub_cmd = prot_command.sub_cmd(sub._subject, sub._queue, sid)
                    self._io_writer.write(sub_cmd)  # type: ignore[union-attr]
                    if max_msgs > 0:
                        unsub_cmd = prot_command.unsub_cmd(sid, max_msgs)
                        self._io_writer.write(unsub_cmd)  # type: ignore[union-attr]
                for sid in subs_to_remove:
                    self._subs.pop(sid)
                await self._io_writer.drain()  # type: ignore[union-attr]
                # Flush pending data before continuing in connected status.
                # FIXME: Could use future here and wait for an error result
                # to bail earlier in case there are errors in the connection.
                await self._flush_pending()
                self._status = Client.CONNECTED
                await self.flush()
                if self._reconnected_cb is not None:
                    await self._reconnected_cb()
                self._reconnection_task_future = None
                break
            except ErrNoServers as e:
                # Pool exhausted: give up and close for good.
                self._err = e
                await self.close()
                break
            except (OSError, NatsError, asyncio.TimeoutError) as e:
                self._err = e
                await self._error_cb(e)  # type: ignore[misc]
                self._status = Client.RECONNECTING
                self._current_server.last_attempt = (
                    time.monotonic()  # type: ignore[union-attr]
                )
                self._current_server.reconnects += 1  # type: ignore[union-attr]
            except asyncio.CancelledError:
                break
        # Signal anyone awaiting the reconnection attempt (e.g. _close).
        if (
            self._reconnection_task_future is not None
            and not self._reconnection_task_future.cancelled()
        ):
            self._reconnection_task_future.set_result(True)
    async def _close(self, status: int, do_cbs: bool = True) -> None:
        """
        Terminates the connection: cancels background tasks, waits for a
        pending reconnection attempt, flushes remaining buffered data,
        tears down subscriptions and the socket, and optionally fires the
        disconnected/closed callbacks. Sets `_status` to `status` when
        already closed, otherwise to CLOSED.
        """
        if self.is_closed:
            self._status = status
            return
        self._status = Client.CLOSED
        # Kick the flusher once again so it breaks
        # and avoid pending futures.
        await self._flush_pending()
        if self._reading_task is not None and not self._reading_task.cancelled():
            self._reading_task.cancel()
        if (
            self._ping_interval_task is not None
            and not self._ping_interval_task.cancelled()
        ):
            self._ping_interval_task.cancel()
        if self._flusher_task is not None and not self._flusher_task.cancelled():
            self._flusher_task.cancel()
        if self._reconnection_task is not None and not self._reconnection_task.done():
            self._reconnection_task.cancel()
            # Wait for the reconection task to be done which should be soon.
            try:
                if (
                    self._reconnection_task_future is not None
                    and not self._reconnection_task_future.cancelled()
                ):
                    await asyncio.wait_for(
                        self._reconnection_task_future,
                        self.options["reconnect_time_wait"],
                    )
            except (asyncio.CancelledError, asyncio.TimeoutError):
                pass
        # Relinquish control to allow background tasks to wrap up.
        await asyncio.sleep(0)
        if self._current_server is not None:
            # In case there is any pending data at this point, flush before disconnecting.
            if self._pending_data_size > 0:
                self._io_writer.writelines(self._pending[:])  # type: ignore[union-attr]
                self._pending = []
                self._pending_data_size = 0
                await self._io_writer.drain()  # type: ignore[union-attr]
        # Cleanup subscriptions since not reconnecting so no need
        # to replay the subscriptions anymore.
        for sub in self._subs.values():
            # FIXME: Should we clear the pending queue here?
            if sub._wait_for_msgs_task and not sub._wait_for_msgs_task.done():
                sub._wait_for_msgs_task.cancel()
        self._subs.clear()
        if self._io_writer is not None:
            self._io_writer.close()
            try:
                await self._io_writer.wait_closed()
            except Exception as e:
                await self._error_cb(e)  # type: ignore[misc]
        if do_cbs:
            if self._disconnected_cb is not None:
                await self._disconnected_cb()
            if self._closed_cb is not None:
                await self._closed_cb()
        # Set the client_id back to None
        self._client_id = None
    async def _flusher(self) -> None:
        """
        Coroutine which continuously tries to consume pending commands
        and then flushes them to the socket.

        Runs until the connection drops (write error), the task is
        cancelled, or the client leaves the connected state.
        """
        while True:
            if not self.is_connected or self.is_connecting:
                break
            try:
                # Block until someone kicks the queue via _flush_pending().
                await self._flush_queue.get()  # type: ignore[union-attr]
                if self._pending_data_size > 0:
                    # Write a snapshot of the buffer, then reset it.
                    self._io_writer.writelines(  # type: ignore[union-attr]
                        self._pending[:]
                    )
                    self._pending = []
                    self._pending_data_size = 0
                    await self._io_writer.drain()  # type: ignore[union-attr]
            except OSError as e:
                # Socket-level failure: report and hand off to reconnect logic.
                await self._error_cb(e)  # type: ignore[misc]
                await self._process_op_err(e)
                break
            except asyncio.CancelledError:
                break
    async def _flush_pending(self) -> None:
        """
        Wakes the flusher task so buffered commands get written out.
        Called even while disconnected so a stopping flusher unblocks.
        """
        try:
            # kick the flusher!
            await self._flush_queue.put(None)  # type: ignore[union-attr]
            # NOTE(review): the connectivity check happens AFTER the put on
            # purpose — the kick must reach a flusher that is shutting down.
            if not self.is_connected:
                return
        except asyncio.CancelledError:
            pass
async def _init_request_sub(self) -> None:
# TODO just initialize it this way
self._resp_map = {}
self._resp_sub_prefix = INBOX_PREFIX[:]
self._resp_sub_prefix.extend(self._nuid.next())
self._resp_sub_prefix.extend(b".")
resp_mux_subject = self._resp_sub_prefix[:]
resp_mux_subject.extend(b"*")
await self.subscribe(resp_mux_subject.decode(), cb=self._request_sub_callback)
    async def _ping_interval(self) -> None:
        """
        Periodic liveness task: sends a PING every `ping_interval`
        seconds and treats too many unanswered pings as a stale
        connection (handing off to the op-error path).
        """
        while True:
            await asyncio.sleep(self.options["ping_interval"])
            if not self.is_connected:
                continue
            try:
                self._pings_outstanding += 1
                if self._pings_outstanding > self.options["max_outstanding_pings"]:
                    # Server stopped answering: treat as a dropped connection.
                    await self._process_op_err(
                        ErrStaleConnection  # type: ignore[arg-type]
                    )
                    return
                await self._send_ping()
            except asyncio.CancelledError:
                break
            # except asyncio.InvalidStateError:
            #     pass
    async def _process_connect_init(self) -> None:
        """
        Process INFO received from the server and CONNECT to the server
        with authentication. It is also responsible of setting up the
        reading and ping interval tasks from the client.

        Sequence: read INFO, optionally upgrade to TLS, send CONNECT,
        verify with PING/PONG, then start the reader/pinger/flusher tasks.
        """
        self._status = Client.CONNECTING
        # First line from the server must be the INFO message.
        connection_completed = self._io_reader.readline()  # type: ignore[union-attr]
        info_line = await asyncio.wait_for(
            connection_completed, self.options["connect_timeout"]
        )
        if INFO_OP not in info_line:
            raise NatsError(
                "nats: empty response from server when expecting INFO message"
            )
        _, info = info_line.split(INFO_OP + _SPC_, 1)
        try:
            srv_info: ServerInfos = json.loads(info.decode())
        except:
            raise NatsError("nats: info message, json parse error")
        self._server_info = srv_info
        self._process_info(srv_info, initial_connection=True)
        if "max_payload" in self._server_info:
            self._max_payload = self._server_info["max_payload"]
        if "client_id" in self._server_info:
            self._client_id = self._server_info["client_id"]
        # Upgrade the plain TCP streams to TLS when the server demands it.
        if "tls_required" in self._server_info and self._server_info["tls_required"]:
            ssl_context = None
            if "tls" in self.options:
                ssl_context = self.options.get("tls")
            elif self._current_server.uri.scheme == "tls":  # type: ignore[union-attr]
                ssl_context = ssl.create_default_context()
            else:
                raise NatsError("nats: no ssl context provided")
            # Check whether to reuse the original hostname for an implicit route.
            hostname = None
            if "tls_hostname" in self.options:
                hostname = self.options["tls_hostname"]
            elif self._current_server.tls_name is not None:  # type: ignore[union-attr]
                hostname = self._current_server.tls_name  # type: ignore[union-attr]
            else:
                hostname = self._current_server.uri.hostname  # type: ignore[union-attr]
            # just in case something is left
            await self._io_writer.drain()  # type: ignore[union-attr]
            # loop.start_tls was introduced in python 3.7
            # the previous method is removed in 3.9
            if sys.version_info.minor >= 7:
                # manually recreate the stream reader/writer with a tls upgraded transport
                reader = asyncio.StreamReader()
                protocol = asyncio.StreamReaderProtocol(reader)
                transport_future = asyncio.get_event_loop().start_tls(
                    self._io_writer.transport,  # type: ignore[union-attr]
                    protocol,
                    ssl_context,  # type: ignore[arg-type]
                    server_hostname=hostname,
                )
                transport = await asyncio.wait_for(
                    transport_future, self.options["connect_timeout"]
                )
                writer = asyncio.StreamWriter(
                    transport, protocol, reader, asyncio.get_event_loop()
                )
                self._io_reader, self._io_writer = reader, writer
            else:
                # Legacy path: re-open the existing socket with SSL wrapping.
                transport = self._io_writer.transport  # type: ignore[union-attr]
                sock = transport.get_extra_info("socket")
                if not sock:
                    # This shouldn't happen
                    raise NatsError("nats: unable to get socket")
                connection_future = asyncio.open_connection(
                    limit=DEFAULT_BUFFER_SIZE,
                    sock=sock,
                    ssl=ssl_context,
                    server_hostname=hostname,
                )
                self._io_reader, self._io_writer = await asyncio.wait_for(
                    connection_future, self.options["connect_timeout"]
                )
        # Refresh state of parser upon reconnect.
        if self.is_reconnecting:
            self._ps.reset()
        connect_cmd = self._connect_command()
        self._io_writer.write(connect_cmd)  # type: ignore[union-attr]
        await self._io_writer.drain()  # type: ignore[union-attr]
        if self.options["verbose"]:
            # In verbose mode the server acknowledges CONNECT with +OK/-ERR.
            future = self._io_reader.readline()  # type: ignore[union-attr]
            next_op = await asyncio.wait_for(future, self.options["connect_timeout"])
            if OK_OP in next_op:
                # Do nothing
                pass
            elif ERR_OP in next_op:
                err_line = next_op.decode()
                _, err_msg = err_line.split(" ", 1)
                # FIXME: Maybe handling could be more special here,
                # checking for ErrAuthorization for example.
                # await self._process_err(err_msg)
                raise NatsError("nats: " + err_msg.rstrip("\r\n"))
        # Confirm liveness/auth with an explicit PING/PONG roundtrip.
        self._io_writer.write(PING_PROTO)  # type: ignore[union-attr]
        await self._io_writer.drain()  # type: ignore[union-attr]
        future = self._io_reader.readline()  # type: ignore[union-attr]
        next_op = await asyncio.wait_for(future, self.options["connect_timeout"])
        if PONG_PROTO in next_op:
            self._status = Client.CONNECTED
        elif ERR_OP in next_op:
            err_line = next_op.decode()
            _, err_msg = err_line.split(" ", 1)
            # FIXME: Maybe handling could be more special here,
            # checking for ErrAuthorization for example.
            # await self._process_err(err_msg)
            raise NatsError("nats: " + err_msg.rstrip("\r\n"))
        # NOTE(review): this re-check duplicates the branch above and is
        # redundant — candidate for removal.
        if PONG_PROTO in next_op:
            self._status = Client.CONNECTED
        # Start the background tasks that drive the connection.
        self._reading_task = asyncio.get_event_loop().create_task(self._read_loop())
        self._pongs = []
        self._pings_outstanding = 0
        self._ping_interval_task = asyncio.get_event_loop().create_task(
            self._ping_interval()
        )
        # Task for kicking the flusher queue
        self._flusher_task = asyncio.get_event_loop().create_task(self._flusher())
    async def _process_err(self, err_msg: Sequence[bytes]) -> None:
        """
        Processes the raw error message sent by the server
        and closes the connection with the current server.
        """
        if STALE_CONNECTION in err_msg:
            await self._process_op_err(ErrStaleConnection)  # type: ignore[arg-type]
            return
        if AUTHORIZATION_VIOLATION in err_msg:
            self._err = ErrAuthorization  # type: ignore[assignment]
        else:
            m = b"nats: " + err_msg[0]
            err = NatsError(m.decode())
            self._err = err
            # Permissions violations are reported to the error callback only;
            # they do not tear down the connection.
            if PERMISSIONS_ERR in m:
                await self._error_cb(err)  # type: ignore[misc]
                return
        # Skip the disconnect/close callbacks while the initial connect
        # is still in progress.
        do_cbs = False
        if not self.is_connecting:
            do_cbs = True
        # FIXME: Some errors such as 'Invalid Subscription'
        # do not cause the server to close the connection.
        # For now we handle similar as other clients and close.
        asyncio.get_event_loop().create_task(self._close(Client.CLOSED, do_cbs))
    async def _process_msg(
        self,
        sid: int,
        subject: bytes,
        reply: bytes,
        data: bytes,
        headers: Optional[bytes],
    ) -> None:
        """
        Process MSG sent by server.

        Routes the message to the subscription identified by *sid*: either
        resolving an old style request future or enqueueing it for the
        subscription's consumer, dropping it (and reporting ErrSlowConsumer)
        when the pending byte or message limits would be exceeded.
        """
        payload_size = len(data)
        self.stats["in_msgs"] += 1
        self.stats["in_bytes"] += payload_size
        sub = self._subs.get(sid)
        if not sub:
            # Skip in case no subscription present.
            return
        sub._received += 1
        if sub._max_msgs > 0 and sub._received >= sub._max_msgs:
            # Enough messages received, so the subscription can be
            # thrown away now (auto-unsubscribe reached its limit).
            self._subs.pop(sid, None)
            sub._stop_processing()
        try:
            hdrs = self._parse_headers(headers)
        except Exception as e:
            await self._error_cb(e)  # type: ignore[misc]
            return
        msg = self._build_message(subject, reply, data, hdrs)
        # Check if it is an old style request.
        if sub._future:
            if sub._future.cancelled():
                # Already gave up, nothing to do.
                return
            sub._future.set_result(msg)
            return
        # Let subscription wait_for_msgs coroutine process the messages,
        # but in case sending to the subscription task would block,
        # then consider it to be a slow consumer and drop the message.
        try:
            sub._pending_size += payload_size
            # Allow setting pending_bytes_limit to 0 to disable the check.
            if (
                sub._pending_bytes_limit > 0
                and sub._pending_size >= sub._pending_bytes_limit
            ):
                # Subtract the bytes since the message will be thrown away
                # so it would not be pending data.
                sub._pending_size -= payload_size
                await self._error_cb(
                    ErrSlowConsumer(subject=subject, sid=sid)
                )  # type: ignore[misc]
                return
            sub._pending_queue.put_nowait(msg)
        except asyncio.QueueFull:
            # Pending message count limit reached: also a slow consumer.
            await self._error_cb(
                ErrSlowConsumer(subject=subject, sid=sid)
            )  # type: ignore[misc]
    async def _process_op_err(self, e: Exception) -> None:
        """
        Process errors which occurred while reading or parsing
        the protocol. If allow_reconnect is enabled it will
        try to switch the server to which it is currently connected
        otherwise it will disconnect.
        """
        # Ignore while a connect/reconnect is already in flight or done for.
        if self.is_connecting or self.is_closed or self.is_reconnecting:
            return
        if self.options["allow_reconnect"] and self.is_connected:
            self._status = Client.RECONNECTING
            # Drop any partially parsed protocol state before switching servers.
            self._ps.reset()
            if (
                self._reconnection_task is not None
                and not self._reconnection_task.cancelled()
            ):
                # Cancel the previous task in case it may still be running.
                self._reconnection_task.cancel()
            self._reconnection_task = asyncio.get_event_loop().create_task(
                self._attempt_reconnect()
            )
        else:
            self._process_disconnect()
            self._err = e
            await self._close(Client.CLOSED, True)
async def _process_ping(self) -> None:
"""
Process PING sent by server.
"""
await self._send_command(PONG)
await self._flush_pending()
async def _process_pong(self) -> None:
"""
Process PONG sent by server.
"""
if len(self._pongs) > 0:
future = self._pongs.pop(0)
future.set_result(True)
self._pongs_received += 1
self._pings_outstanding = 0
    async def _read_loop(self) -> None:
        """
        Coroutine which gathers bytes sent by the server
        and feeds them to the protocol parser.
        In case of error while reading, it will stop running
        and its task has to be rescheduled.
        """
        while True:
            try:
                should_bail = self.is_closed or self.is_reconnecting
                if should_bail or self._io_reader is None:
                    break
                if self.is_connected and self._io_reader.at_eof():
                    # EOF while we believe we are connected means the
                    # server dropped the connection: treat as stale.
                    await self._error_cb(
                        ErrStaleConnection  # type: ignore[misc, arg-type]
                    )
                    await self._process_op_err(
                        ErrStaleConnection  # type: ignore[arg-type]
                    )
                    break
                b = await self._io_reader.read(DEFAULT_BUFFER_SIZE)
                await self._ps.parse(b)
            except ErrProtocol:
                await self._process_op_err(ErrProtocol)  # type: ignore[arg-type]
                break
            except OSError as e:
                await self._process_op_err(e)
                break
            except asyncio.CancelledError:
                break
async def _request_new_style(
self,
subject: str,
payload: bytes,
timeout: float = 0.5,
headers: Optional[Dict[str, str]] = None,
) -> Msg:
if self.is_draining_pubs:
raise ErrConnectionDraining
if not self._resp_sub_prefix:
await self._init_request_sub()
# Use a new NUID for the token inbox and then use the future.
token = self._nuid.next()
inbox = self._resp_sub_prefix[:] # type: ignore[index]
inbox.extend(token)
future: Future[Msg] = asyncio.Future()
self._resp_map[token.decode()] = future
await self.publish(subject, payload, reply=inbox.decode(), headers=headers)
# Wait for the response or give up on timeout.
try:
msg = await asyncio.wait_for(future, timeout)
return msg
except asyncio.TimeoutError:
self._resp_map.pop(token.decode())
future.cancel()
raise ErrTimeout
    async def _request_old_style(
        self, subject: str, payload: bytes, timeout: float = 0.5
    ) -> Msg:
        """
        Implements the request/response pattern via pub/sub
        using an ephemeral subscription which will be published
        with a limited interest of 1 reply returning the response
        or raising a Timeout error.
        """
        # Create a unique inbox subject for this single request.
        next_inbox = INBOX_PREFIX[:]
        next_inbox.extend(self._nuid.next())
        inbox = next_inbox.decode()
        future: Future[Msg] = asyncio.Future()
        sub = await self.subscribe(inbox, future=future, max_msgs=1)
        # Auto-unsubscribe after the single expected reply.
        await sub.unsubscribe(limit=1)
        await self.publish(subject, payload, reply=inbox)
        try:
            msg = await asyncio.wait_for(future, timeout)
            return msg
        except asyncio.TimeoutError:
            # Clean up the ephemeral subscription before reporting timeout.
            await sub.unsubscribe()
            future.cancel()
            raise ErrTimeout
async def _request_sub_callback(self, msg: Msg) -> None:
token = msg.subject[INBOX_PREFIX_LEN:]
try:
fut = self._resp_map.get(token)
if not fut:
return
fut.set_result(msg)
self._resp_map.pop(token, None)
except (asyncio.CancelledError, asyncio.InvalidStateError):
# Request may have timed out already so remove the entry
self._resp_map.pop(token, None)
    async def _select_next_server(self) -> None:
        """
        Looks up in the server pool for an available server
        and attempts to connect.

        Raises ErrNoServers once the pool is exhausted.
        """
        while True:
            if len(self._server_pool) == 0:
                self._current_server = None
                raise ErrNoServers
            now = time.monotonic()
            s = self._server_pool.pop(0)
            # NOTE(review): when max_reconnect_attempts <= 0 the popped server
            # is never re-appended, so the pool drains — confirm intended.
            if self.options["max_reconnect_attempts"] > 0:
                if s.reconnects > self.options["max_reconnect_attempts"]:
                    # Discard server since already tried to reconnect too many times
                    continue
                # Not yet exceeded max_reconnect_attempts so can still use
                # this server in the future.
                self._server_pool.append(s)
            if (
                s.last_attempt is not None
                and now < s.last_attempt + self.options["reconnect_time_wait"]
            ):
                # Backoff connecting to server if we attempted recently.
                await asyncio.sleep(self.options["reconnect_time_wait"])
            try:
                s.last_attempt = time.monotonic()
                connection_future = asyncio.open_connection(
                    s.uri.hostname, s.uri.port, limit=DEFAULT_BUFFER_SIZE
                )
                r, w = await asyncio.wait_for(
                    connection_future, self.options["connect_timeout"]
                )
                self._current_server = s
                # We keep a reference to the initial transport we used when
                # establishing the connection in case we later upgrade to TLS
                # after getting the first INFO message. This is in order to
                # prevent the GC closing the socket after we send CONNECT
                # and replace the transport.
                #
                # See https://github.com/nats-io/asyncio-nats/issues/43
                self._bare_io_reader = self._io_reader = r
                self._bare_io_writer = self._io_writer = w
                break
            except Exception as e:
                # Record the failure and move on to the next candidate.
                s.last_attempt = time.monotonic()
                s.reconnects += 1
                self._err = e
                await self._error_cb(e)  # type: ignore[misc]
                continue
async def _send_command(self, cmd: bytes, priority: bool = False) -> None:
if priority:
self._pending.insert(0, cmd)
else:
self._pending.append(cmd)
self._pending_data_size += len(cmd)
if self._pending_data_size > self.options["pending_size"]:
await self._flush_pending()
async def _send_ping(self, future: Optional["Future[bool]"] = None) -> None:
if future is None:
future = asyncio.Future()
self._pongs.append(future)
self._io_writer.write(PING_PROTO) # type: ignore[union-attr]
await self._flush_pending()
    async def _send_publish(
        self,
        subject: str,
        reply: str,
        payload: bytes,
        payload_size: int,
        headers: Optional[Dict[str, str]],
    ) -> None:
        """
        Sends PUB command to the NATS server.

        When *headers* are given, an HPUB command carrying a serialized
        headers block is sent instead.
        """
        if subject == "":
            # Publishing on an empty subject is not allowed.
            raise ErrBadSubject
        pub_cmd = None
        if headers is None:
            pub_cmd = prot_command.pub_cmd(subject, reply, payload)
        else:
            # Serialize the headers as a `NATS/1.0` version line followed by
            # `Key: Value` CRLF lines, terminated by an empty line.
            hdr = bytearray()
            hdr.extend(NATS_HDR_LINE)
            for k, v in headers.items():
                hdr.extend(k.encode())
                hdr.extend(b": ")
                hdr.extend(v.encode())
                hdr.extend(_CRLF_)
            hdr.extend(_CRLF_)
            pub_cmd = prot_command.hpub_cmd(subject, reply, hdr, payload)
        self.stats["out_msgs"] += 1
        self.stats["out_bytes"] += payload_size
        await self._send_command(pub_cmd)
        # Kick the flusher only when it is not already scheduled.
        if self._flush_queue.empty():  # type: ignore[union-attr]
            await self._flush_pending()
async def _send_subscribe(self, sub: Subscription) -> None:
sub_cmd = prot_command.sub_cmd(sub._subject, sub._queue, sub._id)
await self._send_command(sub_cmd)
await self._flush_pending()
async def _send_unsubscribe(self, sid: int, limit: int = 1) -> None:
unsub_cmd = prot_command.unsub_cmd(sid, limit)
await self._send_command(unsub_cmd)
await self._flush_pending()
async def __aenter__(self) -> "Client":
"""For when NATS client is used in a context manager"""
return self
async def __aexit__(self, *exc_info: Any) -> None:
"""Close connection to NATS when used in a context manager"""
await self._close(Client.CLOSED, do_cbs=True)
| 36.596706 | 128 | 0.573721 |
import asyncio
from asyncio.futures import Future
from asyncio.streams import StreamReader, StreamWriter
from asyncio.tasks import Task
import json
import time
import ssl
import ipaddress
import base64
import warnings
from random import shuffle
from urllib.parse import ParseResult, urlparse
import sys
import logging
from typing import (
AsyncIterator,
Awaitable,
Callable,
Dict,
List,
Optional,
Sequence,
Union,
Tuple,
)
from email.parser import BytesParser
from _nats.aio.errors import *
from _nats.aio.nuid import NUID
from _nats.aio.types import ClientStats, ServerInfos
from _nats.protocol.parser import *
from _nats.protocol import command as prot_command
__version__ = "2.0.0-dev"
__lang__ = "python3"
_logger = logging.getLogger(__name__)
PROTOCOL = 1
INFO_OP = b"INFO"
CONNECT_OP = b"CONNECT"
PING_OP = b"PING"
PONG_OP = b"PONG"
OK_OP = b"+OK"
ERR_OP = b"-ERR"
_CRLF_ = b"\r\n"
_SPC_ = b" "
_EMPTY_ = b""
EMPTY = ""
PING_PROTO = PING_OP + _CRLF_
PONG_PROTO = PONG_OP + _CRLF_
INBOX_PREFIX = bytearray(b"_INBOX.")
INBOX_PREFIX_LEN = len(INBOX_PREFIX) + 22 + 1
DEFAULT_PENDING_SIZE = 1024 * 1024
DEFAULT_BUFFER_SIZE = 32768
DEFAULT_RECONNECT_TIME_WAIT = 2
DEFAULT_MAX_RECONNECT_ATTEMPTS = 60
DEFAULT_PING_INTERVAL = 120
DEFAULT_MAX_OUTSTANDING_PINGS = 2
DEFAULT_MAX_PAYLOAD_SIZE = 1048576
DEFAULT_MAX_FLUSHER_QUEUE_SIZE = 1024
DEFAULT_CONNECT_TIMEOUT = 2
DEFAULT_DRAIN_TIMEOUT = 30
MAX_CONTROL_LINE_SIZE = 1024
DEFAULT_SUB_PENDING_MSGS_LIMIT = 65536
DEFAULT_SUB_PENDING_BYTES_LIMIT = 65536 * 1024
NATS_HDR_LINE = bytearray(b"NATS/1.0\r\n")
NO_RESPONDERS_STATUS = "503"
STATUS_MSG_LEN = 3
CTRL_LEN = len(_CRLF_)
STATUS_HDR = "Status"
DESC_HDR = "Description"
class Subscription:
    """
    A subscription to a NATS subject, optionally part of a queue group.

    Messages are delivered in one of three modes: to a coroutine callback
    (``cb``), to a one-shot ``future`` (old style requests), or through
    async iteration over ``messages``.
    """
    def __init__(
        self,
        conn: "Client",
        id: int = 0,
        subject: str = "",
        queue: str = "",
        cb: Optional[Callable[["Msg"], Awaitable[None]]] = None,
        future: Optional['asyncio.Future["Msg"]'] = None,
        max_msgs: int = 0,
        pending_msgs_limit: int = DEFAULT_SUB_PENDING_MSGS_LIMIT,
        pending_bytes_limit: int = DEFAULT_SUB_PENDING_BYTES_LIMIT,
    ) -> None:
        self._conn = conn
        self._id = id
        self._subject = subject
        self._queue = queue
        self._max_msgs = max_msgs
        self._received = 0
        self._cb = cb
        self._future = future
        self._pending_msgs_limit = pending_msgs_limit
        self._pending_bytes_limit = pending_bytes_limit
        # Queue buffering messages between the client's read loop
        # and this subscription's consumer.
        self._pending_queue: asyncio.Queue[Msg] = asyncio.Queue(
            maxsize=pending_msgs_limit
        )
        self._pending_size = 0
        self._wait_for_msgs_task: Optional[Task[None]] = None
        self._message_iterator: Optional[_SubscriptionMessageIterator] = None
    @property
    def messages(self) -> AsyncIterator["Msg"]:
        """Async iterator over incoming messages (iteration-mode subs only)."""
        if not self._message_iterator:
            raise NatsError(
                "cannot iterate over messages with a non iteration subscription type"
            )
        return self._message_iterator
    async def next_msg(self, timeout: Optional[float] = 1.0) -> "Msg":
        """Wait up to *timeout* seconds for the next message; raises ErrTimeout."""
        future: Future[Msg] = asyncio.Future()
        async def _next_msg() -> None:
            msg = await self._pending_queue.get()
            future.set_result(msg)
        task = asyncio.create_task(_next_msg())
        try:
            msg = await asyncio.wait_for(future, timeout)
            return msg
        except asyncio.TimeoutError:
            future.cancel()
            task.cancel()
            raise ErrTimeout
    def _start(self, error_cb: Callable[[Exception], Awaitable[None]]) -> None:
        """Start delivery in the mode matching how the subscription was created."""
        if self._cb:
            # Accept coroutine functions, including wrappers that expose a
            # coroutine via a `.func` attribute (e.g. functools.partial).
            if not asyncio.iscoroutinefunction(self._cb) and not (
                hasattr(self._cb, "func") and asyncio.iscoroutinefunction(self._cb.func)
            ):
                raise NatsError("nats: must use coroutine for subscriptions")
            self._wait_for_msgs_task = asyncio.create_task(
                self._wait_for_msgs(error_cb)
            )
        elif self._future:
            # Old style request: the client resolves the future directly,
            # so there is nothing to start here.
            pass
        else:
            self._message_iterator = _SubscriptionMessageIterator(self._pending_queue)
    async def drain(self) -> None:
        """Remove interest in the subject but process any pending messages first."""
        try:
            await self._conn._send_unsubscribe(self._id)
            await self._conn.flush()
            # Wait until all queued messages have been processed.
            if self._pending_queue:
                await self._pending_queue.join()
            self._stop_processing()
            # Subscription will not receive any further
            # messages so it can be thrown away now.
            self._conn._remove_sub(self._id)
        except asyncio.CancelledError:
            # In case draining of a connection times out then
            # the sub per task will be canceled as well.
            pass
    async def unsubscribe(self, limit: int = 0) -> None:
        """
        Remove interest in the subject, or with *limit* > 0 auto-unsubscribe
        once that many messages have been received.
        """
        if self._conn.is_closed:
            raise ErrConnectionClosed
        if self._conn.is_draining:
            raise ErrConnectionDraining
        self._max_msgs = limit
        if limit == 0 or self._received >= limit:
            self._stop_processing()
            self._conn._remove_sub(self._id)
        if not self._conn.is_reconnecting:
            await self._conn._send_unsubscribe(self._id, limit=limit)
    def _stop_processing(self):
        """Cancel the callback delivery task and/or stop the message iterator."""
        if self._wait_for_msgs_task and not self._wait_for_msgs_task.done():
            self._wait_for_msgs_task.cancel()
        if self._message_iterator:
            self._message_iterator._cancel()
    async def _wait_for_msgs(self, error_cb: Callable[[Exception], Awaitable[None]]):
        """Deliver queued messages to the coroutine callback until cancelled."""
        while True:
            try:
                msg = await self._pending_queue.get()
                self._pending_size -= len(msg.data)
                try:
                    # Invoke the coroutine message handler.
                    await self._cb(msg)  # type: ignore[misc]
                except asyncio.CancelledError:
                    # In case the coroutine handler gets cancelled
                    # then stop task loop and return.
                    break
                except Exception as e:
                    # All errors from calling a handler
                    # are async errors.
                    if error_cb:
                        await error_cb(e)
                finally:
                    # Indicate the message finished processing so drain can continue.
                    self._pending_queue.task_done()
            except asyncio.CancelledError:
                break
class _SubscriptionMessageIterator:
    """
    Async iterator over a subscription's pending message queue that stops
    once the subscription is unsubscribed/cancelled.
    """
    def __init__(self, queue) -> None:
        self._queue: asyncio.Queue["Msg"] = queue
        # Resolved when the subscription is cancelled; terminates iteration.
        self._unsubscribed_future: Future[bool] = asyncio.Future()
    def _cancel(self) -> None:
        """Signal the iterator to stop after any in-flight get completes."""
        if not self._unsubscribed_future.done():
            self._unsubscribed_future.set_result(True)
    def __aiter__(self) -> "_SubscriptionMessageIterator":
        return self
    async def __anext__(self) -> "Msg":
        """Wait for either the next queued message or the unsubscribe signal."""
        get_task = asyncio.create_task(self._queue.get())
        finished, _ = await asyncio.wait(
            [get_task, self._unsubscribed_future],  # type: ignore[type-var]
            return_when=asyncio.FIRST_COMPLETED,
        )
        if get_task in finished:
            self._queue.task_done()
            return get_task.result()
        elif self._unsubscribed_future.done():
            get_task.cancel()
            raise StopAsyncIteration
class Msg:
    """
    A NATS message carrying the subject, an optional reply subject,
    the raw payload bytes and optional headers.
    """
    __slots__ = ("subject", "reply", "data", "sid", "_client", "headers")
    def __init__(
        self,
        subject: str = "",
        reply: str = "",
        data: bytes = b"",
        sid: int = 0,
        client: Optional["Client"] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> None:
        self.subject = subject
        self.reply = reply
        self.data = data
        self.sid = sid
        self._client = client
        self.headers = headers
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: subject='{self.subject}' reply='{self.reply}' data='{self.data[:10].decode()}...'>"
    async def respond(self, data: bytes) -> None:
        """
        Publish *data* back on this message's reply subject.

        Raises NatsError when the message has no reply subject or is not
        bound to a client.
        """
        if not self.reply:
            raise NatsError("no reply subject available")
        if not self._client:
            raise NatsError("client not set")
        await self._client.publish(self.reply, data, headers=self.headers)
class Srv:
    """
    A NATS server endpoint in the client's pool, tracking its connect
    and reconnect state.
    """
    def __init__(self, uri: ParseResult) -> None:
        self.uri = uri
        # Number of (re)connect attempts made against this server.
        self.reconnects = 0
        # Whether a connection to this server ever succeeded.
        self.did_connect = False
        # True when the server was learned from an INFO `connect_urls` update.
        self.discovered = False
        # Monotonic timestamp of the last connect attempt, if any.
        self.last_attempt: Optional[float] = None
        # Hostname to use for TLS verification when connecting by IP literal.
        self.tls_name: Optional[str] = None
async def _default_error_callback(ex: BaseException) -> None:
    """Fallback error handler used when the user supplies no error_cb: log it."""
    _logger.error("nats: encountered error", exc_info=ex)
class Client:
msg_class = Msg
DISCONNECTED = 0
CONNECTED = 1
CLOSED = 2
RECONNECTING = 3
CONNECTING = 4
DRAINING_SUBS = 5
DRAINING_PUBS = 6
def __repr__(self) -> str:
return f"<nats client v{__version__}>"
def __init__(self) -> None:
self._current_server: Optional[Srv] = None
self._server_info: ServerInfos = {}
self._server_pool: List[Srv] = []
self._reading_task: Optional[Task[None]] = None
self._ping_interval_task: Optional[Task[None]] = None
self._pings_outstanding = 0
self._pongs_received = 0
self._pongs: List[Future[bool]] = []
self._bare_io_reader: Optional[StreamReader] = None
self._io_reader: Optional[StreamReader] = None
self._bare_io_writer: Optional[StreamWriter] = None
self._io_writer: Optional[StreamWriter] = None
self._err: Optional[Exception] = None
self._error_cb: Optional[Callable[[Exception], Awaitable[None]]] = None
self._disconnected_cb: Optional[Callable[[], Awaitable[None]]] = None
self._closed_cb: Optional[Callable[[], Awaitable[None]]] = None
self._discovered_server_cb: Optional[Callable[[], None]] = None
self._reconnected_cb: Optional[Callable[[], Awaitable[None]]] = None
self._reconnection_task: Optional[Task[None]] = None
self._reconnection_task_future: Optional[Future[bool]] = None
self._max_payload = DEFAULT_MAX_PAYLOAD_SIZE
# This is the client id that the NATS server knows
# about. Useful in debugging application errors
# when logged with this identifier along
# with nats server log.
# This would make more sense if we log the server
# connected to as well in case of cluster setup.
self._client_id: Optional[int] = None
self._sid = 0
self._subs: Dict[int, Subscription] = {}
self._status = Client.DISCONNECTED
self._ps = Parser(self)
self._pending: List[bytes] = []
self._pending_data_size = 0
self._flush_queue: Optional[asyncio.Queue[None]] = None
self._flusher_task: Optional[Task[None]] = None
self._hdr_parser = BytesParser()
# New style request/response
self._resp_map: Dict[str, Future[Msg]] = {}
self._resp_sub_prefix: Optional[bytearray] = None
self._nuid = NUID()
# NKEYS support
#
# user_jwt_cb is used to fetch and return the account
# signed JWT for this user.
self._user_jwt_cb: Optional[Callable[[], bytes]] = None
# signature_cb is used to sign a nonce from the server while
# authenticating with nkeys. The user should sign the nonce and
# return the base64 encoded signature.
self._signature_cb: Optional[Callable[[str], bytes]] = None
# user credentials file can be a tuple or single file.
self._user_credentials: Union[None, str, Tuple[str, str]] = None
# file that contains the nkeys seed and its public key as a string.
self._nkeys_seed: Optional[str] = None
self._public_nkey: Optional[str] = None
self.options: Dict[str, Any] = {}
self.stats: ClientStats = {
"in_msgs": 0,
"out_msgs": 0,
"in_bytes": 0,
"out_bytes": 0,
"reconnects": 0,
"errors_received": 0,
}
async def connect(
self,
servers: List[str] = ["nats://127.0.0.1:4222"],
error_cb: Optional[Callable[[Exception], Awaitable[None]]] = None,
disconnected_cb: Optional[Callable[[], Awaitable[None]]] = None,
closed_cb: Optional[Callable[[], Awaitable[None]]] = None,
discovered_server_cb: Optional[Callable[[], None]] = None,
reconnected_cb: Optional[Callable[[], Awaitable[None]]] = None,
name: Optional[str] = None,
pedantic: bool = False,
verbose: bool = False,
allow_reconnect: bool = True,
connect_timeout: int = DEFAULT_CONNECT_TIMEOUT,
reconnect_time_wait: int = DEFAULT_RECONNECT_TIME_WAIT,
max_reconnect_attempts: int = DEFAULT_MAX_RECONNECT_ATTEMPTS,
ping_interval: int = DEFAULT_PING_INTERVAL,
max_outstanding_pings: int = DEFAULT_MAX_OUTSTANDING_PINGS,
dont_randomize: bool = False,
flusher_queue_size: int = DEFAULT_MAX_FLUSHER_QUEUE_SIZE,
pending_size: int = DEFAULT_PENDING_SIZE,
no_echo: bool = False,
tls: Optional[ssl.SSLContext] = None,
tls_hostname: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
token: Optional[str] = None,
drain_timeout: int = DEFAULT_DRAIN_TIMEOUT,
signature_cb=None,
user_jwt_cb: Optional[Callable[[], bytes]] = None,
user_credentials: Optional[Union[str, Tuple[str, str]]] = None,
nkeys_seed: Optional[str] = None,
) -> None:
for cb in [
error_cb,
disconnected_cb,
closed_cb,
reconnected_cb,
discovered_server_cb,
]:
if cb and not asyncio.iscoroutinefunction(cb):
raise ErrInvalidCallbackType()
self._setup_server_pool(servers)
self._error_cb = error_cb or _default_error_callback
self._closed_cb = closed_cb
self._discovered_server_cb = discovered_server_cb
self._reconnected_cb = reconnected_cb
self._disconnected_cb = disconnected_cb
# NKEYS support
self._signature_cb = signature_cb
self._user_jwt_cb = user_jwt_cb
self._user_credentials = user_credentials
self._nkeys_seed = nkeys_seed
# Customizable options
self.options["verbose"] = verbose
self.options["pedantic"] = pedantic
self.options["name"] = name
self.options["allow_reconnect"] = allow_reconnect
self.options["dont_randomize"] = dont_randomize
self.options["reconnect_time_wait"] = reconnect_time_wait
self.options["max_reconnect_attempts"] = max_reconnect_attempts
self.options["ping_interval"] = ping_interval
self.options["max_outstanding_pings"] = max_outstanding_pings
self.options["no_echo"] = no_echo
self.options["user"] = user
self.options["password"] = password
self.options["token"] = token
self.options["connect_timeout"] = connect_timeout
self.options["drain_timeout"] = drain_timeout
self.options["pending_size"] = pending_size
if tls:
self.options["tls"] = tls
if tls_hostname:
self.options["tls_hostname"] = tls_hostname
if self._user_credentials is not None or self._nkeys_seed is not None:
self._setup_nkeys_connect()
# Queue used to trigger flushes to the socket
self._flush_queue = asyncio.Queue(maxsize=flusher_queue_size)
if self.options["dont_randomize"] is False:
shuffle(self._server_pool)
while True:
try:
await self._select_next_server()
await self._process_connect_init()
self._current_server.reconnects = 0 # type: ignore[union-attr]
break
except ErrNoServers as e:
if self.options["max_reconnect_attempts"] < 0:
# Never stop reconnecting
continue
self._err = e
raise e
except (OSError, NatsError, asyncio.TimeoutError) as e:
self._err = e
await self._error_cb(e)
# Bail on first attempt if reconnecting is disallowed.
if not self.options["allow_reconnect"]:
raise e
await self._close(Client.DISCONNECTED, False)
self._current_server.last_attempt = (
time.monotonic() # type: ignore[union-attr]
)
self._current_server.reconnects += 1 # type: ignore[union-attr]
async def close(self) -> None:
await self._close(Client.CLOSED)
async def drain(self) -> None:
if self.is_draining:
return
if self.is_closed:
raise ErrConnectionClosed
if self.is_connecting or self.is_reconnecting:
raise ErrConnectionReconnecting
# Start draining the subscriptions
self._status = Client.DRAINING_SUBS
drain_tasks = []
for sub in self._subs.values():
coro = sub.drain()
task = asyncio.create_task(coro)
drain_tasks.append(task)
drain_is_done = asyncio.gather(*drain_tasks)
try:
await asyncio.wait_for(drain_is_done, self.options["drain_timeout"])
except asyncio.TimeoutError:
drain_is_done.exception()
drain_is_done.cancel()
await self._error_cb(ErrDrainTimeout) # type: ignore[arg-type, misc]
except asyncio.CancelledError:
pass
finally:
self._status = Client.DRAINING_PUBS
await self.flush()
await self._close(Client.CLOSED)
async def flush(self, timeout: float = 60) -> None:
if timeout <= 0:
raise ErrBadTimeout
if self.is_closed:
raise ErrConnectionClosed
future: Future[bool] = asyncio.Future()
try:
await self._send_ping(future)
await asyncio.wait_for(future, timeout)
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
    async def publish(
        self,
        subject: str,
        payload: bytes = b"",
        reply: str = "",
        headers: Optional[Dict[str, str]] = None,
    ) -> None:
        """
        Publish *payload* to *subject*, optionally setting a *reply*
        subject and message *headers*.

        Raises ErrConnectionClosed when the client is closed,
        ErrConnectionDraining while publishes are draining, and
        ErrMaxPayload when the payload exceeds the maximum size.
        """
        if self.is_closed:
            raise ErrConnectionClosed
        if self.is_draining_pubs:
            raise ErrConnectionDraining
        payload_size = len(payload)
        if payload_size > self._max_payload:
            raise ErrMaxPayload
        await self._send_publish(subject, reply, payload, payload_size, headers)
    async def publish_request(
        self,
        subject: str,
        reply: str,
        payload: bytes,
    ) -> None:
        """Deprecated: publish *payload* on *subject* with a *reply* subject set."""
        warnings.warn(
            "publish_request method from _nats.aio.client.Client is deprecated. "
            "Use Client.publish(subject, payload, reply, headers) instead."
        )
        await self.publish(subject, payload, reply, None)
    async def request(
        self,
        subject: str,
        payload: bytes = b"",
        timeout: float = 0.5,
        old_style: bool = False,
        headers: Optional[Dict[str, str]] = None,
    ) -> Msg:
        """
        Send a request on *subject* and wait up to *timeout* seconds for
        the reply, raising ErrTimeout when none arrives.

        With ``old_style=True`` an ephemeral per-request subscription is
        used; otherwise replies are multiplexed over a single wildcard
        inbox subscription. Headers are only passed along in the new style.
        """
        if old_style:
            return await self._request_old_style(subject, payload, timeout=timeout)
        return await self._request_new_style(
            subject, payload, timeout=timeout, headers=headers
        )
async def subscribe(
self,
subject: str,
queue: str = "",
cb: Optional[Callable[[Msg], Awaitable[None]]] = None,
future: Optional[asyncio.Future] = None,
max_msgs: int = 0,
pending_msgs_limit: int = DEFAULT_SUB_PENDING_MSGS_LIMIT,
pending_bytes_limit: int = DEFAULT_SUB_PENDING_BYTES_LIMIT,
) -> Subscription:
if not subject:
raise ErrBadSubject
if self.is_closed:
raise ErrConnectionClosed
if self.is_draining:
raise ErrConnectionDraining
self._sid += 1
sid = self._sid
sub = Subscription(
self,
sid,
subject,
queue=queue,
cb=cb,
future=future,
max_msgs=max_msgs,
pending_msgs_limit=pending_msgs_limit,
pending_bytes_limit=pending_bytes_limit,
)
sub._start(self._error_cb) # type: ignore[arg-type]
self._subs[sid] = sub
await self._send_subscribe(sub)
return sub
@property
def connected_url(self) -> Optional[str]:
if self.is_connected:
return str(self._current_server.uri) # type: ignore[union-attr]
else:
return None
@property
def servers(self) -> List[Srv]:
servers = []
for srv in self._server_pool:
servers.append(srv)
return servers
@property
def discovered_servers(self) -> List[Srv]:
servers = []
for srv in self._server_pool:
if srv.discovered:
servers.append(srv)
return servers
@property
def max_payload(self) -> Optional[int]:
return self._max_payload # type: ignore[no-any-return]
@property
def client_id(self) -> Optional[int]:
return self._client_id # type: ignore[no-any-return]
@property
def last_error(self) -> Optional[Exception]:
return self._err # type: ignore[no-any-return]
@property
def pending_data_size(self) -> int:
return self._pending_data_size # type: ignore[no-any-return]
@property
def is_closed(self) -> bool:
return self._status == Client.CLOSED # type: ignore[no-any-return]
@property
def is_reconnecting(self) -> bool:
return self._status == Client.RECONNECTING # type: ignore[no-any-return]
@property
def is_connected(self) -> bool:
return (self._status == Client.CONNECTED) or self.is_draining
@property
def is_connecting(self) -> bool:
return self._status == Client.CONNECTING # type: ignore[no-any-return]
@property
def is_draining(self) -> bool:
return ( # type: ignore[no-any-return]
self._status == Client.DRAINING_SUBS or self._status == Client.DRAINING_PUBS
)
@property
def is_draining_pubs(self) -> bool:
return self._status == Client.DRAINING_PUBS # type: ignore[no-any-return]
    def _build_message(
        self,
        subject: bytes,
        reply: bytes,
        data: bytes,
        headers: Optional[Dict[str, str]],
    ) -> "Msg":
        """Construct a message (via the overridable ``msg_class``) from raw wire fields."""
        return self.msg_class(
            subject=subject.decode(),
            reply=reply.decode(),
            data=data,
            headers=headers,
            client=self,
        )
def _connect_command(self) -> bytes:
options = {
"verbose": self.options["verbose"],
"pedantic": self.options["pedantic"],
"lang": __lang__,
"version": __version__,
"protocol": PROTOCOL,
}
if "headers" in self._server_info:
options["headers"] = self._server_info["headers"]
options["no_responders"] = self._server_info["headers"]
if "auth_required" in self._server_info:
if self._server_info["auth_required"]:
if "nonce" in self._server_info and self._signature_cb is not None:
sig = self._signature_cb(self._server_info["nonce"])
options["sig"] = sig.decode()
if self._user_jwt_cb is not None:
jwt = self._user_jwt_cb()
options["jwt"] = jwt.decode()
elif self._public_nkey is not None:
options["nkey"] = self._public_nkey
# In case there is no password, then consider handle
# sending a token instead.
elif (
self.options["user"] is not None
and self.options["password"] is not None
):
options["user"] = self.options["user"]
options["pass"] = self.options["password"]
elif self.options["token"] is not None:
options["auth_token"] = self.options["token"]
elif self._current_server.uri.username is not None: # type: ignore[union-attr]
if self._current_server.uri.password is None: # type: ignore[union-attr]
options[
"auth_token"
] = self._current_server.uri.username # type: ignore[union-attr]
else:
options[
"user"
] = self._current_server.uri.username # type: ignore[union-attr]
options[
"pass"
] = self._current_server.uri.password # type: ignore[union-attr]
if self.options["name"] is not None:
options["name"] = self.options["name"]
if self.options["no_echo"] is not None:
options["echo"] = not self.options["no_echo"]
connect_opts = json.dumps(options, sort_keys=True)
return b"".join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_])
def _host_is_ip(self, connect_url: Optional[str]) -> bool:
try:
ipaddress.ip_address(connect_url)
return True
except:
return False
    def _parse_headers(self, headers: Optional[bytes]) -> Dict[str, str]:
        """
        Parse a raw NATS headers block into a dict.

        Handles both regular ``Key: Value`` headers and inline status
        responses such as ``NATS/1.0 404 No Messages``, which are exposed
        under the ``Status`` and ``Description`` keys.
        """
        hdrs: Dict[str, str] = {}
        if headers is None:
            return hdrs
        # Strip the `NATS/1.0\r\n` version line before handing the rest
        # to the RFC822-style parser.
        raw_headers = headers[len(NATS_HDR_LINE) :]
        parsed_hdrs = self._hdr_parser.parsebytes(raw_headers)
        # Check if it is an inline status message like:
        #
        # NATS/1.0 404 No Messages
        #
        if len(parsed_hdrs.items()) == 0:
            # Re-slice past the shorter `NATS/1.0 ` prefix (one byte less
            # than the CRLF version line) to reach the status code.
            l = headers[len(NATS_HDR_LINE) - 1 :]
            status = l[:STATUS_MSG_LEN]
            desc = l[STATUS_MSG_LEN + 1 : len(l) - CTRL_LEN - CTRL_LEN]
            hdrs[STATUS_HDR] = status.decode()
            hdrs[DESC_HDR] = desc.decode()
        else:
            for k, v in parsed_hdrs.items():
                hdrs[k] = v
        return hdrs
def _process_disconnect(self) -> None:
self._status = Client.DISCONNECTED
def _process_info(
self, info: ServerInfos, initial_connection: bool = False
) -> None:
if "connect_urls" in info:
if info["connect_urls"]:
connect_urls = []
for connect_url in info["connect_urls"]:
scheme = ""
if self._current_server.uri.scheme == "tls": # type: ignore[union-attr]
scheme = "tls"
else:
scheme = "nats"
uri = urlparse(f"{scheme}://{connect_url}")
srv = Srv(uri)
srv.discovered = True
# Check whether we should reuse the original hostname.
if (
"tls_required" in self._server_info
and self._server_info["tls_required"]
and self._host_is_ip(uri.hostname)
):
srv.tls_name = self._current_server.uri.hostname # type: ignore[union-attr]
# Filter for any similar server in the server pool already.
should_add = True
for s in self._server_pool:
if uri.netloc == s.uri.netloc:
should_add = False
if should_add:
connect_urls.append(srv)
if self.options["dont_randomize"] is not True:
shuffle(connect_urls)
for srv in connect_urls:
self._server_pool.append(srv)
if (
not initial_connection
and connect_urls
and self._discovered_server_cb
):
self._discovered_server_cb()
def _remove_sub(self, sid: int) -> None:
self._subs.pop(sid, None)
def _setup_nkeys_connect(self) -> None:
if self._user_credentials is not None:
self._setup_nkeys_jwt_connect()
else:
self._setup_nkeys_seed_connect()
    def _setup_nkeys_jwt_connect(self) -> None:
        """Install the JWT and nonce-signing callbacks from user credentials.

        ``self._user_credentials`` is either a ``(jwt_file, seed_file)``
        tuple or a single path to a combined ``.creds`` file.  Seeds are
        read into mutable bytearrays so they can be wiped after signing.
        """
        import nkeys
        import os
        creds = self._user_credentials
        if isinstance(creds, tuple) and len(creds) > 1:
            # Separate files: creds[0] holds the JWT, creds[1] the seed.
            def user_cb():
                contents = None
                with open(creds[0], "rb") as f:
                    contents = bytearray(os.fstat(f.fileno()).st_size)
                    f.readinto(contents)
                return contents
            self._user_jwt_cb = user_cb
            def sig_cb(nonce):
                seed = None
                with open(creds[1], "rb") as f:
                    seed = bytearray(os.fstat(f.fileno()).st_size)
                    f.readinto(seed)
                kp = nkeys.from_seed(seed)
                raw_signed = kp.sign(nonce.encode())
                sig = base64.b64encode(raw_signed)
                # Best effort attempt to clear from memory.
                kp.wipe()
                del kp
                del seed
                return sig
            self._signature_cb = sig_cb
        else:
            # Single .creds file containing both JWT and seed sections.
            # Define the functions to be able to sign things using nkeys.
            def user_cb():
                user_jwt = None
                with open(creds, "rb") as f:
                    while True:
                        line = bytearray(f.readline())
                        if b"BEGIN NATS USER JWT" in line:
                            user_jwt = bytearray(f.readline())
                            break
                # Remove trailing line break but reusing same memory view.
                return user_jwt[: len(user_jwt) - 1]
            self._user_jwt_cb = user_cb
            def sig_cb(nonce):
                user_seed = None
                with open(creds, "rb", buffering=0) as f:
                    for line in f:
                        # Detect line where the NKEY would start and end,
                        # then seek and read into a fixed bytearray that
                        # can be wiped.
                        if b"BEGIN USER NKEY SEED" in line:
                            nkey_start_pos = f.tell()
                            try:
                                next(f)
                            except StopIteration:
                                raise ErrInvalidUserCredentials()
                            nkey_end_pos = f.tell()
                            nkey_size = nkey_end_pos - nkey_start_pos - 1
                            f.seek(nkey_start_pos)
                            # Only gather enough bytes for the user seed
                            # into the pre allocated bytearray.
                            user_seed = bytearray(nkey_size)
                            f.readinto(user_seed)
                kp = nkeys.from_seed(user_seed)
                raw_signed = kp.sign(nonce.encode())
                sig = base64.b64encode(raw_signed)
                # Delete all state related to the keys.
                kp.wipe()
                del user_seed
                del kp
                return sig
            self._signature_cb = sig_cb
    def _setup_nkeys_seed_connect(self) -> None:
        """Load the nkeys seed file, derive the public key and the signer.

        The seed is read into a ``bytearray`` so it can be wiped right
        after computing the public key; the signing callback re-reads
        the seed on every nonce signature for the same reason.
        """
        import nkeys
        import os
        seed = None
        creds: str = self._nkeys_seed  # type: ignore[assignment]
        with open(creds, "rb") as f:
            seed = bytearray(os.fstat(f.fileno()).st_size)
            f.readinto(seed)  # type: ignore[attr-defined]
        kp = nkeys.from_seed(seed)
        self._public_nkey = kp.public_key.decode()
        # Wipe the key material as soon as the public key is extracted.
        kp.wipe()
        del kp
        del seed
        def sig_cb(nonce: str) -> bytes:
            seed = None
            with open(creds, "rb") as f:
                seed = bytearray(os.fstat(f.fileno()).st_size)
                f.readinto(seed)  # type: ignore[attr-defined]
            kp = nkeys.from_seed(seed)
            raw_signed = kp.sign(nonce.encode())
            sig = base64.b64encode(raw_signed)
            # Best effort attempt to clear from memory.
            kp.wipe()
            del kp
            del seed
            return sig
        self._signature_cb = sig_cb
def _setup_server_pool(self, connect_url: Union[str, List[str]]) -> None:
if type(connect_url) is str:
try:
if "nats://" in connect_url or "tls://" in connect_url:
# Closer to how the Go client handles this.
# e.g. nats://127.0.0.1:4222
uri = urlparse(connect_url)
elif ":" in connect_url:
# Expand the scheme for the user
# e.g. 127.0.0.1:4222
uri = urlparse(f"nats://{connect_url}")
else:
# Just use the endpoint with the default NATS port.
# e.g. demo.nats.io
uri = urlparse(f"nats://{connect_url}:4222")
# In case only endpoint with scheme was set.
# e.g. nats://demo.nats.io or localhost:
if uri.port is None:
uri = urlparse(f"nats://{uri.hostname}:4222")
except ValueError:
raise NatsError("nats: invalid connect url option")
if uri.hostname is None or uri.hostname == "none":
raise NatsError("nats: invalid hostname in connect url")
self._server_pool.append(Srv(uri))
elif type(connect_url) is list:
try:
for server in connect_url:
uri = urlparse(server)
self._server_pool.append(Srv(uri))
except ValueError:
raise NatsError("nats: invalid connect url option")
else:
raise NatsError("nats: invalid connect url option")
    async def _attempt_reconnect(self) -> None:
        """Reconnect loop run as its own task.

        Tears down the reader/ping/flusher tasks tied to the dead
        connection, then retries servers from the pool until a CONNECT
        handshake succeeds, replaying subscriptions on success.
        Completion is signalled through ``self._reconnection_task_future``.
        """
        if self._reading_task is not None and not self._reading_task.cancelled():
            self._reading_task.cancel()
        if (
            self._ping_interval_task is not None
            and not self._ping_interval_task.cancelled()
        ):
            self._ping_interval_task.cancel()
        if self._flusher_task is not None and not self._flusher_task.cancelled():
            self._flusher_task.cancel()
        if self._io_writer is not None:
            self._io_writer.close()
            try:
                await self._io_writer.wait_closed()
            except Exception as e:
                await self._error_cb(e)  # type: ignore[misc]
        self._err = None
        if self._disconnected_cb is not None:
            await self._disconnected_cb()
        if self.is_closed:
            return
        if "dont_randomize" not in self.options or not self.options["dont_randomize"]:
            shuffle(self._server_pool)
        # Create a future that the client can use to control waiting
        # on the reconnection attempts.
        self._reconnection_task_future = asyncio.Future()
        while True:
            try:
                # Try to establish a TCP connection to a server in
                # the cluster then send CONNECT command to it.
                await self._select_next_server()
                await self._process_connect_init()
                # Consider a reconnect to be done once CONNECT was
                # processed by the server successfully.
                self.stats["reconnects"] += 1
                # Reset reconnect attempts for this server
                # since have successfully connected.
                self._current_server.did_connect = True  # type: ignore[union-attr]
                self._current_server.reconnects = 0  # type: ignore[union-attr]
                # Replay all the subscriptions in case there were some.
                subs_to_remove = []
                for sid, sub in self._subs.items():
                    max_msgs = 0
                    if sub._max_msgs > 0:
                        # If we already hit the message limit, remove the subscription and don't resubscribe
                        if sub._received >= sub._max_msgs:
                            subs_to_remove.append(sid)
                            continue
                        max_msgs = sub._max_msgs - sub._received
                    sub_cmd = prot_command.sub_cmd(sub._subject, sub._queue, sid)
                    self._io_writer.write(sub_cmd)
                    if max_msgs > 0:
                        # Re-apply the remaining message limit via UNSUB.
                        unsub_cmd = prot_command.unsub_cmd(sid, max_msgs)
                        self._io_writer.write(unsub_cmd)
                for sid in subs_to_remove:
                    self._subs.pop(sid)
                await self._io_writer.drain()
                await self._flush_pending()
                self._status = Client.CONNECTED
                await self.flush()
                if self._reconnected_cb is not None:
                    await self._reconnected_cb()
                self._reconnection_task_future = None
                break
            except ErrNoServers as e:
                # Pool exhausted: give up and close the client.
                self._err = e
                await self.close()
                break
            except (OSError, NatsError, asyncio.TimeoutError) as e:
                self._err = e
                await self._error_cb(e)
                self._status = Client.RECONNECTING
                self._current_server.last_attempt = (
                    time.monotonic()
                )
                self._current_server.reconnects += 1
            except asyncio.CancelledError:
                break
        if (
            self._reconnection_task_future is not None
            and not self._reconnection_task_future.cancelled()
        ):
            self._reconnection_task_future.set_result(True)
    async def _close(self, status: int, do_cbs: bool = True) -> None:
        """Terminate the connection, cancel helper tasks and run callbacks.

        ``status`` becomes the final client status; when ``do_cbs`` is
        true the disconnected/closed callbacks are invoked.  Pending
        writes are flushed best-effort before the transport is closed.
        """
        if self.is_closed:
            self._status = status
            return
        self._status = Client.CLOSED
        await self._flush_pending()
        if self._reading_task is not None and not self._reading_task.cancelled():
            self._reading_task.cancel()
        if (
            self._ping_interval_task is not None
            and not self._ping_interval_task.cancelled()
        ):
            self._ping_interval_task.cancel()
        if self._flusher_task is not None and not self._flusher_task.cancelled():
            self._flusher_task.cancel()
        if self._reconnection_task is not None and not self._reconnection_task.done():
            self._reconnection_task.cancel()
            # Give the reconnection task a bounded window to finish.
            try:
                if (
                    self._reconnection_task_future is not None
                    and not self._reconnection_task_future.cancelled()
                ):
                    await asyncio.wait_for(
                        self._reconnection_task_future,
                        self.options["reconnect_time_wait"],
                    )
            except (asyncio.CancelledError, asyncio.TimeoutError):
                pass
        # Yield once so cancelled tasks get a chance to unwind.
        await asyncio.sleep(0)
        if self._current_server is not None:
            # Best-effort flush of anything still buffered in the client.
            if self._pending_data_size > 0:
                self._io_writer.writelines(self._pending[:])
                self._pending = []
                self._pending_data_size = 0
                await self._io_writer.drain()
        for sub in self._subs.values():
            if sub._wait_for_msgs_task and not sub._wait_for_msgs_task.done():
                sub._wait_for_msgs_task.cancel()
        self._subs.clear()
        if self._io_writer is not None:
            self._io_writer.close()
            try:
                await self._io_writer.wait_closed()
            except Exception as e:
                await self._error_cb(e)
        if do_cbs:
            if self._disconnected_cb is not None:
                await self._disconnected_cb()
            if self._closed_cb is not None:
                await self._closed_cb()
        self._client_id = None
    async def _flusher(self) -> None:
        """Background task draining the pending write buffer.

        Wakes whenever ``_flush_pending`` puts a token on the flush queue
        and writes the accumulated protocol commands to the transport.
        """
        while True:
            if not self.is_connected or self.is_connecting:
                break
            try:
                # Block until someone kicks the flusher.
                await self._flush_queue.get()
                if self._pending_data_size > 0:
                    self._io_writer.writelines(
                        self._pending[:]
                    )
                    self._pending = []
                    self._pending_data_size = 0
                    await self._io_writer.drain()
            except OSError as e:
                # Transport error: report it and hand off to reconnect logic.
                await self._error_cb(e)
                await self._process_op_err(e)
                break
            except asyncio.CancelledError:
                break
async def _flush_pending(self) -> None:
try:
await self._flush_queue.put(None)
if not self.is_connected:
return
except asyncio.CancelledError:
pass
async def _init_request_sub(self) -> None:
self._resp_map = {}
self._resp_sub_prefix = INBOX_PREFIX[:]
self._resp_sub_prefix.extend(self._nuid.next())
self._resp_sub_prefix.extend(b".")
resp_mux_subject = self._resp_sub_prefix[:]
resp_mux_subject.extend(b"*")
await self.subscribe(resp_mux_subject.decode(), cb=self._request_sub_callback)
async def _ping_interval(self) -> None:
while True:
await asyncio.sleep(self.options["ping_interval"])
if not self.is_connected:
continue
try:
self._pings_outstanding += 1
if self._pings_outstanding > self.options["max_outstanding_pings"]:
await self._process_op_err(
ErrStaleConnection
)
return
await self._send_ping()
except asyncio.CancelledError:
break
async def _process_connect_init(self) -> None:
self._status = Client.CONNECTING
connection_completed = self._io_reader.readline()
info_line = await asyncio.wait_for(
connection_completed, self.options["connect_timeout"]
)
if INFO_OP not in info_line:
raise NatsError(
"nats: empty response from server when expecting INFO message"
)
_, info = info_line.split(INFO_OP + _SPC_, 1)
try:
srv_info: ServerInfos = json.loads(info.decode())
except:
raise NatsError("nats: info message, json parse error")
self._server_info = srv_info
self._process_info(srv_info, initial_connection=True)
if "max_payload" in self._server_info:
self._max_payload = self._server_info["max_payload"]
if "client_id" in self._server_info:
self._client_id = self._server_info["client_id"]
if "tls_required" in self._server_info and self._server_info["tls_required"]:
ssl_context = None
if "tls" in self.options:
ssl_context = self.options.get("tls")
elif self._current_server.uri.scheme == "tls":
ssl_context = ssl.create_default_context()
else:
raise NatsError("nats: no ssl context provided")
hostname = None
if "tls_hostname" in self.options:
hostname = self.options["tls_hostname"]
elif self._current_server.tls_name is not None:
hostname = self._current_server.tls_name
else:
hostname = self._current_server.uri.hostname
await self._io_writer.drain()
if sys.version_info.minor >= 7:
reader = asyncio.StreamReader()
protocol = asyncio.StreamReaderProtocol(reader)
transport_future = asyncio.get_event_loop().start_tls(
self._io_writer.transport,
protocol,
ssl_context,
server_hostname=hostname,
)
transport = await asyncio.wait_for(
transport_future, self.options["connect_timeout"]
)
writer = asyncio.StreamWriter(
transport, protocol, reader, asyncio.get_event_loop()
)
self._io_reader, self._io_writer = reader, writer
else:
transport = self._io_writer.transport
sock = transport.get_extra_info("socket")
if not sock:
raise NatsError("nats: unable to get socket")
connection_future = asyncio.open_connection(
limit=DEFAULT_BUFFER_SIZE,
sock=sock,
ssl=ssl_context,
server_hostname=hostname,
)
self._io_reader, self._io_writer = await asyncio.wait_for(
connection_future, self.options["connect_timeout"]
)
# Refresh state of parser upon reconnect.
if self.is_reconnecting:
self._ps.reset()
connect_cmd = self._connect_command()
self._io_writer.write(connect_cmd) # type: ignore[union-attr]
await self._io_writer.drain() # type: ignore[union-attr]
if self.options["verbose"]:
future = self._io_reader.readline() # type: ignore[union-attr]
next_op = await asyncio.wait_for(future, self.options["connect_timeout"])
if OK_OP in next_op:
# Do nothing
pass
elif ERR_OP in next_op:
err_line = next_op.decode()
_, err_msg = err_line.split(" ", 1)
# FIXME: Maybe handling could be more special here,
# checking for ErrAuthorization for example.
# await self._process_err(err_msg)
raise NatsError("nats: " + err_msg.rstrip("\r\n"))
self._io_writer.write(PING_PROTO) # type: ignore[union-attr]
await self._io_writer.drain() # type: ignore[union-attr]
future = self._io_reader.readline() # type: ignore[union-attr]
next_op = await asyncio.wait_for(future, self.options["connect_timeout"])
if PONG_PROTO in next_op:
self._status = Client.CONNECTED
elif ERR_OP in next_op:
err_line = next_op.decode()
_, err_msg = err_line.split(" ", 1)
# FIXME: Maybe handling could be more special here,
# checking for ErrAuthorization for example.
# await self._process_err(err_msg)
raise NatsError("nats: " + err_msg.rstrip("\r\n"))
if PONG_PROTO in next_op:
self._status = Client.CONNECTED
self._reading_task = asyncio.get_event_loop().create_task(self._read_loop())
self._pongs = []
self._pings_outstanding = 0
self._ping_interval_task = asyncio.get_event_loop().create_task(
self._ping_interval()
)
# Task for kicking the flusher queue
self._flusher_task = asyncio.get_event_loop().create_task(self._flusher())
    async def _process_err(self, err_msg: Sequence[bytes]) -> None:
        """Handle an -ERR protocol message from the server.

        Stale connections trigger reconnect logic, permissions errors
        only invoke the error callback, and any other error closes the
        connection (mirroring other NATS clients).
        """
        if STALE_CONNECTION in err_msg:
            await self._process_op_err(ErrStaleConnection)  # type: ignore[arg-type]
            return
        if AUTHORIZATION_VIOLATION in err_msg:
            self._err = ErrAuthorization  # type: ignore[assignment]
        else:
            m = b"nats: " + err_msg[0]
            err = NatsError(m.decode())
            self._err = err
            if PERMISSIONS_ERR in m:
                # Permissions violations do not close the connection.
                await self._error_cb(err)  # type: ignore[misc]
                return
        do_cbs = False
        if not self.is_connecting:
            do_cbs = True
        # FIXME: Some errors such as 'Invalid Subscription'
        # do not cause the server to close the connection.
        # For now we handle similar as other clients and close.
        asyncio.get_event_loop().create_task(self._close(Client.CLOSED, do_cbs))
    async def _process_msg(
        self,
        sid: int,
        subject: bytes,
        reply: bytes,
        data: bytes,
        headers: Optional[bytes],
    ) -> None:
        """Deliver an incoming MSG/HMSG to its subscription.

        Updates stats, enforces per-subscription message limits, resolves
        old-style request futures directly, and otherwise enqueues the
        message — dropping it (as a slow-consumer error) when a pending
        queue or byte limit is exceeded.
        """
        payload_size = len(data)
        self.stats["in_msgs"] += 1
        self.stats["in_bytes"] += payload_size
        sub = self._subs.get(sid)
        if not sub:
            # Skip in case no subscription present.
            return
        sub._received += 1
        if sub._max_msgs > 0 and sub._received >= sub._max_msgs:
            # Enough messages so can throwaway subscription now.
            self._subs.pop(sid, None)
            sub._stop_processing()
        try:
            hdrs = self._parse_headers(headers)
        except Exception as e:
            # Malformed headers: report the error and drop the message.
            await self._error_cb(e)  # type: ignore[misc]
            return
        msg = self._build_message(subject, reply, data, hdrs)
        # Check if it is an old style request.
        if sub._future:
            if sub._future.cancelled():
                # Already gave up, nothing to do.
                return
            sub._future.set_result(msg)
            return
        # Let subscription wait_for_msgs coroutine process the messages,
        # but in case sending to the subscription task would block,
        # then consider it to be an slow consumer and drop the message.
        try:
            sub._pending_size += payload_size
            # allow setting pending_bytes_limit to 0 to disable
            if (
                sub._pending_bytes_limit > 0
                and sub._pending_size >= sub._pending_bytes_limit
            ):
                # Subtract the bytes since the message will be thrown away
                # so it would not be pending data.
                sub._pending_size -= payload_size
                await self._error_cb(
                    ErrSlowConsumer(subject=subject, sid=sid)
                )  # type: ignore[misc]
                return
            sub._pending_queue.put_nowait(msg)
        except asyncio.QueueFull:
            await self._error_cb(
                ErrSlowConsumer(subject=subject, sid=sid)
            )  # type: ignore[misc]
    async def _process_op_err(self, e: Exception) -> None:
        """React to a connection-level error.

        Starts the reconnect task when reconnecting is allowed and the
        client was connected; otherwise records the error and closes.
        """
        if self.is_connecting or self.is_closed or self.is_reconnecting:
            return
        if self.options["allow_reconnect"] and self.is_connected:
            self._status = Client.RECONNECTING
            # Discard any partially parsed protocol state.
            self._ps.reset()
            if (
                self._reconnection_task is not None
                and not self._reconnection_task.cancelled()
            ):
                # Cancel the previous task in case it may still be running.
                self._reconnection_task.cancel()
            self._reconnection_task = asyncio.get_event_loop().create_task(
                self._attempt_reconnect()
            )
        else:
            self._process_disconnect()
            self._err = e
            await self._close(Client.CLOSED, True)
async def _process_ping(self) -> None:
await self._send_command(PONG)
await self._flush_pending()
async def _process_pong(self) -> None:
if len(self._pongs) > 0:
future = self._pongs.pop(0)
future.set_result(True)
self._pongs_received += 1
self._pings_outstanding = 0
    async def _read_loop(self) -> None:
        """Background task feeding bytes from the transport to the parser.

        Exits when the client closes or starts reconnecting; EOF and
        I/O or protocol errors are routed through ``_process_op_err``.
        """
        while True:
            try:
                should_bail = self.is_closed or self.is_reconnecting
                if should_bail or self._io_reader is None:
                    break
                if self.is_connected and self._io_reader.at_eof():
                    # Server went away without a proper close.
                    await self._error_cb(
                        ErrStaleConnection  # type: ignore[misc, arg-type]
                    )
                    await self._process_op_err(
                        ErrStaleConnection  # type: ignore[arg-type]
                    )
                    break
                b = await self._io_reader.read(DEFAULT_BUFFER_SIZE)
                await self._ps.parse(b)
            except ErrProtocol:
                await self._process_op_err(ErrProtocol)  # type: ignore[arg-type]
                break
            except OSError as e:
                await self._process_op_err(e)
                break
            except asyncio.CancelledError:
                break
            # except asyncio.InvalidStateError:
            #     pass
async def _request_new_style(
self,
subject: str,
payload: bytes,
timeout: float = 0.5,
headers: Optional[Dict[str, str]] = None,
) -> Msg:
if self.is_draining_pubs:
raise ErrConnectionDraining
if not self._resp_sub_prefix:
await self._init_request_sub()
# Use a new NUID for the token inbox and then use the future.
token = self._nuid.next()
inbox = self._resp_sub_prefix[:] # type: ignore[index]
inbox.extend(token)
future: Future[Msg] = asyncio.Future()
self._resp_map[token.decode()] = future
await self.publish(subject, payload, reply=inbox.decode(), headers=headers)
# Wait for the response or give up on timeout.
try:
msg = await asyncio.wait_for(future, timeout)
return msg
except asyncio.TimeoutError:
self._resp_map.pop(token.decode())
future.cancel()
raise ErrTimeout
async def _request_old_style(
self, subject: str, payload: bytes, timeout: float = 0.5
) -> Msg:
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next())
inbox = next_inbox.decode()
future: Future[Msg] = asyncio.Future()
sub = await self.subscribe(inbox, future=future, max_msgs=1)
await sub.unsubscribe(limit=1)
await self.publish(subject, payload, reply=inbox)
try:
msg = await asyncio.wait_for(future, timeout)
return msg
except asyncio.TimeoutError:
await sub.unsubscribe()
future.cancel()
raise ErrTimeout
async def _request_sub_callback(self, msg: Msg) -> None:
token = msg.subject[INBOX_PREFIX_LEN:]
try:
fut = self._resp_map.get(token)
if not fut:
return
fut.set_result(msg)
self._resp_map.pop(token, None)
except (asyncio.CancelledError, asyncio.InvalidStateError):
# Request may have timed out already so remove the entry
self._resp_map.pop(token, None)
    async def _select_next_server(self) -> None:
        """Pop servers from the pool until a TCP connection succeeds.

        Honors ``max_reconnect_attempts`` (discarding exhausted servers)
        and the ``reconnect_time_wait`` backoff.  On success stores the
        new reader/writer pair; raises ``ErrNoServers`` once the pool is
        empty.
        """
        while True:
            if len(self._server_pool) == 0:
                self._current_server = None
                raise ErrNoServers
            now = time.monotonic()
            s = self._server_pool.pop(0)
            if self.options["max_reconnect_attempts"] > 0:
                if s.reconnects > self.options["max_reconnect_attempts"]:
                    # Discard server since already tried to reconnect too many times
                    continue
                # Not yet exceeded max_reconnect_attempts so can still use
                # this server in the future.
                self._server_pool.append(s)
            if (
                s.last_attempt is not None
                and now < s.last_attempt + self.options["reconnect_time_wait"]
            ):
                # Backoff connecting to server if we attempted recently.
                await asyncio.sleep(self.options["reconnect_time_wait"])
            try:
                s.last_attempt = time.monotonic()
                connection_future = asyncio.open_connection(
                    s.uri.hostname, s.uri.port, limit=DEFAULT_BUFFER_SIZE
                )
                r, w = await asyncio.wait_for(
                    connection_future, self.options["connect_timeout"]
                )
                self._current_server = s
                # We keep a reference to the initial transport we used when
                # establishing the connection in case we later upgrade to TLS
                # after getting the first INFO message. This is in order to
                # prevent the GC closing the socket after we send CONNECT
                # and replace the transport.
                #
                # See https://github.com/nats-io/asyncio-nats/issues/43
                self._bare_io_reader = self._io_reader = r
                self._bare_io_writer = self._io_writer = w
                break
            except Exception as e:
                # Connection failed: record the attempt and try the next one.
                s.last_attempt = time.monotonic()
                s.reconnects += 1
                self._err = e
                await self._error_cb(e)  # type: ignore[misc]
                continue
async def _send_command(self, cmd: bytes, priority: bool = False) -> None:
if priority:
self._pending.insert(0, cmd)
else:
self._pending.append(cmd)
self._pending_data_size += len(cmd)
if self._pending_data_size > self.options["pending_size"]:
await self._flush_pending()
async def _send_ping(self, future: Optional["Future[bool]"] = None) -> None:
if future is None:
future = asyncio.Future()
self._pongs.append(future)
self._io_writer.write(PING_PROTO) # type: ignore[union-attr]
await self._flush_pending()
async def _send_publish(
self,
subject: str,
reply: str,
payload: bytes,
payload_size: int,
headers: Optional[Dict[str, str]],
) -> None:
if subject == "":
# Avoid sending messages with empty replies.
raise ErrBadSubject
pub_cmd = None
if headers is None:
pub_cmd = prot_command.pub_cmd(subject, reply, payload)
else:
hdr = bytearray()
hdr.extend(NATS_HDR_LINE)
for k, v in headers.items():
hdr.extend(k.encode())
hdr.extend(b": ")
hdr.extend(v.encode())
hdr.extend(_CRLF_)
hdr.extend(_CRLF_)
pub_cmd = prot_command.hpub_cmd(subject, reply, hdr, payload)
self.stats["out_msgs"] += 1
self.stats["out_bytes"] += payload_size
await self._send_command(pub_cmd)
if self._flush_queue.empty(): # type: ignore[union-attr]
await self._flush_pending()
async def _send_subscribe(self, sub: Subscription) -> None:
sub_cmd = prot_command.sub_cmd(sub._subject, sub._queue, sub._id)
await self._send_command(sub_cmd)
await self._flush_pending()
async def _send_unsubscribe(self, sid: int, limit: int = 1) -> None:
unsub_cmd = prot_command.unsub_cmd(sid, limit)
await self._send_command(unsub_cmd)
await self._flush_pending()
async def __aenter__(self) -> "Client":
return self
async def __aexit__(self, *exc_info: Any) -> None:
await self._close(Client.CLOSED, do_cbs=True)
| true | true |
f7fbb94e5b23812820f8ad951984e63b5d24e2ad | 15,002 | py | Python | numpy/core/tests/test_deprecations.py | hmeine/numpy | ddd02d50e8cd06d84deecd3b2943813be20b91b8 | [
"BSD-3-Clause"
] | null | null | null | numpy/core/tests/test_deprecations.py | hmeine/numpy | ddd02d50e8cd06d84deecd3b2943813be20b91b8 | [
"BSD-3-Clause"
] | null | null | null | numpy/core/tests/test_deprecations.py | hmeine/numpy | ddd02d50e8cd06d84deecd3b2943813be20b91b8 | [
"BSD-3-Clause"
] | null | null | null | """
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
from __future__ import division, absolute_import, print_function
import sys
import operator
import warnings
from nose.plugins.skip import SkipTest
import numpy as np
from numpy.testing import (dec, run_module_suite, assert_raises,
assert_warns, assert_array_equal)
class _DeprecationTestCase(object):
    """Base class for deprecation tests.

    Subclasses set ``message`` to the (``re.match``-style) start of the
    expected DeprecationWarning message and call ``assert_deprecated`` /
    ``assert_not_deprecated`` from their test methods.
    """
    # Just as warning: warnings uses re.match, so the start of this message
    # must match.
    message = ''

    def setUp(self):
        self.warn_ctx = warnings.catch_warnings(record=True)
        self.log = self.warn_ctx.__enter__()

        # Do *not* ignore other DeprecationWarnings. Ignoring warnings
        # can give very confusing results because of
        # http://bugs.python.org/issue4180 and it is probably simplest to
        # try to keep the tests cleanly giving only the right warning type.
        # (While checking them set to "error" those are ignored anyway)

        # We still have them show up, because otherwise they would be raised
        warnings.filterwarnings("always", category=DeprecationWarning)
        warnings.filterwarnings("always", message=self.message,
                                category=DeprecationWarning)

    def tearDown(self):
        self.warn_ctx.__exit__()

    def assert_deprecated(self, function, num=1, ignore_others=False,
                          function_fails=False,
                          exceptions=(DeprecationWarning,), args=(), kwargs={}):
        """Test if DeprecationWarnings are given and raised.

        This first checks if the function when called gives `num`
        DeprecationWarnings, after that it tries to raise these
        DeprecationWarnings and compares them with `exceptions`.
        The exceptions can be different for cases where this code path
        is simply not anticipated and the exception is replaced.

        Parameters
        ----------
        function : callable
            The function to test
        num : int
            Number of DeprecationWarnings to expect. This should normally be 1.
        ignore_others : bool
            Whether warnings of the wrong type should be ignored (note that
            the message is not checked)
        function_fails : bool
            If the function would normally fail, setting this will check for
            warnings inside a try/except block.
        exceptions : Exception or tuple of Exceptions
            Exception to expect when turning the warnings into an error.
            The default checks for DeprecationWarnings. If exceptions is
            empty the function is expected to run successfully.
        args : tuple
            Arguments for `function`
        kwargs : dict
            Keyword arguments for `function`
        """
        # reset the log
        self.log[:] = []

        try:
            function(*args, **kwargs)
        except (Exception if function_fails else tuple()):
            pass

        # just in case, clear the registry
        num_found = 0
        for warning in self.log:
            if warning.category is DeprecationWarning:
                num_found += 1
            elif not ignore_others:
                raise AssertionError("expected DeprecationWarning but %s given"
                                     % warning.category)
        if num_found != num:
            # FIX: report num_found (the DeprecationWarnings counted), not
            # len(self.log) which also includes ignored other warnings.
            raise AssertionError("%i warnings found but %i expected"
                                 % (num_found, num))

        with warnings.catch_warnings():
            warnings.filterwarnings("error", message=self.message,
                                    category=DeprecationWarning)
            try:
                function(*args, **kwargs)
                if exceptions != tuple():
                    raise AssertionError("No error raised during function call")
            except exceptions:
                if exceptions == tuple():
                    raise AssertionError("Error raised during function call")

    def assert_not_deprecated(self, function, args=(), kwargs={}):
        """Test that no DeprecationWarning is given and no error raised.

        This is just a shorthand for:

        self.assert_deprecated(function, num=0, ignore_others=True,
                               exceptions=tuple(), args=args, kwargs=kwargs)
        """
        self.assert_deprecated(function, num=0, ignore_others=True,
                               exceptions=tuple(), args=args, kwargs=kwargs)
class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase):
    """
    These test that ``DeprecationWarning`` is given when you try to use
    non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
    and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
    After deprecation, changes need to be done inside conversion_utils.c
    in PyArray_PyIntAsIntp and possibly PyArray_IntpConverter.
    In iterators.c the function slice_GetIndices could be removed in favor
    of its python equivalent and in mapping.c the function _tuple_of_integers
    can be simplified (if ``np.array([1]).__index__()`` is also deprecated).
    As for the deprecation time-frame: via Ralf Gommers,
    "Hard to put that as a version number, since we don't know if the
    version after 1.8 will be 6 months or 2 years after. I'd say 2
    years is reasonable."
    I interpret this to mean 2 years after the 1.8 release. Possibly
    giving a PendingDeprecationWarning before that (which is visible
    by default)
    """
    # Start of the warning emitted by PyArray_PyIntAsIntp for float input.
    message = "using a non-integer number instead of an integer " \
              "will result in an error in the future"
    def test_indexing(self):
        # Scalar float indices in every axis position must warn; they
        # raise IndexError once the deprecation becomes an error.
        a = np.array([[[5]]])
        def assert_deprecated(*args, **kwargs):
            self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
        assert_deprecated(lambda: a[0.0])
        assert_deprecated(lambda: a[0, 0.0])
        assert_deprecated(lambda: a[0.0, 0])
        assert_deprecated(lambda: a[0.0,:])
        assert_deprecated(lambda: a[:, 0.0])
        assert_deprecated(lambda: a[:, 0.0,:])
        assert_deprecated(lambda: a[0.0,:,:])
        assert_deprecated(lambda: a[0, 0, 0.0])
        assert_deprecated(lambda: a[0.0, 0, 0])
        assert_deprecated(lambda: a[0, 0.0, 0])
        assert_deprecated(lambda: a[-1.4])
        assert_deprecated(lambda: a[0, -1.4])
        assert_deprecated(lambda: a[-1.4, 0])
        assert_deprecated(lambda: a[-1.4,:])
        assert_deprecated(lambda: a[:, -1.4])
        assert_deprecated(lambda: a[:, -1.4,:])
        assert_deprecated(lambda: a[-1.4,:,:])
        assert_deprecated(lambda: a[0, 0, -1.4])
        assert_deprecated(lambda: a[-1.4, 0, 0])
        assert_deprecated(lambda: a[0, -1.4, 0])
        # Test that the slice parameter deprecation warning doesn't mask
        # the scalar index warning.
        assert_deprecated(lambda: a[0.0:, 0.0], num=2)
        assert_deprecated(lambda: a[0.0:, 0.0,:], num=2)
    def test_valid_indexing(self):
        # Integer-array and slice indexing must stay warning-free.
        a = np.array([[[5]]])
        assert_not_deprecated = self.assert_not_deprecated
        assert_not_deprecated(lambda: a[np.array([0])])
        assert_not_deprecated(lambda: a[[0, 0]])
        assert_not_deprecated(lambda: a[:, [0, 0]])
        assert_not_deprecated(lambda: a[:, 0,:])
        assert_not_deprecated(lambda: a[:,:,:])
    def test_slicing(self):
        # Float start/stop/step in slices must each produce a warning;
        # ``num`` counts one warning per float slice component.
        a = np.array([[5]])
        def assert_deprecated(*args, **kwargs):
            self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
        # start as float.
        assert_deprecated(lambda: a[0.0:])
        assert_deprecated(lambda: a[0:, 0.0:2])
        assert_deprecated(lambda: a[0.0::2, :0])
        assert_deprecated(lambda: a[0.0:1:2,:])
        assert_deprecated(lambda: a[:, 0.0:])
        # stop as float.
        assert_deprecated(lambda: a[:0.0])
        assert_deprecated(lambda: a[:0, 1:2.0])
        assert_deprecated(lambda: a[:0.0:2, :0])
        assert_deprecated(lambda: a[:0.0,:])
        assert_deprecated(lambda: a[:, 0:4.0:2])
        # step as float.
        assert_deprecated(lambda: a[::1.0])
        assert_deprecated(lambda: a[0:, :2:2.0])
        assert_deprecated(lambda: a[1::4.0, :0])
        assert_deprecated(lambda: a[::5.0,:])
        assert_deprecated(lambda: a[:, 0:4:2.0])
        # mixed.
        assert_deprecated(lambda: a[1.0:2:2.0], num=2)
        assert_deprecated(lambda: a[1.0::2.0], num=2)
        assert_deprecated(lambda: a[0:, :2.0:2.0], num=2)
        assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2)
        assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3)
        assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3)
        # should still get the DeprecationWarning if step = 0.
        assert_deprecated(lambda: a[::0.0], function_fails=True)
    def test_valid_slicing(self):
        # Plain integer slices must stay warning-free.
        a = np.array([[[5]]])
        assert_not_deprecated = self.assert_not_deprecated
        assert_not_deprecated(lambda: a[::])
        assert_not_deprecated(lambda: a[0:])
        assert_not_deprecated(lambda: a[:2])
        assert_not_deprecated(lambda: a[0:2])
        assert_not_deprecated(lambda: a[::2])
        assert_not_deprecated(lambda: a[1::2])
        assert_not_deprecated(lambda: a[:2:2])
        assert_not_deprecated(lambda: a[1:2:2])
    def test_non_integer_argument_deprecations(self):
        # Float shape entries / axis arguments also go through
        # PyArray_PyIntAsIntp and must warn.
        a = np.array([[5]])
        self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2)
        self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1)))
        self.assert_deprecated(np.take, args=(a, [0], 1.))
        self.assert_deprecated(np.take, args=(a, [0], np.float64(1.)))
    def test_non_integer_sequence_multiplication(self):
        # Numpy scalar sequence multiply should not work with non-integers
        def mult(a, b):
            return a * b
        self.assert_deprecated(mult, args=([1], np.float_(3)))
        self.assert_not_deprecated(mult, args=([1], np.int_(3)))
class TestBooleanArgumentDeprecation(_DeprecationTestCase):
    """This tests that using a boolean as integer argument/indexing is
    deprecated.
    This should be kept in sync with TestFloatNonIntegerArgumentDeprecation
    and like it is handled in PyArray_PyIntAsIntp.
    """
    # Start of the warning emitted for boolean-as-integer usage.
    message = "using a boolean instead of an integer " \
              "will result in an error in the future"
    def test_bool_as_int_argument(self):
        a = np.array([[[1]]])
        self.assert_deprecated(np.reshape, args=(a, (True, -1)))
        self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1)))
        # Note that operator.index(np.array(True)) does not work, a boolean
        # array is thus also deprecated, but not with the same message:
        assert_raises(TypeError, operator.index, np.array(True))
        self.assert_deprecated(np.take, args=(a, [0], False))
        # Each boolean slice component warns once, hence num=3.
        self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3)
        self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError)
        self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError)
class TestArrayToIndexDeprecation(_DeprecationTestCase):
    """This tests that creating an an index from an array is deprecated
    if the array is not 0d.
    This can probably be deprecated somewhat faster then the integer
    deprecations. The deprecation period started with NumPy 1.8.
    For deprecation this needs changing of array_index in number.c
    """
    # Regex start of the ndim > 0 conversion warning.
    message = "converting an array with ndim \> 0 to an index will result " \
              "in an error in the future"
    def test_array_to_index_deprecation(self):
        # This drops into the non-integer deprecation, which is ignored here,
        # so no exception is expected. The raising is effectively tested above.
        a = np.array([[[1]]])
        self.assert_deprecated(operator.index, args=(np.array([1]),))
        self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=())
        self.assert_deprecated(np.take, args=(a, [0], a), exceptions=())
        # Check slicing. Normal indexing checks arrays specifically.
        self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3)
class TestNonIntegerArrayLike(_DeprecationTestCase):
    """Tests that array likes, i.e. lists give a deprecation warning
    when they cannot be safely cast to an integer.
    """
    message = "non integer \(and non boolean\) array-likes will not be " \
              "accepted as indices in the future"
    def test_basic(self):
        a = np.arange(10)
        # Float and string lists cannot be safely cast to integer indices.
        self.assert_deprecated(a.__getitem__, args=([0.5, 1.5],),
                               exceptions=IndexError)
        self.assert_deprecated(a.__getitem__, args=((['1', '2'],),),
                               exceptions=IndexError)
        # An empty list is a valid (empty) integer index.
        self.assert_not_deprecated(a.__getitem__, ([],))
    def test_boolean_futurewarning(self):
        a = np.arange(10)
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            # Boolean lists get a FutureWarning (future boolean indexing).
            assert_warns(FutureWarning, a.__getitem__, [True])
            # Unfortunatly, the deprecation warning takes precedence:
            #assert_warns(FutureWarning, a.__getitem__, True)
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            assert_raises(FutureWarning, a.__getitem__, [True])
            #assert_raises(FutureWarning, a.__getitem__, True)
class TestMultipleEllipsisDeprecation(_DeprecationTestCase):
    """Using more than one Ellipsis in a single index tuple is deprecated."""
    message = "an index can only have a single Ellipsis \(`...`\); replace " \
              "all but one with slices \(`:`\)."
    def test_basic(self):
        a = np.arange(10)
        self.assert_deprecated(a.__getitem__, args=((Ellipsis, Ellipsis),))
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            # Just check that this works:
            b = a[...,...]
            assert_array_equal(a, b)
            # Three ellipses cannot be interpreted at all and must raise.
            assert_raises(IndexError, a.__getitem__, ((Ellipsis, ) * 3,))
class TestBooleanSubtractDeprecations(_DeprecationTestCase):
    """Test deprecation of boolean `-`. While + and * are well
    defined, - is not and even a corrected form seems to have
    no real uses.
    The deprecation process was started in NumPy 1.9.
    """
    message = r"numpy boolean .* \(the .* `-` operator\) is deprecated, " \
              "use the bitwise"
    def test_operator_deprecation(self):
        # Both boolean arrays and boolean scalars are covered.
        array = np.array([True])
        generic = np.bool_(True)
        # Minus operator/subtract ufunc:
        self.assert_deprecated(operator.sub, args=(array, array))
        self.assert_deprecated(operator.sub, args=(generic, generic))
        # Unary minus/negative ufunc:
        self.assert_deprecated(operator.neg, args=(array,))
        self.assert_deprecated(operator.neg, args=(generic,))
if __name__ == "__main__":
    # Allow running this test file directly through the nose test runner.
    run_module_suite()
| 40.436658 | 88 | 0.628716 | from __future__ import division, absolute_import, print_function
import sys
import operator
import warnings
from nose.plugins.skip import SkipTest
import numpy as np
from numpy.testing import (dec, run_module_suite, assert_raises,
assert_warns, assert_array_equal)
class _DeprecationTestCase(object):
message = ''
def setUp(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.filterwarnings("always", message=self.message,
category=DeprecationWarning)
def tearDown(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
function_fails=False,
exceptions=(DeprecationWarning,), args=(), kwargs={}):
self.log[:] = []
try:
function(*args, **kwargs)
except (Exception if function_fails else tuple()):
pass
num_found = 0
for warning in self.log:
if warning.category is DeprecationWarning:
num_found += 1
elif not ignore_others:
raise AssertionError("expected DeprecationWarning but %s given"
% warning.category)
if num_found != num:
raise AssertionError("%i warnings found but %i expected"
% (len(self.log), num))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
category=DeprecationWarning)
try:
function(*args, **kwargs)
if exceptions != tuple():
raise AssertionError("No error raised during function call")
except exceptions:
if exceptions == tuple():
raise AssertionError("Error raised during function call")
def assert_not_deprecated(self, function, args=(), kwargs={}):
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase):
    """Tests that float (non-integer) indices, slice bounds and integer
    arguments (shape entries, axis numbers) are deprecated.
    """
    message = "using a non-integer number instead of an integer " \
              "will result in an error in the future"
    def test_indexing(self):
        a = np.array([[[5]]])
        # Local wrapper: float indexing additionally raises IndexError once
        # warnings are escalated to errors.
        def assert_deprecated(*args, **kwargs):
            self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
        assert_deprecated(lambda: a[0.0])
        assert_deprecated(lambda: a[0, 0.0])
        assert_deprecated(lambda: a[0.0, 0])
        assert_deprecated(lambda: a[0.0,:])
        assert_deprecated(lambda: a[:, 0.0])
        assert_deprecated(lambda: a[:, 0.0,:])
        assert_deprecated(lambda: a[0.0,:,:])
        assert_deprecated(lambda: a[0, 0, 0.0])
        assert_deprecated(lambda: a[0.0, 0, 0])
        assert_deprecated(lambda: a[0, 0.0, 0])
        assert_deprecated(lambda: a[-1.4])
        assert_deprecated(lambda: a[0, -1.4])
        assert_deprecated(lambda: a[-1.4, 0])
        assert_deprecated(lambda: a[-1.4,:])
        assert_deprecated(lambda: a[:, -1.4])
        assert_deprecated(lambda: a[:, -1.4,:])
        assert_deprecated(lambda: a[-1.4,:,:])
        assert_deprecated(lambda: a[0, 0, -1.4])
        assert_deprecated(lambda: a[-1.4, 0, 0])
        assert_deprecated(lambda: a[0, -1.4, 0])
        # the scalar index warning.
        assert_deprecated(lambda: a[0.0:, 0.0], num=2)
        assert_deprecated(lambda: a[0.0:, 0.0,:], num=2)
    def test_valid_indexing(self):
        # Integer arrays, integer lists and slices stay warning-free.
        a = np.array([[[5]]])
        assert_not_deprecated = self.assert_not_deprecated
        assert_not_deprecated(lambda: a[np.array([0])])
        assert_not_deprecated(lambda: a[[0, 0]])
        assert_not_deprecated(lambda: a[:, [0, 0]])
        assert_not_deprecated(lambda: a[:, 0,:])
        assert_not_deprecated(lambda: a[:,:,:])
    def test_slicing(self):
        a = np.array([[5]])
        # num counts one warning per float component of the slice(s).
        def assert_deprecated(*args, **kwargs):
            self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
        # start as float.
        assert_deprecated(lambda: a[0.0:])
        assert_deprecated(lambda: a[0:, 0.0:2])
        assert_deprecated(lambda: a[0.0::2, :0])
        assert_deprecated(lambda: a[0.0:1:2,:])
        assert_deprecated(lambda: a[:, 0.0:])
        # stop as float.
        assert_deprecated(lambda: a[:0.0])
        assert_deprecated(lambda: a[:0, 1:2.0])
        assert_deprecated(lambda: a[:0.0:2, :0])
        assert_deprecated(lambda: a[:0.0,:])
        assert_deprecated(lambda: a[:, 0:4.0:2])
        # step as float.
        assert_deprecated(lambda: a[::1.0])
        assert_deprecated(lambda: a[0:, :2:2.0])
        assert_deprecated(lambda: a[1::4.0, :0])
        assert_deprecated(lambda: a[::5.0,:])
        assert_deprecated(lambda: a[:, 0:4:2.0])
        # mixed.
        assert_deprecated(lambda: a[1.0:2:2.0], num=2)
        assert_deprecated(lambda: a[1.0::2.0], num=2)
        assert_deprecated(lambda: a[0:, :2.0:2.0], num=2)
        assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2)
        assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3)
        assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3)
        # should still get the DeprecationWarning if step = 0.
        assert_deprecated(lambda: a[::0.0], function_fails=True)
    def test_valid_slicing(self):
        # Pure integer slices stay warning-free.
        a = np.array([[[5]]])
        assert_not_deprecated = self.assert_not_deprecated
        assert_not_deprecated(lambda: a[::])
        assert_not_deprecated(lambda: a[0:])
        assert_not_deprecated(lambda: a[:2])
        assert_not_deprecated(lambda: a[0:2])
        assert_not_deprecated(lambda: a[::2])
        assert_not_deprecated(lambda: a[1::2])
        assert_not_deprecated(lambda: a[:2:2])
        assert_not_deprecated(lambda: a[1:2:2])
    def test_non_integer_argument_deprecations(self):
        a = np.array([[5]])
        # Float shape entries and float axis arguments are deprecated, too.
        self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2)
        self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1)))
        self.assert_deprecated(np.take, args=(a, [0], 1.))
        self.assert_deprecated(np.take, args=(a, [0], np.float64(1.)))
    def test_non_integer_sequence_multiplication(self):
        # Numpy scalar sequence multiply should not work with non-integers
        def mult(a, b):
            return a * b
        self.assert_deprecated(mult, args=([1], np.float_(3)))
        self.assert_not_deprecated(mult, args=([1], np.int_(3)))
class TestBooleanArgumentDeprecation(_DeprecationTestCase):
    """Tests that using a boolean as integer argument or index is
    deprecated.
    """
    message = "using a boolean instead of an integer " \
              "will result in an error in the future"
    def test_bool_as_int_argument(self):
        a = np.array([[[1]]])
        # Booleans used where integers are required (shape entries, axis
        # arguments, indices) must raise the deprecation warning.
        self.assert_deprecated(np.reshape, args=(a, (True, -1)))
        self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1)))
        # Note that operator.index(np.array(True)) does not work, a boolean
        # array is thus also deprecated, but not with the same message:
        assert_raises(TypeError, operator.index, np.array(True))
        self.assert_deprecated(np.take, args=(a, [0], False))
        # A slice with three boolean components warns once per component.
        self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3)
        self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError)
        self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError)
class TestArrayToIndexDeprecation(_DeprecationTestCase):
    """Tests that creating an index from an array with ndim > 0 is
    deprecated.
    """
    message = "converting an array with ndim \> 0 to an index will result " \
              "in an error in the future"
    def test_array_to_index_deprecation(self):
        # This drops into the non-integer deprecation, which is ignored here,
        # so no exception is expected. The raising is effectively tested above.
        a = np.array([[[1]]])
        self.assert_deprecated(operator.index, args=(np.array([1]),))
        # exceptions=() asserts the warning but expects *no* error even when
        # warnings are escalated to errors.
        self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=())
        self.assert_deprecated(np.take, args=(a, [0], a), exceptions=())
        # Check slicing. Normal indexing checks arrays specifically.
        self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3)
class TestNonIntegerArrayLike(_DeprecationTestCase):
    """Tests that array-likes (e.g. lists) give a deprecation warning when
    they cannot be safely cast to an integer index.
    """
    message = "non integer \(and non boolean\) array-likes will not be " \
              "accepted as indices in the future"
    def test_basic(self):
        a = np.arange(10)
        # Float and string lists cannot be safely cast to integer indices.
        self.assert_deprecated(a.__getitem__, args=([0.5, 1.5],),
                               exceptions=IndexError)
        self.assert_deprecated(a.__getitem__, args=((['1', '2'],),),
                               exceptions=IndexError)
        # An empty list is a valid (empty) integer index.
        self.assert_not_deprecated(a.__getitem__, ([],))
    def test_boolean_futurewarning(self):
        a = np.arange(10)
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            # Boolean lists get a FutureWarning (future boolean indexing).
            assert_warns(FutureWarning, a.__getitem__, [True])
            # Unfortunatly, the deprecation warning takes precedence:
            #assert_warns(FutureWarning, a.__getitem__, True)
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            assert_raises(FutureWarning, a.__getitem__, [True])
            #assert_raises(FutureWarning, a.__getitem__, True)
class TestMultipleEllipsisDeprecation(_DeprecationTestCase):
    """Using more than one Ellipsis in a single index tuple is deprecated."""
    message = "an index can only have a single Ellipsis \(`...`\); replace " \
              "all but one with slices \(`:`\)."
    def test_basic(self):
        a = np.arange(10)
        self.assert_deprecated(a.__getitem__, args=((Ellipsis, Ellipsis),))
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            # Just check that this works:
            b = a[...,...]
            assert_array_equal(a, b)
            # Three ellipses cannot be interpreted at all and must raise.
            assert_raises(IndexError, a.__getitem__, ((Ellipsis, ) * 3,))
class TestBooleanSubtractDeprecations(_DeprecationTestCase):
    """Test deprecation of boolean `-` (binary subtract and unary negative);
    the bitwise operators should be used instead.
    """
    message = r"numpy boolean .* \(the .* `-` operator\) is deprecated, " \
              "use the bitwise"
    def test_operator_deprecation(self):
        # Both boolean arrays and boolean scalars are covered.
        array = np.array([True])
        generic = np.bool_(True)
        # Minus operator/subtract ufunc:
        self.assert_deprecated(operator.sub, args=(array, array))
        self.assert_deprecated(operator.sub, args=(generic, generic))
        # Unary minus/negative ufunc:
        self.assert_deprecated(operator.neg, args=(array,))
        self.assert_deprecated(operator.neg, args=(generic,))
if __name__ == "__main__":
    # Allow running this test file directly through the nose test runner.
    run_module_suite()
| true | true |
f7fbb988a634687215dee9a594a7dd08ba2137e7 | 6,347 | py | Python | dictionaries_examples.py | carlosmertens/Python-Introduction | 041b2ff1ad7acef7ad4a1016a5654ace4f33ac35 | [
"MIT"
] | null | null | null | dictionaries_examples.py | carlosmertens/Python-Introduction | 041b2ff1ad7acef7ad4a1016a5654ace4f33ac35 | [
"MIT"
] | null | null | null | dictionaries_examples.py | carlosmertens/Python-Introduction | 041b2ff1ad7acef7ad4a1016a5654ace4f33ac35 | [
"MIT"
] | null | null | null | """ DICTIONARIES
Rather than storing single objects like lists and sets do,
dictionaries store pairs of elements: keys and values.
Dictionary keys are similar to list indices: we can select elements
from the data structure by putting the index/key in square brackets.
Unlike lists, dictionaries can have keys of any immutable type, not
just integers. The element dictionary uses strings for its keys. However,
it's not even necessary for every key to have the same type!
"""
from test_countries_list import country_list
# Note: since the list of countries is so large,
# it's tidier to put it in a separate file. (test_countries_list.py)
# this is for Quiz 1
print("\n")
print("==================== Example 1 ====================\n")
# In this example we define a dictionary where the keys are element names and the values are
# their corresponding atomic numbers.
elements = {'hydrogen': 1, 'helium': 2, 'carbon': 6} # create a dictionary{}
print(elements)
print(elements['carbon']) # print the value of the specific key
elements['lithium'] = 3 # add new key ('lithium') and value (3) to the dictionary{}
print(elements)
#
#
print("\n")
print("==================== Example 2 ====================\n")
"""Quiz: Define a Dictionary
Define a Dictionary, population, that provides information on the
world's largest cities. The key is the name of a city (a string),
and the associated value is its population in millions of people.
Key      | Value
Shanghai | 17.8
Istanbul | 13.3
Karachi  | 13.0
Mumbai   | 12.5
"""
# NOTE(review): "Istambul" below is spelled differently from the quiz table
# above ("Istanbul"); kept as-is since it is runtime data — confirm intended.
population = {"Shanghai": 17.8, "Istambul": 13.3, "Karachi": 13.0, "Mumbai": 12.5}
print(population)
# trying to find elements in the dictionary
# print(population["New York"]) will create a key error
# better ways to look up for elements:
if "New York" in population:
    # Fixed: the key string previously contained a stray ']'
    # ("New York]"), which would raise KeyError if this branch ever ran.
    print(population["New York"])
else:
    print("We do not have data for New York")
print(population.get("New York")) # will return None but it will not create a key error
print(population.get("New York", "We do not have data for New York")) # after the coma, text to be return
#
#
print("\n")
print("==================== Quiz 1 ====================\n")
"""Quiz: Users by Country
Create a dict, country_counts, whose keys are country names, and whose values are the
number of times the country occurs in the countries list."""
# from test_countries_list import country_list (already call on top of the file)
country_counts = {}
for country in country_list:
    if country in country_counts:
        country_counts[country] += 1 # increase the count of the country found
    else:
        country_counts[country] = 1 # add the country to the dictionary and set the value to 1
print(country_counts)
#
#
print("\n")
print("==================== Example 3 ====================\n")
"""The syntax for iterating over dictionaries is very similar.
The difference is that dicts store key value pairs, and when we loop over them we iterate through the keys:"""
Beatles_Discography = {"Please Please Me": 1963, "With the Beatles": 1963,
                       "A Hard Day's Night": 1964, "Beatles for Sale": 1964, "Twist and Shout": 1964,
                       "Help": 1965, "Rubber Soul": 1965, "Revolver": 1966,
                       "Sgt. Pepper's Lonely Hearts Club Band": 1967,
                       "Magical Mystery Tour": 1967, "The Beatles": 1968,
                       "Yellow Submarine": 1969, 'Abbey Road': 1969,
                       "Let It Be": 1970}
for album_title in Beatles_Discography:
    print("title: {}, year: {}".format(album_title, Beatles_Discography[album_title])) # iterating
#
#
print("\n")
print("==================== Quiz 2 ====================\n")
"""Quiz: Prolific Year
Write a function most_prolific that takes a dict formatted like Beatles_Discography
example above and returns the year in which the most albums were released. If you call
the function on the Beatles_Discography it should return 1964, which saw more releases
than any other year in the discography.
If there are multiple years with the same maximum number of releases,
the function should return a list of years."""
def most_prolific(discs):
    """Return the year in which the most albums were released.

    Args:
        discs (dict): maps album title (str) -> release year (int).

    Returns:
        The single most prolific year, or a list of years when several
        years tie for the maximum number of releases.
    """
    years = {}  # year -> number of albums released that year
    for disc in discs:
        year = discs[disc]
        if year in years:
            years[year] += 1
        else:
            years[year] = 1
    max_years = []  # years holding the current maximum count
    max_number = 0  # maximum count seen so far
    for year in years:
        if years[year] > max_number:
            # Bug fix: a strictly larger count invalidates all previously
            # collected candidate years, so the list must be reset here
            # (previously the old candidates were kept, making the Beatles
            # example return [1963, 1964] instead of 1964).
            max_years = [year]
            max_number = years[year]
        elif years[year] == max_number and year not in max_years:
            max_years.append(year)
    if len(max_years) == 1:
        return max_years[0]
    else:
        return max_years
# Run the Quiz 2 solution on the Beatles discography defined above.
print(most_prolific(Beatles_Discography))
#
#
print("\n")
print("==================== Quiz 3 ====================\n")
""" A DICTIONARIES OF DICTIONARIES
Flying Circus Records:
A regular flying circus happens twice or three times a month. For each month, information about
the amount of money taken at each event is saved in a list, so that the amounts appear in the order
in which they happened. The months' data is all collected in a dictionary called monthly_takings.
For this quiz, write a function total_takings that calculates the sum of takings from every circus
in the year. Here's a sample input for this function:
"""
def total_takings(monthly_takings):
    """Return the yearly total of all circus takings.

    Args:
        monthly_takings (dict): maps month name -> list of amounts taken.

    Returns:
        The sum of every amount across all months.
    """
    # Sum each month's list, then add the monthly subtotals together.
    return sum(sum(amounts) for amounts in monthly_takings.values())
# Sample data: each month maps to the amounts collected at the two or
# three circuses held that month.
monthly_takings_list = {'January': [54, 63], 'February': [64, 60], 'March': [63, 49],
                        'April': [57, 42], 'May': [55, 37], 'June': [34, 32],
                        'July': [69, 41, 32], 'August': [40, 61, 40], 'September': [51, 62],
                        'October': [34, 58, 45], 'November': [67, 44], 'December': [41, 58]}
print(total_takings(monthly_takings_list))
| 36.0625 | 110 | 0.648653 |
from test_countries_list import country_list
# this is for Quiz 1
print("\n")
print("==================== Example 1 ====================\n")
# In this example we define a dictionary where the keys are element names and the values are
# their corresponding atomic numbers.
elements = {'hydrogen': 1, 'helium': 2, 'carbon': 6} # create a dictionary{}
print(elements)
print(elements['carbon']) # print the value of the specific key
elements['lithium'] = 3 # add new key ('lithium') and value (3) to the dictionary{}
print(elements)
#
#
print("\n")
print("==================== Example 2 ====================\n")
# Dictionary of the world's largest cities: city name -> population (millions).
population = {"Shanghai": 17.8, "Istambul": 13.3, "Karachi": 13.0, "Mumbai": 12.5}
print(population)
# trying to find elements in the dictionary
# print(population["New York"]) will create a key error
# better ways to look up for elements:
if "New York" in population:
    # Fixed: the key string previously contained a stray ']'
    # ("New York]"), which would raise KeyError if this branch ever ran.
    print(population["New York"])
else:
    print("We do not have data for New York")
print(population.get("New York")) # will return None but it will not create a key error
print(population.get("New York", "We do not have data for New York")) # after the coma, text to be return
#
#
print("\n")
print("==================== Quiz 1 ====================\n")
# from test_countries_list import country_list (already call on top of the file)
country_counts = {}
for country in country_list:
    if country in country_counts:
        country_counts[country] += 1 # increase the count of the country found
    else:
        country_counts[country] = 1 # add the country to the dictionary and set the value to 1
print(country_counts)
#
#
print("\n")
print("==================== Example 3 ====================\n")
# Album title -> release year; looping a dict iterates over its keys.
Beatles_Discography = {"Please Please Me": 1963, "With the Beatles": 1963,
                       "A Hard Day's Night": 1964, "Beatles for Sale": 1964, "Twist and Shout": 1964,
                       "Help": 1965, "Rubber Soul": 1965, "Revolver": 1966,
                       "Sgt. Pepper's Lonely Hearts Club Band": 1967,
                       "Magical Mystery Tour": 1967, "The Beatles": 1968,
                       "Yellow Submarine": 1969, 'Abbey Road': 1969,
                       "Let It Be": 1970}
for album_title in Beatles_Discography:
    print("title: {}, year: {}".format(album_title, Beatles_Discography[album_title])) # iterating
#
#
print("\n")
print("==================== Quiz 2 ====================\n")
def most_prolific(discs):
    """Return the year in which the most albums were released.

    Args:
        discs (dict): maps album title (str) -> release year (int).

    Returns:
        The single most prolific year, or a list of years when several
        years tie for the maximum number of releases.
    """
    years = {}  # year -> number of albums released that year
    for disc in discs:
        year = discs[disc]
        if year in years:
            years[year] += 1
        else:
            years[year] = 1
    max_years = []  # years holding the current maximum count
    max_number = 0  # maximum count seen so far
    for year in years:
        if years[year] > max_number:
            # Bug fix: a strictly larger count invalidates all previously
            # collected candidate years, so the list must be reset here
            # (previously the old candidates were kept, making the Beatles
            # example return [1963, 1964] instead of 1964).
            max_years = [year]
            max_number = years[year]
        elif years[year] == max_number and year not in max_years:
            max_years.append(year)
    if len(max_years) == 1:
        return max_years[0]
    else:
        return max_years
# Run the Quiz 2 solution on the Beatles discography defined above.
print(most_prolific(Beatles_Discography))
#
#
print("\n")
print("==================== Quiz 3 ====================\n")
def total_takings(monthly_takings):
    """Return the yearly total of all circus takings.

    Args:
        monthly_takings (dict): maps month name -> list of amounts taken.

    Returns:
        The sum of every amount across all months.
    """
    # Sum each month's list, then add the monthly subtotals together.
    return sum(sum(amounts) for amounts in monthly_takings.values())
# Sample data: each month maps to the amounts collected at the two or
# three circuses held that month.
monthly_takings_list = {'January': [54, 63], 'February': [64, 60], 'March': [63, 49],
                        'April': [57, 42], 'May': [55, 37], 'June': [34, 32],
                        'July': [69, 41, 32], 'August': [40, 61, 40], 'September': [51, 62],
                        'October': [34, 58, 45], 'November': [67, 44], 'December': [41, 58]}
print(total_takings(monthly_takings_list))
| true | true |
f7fbb9bc4d02c1939e17717a80e9b77248ab1fb9 | 9,841 | py | Python | paddlenlp/transformers/blenderbot_small/tokenizer.py | zzz2010/PaddleNLP | fba0b29601b0e8286a9ab860bf69c9acca4481f4 | [
"Apache-2.0"
] | 1 | 2022-01-17T02:11:58.000Z | 2022-01-17T02:11:58.000Z | paddlenlp/transformers/blenderbot_small/tokenizer.py | zzz2010/PaddleNLP | fba0b29601b0e8286a9ab860bf69c9acca4481f4 | [
"Apache-2.0"
] | null | null | null | paddlenlp/transformers/blenderbot_small/tokenizer.py | zzz2010/PaddleNLP | fba0b29601b0e8286a9ab860bf69c9acca4481f4 | [
"Apache-2.0"
] | null | null | null | # encoding=utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..gpt.tokenizer import GPTTokenizer
import re
__all__ = ['BlenderbotSmallTokenizer']
# Copy from paddlenlp.transformers.gpt.tokenizer.get_pairs
def get_pairs(word):
    """Return the set of adjacent symbol pairs (bigrams) in a word.

    Args:
        word (tuple): tuple of symbols (symbols being variable-length strings).
    Returns:
        set: symbol pairs in a word; empty for words of fewer than two
            symbols (the previous implementation raised IndexError on an
            empty word).
    """
    # zip pairs each symbol with its successor; it naturally yields
    # nothing for 0- or 1-symbol words.
    return set(zip(word, word[1:]))
class BlenderbotSmallTokenizer(GPTTokenizer):
    r"""
    Constructs a BlenderbotSmall tokenizer based on Byte-Pair-Encoding.
    This tokenizer inherits from :class:`~paddlenlp.transformers.GPTTokenizer`,
    which contains most of the main methods.
    Please refer to the superclass for more information regarding methods.
    Args:
        vocab_file (str): file path of the vocabulary
        merges_file (str): file path of the merges file.
        errors (str): The method to handle errors in decoding
        max_len (int): The specified maximum sequence length. Default: "None".
        special_tokens (dict): The additional special tokens. Default: "None".
        bos_token (str): The special token for beginning of sequence token. Default: "__start__".
        eos_token (str): The special token for end of sequence token. Default: "__end__".
        unk_token (str): The special token for unknown tokens. Default: "__unk__"
        pad_token (str): The special token for padding. Default: "__null__".
        eol_token (str): The special token for newline. Default: "__newln__".
    Examples:
        .. code-block:: python
            from paddlenlp.transformers import BlenderbotSmallTokenizer
            tokenizer = BlenderbotSmallTokenizer.from_pretrained("blenderbot_small-90M")
            text = "My friends are cool but they eat too many carbs."
            inputs = tokenizer(text)
            # above line outputs:
            #   {'input_ids': [42, 643, 46, 1430, 45, 52, 1176, 146, 177, 753, 2430, 5],
            #   'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
    """
    resource_files_names = {
        "vocab_file": "vocab.json",
        "merges_file": "merges.txt"
    }
    pretrained_resource_files_map = {
        "vocab_file": {
            "blenderbot_small-90M":
            "https://bj.bcebos.com/paddlenlp/models/transformers/blenderbot_small/blenderbot_small-90M-vocab.json",
        },
        "merges_file": {
            "blenderbot_small-90M":
            "https://bj.bcebos.com/paddlenlp/models/transformers/blenderbot_small/blenderbot_small-90M-merges.txt",
        }
    }
    pretrained_init_configuration = {"blenderbot_small-90M": {}}
    def __init__(
            self,
            vocab_file,
            merges_file,
            errors='replace',
            max_len=None,
            special_tokens=None,
            bos_token="__start__",
            eos_token="__end__",
            unk_token="__unk__",
            pad_token="__null__",
            eol_token="__newln__", ):
        # NOTE(review): bos_token is accepted for API compatibility but is
        # not forwarded to the base class here — confirm this is intended.
        super(BlenderbotSmallTokenizer, self).__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            max_len=max_len,
            special_tokens=special_tokens,
            pad_token=pad_token,
            eos_token=eos_token,
            eol_token=eol_token)
        self.pat = r"\S+\n?" # String matching pattern of BlenderbotSmall is different from Blenderbot
        self.unk_id = self.encoder[unk_token]
        self.eol_token = eol_token
    def bpe(self, token):
        """
        Apply Byte-Pair-Encoding on token.
        The process of bpe in BlenderbotSmall is different from Blenderbot.
        Args:
            token (str): The token to be converted.
        Returns:
            str: Converted token.
        """
        if token in self.cache:
            return self.cache[token]
        # Pre-tokenize: pad punctuation/apostrophes with spaces, collapse
        # repeated whitespace and replace newlines with the eol token.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", self.eol_token)
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            # Split into characters and mark the last one with the
            # end-of-word suffix "</w>".
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                # Greedily merge the lowest-ranked (most frequent) pair.
                bigram = min(
                    pairs,
                    key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[
                            i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            # Join sub-words with the "@@ " continuation marker and strip
            # the 4-character "</w>" suffix from the final sub-word.
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def convert_tokens_to_ids(self, tokens):
        """
        Converts a token or a sequence of tokens into ids.
        Args:
            tokens (str|list[str]): A single token or a list of tokens
                (unknown tokens map to the unk id).
        Returns:
            int|list: The converted id for a single token, or the list of
                converted ids.
        """
        ids = []
        if isinstance(tokens, str):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.encoder.get(tokens, self.unk_id)
        for token in tokens:
            if token in self.special_tokens:
                ids.append(self.special_tokens[token])
            else:
                ids.append(self.encoder.get(token, self.unk_id))
        return ids
    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of string) to a single string,
        undoing the "@@ " BPE continuation markers.
        Args:
            tokens (list[str]): A sequence of tokens.
        Returns:
            str: Converted string.
        """
        return " ".join(tokens).replace("@@ ", "").strip()
    def convert_ids_to_string(self,
                              ids,
                              skip_special_tokens=True,
                              clean_up_tokenization_spaces=True):
        """
        Converts a sequence of ids (list of integers) to a single string.
        Args:
            ids (list[int]):
                A sequence of ids corresponding to tokens.
            skip_special_tokens (bool, optional):
                Whether to skip and not decode special tokens when converting. Defaults to `True`.
            clean_up_tokenization_spaces (bool, optional):
                Whether to Clean up a list of simple English tokenization artifacts
                like spaces before punctuations and abbreviated forms. Defaults to `True`.
        Returns:
            str: Converted string.
        """
        tokens = self.convert_ids_to_tokens(
            ids, skip_special_tokens=skip_special_tokens)
        output_string = self.convert_tokens_to_string(tokens)
        if clean_up_tokenization_spaces:
            # Re-attach punctuation and English contractions that the
            # pre-tokenizer separated with spaces.
            output_string = (output_string.replace(" .", ".").replace(" ?", "?")
                             .replace(" !", "!").replace(" ,", ",")
                             .replace(" ' ", "'").replace(" n't", "n't")
                             .replace(" 'm", "'m").replace(" 's", "'s")
                             .replace(" 've", "'ve").replace(" 're", "'re"))
        return output_string
    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """
        Converts a token id or a sequence of token ids (integer) to a token or
        a sequence of tokens (str) by using the `vocab` attribute (an instance
        of `Vocab`).
        Args:
            ids (int` or `list[int]):
                A token id or a sequence of token ids.
            skip_special_tokens (bool, optional):
                Whether to skip and not decode special tokens when converting. Defaults to `False`.
        Returns:
            str: Converted token or token sequence.
        """
        tokens = [self.decoder[i] for i in ids]
        if skip_special_tokens and isinstance(tokens, list):
            tokens = [
                token for token in tokens
                if token not in self.all_special_tokens
            ]
        return tokens
| 37.418251 | 115 | 0.552281 |
from ..gpt.tokenizer import GPTTokenizer
import re
__all__ = ['BlenderbotSmallTokenizer']
def get_pairs(word):
    """Return the set of adjacent symbol pairs (bigrams) in a word.

    Args:
        word (tuple): tuple of symbols (symbols being variable-length strings).
    Returns:
        set: symbol pairs in a word; empty for words of fewer than two
            symbols (the previous implementation raised IndexError on an
            empty word).
    """
    # zip pairs each symbol with its successor; it naturally yields
    # nothing for 0- or 1-symbol words.
    return set(zip(word, word[1:]))
class BlenderbotSmallTokenizer(GPTTokenizer):
    """BlenderbotSmall tokenizer based on Byte-Pair-Encoding; inherits most
    behavior from GPTTokenizer. BPE sub-words are joined with the "@@ "
    continuation marker.
    """
    resource_files_names = {
        "vocab_file": "vocab.json",
        "merges_file": "merges.txt"
    }
    pretrained_resource_files_map = {
        "vocab_file": {
            "blenderbot_small-90M":
            "https://bj.bcebos.com/paddlenlp/models/transformers/blenderbot_small/blenderbot_small-90M-vocab.json",
        },
        "merges_file": {
            "blenderbot_small-90M":
            "https://bj.bcebos.com/paddlenlp/models/transformers/blenderbot_small/blenderbot_small-90M-merges.txt",
        }
    }
    pretrained_init_configuration = {"blenderbot_small-90M": {}}
    def __init__(
            self,
            vocab_file,
            merges_file,
            errors='replace',
            max_len=None,
            special_tokens=None,
            bos_token="__start__",
            eos_token="__end__",
            unk_token="__unk__",
            pad_token="__null__",
            eol_token="__newln__", ):
        # NOTE(review): bos_token is accepted for API compatibility but is
        # not forwarded to the base class here — confirm this is intended.
        super(BlenderbotSmallTokenizer, self).__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            max_len=max_len,
            special_tokens=special_tokens,
            pad_token=pad_token,
            eos_token=eos_token,
            eol_token=eol_token)
        # Whitespace-based pre-tokenization pattern (differs from Blenderbot).
        self.pat = r"\S+\n?"
        self.unk_id = self.encoder[unk_token]
        self.eol_token = eol_token
    def bpe(self, token):
        """Apply Byte-Pair-Encoding to `token` and return the encoded
        string (sub-words separated by spaces, "@@" marking continuations).
        """
        if token in self.cache:
            return self.cache[token]
        # Pre-tokenize: pad punctuation/apostrophes with spaces, collapse
        # repeated whitespace and replace newlines with the eol token.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", self.eol_token)
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            # Split into characters; the last one carries the end-of-word
            # suffix "</w>".
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                # Greedily merge the lowest-ranked (most frequent) pair.
                bigram = min(
                    pairs,
                    key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[
                            i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            # Join sub-words with the "@@ " continuation marker and strip
            # the 4-character "</w>" suffix from the final sub-word.
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def convert_tokens_to_ids(self, tokens):
        """Convert a token (str) or sequence of tokens into the id or list
        of ids; unknown tokens map to the unk id.
        """
        ids = []
        if isinstance(tokens, str):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.encoder.get(tokens, self.unk_id)
        for token in tokens:
            if token in self.special_tokens:
                ids.append(self.special_tokens[token])
            else:
                ids.append(self.encoder.get(token, self.unk_id))
        return ids
    def convert_tokens_to_string(self, tokens):
        """Join tokens into a single string, undoing the "@@ " markers."""
        return " ".join(tokens).replace("@@ ", "").strip()
    def convert_ids_to_string(self,
                              ids,
                              skip_special_tokens=True,
                              clean_up_tokenization_spaces=True):
        """Convert a list of ids to a single string, optionally skipping
        special tokens and cleaning up English tokenization artifacts
        (spaces before punctuation and contractions). Both flags default
        to True.
        """
        tokens = self.convert_ids_to_tokens(
            ids, skip_special_tokens=skip_special_tokens)
        output_string = self.convert_tokens_to_string(tokens)
        if clean_up_tokenization_spaces:
            # Re-attach punctuation and contractions separated by bpe().
            output_string = (output_string.replace(" .", ".").replace(" ?", "?")
                             .replace(" !", "!").replace(" ,", ",")
                             .replace(" ' ", "'").replace(" n't", "n't")
                             .replace(" 'm", "'m").replace(" 's", "'s")
                             .replace(" 've", "'ve").replace(" 're", "'re"))
        return output_string
    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """Convert a sequence of token ids back to tokens via the decoder,
        optionally dropping special tokens.
        """
        tokens = [self.decoder[i] for i in ids]
        if skip_special_tokens and isinstance(tokens, list):
            tokens = [
                token for token in tokens
                if token not in self.all_special_tokens
            ]
        return tokens
| true | true |
f7fbb9d987769ea378cab29f1a1a71027622279b | 944 | py | Python | jpype/JClassUtil.py | marscher/jpype | ce359d7a0ff348831af8abd6e778ed4f7c52c81e | [
"Apache-2.0"
] | 6 | 2015-04-28T16:51:08.000Z | 2017-07-12T11:29:00.000Z | jpype/JClassUtil.py | donaldlab/jpype-py2 | 1b9850ad4ba4d3c14446a2c31d9c0bb1d4744076 | [
"Apache-2.0"
] | 29 | 2015-02-24T11:11:26.000Z | 2017-08-25T08:30:18.000Z | jpype/JClassUtil.py | donaldlab/jpype-py2 | 1b9850ad4ba4d3c14446a2c31d9c0bb1d4744076 | [
"Apache-2.0"
] | 2 | 2019-12-16T09:45:03.000Z | 2021-04-15T00:20:24.000Z | #*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import _jclass
def isInterface(t) :
    """Return True when ``t`` is a wrapped Java class object whose
    underlying Java class represents an interface; False for anything
    that is not a ``_jclass._JavaClass``."""
    return isinstance(t, _jclass._JavaClass) and t.__javaclass__.isInterface()
| 37.76 | 79 | 0.579449 |
import _jclass
def isInterface(t) :
if not isinstance(t, _jclass._JavaClass) :
return False
return t.__javaclass__.isInterface()
| true | true |
f7fbb9e54d6b7f244e96b81874cc9dd0196bdc22 | 4,122 | py | Python | rasa/cli/run.py | GolemXlV/rasa-for-botfront | 35fa580993ab5d1b06defaa9bdcd45ca5ab04324 | [
"Apache-2.0"
] | null | null | null | rasa/cli/run.py | GolemXlV/rasa-for-botfront | 35fa580993ab5d1b06defaa9bdcd45ca5ab04324 | [
"Apache-2.0"
] | null | null | null | rasa/cli/run.py | GolemXlV/rasa-for-botfront | 35fa580993ab5d1b06defaa9bdcd45ca5ab04324 | [
"Apache-2.0"
] | 1 | 2020-03-12T16:09:06.000Z | 2020-03-12T16:09:06.000Z | import argparse
import logging
import os
from typing import List, Text
from rasa.cli.arguments import run as arguments
from rasa.cli.utils import get_validated_path, print_error
from rasa.constants import (
DEFAULT_ACTIONS_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_ENDPOINTS_PATH,
DEFAULT_MODELS_PATH,
DOCS_BASE_URL,
)
from rasa.exceptions import ModelNotFound
logger = logging.getLogger(__name__)
# noinspection PyProtectedMember
def add_subparser(
    subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
    """Register the ``run`` command and its ``actions`` subcommand.

    ``rasa run`` starts the Rasa server (handled by :func:`run`); the
    nested ``rasa run actions`` starts the SDK action server (handled by
    :func:`run_actions`).  Argument definitions are delegated to the
    shared ``arguments`` module.
    """
    shared = dict(
        parents=parents,
        conflict_handler="resolve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    run_parser = subparsers.add_parser(
        "run", help="Starts a Rasa server with your trained model.", **shared
    )
    run_parser.set_defaults(func=run)
    action_parser = run_parser.add_subparsers().add_parser(
        "actions", help="Runs the action server.", **shared
    )
    action_parser.set_defaults(func=run_actions)
    arguments.set_run_arguments(run_parser)
    arguments.set_run_action_arguments(action_parser)
def run_actions(args: argparse.Namespace):
    """Start the Rasa SDK action server with the parsed CLI arguments."""
    # Imported lazily so `rasa run` itself does not require rasa_sdk.
    import rasa_sdk.__main__ as sdk
    # Fall back to the default actions package when --actions is not given.
    args.actions = args.actions or DEFAULT_ACTIONS_PATH
    sdk.main_from_args(args)
def _validate_model_path(model_path: Text, parameter: Text, default: Text):
if model_path is not None and not os.path.exists(model_path):
reason_str = f"'{model_path}' not found."
if model_path is None:
reason_str = f"Parameter '{parameter}' not set."
logger.debug(f"{reason_str} Using default location '{default}' instead.")
os.makedirs(default, exist_ok=True)
model_path = default
return model_path
def run(args: argparse.Namespace):
    """Entry point for ``rasa run``: start a server for a trained model.

    Resolves endpoint/credential paths (optionally pulling them from a
    remote Botfront instance), then starts the server as soon as a model
    source is available: API-only mode, remote storage, a configured
    model server, or a local model.  Prints an error and returns when no
    model source applies.
    """
    import rasa.run
    # botfront:start
    from rasa.utils.botfront import set_endpoints_credentials_args_from_remote
    set_endpoints_credentials_args_from_remote(args)
    # botfront:end
    args.endpoints = get_validated_path(
        args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
    )
    args.credentials = get_validated_path(
        args.credentials, "credentials", DEFAULT_CREDENTIALS_PATH, True
    )
    if args.enable_api:
        # API mode may start without a model; a local model path is only
        # validated when no remote storage is configured.
        if not args.remote_storage:
            args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
        rasa.run(**vars(args))
        return
    # If the API is not enabled you cannot start without a model:
    # make sure either a model server, a remote storage, or a local model is
    # configured.
    from rasa.model import get_model
    from rasa.core.utils import AvailableEndpoints
    # start server if remote storage is configured
    if args.remote_storage is not None:
        rasa.run(**vars(args))
        return
    # start server if model server is configured
    endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
    model_server = endpoints.model if endpoints and endpoints.model else None
    if model_server is not None:
        rasa.run(**vars(args))
        return
    # start server if local model found
    args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
    local_model_set = True
    try:
        get_model(args.model)
    except ModelNotFound:
        local_model_set = False
    if local_model_set:
        rasa.run(**vars(args))
        return
    print_error(
        "No model found. You have three options to provide a model:\n"
        "1. Configure a model server in the endpoint configuration and provide "
        "the configuration via '--endpoints'.\n"
        "2. Specify a remote storage via '--remote-storage' to load the model "
        "from.\n"
        "3. Train a model before running the server using `rasa train` and "
        "use '--model' to provide the model path.\n"
        "For more information check {}.".format(
            DOCS_BASE_URL + "/user-guide/running-the-server/"
        )
    )
| 30.992481 | 87 | 0.69966 | import argparse
import logging
import os
from typing import List, Text
from rasa.cli.arguments import run as arguments
from rasa.cli.utils import get_validated_path, print_error
from rasa.constants import (
DEFAULT_ACTIONS_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_ENDPOINTS_PATH,
DEFAULT_MODELS_PATH,
DOCS_BASE_URL,
)
from rasa.exceptions import ModelNotFound
logger = logging.getLogger(__name__)
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
run_parser = subparsers.add_parser(
"run",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts a Rasa server with your trained model.",
)
run_parser.set_defaults(func=run)
run_subparsers = run_parser.add_subparsers()
sdk_subparser = run_subparsers.add_parser(
"actions",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Runs the action server.",
)
sdk_subparser.set_defaults(func=run_actions)
arguments.set_run_arguments(run_parser)
arguments.set_run_action_arguments(sdk_subparser)
def run_actions(args: argparse.Namespace):
import rasa_sdk.__main__ as sdk
args.actions = args.actions or DEFAULT_ACTIONS_PATH
sdk.main_from_args(args)
def _validate_model_path(model_path: Text, parameter: Text, default: Text):
if model_path is not None and not os.path.exists(model_path):
reason_str = f"'{model_path}' not found."
if model_path is None:
reason_str = f"Parameter '{parameter}' not set."
logger.debug(f"{reason_str} Using default location '{default}' instead.")
os.makedirs(default, exist_ok=True)
model_path = default
return model_path
def run(args: argparse.Namespace):
import rasa.run
from rasa.utils.botfront import set_endpoints_credentials_args_from_remote
set_endpoints_credentials_args_from_remote(args)
args.endpoints = get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
args.credentials = get_validated_path(
args.credentials, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
if args.enable_api:
if not args.remote_storage:
args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
rasa.run(**vars(args))
return
from rasa.model import get_model
from rasa.core.utils import AvailableEndpoints
if args.remote_storage is not None:
rasa.run(**vars(args))
return
endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
model_server = endpoints.model if endpoints and endpoints.model else None
if model_server is not None:
rasa.run(**vars(args))
return
args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
local_model_set = True
try:
get_model(args.model)
except ModelNotFound:
local_model_set = False
if local_model_set:
rasa.run(**vars(args))
return
print_error(
"No model found. You have three options to provide a model:\n"
"1. Configure a model server in the endpoint configuration and provide "
"the configuration via '--endpoints'.\n"
"2. Specify a remote storage via '--remote-storage' to load the model "
"from.\n"
"3. Train a model before running the server using `rasa train` and "
"use '--model' to provide the model path.\n"
"For more information check {}.".format(
DOCS_BASE_URL + "/user-guide/running-the-server/"
)
)
| true | true |
f7fbba6fe562f012c775b92d55e99ad97b599b17 | 6,631 | py | Python | scripts/xml2bom.py | nikhilantony28/cusf-kicad | 78f8d0be15db59c0c8e1e36f8c8176bb85810b8c | [
"MIT"
] | 24 | 2020-03-27T13:30:46.000Z | 2022-01-21T12:11:09.000Z | hardware/agg-kicad/scripts/xml2bom.py | g2545420172/ffp | fb29d1a506bc5a284d2de44264a1c483874066c4 | [
"Apache-2.0",
"MIT"
] | null | null | null | hardware/agg-kicad/scripts/xml2bom.py | g2545420172/ffp | fb29d1a506bc5a284d2de44264a1c483874066c4 | [
"Apache-2.0",
"MIT"
] | 9 | 2020-03-28T07:52:53.000Z | 2021-06-11T03:05:17.000Z | #!/usr/bin/env python
"""
xml2bom.py
Copyright 2015 Adam Greig
Licensed under the MIT licence, see LICENSE file for details.
Convert Farnell BOM XMLs to a useful text report, including sanity checking,
and outputting quickpaste formats for Farnell, RS and DigiKey.
"""
from __future__ import print_function, division
import os.path
import datetime
import argparse
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
prog='xml2bom',
description="Convert KiCAD EESchema XML BOMs to an expanded text format")
parser.add_argument("input", help="input filename")
parser.add_argument("output", nargs='?', default=None, help="output filename")
parser.add_argument("-x", "--quantity", type=int, help="quantity multiplier")
group = parser.add_mutually_exclusive_group()
group.add_argument("-i", "--include", nargs='+', help="parts to include")
group.add_argument("-e", "--exclude", nargs='+', help="parts to exclude")
args = parser.parse_args()
tree = ET.parse(args.input)
parts = {}
missing_order_code = []
missing_footprint = []
inconsistent_order_code = {}
quantity_multiplier = 1
if args.quantity:
quantity_multiplier = args.quantity
def ignore_part(ref):
    """Return True when *ref* should be skipped per the CLI include/exclude
    filters (argparse declares the two options mutually exclusive)."""
    excluded_by_whitelist = bool(args.include) and ref not in args.include
    excluded_by_blacklist = bool(args.exclude) and ref in args.exclude
    return excluded_by_whitelist or excluded_by_blacklist
for comp in tree.getroot().iter('comp'):
ref = comp.get('ref')
if ignore_part(ref):
continue
val = comp.findtext('value')
foot = comp.findtext('footprint')
fields = {}
part = {"ref": ref, "value": val, "footprint": foot, "fields": fields}
for field in comp.iter('field'):
name = field.get('name')
number = field.text
fields[name] = number
if name not in parts:
parts[name] = {}
if number not in parts[name]:
parts[name][number] = []
elif (parts[name][number][0]['value'] != val or
parts[name][number][0]['footprint'] != foot):
if name not in inconsistent_order_code:
inconsistent_order_code[name] = {}
if number not in inconsistent_order_code[name]:
inconsistent_order_code[name][number] = [
parts[name][number][0]]
inconsistent_order_code[name][number].append(part)
parts[name][number].append(part)
# Store parts missing order codes or footprints
if not fields:
missing_order_code.append(part)
if not foot:
missing_footprint.append(part)
missing_footprint_report = "\n".join(
"{:6} {:15}".format(p['ref'], p['value']) for p in missing_footprint)
missing_order_code_report = "\n".join(
"{:6} {:15} {}".format(p['ref'], p['value'], p['footprint'])
for p in missing_order_code)
inconsistent_order_code_report = "\n".join(
" {}\n".format(name) + " " + "~"*len(name) + "\n" + "\n".join(
" {}: ".format(number) + "\n" + "\n".join(
" " +
"{:6} {:15} {}".format(p['ref'], p['value'], p['footprint'])
for p in inconsistent_order_code[name][number]
) for number in inconsistent_order_code[name]
) for name in inconsistent_order_code)
def farnell_formatter(number, parts):
    """Format one Farnell quick-paste line: "code,quantity,note".

    The note summarizes quantity, distinct values and distinct footprints
    and is truncated to Farnell's 30-character limit.
    """
    qty = len(parts)
    footprints = {str(part['footprint']).split(":")[-1] for part in parts}
    values = {part['value'] for part in parts}
    note = "{}x {} {}".format(qty, " ".join(values), " ".join(footprints))
    return "{},{},{}\n".format(number, qty * quantity_multiplier, note[:30])
def rs_formatter(number, parts):
    """Format one RS quick-paste line: "code,quantity,,note".

    The note packs quantity, distinct values, distinct footprints and the
    concatenated references, separated by "--".
    """
    qty = len(parts)
    refs = "".join(part['ref'] for part in parts)
    footprints = "-".join({str(part['footprint']).split(":")[-1] for part in parts})
    values = "-".join({part['value'] for part in parts})
    note = "{}x--{}--{}--{}".format(qty, values, footprints, refs)
    return "{},{},,{}\n".format(number, qty * quantity_multiplier, note)
def digikey_formatter(number, parts):
    """Format one Digi-Key quick-paste line: "quantity,code,note".

    Note that Digi-Key expects the quantity first; the note lists the
    per-design quantity, distinct values, distinct footprints and refs.
    """
    qty = len(parts)
    refs = " ".join(part['ref'] for part in parts)
    footprints = " ".join({str(part['footprint']).split(":")[-1] for part in parts})
    values = " ".join({part['value'] for part in parts})
    note = "{}x {} {} {}".format(qty, values, footprints, refs)
    return "{},{},{}\n".format(qty * quantity_multiplier, number, note)
vendor_bom_formatters = {
"farnell": farnell_formatter,
"rs": rs_formatter,
"digikey": digikey_formatter,
}
vendor_boms = []
for name in parts:
bom_text = " {}\n".format(name) + " " + "~"*len(name) + "\n"
for number in parts[name]:
if name.lower() in vendor_bom_formatters:
bom_text += vendor_bom_formatters[name.lower()](
number, parts[name][number])
else:
qty = len(parts[name][number])
bom_text += "{},{},{}x {} {}\n".format(
number, qty * quantity_multiplier, qty,
" ".join(p['ref'] for p in parts[name][number]),
",".join(set(
str(p['footprint']).split(":")[-1]
for p in parts[name][number])))
vendor_boms.append(bom_text)
vendor_boms = "\n\n".join(vendor_boms)
assembly_bom = "\n".join("\n".join(
"{:20} {:<3} {:15} {:<15} {:<}".format(
number, len(parts[name][number]),
",".join(set(str(p['value']) for p in parts[name][number])),
",".join(set(str(p['footprint']).split(":")[-1]
for p in parts[name][number])),
" ".join(sorted(p['ref'] for p in parts[name][number])))
for number in parts[name])
for name in parts)
report = """Bill Of Materials
=================
Source file: {source}
Date: {date}
Parts Missing Footprints
------------------------
{missing_footprint}
Parts Missing Order Codes
-------------------------
{missing_code}
Inconsistent Order Codes
------------------------
{inconsistent_code}
Vendor Specific BOMs
--------------------
{vendor_boms}
Assembly BOM
------------
{assembly_bom}
""".format(
source=os.path.basename(args.input),
date=datetime.datetime.now().isoformat(),
missing_footprint=missing_footprint_report,
missing_code=missing_order_code_report,
inconsistent_code=inconsistent_order_code_report,
vendor_boms=vendor_boms,
assembly_bom=assembly_bom)
print(report)
if args.output:
filename = args.output
if filename[-4:].lower() != ".bom":
filename += ".bom"
with open(filename, 'w') as f:
f.write(report)
| 32.504902 | 79 | 0.582416 |
from __future__ import print_function, division
import os.path
import datetime
import argparse
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
prog='xml2bom',
description="Convert KiCAD EESchema XML BOMs to an expanded text format")
parser.add_argument("input", help="input filename")
parser.add_argument("output", nargs='?', default=None, help="output filename")
parser.add_argument("-x", "--quantity", type=int, help="quantity multiplier")
group = parser.add_mutually_exclusive_group()
group.add_argument("-i", "--include", nargs='+', help="parts to include")
group.add_argument("-e", "--exclude", nargs='+', help="parts to exclude")
args = parser.parse_args()
tree = ET.parse(args.input)
parts = {}
missing_order_code = []
missing_footprint = []
inconsistent_order_code = {}
quantity_multiplier = 1
if args.quantity:
quantity_multiplier = args.quantity
def ignore_part(ref):
if args.include and ref not in args.include:
return True
elif args.exclude and ref in args.exclude:
return True
return False
for comp in tree.getroot().iter('comp'):
ref = comp.get('ref')
if ignore_part(ref):
continue
val = comp.findtext('value')
foot = comp.findtext('footprint')
fields = {}
part = {"ref": ref, "value": val, "footprint": foot, "fields": fields}
for field in comp.iter('field'):
name = field.get('name')
number = field.text
fields[name] = number
if name not in parts:
parts[name] = {}
if number not in parts[name]:
parts[name][number] = []
elif (parts[name][number][0]['value'] != val or
parts[name][number][0]['footprint'] != foot):
if name not in inconsistent_order_code:
inconsistent_order_code[name] = {}
if number not in inconsistent_order_code[name]:
inconsistent_order_code[name][number] = [
parts[name][number][0]]
inconsistent_order_code[name][number].append(part)
parts[name][number].append(part)
if not fields:
missing_order_code.append(part)
if not foot:
missing_footprint.append(part)
missing_footprint_report = "\n".join(
"{:6} {:15}".format(p['ref'], p['value']) for p in missing_footprint)
missing_order_code_report = "\n".join(
"{:6} {:15} {}".format(p['ref'], p['value'], p['footprint'])
for p in missing_order_code)
inconsistent_order_code_report = "\n".join(
" {}\n".format(name) + " " + "~"*len(name) + "\n" + "\n".join(
" {}: ".format(number) + "\n" + "\n".join(
" " +
"{:6} {:15} {}".format(p['ref'], p['value'], p['footprint'])
for p in inconsistent_order_code[name][number]
) for number in inconsistent_order_code[name]
) for name in inconsistent_order_code)
def farnell_formatter(number, parts):
qty = len(parts)
footprints = " ".join(set(
str(p['footprint']).split(":")[-1] for p in parts))
values = " ".join(set(p['value'] for p in parts))
note = "{}x {} {}".format(qty, values, footprints)
return "{},{},{}\n".format(number, qty * quantity_multiplier, note[:30])
def rs_formatter(number, parts):
qty = len(parts)
refs = "".join(p['ref'] for p in parts)
footprints = "-".join(set(
str(p['footprint']).split(":")[-1] for p in parts))
values = "-".join(set(p['value'] for p in parts))
return "{},{},,{}x--{}--{}--{}\n".format(
number, qty * quantity_multiplier, qty, values, footprints, refs)
def digikey_formatter(number, parts):
qty = len(parts)
refs = " ".join(p['ref'] for p in parts)
footprints = " ".join(set(
str(p['footprint']).split(":")[-1] for p in parts))
values = " ".join(set(p['value'] for p in parts))
return "{},{},{}x {} {} {}\n".format(
qty * quantity_multiplier, number, qty, values, footprints, refs)
vendor_bom_formatters = {
"farnell": farnell_formatter,
"rs": rs_formatter,
"digikey": digikey_formatter,
}
vendor_boms = []
for name in parts:
bom_text = " {}\n".format(name) + " " + "~"*len(name) + "\n"
for number in parts[name]:
if name.lower() in vendor_bom_formatters:
bom_text += vendor_bom_formatters[name.lower()](
number, parts[name][number])
else:
qty = len(parts[name][number])
bom_text += "{},{},{}x {} {}\n".format(
number, qty * quantity_multiplier, qty,
" ".join(p['ref'] for p in parts[name][number]),
",".join(set(
str(p['footprint']).split(":")[-1]
for p in parts[name][number])))
vendor_boms.append(bom_text)
vendor_boms = "\n\n".join(vendor_boms)
assembly_bom = "\n".join("\n".join(
"{:20} {:<3} {:15} {:<15} {:<}".format(
number, len(parts[name][number]),
",".join(set(str(p['value']) for p in parts[name][number])),
",".join(set(str(p['footprint']).split(":")[-1]
for p in parts[name][number])),
" ".join(sorted(p['ref'] for p in parts[name][number])))
for number in parts[name])
for name in parts)
report = """Bill Of Materials
=================
Source file: {source}
Date: {date}
Parts Missing Footprints
------------------------
{missing_footprint}
Parts Missing Order Codes
-------------------------
{missing_code}
Inconsistent Order Codes
------------------------
{inconsistent_code}
Vendor Specific BOMs
--------------------
{vendor_boms}
Assembly BOM
------------
{assembly_bom}
""".format(
source=os.path.basename(args.input),
date=datetime.datetime.now().isoformat(),
missing_footprint=missing_footprint_report,
missing_code=missing_order_code_report,
inconsistent_code=inconsistent_order_code_report,
vendor_boms=vendor_boms,
assembly_bom=assembly_bom)
print(report)
if args.output:
filename = args.output
if filename[-4:].lower() != ".bom":
filename += ".bom"
with open(filename, 'w') as f:
f.write(report)
| true | true |
f7fbbb5cfb10efcac3dcc39b47428d937d92ae88 | 1,581 | py | Python | tests/unit/__init__.py | dailymuse/musekafka-py | 3aec3d5ae620d5760733b2d9b73e9ac135dbd875 | [
"MIT"
] | 1 | 2020-11-10T22:15:49.000Z | 2020-11-10T22:15:49.000Z | tests/unit/__init__.py | dailymuse/musekafka-py | 3aec3d5ae620d5760733b2d9b73e9ac135dbd875 | [
"MIT"
] | 2 | 2020-11-13T16:32:49.000Z | 2020-11-17T05:36:31.000Z | tests/unit/__init__.py | dailymuse/musekafka-py | 3aec3d5ae620d5760733b2d9b73e9ac135dbd875 | [
"MIT"
] | null | null | null | import time
from typing import Any, Optional
def make_side_effect(messages, delay=None):
    """Make a side effect from a list of messages, optionally adding a delay.

    The returned callable ignores its arguments and yields the messages in
    order, one per call; it raises ``IndexError`` once they are exhausted.
    """
    remaining = list(messages)

    def side_effect(*args, **kwargs):
        if delay is not None:
            time.sleep(delay)
        return remaining.pop(0)

    return side_effect
class FakeMessage:
    """Implements the Message interface.

    You cannot create messages directly (C code does not expose init method),
    so we need a fake. Again, I will extend to fill out the interface as needed.
    """

    def __init__(
        self,
        topic: str,
        partition: int = 0,
        offset: int = 0,
        error: Optional[str] = None,
        value: Any = None,
    ) -> None:
        """Create the fake message."""
        self._topic = topic
        self._partition = partition
        self._offset = offset
        self._error = error
        self._value = value

    def __repr__(self) -> str:
        """Return a debug-friendly representation (eases test-failure diagnosis)."""
        return (
            f"{type(self).__name__}(topic={self._topic!r}, "
            f"partition={self._partition!r}, offset={self._offset!r}, "
            f"error={self._error!r}, value={self._value!r})"
        )

    def topic(self) -> str:
        """Return the topic this message is from."""
        return self._topic

    def partition(self) -> int:
        """Return the partition for this message."""
        return self._partition

    def offset(self) -> int:
        """Return the offset of this message."""
        return self._offset

    def error(self) -> Optional[str]:
        """Return an error associated with the message, if any."""
        return self._error

    def value(self) -> Any:
        """Return the value set for the message."""
        return self._value
| 26.79661 | 80 | 0.607211 | import time
from typing import Any, Optional
def make_side_effect(messages, delay=None):
msg_queue = list(reversed(messages))
sleep_delay = delay
def side_effect(*args, **kwargs):
if sleep_delay is not None:
time.sleep(sleep_delay)
return msg_queue.pop()
return side_effect
class FakeMessage:
def __init__(
self,
topic: str,
partition: int = 0,
offset: int = 0,
error: Optional[str] = None,
value: Any = None,
) -> None:
self._topic = topic
self._partition = partition
self._offset = offset
self._error = error
self._value = value
def topic(self) -> str:
return self._topic
def partition(self) -> int:
return self._partition
def offset(self) -> int:
return self._offset
def error(self) -> Optional[str]:
return self._error
def value(self) -> Any:
return self._value
| true | true |
f7fbbb900dac51f1f50deeaf9cdae673108631e6 | 2,702 | py | Python | examples/2D/flight_conditions/expected_airfoil.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | 1 | 2020-07-23T00:15:00.000Z | 2020-07-23T00:15:00.000Z | examples/2D/flight_conditions/expected_airfoil.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | null | null | null | examples/2D/flight_conditions/expected_airfoil.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import interpolate
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import aeropy.xfoil_module as xf
from aeropy.aero_module import Reynolds
from aeropy.geometry.airfoil import CST, create_x
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simps
from weather.scraper.flight_conditions import properties, Airframe
def expected(data, airFrame):
    """Return the probability-weighted expected lift-to-drag ratio.

    Args:
        data: tuple ``(alpha, velocity, lift_to_drag)``.  ``alpha`` and
            ``velocity`` are arrays whose flattened values are scored
            against the airframe's density model; ``lift_to_drag`` holds
            the corresponding efficiencies (entries may be None for
            failed aerodynamic runs, which are skipped).
        airFrame: flight-condition model exposing ``pdf.score_samples``
            returning log-densities (scikit-learn KDE convention —
            TODO confirm against the Airframe class).

    Returns:
        Sum of pdf-weighted L/D values normalized by the total pdf mass.
    """
    alpha, velocity, lift_to_drag = data
    samples = np.vstack([alpha.ravel(), velocity.ravel()]).T
    try:
        log_pdf = airFrame.pdf.score_samples(samples)
    except Exception as err:
        # The original branch printed the inputs and then hit the
        # undefined name `BREAk` (a NameError); report the failure
        # properly instead, keeping the offending inputs visible.
        raise ValueError(
            "score_samples failed for alpha={} velocity={}".format(alpha, velocity)
        ) from err
    pdf = np.exp(log_pdf.reshape(lift_to_drag.shape))
    total_pdf = sum(pdf)
    expected_value = 0
    for i in range(len(lift_to_drag)):
        if lift_to_drag[i] is not None:
            expected_value += (pdf[i] / total_pdf) * lift_to_drag[i]
    return expected_value
C172 = pickle.load(open('C172.p', 'rb'))
data = pickle.load(open('aerodynamics_3.p', 'rb'))
airfoil_database = pickle.load(open('fitting.p', 'rb'))
# list of strings
Al_database = np.array(airfoil_database['Al'])
Au_database = np.array(airfoil_database['Au'])
dl_database = np.array(airfoil_database['dl'])
du_database = np.array(airfoil_database['du'])
airfoil = 'from_database_3'
altitude = 10000
chord = 1
[AOAs, velocities] = C172.samples.T
AOAs = AOAs[0]
velocities = velocities[0]
# data = {'Names':airfoil_database['names'], 'AOA':AOAs, 'V':velocities,
# 'L/D':[], 'Expected':[]}
for j in range(len(data['L/D'])):
if len(data['L/D'][j]) == len(AOAs.flatten()):
data_i = np.array([AOAs.flatten(), velocities.flatten(),
np.array(data['L/D'][j])])
data['Expected'].append(expected(data_i, C172))
else:
data['Expected'].append(0)
max_value = max(data['Expected'])
max_index = data['Expected'].index(max_value)
# df = pd.DataFrame(np.array([data['Names'], data['Expected']]).T,
# columns = ['Names', 'Expected'])
df = pd.DataFrame(np.array([data['Expected']]).T,
columns = ['Expected'])
df['Expected'] = df['Expected'].astype(float)
df.hist(column = 'Expected', cumulative=True, normed=True, bins=20)
plt.show()
print(max_index )
print(df.max())
print(airfoil_database['names'][max_index-3:max_index+3])
# print(data['L/D'][max_index])
C172.plot_pdf()
x, y = C172.samples.T
plt.scatter(x, y, c='k')
plt.show()
| 30.022222 | 88 | 0.668394 | import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import interpolate
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import aeropy.xfoil_module as xf
from aeropy.aero_module import Reynolds
from aeropy.geometry.airfoil import CST, create_x
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simps
from weather.scraper.flight_conditions import properties, Airframe
def expected(data, airFrame):
alpha, velocity, lift_to_drag = data
try:
pdf = airFrame.pdf.score_samples(np.vstack([alpha.ravel(), velocity.ravel()]).T)
except:
print(alpha)
print(velocity)
BREAk
pdf = np.exp(pdf.reshape(lift_to_drag.shape))
expected_value = 0
numerator_list = []
denominator_list = []
N = len(alpha.ravel())
V = 1
total_pdf = sum(pdf)
for i in range(len(lift_to_drag)):
if lift_to_drag[i] is not None:
expected_value += (V/total_pdf)*pdf[i]*lift_to_drag[i]
return(expected_value)
C172 = pickle.load(open('C172.p', 'rb'))
data = pickle.load(open('aerodynamics_3.p', 'rb'))
airfoil_database = pickle.load(open('fitting.p', 'rb'))
Al_database = np.array(airfoil_database['Al'])
Au_database = np.array(airfoil_database['Au'])
dl_database = np.array(airfoil_database['dl'])
du_database = np.array(airfoil_database['du'])
airfoil = 'from_database_3'
altitude = 10000
chord = 1
[AOAs, velocities] = C172.samples.T
AOAs = AOAs[0]
velocities = velocities[0]
for j in range(len(data['L/D'])):
if len(data['L/D'][j]) == len(AOAs.flatten()):
data_i = np.array([AOAs.flatten(), velocities.flatten(),
np.array(data['L/D'][j])])
data['Expected'].append(expected(data_i, C172))
else:
data['Expected'].append(0)
max_value = max(data['Expected'])
max_index = data['Expected'].index(max_value)
df = pd.DataFrame(np.array([data['Expected']]).T,
columns = ['Expected'])
df['Expected'] = df['Expected'].astype(float)
df.hist(column = 'Expected', cumulative=True, normed=True, bins=20)
plt.show()
print(max_index )
print(df.max())
print(airfoil_database['names'][max_index-3:max_index+3])
C172.plot_pdf()
x, y = C172.samples.T
plt.scatter(x, y, c='k')
plt.show()
| true | true |
f7fbbcb5813cc1ca1e74c35f70d33ca552ff6fd7 | 585 | py | Python | src/python/playground/yaml2json.py | skitazaki/sandbox | abb807423430dd885ca12379d0f3dfafbfa56626 | [
"MIT"
] | 1 | 2015-09-05T14:02:10.000Z | 2015-09-05T14:02:10.000Z | src/python/playground/yaml2json.py | skitazaki/sandbox | abb807423430dd885ca12379d0f3dfafbfa56626 | [
"MIT"
] | null | null | null | src/python/playground/yaml2json.py | skitazaki/sandbox | abb807423430dd885ca12379d0f3dfafbfa56626 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Convert data format from YAML to JSON.
"""
import logging
import json
import click
import yaml
import yaml.scanner
from sandboxlib import main
@main.command("run")
@click.argument("file", type=click.File("r"), nargs=-1)
def yaml2json(file):
logger = logging.getLogger("")
for fh in file:
try:
cfg = yaml.safe_load(fh)
except yaml.scanner.ScannerError as e:
logger.error(f"Invalid YAML file: {e}")
continue
click.echo(json.dumps(cfg, indent=2))
if __name__ == "__main__":
main()
| 18.870968 | 55 | 0.623932 |
import logging
import json
import click
import yaml
import yaml.scanner
from sandboxlib import main
@main.command("run")
@click.argument("file", type=click.File("r"), nargs=-1)
def yaml2json(file):
logger = logging.getLogger("")
for fh in file:
try:
cfg = yaml.safe_load(fh)
except yaml.scanner.ScannerError as e:
logger.error(f"Invalid YAML file: {e}")
continue
click.echo(json.dumps(cfg, indent=2))
if __name__ == "__main__":
main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.