import os
from pathlib import Path
import tempfile
import subprocess
import idaapi
import copy
from externals import get_external
from .brick_utils import temp_env, temp_patch, set_directory, execfile
from contextlib import contextmanager
import sqlite3
DIAPHORA_DIR = get_external('diaphora')
def _export_this_idb(export_filename: str, **diaphora_kwargs: str):
with temp_env():
os.environ['DIAPHORA_AUTO'] = '1'
os.environ['DIAPHORA_EXPORT_FILE'] = export_filename
os.environ.update(diaphora_kwargs)
# Hook idaapi.qexit to avoid termination of the process.
with temp_patch(idaapi, 'qexit', lambda code: None):
with set_directory(DIAPHORA_DIR):
new_globals = copy.copy(globals())
new_globals['__file__'] = str(DIAPHORA_DIR / 'diaphora.py')
new_globals['__name__'] = '__main__'
execfile('diaphora.py', new_globals)
def export_this_idb(export_filename: str, use_decompiler=True, slow_heuristics=True):
diaphora_kwargs = {}
if use_decompiler:
diaphora_kwargs['DIAPHORA_USE_DECOMPILER'] = '1'
if slow_heuristics:
diaphora_kwargs['DIAPHORA_SLOW_HEURISTICS'] = '1'
_export_this_idb(export_filename, **diaphora_kwargs)
def calculate_diff(first: str, second: str, output_path: str=None) -> sqlite3.Connection:
if output_path is None:
(temp_fd, temp_name) = tempfile.mkstemp()
os.close(temp_fd)
output_path = temp_name
with set_directory(DIAPHORA_DIR):
args = ['python', 'diaphora.py', first, second, '-o', output_path]
# print('Executing {}'.format(' '.join(args)))
subprocess.check_call(args, creationflags=subprocess.CREATE_NO_WINDOW)
return sqlite3.connect(output_path)
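# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical helper showing how the functions above might be combined
# inside an IDA session: export the currently open IDB, then diff it against
# a previously exported Diaphora database. File names are placeholders.
def _example_diff_against(previous_export: str) -> sqlite3.Connection:
    fd, this_export = tempfile.mkstemp(suffix='.sqlite')
    os.close(fd)
    export_this_idb(this_export, use_decompiler=True, slow_heuristics=False)
    # calculate_diff() returns an open connection to the diff database;
    # the schema of that database is defined by Diaphora itself.
    return calculate_diff(previous_export, this_export)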
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
from frappe import _
from frappe.desk.form.document_follow import follow_document
from frappe.desk.doctype.notification_log.notification_log import enqueue_create_notification,\
get_title, get_title_html
from frappe.utils import cint
@frappe.whitelist()
def add(doctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0):
"""Share the given document with a user."""
if not user:
user = frappe.session.user
if not (flags or {}).get("ignore_share_permission"):
check_share_permission(doctype, name)
share_name = get_share_name(doctype, name, user, everyone)
if share_name:
doc = frappe.get_doc("DocShare", share_name)
else:
doc = frappe.new_doc("DocShare")
doc.update({
"user": user,
"share_doctype": doctype,
"share_name": name,
"everyone": cint(everyone)
})
if flags:
doc.flags.update(flags)
doc.update({
# always add read, since you are adding!
"read": 1,
"write": cint(write),
"submit": cint(submit),
"share": cint(share)
})
doc.save(ignore_permissions=True)
notify_assignment(user, doctype, name, everyone, notify=notify)
follow_document(doctype, name, user)
return doc
def remove(doctype, name, user, flags=None):
share_name = frappe.db.get_value("DocShare", {"user": user, "share_name": name,
"share_doctype": doctype})
if share_name:
frappe.delete_doc("DocShare", share_name, flags=flags)
@frappe.whitelist()
def set_permission(doctype, name, user, permission_to, value=1, everyone=0):
"""Set share permission."""
check_share_permission(doctype, name)
share_name = get_share_name(doctype, name, user, everyone)
value = int(value)
if not share_name:
if value:
share = add(doctype, name, user, everyone=everyone, **{permission_to: 1})
else:
# no share found, nothing to remove
share = {}
else:
share = frappe.get_doc("DocShare", share_name)
share.flags.ignore_permissions = True
share.set(permission_to, value)
if not value:
# un-set higher-order permissions too
if permission_to=="read":
share.read = share.write = share.submit = share.share = 0
share.save()
if not (share.read or share.write or share.submit or share.share):
share.delete()
share = {}
return share
@frappe.whitelist()
def get_users(doctype, name):
"""Get list of users with which this document is shared"""
return frappe.db.get_all("DocShare",
fields=["`name`", "`user`", "`read`", "`write`", "`submit`", "`share`", "everyone", "owner", "creation"],
filters=dict(
share_doctype=doctype,
share_name=name
))
def get_shared(doctype, user=None, rights=None):
"""Get list of shared document names for given user and DocType.
:param doctype: DocType of which shared names are queried.
:param user: User for which shared names are queried.
:param rights: List of rights for which the document is shared. List of `read`, `write`, `share`"""
if not user:
user = frappe.session.user
if not rights:
rights = ["read"]
filters = [[right, '=', 1] for right in rights]
filters += [['share_doctype', '=', doctype]]
or_filters = [['user', '=', user]]
if user != 'Guest':
or_filters += [['everyone', '=', 1]]
shared_docs = frappe.db.get_all('DocShare',
fields=['share_name'],
filters=filters,
or_filters=or_filters)
return [doc.share_name for doc in shared_docs]
def get_shared_doctypes(user=None):
"""Return list of doctypes in which documents are shared for the given user."""
if not user:
user = frappe.session.user
return frappe.db.sql_list("select distinct share_doctype from tabDocShare where (user=%s or everyone=1)", user)
def get_share_name(doctype, name, user, everyone):
if cint(everyone):
share_name = frappe.db.get_value("DocShare", {"everyone": 1, "share_name": name,
"share_doctype": doctype})
else:
share_name = frappe.db.get_value("DocShare", {"user": user, "share_name": name,
"share_doctype": doctype})
return share_name
def check_share_permission(doctype, name):
"""Check if the user can share with other users"""
if not frappe.has_permission(doctype, ptype="share", doc=name):
frappe.throw(_("No permission to {0} {1} {2}").format("share", doctype, name), frappe.PermissionError)
def notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):
if not (shared_by and doctype and doc_name) or everyone or not notify:
return
from frappe.utils import get_fullname
title = get_title(doctype, doc_name)
reference_user = get_fullname(frappe.session.user)
notification_message = _('{0} shared a document {1} {2} with you').format(
frappe.bold(reference_user), frappe.bold(doctype), get_title_html(title))
notification_doc = {
'type': 'Share',
'document_type': doctype,
'subject': notification_message,
'document_name': doc_name,
'from_user': frappe.session.user
}
enqueue_create_notification(shared_by, notification_doc)
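# Illustrative usage sketch (not part of the original module); the doctype,
# document name and user below are placeholders:
#
#     share = add("Task", "TASK-00001", user="jane@example.com", write=1, notify=1)
#     set_permission("Task", "TASK-00001", "jane@example.com", "submit", value=1)
#     shared_names = get_shared("Task", user="jane@example.com", rights=["read", "write"])
#     remove("Task", "TASK-00001", "jane@example.com")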
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
#
# ==================================================================
#
# Copyright (c) 2005-2014 Parallels Software International, Inc.
# Released under the terms of MIT license (see LICENSE for details)
#
# ==================================================================
#
# pylint: disable=no-self-use, maybe-no-member
""" artifactory: a python module for interfacing with JFrog Artifactory
This module is intended to serve as a logical descendant of pathlib
(https://docs.python.org/3/library/pathlib.html), a Python 3 module
for object-oriented path manipulations. As such, it implements
everything as closely as possible to the original, with a few
exceptions such as stat().
There are PureArtifactoryPath and ArtifactoryPath that can be used
to manipulate artifactory paths. See the pathlib docs for details on
how pure paths can be used.
"""
import collections
import errno
import fnmatch
import hashlib
import json
import logging
import os
import pathlib
import re
import sys
import urllib.parse
from itertools import islice
import dateutil.parser
import requests
from dohq_artifactory.admin import Group
from dohq_artifactory.admin import PermissionTarget
from dohq_artifactory.admin import Repository
from dohq_artifactory.admin import RepositoryLocal
from dohq_artifactory.admin import RepositoryRemote
from dohq_artifactory.admin import RepositoryVirtual
from dohq_artifactory.admin import User
from dohq_artifactory.auth import XJFrogArtApiAuth
from dohq_artifactory.exception import ArtifactoryException
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
try:
import configparser
except ImportError:
import ConfigParser as configparser
default_config_path = "~/.artifactory_python.cfg"
global_config = None
def read_config(config_path=default_config_path):
"""
Read configuration file and produce a dictionary of the following structure:
{'<instance1>': {'username': '<user>', 'password': '<pass>',
'verify': <True/False>, 'cert': '<path-to-cert>'}
'<instance2>': {...},
...}
Format of the file:
[https://artifactory-instance.local/artifactory]
username = foo
password = @dmin
verify = false
cert = ~/path-to-cert
config-path - specifies where to read the config from
"""
config_path = os.path.expanduser(config_path)
if not os.path.isfile(config_path):
raise OSError(
errno.ENOENT, "Artifactory configuration file not found: '%s'" % config_path
)
p = configparser.ConfigParser()
p.read(config_path)
result = {}
for section in p.sections():
username = (
p.get(section, "username") if p.has_option(section, "username") else None
)
password = (
p.get(section, "password") if p.has_option(section, "password") else None
)
verify = (
p.getboolean(section, "verify") if p.has_option(section, "verify") else True
)
cert = p.get(section, "cert") if p.has_option(section, "cert") else None
result[section] = {
"username": username,
"password": password,
"verify": verify,
"cert": cert,
}
# certificate path may contain '~', and we'd better expand it properly
if result[section]["cert"]:
result[section]["cert"] = os.path.expanduser(result[section]["cert"])
return result
def read_global_config(config_path=default_config_path):
"""
Attempt to read global configuration file and store the result in
'global_config' variable.
config_path - specifies where to read the config from
"""
global global_config
if global_config is None:
try:
global_config = read_config(config_path)
except OSError:
pass
def without_http_prefix(url):
"""
Returns a URL without the http:// or https:// prefixes
"""
if url.startswith("http://"):
return url[7:]
elif url.startswith("https://"):
return url[8:]
return url
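# For example (illustrative):
#   without_http_prefix("https://example.com/artifactory") == "example.com/artifactory"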
def get_base_url(config, url):
"""
Look through config and try to find best matching base for 'url'
config - result of read_config() or read_global_config()
url - artifactory url to search the base for
"""
if not config:
return None
# First, try to search for the best match
for item in config:
if url.startswith(item):
return item
# Then search for indirect match
for item in config:
if without_http_prefix(url).startswith(without_http_prefix(item)):
return item
def get_config_entry(config, url):
"""
Look through config and try to find best matching entry for 'url'
config - result of read_config() or read_global_config()
url - artifactory url to search the config for
"""
if not config:
return None
# First, try to search for the best match
if url in config:
return config[url]
# Then search for indirect match
for item in config:
if without_http_prefix(item) == without_http_prefix(url):
return config[item]
return None
def get_global_config_entry(url):
"""
Look through global config and try to find best matching entry for 'url'
url - artifactory url to search the config for
"""
read_global_config()
return get_config_entry(global_config, url)
def get_global_base_url(url):
"""
Look through global config and try to find best matching base for 'url'
url - artifactory url to search the base for
"""
read_global_config()
return get_base_url(global_config, url)
def md5sum(filename):
"""
Calculates md5 hash of a file
"""
md5 = hashlib.md5()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b""):
md5.update(chunk)
return md5.hexdigest()
def sha1sum(filename):
"""
Calculates sha1 hash of a file
"""
sha1 = hashlib.sha1()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(128 * sha1.block_size), b""):
sha1.update(chunk)
return sha1.hexdigest()
def sha256sum(filename):
"""
Calculates sha256 hash of a file
"""
sha256 = hashlib.sha256()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(128 * sha256.block_size), b""):
sha256.update(chunk)
return sha256.hexdigest()
def chunks(data, size):
"""
    Get chunks of a dict; copied as-is from https://stackoverflow.com/a/8290508/6753144
"""
it = iter(data)
for _ in range(0, len(data), size):
yield {k: data[k] for k in islice(it, size)}
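# For example (illustrative):
#   list(chunks({"a": 1, "b": 2, "c": 3}, 2)) == [{"a": 1, "b": 2}, {"c": 3}]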
class HTTPResponseWrapper(object):
"""
This class is intended as a workaround for 'requests' module
inability to consume HTTPResponse as a streaming upload source.
I.e. if you want to download data from one url and upload it
to another.
The problem is that underlying code uses seek() and tell() to
calculate stream length, but HTTPResponse throws a NotImplementedError,
according to python file-like object implementation guidelines, since
the stream is obviously non-rewindable.
Another problem arises when requests.put() tries to calculate stream
length with other methods. It tries several ways, including len()
and __len__(), and falls back to reading the whole stream. But
since the stream is not rewindable, by the time it tries to send
actual content, there is nothing left in the stream.
"""
def __init__(self, obj):
self.obj = obj
def __getattr__(self, attr):
"""
Redirect member requests except seek() to original object
"""
if attr in self.__dict__:
return self.__dict__[attr]
if attr == "seek":
raise AttributeError
return getattr(self.obj, attr)
def __len__(self):
"""
__len__ will be used by requests to determine stream size
"""
return int(self.getheader("content-length"))
def encode_matrix_parameters(parameters):
"""
Performs encoding of url matrix parameters from dictionary to
a string.
See http://www.w3.org/DesignIssues/MatrixURIs.html for specs.
"""
result = []
for param in iter(sorted(parameters)):
if isinstance(parameters[param], (list, tuple)):
value = (";%s=" % (param)).join(parameters[param])
else:
value = parameters[param]
result.append("=".join((param, value)))
return ";".join(result)
def escape_chars(s):
"""
Performs character escaping of comma, pipe and equals characters
"""
return "".join(["\\" + ch if ch in "=|," else ch for ch in s])
def encode_properties(parameters):
"""
Performs encoding of url parameters from dictionary to a string. It does
not escape backslash because it is not needed.
See: http://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-SetItemProperties
"""
result = []
for param in iter(sorted(parameters)):
if isinstance(parameters[param], (list, tuple)):
value = ",".join([escape_chars(x) for x in parameters[param]])
else:
value = escape_chars(parameters[param])
result.append("=".join((param, value)))
return ";".join(result)
class _ArtifactoryFlavour(pathlib._Flavour):
"""
Implements Artifactory-specific pure path manipulations.
    I.e. what 'drive', 'root' and 'path' are, and how to split a full path into
    components.
    See the 'pathlib' documentation for an explanation of how those are used.
drive: in context of artifactory, it's the base URI like
http://mysite/artifactory
root: repository, e.g. 'libs-snapshot-local' or 'ext-release-local'
path: relative artifact path within the repository
"""
sep = "/"
altsep = "/"
has_drv = True
pathmod = pathlib.posixpath
is_supported = True
def _get_base_url(self, url):
return get_global_base_url(url)
def compile_pattern(self, pattern):
return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch
def parse_parts(self, parts):
drv, root, parsed = super(_ArtifactoryFlavour, self).parse_parts(parts)
return drv, root, parsed
def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
drv2, root2, parts2 = super(_ArtifactoryFlavour, self).join_parsed_parts(
drv, root, parts, drv2, root2, parts2
)
# quick hack for https://github.com/devopshq/artifactory/issues/29
# drive or repository must start with / , if not - add it
if not drv2.endswith("/") and not root2.startswith("/"):
drv2 = drv2 + self.sep
return drv2, root2, parts2
def splitroot(self, part, sep=sep):
"""
Splits path string into drive, root and relative path
Uses '/artifactory/' as a splitting point in URI. Everything
before it, including '/artifactory/' itself is treated as drive.
The next folder is treated as root, and everything else is taken
for relative path.
        If '/artifactory/' is not in the URI, everything before the path
component is treated as drive. The first folder of the path is
treated as root, and everything else is taken for relative path.
"""
drv = ""
root = ""
base = self._get_base_url(part)
if base and without_http_prefix(part).startswith(without_http_prefix(base)):
mark = without_http_prefix(base).rstrip(sep) + sep
parts = part.split(mark)
elif sep not in part:
return "", "", part
else:
url = urllib3.util.parse_url(part)
if (
without_http_prefix(part).strip("/") == part.strip("/")
and url.path
and not url.path.strip("/").startswith("artifactory")
):
return "", "", part
if url.path is None or url.path == sep:
if url.scheme:
return part.rstrip(sep), "", ""
return "", "", part
elif url.path.lstrip("/").startswith("artifactory"):
mark = sep + "artifactory" + sep
parts = part.split(mark)
else:
path = self._get_path(part)
drv = part.rpartition(path)[0]
path_parts = path.strip(sep).split(sep)
root = sep + path_parts[0] + sep
rest = sep.join(path_parts[1:])
return drv, root, rest
if len(parts) >= 2:
drv = parts[0] + mark.rstrip(sep)
rest = sep + mark.join(parts[1:])
elif part.endswith(mark.rstrip(sep)):
drv = part
rest = ""
else:
rest = part
if not rest:
return drv, "", ""
if rest == sep:
return drv, "", ""
if rest.startswith(sep):
root, _, part = rest[1:].partition(sep)
root = sep + root + sep
return drv, root, part
def _get_path(self, url):
"""
Get path of a url and return without percent-encoding
http://example.com/dir/file.html
path = /dir/file.html
http://example.com/dir/inval:d-ch@rs.html
path = /dir/inval:d-ch@rs.html
!= /dir/inval%3Ad-ch%40rs.html
:param url: Full URL to parse
:return: path: /dir/file.html
"""
parsed_url = urllib3.util.parse_url(url)
path = parsed_url.path
if path in url:
            # URL doesn't contain percent-encoded bytes
# http://example.com/dir/file.html
# No further processing necessary
return path
unquoted_path = urllib.parse.unquote(parsed_url.path)
if unquoted_path in url:
# URL contained /?#@: and is percent-encoded by urllib3.util.parse_url()
# http://example.com/d:r/f:le.html became http://example.com/d%3Ar/f%3Ale.html
# Decode back to http://example.com/d:r/f:le.html using urllib.parse.unquote()
return unquoted_path
# Is this ever reached?
raise ValueError("Can't parse URL {}".format(url))
def casefold(self, string):
"""
Convert path string to default FS case if it's not
case-sensitive. Do nothing otherwise.
"""
return string
def casefold_parts(self, parts):
"""
Convert path parts to default FS case if it's not
case sensitive. Do nothing otherwise.
"""
return parts
def resolve(self, path):
"""
Resolve all symlinks and relative paths in 'path'
"""
return path
def is_reserved(self, _):
"""
Returns True if the file is 'reserved', e.g. device node or socket
For Artifactory there are no reserved files.
"""
return False
def make_uri(self, path):
"""
Return path as URI. For Artifactory this is the same as returning
'path' unmodified.
"""
return path
class _ArtifactorySaaSFlavour(_ArtifactoryFlavour):
def _get_base_url(self, url):
split_url = pathlib.PurePosixPath(url)
if len(split_url.parts) < 3:
return None
return urllib.parse.urljoin(
"//".join((split_url.parts[0], split_url.parts[1])), split_url.parts[2]
)
_artifactory_flavour = _ArtifactoryFlavour()
_saas_artifactory_flavour = _ArtifactorySaaSFlavour()
ArtifactoryFileStat = collections.namedtuple(
"ArtifactoryFileStat",
[
"ctime",
"mtime",
"created_by",
"modified_by",
"mime_type",
"size",
"sha1",
"sha256",
"md5",
"is_dir",
"children",
],
)
class _ScandirIter:
"""
For compatibility with different python versions.
Pathlib:
    - prior to 3.8 - Use it as an iterator
    - 3.8 - Use it as a context manager
"""
def __init__(self, iterator):
self.iterator = iterator
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __iter__(self):
return self.iterator
class _ArtifactoryAccessor(pathlib._Accessor):
"""
Implements operations with Artifactory REST API
"""
def rest_get(
self,
url,
params=None,
headers=None,
session=None,
verify=True,
cert=None,
timeout=None,
):
"""
Perform a GET request to url with requests.session
"""
res = session.get(
url,
params=params,
headers=headers,
verify=verify,
cert=cert,
timeout=timeout,
)
return res.text, res.status_code
def rest_put(
self,
url,
params=None,
headers=None,
session=None,
verify=True,
cert=None,
timeout=None,
):
"""
Perform a PUT request to url with requests.session
"""
res = session.put(
url,
params=params,
headers=headers,
verify=verify,
cert=cert,
timeout=timeout,
)
return res.text, res.status_code
def rest_post(
self,
url,
params=None,
headers=None,
session=None,
verify=True,
cert=None,
timeout=None,
):
"""
Perform a POST request to url with requests.session
"""
res = session.post(
url,
params=params,
headers=headers,
verify=verify,
cert=cert,
timeout=timeout,
)
return res.text, res.status_code
def rest_del(
self, url, params=None, session=None, verify=True, cert=None, timeout=None
):
"""
Perform a DELETE request to url with requests.session
"""
res = session.delete(
url, params=params, verify=verify, cert=cert, timeout=timeout
)
return res.text, res.status_code
def rest_put_stream(
self,
url,
stream,
headers=None,
session=None,
verify=True,
cert=None,
timeout=None,
):
"""
Perform a chunked PUT request to url with requests.session
This is specifically to upload files.
"""
res = session.put(
url, headers=headers, data=stream, verify=verify, cert=cert, timeout=timeout
)
return res.text, res.status_code
def rest_get_stream(self, url, session=None, verify=True, cert=None, timeout=None):
"""
Perform a chunked GET request to url with requests.session
This is specifically to download files.
"""
res = session.get(url, stream=True, verify=verify, cert=cert, timeout=timeout)
return res.raw, res.status_code
def get_stat_json(self, pathobj):
"""
Request remote file/directory status info
Returns a json object as specified by Artifactory REST API
"""
url = "/".join(
[
pathobj.drive.rstrip("/"),
"api/storage",
str(pathobj.relative_to(pathobj.drive)).strip("/"),
]
)
text, code = self.rest_get(
url,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code == 404 and ("Unable to find item" in text or "Not Found" in text):
raise OSError(2, "No such file or directory: '%s'" % url)
if code != 200:
raise RuntimeError(text)
return json.loads(text)
def stat(self, pathobj):
"""
Request remote file/directory status info
Returns an object of class ArtifactoryFileStat.
The following fields are available:
ctime -- file creation time
mtime -- file modification time
created_by -- original uploader
modified_by -- last user modifying the file
mime_type -- MIME type of the file
size -- file size
sha1 -- SHA1 digest of the file
sha256 -- SHA256 digest of the file
md5 -- MD5 digest of the file
is_dir -- 'True' if path is a directory
children -- list of children names
"""
jsn = self.get_stat_json(pathobj)
is_dir = False
if "size" not in jsn:
is_dir = True
children = None
if "children" in jsn:
children = [child["uri"][1:] for child in jsn["children"]]
checksums = jsn.get("checksums", {})
stat = ArtifactoryFileStat(
ctime=dateutil.parser.parse(jsn["created"]),
mtime=dateutil.parser.parse(jsn["lastModified"]),
created_by=jsn.get("createdBy"),
modified_by=jsn.get("modifiedBy"),
mime_type=jsn.get("mimeType"),
size=int(jsn.get("size", "0")),
sha1=checksums.get("sha1"),
sha256=checksums.get("sha256"),
md5=checksums.get("md5"),
is_dir=is_dir,
children=children,
)
return stat
def is_dir(self, pathobj):
"""
Returns True if given path is a directory
"""
try:
stat = self.stat(pathobj)
return stat.is_dir
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
return False
def is_file(self, pathobj):
"""
Returns True if given path is a regular file
"""
try:
stat = self.stat(pathobj)
return not stat.is_dir
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
return False
def listdir(self, pathobj):
"""
Returns a list of immediate sub-directories and files in path
"""
stat = self.stat(pathobj)
if not stat.is_dir:
raise OSError(20, "Not a directory: %s" % str(pathobj))
return stat.children
def mkdir(self, pathobj, _):
"""
Creates remote directory
Note that this operation is not recursive
"""
if not pathobj.drive or not pathobj.root:
raise RuntimeError("Full path required: '%s'" % str(pathobj))
if pathobj.exists():
raise OSError(17, "File exists: '%s'" % str(pathobj))
url = str(pathobj) + "/"
text, code = self.rest_put(
url,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code != 201:
raise RuntimeError("%s %d" % (text, code))
def rmdir(self, pathobj):
"""
Removes a directory
"""
stat = self.stat(pathobj)
if not stat.is_dir:
raise OSError(20, "Not a directory: '%s'" % str(pathobj))
url = str(pathobj) + "/"
text, code = self.rest_del(
url, session=pathobj.session, verify=pathobj.verify, cert=pathobj.cert
)
if code not in (200, 202, 204):
raise RuntimeError("Failed to delete directory: '%s'" % text)
def unlink(self, pathobj):
"""
Removes a file
"""
# TODO: Why do we forbid remove folder?
# if stat.is_dir:
# raise IsADirectoryError(1, "Operation not permitted: {!r}".format(pathobj))
url = str(pathobj)
text, code = self.rest_del(
url,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code not in (200, 202, 204):
raise FileNotFoundError("Failed to delete file: {} {!r}".format(code, text))
def touch(self, pathobj):
"""
Create an empty file
"""
if not pathobj.drive or not pathobj.root:
raise RuntimeError("Full path required")
if pathobj.exists():
return
url = str(pathobj)
text, code = self.rest_put(
url,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code != 201:
raise RuntimeError("%s %d" % (text, code))
def owner(self, pathobj):
"""
Returns file owner
This makes little sense for Artifactory, but to be consistent
with pathlib, we return modified_by instead, if available
"""
stat = self.stat(pathobj)
if not stat.is_dir:
return stat.modified_by
else:
return "nobody"
def creator(self, pathobj):
"""
Returns file creator
This makes little sense for Artifactory, but to be consistent
with pathlib, we return created_by instead, if available
"""
stat = self.stat(pathobj)
if not stat.is_dir:
return stat.created_by
else:
return "nobody"
def open(self, pathobj):
"""
Opens the remote file and returns a file-like object HTTPResponse
Given the nature of HTTP streaming, this object doesn't support
seek()
"""
url = str(pathobj)
raw, code = self.rest_get_stream(
url,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code != 200:
raise RuntimeError(code)
return raw
def deploy(self, pathobj, fobj, md5=None, sha1=None, sha256=None, parameters=None):
"""
Uploads a given file-like object
HTTP chunked encoding will be attempted
"""
if isinstance(fobj, urllib3.response.HTTPResponse):
fobj = HTTPResponseWrapper(fobj)
url = str(pathobj)
if parameters:
url += ";%s" % encode_matrix_parameters(parameters)
headers = {}
if md5:
headers["X-Checksum-Md5"] = md5
if sha1:
headers["X-Checksum-Sha1"] = sha1
if sha256:
headers["X-Checksum-Sha256"] = sha256
text, code = self.rest_put_stream(
url,
fobj,
headers=headers,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code not in (200, 201):
raise RuntimeError(text)
def copy(self, src, dst, suppress_layouts=False):
"""
Copy artifact from src to dst
"""
url = "/".join(
[
src.drive.rstrip("/"),
"api/copy",
str(src.relative_to(src.drive)).rstrip("/"),
]
)
params = {
"to": str(dst.relative_to(dst.drive)).rstrip("/"),
"suppressLayouts": int(suppress_layouts),
}
text, code = self.rest_post(
url,
params=params,
session=src.session,
verify=src.verify,
cert=src.cert,
timeout=src.timeout,
)
if code not in (200, 201):
raise RuntimeError(text)
def move(self, src, dst):
"""
Move artifact from src to dst
"""
url = "/".join(
[
src.drive.rstrip("/"),
"api/move",
str(src.relative_to(src.drive)).rstrip("/"),
]
)
params = {"to": str(dst.relative_to(dst.drive)).rstrip("/")}
text, code = self.rest_post(
url,
params=params,
session=src.session,
verify=src.verify,
cert=src.cert,
timeout=src.timeout,
)
if code not in (200, 201):
raise RuntimeError(text)
def get_properties(self, pathobj):
"""
Get artifact properties and return them as a dictionary.
"""
url = "/".join(
[
pathobj.drive.rstrip("/"),
"api/storage",
str(pathobj.relative_to(pathobj.drive)).strip("/"),
]
)
params = "properties"
text, code = self.rest_get(
url,
params=params,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code == 404 and ("Unable to find item" in text or "Not Found" in text):
raise OSError(2, "No such file or directory: '%s'" % url)
if code == 404 and "No properties could be found" in text:
return {}
if code != 200:
raise RuntimeError(text)
return json.loads(text)["properties"]
def set_properties(self, pathobj, props, recursive):
"""
Set artifact properties
"""
url = "/".join(
[
pathobj.drive.rstrip("/"),
"api/storage",
str(pathobj.relative_to(pathobj.drive)).strip("/"),
]
)
params = {"properties": encode_properties(props)}
if not recursive:
params["recursive"] = "0"
text, code = self.rest_put(
url,
params=params,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code == 404 and ("Unable to find item" in text or "Not Found" in text):
raise OSError(2, "No such file or directory: '%s'" % url)
if code != 204:
raise RuntimeError(text)
def del_properties(self, pathobj, props, recursive):
"""
Delete artifact properties
"""
if isinstance(props, str):
props = (props,)
url = "/".join(
[
pathobj.drive.rstrip("/"),
"api/storage",
str(pathobj.relative_to(pathobj.drive)).strip("/"),
]
)
params = {"properties": ",".join(sorted(props))}
if not recursive:
params["recursive"] = "0"
text, code = self.rest_del(
url,
params=params,
session=pathobj.session,
verify=pathobj.verify,
cert=pathobj.cert,
timeout=pathobj.timeout,
)
if code == 404 and ("Unable to find item" in text or "Not Found" in text):
raise OSError(2, "No such file or directory: '%s'" % url)
if code != 204:
raise RuntimeError(text)
def scandir(self, pathobj):
return _ScandirIter((pathobj.joinpath(x) for x in self.listdir(pathobj)))
def writeto(self, fd, out, chunk_size):
url = str(fd)
res = fd.session.get(url, stream=True, verify=True, cert=None)
if res.status_code != 200:
raise RuntimeError(res.status_code)
for chunk in res.iter_content(chunk_size=chunk_size):
if chunk:
out.write(chunk)
_artifactory_accessor = _ArtifactoryAccessor()
class ArtifactoryProAccessor(_ArtifactoryAccessor):
"""
TODO: implement OpenSource/Pro differentiation
"""
class ArtifactoryOpensourceAccessor(_ArtifactoryAccessor):
"""
TODO: implement OpenSource/Pro differentiation
"""
class PureArtifactoryPath(pathlib.PurePath):
"""
A class to work with Artifactory paths that doesn't connect
to Artifactory server. I.e. it supports only basic path
operations.
"""
_flavour = _artifactory_flavour
__slots__ = ()
class _FakePathTemplate(object):
def __init__(self, accessor):
self._accessor = accessor
class ArtifactoryPath(pathlib.Path, PureArtifactoryPath):
"""
Implements fully-featured pathlib-like Artifactory interface
Unless explicitly mentioned, all methods copy the behaviour
of their pathlib counterparts.
    Note that because of peculiarities of pathlib.Path, the methods
    that create new path objects have to also manually set the 'auth'
field, since the copying strategy of pathlib.Path is not based
on regular constructors, but rather on templates.
"""
# Pathlib limits what members can be present in 'Path' class,
# so authentication information has to be added via __slots__
__slots__ = ("auth", "verify", "cert", "session", "timeout")
def __new__(cls, *args, **kwargs):
"""
pathlib.Path overrides __new__ in order to create objects
of different classes based on platform. This magic prevents
us from adding an 'auth' argument to the constructor.
So we have to first construct ArtifactoryPath by Pathlib and
only then add auth information.
"""
obj = pathlib.Path.__new__(cls, *args, **kwargs)
cfg_entry = get_global_config_entry(obj.drive)
# Auth section
apikey = kwargs.get("apikey")
auth_type = kwargs.get("auth_type")
if apikey is None:
auth = kwargs.get("auth")
obj.auth = auth if auth_type is None else auth_type(*auth)
else:
logging.debug("Use XJFrogApiAuth")
obj.auth = XJFrogArtApiAuth(apikey)
if obj.auth is None and cfg_entry:
auth = (cfg_entry["username"], cfg_entry["password"])
obj.auth = auth if auth_type is None else auth_type(*auth)
obj.cert = kwargs.get("cert")
obj.session = kwargs.get("session")
obj.timeout = kwargs.get("timeout")
if obj.cert is None and cfg_entry:
obj.cert = cfg_entry["cert"]
if "verify" in kwargs:
obj.verify = kwargs.get("verify")
elif cfg_entry:
obj.verify = cfg_entry["verify"]
else:
obj.verify = True
if obj.session is None:
obj.session = requests.Session()
obj.session.auth = obj.auth
obj.session.cert = obj.cert
obj.session.verify = obj.verify
obj.session.timeout = obj.timeout
return obj
def _init(self, *args, **kwargs):
if "template" not in kwargs:
kwargs["template"] = _FakePathTemplate(_artifactory_accessor)
super(ArtifactoryPath, self)._init(*args, **kwargs)
@property
def top(self):
obj = ArtifactoryPath(self.drive)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
@property
def parent(self):
"""
The logical parent of the path.
"""
obj = super(ArtifactoryPath, self).parent
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def with_name(self, name):
"""
Return a new path with the file name changed.
"""
obj = super(ArtifactoryPath, self).with_name(name)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def with_suffix(self, suffix):
"""
Return a new path with the file suffix changed (or added, if none).
"""
obj = super(ArtifactoryPath, self).with_suffix(suffix)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def relative_to(self, *other):
"""
Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
obj = super(ArtifactoryPath, self).relative_to(*other)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def joinpath(self, *args):
"""
Combine this path with one or several arguments, and return a
new path representing either a subpath (if all arguments are relative
paths) or a totally different path (if one of the arguments is
anchored).
"""
obj = super(ArtifactoryPath, self).joinpath(*args)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def __truediv__(self, key):
"""
Join two paths with '/'
"""
obj = super(ArtifactoryPath, self).__truediv__(key)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def __rtruediv__(self, key):
"""
Join two paths with '/'
"""
obj = super(ArtifactoryPath, self).__truediv__(key)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
if sys.version_info < (3,):
__div__ = __truediv__
__rdiv__ = __rtruediv__
def _make_child(self, args):
obj = super(ArtifactoryPath, self)._make_child(args)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def _make_child_relpath(self, args):
obj = super(ArtifactoryPath, self)._make_child_relpath(args)
obj.auth = self.auth
obj.verify = self.verify
obj.cert = self.cert
obj.session = self.session
obj.timeout = self.timeout
return obj
def __iter__(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
for name in self._accessor.listdir(self):
if name in [".", ".."]:
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
def open(self, mode="r", buffering=-1, encoding=None, errors=None, newline=None):
"""
Open the given Artifactory URI and return a file-like object
HTTPResponse, as if it was a regular filesystem object.
The only difference is that this object doesn't support seek()
"""
if mode != "r" or buffering != -1 or encoding or errors or newline:
raise NotImplementedError(
"Only the default open() " + "arguments are supported"
)
return self._accessor.open(self)
def download_folder_archive(self, archive_type="zip", check_sum=False):
"""
        Convert the URL into a link that downloads the specified folder as an
        archive, using the Artifactory REST API.
        Requires "Enable Folder Download" to be set in Artifactory.
        :param: archive_type (str): one of the supported archive types (zip/tar/tar.gz/tgz)
        :param: check_sum (bool): whether checksum files should be included in the download
:return: raw object for download
"""
if archive_type not in ["zip", "tar", "tar.gz", "tgz"]:
            raise NotImplementedError(archive_type + " is not supported by the current API")
archive_url = (
self.drive
+ "/api/archive/download/"
+ self.repo
+ self.path_in_repo
+ "?archiveType="
+ archive_type
)
if check_sum:
archive_url += "&includeChecksumFiles=true"
with self.joinpath(archive_url) as archive_cls:
return self._accessor.open(archive_cls)
def owner(self):
"""
Returns file owner.
This makes little sense for Artifactory, but to be consistent
with pathlib, we return modified_by instead, if available.
"""
return self._accessor.owner(self)
def creator(self):
"""
Returns file creator.
This makes little sense for Artifactory, but to be consistent
with pathlib, we return created_by instead, if available.
"""
return self._accessor.creator(self)
def is_dir(self):
"""
Whether this path is a directory.
"""
return self._accessor.is_dir(self)
def is_file(self):
"""
Whether this path is a regular file.
"""
return self._accessor.is_file(self)
def is_symlink(self):
"""
Whether this path is a symlink.
        Since Artifactory doesn't have special files, returns False.
"""
return False
def is_socket(self):
"""
Whether this path is a socket.
        Since Artifactory doesn't have special files, returns False.
"""
return False
def is_fifo(self):
"""
Whether this path is a fifo.
        Since Artifactory doesn't have special files, returns False.
"""
return False
def is_block_device(self):
"""
Whether this path is a block device.
        Since Artifactory doesn't have special files, returns False.
"""
return False
def is_char_device(self):
"""
Whether this path is a character device.
        Since Artifactory doesn't have special files, returns False.
"""
return False
def touch(self, mode=0o666, exist_ok=True):
"""
Create a file if it doesn't exist.
Mode is ignored by Artifactory.
"""
if self.exists() and not exist_ok:
raise OSError(17, "File exists", str(self))
self._accessor.touch(self)
def chmod(self, mode):
"""
Throw NotImplementedError
Changing access rights makes no sense for Artifactory.
"""
raise NotImplementedError()
def lchmod(self, mode):
"""
Throw NotImplementedError
Changing access rights makes no sense for Artifactory.
"""
raise NotImplementedError()
def symlink_to(self, target, target_is_directory=False):
"""
Throw NotImplementedError
Artifactory doesn't have symlinks
"""
raise NotImplementedError()
def deploy(self, fobj, md5=None, sha1=None, sha256=None, parameters={}):
"""
Upload the given file object to this path
"""
return self._accessor.deploy(
self, fobj, md5=md5, sha1=sha1, sha256=sha256, parameters=parameters
)
def deploy_file(
self, file_name, calc_md5=True, calc_sha1=True, calc_sha256=True, parameters={}
):
"""
Upload the given file to this path
"""
md5 = md5sum(file_name) if calc_md5 else None
sha1 = sha1sum(file_name) if calc_sha1 else None
sha256 = sha256sum(file_name) if calc_sha256 else None
target = self
if self.is_dir():
target = self / pathlib.Path(file_name).name
with open(file_name, "rb") as fobj:
target.deploy(
fobj, md5=md5, sha1=sha1, sha256=sha256, parameters=parameters
)
def deploy_deb(
self, file_name, distribution, component, architecture, parameters={}
):
"""
Convenience method to deploy .deb packages
Keyword arguments:
file_name -- full path to local file that will be deployed
distribution -- debian distribution (e.g. 'wheezy')
component -- repository component (e.g. 'main')
architecture -- package architecture (e.g. 'i386')
parameters -- attach any additional metadata
"""
params = {
"deb.distribution": distribution,
"deb.component": component,
"deb.architecture": architecture,
}
params.update(parameters)
self.deploy_file(file_name, parameters=params)
def copy(self, dst, suppress_layouts=False):
"""
        Copy artifact from this path to destination.
If files are on the same instance of artifactory, lightweight (local)
copying will be attempted.
The suppress_layouts parameter, when set to True, will allow artifacts
from one path to be copied directly into another path without enforcing
repository layouts. The default behaviour is to copy to the repository
root, but remap the [org], [module], [baseVer], etc. structure to the
target repository.
        For example, suppose we have a builds repository using the default maven2
        layout, where we publish our builds, and a published repository where a
        directory for production and a directory for staging environments should
        hold the currently promoted builds. How do we copy the contents of a
        build over to the production folder?
>>> from artifactory import ArtifactoryPath
>>> source = ArtifactoryPath("http://example.com/artifactory/builds/product/product/1.0.0/")
>>> dest = ArtifactoryPath("http://example.com/artifactory/published/production/")
Using copy with the default, suppress_layouts=False, the artifacts inside
builds/product/product/1.0.0/ will not end up in the published/production
path as we intended, but rather the entire structure product/product/1.0.0
is placed in the destination repo.
>>> source.copy(dest)
        >>> for p in dest: print(p)
http://example.com/artifactory/published/production/foo-0.0.1.gz
http://example.com/artifactory/published/production/foo-0.0.1.pom
>>> for p in ArtifactoryPath("http://example.com/artifactory/published/product/product/1.0.0.tar"):
        ... print(p)
http://example.com/artifactory/published/product/product/1.0.0/product-1.0.0.tar.gz
http://example.com/artifactory/published/product/product/1.0.0/product-1.0.0.tar.pom
Using copy with suppress_layouts=True, the contents inside our source are copied
directly inside our dest as we intended.
>>> source.copy(dest, suppress_layouts=True)
        >>> for p in dest: print(p)
http://example.com/artifactory/published/production/foo-0.0.1.gz
http://example.com/artifactory/published/production/foo-0.0.1.pom
http://example.com/artifactory/published/production/product-1.0.0.tar.gz
http://example.com/artifactory/published/production/product-1.0.0.tar.pom
"""
if self.drive.rstrip("/") == dst.drive.rstrip("/"):
self._accessor.copy(self, dst, suppress_layouts=suppress_layouts)
else:
with self.open() as fobj:
dst.deploy(fobj)
def move(self, dst):
"""
        Move artifact from this path to destination.
"""
if self.drive.rstrip("/") != dst.drive.rstrip("/"):
raise NotImplementedError("Moving between instances is not implemented yet")
self._accessor.move(self, dst)
@property
def properties(self):
"""
Fetch artifact properties
"""
return self._accessor.get_properties(self)
@properties.setter
def properties(self, properties):
properties_to_remove = set(self.properties) - set(properties)
if properties_to_remove:
self.del_properties(properties_to_remove, recursive=False)
self.set_properties(properties, recursive=False)
@properties.deleter
def properties(self):
self.del_properties(self.properties, recursive=False)
def set_properties(self, properties, recursive=True):
"""
Adds new or modifies existing properties listed in properties
        properties - a dict which contains the property names and values to set.
                     Property values can be a list or tuple to set multiple values
                     for a key.
        recursive - on folders, property attachment is recursive by default.
                    Pass recursive=False to disable this behaviour.
"""
if not properties:
return
        # If the URL exceeds ~13 KB, nginx by default raises '414 Request-URI Too Large'
MAX_SIZE = 50
if len(properties) > MAX_SIZE:
for chunk in chunks(properties, MAX_SIZE):
self._accessor.set_properties(self, chunk, recursive)
else:
self._accessor.set_properties(self, properties, recursive)
def del_properties(self, properties, recursive=None):
"""
Delete properties listed in properties
        properties - an iterable containing the property names to delete. If it
                     is a str it will be cast to a tuple.
        recursive - pass recursive=True to also apply the deletion recursively
                    on a folder's children; the default is non-recursive.
"""
return self._accessor.del_properties(self, properties, recursive)
def aql(self, *args):
"""
Send AQL query to Artifactory
:param args:
:return:
"""
aql_query_url = "{}/api/search/aql".format(self.drive.rstrip("/"))
aql_query_text = self.create_aql_text(*args)
r = self.session.post(aql_query_url, data=aql_query_text)
r.raise_for_status()
content = r.json()
return content["results"]
@staticmethod
def create_aql_text(*args):
"""
        Create an AQL query from string, list or dict arguments
"""
aql_query_text = ""
for arg in args:
if isinstance(arg, dict):
arg = "({})".format(json.dumps(arg))
elif isinstance(arg, list):
arg = "({})".format(json.dumps(arg)).replace("[", "").replace("]", "")
aql_query_text += arg
return aql_query_text
def from_aql(self, result):
"""
Convert raw AQL result to pathlib object
:param result: ONE raw result
:return:
"""
result_type = result.get("type")
if result_type not in ("file", "folder"):
            raise RuntimeError(
                "Path object with type '{}' is not supported; only 'file' and 'folder' are".format(
                    result_type
                )
            )
result_path = "{}/{repo}/{path}/{name}".format(self.drive.rstrip("/"), **result)
obj = ArtifactoryPath(
result_path,
auth=self.auth,
verify=self.verify,
cert=self.cert,
session=self.session,
timeout=self.timeout,
)
return obj
@property
def repo(self):
return self._root.replace("/", "")
@property
def path_in_repo(self):
parts = self.parts
path_in_repo = "/" + "/".join(parts[1:])
return path_in_repo
def find_user(self, name):
obj = User(self, name, email="", password=None)
if obj.read():
return obj
return None
def find_group(self, name):
obj = Group(self, name)
if obj.read():
return obj
return None
def find_repository_local(self, name):
obj = RepositoryLocal(self, name)
if obj.read():
return obj
return None
def find_repository_virtual(self, name):
obj = RepositoryVirtual(self, name)
if obj.read():
return obj
return None
def find_repository_remote(self, name):
obj = RepositoryRemote(self, name)
if obj.read():
return obj
return None
def find_repository(self, name):
try:
return self.find_repository_local(name)
except ArtifactoryException:
pass
try:
return self.find_repository_remote(name)
except ArtifactoryException:
pass
try:
return self.find_repository_virtual(name)
except ArtifactoryException:
pass
return None
def find_permission_target(self, name):
obj = PermissionTarget(self, name)
if obj.read():
return obj
return None
def writeto(self, out, chunk_size=None):
"""
Writes artifact to file descriptor in chunks
:param out: File Descriptor
:param chunk_size: Chunk size, default 256
"""
if not chunk_size:
chunk_size = 256
self._accessor.writeto(self, out, chunk_size=chunk_size)
def _get_all(self, lazy: bool, url=None, key="name", cls=None):
"""
Create a list of objects from the given endpoint
:param url: A URL where to find objects
:param lazy: `True` if we don't need anything except object's name
:param key: Primary key for objects
        :param cls: Create objects of this class
        :return: A list of found objects
"""
request_url = self.drive + url
r = self.session.get(request_url, auth=self.auth)
r.raise_for_status()
response = r.json()
results = []
for i in response:
if cls is Repository:
item = Repository.create_by_type(i["type"], self, i[key])
else:
item = cls(self, i[key])
if not lazy:
item.read()
results.append(item)
return results
def get_users(self, lazy=False):
"""
Get all users
:param lazy: `True` if we don't need anything except object's name
"""
return self._get_all(url="/api/security/users", key="name", cls=User, lazy=lazy)
def get_groups(self, lazy=False):
"""
Get all groups
:param lazy: `True` if we don't need anything except object's name
"""
return self._get_all(
url="/api/security/groups", key="name", cls=Group, lazy=lazy
)
def get_repositories(self, lazy=False):
"""
Get all repositories
:param lazy: `True` if we don't need anything except object's name
"""
return self._get_all(
url="/api/repositories", key="key", cls=Repository, lazy=lazy
)
def get_permissions(self, lazy=False):
"""
Get all permissions
:param lazy: `True` if we don't need anything except object's name
"""
return self._get_all(
url="/api/security/permissions", key="name", cls=PermissionTarget, lazy=lazy
)
class ArtifactorySaaSPath(ArtifactoryPath):
"""Class for SaaS Artifactory"""
_flavour = _saas_artifactory_flavour
def chmod(self, mode):
"""
Throw NotImplementedError
Changing access rights makes no sense for Artifactory.
"""
raise NotImplementedError()
def lchmod(self, mode):
"""
Throw NotImplementedError
Changing access rights makes no sense for Artifactory.
"""
raise NotImplementedError()
def symlink_to(self, target, target_is_directory=False):
"""
Throw NotImplementedError
Artifactory doesn't have symlinks
"""
raise NotImplementedError()
def walk(pathobj, topdown=True):
"""
os.walk like function to traverse the URI like a file system.
    The only difference is that this function takes and returns Path objects
    in places where the original implementation returns strings.
"""
dirs = []
nondirs = []
for child in pathobj:
relpath = str(child.relative_to(str(pathobj)))
if relpath.startswith("/"):
relpath = relpath[1:]
if relpath.endswith("/"):
relpath = relpath[:-1]
if child.is_dir():
dirs.append(relpath)
else:
nondirs.append(relpath)
if topdown:
yield pathobj, dirs, nondirs
for dir in dirs:
for result in walk(pathobj / dir):
yield result
if not topdown:
yield pathobj, dirs, nondirs
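# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical helper showing typical calls; the URL, repository and
# credentials below are placeholders.
def _example_usage():
    path = ArtifactoryPath(
        "http://example.com/artifactory/my-repo-local/some/dir/",
        auth=("admin", "password"),
    )
    # Iterate over the immediate children, then walk the whole subtree.
    for child in path:
        print(child)
    for root, dirs, files in walk(path):
        print(root, dirs, files)
    # Upload a local file, then attach a property to it.
    path.deploy_file("./artifact.tar.gz")
    (path / "artifact.tar.gz").properties = {"build.number": "42"}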
|
"""Avoid passing data down long trees as props by using a Context API."""
from viewdom import Context
from viewdom import html
from viewdom import render
from viewdom import use_context
def Todo(label):
"""Render a to do."""
prefix = use_context("prefix")
return html("<li>{prefix}{label}</li>")
def TodoList(these_todos):
"""Render a to do list."""
return html("<ul>{[Todo(label) for label in these_todos]}</ul>")
def main() -> str:
"""Render a template to a string."""
assert Context
title = "My Todos"
todos = ["first"]
result = render(
html(
"""
<{Context} prefix="Item: ">
<h1>{title}</h1>
<{TodoList} these_todos={todos} />
<//>
"""
)
)
return result
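# A minimal sketch for running this example directly (not part of the original):
if __name__ == "__main__":
    # Expected to print markup along the lines of
    # <h1>My Todos</h1><ul><li>Item: first</li></ul>
    # (exact whitespace depends on viewdom's renderer).
    print(main())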
|
import unittest
from recipe8 import *
class RomanNumeralTest(unittest.TestCase):
def setUp(self):
self.cvt = RomanNumeralConverter()
    def test_convert_to_decimal(self):
        self.assertEqual(0, self.cvt.convert_to_decimal(""))
        self.assertEqual(1, self.cvt.convert_to_decimal("I"))
        self.assertEqual(2010, self.cvt.convert_to_decimal("MMX"))
        self.assertEqual(4000, self.cvt.convert_to_decimal("MMMM"))
    def test_convert_to_roman(self):
        self.assertEqual("", self.cvt.convert_to_roman(0))
        self.assertEqual("II", self.cvt.convert_to_roman(2))
        self.assertEqual("V", self.cvt.convert_to_roman(5))
        self.assertEqual("XII", self.cvt.convert_to_roman(12))
        self.assertEqual("MMX", self.cvt.convert_to_roman(2010))
        self.assertEqual("MMMM", self.cvt.convert_to_roman(4000))
if __name__ == "__main__":
unittest.main()
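# recipe8 is not included in this listing. As an illustration only (an
# assumption, not the original recipe), a converter that would satisfy the
# tests above could look like this:
#
#     class RomanNumeralConverter(object):
#         SYMBOLS = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
#                    (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
#                    (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]
#
#         def convert_to_roman(self, value):
#             result = ""
#             for number, symbol in self.SYMBOLS:
#                 while value >= number:
#                     result += symbol
#                     value -= number
#             return result
#
#         def convert_to_decimal(self, roman):
#             values = {"I": 1, "V": 5, "X": 10, "L": 50,
#                       "C": 100, "D": 500, "M": 1000}
#             total = 0
#             for current, following in zip(roman, roman[1:] + " "):
#                 if following != " " and values[current] < values[following]:
#                     total -= values[current]
#                 else:
#                     total += values[current]
#             return total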
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
import shutil
import unittest
from datetime import timedelta
from tempfile import NamedTemporaryFile, mkdtemp
import mock
import psutil
import pytest
import six
from freezegun import freeze_time
from mock import MagicMock, patch
from parameterized import parameterized
import airflow.example_dags
import airflow.smart_sensor_dags
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.backfill_job import BackfillJob
from airflow.jobs.scheduler_job import DagFileProcessor, SchedulerJob
from airflow.models import DAG, DagBag, DagModel, Pool, SlaMiss, TaskInstance, errors
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.operators.bash import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.utils import timezone
from airflow.utils.dag_processing import FailureCallbackRequest, SimpleDagBag
from airflow.utils.dates import days_ago
from airflow.utils.file import list_py_file_paths
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars, env_vars
from tests.test_utils.db import (
clear_db_dags, clear_db_errors, clear_db_jobs, clear_db_pools, clear_db_runs, clear_db_sla_miss,
set_default_pool_slots,
)
from tests.test_utils.mock_executor import MockExecutor
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
PERF_DAGS_FOLDER = os.path.join(ROOT_FOLDER, "tests", "utils", "perf", "dags")
ELASTIC_DAG_FILE = os.path.join(PERF_DAGS_FOLDER, "elastic_dag.py")
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
@pytest.fixture(scope="class")
def disable_load_example():
with conf_vars({('core', 'load_examples'): 'false'}):
with env_vars({('core', 'load_examples'): 'false'}):
yield
@pytest.mark.usefixtures("disable_load_example")
class TestDagFileProcessor(unittest.TestCase):
@staticmethod
def clean_db():
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
clear_db_jobs()
def setUp(self):
self.clean_db()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
def tearDown(self) -> None:
self.clean_db()
def create_test_dag(self, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(hours=1), **kwargs):
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=start_date,
# Make sure it only creates a single DAG Run
end_date=end_date)
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
return dag
@classmethod
@patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", True)
def setUpClass(cls):
# Ensure the DAGs we are looking at from the DB are up-to-date
non_serialized_dagbag = DagBag(read_dags_from_db=False, include_examples=False)
non_serialized_dagbag.sync_to_db()
cls.dagbag = DagBag(read_dags_from_db=True)
def test_dag_file_processor_sla_miss_callback(self):
"""
Test that the dag file processor calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta()})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
def test_dag_file_processor_sla_miss_callback_invalid_sla(self):
"""
Test that the dag file processor does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create a dag with a start of 1 day ago, but pass something other than
# a timedelta object as the sla, so the callback should not be invoked.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': None})
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_executor_overflow(self):
"""
Test that tasks are set back to scheduled and removed from the executor
queue in the case of an overflow.
"""
executor = MockExecutor(do_update=True, parallelism=3)
with create_session() as session:
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"),
include_examples=False,
include_smart_sensor=False)
dag = self.create_test_dag()
dag.clear()
dagbag.bag_dag(dag=dag, root_dag=dag)
dag = self.create_test_dag()
dag.clear()
task = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
tis = []
for i in range(1, 10):
ti = TaskInstance(task, DEFAULT_DATE + timedelta(days=i))
ti.state = State.SCHEDULED
tis.append(ti)
session.merge(ti)
# scheduler._process_dags(simple_dag_bag)
@mock.patch('airflow.jobs.scheduler_job.DagBag', return_value=dagbag)
@mock.patch('airflow.jobs.scheduler_job.SchedulerJob._change_state_for_tis_without_dagrun')
def do_schedule(mock_dagbag, mock_change_state):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
with conf_vars({('core', 'mp_start_method'): 'fork'}):
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule() # pylint: disable=no-value-for-parameter
for ti in tis:
ti.refresh_from_db()
self.assertEqual(len(executor.queued_tasks), 0)
successful_tasks = [ti for ti in tis if ti.state == State.SUCCESS]
scheduled_tasks = [ti for ti in tis if ti.state == State.SCHEDULED]
self.assertEqual(3, len(successful_tasks))
self.assertEqual(6, len(scheduled_tasks))
def test_dag_file_processor_sla_miss_callback_sent_notification(self):
"""
Test that the dag file processor does not call the sla_miss_callback when a
notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
# Create a TaskInstance for two days ago
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_dag_file_processor_sla_miss_callback_exception(self):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
# Create an SlaMiss for the task so that the sla_miss_callback will be invoked
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
mock_log.exception.assert_called_once_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch('airflow.jobs.scheduler_job.send_email')
def test_dag_file_processor_only_collect_emails_from_sla_missed_tasks(self, mock_send_email):
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
email1 = 'test1@test.com'
task = DummyOperator(task_id='sla_missed',
dag=dag,
owner='airflow',
email=email1,
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
email2 = 'test2@test.com'
DummyOperator(task_id='sla_not_missed',
dag=dag,
owner='airflow',
email=email2)
session.merge(SlaMiss(task_id='sla_missed', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
self.assertEqual(len(mock_send_email.call_args_list), 1)
send_email_to = mock_send_email.call_args_list[0][0][0]
self.assertIn(email1, send_email_to)
self.assertNotIn(email2, send_email_to)
@mock.patch('airflow.jobs.scheduler_job.Stats.incr')
@mock.patch("airflow.utils.email.send_email")
def test_dag_file_processor_sla_miss_email_exception(self, mock_send_email, mock_stats_incr):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Make the mocked send_email raise so the exception-handling path is exercised
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
# Create an SlaMiss so that an email notification will be attempted
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
mock_log.exception.assert_called_once_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
mock_stats_incr.assert_called_once_with('sla_email_notification_failure')
def test_dag_file_processor_sla_miss_deleted_task(self):
"""
Test that the dag file processor will not crash when trying to send
sla miss notification for a deleted task
"""
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
# Create an SlaMiss for a task that has since been removed from the DAG
session.merge(SlaMiss(task_id='dummy_deleted', dag_id='test_sla_miss',
execution_date=test_start_date))
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
def test_dag_file_processor_dagrun_once(self):
"""
Test that the dag file processor does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
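# Round-trip the DAG through serialization so the processor works with the same
# representation the scheduler would load from the DB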
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
@freeze_time(timezone.datetime(2020, 1, 5))
def test_dag_file_processor_dagrun_with_timedelta_schedule_and_catchup_false(self):
"""
Test that the dag file processor does not create multiple dagruns
if a dag is scheduled with 'timedelta' and catchup=False
"""
dag = DAG(
'test_scheduler_dagrun_once_with_timedelta_and_catchup_false',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval=timedelta(days=1),
catchup=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 1, 4))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
@freeze_time(timezone.datetime(2020, 5, 4))
def test_dag_file_processor_dagrun_with_timedelta_schedule_and_catchup_true(self):
"""
Test that the dag file processor creates multiple dagruns
if a dag is scheduled with 'timedelta' and catchup=True
"""
dag = DAG(
'test_scheduler_dagrun_once_with_timedelta_and_catchup_true',
start_date=timezone.datetime(2020, 5, 1),
schedule_interval=timedelta(days=1),
catchup=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 5, 1))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 5, 2))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2020, 5, 3))
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
mock_list.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
self.assertEqual(
[(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)],
mock_list
)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances_with_task_concurrency(
self, state, start_date, end_date,
):
"""
Test that _process_task_instances schedules the right task instances
when task_concurrency is set on the task.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task_with_task_concurrency',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
task_concurrency=2,
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
ti_to_schedule = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
assert ti_to_schedule == [
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER),
]
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_dag_file_processor_process_task_instances_depends_on_past(self, state, start_date, end_date):
"""
Test that _process_task_instances schedules the right task instances
when depends_on_past is set on the DAG.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task_depends_on_past',
start_date=DEFAULT_DATE,
default_args={
'depends_on_past': True,
},
)
dag_task1 = DummyOperator(
task_id='dummy1',
dag=dag,
owner='airflow')
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
ti_to_schedule = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
assert sorted(ti_to_schedule) == [
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER),
(dag.dag_id, dag_task2.task_id, DEFAULT_DATE, TRY_NUMBER),
]
def test_dag_file_processor_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = DagRun.find(run_id=dr.run_id)[0]
# Re-create the DAG, but remove the task
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
self.assertEqual([], mock_list)
def test_dag_file_processor_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[])
self.assertEqual([], mock_list)
def test_dag_file_processor_do_not_schedule_without_tasks(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_without_tasks',
start_date=DEFAULT_DATE)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear(session=session)
dag.start_date = None
dr = dag_file_processor.create_dag_run(dag, session=session)
self.assertIsNone(dr)
def test_dag_file_processor_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
mock_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr])
self.assertEqual([], mock_list)
def test_dag_file_processor_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor._process_task_instances(dag, dag_runs=[dr])
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_dag_file_processor_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(dr)
def test_dag_file_processor_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEqual(dr.state, State.FAILED)
def test_dag_file_processor_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been
reached and dagrun_timeout has not been reached.
Test that a dagrun will be scheduled if max_active_runs has been
reached but dagrun_timeout has also been reached.
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = dag_file_processor.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_dag_file_processor_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
# First create up to 3 dagruns in RUNNING state.
dr1 = dag_file_processor.create_dag_run(dag)
assert dr1 is not None
dr2 = dag_file_processor.create_dag_run(dag)
assert dr2 is not None
dr3 = dag_file_processor.create_dag_run(dag)
assert dr3 is not None
assert len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)) == 3
# Reduce max_active_runs to 1
dag.max_active_runs = 1
# and schedule them in, so we can check how many
# tasks are put on the task_instances_list (should be one, not 3)
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=[dr1, dr2, dr3])
self.assertEqual([(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)], task_instances_list)
def test_find_dags_to_run_includes_subdags(self):
dag = self.dagbag.get_dag('test_subdag_operator')
self.assertGreater(len(dag.subdags), 0)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dags = dag_file_processor._find_dags_to_process(self.dagbag.dags.values())
self.assertIn(dag, dags)
for subdag in dag.subdags:
self.assertIn(subdag, dags)
def test_dag_catchup_option(self):
"""
Test that a DAG with catchup=False only schedules runs beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
op1 = DummyOperator(task_id='t1', dag=dag)
op2 = DummyOperator(task_id='t2', dag=dag)
op2.set_upstream(op1)
op3 = DummyOperator(task_id='t3', dag=dag)
op3.set_upstream(op2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
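# With catchup disabled, the first run should fall within the most recent
# schedule interval, not six hours back at the start_date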
dr = dag_file_processor.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = dag_file_processor.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = dag_file_processor.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_dag_file_processor_auto_align(self):
"""
Test that the schedule_interval is auto-aligned with the start_date:
if the start_date coincides with the schedule, the first
execution_date will be the start_date, otherwise it will be the next
schedule point after the start_date.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_process_dags_not_create_dagrun_for_subdags(self):
dag = self.dagbag.get_dag('test_subdag_operator')
scheduler = DagFileProcessor(dag_ids=[dag.dag_id], log=mock.MagicMock())
scheduler._process_task_instances = mock.MagicMock()
scheduler.manage_slas = mock.MagicMock()
scheduler._process_dags([dag] + dag.subdags)
with create_session() as session:
sub_dagruns = (
session.query(DagRun).filter(DagRun.dag_id == dag.subdags[0].dag_id).count()
)
self.assertEqual(0, sub_dagruns)
parent_dagruns = (
session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).count()
)
self.assertGreater(parent_dagruns, 0)
@patch.object(TaskInstance, 'handle_failure')
def test_execute_on_failure_callbacks(self, mock_ti_handle_failure):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
FailureCallbackRequest(
full_filepath="A",
simple_task_instance=SimpleTaskInstance(ti),
msg="Message"
)
]
dag_file_processor.execute_on_failure_callbacks(dagbag, requests)
mock_ti_handle_failure.assert_called_once_with(
"Message",
conf.getboolean('core', 'unit_test_mode'),
mock.ANY
)
def test_process_file_should_failure_callback(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_on_failure_callback.py'
)
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session, NamedTemporaryFile(delete=False) as callback_file:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('test_om_failure_callback_dag')
task = dag.get_task(task_id='test_om_failure_callback_task')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
FailureCallbackRequest(
full_filepath=dag.full_filepath,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message"
)
]
callback_file.close()
with mock.patch.dict("os.environ", {"AIRFLOW_CALLBACK_FILE": callback_file.name}):
dag_file_processor.process_file(dag_file, requests)
with open(callback_file.name) as callback_file2:
content = callback_file2.read()
self.assertEqual("Callback fired", content)
os.remove(callback_file.name)
def test_should_parse_only_unpaused_dags(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_multiple_dags.py'
)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dagbag.sync_to_db()
with create_session() as session:
session.query(TaskInstance).delete()
(
session.query(DagModel)
.filter(DagModel.dag_id == "test_multiple_dags__dag_1")
.update({DagModel.is_paused: True}, synchronize_session=False)
)
serialized_dags, import_errors_count = dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
dags = [SerializedDAG.from_dict(serialized_dag) for serialized_dag in serialized_dags]
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(0, import_errors_count)
self.assertEqual(['test_multiple_dags__dag_2'], [dag.dag_id for dag in dags])
self.assertEqual({'test_multiple_dags__dag_2'}, {ti.dag_id for ti in tis})
def test_should_mark_dummy_task_as_success(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_only_dummy_tasks.py'
)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session:
session.query(TaskInstance).delete()
session.query(DagModel).delete()
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dagbag.sync_to_db()
serialized_dags, import_errors_count = dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
dags = [SerializedDAG.from_dict(serialized_dag) for serialized_dag in serialized_dags]
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(0, import_errors_count)
self.assertEqual(['test_only_dummy_tasks'], [dag.dag_id for dag in dags])
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', None),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
dag_file_processor.process_file(
file_path=dag_file, failure_callback_requests=[]
)
with create_session() as session:
tis = session.query(TaskInstance).all()
self.assertEqual(5, len(tis))
self.assertEqual({
('test_task_a', 'success'),
('test_task_b', 'success'),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
}, {(ti.task_id, ti.state) for ti in tis})
for state, start_date, end_date, duration in [(ti.state, ti.start_date, ti.end_date, ti.duration) for
ti in tis]:
if state == 'success':
self.assertIsNotNone(start_date)
self.assertIsNotNone(end_date)
self.assertEqual(0.0, duration)
else:
self.assertIsNone(start_date)
self.assertIsNone(end_date)
self.assertIsNone(duration)
@pytest.mark.heisentests
class TestDagFileProcessorQueriesCount(unittest.TestCase):
"""
These tests are designed to detect changes in the number of queries executed for different DAG files.
Each test has its expected query counts recorded in the tables below. If you make a change that affects
the number of queries, please update the tables.
These tests allow easy detection when a change is made that affects the performance of the
DagFileProcessor.
"""
def setUp(self) -> None:
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count, start_ago, schedule_interval, shape
# One DAG with one task per DAG file
([ 1, 1, 1, 1], 1, 1, "1d", "None", "no_structure"), # noqa
([ 1, 1, 1, 1], 1, 1, "1d", "None", "linear"), # noqa
([ 9, 5, 5, 5], 1, 1, "1d", "@once", "no_structure"), # noqa
([ 9, 5, 5, 5], 1, 1, "1d", "@once", "linear"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "no_structure"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "linear"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "binary_tree"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "star"), # noqa
([ 9, 12, 15, 18], 1, 1, "1d", "30m", "grid"), # noqa
# One DAG with five tasks per DAG file
([ 1, 1, 1, 1], 1, 5, "1d", "None", "no_structure"), # noqa
([ 1, 1, 1, 1], 1, 5, "1d", "None", "linear"), # noqa
([ 9, 5, 5, 5], 1, 5, "1d", "@once", "no_structure"), # noqa
([10, 6, 6, 6], 1, 5, "1d", "@once", "linear"), # noqa
([ 9, 12, 15, 18], 1, 5, "1d", "30m", "no_structure"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "linear"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "binary_tree"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "star"), # noqa
([10, 14, 18, 22], 1, 5, "1d", "30m", "grid"), # noqa
# 10 DAGs with 10 tasks per DAG file
([ 1, 1, 1, 1], 10, 10, "1d", "None", "no_structure"), # noqa
([ 1, 1, 1, 1], 10, 10, "1d", "None", "linear"), # noqa
([81, 41, 41, 41], 10, 10, "1d", "@once", "no_structure"), # noqa
([91, 51, 51, 51], 10, 10, "1d", "@once", "linear"), # noqa
([81, 111, 111, 111], 10, 10, "1d", "30m", "no_structure"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "linear"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "binary_tree"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "star"), # noqa
([91, 131, 131, 131], 10, 10, "1d", "30m", "grid"), # noqa
# pylint: enable=bad-whitespace
]
)
def test_process_dags_queries_count(
self, expected_query_counts, dag_count, task_count, start_ago, schedule_interval, shape
):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": start_ago,
"PERF_SCHEDULE_INTERVAL": schedule_interval,
"PERF_SHAPE": shape,
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
}):
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE,
include_examples=False,
include_smart_sensor=False)
processor = DagFileProcessor([], mock.MagicMock())
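# Each expected count corresponds to one consecutive _process_dags pass over the same DagBag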
for expected_query_count in expected_query_counts:
with assert_queries_count(expected_query_count):
processor._process_dags(dagbag.dags.values())
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count, start_ago, schedule_interval, shape
# One DAG with one task per DAG file
([ 5, 5, 5, 5], 1, 1, "1d", "None", "no_structure"), # noqa
([ 5, 5, 5, 5], 1, 1, "1d", "None", "linear"), # noqa
([15, 9, 9, 9], 1, 1, "1d", "@once", "no_structure"), # noqa
([15, 9, 9, 9], 1, 1, "1d", "@once", "linear"), # noqa
([15, 18, 21, 24], 1, 1, "1d", "30m", "no_structure"), # noqa
([15, 18, 21, 24], 1, 1, "1d", "30m", "linear"), # noqa
# One DAG with five tasks per DAG file
([ 5, 5, 5, 5], 1, 5, "1d", "None", "no_structure"), # noqa
([ 5, 5, 5, 5], 1, 5, "1d", "None", "linear"), # noqa
([15, 9, 9, 9], 1, 5, "1d", "@once", "no_structure"), # noqa
([16, 10, 10, 10], 1, 5, "1d", "@once", "linear"), # noqa
([15, 18, 21, 24], 1, 5, "1d", "30m", "no_structure"), # noqa
([16, 20, 24, 28], 1, 5, "1d", "30m", "linear"), # noqa
# 10 DAGs with 10 tasks per DAG file
([ 5, 5, 5, 5], 10, 10, "1d", "None", "no_structure"), # noqa
([ 5, 5, 5, 5], 10, 10, "1d", "None", "linear"), # noqa
([87, 45, 45, 45], 10, 10, "1d", "@once", "no_structure"), # noqa
([97, 55, 55, 55], 10, 10, "1d", "@once", "linear"), # noqa
([87, 117, 117, 117], 10, 10, "1d", "30m", "no_structure"), # noqa
([97, 137, 137, 137], 10, 10, "1d", "30m", "linear"), # noqa
# pylint: enable=bad-whitespace
]
)
def test_process_file_queries_count(
self, expected_query_counts, dag_count, task_count, start_ago, schedule_interval, shape
):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": start_ago,
"PERF_SCHEDULE_INTERVAL": schedule_interval,
"PERF_SHAPE": shape,
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True'
}):
processor = DagFileProcessor([], mock.MagicMock())
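# Each expected count corresponds to one consecutive process_file call on the same DAG file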
for expected_query_count in expected_query_counts:
with assert_queries_count(expected_query_count):
processor.process_file(ELASTIC_DAG_FILE, [])
@pytest.mark.usefixtures("disable_load_example")
class TestSchedulerJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
@classmethod
@patch("airflow.models.dagbag.settings.STORE_SERIALIZED_DAGS", True)
def setUpClass(cls):
# Ensure the DAGs we are looking at from the DB are up-to-date
non_serialized_dagbag = DagBag(read_dags_from_db=False, include_examples=False)
non_serialized_dagbag.sync_to_db()
cls.dagbag = DagBag(read_dags_from_db=True)
def test_is_alive(self):
job = SchedulerJob(None, heartrate=10, state=State.RUNNING)
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=20)
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=31)
self.assertFalse(job.is_alive())
# Regression test: .seconds was used before instead of total_seconds();
# the internal repr of timedelta is (days, seconds)
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(days=1)
self.assertFalse(job.is_alive())
job.state = State.SUCCESS
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=10)
self.assertFalse(job.is_alive(), "Completed jobs even with recent heartbeat should not be alive")
def run_single_scheduler_loop_with_no_dags(self, dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
executor=self.null_exec,
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1,
executor=MockExecutor(do_update=False))
scheduler.run()
shutil.rmtree(empty_dir)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
@mock.patch('airflow.jobs.scheduler_job.Stats.incr')
def test_process_executor_events(self, mock_stats_incr):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, full_filepath="/test_path1/")
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE, full_filepath="/test_path1/")
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dag.fileloc = "/test_path1/"
dag2.fileloc = "/test_path1/"
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = MockExecutor(do_update=False)
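# Simulate the executor reporting a FAILED result while the task instance is still QUEUED in the DB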
executor.event_buffer[ti1.key] = State.FAILED, None
scheduler.executor = executor
scheduler.processor_agent = mock.MagicMock()
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
scheduler.processor_agent.send_callback_to_execute.assert_not_called()
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
scheduler.processor_agent.send_callback_to_execute.assert_called_once_with(
full_filepath='/test_path1/',
task_instance=mock.ANY,
msg='Executor reports task instance '
'<TaskInstance: test_process_executor_events.dummy_task 2016-01-01 00:00:00+00:00 [queued]> '
'finished (failed) although the task says its queued. (Info: None) '
'Was the task killed externally?'
)
scheduler.processor_agent.reset_mock()
# ti in success state
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS, None
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
scheduler.processor_agent.send_callback_to_execute.assert_not_called()
mock_stats_incr.assert_called_once_with('scheduler.tasks.killed_externally')
def test_process_executor_events_uses_inmemory_try_number(self):
execution_date = DEFAULT_DATE
dag_id = "dag_id"
task_id = "task_id"
try_number = 42
scheduler = SchedulerJob()
executor = MagicMock()
event_buffer = {
TaskInstanceKey(dag_id, task_id, execution_date, try_number): (State.SUCCESS, None)
}
executor.get_event_buffer.return_value = event_buffer
scheduler.executor = executor
processor_agent = MagicMock()
scheduler.processor_agent = processor_agent
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task = DummyOperator(dag=dag, task_id=task_id)
with create_session() as session:
ti = TaskInstance(task, DEFAULT_DATE)
ti.state = State.SUCCESS
session.merge(ti)
scheduler._process_executor_events(simple_dag_bag=MagicMock())
# Assert that the event_buffer is empty, so the task was popped using the right
# task instance key
self.assertEqual(event_buffer, {})
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
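# The DAG is paused, so the scheduled task instance must not be picked up for execution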
scheduler._execute_task_instances(dagbag)
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
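# Shift the execution date so this task instance has no corresponding dag run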
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag)
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
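# Mark the dag run as a backfill run; the scheduler should leave its task instances alone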
dr1.run_type = DagRunType.BACKFILL_JOB.value
ti1 = TaskInstance(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag)
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
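# The second dag run is a backfill, so its task instance should be excluded from the executable set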
dr2.run_type = DagRunType.BACKFILL_JOB.value
ti_no_dagrun = TaskInstance(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TaskInstance(task1, dr2.execution_date)
ti_with_dagrun = TaskInstance(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
tis = ([
TaskInstance(task1, dr1.execution_date),
TaskInstance(task2, dr1.execution_date),
TaskInstance(task1, dr2.execution_date),
TaskInstance(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
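# Pool 'a' has a single slot, so only one of its two scheduled tasks can be selected; pool 'b' has plenty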
pool = Pool(pool='a', slots=1, description='haha')
pool2 = Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_find_executable_task_instances_in_default_pool(self):
set_default_pool_slots(1)
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_in_default_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
op1 = DummyOperator(dag=dag, task_id='dummy1')
op2 = DummyOperator(dag=dag, task_id='dummy2')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
executor = MockExecutor(do_update=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=executor)
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task=op1, execution_date=dr1.execution_date)
ti2 = TaskInstance(task=op2, execution_date=dr2.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session = settings.Session()
session.merge(ti1)
session.merge(ti2)
session.commit()
# Two tasks w/o pool up for execution and our default pool size is 1
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
# One task w/o pool up for execution and one task running
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(0, len(res))
session.close()
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr = dag_file_processor.create_dag_run(dag)
ti = TaskInstance(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dag_file_processor.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr3 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task1, dr2.execution_date)
ti3 = TaskInstance(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dag_run = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dag_run.execution_date)
ti2 = TaskInstance(task2, dag_run.execution_date)
ti3 = TaskInstance(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self): # pylint: disable=too-many-statements
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
executor = MockExecutor(do_update=True)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=executor)
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr3 = dag_file_processor.create_dag_run(dag)
ti1_1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TaskInstance(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TaskInstance(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr3 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task1, dr2.execution_date)
ti3 = TaskInstance(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.RUNNING
ti3.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
session)
self.assertEqual(0, len(res))
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
assert mock_queue_command.called
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = SimpleDagBag([])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
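# The SimpleDagBag passed in is empty, so the scheduler finds nothing to
# execute even though a task instance is in the SCHEDULED state.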
self.assertEqual(0, scheduler._execute_task_instances(dagbag))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# It is important that len(tasks) is less than concurrency:
# previously, scheduler._execute_task_instances only checked the
# number of tasks once, so with a concurrency of 3 we could execute
# arbitrarily many tasks in the second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with both task instances set to running
dr1 = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr1.execution_date)
ti2 = TaskInstance(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = dag_file_processor.create_dag_run(dag)
ti3 = TaskInstance(task1, dr2.execution_date)
ti4 = TaskInstance(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
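# dag concurrency is 3 and two TIs are already running, so only one of the
# two scheduled TIs can be queued in this pass.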
res = scheduler._execute_task_instances(dagbag)
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# It is important that len(tasks) is less than concurrency:
# previously, scheduler._execute_task_instances only checked the
# number of tasks once, so with a concurrency of 3 we could execute
# arbitrarily many tasks in the second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dagbag = self._make_simple_dag_bag([dag])
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for _ in range(0, 4):
dr = dag_file_processor.create_dag_run(dag)
ti1 = TaskInstance(task1, dr.execution_date)
ti2 = TaskInstance(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
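# max_tis_per_query=3 only limits how many TIs are processed per query;
# all 8 scheduled TIs still end up queued.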
res = scheduler._execute_task_instances(dagbag)
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@pytest.mark.quarantined
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag1 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag1))
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag2 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag2))
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
dag3 = SerializedDAG.from_dict(SerializedDAG.to_dict(dag3))
session = settings.Session()
dr1 = dag1.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TaskInstance(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# why o why
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = MockExecutor(do_update=False)
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TaskInstance).delete()
session.commit()
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TaskInstance(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti) # pylint: disable=no-value-for-parameter
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TaskInstance).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_reset_state_for_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_type=DagRunType.BACKFILL_JOB,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0)
scheduler.processor_agent = processor
scheduler.reset_state_for_orphaned_tasks()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_scheduler_loop_should_change_state_for_tis_without_dagrun(self,
initial_task_state,
expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.SCHEDULED,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure the loop is entered. Poll interval is 0 to
# avoid sleep. The done flag is set to true to exit the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = MockExecutor(do_update=False)
executor.queued_tasks
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_serialized_dags.return_value = [
SerializedDAG.from_dict(SerializedDAG.to_dict(dag))]
processor.done = True
scheduler.processor_agent = processor
scheduler._run_scheduler_loop()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None): # pylint: disable=unused-argument
"""
Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag = self.dagbag.get_dag(dag_id)
dr = dag_file_processor.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = dag_file_processor.create_dag_run(dag)
ex_date = dr.execution_date
for tid, state in expected_task_states.items():
if state != State.FAILED:
continue
self.null_exec.mock_task_fail(dag_id, tid, ex_date)
try:
# This needs a _REAL_ dag, not the serialized version
dag.run(start_date=ex_date, end_date=ex_date, executor=self.null_exec, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TaskInstance(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = dag_file_processor.create_dag_run(dag)
self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', DEFAULT_DATE)
with self.assertRaises(AirflowException):
dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
def test_dagrun_root_after_dagrun_unfinished(self):
"""
DagRuns with one successful and one future root task -> SUCCESS
Note: the DagRun state may still be RUNNING during CI.
"""
dag_id = 'test_dagrun_states_root_future'
dag = self.dagbag.get_dag(dag_id)
scheduler = SchedulerJob(
dag_id,
num_runs=1,
executor=self.null_exec,
subdir=dag.fileloc)
scheduler.run()
first_run = DagRun.find(dag_id=dag_id, execution_date=DEFAULT_DATE)[0]
ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
self.assertEqual(ti_ids, [('current', State.SUCCESS)])
self.assertIn(first_run.state, [State.SUCCESS, State.RUNNING])
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertGreater(dag.start_date, datetime.datetime.now(timezone.utc))
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 0)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
# Previously, running this backfill would kick off the scheduler
# because it would take the most recent run and start from there.
# That behavior still exists, but now it only does so if the run is
# after the start date.
bf_exec = MockExecutor()
backfill = BackfillJob(
executor=bf_exec,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 1)
self.assertListEqual(
[
(TaskInstanceKey(dag.dag_id, 'dummy', DEFAULT_DATE, 1), (State.SUCCESS, None)),
],
bf_exec.sorted_tasks
)
session.commit()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=dag.fileloc,
num_runs=1)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 1)
session.commit()
self.assertListEqual([], self.null_exec.sorted_tasks)
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id)
ti1s = tiq.filter(TaskInstance.task_id == 'dummy1').all()
ti2s = tiq.filter(TaskInstance.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for task in ti2s:
self.assertEqual(task.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()), 0)
@conf_vars({("core", "mp_start_method"): "spawn"})
def test_scheduler_multiprocessing_with_spawn_method(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
when using "spawn" mode of multiprocessing. (Fork is default on Linux and older OSX)
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=os.path.join(
TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
with create_session() as session:
self.assertEqual(
session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).count(), 0)
def test_scheduler_verify_pool_full(self):
"""
Test task instances not queued when pool is full
"""
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=self.null_exec)
# Create 2 dagruns, which will create 2 task instances.
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, DEFAULT_DATE)
dr = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dr)
dag_runs = DagRun.find(dag_id="test_scheduler_verify_pool_full")
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
self.assertEqual(len(task_instances_list), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TaskInstance(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
scheduler._execute_task_instances(dagbag, session=session)
self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_verify_pool_full_2_slots_per_task(self):
"""
Test task instances not queued when pool is full.
Variation with non-default pool_slots
"""
dag = DAG(
dag_id='test_scheduler_verify_pool_full_2_slots_per_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full_2_slots_per_task',
pool_slots=2,
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full_2_slots_per_task', slots=6)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=self.null_exec)
# Create 5 dagruns, which will create 5 task instances.
for _ in range(5):
dag_file_processor.create_dag_run(dag)
dag_runs = DagRun.find(dag_id="test_scheduler_verify_pool_full_2_slots_per_task")
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
self.assertEqual(len(task_instances_list), 5)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TaskInstance(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
scheduler._execute_task_instances(dagbag, session=session)
# As tasks require 2 slots, only 3 can fit into 6 available
self.assertEqual(len(scheduler.executor.queued_tasks), 3)
def test_scheduler_verify_priority_and_slots(self):
"""
Test that task instances with higher priority are not queued
when the pool does not have enough slots, though tasks with
lower priority may still be executed.
"""
dag = DAG(
dag_id='test_scheduler_verify_priority_and_slots',
start_date=DEFAULT_DATE)
# Medium priority, not enough slots
DummyOperator(
task_id='test_scheduler_verify_priority_and_slots_t0',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=2,
priority_weight=2,
)
# High priority, occupies first slot
DummyOperator(
task_id='test_scheduler_verify_priority_and_slots_t1',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=1,
priority_weight=3,
)
# Low priority, occupies second slot
DummyOperator(
task_id='test_scheduler_verify_priority_and_slots_t2',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_priority_and_slots',
pool_slots=1,
priority_weight=1,
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_priority_and_slots', slots=2)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob(executor=self.null_exec)
dag_file_processor.create_dag_run(dag)
dag_runs = DagRun.find(dag_id="test_scheduler_verify_priority_and_slots")
task_instances_list = dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
self.assertEqual(len(task_instances_list), 3)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in task_instances_list:
task = dag.get_task(ti_key[1])
ti = TaskInstance(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
scheduler._execute_task_instances(dagbag, session=session)
# Only second and third
self.assertEqual(len(scheduler.executor.queued_tasks), 2)
ti0 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t0').first()
self.assertEqual(ti0.state, State.SCHEDULED)
ti1 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t1').first()
self.assertEqual(ti1.state, State.QUEUED)
ti2 = session.query(TaskInstance)\
.filter(TaskInstance.task_id == 'test_scheduler_verify_priority_and_slots_t2').first()
self.assertEqual(ti2.state, State.QUEUED)
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = MockExecutor(do_update=False)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"))
dagbag.dags.clear()
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dummy_task = BashOperator(
task_id='dummy',
dag=dag,
owner='airflow',
bash_command='echo 1',
)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
dagbag.bag_dag(dag=dag, root_dag=dag)
@mock.patch('airflow.jobs.scheduler_job.DagBag', return_value=dagbag)
def do_schedule(mock_dagbag):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
with conf_vars({('core', 'mp_start_method'): 'fork'}):
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule() # pylint: disable=no-value-for-parameter
with create_session() as session:
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == dag.dag_id,
TaskInstance.task_id == dummy_task.task_id).first()
self.assertEqual(0, len(executor.queued_tasks))
self.assertEqual(State.SCHEDULED, ti.state)
executor.do_update = True
do_schedule() # pylint: disable=no-value-for-parameter
self.assertEqual(0, len(executor.queued_tasks))
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_retry_still_in_executor(self):
"""
Checks that the scheduler does not put a task in limbo when a task is retried
but is still present in the executor.
"""
executor = MockExecutor(do_update=False)
dagbag = DagBag(dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"))
dagbag.dags.clear()
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
dagbag.bag_dag(dag=dag, root_dag=dag)
@mock.patch('airflow.jobs.scheduler_job.DagBag', return_value=dagbag)
def do_schedule(mock_dagbag):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule() # pylint: disable=no-value-for-parameter
with create_session() as session:
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == 'test_retry_still_in_executor',
TaskInstance.task_id == 'test_retry_handling_op').first()
ti.task = dag_task1
# Nothing should be left in the queued_tasks as we don't do update in MockExecutor yet,
# and the queued_tasks will be cleared by scheduler job.
self.assertEqual(0, len(executor.queued_tasks))
def run_with_error(ti, ignore_ti_state=False):
try:
ti.run(ignore_ti_state=ignore_ti_state)
except AirflowException:
pass
self.assertEqual(ti.try_number, 1)
# At this point, the scheduler has tried to schedule the task once and
# heartbeated the executor once, which moved the task's state from
# SCHEDULED to QUEUED and back to SCHEDULED. To fail the task execution
# we need to ignore the TaskInstance state, as SCHEDULED is not a valid
# state from which to start executing a task.
run_with_error(ti, ignore_ti_state=True)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
with create_session() as session:
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
# do schedule
do_schedule() # pylint: disable=no-value-for-parameter
# MockExecutor is not aware of the TaskInstance since we don't do update yet
# and no trace of this TaskInstance will be left in the executor.
self.assertFalse(executor.has_task(ti))
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.do_update = True
do_schedule() # pylint: disable=no-value-for-parameter
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
@pytest.mark.quarantined
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TaskInstance).filter(TaskInstance.dag_id == dag.dag_id,
TaskInstance.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_with_system_exit(self):
"""
Test that a DAG that calls sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER, "..", "dags_with_system_exit")
dag_file = os.path.join(dag_directory, 'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
executor=self.null_exec,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
tis = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()
# Since this dag has no end date, there's a chance that we'll
# start and finish two dag-parsing processes in one loop.
self.assertGreaterEqual(
len(tis), 1,
repr(tis))
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
start_date = six_hours_ago_to_the_hour
dag_name1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag1 = DAG(dag_name1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag1.clear()
dr = dag_file_processor.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception: # pylint: disable=broad-except
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
with env_vars({('core', 'dags_folder'): dags_folder}):
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = {
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
'test_ignore_this.py',
}
for root, _, files in os.walk(TEST_DAG_FOLDER): # pylint: disable=too-many-nested-blocks
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(root, file_name))
for file_path in list_py_file_paths(TEST_DAG_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
ignored_files = {
'helper.py',
}
example_dag_folder = airflow.example_dags.__path__[0]
for root, _, files in os.walk(example_dag_folder): # pylint: disable=too-many-nested-blocks
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py'] and file_name not in ignored_files:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAG_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
smart_sensor_dag_folder = airflow.smart_sensor_dags.__path__[0]
for root, _, files in os.walk(smart_sensor_dag_folder):
for file_name in files:
if (file_name.endswith('.py') or file_name.endswith('.zip')) and \
file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAG_FOLDER,
include_examples=True,
include_smart_sensor=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_type = DagRunType.BACKFILL_JOB.value
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
scheduler = SchedulerJob()
session = settings.Session()
dr1 = dag_file_processor.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr1 = dag_file_processor.create_dag_run(dag)
dr2 = dag_file_processor.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TaskInstance(task, dr1.execution_date)
ti2 = TaskInstance(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
def test_task_with_upstream_skip_process_task_instances():
"""
Test if _process_task_instances puts a task instance into SKIPPED state if any of its
upstream tasks are skipped according to TriggerRuleDep.
"""
clear_db_runs()
with DAG(
dag_id='test_task_with_upstream_skip_dag',
start_date=DEFAULT_DATE,
schedule_interval=None
) as dag:
dummy1 = DummyOperator(task_id='dummy1')
dummy2 = DummyOperator(task_id="dummy2")
dummy3 = DummyOperator(task_id="dummy3")
[dummy1, dummy2] >> dummy3
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.clear()
dr = dag.create_dagrun(run_type=DagRunType.MANUAL,
state=State.RUNNING,
execution_date=DEFAULT_DATE)
assert dr is not None
with create_session() as session:
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
# Set dummy1 to skipped and dummy2 to success. dummy3 remains as none.
tis[dummy1.task_id].state = State.SKIPPED
tis[dummy2.task_id].state = State.SUCCESS
assert tis[dummy3.task_id].state == State.NONE
dag_runs = DagRun.find(dag_id='test_task_with_upstream_skip_dag')
dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
with create_session() as session:
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
assert tis[dummy1.task_id].state == State.SKIPPED
assert tis[dummy2.task_id].state == State.SUCCESS
# dummy3 should be skipped because dummy1 is skipped.
assert tis[dummy3.task_id].state == State.SKIPPED
class TestSchedulerJobQueriesCount(unittest.TestCase):
"""
These tests are designed to detect changes in the number of queries executed
for different DAG files, making it easy to spot changes that affect the
performance of the SchedulerJob.
"""
def setUp(self) -> None:
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count
# One DAG with one task per DAG file
(13, 1, 1), # noqa
# One DAG with five tasks per DAG file
(17, 1, 5), # noqa
# 10 DAGs with 10 tasks per DAG file
(46, 10, 10), # noqa
]
)
def test_execute_queries_count_with_harvested_dags(self, expected_query_count, dag_count, task_count):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": "1d",
"PERF_SCHEDULE_INTERVAL": "30m",
"PERF_SHAPE": "no_structure",
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
('core', 'load_examples'): 'False',
}):
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE, include_examples=False)
for i, dag in enumerate(dagbag.dags.values()):
dr = dag.create_dagrun(state=State.RUNNING, run_id=f"{DagRunType.MANUAL.value}__{i}")
for ti in dr.get_task_instances():
ti.set_state(state=State.SCHEDULED)
mock_agent = mock.MagicMock()
mock_agent.harvest_serialized_dags.return_value = [
SerializedDAG.from_dict(SerializedDAG.to_dict(d)) for d in dagbag.dags.values()]
job = SchedulerJob(subdir=PERF_DAGS_FOLDER)
job.executor = MockExecutor()
job.heartbeat = mock.MagicMock()
job.processor_agent = mock_agent
with assert_queries_count(expected_query_count):
job._run_scheduler_loop()
@parameterized.expand(
[
# pylint: disable=bad-whitespace
# expected, dag_count, task_count
# One DAG with one task per DAG file
(2, 1, 1), # noqa
# One DAG with five tasks per DAG file
(2, 1, 5), # noqa
# 10 DAGs with 10 tasks per DAG file
(2, 10, 10), # noqa
]
)
def test_execute_queries_count_no_harvested_dags(self, expected_query_count, dag_count, task_count):
with mock.patch.dict("os.environ", {
"PERF_DAGS_COUNT": str(dag_count),
"PERF_TASKS_COUNT": str(task_count),
"PERF_START_AGO": "1d",
"PERF_SCHEDULE_INTERVAL": "30m",
"PERF_SHAPE": "no_structure",
}), conf_vars({
('scheduler', 'use_job_schedule'): 'True',
('core', 'load_examples'): 'False',
}):
dagbag = DagBag(dag_folder=ELASTIC_DAG_FILE, include_examples=False)
for i, dag in enumerate(dagbag.dags.values()):
dr = dag.create_dagrun(state=State.RUNNING, run_id=f"{DagRunType.MANUAL.value}__{i}")
for ti in dr.get_task_instances():
ti.set_state(state=State.SCHEDULED)
mock_agent = mock.MagicMock()
mock_agent.harvest_serialized_dags.return_value = []
job = SchedulerJob(subdir=PERF_DAGS_FOLDER)
job.executor = MockExecutor()
job.heartbeat = mock.MagicMock()
job.processor_agent = mock_agent
with assert_queries_count(expected_query_count):
job._run_scheduler_loop()
|
# -*- coding:utf8 -*-
try:
import simplejson as json
except ImportError:
import json
import uuid
from datetime import date, datetime
from decimal import Decimal
from .exceptions import SerializationError
from .compat import string_types
__all__ = ["JSONSerializer"]
class JSONSerializer(object):
def default(self, data):
if isinstance(data, (date, datetime)):
return data.isoformat()
elif isinstance(data, Decimal):
return float(data)
elif isinstance(data, uuid.UUID):
return str(data)
raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data)))
def dumps(self, data):
# don't serialize strings
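# NOTE: `unicode` and `file` below are Python 2 builtins; on Python 3
# these branches would raise NameError.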
if isinstance(data, string_types):
if isinstance(data, unicode):
return data.encode("utf-8")
else:
return data
# don't serialize file
if isinstance(data, file):
return data
try:
return json.dumps(data, default=self.default, ensure_ascii=False)
except (ValueError, TypeError) as e:
raise SerializationError(data, e)
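# Illustrative usage sketch (assuming the relative imports above resolve):
#     serializer = JSONSerializer()
#     serializer.dumps({"when": datetime(2020, 1, 1), "price": Decimal("1.5")})
# The default() hook converts the datetime to an ISO string and the Decimal
# to a float before json.dumps emits the payload.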
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ... import opcodes
from ...core import OutputType
from ...serialize import TupleField, DictField, FunctionField
from ..operands import DataFrameOperandMixin, DataFrameOperand
from ..utils import build_df, build_empty_df, build_series, build_empty_series, parse_index
class GroupByApply(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = opcodes.APPLY
_op_module_ = 'dataframe.groupby'
_func = FunctionField('func')
_args = TupleField('args')
_kwds = DictField('kwds')
def __init__(self, func=None, args=None, kwds=None, output_types=None, **kw):
super().__init__(_func=func, _args=args, _kwds=kwds, _output_types=output_types, **kw)
@property
def func(self):
return self._func
@property
def args(self):
return getattr(self, '_args', None) or ()
@property
def kwds(self):
return getattr(self, '_kwds', None) or dict()
@classmethod
def execute(cls, ctx, op):
in_data = ctx[op.inputs[0].key]
if not in_data:
if op.output_types[0] == OutputType.dataframe:
ctx[op.outputs[0].key] = build_empty_df(op.outputs[0].dtypes)
else:
ctx[op.outputs[0].key] = build_empty_series(op.outputs[0].dtype)
return
applied = in_data.apply(op.func, *op.args, **op.kwds)
# when there is only one group, pandas tends to return a DataFrame, while
# we need to convert it into a compatible Series
if op.output_types[0] == OutputType.series and isinstance(applied, pd.DataFrame):
assert len(applied.index) == 1
applied_idx = pd.MultiIndex.from_arrays(
[[applied.index[0]] * len(applied.columns), applied.columns.tolist()])
applied_idx.names = [applied.index.name, None]
applied = pd.Series(np.array(applied.iloc[0]), applied_idx, name=applied.columns.name)
ctx[op.outputs[0].key] = applied
@classmethod
def tile(cls, op):
in_groupby = op.inputs[0]
out_df = op.outputs[0]
chunks = []
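# Tile chunk-wise: each chunk of the grouped input gets its own output chunk
# whose row count is unknown (np.nan) until execution.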
for c in in_groupby.chunks:
inp_chunks = [c]
new_op = op.copy().reset_key()
if op.output_types[0] == OutputType.dataframe:
chunks.append(new_op.new_chunk(
inp_chunks, index=c.index, shape=(np.nan, len(out_df.dtypes)), dtypes=out_df.dtypes,
columns_value=out_df.columns_value, index_value=out_df.index_value))
else:
chunks.append(new_op.new_chunk(
inp_chunks, name=out_df.name, index=(c.index[0],), shape=(np.nan,), dtype=out_df.dtype,
index_value=out_df.index_value))
new_op = op.copy().reset_key()
kw = out_df.params.copy()
kw['chunks'] = chunks
if op.output_types[0] == OutputType.dataframe:
kw['nsplits'] = ((np.nan,) * len(chunks), (out_df.shape[1],))
else:
kw['nsplits'] = ((np.nan,) * len(chunks),)
return new_op.new_tileables([in_groupby], **kw)
def _infer_df_func_returns(self, in_groupby, in_df, dtypes, index):
index_value, output_type, new_dtypes = None, None, None
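# Infer the output's type, dtypes and index by running the user function on a
# tiny synthetic sample; if inference fails, the caller must supply
# dtypes/index explicitly (enforced in __call__ below).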
try:
if in_df.op.output_types[0] == OutputType.dataframe:
test_df = build_df(in_df, size=2)
else:
test_df = build_series(in_df, size=2, name=in_df.name)
selection = getattr(in_groupby.op, 'selection', None)
if selection:
test_df = test_df[selection]
with np.errstate(all='ignore'):
infer_df = self.func(test_df, *self.args, **self.kwds)
# todo return proper index when sort=True is implemented
index_value = parse_index(None, in_df.key, self.func)
if isinstance(infer_df, pd.DataFrame):
output_type = output_type or OutputType.dataframe
new_dtypes = new_dtypes or infer_df.dtypes
elif isinstance(infer_df, pd.Series):
output_type = output_type or OutputType.series
new_dtypes = new_dtypes or (infer_df.name, infer_df.dtype)
else:
output_type = OutputType.series
new_dtypes = (None, pd.Series(infer_df).dtype)
except: # noqa: E722 # nosec
pass
self.output_types = [output_type] if not self.output_types else self.output_types
dtypes = new_dtypes if dtypes is None else dtypes
index_value = index_value if index is None else parse_index(index)
return dtypes, index_value
def __call__(self, groupby, dtypes=None, index=None):
in_df = groupby
while in_df.op.output_types[0] not in (OutputType.dataframe, OutputType.series):
in_df = in_df.inputs[0]
dtypes, index_value = self._infer_df_func_returns(groupby, in_df, dtypes, index)
for arg, desc in zip((self.output_types, dtypes, index_value),
('output_types', 'dtypes', 'index')):
if arg is None:
                raise TypeError(f'Cannot determine {desc} by calculating with test data, '
                                'please specify it as arguments')
if self.output_types[0] == OutputType.dataframe:
new_shape = (np.nan, len(dtypes))
return self.new_dataframe([groupby], shape=new_shape, dtypes=dtypes, index_value=index_value,
columns_value=parse_index(dtypes.index, store_data=True))
else:
name, dtype = dtypes
new_shape = (np.nan,)
return self.new_series([groupby], name=name, shape=new_shape, dtype=dtype,
index_value=index_value)
def groupby_apply(groupby, func, *args, dtypes=None, index=None, output_types=None, **kwargs):
# todo this can be done with sort_index implemented
if not groupby.op.groupby_params.get('as_index', True):
        raise NotImplementedError('apply when as_index == False is not supported')
op = GroupByApply(func=func, args=args, kwds=kwargs, output_types=output_types)
return op(groupby, dtypes=dtypes, index=index)
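# A minimal usage sketch, not part of this operand module. It assumes a Mars build
# where mars.dataframe wires this operand up as GroupBy.apply and where tileables
# expose `.execute()`; the data and lambda below are purely illustrative.
#
#   import mars.dataframe as md
#   df = md.DataFrame({'a': [1, 1, 2], 'b': [4, 5, 6]})
#   applied = df.groupby('a').apply(lambda g: g.sum())
#   print(applied.execute())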
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for handling UI actions.
.. contents::
:depth: 2
:local:
Introduction
------------
This module is used both by the core application and plugins to create and
register menu entries, toolbar buttons and keyboard shortcuts. All these are
created using the `ActionInfo` and `SeparatorInfo` classes.
Registering actions
-------------------
Actions are registered by creating an instance of the `ActionInfo` class with
the needed data. After configuring the instance it can be registered. The core
application handles registration itself, but plugins should always use the
`pluginapi.Plugin.register_action` method.
Menu separators
~~~~~~~~~~~~~~~
Menu separators are created using instances of the `SeparatorInfo` class.
They must be registered using the same methods as the `ActionInfo` instances.
Registering multiple actions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If there is a need to create a larger number of actions, it might be convenient
to use a simple DSL understood by the `ActionInfoCollection` factory method.
This factory creates a list containing `ActionInfo` and `SeparatorInfo` objects
which can be registered in one go.
Handling actions
----------------
When any of the registered user actions is executed, RIDE decides which
registered event handlers should be called. It is possible to register a handler
globally or so that it is called only when the plugin is considered active
(i.e. it has focus).
The registering mechanism allows multiple handlers to be registered to the same
menu entry, toolbar button, or shortcut. It is thus possible that, for example,
one keyboard shortcut calls multiple handlers.
It is also possible to enable/disable an action's menu entry, toolbar button,
and shortcut with the `action.Action.enable` and `action.Action.disable`
methods. The action object is returned by the
`pluginapi.Plugin.register_action` method.
"""
from actioninfo import ActionInfoCollection, SeparatorInfo, ActionInfo
from action import ActionFactory
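# A minimal sketch of how a plugin might use this module, based on the documentation
# above. The handler name and the ActionInfo constructor arguments shown here are
# assumptions for illustration; the authoritative API lives in `actioninfo` and
# `pluginapi`.
#
#   def on_preview(event):
#       pass  # open the preview window
#
#   info = ActionInfo('Tools', 'Preview', action=on_preview, shortcut='Ctrl-P')
#   # inside a pluginapi.Plugin subclass:
#   #     self.register_action(info)
#
#   # Several actions and separators can also be described with the small DSL
#   # understood by ActionInfoCollection and registered in one go.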
|
'''
BLINC Adaptive Prosthetics Toolkit
- Bionic Limbs for Improved Natural Control, blinclab.ca
anna.koop@gmail.com
A toolkit for running machine learning experiments on prosthetic limb data
This module handles file-based environments.
# TODO: expand to handle ros environments
'''
import os
import pandas as pd
import numpy.random as random
from sklearn import preprocessing
from contextlib import contextmanager
from features import calculate_return, get_unitizer
from collections import defaultdict, namedtuple  # namedtuple is used by EnvDir below
from functools import partial
from datasets import *
from logfiles import *
from local import base_dir, test_dir
class EnvDir(namedtuple('EnvDir', 'dirname steps')):
def __str__(self):
if self.steps:
return "{}-{}".format(self.dirname, self.steps)
else:
return self.dirname
class Protocol():
"""
    Really lightweight object for handling conversions between lists, strings,
    and the underlying protocol dataframe
>>> df = DataFrame({'steps': [100], 'dirname': ['hand-wrist']})
>>> print(Protocol(df))
hand-wrist-100
>>> print(Protocol.from_string('hand-wrist-100_hand-wrist-10'))
hand-wrist-100_hand-wrist-10
>>> print(Protocol.from_list(['hand-wrist', 100]))
hand-wrist-100
"""
@staticmethod
def get_protocol_parts(protocol):
"""
Break down a protocol string into its component parts
>>> Protocol.get_protocol_parts('fred')
[('fred', 0)]
>>> Protocol.get_protocol_parts('fred-')
[('fred', 0)]
>>> Protocol.get_protocol_parts('fred-12')
[('fred', 12)]
>>> Protocol.get_protocol_parts('fred-_jones-')
[('fred', 0), ('jones', 0)]
>>> Protocol.get_protocol_parts('fred_jones')
[('fred', 0), ('jones', 0)]
>>> Protocol.get_protocol_parts('fred-15_jones-12')
[('fred', 15), ('jones', 12)]
>>> Protocol.get_protocol_parts('fred-pos-12')
[('fred-pos', 12)]
"""
parts = protocol.split('_')
ps = []
for p in parts:
subparts = p.split('-')
# check if the last part is empty, as for test-
if not subparts[-1]:
name = '-'.join(subparts[:-1])
num = 0
else:
#TODO: allow floats someday for % repeats
# if the last bit is an integer, great
try:
num = int(subparts[-1])
name = '-'.join(subparts[:-1])
except ValueError:
name = '-'.join(subparts)
num = 0
assert isinstance(name, str)
assert isinstance(num, int)
ps.append((name, num))
return ps
@classmethod
def from_list(cls, protocols):
return cls(DataFrame(list(zip(protocols[0::2],
protocols[1::2])),
columns=['dirname', 'steps'], dtype=int))
@classmethod
def from_string(cls, protocol):
"""
Parse a string representation of protocol
>>> p = Protocol.from_string('hand-wrist')
>>> p.get_dirs()
['hand-wrist']
>>> p.data.dirname[0]
'hand-wrist'
"""
return cls(DataFrame(Protocol.get_protocol_parts(protocol),
columns=['dirname', 'steps'], dtype=int))
def __iter__(self):
"""
>>> p = Protocol.from_string('hand-wrist-100_hand-wrist-10')
>>> for pr in p: print(pr, pr.steps)
hand-wrist-100 100
hand-wrist-10 10
>>> pr
EnvDir(dirname='hand-wrist', steps=10)
"""
for v in self.data.values:
yield EnvDir(*v)
def __init__(self, df):
if isinstance(df, DataFrame):
self.data = df
else:
msg = "Must call custom from_ method for type {}".format(type(df))
raise(ValueError(msg))
def get_dirs(self):
return list(self.data.dirname)
def __str__(self):
return '_'.join(self.data.fillna(0).apply(format_protocol, axis=1))
class Chainer():
"""
Another lightweight object for handling repeated presentation of files.
Same format as protocol, but in this case only the last element should have a number,
and that is the number of times to repeat the specified sequence of files
"""
def __init__(self, *args, reps=1):
self.reps = reps
self.data = DataFrame(data=args, columns=['dirname'])
self.data['steps'] = 0
self.data
class CoinFlip():
"""
Construct a generator/callable that returns a random binary signal
For now, accepts a probability (of 1) or list of probabilities and
changepoints
"""
@staticmethod
def parse_prob(data, prob):
"""
Calculate the 0/1 designation according to the given probability
>>> CoinFlip.parse_prob(.6, .7)
0
>>> CoinFlip.parse_prob(.9, .7)
1
>>> CoinFlip.parse_prob([.2, .65, .9, .5], .5).values
array([0, 1, 1, 1])
"""
try:
return 1 if data >= prob else 0
except TypeError:
data = Series(data)
data[data>=prob] = 1
data[data<prob] = 0
return data.astype(int)
def _iter_changes(self):
"""
Loop through the contexts
"""
if self.changepoints is None:
yield (0, -1)
else:
yield from enumerate(self.changepoints)
yield (len(self.changepoints), -1)
def __iter__(self):
with self:
yield self()
def __init__(self, prob=0.5, seed=None, changepoints=None):
"""
Will get seed from random if not provided, so it can be saved.
If changepoints are provided, it should be a list at least as long
as the list of probabilities.
>>> cf = CoinFlip(.5, seed=0)
>>> (cf.timestep, cf.num_changes)
(-1, -1)
>>> (cf.total_changes, cf.num_contexts)
(0, 1)
>>> cf.seed
0
>>> cf.probabilities
[0.5]
>>> cf = CoinFlip()
>>> cf.seed != 0
True
>>> cf.probabilities
[0.5]
>>> cf = CoinFlip([0.2, 0.8])
>>> (cf.changepoints, cf.num_contexts)
(None, 2)
>>> cf = CoinFlip([0.2, 0.8], changepoints=[16, 32, 64])
>>> (cf.changepoints, cf.num_contexts)
([16, 32, 64], 2)
"""
self.prob = None
if seed is None:
            # TODO: may want to generate something
self.seed = random.get_state()[1][0]
else:
self.seed = seed
self.probabilities = listify(prob)
self.num_contexts = len(self.probabilities)
if changepoints is not None:
self.total_changes = len(changepoints)
self.changepoints = changepoints
else:
self.total_changes = 0
self.changepoints = None
self.reset()
def __enter__(self):
"""
Set up for going through the list of probabilities
>>> cf = CoinFlip(0.25, seed=0)
>>> (cf.timestep, cf.num_changes)
(-1, -1)
>>> s = cf.__enter__()
>>> (cf.timestep, cf.num_changes)
(-1, 0)
>>> cf.prob
0.25
>>> cf = CoinFlip([0.24, .75], changepoints=[2])
>>> (cf.prob, cf.next_change)
(None, 0)
>>> s = cf.__enter__()
>>> (cf.prob, cf.next_change)
(0.24, 2)
"""
self.reset()
random.seed(self.seed)
self.update_probability()
return self
def reset(self):
self.timestep = -1
self.num_changes = -1
self.next_change = 0
self.context = None
self.prob = None
def __exit__(self, *args):
self.context = None
self.prob = None
def __call__(self):
"""
Increment the timestep, check if the probability needs to be updated,
then return the coin flip value.
This way the stored self.prob will always be the distribution that
generated the data output
>>> cf = CoinFlip([0.24, .75], changepoints=[2], seed=0)
>>> cf()
1
>>> cf = CoinFlip(.9, seed=0)
>>> cf()
0
"""
if self.timestep < 0:
self.__enter__()
self.timestep += 1
self.update_probability()
return CoinFlip.parse_prob(random.random(), self.prob)
def update_probability(self):
"""
TODO: this is totally dodgy and won't work if the changepoints are
out of order
>>> cf = CoinFlip([0.24, .75], changepoints=[2])
>>> (cf.prob, cf.num_changes)
(None, -1)
>>> s = cf(); s = cf();
>>> (cf.prob, cf.num_changes)
(0.24, 0)
>>> s = cf()
>>> (cf.prob, cf.num_changes)
(0.75, 1)
"""
if self.timestep < 0:
self.context = self._iter_changes()
self.next_change = -1
if self.timestep == self.next_change:
(self.num_changes, self.next_change) = self.context.__next__()
ind = self.num_changes % self.num_contexts
self.prob = self.probabilities[ind]
# TODO: the probability seems backwards
def get_data(self, terminal):
"""
Get a bunch of data at once
>>> cf = CoinFlip(.5)
>>> data = cf.get_data(1000)
>>> sum(data) < 525 and sum(data) > 475
True
>>> cf = CoinFlip([0.1, 0.9], changepoints=[1000])
>>> data = cf.get_data(2000)
>>> sum(data[:1000]) > sum(data[1000:])
True
"""
with self as flipper:
return Series([flipper() for _ in range(terminal)])
class FileEnvironment():
"""
Constructs a generator/callable that steps through environment file(s) and processes the features
Filter parameters must be specified and given a default value of "None" so that we can make sure they are actually set.
    Required parameters are popped off of kwargs first; the remaining kwargs
    are used to pass lists of filters for each particular file header.
    """
@classmethod
def read_log(cls, *args, **kwargs):
return cls(*args, **kwargs).get_all_data()
    def __init__(self, base_dir, platform, protocol, pid,
                 file_name='raw-data.txt', clean_file_data=False, **kwargs):
        # keep the individual pieces as well; get_env_data/get_raw_data need them
        self.root_dir = base_dir
        self.platform = platform
        self.pid = pid
        self.base_dir = os.path.join(base_dir, platform)
        self.suffix = os.path.join(pid, file_name)
if clean_file_data:
raise NotImplementedError("Don't think I have cleaning file data working")
self.clean_file_data = clean_file_data
if isinstance(protocol, Protocol):
self.protocol = protocol
elif isinstance(protocol, str):
self.protocol = Protocol.from_string(protocol)
else:
self.protocol = Protocol.from_list(protocol)
self.file_name = file_name
self.file_filters = self.get_file_filters(**kwargs)
self.set_headers()
self.reset()
@staticmethod
def get_file_filters(**kwargs):
"""
Returns a dictionary mapping feature headers to functions
"""
        # pop off particular parameters the filters need
        trace_rate = kwargs.pop('trace_rate', None)
        window = kwargs.pop('window', None)
        ranges = kwargs.pop('ranges', None)
        num_classes = kwargs.pop('num_classes', None)
        gamma = kwargs.pop('gamma', None)      # used by the 'return' filter
        horizon = kwargs.pop('horizon', None)  # used by the 'return' filter
        ff = defaultdict(list)
        for header, filtrs in kwargs.items():
            # for each filter name, build the corresponding function and append it
ff[header] = []
filtrs = listify(filtrs)
for ftr in filtrs:
if not ftr:
continue
elif ftr == 'rollmean':
func = partial(pd.rolling_mean, window=window, min_periods=1)
elif ftr == 'trace':
raise NotImplementedError("Don't have traced features yet")
elif ftr == 'return':
func = partial(calculate_return, gamma=gamma, horizon=horizon)
elif ftr == 'scaled':
func = lambda x: (x-ranges[header][0])/ranges[header][1]
elif ftr == 'unitvect':
func = get_unitizer(num_classes)
else:
raise ValueError("Unknown filter type {}".format(ftr))
ff[header].append(func)
return ff
def __iter_chunks__(self):
"""
Loop through each complete datablock
The datablocks are determined by the protocols and may be
cropped or looped depending on the size the protocol specifies
"""
self.start()
while self.dir_iter is not None:
self.update_data_block()
yield self.get_data_block()
self.reset()
def get_all_data(self):
"""
Return a full-size dataframe for all the required data over all protocols
"""
return pd.concat([x for x in self.__iter_chunks__()], ignore_index=True)
def get_env_data(self, **kwargs):
"""
For when you want the same base environment, but not necessarily the
block of headers you specified. Goes back to the environment file to
pull data
kwargs holds the filter information
{header_name: filter_types_from_left_to_right,
parameter1: val,
parameter2: val}
"""
        return FileEnvironment(self.root_dir, self.platform, self.protocol, self.pid,
                               **kwargs).get_all_data()
def get_raw_data(self, header=None, **kwargs):
"""
Get the environment data for the given header/filter info
Return as a block
"""
        return FileEnvironment.read_log(self.root_dir, self.platform, self.protocol,
                                        self.pid, file_name=self.file_name, **kwargs)
def get_return_data(self, header=None, gamma=None, horizon=None):
"""
        Get the full data vector for the given header and calculate the return
        data for these environment settings and the given parameters.
#TODO: don't know if this should be here or not.
"""
old_headers = self.headers
if header is not None:
self.headers = listify(header)
data = self.get_all_data()
if gamma is not None or horizon is not None:
data['Return'] = calculate_return(data.values, gamma=gamma, horizon=horizon)
self.headers = old_headers
return data
def get_data_block(self):
"""
Get all of the current data block
Unlike the incremental version, this will duplicate rows to fill out the requested
protocol length
# TODO: generalize this to take protocol as a potential argument
# TODO: or make a helper function that expands/shrinks a dataframe
"""
if self.data is None:
return None
data_size = len(self.data.index)
if self.current_max <= 0 or self.current_max == data_size:
self.local_step = data_size
return self.data
repeats = self.current_max // data_size
remainder = self.current_max % data_size
if repeats:
data = pd.concat([self.data]*repeats, ignore_index=True)
else:
data = DataFrame()
data = data.append(self.data[:remainder], ignore_index=True)
self.local_step = self.current_max
return data
#@contextmanager
#def __iter__(self):
#self.start()
#while self.dir_iter is not None:
#self.update_data_block()
#yield self.get_data_block()
#self.reset()
def set_headers(self, headers=None):
"""
Look up the file headers in the first protocol path,
identify which ones match up with the requested headers (if none provided,
use the keys of self.file_filters)
Matching headers are saved in self.headers and the translation
between requested header/file_filter and actual header is stored in self.header_mapping
"""
# set the file headers according to an actual data file
prot = self.protocol.get_dirs()[0]
filepath = os.path.join(self.base_dir, prot, self.suffix)
self.file_headers = LogReader.check_headers(filepath)
if headers is None:
headers = list(self.file_filters.keys())
# figure out which ones we care about
# and which ones need custom mappings
self.header_mapping = {}
self.headers = []
for h in self.file_headers:
if h in headers:
self.headers.append(h)
# we won't store the mapping if we don't need to
else:
parts = h.split('-')
if parts[0] in headers:
self.header_mapping[h] = parts[0]
self.headers.append(h)
# could do other conversion checking here
def reset(self):
"""
Resets all the tracking values
"""
self.dir_iter = None
self.current_dir = None
self.step = -1
self.local_step = -1
self.data = None
def start(self):
"""
Reset everything, then start up the protocol iterator
and set the global step counter
"""
self.reset()
self.dir_iter = self.protocol.__iter__()
self.step = 0
def update(self):
"""
Ensure that the correct data block is loaded,
grab the current data,
then increment the steps and return the data
"""
self.update_data_block()
data = self.get_data()
self.step += 1 # global count
self.local_step += 1 # count for the current protocol
return data
def get_data(self, index=None):
"""
Return the data for the index passed (loops allowed, defaults to the current local time index)
"""
if self.data is None:
return None
if index is None:
index = self.local_step % len(self.data.index)
return self.data.ix[index]
def update_data_block(self):
"""
Check to see if we need to load a new block of data
And then set self.raw_data and self.data appropriately
"""
# if we haven't started yet
if self.dir_iter is None:
self.start()
# if we have some data loaded and we're below the max (or don't have a max) just keep going
if self.data is not None and ((self.current_max == 0 and \
self.local_step < len(self.data.index)) or \
(self.current_max > 0 and \
self.local_step < self.current_max)):
return
# otherwise we need to grab a datablock
# grab the next protocol from the iterator, if possible
try:
prot = next(self.dir_iter)
self.current_dir = prot.dirname
self.current_max = prot.steps
self.local_step = 0
except StopIteration:
# means we're done with our current data block and don't want another
self.reset()
return
self.raw_data = self._get_data_block()
self.data = self._filter_data_block()
def _get_data_block(self, headers=None, directory=None, num_lines=None):
"""
Load num_lines of raw_data from the specified directory according to the requested headers.
Default to all headers, the current protocol directory and max
"""
if headers is None:
headers = self.headers
if not directory:
if not self.current_dir:
self.update_data_block()
directory = self.current_dir
if self.current_max and self.current_max > 0:
num_lines = self.current_max
else:
num_lines = None
# load the data block for the current protocol and given headers
filepath = os.path.join(str(directory), self.suffix)
raw_data = LogReader.read_log(filepath, base_dir=self.base_dir,
headers=headers,
nrows=num_lines,
clean_file_data=self.clean_file_data)
return raw_data
def _filter_data_block(self, data=None, data_filters=None):
"""
Fully parse the data passed (defaults to self.raw_data) according to
whatever filters (defaults to self.filters)
"""
if data is None:
#TODO: this may be inefficient
data = self.raw_data.copy()
if data_filters is None:
data_filters = self.file_filters
for h in data.columns:
header = self.header_mapping.get(h, h)
for func in data_filters[header]:
data[h] = func(data[h])
return data
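# A minimal usage sketch (the platform, protocol, participant id, and header names
# below are hypothetical; the layout follows the FileEnvironment docstring above):
#
#   env = FileEnvironment(base_dir, 'bento', 'hand-wrist-100', 'pid01',
#                         shoulder=['rollmean'], window=10)
#   block = env.get_all_data()   # full dataframe over the whole protocol
#   row = env.update()           # or step through it one sample at a time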
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
print("Done doctests")
|
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root : root node of tree
# @param k : integer
# @return an integer
def __init__(self):
self.count = 0
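    # recurse performs an in-order traversal (left subtree, node, right subtree),
    # incrementing self.count for every node visited and stopping as soon as the
    # k-th smallest value has been found.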
def recurse(self, root, k):
if self.count == k:
return (True, root.val)
if root.left is None:
self.count += 1
if self.count == k:
return (True, root.val)
if root.right is None:
return (False, -1)
else:
res = self.recurse(root.right, k)
if res[0] == True:
return res
else:
res = self.recurse(root.left, k)
if res[0] == True:
return res
self.count += 1
if self.count == k:
return (True, root.val)
if root.right is not None:
                res = self.recurse(root.right, k)
                if res[0] == True:
                    return res
        # fall through: the k-th smallest is not in this subtree
        return (False, -1)
def kthsmallest(self, root, k):
res = self.recurse(root, k)
if res[0] == True:
return res[1]
root = TreeNode(2)
root.left = TreeNode(1)
root.right = TreeNode(3)
root.right.right = TreeNode(4)
print(Solution().kthsmallest(root, 4))
|
import unittest
from linty_fresh.linters import passthrough
from linty_fresh.problem import Problem
class PassthroughTest(unittest.TestCase):
def test_empty_parse(self):
self.assertEqual(set(), passthrough.parse(''))
def test_parse_errors(self):
test_string = [
' Something happened!',
"More stuff 'happened'\n\n",
]
result = passthrough.parse('\n'.join(test_string))
self.assertEqual(1, len(result))
self.assertIn(Problem('', 0,
'Something happened!\n'
"More stuff 'happened'"), result)
|
import datetime
import json as jsonlib
import logging
import re
import urlparse
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static as django_static
from django.core.urlresolvers import reverse as django_reverse
from django.http import QueryDict
from django.template.loader import render_to_string
from django.utils.encoding import smart_str, smart_text
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _lazy, ugettext as _, ungettext
from django.utils.tzinfo import LocalTimezone
import bleach
import jinja2
from babel import localedata
from babel.dates import format_date, format_time, format_datetime
from babel.numbers import format_decimal
from django_jinja import library
from jinja2.utils import Markup
from pytz import timezone
from kitsune.sumo import parser
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.models import Profile
from kitsune.wiki.showfor import showfor_data as _showfor_data
log = logging.getLogger('k.helpers')
class DateTimeFormatError(Exception):
"""Called by the datetimeformat function when receiving invalid format."""
pass
@library.filter
def paginator(pager):
"""Render list of pages."""
return Paginator(pager).render()
@library.filter
def simple_paginator(pager):
return jinja2.Markup(render_to_string('includes/simple_paginator.html', {'pager': pager}))
@library.filter
def quick_paginator(pager):
return jinja2.Markup(render_to_string('includes/quick_paginator.html', {'pager': pager}))
@library.filter
def mobile_paginator(pager):
return jinja2.Markup(render_to_string('includes/mobile/paginator.html', {'pager': pager}))
@library.global_function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates.
Uses sumo's locale-aware reverse."""
locale = kwargs.pop('locale', None)
return reverse(viewname, locale=locale, args=args, kwargs=kwargs)
@library.global_function
def unlocalized_url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates.
Uses django's default reverse."""
return django_reverse(viewname, args=args, kwargs=kwargs)
@library.filter
def urlparams(url_, hash=None, query_dict=None, **query):
"""
Add a fragment and/or query parameters to a URL.
    New query params will be appended to existing parameters, except duplicate
names, which will be replaced.
"""
url_ = urlparse.urlparse(url_)
fragment = hash if hash is not None else url_.fragment
q = url_.query
new_query_dict = (QueryDict(smart_str(q), mutable=True) if
q else QueryDict('', mutable=True))
if query_dict:
for k, l in query_dict.lists():
new_query_dict[k] = None # Replace, don't append.
for v in l:
new_query_dict.appendlist(k, v)
for k, v in query.items():
new_query_dict[k] = v # Replace, don't append.
query_string = urlencode([(k, v) for k, l in new_query_dict.lists() for
v in l if v is not None])
new = urlparse.ParseResult(url_.scheme, url_.netloc, url_.path,
url_.params, query_string, fragment)
return new.geturl()
@library.filter
def wiki_to_html(wiki_markup, locale=settings.WIKI_DEFAULT_LANGUAGE,
nofollow=True):
"""Wiki Markup -> HTML jinja2.Markup object"""
return jinja2.Markup(parser.wiki_to_html(wiki_markup, locale=locale,
nofollow=nofollow))
@library.filter
def truncate_question(text, length, longtext=None):
if len(text) > length:
if longtext is None:
longtext = text
stripped_text = bleach.clean(text, tags=[], strip=True)
f = ('<p class="short-text">%s… ' +
'<span class="show-more-link">(' + _('read more') + ')</span>' +
'</p><div class="long-text">%s</div>')
return f % (stripped_text[:length], longtext)
return text
class Paginator(object):
def __init__(self, pager):
self.pager = pager
self.max = 10
self.span = (self.max - 1) / 2
self.page = pager.number
self.num_pages = pager.paginator.num_pages
self.count = pager.paginator.count
pager.page_range = self.range()
pager.dotted_upper = self.num_pages not in pager.page_range
pager.dotted_lower = 1 not in pager.page_range
def range(self):
"""Return a list of page numbers to show in the paginator."""
page, total, span = self.page, self.num_pages, self.span
if total < self.max:
lower, upper = 0, total
elif page < span + 1:
lower, upper = 0, span * 2
elif page > total - span:
lower, upper = total - span * 2, total
else:
lower, upper = page - span, page + span - 1
return range(max(lower + 1, 1), min(total, upper) + 1)
def render(self):
c = {'pager': self.pager, 'num_pages': self.num_pages,
'count': self.count}
return jinja2.Markup(render_to_string('layout/paginator.html', c))
@jinja2.contextfunction
@library.global_function
def breadcrumbs(context, items=list(), add_default=True, id=None):
"""
Show a list of breadcrumbs. If url is None, it won't be a link.
Accepts: [(url, label)]
"""
if add_default:
first_crumb = u'Home'
crumbs = [(reverse('home'), _lazy(first_crumb))]
else:
crumbs = []
# add user-defined breadcrumbs
if items:
try:
crumbs += items
except TypeError:
crumbs.append(items)
c = {'breadcrumbs': crumbs, 'id': id}
return jinja2.Markup(render_to_string('layout/breadcrumbs.html', c))
def _babel_locale(locale):
"""Return the Babel locale code, given a normal one."""
# Babel uses underscore as separator.
return locale.replace('-', '_')
def _contextual_locale(context):
"""Return locale from the context, falling back to a default if invalid."""
request = context.get('request')
locale = request.LANGUAGE_CODE
if not localedata.exists(locale):
locale = settings.LANGUAGE_CODE
return locale
@jinja2.contextfunction
@library.global_function
def datetimeformat(context, value, format='shortdatetime'):
"""
Returns a formatted date/time using Babel's locale settings. Uses the
timezone from settings.py, if the user has not been authenticated.
"""
if not isinstance(value, datetime.datetime):
        # Expecting a datetime value
raise ValueError(
'Unexpected value {value} passed to datetimeformat'.format(
value=value))
request = context.get('request')
default_tzinfo = convert_tzinfo = timezone(settings.TIME_ZONE)
if value.tzinfo is None:
value = default_tzinfo.localize(value)
new_value = value.astimezone(default_tzinfo)
else:
new_value = value
if hasattr(request, 'session'):
if 'timezone' not in request.session:
if hasattr(request, 'user') and request.user.is_authenticated():
try:
convert_tzinfo = (Profile.objects.get(user=request.user).timezone or
default_tzinfo)
except (Profile.DoesNotExist, AttributeError):
pass
request.session['timezone'] = convert_tzinfo
else:
convert_tzinfo = request.session['timezone']
convert_value = new_value.astimezone(convert_tzinfo)
locale = _babel_locale(_contextual_locale(context))
    # 'shortdatetime' shows "Today at <time>" when the date is today in the target timezone
if format == 'shortdatetime':
# Check if the date is today
today = datetime.datetime.now(tz=convert_tzinfo).toordinal()
if convert_value.toordinal() == today:
formatted = _lazy(u'Today at %s') % format_time(
convert_value, format='short', tzinfo=convert_tzinfo,
locale=locale)
else:
formatted = format_datetime(convert_value,
format='short',
tzinfo=convert_tzinfo,
locale=locale)
elif format == 'longdatetime':
formatted = format_datetime(convert_value, format='long',
tzinfo=convert_tzinfo, locale=locale)
elif format == 'date':
formatted = format_date(convert_value, locale=locale)
elif format == 'time':
formatted = format_time(convert_value, tzinfo=convert_tzinfo,
locale=locale)
elif format == 'datetime':
formatted = format_datetime(convert_value, tzinfo=convert_tzinfo,
locale=locale)
elif format == 'year':
formatted = format_datetime(convert_value, format='yyyy',
tzinfo=convert_tzinfo, locale=locale)
else:
# Unknown format
raise DateTimeFormatError
return jinja2.Markup('<time datetime="%s">%s</time>' %
(convert_value.isoformat(), formatted))
_whitespace_then_break = re.compile(r'[\r\n\t ]+[\r\n]+')
@library.filter
def collapse_linebreaks(text):
"""Replace consecutive CRs and/or LFs with single CRLFs.
CRs or LFs with nothing but whitespace between them are still considered
consecutive.
As a nice side effect, also strips trailing whitespace from lines that are
followed by line breaks.
"""
# I previously tried an heuristic where we'd cut the number of linebreaks
# in half until there remained at least one lone linebreak in the text.
# However, about:support in some versions of Firefox does yield some hard-
# wrapped paragraphs using single linebreaks.
return _whitespace_then_break.sub('\r\n', text)
@library.filter
def json(s):
return jsonlib.dumps(s)
@jinja2.contextfunction
@library.global_function
def number(context, n):
"""Return the localized representation of an integer or decimal.
For None, print nothing.
"""
if n is None:
return ''
return format_decimal(n, locale=_babel_locale(_contextual_locale(context)))
@library.filter
def timesince(d, now=None):
"""Take two datetime objects and return the time between d and now as a
nicely formatted string, e.g. "10 minutes". If d is None or occurs after
now, return ''.
    Units used are years, months, weeks, days, hours, minutes, and seconds.
    Microseconds are ignored. Just one unit is displayed. For example,
"2 weeks" and "1 year" are possible outputs, but "2 weeks, 3 days" and "1
year, 5 months" are not.
Adapted from django.utils.timesince to have better i18n (not assuming
commas as list separators and including "ago" so order of words isn't
assumed), show only one time unit, and include seconds.
"""
if d is None:
return u''
chunks = [
(60 * 60 * 24 * 365, lambda n: ungettext('%(number)d year ago',
'%(number)d years ago', n)),
(60 * 60 * 24 * 30, lambda n: ungettext('%(number)d month ago',
'%(number)d months ago', n)),
(60 * 60 * 24 * 7, lambda n: ungettext('%(number)d week ago',
'%(number)d weeks ago', n)),
(60 * 60 * 24, lambda n: ungettext('%(number)d day ago',
'%(number)d days ago', n)),
(60 * 60, lambda n: ungettext('%(number)d hour ago',
'%(number)d hours ago', n)),
(60, lambda n: ungettext('%(number)d minute ago',
'%(number)d minutes ago', n)),
(1, lambda n: ungettext('%(number)d second ago',
'%(number)d seconds ago', n))]
if not now:
if d.tzinfo:
now = datetime.datetime.now(LocalTimezone(d))
else:
now = datetime.datetime.now()
# Ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u''
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
return name(count) % {'number': count}
@library.filter
def label_with_help(f):
"""Print the label tag for a form field, including the help_text
value as a title attribute."""
label = u'<label for="%s" title="%s">%s</label>'
return jinja2.Markup(label % (f.auto_id, f.help_text, f.label))
@library.filter
def yesno(boolean_value):
return jinja2.Markup(_lazy(u'Yes') if boolean_value else _lazy(u'No'))
@library.filter
def remove(list_, item):
"""Removes an item from a list."""
return [i for i in list_ if i != item]
@jinja2.contextfunction
@library.global_function
def ga_push_attribute(context):
"""Return the json for the data-ga-push attribute.
    This is used to define custom variables and other special tracking with
Google Analytics.
"""
request = context.get('request')
ga_push = context.get('ga_push', [])
# If the user is on the first page after logging in,
# we add a "User Type" custom variable.
if request.GET.get('fpa') == '1' and request.user.is_authenticated():
user = request.user
group_names = user.groups.values_list('name', flat=True)
# If they belong to the Administrator group:
if 'Administrators' in group_names:
ga_push.append(
['_setCustomVar', 1, 'User Type', 'Contributor - Admin', 1])
# If they belong to the Contributors group:
elif 'Contributors' in group_names:
ga_push.append(['_setCustomVar', 1, 'User Type', 'Contributor', 1])
# If they don't belong to any of these groups:
else:
ga_push.append(['_setCustomVar', 1, 'User Type', 'Registered', 1])
return jsonlib.dumps(ga_push)
@jinja2.contextfunction
@library.global_function
def is_secure(context):
request = context.get('request')
if request and hasattr(request, 'is_secure'):
return context.get('request').is_secure()
return False
@library.filter
def linkify(text):
return bleach.linkify(text)
@library.global_function
def showfor_data(products):
# Markup() marks this data as safe.
return Markup(jsonlib.dumps(_showfor_data(products)))
@library.global_function
def add_utm(url_, campaign, source='notification', medium='email'):
"""Add the utm_* tracking parameters to a URL."""
return urlparams(
url_, utm_campaign=campaign, utm_source=source, utm_medium=medium)
@library.global_function
def to_unicode(str):
return unicode(str)
@library.global_function
def static(path):
"""Generate a URL for the specified static file."""
try:
return django_static(path)
except ValueError as err:
log.error('Static helper error: %s' % err)
return ''
@library.global_function
def now():
return datetime.datetime.now()
@library.filter
def class_selected(a, b):
"""
Return 'class="selected"' if a == b, otherwise return ''.
"""
if a == b:
return 'class="selected"'
else:
return ''
@library.filter
def f(format_string, *args, **kwargs):
"""
Uses ``str.format`` for string interpolation.
>>> {{ "{0} arguments and {x} arguments"|f('positional', x='keyword') }}
"positional arguments and keyword arguments"
"""
# Jinja will sometimes give us a str and other times give a unicode
# for the `format_string` parameter, and we can't control it, so coerce it here.
if isinstance(format_string, str): # not unicode
format_string = unicode(format_string)
return format_string.format(*args, **kwargs)
@library.filter
def fe(format_string, *args, **kwargs):
"""Format a safe string with potentially unsafe arguments. returns a safe string."""
args = [jinja2.escape(smart_text(v)) for v in args]
for k in kwargs:
kwargs[k] = jinja2.escape(smart_text(kwargs[k]))
# Jinja will sometimes give us a str and other times give a unicode
# for the `format_string` parameter, and we can't control it, so coerce it here.
if isinstance(format_string, str): # not unicode
format_string = unicode(format_string)
return jinja2.Markup(format_string.format(*args, **kwargs))
|
"""
@package mi.instrument.uw.hpies.ooicore.driver
@file marine-integrations/mi/instrument/uw/hpies/ooicore/driver.py
@author Dan Mergens
@brief Driver for the ooicore
Release notes:
initial_rev
"""
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
import time
import re
import tempfile
from mi.core.exceptions import \
SampleException, \
InstrumentProtocolException, \
InstrumentParameterException, \
InstrumentTimeoutException
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility, ParameterDictType
from mi.core.util import dict_equal
from mi.core.log import \
get_logger, \
get_logging_metaclass
from mi.core.common import BaseEnum, Units
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_driver import \
SingleConnectionInstrumentDriver, DriverEvent, DriverAsyncEvent, DriverProtocolState, DriverParameter, \
ResourceAgentState
from mi.core.instrument.data_particle import CommonDataParticleType, DataParticleKey, DataParticle, DataParticleValue
from mi.core.instrument.chunker import StringChunker
from mi.instrument.uw.hpies.crclib import crc3kerm
# newline.
NEWLINE = '\r\n'
log = get_logger()
# default timeout.
TIMEOUT = 10
common_matches = {
'float': r'-?\d*\.?\d+',
'int': r'-?\d+',
'str': r'\w+',
'fn': r'\S+',
'rest': r'.*',
'tod': r'\d{8}T\d{6}',
'data': r'#\d[^\*]+',
'crc': r'[0-9a-fA-F]{4}'
}
def build_command(address, command, *args):
"""
Create an instrument command string.
:param address: 1 - STM, 3 - HEF, 4 - IES
:param command: command string
:param args: arguments for command
:return: fully qualified command string
"""
s = '#' + address + '_' + command
formatted_list = [] # convert all booleans to integers
for x in args:
if type(x) is bool:
formatted_list.append(int(x))
else:
formatted_list.append(x)
if formatted_list:
s += ' ' + ' '.join([str(x) for x in formatted_list])
s = s + str.format('*{0:04x}', crc3kerm(s)) + NEWLINE
return s
def calc_crc(line):
"""
Check response for valid checksum.
@param line data line which may contain extra characters at beginning or end
@retval
- computed value of the CRC for the data
- regex match for the crc value provided with the data
"""
pattern = re.compile(
r'(?P<resp>%(data)s)\*(?P<crc>%(crc)s)' % common_matches)
matches = re.search(pattern, line)
if not matches: # skip any lines that do not have a checksum match
return 0, 0
resp_crc = int(matches.group('crc'), 16)
data = matches.group('resp')
crc = crc3kerm(data)
return crc, resp_crc
def valid_response(resp):
"""
Check response for valid checksum.
@param resp response line
@return - whether or not checksum matches data
"""
crc, resp_crc = calc_crc(resp)
return crc == resp_crc
def stm_command(s, *args):
"""
Create fully qualified STM command (add prefix and postfix the CRC).
"""
return build_command('1', s, *args)
def hef_command(s, *args):
"""
Create fully qualified HEF command (add prefix and postfix the CRC).
"""
return build_command('3', s, *args)
def ies_command(s, *args):
"""
Create fully qualified IES command (add prefix and postfix the CRC).
"""
return build_command('4', s, *args)
# ##
# Driver Constant Definitions
###
class HPIESUnits(Units):
CYCLE = 'cycle'
HALF_CYCLE = 'half cycle'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
GET = DriverEvent.GET
SET = DriverEvent.SET
DISCOVER = DriverEvent.DISCOVER
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
DISCOVER = DriverEvent.DISCOVER
class Parameter(DriverParameter):
"""
Instrument specific parameters
"""
# HEF parameters
SERIAL = 'serno'
DEBUG_LEVEL = 'debug'
WSRUN_PINCH = 'wsrun pinch secs' # half cycle interval between water switch tube pinch
# EF_SKIP = 'ef skip secs' # time in seconds to wait before using EF data after moving motors
NFC_CALIBRATE = 'nfc calibrate' # number of cycles of water switch between applying 'cal'
CAL_HOLD = 'cal hold secs' # hold time of calibration voltage
CAL_SKIP = 'cal skip' # time in seconds to wait before using data after changing the calibration signal state
NHC_COMPASS = 'nhc compass' # number of half cycles between compass measurements
COMPASS_SAMPLES = 'compass nget' # number of compass samples to acquire in a burst
COMPASS_DELAY = 'compass dsecs' # time between measurements in a burst
INITIAL_COMPASS = 'icompass run' # initial compass measurement (in seconds)
INITIAL_COMPASS_DELAY = 'icompass dsecs' #
# FILE_LENGTH = 'secs per ofile' # seconds per file (default 86400 - one day)
MOTOR_SAMPLES = 'navg mot' # number of samples to average
EF_SAMPLES = 'navg ef' # number of samples to average
CAL_SAMPLES = 'navg cal' # number of samples to average
CONSOLE_TIMEOUT = 'console off timeout' # sleep timeout for UART drivers (use hef_wake to resume)
WSRUN_DELAY = 'wsrun delay secs' #
MOTOR_DIR_NHOLD = 'motor dir nhold' #
MOTOR_DIR_INIT = 'motor dir init'
# 'ies baud'
# 'ies hcvals use'
# 'ies delay'
# 'ies sf efo'
# 'ies sf cal'
# 'ies sf efm'
POWER_COMPASS_W_MOTOR = 'dcpwm' # false
KEEP_AWAKE_W_MOTOR = 'dkawm' # true
MOTOR_TIMEOUTS_1A = 'm1a_tmoc' # timeout counts for motor - 200
MOTOR_TIMEOUTS_1B = 'm1b_tmoc' # timeout counts for motor - 200
MOTOR_TIMEOUTS_2A = 'm2a_tmoc' # timeout counts for motor - 200
MOTOR_TIMEOUTS_2B = 'm2b_tmoc' # timeout counts for motor - 200
RSN_CONFIG = 'do_rsn' # configured for RSN (instead of autonomous) - true
INVERT_LED_DRIVERS = 'led_drivers_invert' # false
M1A_LED = 'm1a_led' # 1
M2A_LED = 'm2a_led' # 3
    # Inverted Echo Sounder (IES) parameters - all these are read-only
ECHO_SAMPLES = 'Travel Time Measurements: 4 pings every 10 minutes'
WATER_DEPTH = 'Estimated Water Depth: 3000 meters'
ACOUSTIC_LOCKOUT = 'Acoustic Lockout: 3.60 seconds'
ACOUSTIC_OUTPUT = 'Acoustic output set at 186 dB'
RELEASE_TIME = 'Release Time: Thu Dec 25 12:00:00 2014'
COLLECT_TELEMETRY = 'Telemetry data file enabled'
MISSION_STATEMENT = 'Mission Statement: No mission statement has been entered'
PT_SAMPLES = 'Pressure and Temperature measured every 10 minutes'
TEMP_COEFF_U0 = 'temp coeff u0' # default 5.814289
TEMP_COEFF_Y1 = 'temp coeff y1' # default -3978.811
TEMP_COEFF_Y2 = 'temp coeff y2' # default -10771.79
TEMP_COEFF_Y3 = 'temp coeff y3' # default 0.00
PRES_COEFF_C1 = 'pressure coeff c1' # default -30521.42
PRES_COEFF_C2 = 'pressure coeff c2' # default -2027.363
PRES_COEFF_C3 = 'pressure coeff c3' # default 95228.34
PRES_COEFF_D1 = 'pressure coeff d1' # default 0.039810
PRES_COEFF_D2 = 'pressure coeff d2' # default 0.00
PRES_COEFF_T1 = 'pressure coeff t1' # default 30.10050
PRES_COEFF_T2 = 'pressure coeff t2' # default 0.096742
PRES_COEFF_T3 = 'pressure coeff t3' # default 56.45416
PRES_COEFF_T4 = 'pressure coeff t4' # default 151.539900
PRES_COEFF_T5 = 'pressure coeff t5' # default 0.00
TEMP_OFFSET = 'temp offset' # default -0.51 degrees C
PRES_OFFSET = 'press offset' # default 0.96 psi
BLILEY_0 = 'bliley B0' # -0.575100
BLILEY_1 = 'bliley B1' # -0.5282501
BLILEY_2 = 'bliley B2' # -0.013084390
BLILEY_3 = 'bliley B3' # 0.00004622697
@classmethod
def reverse_dict(cls):
return dict((v, k) for k, v in cls.dict().iteritems())
class ParameterConstraints(BaseEnum):
"""
type, minimum, maximum values for each settable parameter
"""
DEBUG_LEVEL = (int, 0, 3)
WSRUN_PINCH = (int, 1, 3600)
NFC_CALIBRATE = (int, 1, 3600)
NHC_COMPASS = (int, 1, 3600)
COMPASS_SAMPLES = (int, 1, 3600)
COMPASS_DELAY = (int, 1, 3600)
MOTOR_SAMPLES = (int, 1, 100)
EF_SAMPLES = (int, 1, 100)
CAL_SAMPLES = (int, 1, 100)
MOTOR_TIMEOUTS_1A = (int, 10, 1000)
MOTOR_TIMEOUTS_1B = (int, 10, 1000)
MOTOR_TIMEOUTS_2A = (int, 10, 1000)
MOTOR_TIMEOUTS_2B = (int, 10, 1000)
class Command(BaseEnum):
"""
Instrument command strings - base strings, use [stm|hef|ies]_command to build command
"""
# STM commands
REBOOT = 'reboot'
ACQUISITION_START = 'daq_start'
ACQUISITION_STOP = 'daq_stop'
IES_PORT_ON = 'ies_opto_on' # should only be on to change parameters and start mission
IES_PORT_OFF = 'ies_opto_off'
IES_POWER_ON = 'ies_pwr_on'
IES_POWER_OFF = 'ies_pwr_off' # must power cycle to apply changed parameters
HEF_PORT_ON = 'hef_opto_on' # should remain on during mission
HEF_PORT_OFF = 'hef_opto_off'
HEF_POWER_ON = 'hef_pwr_on'
HEF_POWER_OFF = 'hef_pwr_off'
HEF_WAKE = 'hef_wake'
HEF_PARAMS = 'params'
HEF_SAVE = 'params save'
SYNC_CLOCK = 'force_RTC_update' # align STM clock to RSN date/time
# HEF specific commands
PREFIX = 'prefix'
MISSION_START = 'mission start'
MISSION_STOP = 'mission stop'
# Commands which set parameters
DEBUG_LEVEL = Parameter.DEBUG_LEVEL
WSRUN_PINCH = Parameter.WSRUN_PINCH
NFC_CALIBRATE = Parameter.NFC_CALIBRATE
CAL_HOLD = Parameter.CAL_HOLD
NHC_COMPASS = Parameter.NHC_COMPASS
COMPASS_SAMPLES = Parameter.COMPASS_SAMPLES
COMPASS_DELAY = Parameter.COMPASS_DELAY
MOTOR_SAMPLES = Parameter.MOTOR_SAMPLES
EF_SAMPLES = Parameter.EF_SAMPLES
CAL_SAMPLES = Parameter.CAL_SAMPLES
CONSOLE_TIMEOUT = Parameter.CONSOLE_TIMEOUT
WSRUN_DELAY = Parameter.WSRUN_DELAY
MOTOR_DIR_NHOLD = Parameter.MOTOR_DIR_NHOLD
MOTOR_DIR_INIT = Parameter.MOTOR_DIR_INIT
POWER_COMPASS_W_MOTOR = Parameter.POWER_COMPASS_W_MOTOR
KEEP_AWAKE_W_MOTOR = Parameter.KEEP_AWAKE_W_MOTOR
MOTOR_TIMEOUTS_1A = Parameter.MOTOR_TIMEOUTS_1A
MOTOR_TIMEOUTS_1B = Parameter.MOTOR_TIMEOUTS_1B
MOTOR_TIMEOUTS_2A = Parameter.MOTOR_TIMEOUTS_2A
MOTOR_TIMEOUTS_2B = Parameter.MOTOR_TIMEOUTS_2B
RSN_CONFIG = Parameter.RSN_CONFIG
INVERT_LED_DRIVERS = Parameter.INVERT_LED_DRIVERS
M1A_LED = Parameter.M1A_LED
M2A_LED = Parameter.M2A_LED
# The following are not implemented commands
# 'term hef' # change HEF parameters interactively
# 'term ies' # change IES parameters interactively
# 'term tod' # display RSN time of day
# 'term aux' # display IES AUX2 port
# 'baud' # display baud rate (serial RSN to STM)
# 'baud #' # set baud rate
class Timeout(BaseEnum):
"""
Timeouts for instrument commands
"""
# STM commands
DEFAULT = 3
REBOOT = 5
ACQUISITION_START = DEFAULT
ACQUISITION_STOP = DEFAULT
IES_PORT_ON = DEFAULT
IES_PORT_OFF = DEFAULT
IES_POWER_ON = 30 # observations from 8-24 seconds
IES_POWER_OFF = DEFAULT
HEF_PORT_ON = DEFAULT
HEF_PORT_OFF = DEFAULT
HEF_POWER_ON = 6
HEF_POWER_OFF = DEFAULT
HEF_WAKE = DEFAULT
HEF_PARAMS = 6
HEF_SAVE = DEFAULT
SYNC_CLOCK = DEFAULT
# HEF specific commands
PREFIX = DEFAULT
MISSION_START = DEFAULT
MISSION_STOP = DEFAULT
class Prompt(BaseEnum):
"""
Device I/O prompts
"""
DEFAULT = 'STM>'
HEF_PARAMS = '#3_params'
HEF_PROMPT = '#3_HEF C>'
HEF_PORT_ON = DEFAULT # port on command doesn't return the HEF prompt (return is #3_\r\n)
class Response(BaseEnum):
"""
Expected responses from HPIES
"""
TIMESTAMP = re.compile(r'^(?P<tod>%(tod)s)' % common_matches)
UNKNOWN_COMMAND = re.compile(r'.*?unknown command: .*?')
PROMPT = re.compile(r'^STM> .*?')
HEF_POWER_ON = re.compile(r'#3_Use <BREAK> to enter command mode') # last line of HEF power on prompt
IES_POWER_ON = re.compile(r'#4_\s+Next scheduled 1 minute warning at:') # last line of IES power on prompt
ERROR = re.compile(r'.*?port.*?not open')
OPENED_FILE = re.compile(r'#3_Opened raw output file, (\S+)\\r')
SET_PARAMETER = re.compile(r'.+\s=\s(%(int)s)' % common_matches)
###############################################################################
# Data Particles
###############################################################################
class DataParticleType(BaseEnum):
"""
Data particle types produced by this driver.
"""
RAW = CommonDataParticleType.RAW
HPIES_DATA_HEADER = 'hpies_data_header' # DataHeaderParticle
HORIZONTAL_FIELD = 'horizontal_electric_field' # HEFDataParticle
MOTOR_CURRENT = 'motor_current' # HEFMotorCurrentParticle
CALIBRATION_STATUS = 'calibration_status' # CalStatusParticle
HPIES_STATUS = 'hpies_status' # HEFStatusParticle
ECHO_SOUNDING = 'echo_sounding' # IESDataParticle
ECHO_STATUS = 'ies_status' # IESStatusParticle
TIMESTAMP = 'stm_timestamp' # TimestampParticle
class DataHeaderParticleKey(BaseEnum):
"""
Horizontal Electrical Field data field header stream
Precedes each series of HEF data particles
"""
DATA_VALID = 'hpies_data_valid'
VERSION = 'hpies_ver'
TYPE = 'hpies_type'
DESTINATION = 'hpies_dest'
INDEX_START = 'hpies_ibeg'
INDEX_STOP = 'hpies_iend'
HCNO = 'hpies_hcno'
TIME = 'hpies_secs'
TICKS = 'hpies_tics'
MOTOR_SAMPLES = 'hpies_navg_mot'
EF_SAMPLES = 'hpies_navg_ef'
CAL_SAMPLES = 'hpies_navg_cal'
STM_TIME = 'hpies_stm_timestamp'
class HPIESDataParticle(DataParticle):
_compiled_regex = None
__metaclass__ = get_logging_metaclass(log_level='trace')
def __init__(self, *args, **kwargs):
super(HPIESDataParticle, self).__init__(*args, **kwargs)
self.match = self.regex_compiled().match(self.raw_data)
if not self.match:
raise SampleException("No regex match of parsed sample data: [%r]" % self.raw_data)
self.check_crc()
def check_crc(self):
crc_compute, crc = calc_crc(self.raw_data)
data_valid = crc_compute == crc
if not data_valid:
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
log.warning("Corrupt data detected: [%r] - CRC %s != %s" % (self.raw_data, hex(crc_compute), hex(crc)))
return data_valid
@staticmethod
def regex():
        raise NotImplementedError()
    def _encode_all(self):
        raise NotImplementedError()
@classmethod
def regex_compiled(cls):
if cls._compiled_regex is None:
cls._compiled_regex = re.compile(cls.regex())
return cls._compiled_regex
def _build_parsed_values(self):
"""
@throws SampleException If there is a problem with sample creation
"""
try:
result = self._encode_all()
except Exception as e:
raise SampleException("Exception [%s] while converting data: [%s]" % (e, self.raw_data))
return result
class DataHeaderParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.HPIES_DATA_HEADER
@staticmethod
def regex():
"""
@return regex string for matching HPIES data header particle
Sample Data:
#3__HE05 E a 0 983 130 3546345513 13126 3 3 3 1398912144*f7aa
#3__HE05 f a 0 382 0 3546329882 17917 3 3 3 1398896422*d6fe
#3__HE05 C a 0 978 22 3546332553 34259 3 3 3 1398899184*3e0e
"""
pattern = r"""
(?x)
\#3__HE
(?P<version> \d{2}) \s (?# 05)
(?P<type> ([ECfr])) \s (?# E)
(?P<dest> ([ab])) \s (?# a)
(?P<ibegin> %(int)s) \s (?# 0)
(?P<iend> %(int)s) \s (?# 983)
(?P<hcno> %(int)s) \s (?# 130)
(?P<secs> %(int)s) \s (?# 3546345513)
(?P<tics> %(int)s) \s (?# 13126)
(?P<navg_mot> %(int)s) \s (?# 3)
(?P<navg_ef> %(int)s) \s (?# 3)
(?P<navg_cal> %(int)s) \s (?# 3)
(?P<stm_time> %(int)s) (?# 1398912144)
\*
(?P<crc> %(crc)s) (?# f7a9)
""" % common_matches
return pattern
def _encode_all(self):
return [
self._encode_value(DataHeaderParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(DataHeaderParticleKey.VERSION, self.match.group('version'), int),
self._encode_value(DataHeaderParticleKey.TYPE, self.match.group('type'), str),
self._encode_value(DataHeaderParticleKey.DESTINATION, self.match.group('dest'), str),
self._encode_value(DataHeaderParticleKey.INDEX_START, self.match.group('ibegin'), int),
self._encode_value(DataHeaderParticleKey.INDEX_STOP, self.match.group('iend'), int),
self._encode_value(DataHeaderParticleKey.HCNO, self.match.group('hcno'), int),
self._encode_value(DataHeaderParticleKey.TIME, self.match.group('secs'), int),
self._encode_value(DataHeaderParticleKey.TICKS, self.match.group('tics'), int),
self._encode_value(DataHeaderParticleKey.MOTOR_SAMPLES, self.match.group('navg_mot'), int),
self._encode_value(DataHeaderParticleKey.EF_SAMPLES, self.match.group('navg_ef'), int),
self._encode_value(DataHeaderParticleKey.CAL_SAMPLES, self.match.group('navg_cal'), int),
self._encode_value(DataHeaderParticleKey.STM_TIME, self.match.group('stm_time'), int)
]
class HEFDataParticleKey(BaseEnum):
"""
Horizontal Electrical Field data stream
"""
DATA_VALID = 'hpies_data_valid'
INDEX = 'hpies_eindex'
CHANNEL_1 = 'hpies_e1c'
CHANNEL_2 = 'hpies_e1a'
CHANNEL_3 = 'hpies_e1b'
CHANNEL_4 = 'hpies_e2c'
CHANNEL_5 = 'hpies_e2a'
CHANNEL_6 = 'hpies_e2b'
class HEFDataParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.HORIZONTAL_FIELD
@staticmethod
def regex():
"""
@return regex string for matching HPIES horizontal electric field data particle
Sample Data:
#3__DE 797 79380 192799 192803 192930*56a8
"""
pattern = r"""
(?x)
\#3__DE \s
(?P<index> %(int)s) \s
(?P<channel_1> %(int)s) \s
(?P<channel_2> %(int)s) \s
(?P<channel_3> %(int)s) \s
(?P<channel_4> %(int)s) \s
(?P<channel_5> %(int)s) \s
(?P<channel_6> %(int)s)
\*
(?P<crc> %(crc)s)
""" % common_matches
return pattern
def _encode_all(self):
return [
self._encode_value(HEFDataParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(HEFDataParticleKey.INDEX, self.match.group('index'), int),
self._encode_value(HEFDataParticleKey.CHANNEL_1, self.match.group('channel_1'), int),
self._encode_value(HEFDataParticleKey.CHANNEL_2, self.match.group('channel_2'), int),
self._encode_value(HEFDataParticleKey.CHANNEL_3, self.match.group('channel_3'), int),
self._encode_value(HEFDataParticleKey.CHANNEL_4, self.match.group('channel_4'), int),
self._encode_value(HEFDataParticleKey.CHANNEL_5, self.match.group('channel_5'), int),
self._encode_value(HEFDataParticleKey.CHANNEL_6, self.match.group('channel_6'), int),
]
class HEFMotorCurrentParticleKey(BaseEnum):
"""
HEF Motor Current data stream
"""
DATA_VALID = 'hpies_data_valid'
INDEX = 'hpies_mindex'
CURRENT = 'hpies_motor_current'
class HEFMotorCurrentParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.MOTOR_CURRENT
@staticmethod
def regex():
"""
@return regex string for matching HPIES motor current particle
Sample Data:
#3__DM 11 24425*396b
"""
pattern = r"""
(?x)
\#3__DM \s
(?P<index> %(int)s) \s
(?P<motor_current> %(int)s)
\*
(?P<crc> %(crc)s)
""" % common_matches
return pattern
def _encode_all(self):
return [
self._encode_value(HEFMotorCurrentParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(HEFMotorCurrentParticleKey.INDEX, self.match.group('index'), int),
self._encode_value(HEFMotorCurrentParticleKey.CURRENT, self.match.group('motor_current'), int),
]
class CalStatusParticleKey(BaseEnum):
"""
Calibration status data particle
Sent every two minutes during autosample.
"""
DATA_VALID = 'hpies_data_valid'
INDEX = 'hpies_cindex'
E1C = 'hpies_c1c'
E1A = 'hpies_c1a'
E1B = 'hpies_c1b'
E2C = 'hpies_c2c'
E2A = 'hpies_c2a'
E2B = 'hpies_c2b'
class CalStatusParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.CALIBRATION_STATUS
@staticmethod
def regex():
"""
@return regex string for matching HPIES calibration status particle
Sample Data:
#3__DC 2 192655 192637 135611 80036 192554 192644*5c28
"""
pattern = r"""
(?x)
\#3__DC \s
(?P<index> %(int)s) \s
(?P<e1c> %(int)s) \s
(?P<e1a> %(int)s) \s
(?P<e1b> %(int)s) \s
(?P<e2c> %(int)s) \s
(?P<e2a> %(int)s) \s
(?P<e2b> %(int)s)
\*
(?P<crc> %(crc)s)
""" % common_matches
return pattern
def _encode_all(self):
return [
self._encode_value(CalStatusParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(CalStatusParticleKey.INDEX, self.match.group('index'), int),
self._encode_value(CalStatusParticleKey.E1C, self.match.group('e1c'), int),
self._encode_value(CalStatusParticleKey.E1A, self.match.group('e1a'), int),
self._encode_value(CalStatusParticleKey.E1B, self.match.group('e1b'), int),
self._encode_value(CalStatusParticleKey.E2C, self.match.group('e2c'), int),
self._encode_value(CalStatusParticleKey.E2A, self.match.group('e2a'), int),
self._encode_value(CalStatusParticleKey.E2B, self.match.group('e2b'), int),
]
class HEFStatusParticleKey(BaseEnum):
"""
HPIES status data particle
HPIES status is sent every X minutes during autosample
"""
DATA_VALID = 'hpies_data_valid'
UNIX_TIME = 'hpies_secs' # elapsed time since unix epoch
HCNO = 'hpies_hcno' # Half cycle number (int)
HCNO_LAST_CAL = 'hpies_hcno_last_cal' # Half cycle number of last calibration (int)
HCNO_LAST_COMP = 'hpies_hcno_last_comp' # Half cycle number of last compass value 1 int
OFILE = 'hpies_ofile' # Current output filename 1 str remove?
IFOK = 'hpies_ifok' # File write status 1 str "NG" on error, "OK" if still appending remove?
N_COMPASS_WRITES = 'hpies_compass_fwrite_attempted' # Number of compass records written to <ofile> 1 int
# Number of attempts to write compass data when <ofile> is corrupt 1 int
N_COMPASS_FAIL_WRITES = 'hpies_compass_fwrite_ofp_null'
MOTOR_POWER_UPS = 'hpies_mot_pwr_count' # Up/down counter of motor power on/off. Should be zero. 1 int
# Number of main service loops while motor current is being sampled. 1 int
N_SERVICE_LOOPS = 'hpies_start_motor_count'
SERIAL_PORT_ERRORS = 'hpies_compass_port_open_errs' # Number of failures to open the compass serial port. 1 int
COMPASS_PORT_ERRORS = 'hpies_compass_port_nerr' # int Always zero (never changed in code). Remove?
# Number of times compass port is found closed when trying to read it.
COMPASS_PORT_CLOSED_COUNT = 'hpies_tuport_compass_null_count'
IRQ2_COUNT = 'hpies_irq2_count' # Number of interrupt requests on IRQ2 line of 68332. 1 int Should be zero.
SPURIOUS_COUNT = 'hpies_spurious_count' # Number of spurious interrupts to the 68332. 1 int Should be zero.
# Number of times the SPSR register bits 5 and 6 are set. 1 int Should be zero.
SPSR_BITS56_COUNT = 'hpies_spsr_unknown_count'
# Number of times the programable interval timer (PIT) is zero. 1 int Should be zero.
PIT_ZERO_COUNT = 'hpies_pitperiod_zero_count'
# Number of times the analog to digital converter circular buffer overflows. 1 int Should be zero.
ADC_BUFFER_OVERFLOWS = 'hpies_adc_raw_overflow_count'
# Number of times the max7317 queue overflows. 1 int Should be zero.
MAX7317_QUEUE_OVERFLOWS = 'hpies_max7317_add_queue_errs'
# Number of times water switch pinch timing is incorrect. 1 int Should be zero.
PINCH_TIMING_ERRORS = 'hpies_wsrun_rtc_pinch_end_nerr'
class HEFStatusParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.HPIES_STATUS
@staticmethod
def regex():
"""
@return regex string for matching HPIES horizontal electric field status particle
Sample Data:
#3__s1 -748633661 31 23 0 C:\DATA\12345.000 OK*3e90
#3__s2 10 0 0 984001 0 0 0*ac87
#3__s3 0 0 0 0 0 0 1*35b7
"""
pattern = r"""
(?x)
\#3__s1 \s+
(?P<secs> %(int)s) \s+
(?P<hcno> %(int)s) \s+
(?P<hcno_last_cal> %(int)s) \s+
(?P<hcno_last_comp> %(int)s) \s+
(?P<ofile> %(fn)s) \s+
(?P<ifok> %(str)s)
\*
(?P<crc1> %(crc)s) \s+
\#3__s2 \s+
(?P<compass_writes> %(int)s) \s+
(?P<compass_fails> %(int)s) \s+
(?P<motor_power_cycles> %(int)s) \s+
(?P<service_loops> %(int)s) \s+
(?P<serial_failures> %(int)s) \s+
(?P<port_failures> %(int)s) \s+
(?P<port_closures> %(int)s)
\*
(?P<crc2> %(crc)s) \s+
\#3__s3 \s+
(?P<irq2_count> %(int)s) \s+
(?P<spurious_count> %(int)s) \s+
(?P<spsr_count> %(int)s) \s+
(?P<zero_count> %(int)s) \s+
(?P<adc_overflows> %(int)s) \s+
(?P<queue_overflows> %(int)s) \s+
(?P<pinch_errors> %(int)s)
\*
(?P<crc3> %(crc)s)
""" % common_matches
return pattern
def check_crc(self):
"""
Overridden because HEF Status has multiple lines with CRC
"""
valid = True
for line in self.raw_data.split(NEWLINE):
crc_compute, crc_parse = calc_crc(line)
data_valid = crc_compute == crc_parse
if not data_valid:
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
log.warning("Corrupt data detected: [%r] - CRC %s != %s" % (line, hex(crc_compute), hex(crc_parse)))
valid = False
return valid
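    # calc_crc (defined earlier in this module) is assumed to return a
    # (computed, parsed) pair for a single '*xxxx'-terminated line, so each of
    # the three status lines (#3__s1/s2/s3) is validated independently and any
    # single corrupt line marks the whole particle as invalid.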
def _encode_all(self):
return [
self._encode_value(HEFStatusParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(HEFStatusParticleKey.UNIX_TIME, self.match.group('secs'), int),
self._encode_value(HEFStatusParticleKey.HCNO, self.match.group('hcno'), int),
self._encode_value(HEFStatusParticleKey.HCNO_LAST_CAL, self.match.group('hcno_last_cal'), int),
self._encode_value(HEFStatusParticleKey.HCNO_LAST_COMP, self.match.group('hcno_last_comp'), int),
self._encode_value(HEFStatusParticleKey.OFILE, self.match.group('ofile'), str),
self._encode_value(HEFStatusParticleKey.IFOK, self.match.group('ifok'), str),
self._encode_value(HEFStatusParticleKey.N_COMPASS_WRITES, self.match.group('compass_writes'), int),
self._encode_value(HEFStatusParticleKey.N_COMPASS_FAIL_WRITES, self.match.group('compass_fails'), int),
self._encode_value(HEFStatusParticleKey.MOTOR_POWER_UPS, self.match.group('motor_power_cycles'), int),
self._encode_value(HEFStatusParticleKey.N_SERVICE_LOOPS, self.match.group('service_loops'), int),
self._encode_value(HEFStatusParticleKey.SERIAL_PORT_ERRORS, self.match.group('serial_failures'), int),
            self._encode_value(HEFStatusParticleKey.COMPASS_PORT_ERRORS, self.match.group('port_failures'), int),
            self._encode_value(HEFStatusParticleKey.COMPASS_PORT_CLOSED_COUNT, self.match.group('port_closures'), int),
self._encode_value(HEFStatusParticleKey.IRQ2_COUNT, self.match.group('irq2_count'), int),
self._encode_value(HEFStatusParticleKey.SPURIOUS_COUNT, self.match.group('spurious_count'), int),
self._encode_value(HEFStatusParticleKey.SPSR_BITS56_COUNT, self.match.group('spsr_count'), int),
self._encode_value(HEFStatusParticleKey.PIT_ZERO_COUNT, self.match.group('zero_count'), int),
self._encode_value(HEFStatusParticleKey.ADC_BUFFER_OVERFLOWS, self.match.group('adc_overflows'), int),
self._encode_value(HEFStatusParticleKey.MAX7317_QUEUE_OVERFLOWS, self.match.group('queue_overflows'), int),
self._encode_value(HEFStatusParticleKey.PINCH_TIMING_ERRORS, self.match.group('pinch_errors'), int),
]
class IESDataParticleKey(BaseEnum):
"""
Inverted Echo-Sounder data stream
"""
DATA_VALID = 'hpies_data_valid'
IES_TIMESTAMP = 'hpies_ies_timestamp'
TRAVEL_TIMES = 'hpies_n_travel_times'
TRAVEL_TIME_1 = 'hpies_travel_time1'
TRAVEL_TIME_2 = 'hpies_travel_time2'
TRAVEL_TIME_3 = 'hpies_travel_time3'
TRAVEL_TIME_4 = 'hpies_travel_time4'
PRESSURE = 'hpies_pressure'
TEMPERATURE = 'hpies_temperature'
BLILEY_TEMPERATURE = 'hpies_bliley_temperature'
BLILEY_FREQUENCY = 'hpies_bliley_frequency'
STM_TIMESTAMP = 'hpies_stm_timestamp'
class IESDataParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.ECHO_SOUNDING
@staticmethod
def regex():
"""
@return regex string for matching HPIES echo sounding data particle
Sample Data:
#5_AUX,1398880200,04,999999,999999,999999,999999,0010848,021697,022030,04000005.252,1B05,1398966715*c69e
#4_AUX,1439251200,04,390262,390286,390213,390484,2954625,001426,001420,04000018.093,4851\r\r\n*46cc
"""
pattern = r"""
(?x)
            \#(?P<aux_type> [45])_AUX ,  # capture the record type: only type 5 carries stm_timestamp
(?P<ies_timestamp> %(int)s) ,
(?P<n_travel_times> %(int)s) ,
(?P<travel_1> %(int)s) ,
(?P<travel_2> %(int)s) ,
(?P<travel_3> %(int)s) ,
(?P<travel_4> %(int)s) ,
(?P<pressure> %(int)s) ,
(?P<temp> %(int)s) ,
(?P<bliley_temp> %(int)s) ,
(?P<bliley_freq> %(float)s) ,
%(crc)s ,?
(?P<stm_timestamp> %(int)s | \\r\\r\\n)
\*
(?P<crc> %(crc)s)
""" % common_matches
return pattern
def _encode_all(self):
"""
Parse data sample for individual values (statistics)
@throws SampleException If there is a problem with sample creation
"""
#Sample Data:
#5_AUX,1398880200,04,999999,999999,999999,999999,0010848,021697,022030,04000005.252,1B05,1398966715*c69e
#4_AUX,1439251200,04,390262,390286,390213,390484,2954625,001426,001420,04000018.093,4851\r\r\n*46cc
results = [
self._encode_value(IESDataParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(IESDataParticleKey.IES_TIMESTAMP, self.match.group('ies_timestamp'), int),
self._encode_value(IESDataParticleKey.TRAVEL_TIMES, self.match.group('n_travel_times'), int),
self._encode_value(IESDataParticleKey.TRAVEL_TIME_1, self.match.group('travel_1'), int),
self._encode_value(IESDataParticleKey.TRAVEL_TIME_2, self.match.group('travel_2'), int),
self._encode_value(IESDataParticleKey.TRAVEL_TIME_3, self.match.group('travel_3'), int),
self._encode_value(IESDataParticleKey.TRAVEL_TIME_4, self.match.group('travel_4'), int),
self._encode_value(IESDataParticleKey.PRESSURE, self.match.group('pressure'), int),
self._encode_value(IESDataParticleKey.TEMPERATURE, self.match.group('temp'), int),
self._encode_value(IESDataParticleKey.BLILEY_TEMPERATURE, self.match.group('bliley_temp'), int),
self._encode_value(IESDataParticleKey.BLILEY_FREQUENCY, self.match.group('bliley_freq'), float),
]
        # Only the 5_AUX record type contains an stm_timestamp value.
        if self.match.group('aux_type') == '5':
            results.append(self._encode_value(IESDataParticleKey.STM_TIMESTAMP, self.match.group('stm_timestamp'), int))
return results
class IESStatusParticleKey(BaseEnum):
"""
    Inverted Echo-Sounder status data stream
"""
DATA_VALID = 'hpies_data_valid'
IES_TIME = 'hpies_ies_timestamp'
TRAVEL_TIMES = 'hpies_status_travel_times'
PRESSURES = 'hpies_status_pressures'
TEMPERATURES = 'hpies_status_temperatures'
PFREQUENCIES = 'hpies_status_pressure_frequencies'
TFREQUENCIES = 'hpies_status_temperature_frequencies'
BACKUP_BATTERY = 'hpies_backup_battery_voltage'
RELEASE_DRAIN = 'hpies_release_drain'
SYSTEM_DRAIN = 'hpies_system_drain'
RELEASE_BATTERY = 'hpies_release_battery_voltage'
SYSTEM_BATTERY = 'hpies_system_battery_voltage'
RELEASE_SYSTEM = 'hpies_release_system_voltage'
INTERNAL_TEMP = 'hpies_internal_temperature'
MEAN_TRAVEL = 'hpies_average_travel_time'
MEAN_PRESSURE = 'hpies_average_pressure'
MEAN_TEMPERATURE = 'hpies_average_temperature'
LAST_PRESSURE = 'hpies_last_pressure'
LAST_TEMPERATURE = 'hpies_last_temperature'
IES_OFFSET = 'hpies_ies_clock_error'
class IESStatusParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.ECHO_STATUS
@staticmethod
def regex():
"""
@return regex string for matching HPIES IES status particle
Sample Data:
#5_T:388559 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 999999 \r\n*cb7a
#5_P:388559 10932 23370 10935 23397 10934 23422 10934 23446 10933 23472 10932 23492 \r\n*9c3e
#5_F:388559 33228500 172170704 33228496 172170928 33228492 172171120 33228488 172171312 33228484 172171504 33228480 172171664 \r\n*e505
#5_E:388559 2.29 0.01 0.00 14.00 6.93 5.05 23.83 0.0000 10935 1623 33228.480 172171.656 0.109 \r\n*1605
"""
pattern = r"""
(?x)
\#5_T:
(?P<ies_time> %(int)s) \s
(?P<travel_times> (%(int)s \s){24})
\\r\\n\*
(?P<crc> %(crc)s) \s+
\#5_P:
(?P<ies_time2> %(int)s) \s
(?P<pt> (%(int)s \s+){12}) \s+
\\r\\n\*
(?P<crc2> %(crc)s) \s+
\#5_F:
(?P<ies_time3> %(int)s) \s
(?P<ptf> (%(int)s \s+){12}) \s+
\\r\\n\*
(?P<crc3> %(crc)s) \s+
\#5_E:
(?P<ies_time4> %(int)s) \s
(?P<backup_battery> %(float)s) \s
(?P<release_drain> %(float)s) \s
(?P<system_drain> %(float)s) \s
(?P<release_battery> %(float)s) \s
(?P<system_battery> %(float)s) \s
(?P<release_system> %(float)s) \s
(?P<internal_temp> %(float)s) \s
(?P<mean_travel> %(float)s) \s
(?P<mean_pressure> %(int)s) \s
(?P<mean_temp> %(int)s) \s
(?P<last_pressure> %(float)s) \s
(?P<last_temp> %(float)s) \s
(?P<clock_offset> %(float)s) \s
\\r\\n\*
(?P<crc4> %(crc)s)
""" % common_matches
return pattern
def check_crc(self):
valid = True
for line in self.raw_data.split(NEWLINE):
crc_compute, crc_parse = calc_crc(line)
data_valid = crc_compute == crc_parse
if not data_valid:
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
log.warning("Corrupt data detected: [%r] - CRC %s != %s" % (line, hex(crc_compute), hex(crc_parse)))
valid = False
return valid
def _encode_all(self):
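        # The #5_P field interleaves pressure and temperature readings and the
        # #5_F field interleaves their raw frequencies, so the captured groups
        # are split into even- and odd-indexed slices below.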
travel_times = [int(x) for x in self.match.group('travel_times').split()]
temp = [int(x) for x in self.match.group('pt').split()]
pressures = temp[::2]
temperatures = temp[1::2]
temp = [int(x) for x in self.match.group('ptf').split()]
pfrequencies = temp[::2]
tfrequencies = temp[1::2]
return [
self._encode_value(IESStatusParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(IESStatusParticleKey.IES_TIME, self.match.group('ies_time'), int),
self._encode_value(IESStatusParticleKey.TRAVEL_TIMES, travel_times, int),
self._encode_value(IESStatusParticleKey.PRESSURES, pressures, int),
self._encode_value(IESStatusParticleKey.TEMPERATURES, temperatures, int),
self._encode_value(IESStatusParticleKey.PFREQUENCIES, pfrequencies, int),
self._encode_value(IESStatusParticleKey.TFREQUENCIES, tfrequencies, int),
self._encode_value(IESStatusParticleKey.BACKUP_BATTERY, self.match.group('backup_battery'), float),
self._encode_value(IESStatusParticleKey.RELEASE_DRAIN, self.match.group('release_drain'), float),
self._encode_value(IESStatusParticleKey.SYSTEM_DRAIN, self.match.group('system_drain'), float),
self._encode_value(IESStatusParticleKey.RELEASE_BATTERY, self.match.group('release_battery'), float),
self._encode_value(IESStatusParticleKey.SYSTEM_BATTERY, self.match.group('system_battery'), float),
self._encode_value(IESStatusParticleKey.RELEASE_SYSTEM, self.match.group('release_system'), float),
self._encode_value(IESStatusParticleKey.INTERNAL_TEMP, self.match.group('internal_temp'), float),
self._encode_value(IESStatusParticleKey.MEAN_TRAVEL, self.match.group('mean_travel'), float),
self._encode_value(IESStatusParticleKey.MEAN_PRESSURE, self.match.group('mean_pressure'), int),
self._encode_value(IESStatusParticleKey.MEAN_TEMPERATURE, self.match.group('mean_temp'), int),
self._encode_value(IESStatusParticleKey.LAST_PRESSURE, self.match.group('last_pressure'), float),
self._encode_value(IESStatusParticleKey.LAST_TEMPERATURE, self.match.group('last_temp'), float),
self._encode_value(IESStatusParticleKey.IES_OFFSET, self.match.group('clock_offset'), float),
]
class TimestampParticleKey(BaseEnum):
"""
    HPIES timestamp data stream
"""
DATA_VALID = 'hpies_data_valid'
RSN_TIME = 'hpies_rsn_timestamp'
STM_TIME = 'hpies_stm_timestamp'
class TimestampParticle(HPIESDataParticle):
_data_particle_type = DataParticleType.TIMESTAMP
@staticmethod
def regex():
"""
@return regex string for matching HPIES STM timestamp particle
Sample Data:
#2_TOD,1398883295,1398883288*0059
"""
pattern = r"""
(?x)
\#2_TOD ,
(?P<rsn_time> %(int)s) ,
(?P<stm_time> %(int)s)
\*
(?P<crc> %(crc)s)
""" % common_matches
return pattern
def _build_parsed_values(self):
"""
Parse data sample for individual values (statistics)
@throws SampleException If there is a problem with sample creation
"""
return [
self._encode_value(TimestampParticleKey.DATA_VALID, self.check_crc(), int),
self._encode_value(TimestampParticleKey.RSN_TIME, self.match.group('rsn_time'), int),
self._encode_value(TimestampParticleKey.STM_TIME, self.match.group('stm_time'), int),
]
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
__metaclass__ = get_logging_metaclass(log_level='debug')
particles = [
DataHeaderParticle, # HPIES_DATA_HEADER
HEFDataParticle, # HORIZONTAL_FIELD
HEFMotorCurrentParticle, # MOTOR_CURRENT
CalStatusParticle, # CALIBRATION_STATUS
HEFStatusParticle, # HPIES_STATUS
IESDataParticle, # ECHO_SOUNDING
IESStatusParticle, # ECHO_STATUS
TimestampParticle, # TIMESTAMP
]
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent,
ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
handlers = {
ProtocolState.UNKNOWN: {
(ProtocolEvent.ENTER, self._handler_unknown_enter),
(ProtocolEvent.EXIT, self._handler_unknown_exit),
(ProtocolEvent.DISCOVER, self._handler_unknown_discover),
},
ProtocolState.COMMAND: {
(ProtocolEvent.ENTER, self._handler_command_enter),
(ProtocolEvent.EXIT, self._handler_command_exit),
(ProtocolEvent.GET, self._handler_command_get),
(ProtocolEvent.SET, self._handler_command_set),
(ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
(ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
},
ProtocolState.DIRECT_ACCESS: {
(ProtocolEvent.ENTER, self._handler_direct_access_enter),
(ProtocolEvent.EXIT, self._handler_direct_access_exit),
(ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
(ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
},
ProtocolState.AUTOSAMPLE: {
(ProtocolEvent.ENTER, self._handler_autosample_enter),
(ProtocolEvent.EXIT, self._handler_autosample_exit),
(ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
},
}
for state in handlers:
for event, handler in handlers[state]:
self._protocol_fsm.add_handler(state, event, handler)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
# Add build handlers for device commands.
# Add response handlers for device commands.
for cmd in Command.list():
self._add_build_handler(cmd, self._build_command)
self._add_response_handler(cmd, self._check_command)
self._add_response_handler(Command.HEF_PARAMS, self._parse_hef_params_response)
self._add_response_handler(Command.PREFIX, self._parse_prefix_response)
for cmd in (Command.DEBUG_LEVEL,
Command.WSRUN_PINCH,
Command.NFC_CALIBRATE,
Command.CAL_HOLD,
Command.NHC_COMPASS,
Command.COMPASS_SAMPLES,
Command.COMPASS_DELAY,
Command.MOTOR_SAMPLES,
Command.EF_SAMPLES,
Command.CAL_SAMPLES,
Command.CONSOLE_TIMEOUT,
Command.WSRUN_DELAY,
Command.MOTOR_DIR_NHOLD,
Command.POWER_COMPASS_W_MOTOR,
Command.KEEP_AWAKE_W_MOTOR,
Command.MOTOR_TIMEOUTS_1A,
Command.MOTOR_TIMEOUTS_1B,
Command.MOTOR_TIMEOUTS_2A,
Command.MOTOR_TIMEOUTS_2B,
Command.RSN_CONFIG,
Command.INVERT_LED_DRIVERS,
Command.M1A_LED,
Command.M2A_LED, ):
self._add_response_handler(cmd, self._parse_set_param_response)
# Add sample handlers.
self._build_command_dict()
self._build_driver_dict()
        # Start the state machine in the UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
        # commands sent to the device, to be filtered out of responses during telnet direct access
self._sent_cmds = []
self._chunker = StringChunker(Protocol.sieve_function)
@staticmethod
def sieve_function(raw_data):
"""
        Sort through the raw data and return the spans that match known particle regexes.
"""
matchers = []
return_list = []
for particle in Protocol.particles:
matchers.append(particle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
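    # The (start, end) spans returned by sieve_function are consumed by the
    # StringChunker created in __init__, which slices matching chunks out of the
    # raw stream and hands them to _got_chunk for particle extraction.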
def _build_param_dict(self):
"""
Populate the parameter dictionary with parameters.
For each parameter key, add match string, match lambda function,
and value formatting function for set commands.
"""
# Add parameter handlers to parameter dict.
self._param_dict.add(Parameter.SERIAL,
r'serno\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.STRING,
display_name='Serial Number',
description='Instrument serial number.',
visibility=ParameterDictVisibility.READ_ONLY,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.DEBUG_LEVEL,
r'debug\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='Debug Level',
description='Debug logging control value, 0 means no output: (0-3)',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.WSRUN_PINCH,
r'wsrun pinch secs\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.SECOND,
display_name='WS Run Pinch',
description='Half cycle interval between water switch tube pinch: (1-3600)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=120,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.NFC_CALIBRATE,
r'nfc calibrate\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=HPIESUnits.CYCLE,
display_name='Calibration Periodicity',
description='Number of cycles of water switch between applying cal: (1-3600)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=15,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.CAL_HOLD,
r'cal hold secs\s+= (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
units=Units.SECOND,
display_name='Calibrate Hold',
description='Hold time of calibration voltage.',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=20,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.CAL_SKIP,
r'cal skip secs\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.SECOND,
display_name='Calibrate Skip',
description='Time to wait before using data after changing the calibration signal state.',
visibility=ParameterDictVisibility.READ_ONLY,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.NHC_COMPASS,
r'nhc compass\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=HPIESUnits.HALF_CYCLE,
display_name='Compass Measurement Periodicity',
description='Number of half cycles between compass measurements: (1-3600)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=30,
direct_access=True,
startup_param=True)
self._param_dict.add(Parameter.COMPASS_SAMPLES,
r'compass nget\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='Compass Samples',
description='Number of compass samples to acquire in a burst: (1-3600)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=1,
direct_access=True,
startup_param=True)
# time between measurements in a burst
self._param_dict.add(Parameter.COMPASS_DELAY,
r'compass dsecs\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.SECOND,
display_name='Compass Delay',
description='Time between measurements in a burst.',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=10,
startup_param=True,
direct_access=True)
# initial compass measurement (in seconds)
self._param_dict.add(Parameter.INITIAL_COMPASS,
r'icompass run secs\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.SECOND,
display_name='Initial Compass Run',
description='Initial compass measurement.',
visibility=ParameterDictVisibility.READ_ONLY,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.INITIAL_COMPASS_DELAY,
r'icompass dsecs\s+= (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
units=Units.SECOND,
display_name='Initial Compass Delay',
description='Delay prior to the first compass measurement: (1-3600)',
visibility=ParameterDictVisibility.READ_ONLY,
startup_param=False,
direct_access=False)
# FILE_LENGTH = 'secs per ofile' # seconds per file (default 86400 - one day)
self._param_dict.add(Parameter.MOTOR_SAMPLES,
r'navg mot\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='Number of Motor Samples',
description='Number of samples to average - motor is sampled every 25 ms: (1-100)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=10,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.EF_SAMPLES,
r'navg ef\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='Number of HEF Samples',
description='Number of samples to average - EF is sampled every 0.1024 s: (1-100)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=10,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.CAL_SAMPLES,
r'navg cal\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='Number of Calibration Samples',
description='Number of samples to average - EF is sampled every 0.1024 s during cal: (1-100)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=10,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.CONSOLE_TIMEOUT,
r'console off timeout\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.SECOND,
display_name='Console Timeout',
                             description='UART driver turns off for the console port (comes on temporarily for data output).',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=300,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.WSRUN_DELAY,
r'wsrun delay secs\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.SECOND,
display_name='WS Run Delay',
description='',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MOTOR_DIR_NHOLD,
r'motor dir nhold\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='Motor Direction',
description='',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MOTOR_DIR_INIT,
r'motor dir init\s+= (\w+)',
lambda match: match.group(1),
None,
type=ParameterDictType.STRING,
display_name='Initial Motor Direction',
description='Initial motion direction (f:forward | r: reverse)',
value_description='f - forward, r - reverse',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value='f', # forward
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.POWER_COMPASS_W_MOTOR,
r'do_compass_pwr_with_motor\s+= (%(int)s)' % common_matches,
lambda match: bool(int(match.group(1))),
None,
type=ParameterDictType.BOOL,
display_name='Power Compass with Motor',
description='Apply power to compass when motor is on: (true | false)',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=False,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.KEEP_AWAKE_W_MOTOR,
r'do_keep_awake_with_motor\s+= (%(int)s)' % common_matches,
lambda match: bool(int(match.group(1))),
None,
type=ParameterDictType.BOOL,
display_name='Keep Awake with Motor',
description='Keep instrument awake while motor is running: (true | false)',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=True,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MOTOR_TIMEOUTS_1A,
r'm1a_tmoc\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units='25 ' + Units.MILLISECOND,
display_name='Motor Timeouts 1A',
description='Timeout counts for motor 1A: (10-1000)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=200,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MOTOR_TIMEOUTS_1B,
r'm1b_tmoc\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units='25 ' + Units.MILLISECOND,
display_name='Motor Timeouts 1B',
description='Timeout counts for motor 1B: (10-1000)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=200,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MOTOR_TIMEOUTS_2A,
r'm2a_tmoc\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units='25 ' + Units.MILLISECOND,
display_name='Motor Timeouts 2A',
description='Timeout counts for motor 2A: (10-1000)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=200,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MOTOR_TIMEOUTS_2B,
r'm2b_tmoc\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units='25 ' + Units.MILLISECOND,
display_name='Motor Timeouts 2B',
description='Timeout counts for motor 2B: (10-1000)',
visibility=ParameterDictVisibility.READ_WRITE,
default_value=200,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.RSN_CONFIG,
r'do_rsn\s+= (%(int)s)' % common_matches,
lambda match: bool(int(match.group(1))),
None,
type=ParameterDictType.BOOL,
display_name='Configured for RSN',
description='Use RSN configuration: (true | false)',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=True,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.INVERT_LED_DRIVERS,
r'led_drivers_invert\s+= (%(int)s)' % common_matches,
lambda match: bool(int(match.group(1))),
None,
type=ParameterDictType.BOOL,
display_name='Invert LED Drivers',
description='Invert the LED drivers: (true | false)',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=False,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.M1A_LED,
r'm1a_led\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='M1A LED',
description='M1A LED: (0-3)',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=1,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.M2A_LED,
r'm2a_led\s+= (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
display_name='M2A LED',
description='M2A LED: (0-3)',
visibility=ParameterDictVisibility.IMMUTABLE,
default_value=3,
startup_param=True,
direct_access=True)
# IES Parameters - read only - no defaults
self._param_dict.add(Parameter.ECHO_SAMPLES,
r'Travel Time Measurements: (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units='1/600 ' + Units.HERTZ,
display_name='Echo Samples',
description='Number of travel time measurements.',
value_description='number of pings every 10 minutes',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.WATER_DEPTH,
r'Estimated Water Depth: (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.METER,
display_name='Estimated Water Depth',
description='Estimate of water depth at instrument location.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.ACOUSTIC_LOCKOUT,
r'Acoustic Lockout: (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
units=Units.SECOND,
display_name='Acoustic Lockout',
description='',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.ACOUSTIC_OUTPUT,
r'Acoustic Output: (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units=Units.DECIBEL,
                             display_name='Acoustic Output',
                             description='Acoustic output level: (170-197)',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.RELEASE_TIME,
r'Release Time: (%(rest)s)' % common_matches,
lambda match: match.group(1),
None,
type=ParameterDictType.STRING,
display_name='Release Time',
description='',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.COLLECT_TELEMETRY,
r'Telemetry data file (enabled|disabled)',
lambda match: True if match.group(1) == 'enabled' else False,
None,
type=ParameterDictType.BOOL,
display_name='Collect Telemetry Data',
description='Enable collection of telemetry data: (enabled | disabled)',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.MISSION_STATEMENT,
r'Mission Statement: (%(rest)s)' % common_matches,
lambda match: match.group(1),
None,
type=ParameterDictType.STRING,
display_name='Mission Statement',
description='Descriptive statement of the mission purpose.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PT_SAMPLES,
r'Pressure and Temperature measured every (%(int)s)' % common_matches,
lambda match: int(match.group(1)),
None,
type=ParameterDictType.INT,
units='1/600 ' + Units.HERTZ,
display_name='Pressure/Temperature Samples',
description='Periodicity of pressure and temperature sampling.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.TEMP_COEFF_U0,
r'U0 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temp Coeff-U0',
description='Temperature coefficient U0.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.TEMP_COEFF_Y1,
r'Y1 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temp Coeff-Y1',
description='Temperature coefficient Y1.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.TEMP_COEFF_Y2,
r'Y2 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temp Coeff-Y2',
description='Temperature coefficient Y2.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.TEMP_COEFF_Y3,
r'Y3 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temp Coeff-Y3',
description='Temperature coefficient Y3.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_C1,
r'C1 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-C1',
description='Pressure coefficient C1.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_C2,
r'C2 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-C2',
description='Pressure coefficient C2.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_C3,
r'C3 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-C3',
description='Pressure coefficient C3.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_D1,
r'D1 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-D1',
description='Pressure coefficient D1.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_D2,
r'D2 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-D2',
description='Pressure coefficient D2.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_T1,
r'T1 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-T1',
description='Pressure coefficient T1.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_T2,
r'T2 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-T2',
description='Pressure coefficient T2.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_T3,
r'T3 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-T3',
description='Pressure coefficient T3.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_T4,
r'T4 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-T4',
description='Pressure coefficient T4.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_COEFF_T5,
r'T5 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Pressure Coeff-T5',
description='Pressure coefficient T5.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.TEMP_OFFSET,
r'Temperature offset = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
units=Units.DEGREE_CELSIUS,
display_name='Temperature Offset',
description='',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.PRES_OFFSET,
r'Pressure offset = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
units=Units.POUND_PER_SQUARE_INCH,
display_name='Pressure Offset',
description='',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.BLILEY_0,
r'B0 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temperature Coefficient B0',
description='Bliley temperature coefficient B0.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.BLILEY_1,
r'B1 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temperature Coefficient B1',
description='Bliley temperature coefficient B1.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.BLILEY_2,
r'B2 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temperature Coefficient B2',
description='Bliley temperature coefficient B2.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.BLILEY_3,
r'B3 = (%(float)s)' % common_matches,
lambda match: float(match.group(1)),
None,
type=ParameterDictType.FLOAT,
display_name='Temperature Coefficient B3',
description='Bliley temperature coefficient B3.',
startup_param=False,
direct_access=False,
visibility=ParameterDictVisibility.READ_ONLY)
def _got_chunk(self, chunk, timestamp):
"""
The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
for particle in Protocol.particles:
self._extract_sample(particle, particle.regex_compiled(), chunk, timestamp)
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
def _wakeup(self, wakeup_timeout=10, response_timeout=3):
"""
Override the default wakeup to do nothing. Instead, an explicit call to _hef_wakeup is required prior
to sending commands to the instrument.
"""
pass
def _do_cmd_resp(self, cmd, *args, **kwargs):
"""
Calls parent _do_cmd_resp and auto-retries if a timeout occurs.
@param cmd instrument command
@param retries (optional) number of retries (default = 3)
"""
attempts = 0
retries = kwargs.get('retries', 3)
while True:
try:
return super(Protocol, self)._do_cmd_resp(cmd, *args, **kwargs)
except InstrumentTimeoutException as e:
attempts += 1
if attempts == retries:
raise e
log.warn('timeout for command (%s): retrying...' % cmd)
def _hef_wakeup(self):
"""
wakeup the instrument
The only current deterministic way to know if the instrument is awake is to see if it responds to a
parameter request. If it does not, it must be restarted.
MUST BE CALLED PRIOR TO SENDING A SERIES OF COMMANDS TO THE INSTRUMENT
@throw InstrumentTimeoutException if the device could not be woken.
"""
# if we are able to get the parameters from the HEF, it is already awake
try:
self._do_cmd_resp(Command.HEF_WAKE, expected_prompt=Prompt.DEFAULT)
self._do_cmd_resp(Command.HEF_PARAMS, expected_prompt=Prompt.HEF_PROMPT, timeout=Timeout.HEF_PARAMS)
log.debug('HPIES is awake')
# otherwise, we need to restart
except InstrumentTimeoutException:
self._do_cmd_resp(Command.REBOOT, expected_prompt=Prompt.DEFAULT, timeout=Timeout.REBOOT)
self._do_cmd_resp(Command.ACQUISITION_START, expected_prompt=Prompt.DEFAULT,
timeout=Timeout.ACQUISITION_START)
self._do_cmd_hef_on()
self._do_cmd_resp(Command.HEF_PARAMS, expected_prompt=Prompt.HEF_PROMPT, timeout=Timeout.HEF_PARAMS)
log.debug('HPIES is awake')
def _build_command(self, cmd, *args):
"""
@brief assemble command string to send to instrument
Called by _do_cmd_* functions to build a command string for @a cmd.
@retval command string to send to the instrument
"""
if cmd in (Command.REBOOT,
Command.ACQUISITION_START,
Command.ACQUISITION_STOP,
Command.IES_PORT_ON,
Command.IES_PORT_OFF,
Command.IES_POWER_ON,
Command.IES_POWER_OFF,
Command.HEF_PORT_ON,
Command.HEF_PORT_OFF,
Command.HEF_POWER_ON,
Command.HEF_POWER_OFF,
Command.HEF_WAKE,
Command.SYNC_CLOCK):
return stm_command(cmd, *args)
elif cmd in (Command.PREFIX,
Command.HEF_PARAMS,
Command.HEF_SAVE,
Command.MISSION_START,
Command.MISSION_STOP,
Command.DEBUG_LEVEL,
Command.WSRUN_PINCH,
Command.NFC_CALIBRATE,
Command.CAL_HOLD,
Command.NHC_COMPASS,
Command.COMPASS_SAMPLES,
Command.COMPASS_DELAY,
Command.MOTOR_SAMPLES,
Command.EF_SAMPLES,
Command.CAL_SAMPLES,
Command.CONSOLE_TIMEOUT,
Command.WSRUN_DELAY,
Command.MOTOR_DIR_NHOLD,
Command.MOTOR_DIR_INIT,
Command.POWER_COMPASS_W_MOTOR,
Command.KEEP_AWAKE_W_MOTOR,
Command.MOTOR_TIMEOUTS_1A,
Command.MOTOR_TIMEOUTS_1B,
Command.MOTOR_TIMEOUTS_2A,
Command.MOTOR_TIMEOUTS_2B,
Command.RSN_CONFIG,
Command.INVERT_LED_DRIVERS,
Command.M1A_LED,
Command.M2A_LED):
return hef_command(cmd, *args)
raise InstrumentProtocolException('attempt to process unknown command: %r' % cmd)
def _check_command(self, resp, prompt):
for line in resp.split(NEWLINE):
if not valid_response(line):
raise InstrumentProtocolException('checksum failed (%r)' % line)
def _build_driver_dict(self):
"""
@brief Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
        @brief Populate the command dictionary with commands.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name='Start Autosample')
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name='Stop Autosample')
self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')
def _parse_hef_params_response(self, response, prompt):
"""
@brief process the response for request to get HEF parameters
@param response command string
@retval True if able to update parameters, False otherwise
"""
log.debug('djm parameter dictionary:\r%s', self._param_dict.get_all())
if re.match(Response.ERROR, response):
raise InstrumentParameterException('unable to get parameters - data acquisition has not been started')
param_lines = []
for line in response.split(NEWLINE):
log.debug('checking line for parameter: %r' % line)
if ' = ' in line:
if valid_response(line):
log.debug('checksum valid, setting value')
param_lines.append(line)
dictionary = self._param_dict.update_many(response)
if dictionary:
log.debug('djm updated dictionary: %r', self._param_dict.get_all())
return True
return False
def _do_cmd_prefix(self):
"""
Establish a valid filename to store data.
"""
prefix_bad = True
prefix_file = ''
while prefix_bad:
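            # tempfile.mktemp with empty prefix/dir is used purely as a random
            # short-name generator; the loop keeps trying new names until the
            # instrument reports that it opened a file for that prefix.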
prefix_root = tempfile.mktemp(prefix='', dir='')
prefix_file = self._do_cmd_resp(Command.PREFIX, prefix_root,
expected_prompt=Prompt.HEF_PROMPT,
timeout=Timeout.PREFIX)
if prefix_file is not None:
prefix_bad = False
log.debug('opened file with prefix: %s', prefix_file)
return prefix_file
def _do_cmd_ies_on(self):
"""
Turn on the IES
"""
try:
self._do_cmd_resp(Command.IES_PORT_ON, expected_prompt=Prompt.DEFAULT, timeout=Timeout.IES_PORT_ON)
self._do_cmd_resp(Command.IES_POWER_ON, response_regex=Response.IES_POWER_ON, timeout=Timeout.IES_POWER_ON)
self._do_cmd_resp(Command.IES_PORT_OFF, expected_prompt=Prompt.DEFAULT, timeout=Timeout.IES_PORT_OFF)
except InstrumentTimeoutException:
raise InstrumentTimeoutException('IES did not respond to power on sequence')
def _do_cmd_hef_on(self):
"""
Turn on the HEF
"""
try:
self._do_cmd_resp(Command.HEF_PORT_ON, expected_prompt=Prompt.HEF_PORT_ON, timeout=Timeout.HEF_PORT_ON)
self._do_cmd_resp(Command.HEF_POWER_ON, response_regex=Response.HEF_POWER_ON, timeout=Timeout.HEF_POWER_ON)
self._do_cmd_resp(Command.HEF_WAKE, expected_prompt=Prompt.DEFAULT)
except InstrumentTimeoutException:
raise InstrumentTimeoutException('HEF did not respond to power on sequence')
def _parse_prefix_response(self, response, prompt):
"""
Check @a response from request to set prefix filename.
@param response response from instrument
        @param prompt prompt matched at the end of the response (unused)
@retval filename to be used or None if requested prefix is already in use
"""
filename = None
matches = re.search(Response.OPENED_FILE, response)
if matches:
filename = matches.group(1)
return filename
def _parse_set_param_response(self, response, prompt):
"""
Check @a response from request to set HEF parameter
@param response response from instrument
        @param prompt prompt matched at the end of the response (unused)
@retval value from set or None if there was an error setting value
"""
try:
self._check_command(response, prompt)
self._param_dict.update(response)
except InstrumentProtocolException:
pass
return response
########################################################################
# Unknown handlers
########################################################################
def _handler_unknown_enter(self):
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
pass
def _handler_unknown_discover(self):
# any existing mission needs to be stopped. If one is not already running, no harm in sending the stop.
self._do_cmd_no_resp(Command.MISSION_STOP)
# delay so the instrument doesn't overwrite the next response
time.sleep(2)
return ProtocolState.COMMAND, ResourceAgentState.IDLE
########################################################################
# Command handlers.
########################################################################
def _handler_command_enter(self):
"""
Enter command state.
Startup HPIES and get it into a state where we can get/set parameters.
@throws InstrumentTimeoutException if the device cannot be woken.
        @throws InstrumentProtocolException if the update commands are not recognized.
"""
self._init_params()
try:
self._do_cmd_resp(Command.REBOOT, expected_prompt=Prompt.DEFAULT, timeout=Timeout.REBOOT)
self._do_cmd_resp(Command.ACQUISITION_START, expected_prompt=Prompt.DEFAULT,
timeout=Timeout.ACQUISITION_START)
self._do_cmd_ies_on()
self._do_cmd_hef_on()
self._do_cmd_resp(Command.HEF_PARAMS, expected_prompt=Prompt.HEF_PROMPT, timeout=Timeout.HEF_PARAMS)
except InstrumentTimeoutException as e:
log.error('Unable to initialize HPIES: %r', e.message)
self._async_raise_fsm_event(ProtocolEvent.EXIT)
raise e
# Command device to update parameters and send a config change event.
self._update_params()
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_get(self, *args, **kwargs):
"""
Get device parameters from the parameter dict
First we set a baseline timestamp that all data expirations will be calculated against.
        Then we try to get the parameter values. If we catch an expired parameter then we will update
all parameters and get values using the original baseline time that we set at the beginning of this method.
Assuming our _update_params is updating all parameter values properly then we can
ensure that all data will be fresh. Nobody likes stale data!
@param args[0] list of parameters to retrieve, or DriverParameter.ALL.
@raise InstrumentParameterException if missing or invalid parameter.
        @raise InstrumentParameterExpirationException if a parameter is still expired
        after the second update pass.
"""
return self._handler_get(*args, **kwargs)
def _handler_command_set(self, *args):
"""
perform a set command
@param args[0] parameter : value dict.
@param args[1] parameter : startup parameters?
@retval (next_state, result) tuple, (None, None).
@throws InstrumentParameterException if missing set parameters, if set parameters not ALL and
not a dict, or if parameter can't be properly formatted.
@throws InstrumentTimeoutException if device cannot be woken for set command.
@throws InstrumentProtocolException if set command could not be built or misunderstood.
"""
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('_handler_command_set Set command requires a parameter dict.')
try:
startup = args[1]
except IndexError:
pass
if not isinstance(params, dict):
raise InstrumentParameterException('Set parameters not a dict.')
# For each key, val in the dict, issue set command to device.
# Raise if the command not understood.
self._set_params(params, startup)
return None, None
def _handler_command_start_autosample(self):
return ProtocolState.AUTOSAMPLE, (ResourceAgentState.STREAMING, None)
def _handler_command_start_direct(self):
return ProtocolState.DIRECT_ACCESS, (ResourceAgentState.DIRECT_ACCESS, None)
def _handler_command_exit(self, *args, **kwargs):
pass
########################################################################
# Autosample handlers
########################################################################
def _handler_autosample_enter(self):
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
try:
self._do_cmd_prefix()
self._do_cmd_resp(Command.MISSION_START, expected_prompt=Prompt.HEF_PROMPT, timeout=Timeout.MISSION_START)
log.debug('mission start completed')
except InstrumentTimeoutException as e:
log.error('Unable to start autosample: %r', e.message)
self._async_raise_fsm_event(ProtocolEvent.STOP_AUTOSAMPLE)
raise e
def _handler_autosample_stop_autosample(self):
"""
Process command to stop auto-sampling. Return to command state.
"""
try:
self._do_cmd_resp(Command.MISSION_STOP, expected_prompt=Prompt.DEFAULT, timeout=Timeout.MISSION_STOP)
self._do_cmd_resp(Command.ACQUISITION_STOP, expected_prompt=Prompt.DEFAULT,
timeout=Timeout.ACQUISITION_STOP)
except InstrumentTimeoutException as e:
log.warning('Unable to terminate mission cleanly: %r', e.message)
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
def _handler_autosample_exit(self, *args, **kwargs):
# no special cleanup required
pass
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self):
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_execute_direct(self, data):
self._do_cmd_direct(data)
# add sent command to list for 'echo' filtering in callback
self._sent_cmds.append(data)
return None, (None, None)
def _handler_direct_access_stop_direct(self):
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
def _handler_direct_access_exit(self):
pass
def apply_startup_params(self):
"""
Apply all startup parameters. First we check the instrument to see
        if we need to set the parameters. If they are already set
correctly then we don't do anything.
If we need to set parameters then we might need to transition to
command first. Then we will transition back when complete.
@raise: InstrumentProtocolException if not in command or streaming
"""
# Let's give it a try in unknown state
log.debug("CURRENT STATE: %s", self.get_current_state())
if (self.get_current_state() != DriverProtocolState.COMMAND and
self.get_current_state() != DriverProtocolState.AUTOSAMPLE):
raise InstrumentProtocolException("Not in command or autosample state. Unable to apply startup params")
# If we are in streaming mode and our configuration on the
# instrument matches what we think it should be then we
# don't need to do anything.
if not self._instrument_config_dirty():
log.debug("configuration not dirty. Nothing to do here")
return True
error = None
try:
log.debug("apply_startup_params now")
self._set_params(self.get_startup_config(), True)
self._do_cmd_resp(Command.HEF_SAVE, expected_prompt=Prompt.HEF_PROMPT, timeout=Timeout.HEF_SAVE)
# Catch all errors so we can put driver back into streaming. Then rethrow the error.
except Exception as e:
error = e
if error:
log.error("Error in apply_startup_params: %s", error)
raise error
def _instrument_config_dirty(self):
"""
Read the startup config and compare that to what the instrument
        is configured to. If they differ, return True.
@return: True if the startup config doesn't match the instrument
@raise: InstrumentParameterException
"""
# Refresh the param dict cache
self._update_params()
startup_params = self._param_dict.get_startup_list()
log.debug("Startup Parameters: %s", startup_params)
for param in startup_params:
if self._param_dict.get(param) != self._param_dict.get_config_value(param):
log.debug("DIRTY: %s %s != %s", param, self._param_dict.get(param),
self._param_dict.get_config_value(param))
return True
log.debug("Clean instrument config")
return False
def _update_params(self):
"""
        Update the parameter dictionary. Wake the device, which requests the
        current HEF parameters; the parameter dict will match the line output
        and update itself.
        @throws InstrumentTimeoutException if the device cannot be woken in a timely manner.
        @throws InstrumentProtocolException if the parameter responses are misunderstood.
"""
# Get old param dict config.
old_config = self._param_dict.get_config()
# wakeup will get the latest parameters from the instrument
self._hef_wakeup()
# Get new param dict config. If it differs from the old config,
# tell driver superclass to publish a config change event.
new_config = self._param_dict.get_config()
log.debug("Old Config: %s", old_config)
log.debug("New Config: %s", new_config)
if not dict_equal(new_config, old_config) and self._protocol_fsm.get_current_state() != ProtocolState.UNKNOWN:
log.debug("parameters updated, sending event")
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def _verify_set_values(self, params):
"""
Verify supplied values are in range, if applicable
@param params: Dictionary of Parameter:value to be verified
@throws InstrumentParameterException
"""
constraints = ParameterConstraints.dict()
parameters = Parameter.reverse_dict()
# step through the list of parameters
for key, val in params.iteritems():
# if constraint exists, verify we have not violated it
constraint_key = parameters.get(key)
if constraint_key in constraints:
var_type, minimum, maximum = constraints[constraint_key]
constraint_string = 'Parameter: %s Value: %s Type: %s Real Type: %s Minimum: %s Maximum: %s' % \
(key, val, var_type, type(val), minimum, maximum)
log.debug('SET CONSTRAINT: %s', constraint_string)
# check bool values are actual booleans
if var_type == bool:
if val not in [True, False]:
raise InstrumentParameterException('Non-boolean value!: %s' % constraint_string)
# else, check if we can cast to the correct type
else:
try:
val = var_type(val)
except ValueError:
raise InstrumentParameterException('Type mismatch: %s' % constraint_string)
# now, verify we are within min/max
if val < minimum or val > maximum:
raise InstrumentParameterException('Out of range: %s' % constraint_string)
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters
"""
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('Set command requires a parameter dict.')
old_config = self._param_dict.get_all()
self._verify_set_values(params)
self._verify_not_readonly(*args, **kwargs)
for key, val in params.iteritems():
            if key not in old_config:
raise InstrumentParameterException('attempted to set unknown parameter: %s to %s' % (key, val))
command_response = self._do_cmd_resp(key, val, expected_prompt=Prompt.HEF_PROMPT)
log.debug('command: %r returned: %r', key, command_response)
new_config = self._param_dict.get_all()
log.debug('old config: %r', old_config)
log.debug('new config: %r', new_config)
if new_config != old_config:
log.debug('configuration differs, saving parameters and signaling event')
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def create_playback_protocol(callback):
return Protocol(None, None, callback)
|
'''
## Train ##
# Code to train Deep Q Network on gym-sokoban environment
@author: Kolin Guo
'''
from datetime import datetime
import json, os, sys, argparse, logging, random, time, shutil
import gym, gym_sokoban
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from utils.experience_replay import ReplayMemory
from utils.state_buffer import StateBuffer
from utils.network import DQNModel
from utils.utils import preprocess_observation, reset_env_and_state_buffer
def get_train_args():
train_args = argparse.ArgumentParser()
# Random Seed
train_args.add_argument("--random_seed", type=int, default=1234, help="Random seed for reproducability")
# Environment parameters
train_args.add_argument("--env", type=str, default='Boxoban-Train-v0', help="Environment to use for training")
train_args.add_argument("--num_surfaces", type=int, default=7, help="Number of room surfaces for one-hot encoding")
train_args.add_argument("--max_step", type=int, default=200, help="Maximum number of steps in a single game episode")
train_args.add_argument("--render", type=bool, default=False, help="Whether or not to display the environment during training")
train_args.add_argument("--grid_width", type=int, default=10, help="Grid width")
train_args.add_argument("--grid_height", type=int, default=10, help="Grid height")
train_args.add_argument("--grids_per_state", type=int, default=4, help="Sequence of grids which constitutes a single state")
# Environment rewards
train_args.add_argument("--env_penalty_for_step", type=float, default=-0.1, help="Reward of performing a step")
train_args.add_argument("--env_reward_box_on_target", type=float, default=10.0, help="Reward of pushing a box on target")
train_args.add_argument("--env_penalty_box_off_target", type=float, default=-10.0, help="Reward of pushing a box off target")
train_args.add_argument("--env_reward_finished", type=float, default=100.0, help="Reward of winning (pushed all boxes on targets)")
# Training parameters
train_args.add_argument("--save_tb_trace", type=bool, default=False, help="Save TensorBoard trace for first 1000 step")
train_args.add_argument("--num_steps_train", type=int, default=50000000, help="Number of steps to train for")
train_args.add_argument("--batch_size", type=int, default=32, help="Batch size of state transitions")
train_args.add_argument("--learning_rate", type=float, default=0.00025, help="Learning rate")
train_args.add_argument("--replay_mem_size", type=int, default=1000000, help="Maximum number of steps in replay memory buffer")
train_args.add_argument("--initial_replay_mem_size", type=int, default=50000, help="Initial number of steps in replay memory (populated by random actions) before learning can start")
train_args.add_argument("--epsilon_start", type=float, default=1.0, help="Exploration rate at the beginning of training")
train_args.add_argument("--epsilon_end", type=float, default=0.1, help="Fixed exploration rate at the end of epsilon decay")
train_args.add_argument("--epsilon_decay_step", type=int, default=1000000, help="After how many steps to stop decaying the exploration rate")
train_args.add_argument("--discount_rate", type=float, default=0.99, help="Discount rate (gamma) for future rewards")
train_args.add_argument("--update_target_step", type=int, default=10000, help="Copy current network parameters to target network every N steps")
train_args.add_argument("--save_checkpoint_step", type=int, default=100000, help="Save checkpoint every N steps")
train_args.add_argument("--save_log_step", type=int, default=1000, help="Save logs (training_time, avg_reward, num_episodes) every N steps")
# Files/directories
train_args.add_argument("--checkpoint_dir", type=str, default='./checkpoints', help="Directory for saving/loading checkpoints")
train_args.add_argument("--checkpoint_file", type=str, default=None, help="Checkpoint file to load and resume training from (if None, train from scratch)")
train_args.add_argument("--log_dir", type=str, default='./logs/train', help="Directory for saving logs")
log_filename = datetime.now().strftime("%Y%m%d_%H%M%S.log")
train_args.add_argument("--log_filename", type=str, default=log_filename, help="Log file name (current timestamp) DON'T MODIFY")
return train_args.parse_args()
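# Example invocation (a sketch; the script name is assumed and the values are
# illustrative, but every flag is defined above and any omitted flag falls back to its
# declared default):
#   python train.py --env Boxoban-Train-v0 --batch_size 32 --num_steps_train 1000000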
def log_train_args(args):
# Create summary writer to write summaries to disk
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
# Set up logging to file
log_filepath = os.path.join(args.log_dir, args.log_filename)
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] [%(name)-12s] [%(levelname)-8s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_filepath,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stdout
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(name)-12s] [%(levelname)-8s] %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# Get logger for training args
logger = logging.getLogger('args')
# Write the training arguments
for key, value in vars(args).items():
logger.info('{%s: %s}', key, value)
def train(args):
ACTION_SPACE = np.array([1, 2, 3, 4], dtype=np.uint8)
# Function to get a random actionID
def sample_action_space():
return random.choice(ACTION_SPACE)
# Function to convert actionID (1, 2, 3, 4) to actionQID (0, 1, 2, 3)
def actionID_to_actionQID(actionID):
return actionID-1
# Function to convert actionQID (0, 1, 2, 3) to actionID (1, 2, 3, 4)
def actionQID_to_actionID(actionQID):
return actionQID+1
# Function to return epsilon based on current step
def get_epsilon(current_step, epsilon_start, epsilon_end, epsilon_decay_step):
if current_step < epsilon_decay_step:
return epsilon_start + (epsilon_end - epsilon_start) / float(epsilon_decay_step) * current_step
else:
return epsilon_end
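# Worked example of the linear decay above, assuming the default arguments
# (epsilon_start=1.0, epsilon_end=0.1, epsilon_decay_step=1000000):
#   step 0       -> 1.0
#   step 500000  -> 1.0 + (0.1 - 1.0) / 1e6 * 5e5 = 0.55
#   step >= 1e6  -> 0.1 (held constant thereafter)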
# Get logger for training
logger = logging.getLogger('train')
# Check if GPU is available
logger.info("Num GPUs Available: %d", len(tf.config.experimental.list_physical_devices('GPU')))
# Create environment
env = gym.make(args.env)
num_actions = 4 # Push (up, down, left, right): 1, 2, 3, 4
env.unwrapped.set_maxsteps(args.max_step)
env.unwrapped.set_rewards(
[args.env_penalty_for_step,
args.env_reward_box_on_target,
args.env_penalty_box_off_target,
args.env_reward_finished])
# Set random seeds for reproducibility
random.seed(args.random_seed)
env.seed(args.random_seed)
np.random.seed(args.random_seed)
tf.random.set_seed(args.random_seed)
# Initialize replay memory and state buffer
replay_mem = ReplayMemory(args)
state_buf = StateBuffer(args)
# Check if resume from training
load_model_path = None
if args.checkpoint_file is not None: # Resume training
load_model_path = os.path.join(args.checkpoint_dir, args.checkpoint_file)
assert os.path.exists(load_model_path+'.index'), 'Path "{}" does not exist!'.format(load_model_path+'.index')
start_step = args.checkpoint_file.split('/')[-1].split('-')[-1]
assert len(start_step)>0, "Invalid checkpoint file for extracting start_step"
start_step = int(start_step)
else: # Train from scratch
# Create another directory for this training
args.checkpoint_dir = os.path.join(args.checkpoint_dir, args.log_filename.split('.')[0])
start_step = 0
# Create checkpoint directory
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
# Instantiate DQN and DQN_target
state_shape = (args.grid_height, args.grid_width, args.num_surfaces, args.grids_per_state)
DQN = DQNModel(state_shape, num_actions, args.learning_rate, load_model_path=load_model_path, name='DQN')
DQN_target = DQNModel(state_shape, num_actions, load_model_path=load_model_path, name='DQN_target')
## Begin training
env.reset()
# Populate replay memory to initial_replay_mem_size
logger.info("Populating replay memory with random actions...")
for si in range(args.initial_replay_mem_size):
if args.render:
env.render(mode='human')
else:
env.render(mode='tiny_rgb_array')
actionID = sample_action_space()
observation, reward, terminal, _ = env.step(actionID, observation_mode='tiny_rgb_array')
grid = preprocess_observation(args, observation)
replay_mem.add(actionID, reward, grid, terminal)
if terminal:
env.reset()
sys.stdout.write('\x1b[2K\rStep {:d}/{:d}'.format(si+1, args.initial_replay_mem_size))
sys.stdout.flush()
# Start training
reward_one_episode = 0
reward_episodes = []
step_one_episode = 0
step_episodes = []
Qval_steps = []
duration_steps = []
# Create tf summary writer to write summaries to disk
# ./logs/train/20200318_120026
tf_train_log_dir = os.path.join(args.log_dir.replace('train', 'tf_train'), args.log_filename.split('.')[0])
if not os.path.exists(tf_train_log_dir):
os.makedirs(tf_train_log_dir)
train_summary_writer = tf.summary.create_file_writer(tf_train_log_dir)
train_summary_writer.set_as_default()
if args.save_tb_trace:
# Model graphs
tf.summary.trace_on(graph=True, profiler=True)
reset_env_and_state_buffer(env, state_buf, args)
logger.info("Start training...")
for si in range(start_step+1, args.num_steps_train+1):
start_time = time.time()
## Playing Step
# Perform a step
if args.render:
env.render(mode='human')
else:
env.render(mode='tiny_rgb_array')
# Select a random action based on epsilon-greedy algorithm
epsilon = get_epsilon(si, args.epsilon_start, args.epsilon_end, args.epsilon_decay_step)
if random.random() < epsilon: # Take random action
actionID = sample_action_space()
else: # Take greedy action
state = tf.convert_to_tensor(state_buf.get_state(), dtype=tf.float32)
state = state[tf.newaxis, ...] # Add an axis for batch
actionQID = DQN.predict(state)
actionID = actionQID_to_actionID(int(actionQID)) # convert from Tensor to int
# Take the action and store state transition
observation, reward, terminal, _ = env.step(actionID, observation_mode='tiny_rgb_array')
grid = preprocess_observation(args, observation)
state_buf.add(grid)
replay_mem.add(actionID, reward, grid, terminal)
# Accumulate reward and increment step
reward_one_episode += reward
step_one_episode += 1
if terminal:
# Save the accumulated reward for this episode
reward_episodes.append(reward_one_episode)
reward_one_episode = 0
# Save the number of steps for this episode
step_episodes.append(step_one_episode)
step_one_episode = 0
# Reset environment and state buffer
reset_env_and_state_buffer(env, state_buf, args)
## Training Step
# Sample a random minibatch of transitions from ReplayMemory
states_batch, actionID_batch, rewards_batch, next_states_batch, terminals_batch = replay_mem.getMinibatch()
actionQID_batch = actionID_to_actionQID(actionID_batch)
# Infer DQN_target for Q(S', A)
next_states_batch = tf.convert_to_tensor(next_states_batch, dtype=tf.float32)
next_states_Qvals = DQN_target.infer(next_states_batch)
max_next_states_Qvals = tf.math.reduce_max(next_states_Qvals, axis=1, name='maxQ')
assert max_next_states_Qvals.shape == (args.batch_size,), "Wrong dimension for predicted next state Q vals"
# Zero out Q(S', A) for terminal states S' (np.invert flips the boolean terminal mask)
max_next_states_Qvals = tf.math.multiply(max_next_states_Qvals, np.invert(terminals_batch), name='remove_terminals')
# Save average maximum predicted Q values
Qval_steps.append(np.mean(max_next_states_Qvals[max_next_states_Qvals != 0]))
# Calculate the target Q values
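# targetQ = r + gamma * max_a' Q_target(s', a'); for terminal transitions the second
# term is zero because max_next_states_Qvals was masked out above.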
targetQs = rewards_batch + args.discount_rate * max_next_states_Qvals
# Pass to DQN
states_batch = tf.cast(states_batch, tf.float32)
targetQs = tf.cast(targetQs, tf.float32)
DQN.train_step(states_batch, actionQID_batch, targetQs)
# Update DQN_target every args.update_target_step steps
if si % args.update_target_step == 0:
update_save_path = os.path.join(args.checkpoint_dir, 'DQN_Update')
DQN.save_model(update_save_path)
DQN_target.load_model(update_save_path)
duration = time.time() - start_time
duration_steps.append(duration)
# Save log
if si % args.save_log_step == 0:
avg_training_loss = DQN.get_training_loss()
logger.info("{Training Step: %d/%d}", si, args.num_steps_train)
logger.info("Number of Episodes: %d", len(reward_episodes))
logger.info("Recent Step Exploration Rate: %.5f", epsilon)
logger.info("Average Per-Episode Reward: %.5f", sum(reward_episodes)/float(len(reward_episodes)))
logger.info("Average Per-Episode Step: %.3f", sum(step_episodes)/float(len(step_episodes)))
logger.info("Average Per-Step Maximum Predicted Q Value: %.8f", sum(Qval_steps)/float(len(Qval_steps)))
logger.info("Average Per-Step Training Loss: %.8f", avg_training_loss)
logger.info("Average Per-Step Training Time: %.5f second", sum(duration_steps)/float(len(duration_steps)))
tf.summary.scalar('Episodes', len(reward_episodes), step=si, description='Number of Episodes')
tf.summary.scalar('epsilon', epsilon, step=si, description='Recent Step Exploration Rate')
tf.summary.scalar('avgReward', sum(reward_episodes)/float(len(reward_episodes)), step=si, description='Average Per-Episode Reward')
tf.summary.scalar('avgStep', sum(step_episodes)/float(len(step_episodes)), step=si, description='Average Per-Episode Step Count')
tf.summary.scalar('avgQval', sum(Qval_steps)/float(len(Qval_steps)), step=si, description='Average Per-Step Maximum Predicted Q Value')
tf.summary.scalar('avgTrainLoss', avg_training_loss, step=si, description='Average Per-Step Training Loss')
tf.summary.scalar('avgTrainTime', sum(duration_steps)/float(len(duration_steps)), step=si, description='Average Per-Step Training Time')
if args.save_tb_trace:
# Save computation graph
tf.summary.trace_export(name="model_trace", step=si, profiler_outdir=tf_train_log_dir)
# Reset the parameters
reward_episodes = []
step_episodes = []
duration_steps = []
Qval_steps = []
# Save checkpoint
if si % args.save_checkpoint_step == 0:
save_checkpoint_path = os.path.join(args.checkpoint_dir,
'DQN_Train')
DQN.save_model(save_checkpoint_path, ckpt_number=si)
# Duplicate the current logfile
src_log_filepath = os.path.join(args.log_dir, args.log_filename)
dst_log_filepath = os.path.join(args.checkpoint_dir,
'DQN_Train_{}.log'.format(si))
shutil.copyfile(src_log_filepath, dst_log_filepath)
# Training finished
logger.info("Finished training...")
# Save trained network
save_final_network_path = os.path.join(args.checkpoint_dir, 'DQN_Trained')
DQN.save_model(save_final_network_path, ckpt_number=args.num_steps_train)
if __name__ == '__main__':
# Change back to repository directory
os.chdir(os.path.realpath(os.path.join(os.path.abspath(__file__), '../../')))
train_args = get_train_args()
log_train_args(train_args)
train(train_args)
|
# coding: utf-8
"""Provides a place for functions/modules which have been reogranized in the
python 2/3 switch use in this library to be located regardless of their
location in the running Python's standard library."""
__all__ = ['cookielib', 'urllib2', 'HTTPError', 'URLError', 'urlsplit',
'urljoin', 'urlunsplit', 'urlencode', 'quote', 'string_type',
'ensure_string', 'ensure_bytes', 'get_headers']
try:
import cookielib
except ImportError:
import http.cookiejar as cookielib
try:
import urllib2
except ImportError:
import urllib.request as urllib2
try:
from urllib2 import HTTPError, URLError
except ImportError:
from urllib.error import HTTPError, URLError
try:
from urlparse import urlsplit, urljoin, urlunsplit
except ImportError:
from urllib.parse import urlsplit, urljoin, urlunsplit
try:
from urllib import urlencode, quote
except ImportError:
from urllib.parse import urlencode, quote
string_type = str
try:
unicode
string_type = basestring
bytes = str
except NameError:
unicode = str
string_type = str
def ensure_string(payload_bytes):
if isinstance(payload_bytes, bytes):
return payload_bytes.decode("utf-8")
return payload_bytes
def ensure_bytes(payload_string):
if isinstance(payload_string, unicode):
return payload_string.encode("utf-8")
return payload_string
def get_headers(handle):
if hasattr(handle.headers, 'headers'):
return handle.headers.headers
return dict(handle.headers.items())
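# Minimal usage sketch (module name and URL are illustrative; only names exported in
# __all__ above are used):
#   from compat import urlsplit, ensure_bytes, ensure_string
#   parts = urlsplit('https://example.com/path?q=1')
#   ensure_string(ensure_bytes(parts.query))  # -> 'q=1' on both Python 2 and 3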
|
a = 0
b = 0
c = 0
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
def initialize(context):
from Products.PluginIndexes.FieldIndex.FieldIndex import FieldIndex
from Products.PluginIndexes.FieldIndex.FieldIndex \
import manage_addFieldIndex
from Products.PluginIndexes.FieldIndex.FieldIndex \
import manage_addFieldIndexForm
context.registerClass(FieldIndex,
permission='Add Pluggable Index',
constructors=(manage_addFieldIndexForm,
manage_addFieldIndex),
icon='www/index.gif',
visibility=None,
)
from Products.PluginIndexes.KeywordIndex.KeywordIndex import KeywordIndex
from Products.PluginIndexes.KeywordIndex.KeywordIndex \
import manage_addKeywordIndex
from Products.PluginIndexes.KeywordIndex.KeywordIndex \
import manage_addKeywordIndexForm
context.registerClass(KeywordIndex,
permission='Add Pluggable Index',
constructors=(manage_addKeywordIndexForm,
manage_addKeywordIndex),
icon='www/index.gif',
visibility=None,
)
from Products.PluginIndexes.TopicIndex.TopicIndex import TopicIndex
from Products.PluginIndexes.TopicIndex.TopicIndex \
import manage_addTopicIndex
from Products.PluginIndexes.TopicIndex.TopicIndex \
import manage_addTopicIndexForm
context.registerClass(TopicIndex,
permission='Add Pluggable Index',
constructors=(manage_addTopicIndexForm,
manage_addTopicIndex),
icon='www/index.gif',
visibility=None,
)
from Products.PluginIndexes.DateIndex.DateIndex import DateIndex
from Products.PluginIndexes.DateIndex.DateIndex \
import manage_addDateIndex
from Products.PluginIndexes.DateIndex.DateIndex \
import manage_addDateIndexForm
context.registerClass(DateIndex,
permission='Add Pluggable Index',
constructors=(manage_addDateIndexForm,
manage_addDateIndex),
icon='www/index.gif',
visibility=None,
)
from Products.PluginIndexes.DateRangeIndex.DateRangeIndex \
import DateRangeIndex
from Products.PluginIndexes.DateRangeIndex.DateRangeIndex \
import manage_addDateRangeIndex
from Products.PluginIndexes.DateRangeIndex.DateRangeIndex \
import manage_addDateRangeIndexForm
context.registerClass(DateRangeIndex,
permission='Add Pluggable Index',
constructors=(manage_addDateRangeIndexForm,
manage_addDateRangeIndex),
icon='www/index.gif',
visibility=None,
)
from Products.PluginIndexes.PathIndex.PathIndex import PathIndex
from Products.PluginIndexes.PathIndex.PathIndex \
import manage_addPathIndex
from Products.PluginIndexes.PathIndex.PathIndex \
import manage_addPathIndexForm
context.registerClass(PathIndex,
permission='Add Pluggable Index',
constructors=(manage_addPathIndexForm,
manage_addPathIndex),
icon='www/index.gif',
visibility=None,
)
from Products.PluginIndexes.BooleanIndex.BooleanIndex import BooleanIndex
from Products.PluginIndexes.BooleanIndex.BooleanIndex import \
manage_addBooleanIndex
from Products.PluginIndexes.BooleanIndex.BooleanIndex import \
manage_addBooleanIndexForm
context.registerClass(BooleanIndex,
permission='Add Pluggable Index',
constructors=(manage_addBooleanIndexForm,
manage_addBooleanIndex),
icon='www/index.gif',
visibility=None,
)
from Products.PluginIndexes.UUIDIndex.UUIDIndex import UUIDIndex
from Products.PluginIndexes.UUIDIndex.UUIDIndex import \
manage_addUUIDIndex
from Products.PluginIndexes.UUIDIndex.UUIDIndex import \
manage_addUUIDIndexForm
context.registerClass(UUIDIndex,
permission='Add Pluggable Index',
constructors=(manage_addUUIDIndexForm,
manage_addUUIDIndex),
icon='www/index.gif',
visibility=None,
)
|
# Copyright (c) 2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Provide accessors to enhance interoperability between xarray and MetPy.
MetPy relies upon the `CF Conventions <http://cfconventions.org/>`_ to provide helpful
attributes and methods on xarray DataArrays and Datasets for working with
coordinate-related metadata. Also included are several attributes and methods for unit
operations.
These accessors will be activated with any import of MetPy. Do not use the
``MetPyDataArrayAccessor`` or ``MetPyDatasetAccessor`` classes directly, instead, utilize the
applicable properties and methods via the ``.metpy`` attribute on an xarray DataArray or
Dataset.
See Also: :doc:`xarray with MetPy Tutorial </tutorials/xarray_tutorial>`.
"""
import functools
import logging
import re
import warnings
import numpy as np
import xarray as xr
from ._vendor.xarray import either_dict_or_kwargs, expanded_indexer, is_dict_like
from .units import DimensionalityError, UndefinedUnitError, units
__all__ = []
metpy_axes = ['time', 'vertical', 'y', 'latitude', 'x', 'longitude']
# Define the criteria for coordinate matches
coordinate_criteria = {
'standard_name': {
'time': 'time',
'vertical': {'air_pressure', 'height', 'geopotential_height', 'altitude',
'model_level_number', 'atmosphere_ln_pressure_coordinate',
'atmosphere_sigma_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'atmosphere_hybrid_height_coordinate', 'atmosphere_sleve_coordinate',
'height_above_geopotential_datum', 'height_above_reference_ellipsoid',
'height_above_mean_sea_level'},
'y': 'projection_y_coordinate',
'latitude': 'latitude',
'x': 'projection_x_coordinate',
'longitude': 'longitude'
},
'_CoordinateAxisType': {
'time': 'Time',
'vertical': {'GeoZ', 'Height', 'Pressure'},
'y': 'GeoY',
'latitude': 'Lat',
'x': 'GeoX',
'longitude': 'Lon'
},
'axis': {
'time': 'T',
'vertical': 'Z',
'y': 'Y',
'x': 'X'
},
'positive': {
'vertical': {'up', 'down'}
},
'units': {
'vertical': {
'match': 'dimensionality',
'units': 'Pa'
},
'latitude': {
'match': 'name',
'units': {'degree_north', 'degree_N', 'degreeN', 'degrees_north', 'degrees_N',
'degreesN'}
},
'longitude': {
'match': 'name',
'units': {'degree_east', 'degree_E', 'degreeE', 'degrees_east', 'degrees_E',
'degreesE'}
},
},
'regular_expression': {
'time': r'time[0-9]*',
'vertical': (r'(lv_|bottom_top|sigma|h(ei)?ght|altitude|depth|isobaric|pres|'
r'isotherm)[a-z_]*[0-9]*'),
'y': r'y',
'latitude': r'x?lat[a-z0-9]*',
'x': r'x',
'longitude': r'x?lon[a-z0-9]*'
}
}
log = logging.getLogger(__name__)
_axis_identifier_error = ('Given axis is not valid. Must be an axis number, a dimension '
'coordinate name, or a standard axis type.')
@xr.register_dataarray_accessor('metpy')
class MetPyDataArrayAccessor:
r"""Provide custom attributes and methods on xarray DataArrays for MetPy functionality.
This accessor provides several convenient attributes and methods through the `.metpy`
attribute on a DataArray. For example, MetPy can identify the coordinate corresponding
to a particular axis (given sufficient metadata):
>>> import xarray as xr
>>> from metpy.units import units
>>> temperature = xr.DataArray([[0, 1], [2, 3]] * units.degC, dims=('lat', 'lon'),
... coords={'lat': [40, 41], 'lon': [-105, -104]})
>>> temperature.metpy.x
<xarray.DataArray 'lon' (lon: 2)>
array([-105, -104])
Coordinates:
* lon (lon) int64 -105 -104
Attributes:
_metpy_axis: x,longitude
"""
def __init__(self, data_array): # noqa: D107
# Initialize accessor with a DataArray. (Do not use directly).
self._data_array = data_array
@property
def units(self):
"""Return the units of this DataArray as a `pint.Unit`."""
if isinstance(self._data_array.data, units.Quantity):
return self._data_array.data.units
else:
return units.parse_units(self._data_array.attrs.get('units', 'dimensionless'))
@property
def magnitude(self):
"""Return the magnitude of the data values of this DataArray (i.e., without units)."""
if isinstance(self._data_array.data, units.Quantity):
return self._data_array.data.magnitude
else:
return self._data_array.data
@property
def unit_array(self):
"""Return the data values of this DataArray as a `pint.Quantity`.
Notes
-----
If not already existing as a `pint.Quantity` or Dask array, the data of this DataArray
will be loaded into memory by this operation.
"""
if isinstance(self._data_array.data, units.Quantity):
return self._data_array.data
else:
return units.Quantity(self._data_array.data, self.units)
def convert_units(self, units):
"""Return new DataArray with values converted to different units.
Notes
-----
Any cached/lazy-loaded data (except that in a Dask array) will be loaded into memory
by this operation. Do not utilize on moderate- to large-sized remote datasets before
subsetting!
"""
return self.quantify().copy(data=self.unit_array.to(units))
def convert_coordinate_units(self, coord, units):
"""Return new DataArray with coordinate converted to different units.
Notes
-----
Any cached/lazy-loaded coordinate data (except that in a Dask array) will be loaded
into memory by this operation.
"""
new_coord_var = self._data_array[coord].copy(
data=self._data_array[coord].metpy.unit_array.m_as(units)
)
new_coord_var.attrs['units'] = str(units)
return self._data_array.assign_coords(coords={coord: new_coord_var})
def quantify(self):
"""Return a DataArray with the data converted to a `pint.Quantity`.
Notes
-----
Any cached/lazy-loaded data (except that in a Dask array) will be loaded into memory
by this operation. Do not utilize on moderate- to large-sized remote datasets before
subsetting!
"""
if (
not isinstance(self._data_array.data, units.Quantity)
and np.issubdtype(self._data_array.data.dtype, np.number)
):
# Only quantify if not already quantified and is quantifiable
quantified_dataarray = self._data_array.copy(data=self.unit_array)
if 'units' in quantified_dataarray.attrs:
del quantified_dataarray.attrs['units']
else:
quantified_dataarray = self._data_array
return quantified_dataarray
def dequantify(self):
"""Return a DataArray with the data as magnitude and the units as an attribute."""
if isinstance(self._data_array.data, units.Quantity):
# Only dequantify if quantified
dequantified_dataarray = self._data_array.copy(
data=self._data_array.data.magnitude
)
dequantified_dataarray.attrs['units'] = str(self.units)
else:
dequantified_dataarray = self._data_array
return dequantified_dataarray
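# Sketch of the quantify/dequantify round trip (assumes a hypothetical DataArray
# `temperature` whose attrs carry units of 'degC'):
#   q = temperature.metpy.quantify()    # data becomes a pint.Quantity; 'units' attr removed
#   d = q.metpy.dequantify()            # magnitude restored; units written back as an attribute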
@property
def crs(self):
"""Return the coordinate reference system (CRS) as a CFProjection object."""
if 'crs' in self._data_array.coords:
return self._data_array.coords['crs'].item()
raise AttributeError('crs attribute is not available.')
@property
def cartopy_crs(self):
"""Return the coordinate reference system (CRS) as a cartopy object."""
return self.crs.to_cartopy()
@property
def cartopy_globe(self):
"""Return the globe belonging to the coordinate reference system (CRS)."""
return self.crs.cartopy_globe
@property
def cartopy_geodetic(self):
"""Return the Geodetic CRS associated with the native CRS globe."""
return self.crs.cartopy_geodetic
def _fixup_coordinate_map(self, coord_map):
"""Ensure sure we have coordinate variables in map, not coordinate names."""
new_coord_map = {}
for axis in coord_map:
if coord_map[axis] is not None and not isinstance(coord_map[axis], xr.DataArray):
new_coord_map[axis] = self._data_array[coord_map[axis]]
else:
new_coord_map[axis] = coord_map[axis]
return new_coord_map
def assign_coordinates(self, coordinates):
"""Return new DataArray with given coordinates assigned to the given MetPy axis types.
Parameters
----------
coordinates : dict or None
Mapping from axis types ('time', 'vertical', 'y', 'latitude', 'x', 'longitude') to
coordinates of this DataArray. Coordinates can either be specified directly or by
their name. If ``None``, clears the `_metpy_axis` attribute on all coordinates,
which will trigger reparsing of all coordinates on next access.
"""
coord_updates = {}
if coordinates:
# Assign the _metpy_axis attributes according to supplied mapping
coordinates = self._fixup_coordinate_map(coordinates)
for axis in coordinates:
if coordinates[axis] is not None:
coord_updates[coordinates[axis].name] = (
coordinates[axis].assign_attrs(
_assign_axis(coordinates[axis].attrs.copy(), axis)
)
)
else:
# Clear _metpy_axis attribute on all coordinates
for coord_name, coord_var in self._data_array.coords.items():
coord_updates[coord_name] = coord_var.copy(deep=False)
# Some coordinates remained linked in old form under other coordinates. We
# need to remove from these.
sub_coords = coord_updates[coord_name].coords
for sub_coord in sub_coords:
coord_updates[coord_name].coords[sub_coord].attrs.pop('_metpy_axis', None)
# Now we can remove the _metpy_axis attr from the coordinate itself
coord_updates[coord_name].attrs.pop('_metpy_axis', None)
return self._data_array.assign_coords(coord_updates)
def _generate_coordinate_map(self):
"""Generate a coordinate map via CF conventions and other methods."""
coords = self._data_array.coords.values()
# Parse all the coordinates, attempting to identify x, longitude, y, latitude,
# vertical, time
coord_lists = {'time': [], 'vertical': [], 'y': [], 'latitude': [], 'x': [],
'longitude': []}
for coord_var in coords:
# Identify the coordinate type using check_axis helper
for axis in coord_lists:
if check_axis(coord_var, axis):
coord_lists[axis].append(coord_var)
# Fill in x/y with longitude/latitude if x/y not otherwise present
for geometric, graticule in (('y', 'latitude'), ('x', 'longitude')):
if len(coord_lists[geometric]) == 0 and len(coord_lists[graticule]) > 0:
coord_lists[geometric] = coord_lists[graticule]
# Filter out multidimensional coordinates where not allowed
require_1d_coord = ['time', 'vertical', 'y', 'x']
for axis in require_1d_coord:
coord_lists[axis] = [coord for coord in coord_lists[axis] if coord.ndim <= 1]
# Resolve any coordinate type duplication
axis_duplicates = [axis for axis in coord_lists if len(coord_lists[axis]) > 1]
for axis in axis_duplicates:
self._resolve_axis_duplicates(axis, coord_lists)
# Collapse the coord_lists to a coord_map
return {axis: (coord_lists[axis][0] if len(coord_lists[axis]) > 0 else None)
for axis in coord_lists}
def _resolve_axis_duplicates(self, axis, coord_lists):
"""Handle coordinate duplication for an axis type if it arises."""
# If one and only one of the possible axes is a dimension, use it
dimension_coords = [coord_var for coord_var in coord_lists[axis] if
coord_var.name in coord_var.dims]
if len(dimension_coords) == 1:
coord_lists[axis] = dimension_coords
return
# Ambiguous axis, raise warning and do not parse
varname = (' "' + self._data_array.name + '"'
if self._data_array.name is not None else '')
warnings.warn('More than one ' + axis + ' coordinate present for variable'
+ varname + '.')
coord_lists[axis] = []
def _metpy_axis_search(self, metpy_axis):
"""Search for cached _metpy_axis attribute on the coordinates, otherwise parse."""
# Search for coord with proper _metpy_axis
coords = self._data_array.coords.values()
for coord_var in coords:
if metpy_axis in coord_var.attrs.get('_metpy_axis', '').split(','):
return coord_var
# Opportunistically parse all coordinates, and assign if not already assigned
# Note: since this is generally called by way of the coordinate properties, to cache
# the coordinate parsing results in coord_map on the coordinates means modifying the
# DataArray in-place (an exception to the usual behavior of MetPy's accessor). This is
# considered safe because it only affects the "_metpy_axis" attribute on the
# coordinates, and nothing else.
coord_map = self._generate_coordinate_map()
for axis, coord_var in coord_map.items():
if (coord_var is not None
and not any(axis in coord.attrs.get('_metpy_axis', '').split(',')
for coord in coords)):
_assign_axis(coord_var.attrs, axis)
# Return parsed result (can be None if none found)
return coord_map[metpy_axis]
def _axis(self, axis):
"""Return the coordinate variable corresponding to the given individual axis type."""
if axis in metpy_axes:
coord_var = self._metpy_axis_search(axis)
if coord_var is not None:
return coord_var
else:
raise AttributeError(axis + ' attribute is not available.')
else:
raise AttributeError("'" + axis + "' is not an interpretable axis.")
def coordinates(self, *args):
"""Return the coordinate variables corresponding to the given axes types.
Parameters
----------
args : str
Strings describing the axes type(s) to obtain. Currently understood types are
'time', 'vertical', 'y', 'latitude', 'x', and 'longitude'.
Notes
-----
This method is designed for use with multiple coordinates; it returns a generator. To
access a single coordinate, use the appropriate attribute on the accessor, or use tuple
unpacking.
"""
for arg in args:
yield self._axis(arg)
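# For example (a sketch, assuming a parsed DataArray named `temperature`):
#   x, y = temperature.metpy.coordinates('x', 'y')
# which is equivalent to accessing temperature.metpy.x and temperature.metpy.y separately.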
@property
def time(self):
"""Return the time coordinate."""
return self._axis('time')
@property
def vertical(self):
"""Return the vertical coordinate."""
return self._axis('vertical')
@property
def y(self):
"""Return the y coordinate."""
return self._axis('y')
@property
def latitude(self):
"""Return the latitude coordinate (if it exists)."""
return self._axis('latitude')
@property
def x(self):
"""Return the x coordinate."""
return self._axis('x')
@property
def longitude(self):
"""Return the longitude coordinate (if it exists)."""
return self._axis('longitude')
def coordinates_identical(self, other):
"""Return whether or not the coordinates of other match this DataArray's."""
# If the number of coordinates does not match, we know they can't match.
if len(self._data_array.coords) != len(other.coords):
return False
# If same length, iterate over all of them and check
for coord_name, coord_var in self._data_array.coords.items():
if coord_name not in other.coords or not other[coord_name].identical(coord_var):
return False
# Otherwise, they match.
return True
@property
def time_deltas(self):
"""Return the time difference of the data in seconds (to microsecond precision)."""
return (np.diff(self._data_array.values).astype('timedelta64[us]').astype('int64')
/ 1e6 * units.s)
def find_axis_name(self, axis):
"""Return the name of the axis corresponding to the given identifier.
Parameters
----------
axis : str or int
Identifier for an axis. Can be an axis number (integer), dimension coordinate
name (string) or a standard axis type (string).
"""
if isinstance(axis, int):
# If an integer, use the corresponding dimension
return self._data_array.dims[axis]
elif axis not in self._data_array.dims and axis in metpy_axes:
# If not a dimension name itself, but a valid axis type, get the name of the
# coordinate corresponding to that axis type
return self._axis(axis).name
elif axis in self._data_array.dims and axis in self._data_array.coords:
# If this is a dimension coordinate name, use it directly
return axis
else:
# Otherwise, not valid
raise ValueError(_axis_identifier_error)
def find_axis_number(self, axis):
"""Return the dimension number of the axis corresponding to the given identifier.
Parameters
----------
axis : str or int
Identifier for an axis. Can be an axis number (integer), dimension coordinate
name (string) or a standard axis type (string).
"""
if isinstance(axis, int):
# If an integer, use it directly
return axis
elif axis in self._data_array.dims:
# Simply index into dims
return self._data_array.dims.index(axis)
elif axis in metpy_axes:
# If not a dimension name itself, but a valid axis type, first determine if this
# standard axis type is present as a dimension coordinate
try:
name = self._axis(axis).name
return self._data_array.dims.index(name)
except AttributeError as exc:
# If x or y requested, but x or y not available, attempt to interpret dim
# names using regular expressions from coordinate parsing to allow for
# multidimensional lat/lon without y/x dimension coordinates
if axis in ('y', 'x'):
for i, dim in enumerate(self._data_array.dims):
if re.match(coordinate_criteria['regular_expression'][axis],
dim.lower()):
return i
raise exc
except ValueError:
# Intercept ValueError when axis type found but not dimension coordinate
raise AttributeError(f'Requested {axis} dimension coordinate but {axis} '
f'coordinate {name} is not a dimension')
else:
# Otherwise, not valid
raise ValueError(_axis_identifier_error)
class _LocIndexer:
"""Provide the unit-wrapped .loc indexer for data arrays."""
def __init__(self, data_array):
self.data_array = data_array
def expand(self, key):
"""Parse key using xarray utils to ensure we have dimension names."""
if not is_dict_like(key):
labels = expanded_indexer(key, self.data_array.ndim)
key = dict(zip(self.data_array.dims, labels))
return key
def __getitem__(self, key):
key = _reassign_quantity_indexer(self.data_array, self.expand(key))
return self.data_array.loc[key]
def __setitem__(self, key, value):
key = _reassign_quantity_indexer(self.data_array, self.expand(key))
self.data_array.loc[key] = value
@property
def loc(self):
"""Wrap DataArray.loc with an indexer to handle units and coordinate types."""
return self._LocIndexer(self._data_array)
def sel(self, indexers=None, method=None, tolerance=None, drop=False, **indexers_kwargs):
"""Wrap DataArray.sel to handle units and coordinate types."""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'sel')
indexers = _reassign_quantity_indexer(self._data_array, indexers)
return self._data_array.sel(indexers, method=method, tolerance=tolerance, drop=drop)
def assign_crs(self, cf_attributes=None, **kwargs):
"""Assign a CRS to this DataArray based on CF projection attributes.
Parameters
----------
cf_attributes : dict, optional
Dictionary of CF projection attributes
kwargs : optional
CF projection attributes specified as keyword arguments
Returns
-------
`xarray.DataArray`
New xarray DataArray with CRS coordinate assigned
Notes
-----
CF projection arguments should be supplied as a dictionary or collection of kwargs,
but not both.
"""
return _assign_crs(self._data_array, cf_attributes, kwargs)
def assign_latitude_longitude(self, force=False):
"""Assign latitude and longitude coordinates derived from y and x coordinates.
Parameters
----------
force : bool, optional
If force is true, overwrite latitude and longitude coordinates if they exist,
otherwise, raise a RuntimeError if such coordinates exist.
Returns
-------
`xarray.DataArray`
New xarray DataArray with latitude and longitude auxiliary coordinates assigned.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
"""
# Check for existing latitude and longitude coords
if (not force and (self._metpy_axis_search('latitude') is not None
or self._metpy_axis_search('longitude') is not None)):
raise RuntimeError('Latitude/longitude coordinate(s) are present. If you wish to '
'overwrite these, specify force=True.')
# Build new latitude and longitude DataArrays
latitude, longitude = _build_latitude_longitude(self._data_array)
# Assign new coordinates, refresh MetPy's parsed axis attribute, and return result
new_dataarray = self._data_array.assign_coords(latitude=latitude, longitude=longitude)
return new_dataarray.metpy.assign_coordinates(None)
def assign_y_x(self, force=False, tolerance=None):
"""Assign y and x dimension coordinates derived from 2D latitude and longitude.
Parameters
----------
force : bool, optional
If force is true, overwrite y and x coordinates if they exist, otherwise, raise a
RuntimeError if such coordinates exist.
tolerance : `pint.Quantity`
Maximum range tolerated when collapsing projected y and x coordinates from 2D to
1D. Defaults to 1 meter.
Returns
-------
`xarray.DataArray`
New xarray DataArray with y and x dimension coordinates assigned.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
"""
# Check for existing latitude and longitude coords
if (not force and (self._metpy_axis_search('y') is not None
or self._metpy_axis_search('x') is not None)):
raise RuntimeError('y/x coordinate(s) are present. If you wish to overwrite '
'these, specify force=True.')
# Build new y and x DataArrays
y, x = _build_y_x(self._data_array, tolerance)
# Assign new coordinates, refresh MetPy's parsed axis attribute, and return result
new_dataarray = self._data_array.assign_coords(**{y.name: y, x.name: x})
return new_dataarray.metpy.assign_coordinates(None)
@xr.register_dataset_accessor('metpy')
class MetPyDatasetAccessor:
"""Provide custom attributes and methods on XArray Datasets for MetPy functionality.
This accessor provides parsing of CF metadata and unit-/coordinate-type-aware selection.
>>> import xarray as xr
>>> from metpy.cbook import get_test_data
>>> ds = xr.open_dataset(get_test_data('narr_example.nc', False)).metpy.parse_cf()
>>> print(ds['crs'].item())
Projection: lambert_conformal_conic
"""
def __init__(self, dataset): # noqa: D107
# Initialize accessor with a Dataset. (Do not use directly).
self._dataset = dataset
def parse_cf(self, varname=None, coordinates=None):
"""Parse Climate and Forecasting (CF) convention metadata.
Parameters
----------
varname : str or iterable of str, optional
Name of the variable(s) to extract from the dataset while parsing for CF metadata.
Defaults to all variables.
coordinates : dict, optional
Dictionary mapping CF axis types to coordinates of the variable(s). Only specify
if you wish to override MetPy's automatic parsing of some axis type(s).
Returns
-------
`xarray.DataArray` or `xarray.Dataset`
Parsed DataArray (if varname is a string) or Dataset
"""
from .cbook import iterable
from .plots.mapping import CFProjection
if varname is None:
# If no varname is given, parse all variables in the dataset
varname = list(self._dataset.data_vars)
if iterable(varname) and not isinstance(varname, str):
# If non-string iterable is given, apply recursively across the varnames
subset = xr.merge([self.parse_cf(single_varname, coordinates=coordinates)
for single_varname in varname])
subset.attrs = self._dataset.attrs
return subset
var = self._dataset[varname]
# Assign coordinates if the coordinates argument is given
if coordinates is not None:
var = var.metpy.assign_coordinates(coordinates)
# Attempt to build the crs coordinate
crs = None
if 'grid_mapping' in var.attrs:
# Use given CF grid_mapping
proj_name = var.attrs['grid_mapping']
try:
proj_var = self._dataset.variables[proj_name]
except KeyError:
log.warning(
'Could not find variable corresponding to the value of '
f'grid_mapping: {proj_name}')
else:
crs = CFProjection(proj_var.attrs)
if crs is None and not check_axis(var, 'latitude', 'longitude'):
# This isn't a lat or lon coordinate itself, so determine if we need to fall back
# to creating a latitude_longitude CRS. We do so if there exists valid coordinates
# for latitude and longitude, even if they are not the dimension coordinates of
# the variable.
def _has_coord(coord_type):
return any(check_axis(coord_var, coord_type)
for coord_var in var.coords.values())
if _has_coord('latitude') and _has_coord('longitude'):
crs = CFProjection({'grid_mapping_name': 'latitude_longitude'})
log.warning('Found valid latitude/longitude coordinates, assuming '
'latitude_longitude for projection grid_mapping variable')
# Rebuild the coordinates of the dataarray, and return quantified DataArray
var = self._rebuild_coords(var, crs)
if crs is not None:
var = var.assign_coords(coords={'crs': crs})
return var
def _rebuild_coords(self, var, crs):
"""Clean up the units on the coordinate variables."""
for coord_name, coord_var in var.coords.items():
if (check_axis(coord_var, 'x', 'y')
and not check_axis(coord_var, 'longitude', 'latitude')):
try:
var = var.metpy.convert_coordinate_units(coord_name, 'meters')
except DimensionalityError:
# Radians! Attempt to use perspective point height conversion
if crs is not None:
height = crs['perspective_point_height']
new_coord_var = coord_var.copy(
data=(
coord_var.metpy.unit_array
* (height * units.meter)
).m_as('meter')
)
new_coord_var.attrs['units'] = 'meter'
var = var.assign_coords(coords={coord_name: new_coord_var})
return var
class _LocIndexer:
"""Provide the unit-wrapped .loc indexer for datasets."""
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, key):
parsed_key = _reassign_quantity_indexer(self.dataset, key)
return self.dataset.loc[parsed_key]
@property
def loc(self):
"""Wrap Dataset.loc with an indexer to handle units and coordinate types."""
return self._LocIndexer(self._dataset)
def sel(self, indexers=None, method=None, tolerance=None, drop=False, **indexers_kwargs):
"""Wrap Dataset.sel to handle units."""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'sel')
indexers = _reassign_quantity_indexer(self._dataset, indexers)
return self._dataset.sel(indexers, method=method, tolerance=tolerance, drop=drop)
def assign_crs(self, cf_attributes=None, **kwargs):
"""Assign a CRS to this Datatset based on CF projection attributes.
Parameters
----------
cf_attributes : dict, optional
Dictionary of CF projection attributes
kwargs : optional
CF projection attributes specified as keyword arguments
Returns
-------
`xarray.Dataset`
New xarray Dataset with CRS coordinate assigned
Notes
-----
CF projection arguments should be supplied as a dictionary or collection of kwargs,
but not both.
"""
return _assign_crs(self._dataset, cf_attributes, kwargs)
def assign_latitude_longitude(self, force=False):
"""Assign latitude and longitude coordinates derived from y and x coordinates.
Parameters
----------
force : bool, optional
If force is true, overwrite latitude and longitude coordinates if they exist,
otherwise, raise a RuntimeError if such coordinates exist.
Returns
-------
`xarray.Dataset`
New xarray Dataset with latitude and longitude coordinates assigned to all
variables with y and x coordinates.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
"""
# Determine if there is a valid grid prototype from which to compute the coordinates,
# while also checking for existing lat/lon coords
grid_prototype = None
for data_var in self._dataset.data_vars.values():
if hasattr(data_var.metpy, 'y') and hasattr(data_var.metpy, 'x'):
if grid_prototype is None:
grid_prototype = data_var
if (not force and (hasattr(data_var.metpy, 'latitude')
or hasattr(data_var.metpy, 'longitude'))):
raise RuntimeError('Latitude/longitude coordinate(s) are present. If you '
'wish to overwrite these, specify force=True.')
# Calculate latitude and longitude from grid_prototype, if it exists, and assign
if grid_prototype is None:
warnings.warn('No latitude and longitude assigned since horizontal coordinates '
'were not found')
return self._dataset
else:
latitude, longitude = _build_latitude_longitude(grid_prototype)
return self._dataset.assign_coords(latitude=latitude, longitude=longitude)
def assign_y_x(self, force=False, tolerance=None):
"""Assign y and x dimension coordinates derived from 2D latitude and longitude.
Parameters
----------
force : bool, optional
If force is true, overwrite y and x coordinates if they exist, otherwise, raise a
RuntimeError if such coordinates exist.
tolerance : `pint.Quantity`
Maximum range tolerated when collapsing projected y and x coordinates from 2D to
1D. Defaults to 1 meter.
Returns
-------
`xarray.Dataset`
New xarray Dataset with y and x dimension coordinates assigned to all variables
with valid latitude and longitude coordinates.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
"""
# Determine if there is a valid grid prototype from which to compute the coordinates,
# while also checking for existing y and x coords
grid_prototype = None
for data_var in self._dataset.data_vars.values():
if hasattr(data_var.metpy, 'latitude') and hasattr(data_var.metpy, 'longitude'):
if grid_prototype is None:
grid_prototype = data_var
if (not force and (hasattr(data_var.metpy, 'y')
or hasattr(data_var.metpy, 'x'))):
raise RuntimeError('y/x coordinate(s) are present. If you wish to '
'overwrite these, specify force=True.')
# Calculate y and x from grid_prototype, if it exists, and assign
if grid_prototype is None:
warnings.warn('No y and x coordinates assigned since horizontal coordinates '
'were not found')
return self._dataset
else:
y, x = _build_y_x(grid_prototype, tolerance)
return self._dataset.assign_coords(**{y.name: y, x.name: x})
def update_attribute(self, attribute, mapping):
"""Return new Dataset with specified attribute updated on all Dataset variables.
Parameters
----------
attribute : str
Name of attribute to update
mapping : dict or callable
Either a dict, with keys as variable names and values as attribute values to set,
or a callable, which must accept one positional argument (variable name) and
arbitrary keyword arguments (all existing variable attributes). If a variable name
is not present/the callable returns None, the attribute will not be updated.
Returns
-------
`xarray.Dataset`
New Dataset with attribute updated
"""
# Make mapping uniform
if not callable(mapping):
old_mapping = mapping
def mapping(varname, **kwargs):
return old_mapping.get(varname, None)
# Define mapping function for Dataset.map
def mapping_func(da):
new_value = mapping(da.name, **da.attrs)
if new_value is None:
return da
else:
return da.assign_attrs(**{attribute: new_value})
# Apply across all variables and coordinates
return (
self._dataset
.map(mapping_func, keep_attrs=True)
.assign_coords({
coord_name: mapping_func(coord_var)
for coord_name, coord_var in self._dataset.coords.items()
})
)
def quantify(self):
"""Return new dataset with all numeric variables quantified and cached data loaded."""
return self._dataset.map(lambda da: da.metpy.quantify(), keep_attrs=True)
def dequantify(self):
"""Return new dataset with variables cast to magnitude and units on attribute."""
return self._dataset.map(lambda da: da.metpy.dequantify(), keep_attrs=True)
def _assign_axis(attributes, axis):
"""Assign the given axis to the _metpy_axis attribute."""
existing_axes = attributes.get('_metpy_axis', '').split(',')
if ((axis == 'y' and 'latitude' in existing_axes)
or (axis == 'latitude' and 'y' in existing_axes)):
# Special case for combined y/latitude handling
attributes['_metpy_axis'] = 'y,latitude'
elif ((axis == 'x' and 'longitude' in existing_axes)
or (axis == 'longitude' and 'x' in existing_axes)):
# Special case for combined x/longitude handling
attributes['_metpy_axis'] = 'x,longitude'
else:
# Simply add it/overwrite past value
attributes['_metpy_axis'] = axis
return attributes
def check_axis(var, *axes):
"""Check if the criteria for any of the given axes are satisfied.
Parameters
----------
var : `xarray.DataArray`
DataArray belonging to the coordinate to be checked
axes : str
Axis type(s) to check for. Currently can check for 'time', 'vertical', 'y', 'latitude',
'x', and 'longitude'.
"""
for axis in axes:
# Check for
# - standard name (CF option)
# - _CoordinateAxisType (from THREDDS)
# - axis (CF option)
# - positive (CF standard for non-pressure vertical coordinate)
for criterion in ('standard_name', '_CoordinateAxisType', 'axis', 'positive'):
if (var.attrs.get(criterion, 'absent') in
coordinate_criteria[criterion].get(axis, set())):
return True
# Check for units, either by dimensionality or name
try:
if (axis in coordinate_criteria['units'] and (
(
coordinate_criteria['units'][axis]['match'] == 'dimensionality'
and (units.get_dimensionality(var.metpy.units)
== units.get_dimensionality(
coordinate_criteria['units'][axis]['units']))
) or (
coordinate_criteria['units'][axis]['match'] == 'name'
and str(var.metpy.units)
in coordinate_criteria['units'][axis]['units']
))):
return True
except UndefinedUnitError:
pass
# Check if name matches regular expression (non-CF failsafe)
if re.match(coordinate_criteria['regular_expression'][axis], var.name.lower()):
return True
# If no match has been made, return False (rather than None)
return False
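# For instance (a sketch): a coordinate whose attrs include
# {'standard_name': 'air_pressure'} or whose units are 'hPa' satisfies
# check_axis(coord, 'vertical'), and a bare dimension named 'time0' still matches the
# 'time' regular-expression fallback.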
def _assign_crs(xarray_object, cf_attributes, cf_kwargs):
from .plots.mapping import CFProjection
# Handle argument options
if cf_attributes is not None and len(cf_kwargs) > 0:
raise ValueError('Cannot specify both attribute dictionary and kwargs.')
elif cf_attributes is None and len(cf_kwargs) == 0:
raise ValueError('Must specify either attribute dictionary or kwargs.')
attrs = cf_attributes if cf_attributes is not None else cf_kwargs
# Assign crs coordinate to xarray object
return xarray_object.assign_coords(crs=CFProjection(attrs))
def _build_latitude_longitude(da):
"""Build latitude/longitude coordinates from DataArray's y/x coordinates."""
y, x = da.metpy.coordinates('y', 'x')
xx, yy = np.meshgrid(x.values, y.values)
lonlats = da.metpy.cartopy_geodetic.transform_points(da.metpy.cartopy_crs, xx, yy)
longitude = xr.DataArray(lonlats[..., 0], dims=(y.name, x.name),
coords={y.name: y, x.name: x},
attrs={'units': 'degrees_east', 'standard_name': 'longitude'})
latitude = xr.DataArray(lonlats[..., 1], dims=(y.name, x.name),
coords={y.name: y, x.name: x},
attrs={'units': 'degrees_north', 'standard_name': 'latitude'})
return latitude, longitude
def _build_y_x(da, tolerance):
"""Build y/x coordinates from DataArray's latitude/longitude coordinates."""
# Initial sanity checks
latitude, longitude = da.metpy.coordinates('latitude', 'longitude')
if latitude.dims != longitude.dims:
raise ValueError('Latitude and longitude must have same dimensionality')
elif latitude.ndim != 2:
raise ValueError('To build 1D y/x coordinates via assign_y_x, latitude/longitude '
'must be 2D')
# Convert to projected y/x
xxyy = da.metpy.cartopy_crs.transform_points(da.metpy.cartopy_geodetic,
longitude.values,
latitude.values)
# Handle tolerance
tolerance = 1 if tolerance is None else tolerance.m_as('m')
# If within tolerance, take median to collapse to 1D
try:
y_dim = latitude.metpy.find_axis_number('y')
x_dim = latitude.metpy.find_axis_number('x')
except AttributeError:
warnings.warn('y and x dimensions unable to be identified. Assuming [..., y, x] '
'dimension order.')
y_dim, x_dim = 0, 1
if (np.all(np.ptp(xxyy[..., 0], axis=y_dim) < tolerance)
and np.all(np.ptp(xxyy[..., 1], axis=x_dim) < tolerance)):
x = np.median(xxyy[..., 0], axis=y_dim)
y = np.median(xxyy[..., 1], axis=x_dim)
x = xr.DataArray(x, name=latitude.dims[x_dim], dims=(latitude.dims[x_dim],),
coords={latitude.dims[x_dim]: x},
attrs={'units': 'meter', 'standard_name': 'projection_x_coordinate'})
y = xr.DataArray(y, name=latitude.dims[y_dim], dims=(latitude.dims[y_dim],),
coords={latitude.dims[y_dim]: y},
attrs={'units': 'meter', 'standard_name': 'projection_y_coordinate'})
return y, x
else:
raise ValueError('Projected y and x coordinates cannot be collapsed to 1D within '
'tolerance. Verify that your latitude and longitude coordinates '
'correspond to your CRS coordinate.')
def preprocess_xarray(func):
"""Decorate a function to convert all DataArray arguments to pint.Quantities.
This uses the metpy xarray accessors to do the actual conversion.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
args = tuple(a.metpy.unit_array if isinstance(a, xr.DataArray) else a for a in args)
kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)
for name, v in kwargs.items()}
return func(*args, **kwargs)
return wrapper
def check_matching_coordinates(func):
"""Decorate a function to make sure all given DataArrays have matching coordinates."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
data_arrays = ([a for a in args if isinstance(a, xr.DataArray)]
+ [a for a in kwargs.values() if isinstance(a, xr.DataArray)])
if len(data_arrays) > 1:
first = data_arrays[0]
for other in data_arrays[1:]:
if not first.metpy.coordinates_identical(other):
raise ValueError('Input DataArray arguments must be on same coordinates.')
return func(*args, **kwargs)
return wrapper
def _reassign_quantity_indexer(data, indexers):
"""Reassign a units.Quantity indexer to units of relevant coordinate."""
def _to_magnitude(val, unit):
try:
return val.m_as(unit)
except AttributeError:
return val
# Update indexers keys for axis type -> coord name replacement
indexers = {(key if not isinstance(data, xr.DataArray) or key in data.dims
or key not in metpy_axes else
next(data.metpy.coordinates(key)).name): indexers[key]
for key in indexers}
# Update indexers to handle quantities and slices of quantities
reassigned_indexers = {}
for coord_name in indexers:
coord_units = data[coord_name].metpy.units
if isinstance(indexers[coord_name], slice):
# Handle slices of quantities
start = _to_magnitude(indexers[coord_name].start, coord_units)
stop = _to_magnitude(indexers[coord_name].stop, coord_units)
step = _to_magnitude(indexers[coord_name].step, coord_units)
reassigned_indexers[coord_name] = slice(start, stop, step)
else:
# Handle quantities
reassigned_indexers[coord_name] = _to_magnitude(indexers[coord_name], coord_units)
return reassigned_indexers
__all__ = ('MetPyDataArrayAccessor', 'MetPyDatasetAccessor')
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from graph.types import ConstantInputParameters
from importer.onnx.common import logger
from ..backend_handler import BackendHandler
from ..handler import onnx_op, partial_support, ps_description
from .broadcast_mixin import BroadcastMixin
from importer.common.constant_mixin import ConstantMixin
@onnx_op("Expand")
@partial_support(True)
@ps_description("only implemented on constants at import")
class Expand(BroadcastMixin, ConstantMixin, BackendHandler):
@classmethod
def _common(cls, node, **kwargs):
all_nodes = kwargs['all_nodes']
valid_name = kwargs['valid_name']
inputs = [all_nodes[inp] for inp in node.input]
x = inputs[0]
y = inputs[1]
shape = cls.get_constant(y)
pshape = cls.broadcast_to(x, shape)
if cls.is_constant(x):
logger.info("reducing %s to a constant", valid_name)
x_val = cls.get_constant(x)
params = ConstantInputParameters(valid_name, value=x_val * np.ones(shape))
else:
raise ValueError("Expand is only implemented on constants")
all_nodes[node.output[0]] = (params, 0, pshape)
return params
@classmethod
def version_8(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
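# Illustrative sketch (not part of the importer): the constant folding in _common relies on
# numpy broadcasting, where multiplying the constant value by np.ones(shape) expands it to the
# requested shape, matching ONNX Expand semantics for constant inputs. The values are made up.
def _example_expand_constant():
    x_val = np.array([[1.0], [2.0], [3.0]])  # shape (3, 1)
    shape = (3, 4)  # target shape taken from the second (constant) input
    return x_val * np.ones(shape)  # broadcast result has shape (3, 4)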
|
from datetime import datetime
import urlparse
import urllib2
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from bookmarks.models import Bookmark, BookmarkInstance
from bookmarks.forms import BookmarkInstanceForm
def bookmarks(request):
bookmarks = Bookmark.objects.all().order_by("-added")
if request.user.is_authenticated():
user_bookmarks = Bookmark.objects.filter(saved_instances__user=request.user)
else:
user_bookmarks = []
return render_to_response("bookmarks/bookmarks.html", {
"bookmarks": bookmarks,
"user_bookmarks": user_bookmarks,
}, context_instance=RequestContext(request))
@login_required
def your_bookmarks(request):
bookmark_instances = BookmarkInstance.objects.filter(user=request.user).order_by("-saved")
return render_to_response("bookmarks/your_bookmarks.html", {
"bookmark_instances": bookmark_instances,
}, context_instance=RequestContext(request))
@login_required
def add(request):
if request.method == "POST":
bookmark_form = BookmarkInstanceForm(request.user, request.POST)
if bookmark_form.is_valid():
bookmark_instance = bookmark_form.save(commit=False)
bookmark_instance.user = request.user
bookmark_instance.save()
bookmark = bookmark_instance.bookmark
try:
headers = {
"Accept" : "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language" : "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection" : "close",
##"User-Agent": settings.URL_VALIDATOR_USER_AGENT
}
req = urllib2.Request(bookmark.get_favicon_url(force=True), None, headers)
u = urllib2.urlopen(req)
has_favicon = True
except:
has_favicon = False
bookmark.has_favicon = has_favicon
bookmark.favicon_checked = datetime.now()
bookmark.save()
if bookmark_form.should_redirect():
return HttpResponseRedirect(bookmark.url)
else:
request.user.message_set.create(message=_("You have saved bookmark '%(description)s'") % {'description': bookmark_instance.description})
return HttpResponseRedirect(reverse("bookmarks.views.bookmarks"))
else:
initial = {}
if "url" in request.GET:
initial["url"] = request.GET["url"]
if "description" in request.GET:
initial["description"] = request.GET["description"]
if "redirect" in request.GET:
initial["redirect"] = request.GET["redirect"]
if initial:
bookmark_form = BookmarkInstanceForm(initial=initial)
else:
bookmark_form = BookmarkInstanceForm()
bookmarks_add_url = "http://" + Site.objects.get_current().domain + reverse(add)
bookmarklet = "javascript:location.href='%s?url='+encodeURIComponent(location.href)+';description='+encodeURIComponent(document.title)+';redirect=on'" % bookmarks_add_url
return render_to_response("bookmarks/add.html", {
"bookmarklet": bookmarklet,
"bookmark_form": bookmark_form,
}, context_instance=RequestContext(request))
@login_required
def delete(request, bookmark_instance_id):
bookmark_instance = get_object_or_404(BookmarkInstance, id=bookmark_instance_id)
if request.user == bookmark_instance.user:
bookmark_instance.delete()
request.user.message_set.create(message="Bookmark Deleted")
if "next" in request.GET:
next = request.GET["next"]
else:
next = reverse("bookmarks.views.bookmarks")
return HttpResponseRedirect(next)
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from google.api_core.client_options import ClientOptions
import os
import logging
import googleapiclient.discovery
logging.basicConfig()
# In this sample, we will pass all available features in order.
instances = [
[0.2, 660, 1175, 8, 10, 7, 8, 33, 17031081402, 17031330100, 41.892,
-87.613, 41.859, -87.617, 'Credit Card', 'Taxi Affiliation Services'],
[1.0, 300, 545, 9, 22,4 ,32, 8, 17031320100, 17031081500, 41.885,
-87.621, 41.893, -87.626, 'Cash', 'Northwest Management LLC'],
[1.1, 300, 565, 3, 2, 1, 28, 32, 17031833000, 17031839100, 41.885,
-87.657, 41.881, -87.633, 'Credit Card', 'Taxi Affiliation Services']
]
PROJECT_ID = os.getenv('PROJECT_ID')
MODEL_NAME = os.getenv('MODEL_NAME')
MODEL_VERSION = os.getenv('MODEL_VERSION')
REGION = os.getenv('REGION')
logging.info('PROJECT_ID: %s', PROJECT_ID)
logging.info('MODEL_NAME: %s', MODEL_NAME)
logging.info('MODEL_VERSION: %s', MODEL_VERSION)
logging.info('REGION: %s', REGION)
prefix = "{}-ml".format(REGION) if REGION else "ml"
api_endpoint = "https://{}.googleapis.com".format(prefix)
client_options = ClientOptions(api_endpoint=api_endpoint)
service = googleapiclient.discovery.build('ml', 'v1', client_options=client_options)
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME,
MODEL_VERSION)
response = service.projects().predict(
name=name,
body={'instances': instances}
).execute()
if 'error' in response:
logging.error(response['error'])
else:
print(response['predictions'])
|
"""Decoder modules that help interfacing model states with output data.
All decoder modules generate a function that, given a specific model state,
returns the observable data with the same structure as provided to the Encoder.
Decoders can be either fixed functions, decorators, or learned modules.
"""
from typing import Any, Callable, Optional
import gin
import jax.numpy as jnp
from jax_cfd.base import array_utils
from jax_cfd.base import grids
from jax_cfd.ml import physics_specifications
from jax_cfd.ml import towers
from jax_cfd.spectral import utils as spectral_utils
DecodeFn = Callable[[Any], Any] # maps model state to data time slice.
DecoderModule = Callable[..., DecodeFn] # generate DecodeFn closed over args.
TowerFactory = towers.TowerFactory
@gin.register
def identity_decoder(
grid: grids.Grid,
dt: float,
physics_specs: physics_specifications.BasePhysicsSpecs,
) -> DecodeFn:
"""Identity decoder module that returns model state as is."""
del grid, dt, physics_specs # unused.
def decode_fn(inputs):
return inputs
return decode_fn
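# Hypothetical usage sketch (not part of this module): decoders are plain callables from model
# state to data, so identity_decoder can be exercised without a full model. The grid, dt and
# physics_specs arguments are ignored by this particular decoder.
def _example_identity_decode():
  decode_fn = identity_decoder(grid=None, dt=0.1, physics_specs=None)
  state = jnp.ones((4, 4))
  return decode_fn(state)  # returns the state unchanged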
# TODO(dkochkov) generalize this to arbitrary pytrees.
@gin.register
def aligned_array_decoder(
grid: grids.Grid,
dt: float,
physics_specs: physics_specifications.BasePhysicsSpecs,
) -> DecodeFn:
"""Generates decoder that extracts data from GridVariables."""
del grid, dt, physics_specs # unused.
def decode_fn(inputs):
return tuple(x.data for x in inputs)
return decode_fn
@gin.register
def channels_split_decoder(
grid: grids.Grid,
dt: float,
physics_specs: physics_specifications.BasePhysicsSpecs,
) -> DecodeFn:
"""Generates decoder that splits channels into data tuples."""
del grid, dt, physics_specs # unused.
def decode_fn(inputs):
return array_utils.split_axis(inputs, -1)
return decode_fn
@gin.register
def latent_decoder(
grid: grids.Grid,
dt: float,
physics_specs: physics_specifications.BasePhysicsSpecs,
tower_factory: TowerFactory,
num_components: Optional[int] = None,
):
"""Generates trainable decoder that maps latent representation to data tuple.
  The decoder first computes an array of outputs using a network specified by
  `tower_factory` and then splits the channels into `num_components` components.
  Args:
    grid: grid representing spatial discretization of the system.
dt: time step to use for time evolution.
physics_specs: physical parameters of the simulation.
tower_factory: factory that produces trainable tower network module.
num_components: number of data tuples in the data representation of the
state. If None, assumes num_components == grid.ndims. Default is None.
Returns:
decode function that maps latent state `inputs` at given time to a tuple of
`num_components` data arrays representing the same state at the same time.
"""
split_channels_fn = channels_split_decoder(grid, dt, physics_specs)
def decode_fn(inputs):
num_channels = num_components or grid.ndim
decoder_tower = tower_factory(num_channels, grid.ndim, name='decoder')
return split_channels_fn(decoder_tower(inputs))
return decode_fn
@gin.register
def spectral_vorticity_decoder(
grid: grids.Grid,
dt: float,
physics_specs: physics_specifications.BasePhysicsSpecs,
) -> DecodeFn:
"""Solves for velocity and converts into GridVariables."""
del dt, physics_specs # unused.
velocity_solve = spectral_utils.vorticity_to_velocity(grid)
def decode_fn(vorticity_hat):
uhat, vhat = velocity_solve(vorticity_hat)
v = (jnp.fft.irfft2(uhat), jnp.fft.irfft2(vhat))
return v
return decode_fn
|
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help discretise data."""
import logging
from abc import ABC, abstractmethod
from typing import List
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
class AbstractSupervisedDiscretiserMethod(BaseEstimator, ABC):
"""
Base class for advanced discretisation methods
"""
def __init__(self):
self.map_thresholds = {}
self.feat_names = None
@abstractmethod
def fit(
self,
feat_names: List[str],
target: str,
dataframe: pd.DataFrame,
target_continuous: bool,
):
"""
Discretise the features in `feat_names` in such a way that maximises the prediction of `target`.
Args:
feat_names (List[str]): List of feature names to be discretised.
target (str): Name of the target variable - the node that adjusts how `feat_names` will be discretised
dataframe: The full dataset prior to discretisation.
            target_continuous (bool): Boolean indicating whether the target variable is continuous.
Raises:
NotImplementedError: AbstractSupervisedDiscretiserMethod should not be called directly
"""
raise NotImplementedError("The method is not implemented")
def _transform_one_column(self, dataframe_one_column: pd.DataFrame) -> np.array:
"""
Given one "original" feature (continuous), discretise it.
Args:
dataframe_one_column: dataframe with a single continuous feature, to be transformed into discrete
Returns:
Discrete feature, as an np.array of shape (len(df),)
"""
cols = list(dataframe_one_column.columns)
if cols[0] in self.map_thresholds:
split_points = self.map_thresholds[cols[0]]
return np.digitize(dataframe_one_column.values.reshape(-1), split_points)
if cols[0] not in self.feat_names:
logging.warning(
"%s is not in feat_names. The column is left unchanged", cols[0]
)
return dataframe_one_column.values.reshape(-1)
def transform(self, data: pd.DataFrame) -> np.array:
"""
Given one "original" dataframe, discretise it.
Args:
data: dataframe with continuous features, to be transformed into discrete
Returns:
discretised version of the input data
"""
outputs = {}
for col in data.columns:
outputs[col] = self._transform_one_column(data[[col]])
transformed_df = pd.DataFrame.from_dict(outputs)
return transformed_df
def fit_transform(self, *args, **kwargs):
"""
Raises:
NotImplementedError: fit_transform is not implemented
"""
raise NotImplementedError(
"fit_transform is not implemented. Please use .fit() and .transform() separately"
)
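# Illustrative sketch (not part of the original module): np.digitize is how the fitted split
# points stored in map_thresholds turn a continuous column into bin indices. The thresholds
# and values below are made up.
def _example_digitize():
    split_points = np.array([0.5, 1.5])  # two thresholds define three bins
    values = np.array([0.1, 0.7, 2.0])
    return np.digitize(values, split_points)  # array([0, 1, 2])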
|
"""
"""
#
# Set all run arguments
#
BACKEND_NAME = 'aer_simulator'
N_QUBITS = 8
DEPTH = 10
TYPE_CIRCUIT = 3
TYPE_DATASET = 5
N_REPEAT = 100
APPLY_STRATIFY = True
RESCALE_FACTOR = 1.
N_PCA_FEATURES = 0
N_BOOTSTRAPS = 0
CIRCUIT_RANDOM_SEED = None
DATA_RANDOM_SEED = None
CROSSFID_RANDOM_SEED = 53
CIRCUIT_INITIAL_ANGLES = 'zeros'
CROSSFID_MODE = 'RzRy'
N_SHOTS = 8192
N_UNITARIES = 50
DATA_BATCH_SIZE = 10
# user's IBMQ access
HUB = 'ibm-q'
GROUP = 'open'
PROJECT = 'main'
# used to name log files
JOB_TAG = 'randomdata_YZ'
# make output filename
JOB_FILENAME = (
','.join([
BACKEND_NAME,
'n_qubits'+f'{N_QUBITS}',
'depth'+f'{DEPTH}',
'n_shots'+f'{N_SHOTS}',
'n_unitaries'+f'{N_UNITARIES}',
'crossfid_mode'+f'{CROSSFID_MODE}',
'n_repeat'+f'{N_REPEAT}',
])
)
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'json_schema_compiler_tests',
'type': 'static_library',
'variables': {
'chromium_code': 1,
'json_schema_files': [
'any.json',
'additionalProperties.json',
'arrays.json',
'choices.json',
'crossref.json',
'enums.json',
'functionsOnTypes.json',
'objects.json',
'simple_api.json',
],
'idl_schema_files': [
],
'cc_dir': 'tools/json_schema_compiler/test',
'root_namespace': 'test::api',
},
'sources': [
'<@(json_schema_files)',
'<@(idl_schema_files)',
],
'includes': ['../../../build/json_schema_compile.gypi'],
},
],
}
|
from .initializer import initialize_processors
from .registry import register
__all__ = ['register']
|
# -*- coding: utf-8 -*-
"""An-Automated-Traditional-Chinese-Dialogue-Generating-System Main file"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import range
import argparse
import time
import os
import sys
import random
import math
import json
import torch
from torch import optim
import torch.nn as nn
from torch.autograd import Variable
import model
from ops_seq2seq import train, validate, sample
import utils
from utils import check_cuda_for_var, check_directory
parser = argparse.ArgumentParser(description=\
    'Pytorch Traditional Chinese Dialogue Generating System built on Hierarchical RNN.')
parser.add_argument('--data', type=str,
help='location of the data corpus(json file)')
parser.add_argument('--validation_p', type=float, default=0.2,
help='percentage of validation data / all data')
parser.add_argument('--embedsize', type=int, default=250,
help='size of word embeddings')
parser.add_argument('--encoder_hidden', type=int, default=250,
help='number of hidden units per layer in encoder')
parser.add_argument('--decoder_hidden', type=int, default=250,
help='number of hidden units per layer in decoder')
parser.add_argument('--encoder_layer', type=int, default=2,
help='number of layers in encoder')
parser.add_argument('--decoder_layer', type=int, default=2,
help='number of layers in decoder')
parser.add_argument('--tie', dest='tie', action='store_true',
help='tie the weight of embedding and output linear')
parser.add_argument('--no-tie', dest='tie', action='store_false',
help='don\'t tie the weight of embedding and output linear')
parser.set_defaults(tie=True)
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=5.0,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=1000,
help='upper epoch limit')
parser.add_argument('--dropout', type=float, default=0.25,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--seed', type=int, default=55665566,
help='random seed')
parser.add_argument('--teacher', dest='teacher', action='store_true',
help='teacher force')
parser.add_argument('--no-teacher', dest='teacher', action='store_false',
help='no teacher force')
parser.set_defaults(teacher=True)
parser.add_argument('--ss', dest='ss', action='store_true',
help='scheduled sampling')
parser.add_argument('--no-ss', dest='ss', action='store_false',
help='no scheduled sampling')
parser.set_defaults(ss=True)
parser.add_argument('--save', type=str, default='model/',
help='path to save the final model\'s directory')
parser.add_argument('--test', dest='test', action='store_true',
help='test mode')
parser.set_defaults(test=False)
parser.add_argument('--limit', type=int, default=0,
help='limit the size of whole data set')
parser.add_argument('--startepoch', type=int, default=0,
help='epoch\'s number when starting(for scheduled sampling\'s ratio)')
parser.add_argument('--restore', dest='restore', action='store_true',
help='Reload the saved model')
parser.set_defaults(restore=False)
args = parser.parse_args()
torch.manual_seed(args.seed)
random.seed(args.seed)
check_directory(args.save)
# Read data
my_lang, document_list = utils.build_lang(args.data)
max_length = 20
random.shuffle(document_list)
if args.limit != 0:
document_list = document_list[:args.limit]
cut = int(len(document_list) * args.validation_p)
training_data, validation_data = \
document_list[cut:], document_list[:cut]
# Test mode
if args.test:
# Load last model
number = torch.load(os.path.join(args.save, 'checkpoint.pt'))
encoder = torch.load(os.path.join(args.save, 'encoder'+str(number)+'.pt'))
decoder = torch.load(os.path.join(args.save, 'decoder'+str(number)+'.pt'))
if torch.cuda.is_available():
encoder = encoder.cuda()
decoder = decoder.cuda()
for dialog in validation_data:
sample(my_lang, dialog, encoder, decoder, max_length)
time.sleep(3)
sys.exit(0)
learning_rate = args.lr
criterion = nn.NLLLoss()
if not args.restore:
encoder = model.EncoderRNN(len(my_lang.word2index), args.encoder_hidden, \
args.encoder_layer, args.dropout)
decoder = model.DecoderRNNSeq(args.decoder_hidden, len(my_lang.word2index), \
args.decoder_layer, args.dropout, max_length)
else:
print("Load last model in %s" % (args.save))
number = torch.load(os.path.join(args.save, 'checkpoint.pt'))
encoder = torch.load(os.path.join(args.save, 'encoder'+str(number)+'.pt'))
decoder = torch.load(os.path.join(args.save, 'decoder'+str(number)+'.pt'))
if torch.cuda.is_available():
print("Make encoder & decoder cuda")
encoder = encoder.cuda()
encoder.is_cuda = True
decoder = decoder.cuda()
decoder.is_cuda = True
criterion = criterion.cuda()
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)
if args.tie:
# Tying two Embedding matrix and output Linear layer
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
encoder.embedding.weight = decoder.embedding.weight = decoder.out.weight
since = time.time()
best_validation_score = 10000
patient = 10
model_number = 0
teacher_lazy_period = 40
if args.teacher:
teacher_forcing_ratio = 1.
else:
teacher_forcing_ratio = 0.
# Save info. for loss.
save_training_loss = []
save_validation_loss = []
def save_loss(train, val):
with open(os.path.join(args.save, "loss.json"), "w") as outfile:
json.dump([train, val], outfile)
for epoch in range(args.startepoch + 1, args.epochs + 1):
training_loss = 0
iter_since = time.time()
try:
for index, dialog in enumerate(training_data):
if args.ss:
teacher_forcing_ratio = (teacher_lazy_period - epoch + 1) / teacher_lazy_period
if teacher_forcing_ratio < 0.5:
teacher_forcing_ratio = 0.5
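            # Worked example (illustrative, with teacher_lazy_period == 40 as set above):
            #   epoch 1  -> (40 - 1 + 1) / 40 = 1.0   (full teacher forcing)
            #   epoch 21 -> (40 - 21 + 1) / 40 = 0.5
            #   epoch 40 -> (40 - 40 + 1) / 40 = 0.025, raised back to the 0.5 floor above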
training_loss += train(my_lang, criterion, teacher_forcing_ratio,\
dialog, encoder, decoder, \
encoder_optimizer, decoder_optimizer, max_length)
if (index) % 100 == 0:
print(" @ Iter [", index + 1, "/", len(training_data),"] | avg. loss: ", training_loss / (index + 1), \
" | perplexity: ", math.exp(training_loss / (index + 1))," | usage ", time.time() - iter_since, " seconds | teacher_force: ", \
teacher_forcing_ratio)
sample(my_lang, dialog, encoder, decoder, max_length)
iter_since = time.time()
if (index + 1) % 2000 == 0:
val_since = time.time()
validation_score_100 = validate(my_lang, criterion,
validation_data[:100], encoder, decoder, max_length)
print(" @ Val. [", index + 1, "/", len(training_data),"] | avg. val. loss: ", validation_score_100, \
" | perplexity: ", math.exp(validation_score_100)," | usage ", time.time() - val_since, " seconds")
print(" % Best validation score: ", best_validation_score)
if validation_score_100 < best_validation_score:
best_validation_score = validation_score_100
patient = 5
elif patient > 0:
patient -= 1
else:
print("****Learining rate decay****")
learning_rate /= 2.
patient = 10
best_validation_score = validation_score_100
print(" % After validation best validation score: ", best_validation_score)
validation_score = validate(my_lang, criterion, \
validation_data, encoder, decoder, max_length)
save_training_loss.append(training_loss / (index + 1))
save_validation_loss.append(validation_score)
save_loss(save_training_loss, save_validation_loss)
print("# ", epoch, " | ", time.time() - since," seconds | validation loss: ", validation_score, " | validation perplexity: ", \
math.exp(validation_score))
since = time.time()
model_number += 1
print("Saving better model number ",model_number)
best_validation_score = validation_score
torch.save(encoder, os.path.join(args.save, "encoder" + str(model_number) + ".pt"))
torch.save(decoder, os.path.join(args.save, "decoder" + str(model_number) + ".pt"))
torch.save(model_number, os.path.join(args.save, "checkpoint.pt"))
except ValueError:
print(sys.exc_info())
model_number += 1
print("Get stopped, saving the latest model")
torch.save(encoder, os.path.join(args.save, "encoder" + str(model_number) + ".pt"))
torch.save(decoder, os.path.join(args.save, "decoder" + str(model_number) + ".pt"))
torch.save(model_number, os.path.join(args.save, "checkpoint.pt"))
break
|
# -*- coding: utf-8 -*-
"""Each process has an environment block (which may be empty). It
consists of a set of key-value pairs, each of which is a string.
The value string may be formed partly or wholly from other environment
variables using the %envvar% notation. By default, this module will
reinterpret those embedded variables but this can be overridden.
The process environment is derived on startup from a combination
of the system environment variables and the user's environment
variables, some of which are generated automatically by the
system to reflect the user's profile location and home drive etc.
All three environments are available as a dict-like class whose
interface matches the :class:`Env` base class. Each environment
object quacks like a dict in respect of item access, :meth:`Env.get`,
:meth:`Env.keys`, :meth:`Env.items` and :meth:`Env.update` methods
and the system and user objects supply an additional :meth:`Persistent.broadcast`
method which sends a message to top-level windows, such as the shell, to
indicate that the environment has changed.
"""
from __future__ import unicode_literals
import os, sys
import win32api
import win32profile
import win32gui
import win32con
import winerror
from winsys._compat import *
from winsys import core, exc, utils, registry
class x_environment(exc.x_winsys):
"Base exception for all env exceptions"
WINERROR_MAP = {
winerror.ERROR_ENVVAR_NOT_FOUND : exc.x_not_found,
}
wrapped = exc.wrapper(WINERROR_MAP, x_environment)
class _DelimitedText(list):
"""Helper class for values such as PATH and PATHEXT which are
consistently semicolon-delimited text but which can helpfully
    be treated as a list of individual values. Subclassed from
list, it keeps track of the delimited list while exposing
the more familiar Pythonesque list interface.
"""
def __init__(self, env, key, delimiter=";", initialiser=None):
super(_DelimitedText, self).__init__(env[key].split(delimiter) if initialiser is None else initialiser)
self.env = env
self.key = key
self.delimiter = unicode(delimiter)
def _update(self):
self.env[self.key] = self.delimiter.join(self)
def __delitem__(self, *args):
super(_DelimitedText, self).__delitem__(*args)
self._update()
def __delslice__(self, *args):
super(_DelimitedText, self).__delslice__(*args)
self._update()
def __iadd__(self, iterator):
super(_DelimitedText, self).__iadd__(self.munge_item(unicode(i)) for i in iterator)
self._update()
return self
def __setitem__(self, index, item):
super(_DelimitedText, self).__setitem__(index, self.munge_item(unicode(item)))
self._update()
def __setslice__(self, index0, index1, iterator):
        super(_DelimitedText, self).__setslice__(index0, index1, [self.munge_item(unicode(item)) for item in iterator])
self._update()
def append(self, item):
super(_DelimitedText, self).append(self.munge_item(unicode(item)))
self._update()
    def extend(self, iterator):
        super(_DelimitedText, self).extend(self.munge_item(unicode(item)) for item in iterator)
        self._update()
    def insert(self, index, item):
        super(_DelimitedText, self).insert(index, self.munge_item(unicode(item)))
        self._update()
def pop(self, index=-1):
result = super(_DelimitedText, self).pop(index)
self._update()
return result
def remove(self, item):
super(_DelimitedText, self).remove(self.munge_item(unicode(item)))
self._update()
def reverse(self):
super(_DelimitedText, self).reverse()
self._update()
def sort(self):
super(_DelimitedText, self).sort()
self._update()
def munge_item(self, item):
return item
class _DelimitedPath(_DelimitedText):
"""Subclass of delimited text to ensure that valid filesystem paths
are stored in the env var
"""
def munge_item(self, item):
return os.path.normpath(item).rstrip("\\")
class Env(core._WinSysObject):
"""Semi-abstract base class for all environment classes. Outlines
a dict-like interface which relies on subclasses to implement simple
:meth:`_get` and :meth:`_items` methods.
"""
def __getitem__(self, item):
"""Get environment strings like dictionary items::
from winsys import environment
print environment.system()['windir']
"""
raise NotImplementedError
def __setitem__(self, item, value):
"""Set environment strings like dictionary items::
from winsys import environment
environment.user()['winsys'] = 'TEST'
"""
raise NotImplementedError
def __delitem__(self, item):
"""Remove an item from the environment::
from winsys import environment
del environment.process()['winsys']
"""
raise NotImplementedError
def __repr__(self):
return repr(dict(self).items())
def dumped(self, level):
return utils.dumped_dict(dict(self).items(), level)
def keys(self):
"""Yield environment variable names
"""
raise NotImplementedError
def items(self, expand=True):
"""Yield key-value pairs of environment variables
:param expand: whether to expand embedded environment variables [True]
"""
return(
(k, self.expand(v) if expand else v)
for k, v
in self._items()
)
iteritems = items
def _get_path(self):
if self.get("PATH"):
return _DelimitedPath(self, "PATH")
else:
return _DelimitedPath(self, "PATH", initialiser=[])
def _set_path(self, iterator):
self['PATH'] = ";".join(_DelimitedPath(self, "PATH", initialiser=iterator))
def _del_path(self):
del self['PATH']
path = property(_get_path, _set_path, _del_path)
def get(self, item, default=None, expand=True):
"""Return an environment value if it exists, otherwise
`[default]`. This is the only way to get an unexpanded
environment value by setting `expand` to False.
:param item: name of an environment variable
:param default: value to return if no such environment variable exists.
This default is expanded if `expand` is True.
:param expand: whether to expand embedded environment variables [True]
"""
try:
v = self._get(item)
except KeyError:
return default
else:
return self.expand(v) if expand else v
def update(self, dict_initialiser):
"""Update this environment from a dict-like object, typically
another environment::
from winsys import environment
penv = environment.process()
penv.update(environment.system())
"""
for k, v in dict(dict_initialiser).iteritems():
self[k] = v
@staticmethod
def expand(item):
"""Return a version of `item` with internal environment variables
expanded to their corresponding value. This is done automatically
by the functions in this class unless you specify `expand=False`.
"""
return wrapped(win32api.ExpandEnvironmentStrings, unicode(item))
class Process(Env):
"""The environment corresponding to the current process. This is visible
only to the current process and its children (assuming the environment block
is passed). Any changes you make here apply only for the lifetime of this
process and do not affect the permanent user or system environment. See
the :func:`system` and :func:`user` functions for ways to update the
environment permanently.
"""
def __init__(self):
super(Process, self).__init__()
def keys(self):
return (k for k in wrapped(win32profile.GetEnvironmentStrings).keys())
def _items(self):
return (item for item in wrapped(win32profile.GetEnvironmentStrings).iteritems())
def _get(self, item):
return wrapped(win32api.GetEnvironmentVariable, item)
def __getitem__(self, item):
value = self._get(item)
if value is None:
raise KeyError
else:
return unicode(value)
def __setitem__(self, item, value):
if value is None:
wrapped(win32api.SetEnvironmentVariable, item, None)
else:
wrapped(win32api.SetEnvironmentVariable, item, unicode(value))
def __delitem__(self, item):
wrapped(win32api.SetEnvironmentVariable, item, None)
class Persistent(Env):
"""Represent persistent (registry-based) environment variables. These
are held at system and at user level, the latter overriding the former
    when a process environment is put together. Don't instantiate this
class directly: use the :func:`user` and :func:`system` functions.
"""
@staticmethod
def broadcast(timeout_ms=2000):
"""Broadcast a message to all top-level windows informing them that
an environment change has occurred. The message must be sent, not posted,
and times out after `timeout_ms` ms since some top-level windows handle this
badly. NB This is a static method.
"""
win32gui.SendMessageTimeout(
win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE,
0, "Environment",
win32con.SMTO_ABORTIFHUNG, timeout_ms
)
def __init__(self, root):
super(Persistent, self).__init__()
self.registry = registry.registry(root)
def _get(self, item):
try:
return unicode(self.registry.get_value(item))
except exc.x_not_found:
raise KeyError
def keys(self):
return (name for name, value in self.registry.itervalues())
def _items(self):
return list(self.registry.itervalues())
def __getitem__(self, item):
value = self._get(item)
if value is None:
raise KeyError
else:
return value
def __setitem__(self, item, value):
self.registry.set_value(item, unicode(value))
def __delitem__(self, item):
del self.registry[item]
def process():
"""Return a dict-like object representing the environment block of the
current process.
"""
return Process()
def system(machine=None):
"""Return a dict-like object representing the system-level persistent
environment variables, optionally selecting a different machine.
:param machine: name or address of a different machine whose system
environment is to be represented.
"""
ROOT = r"HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
if machine:
root = r"\\%s\%s" % (machine, ROOT)
else:
root = ROOT
return Persistent(root)
def user():
"""Return a dict-like object representing the user-level persistent
environment for the logged-on user.
TODO: include alternate user functionality via logon token
"""
return Persistent("HKCU\Environment")
def broadcast(timeout_ms=2000):
return Persistent.broadcast(timeout_ms=timeout_ms)
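# Hypothetical usage sketch (not part of the original module), left commented out so that
# importing this module stays free of side effects:
#
#     from winsys import environment
#     penv = environment.process()
#     penv["WINSYS_DEMO"] = "1"               # visible to this process and its children
#     print(penv.get("PATH", expand=False))   # raw value, %VAR% references left unexpanded
#     environment.user()["WINSYS_DEMO"] = "1" # persisted under HKCU\Environment
#     environment.broadcast()                 # notify top-level windows of the change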
|
__author__ = 'NoNotCar'
import pygame, sys
pygame.init()
screen = pygame.display.set_mode((640, 704))
import World
import Tiles
import Img
clock = pygame.time.Clock()
selmenu=0
selobjs=[0 for _ in Tiles.tilemenus+Tiles.objmenus]
w=World.World(True)
expimg=Img.img2("Exp")
pexpimg=Img.img2("ExpPen")
bombimg=Img.img2("Bomb")
while True:
kmods=pygame.key.get_mods()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type==pygame.KEYDOWN and event.key==pygame.K_s and kmods&pygame.KMOD_LCTRL:
w.save()
print "SAVED"
elif event.type==pygame.KEYDOWN:
menus=Tiles.tilemenus+Tiles.objmenus
menu=menus[selmenu]
if event.key==pygame.K_w:
selobjs[selmenu]=(selobjs[selmenu]-1)%len(menu)
elif event.key==pygame.K_s:
selobjs[selmenu]=(selobjs[selmenu]+1)%len(menu)
elif event.key==pygame.K_a:
selmenu=(selmenu-1)%len(menus)
elif event.key==pygame.K_d:
selmenu=(selmenu+1)%len(menus)
elif event.key==pygame.K_t:
screen = pygame.display.set_mode((640, 672))
w.save()
wo=World.World(False,"sav")
Img.musplay("Cumulo.ogg")
while not (wo.playerdead or wo.done):
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
sys.exit()
screen.fill((125, 255, 255))
wo.update(events)
wo.render(screen)
pygame.draw.rect(screen,(200,200,200),pygame.Rect(0,640,640,32))
ap=wo.get_activeplayer()
screen.blit(ap.iconv[(0,1)],(0,640))
for n in range(ap.rng):
screen.blit(pexpimg if ap.pen else expimg, (32+n*32,640))
for x in range(ap.bombs):
screen.blit(bombimg,(64+n*32+x*32,640))
pygame.display.flip()
clock.tick(60)
screen = pygame.display.set_mode((640, 704))
pygame.mixer.music.stop()
seltype=selmenu<len(Tiles.tilemenus)
if pygame.mouse.get_pressed()[0]:
mpos=pygame.mouse.get_pos()
if mpos[0]<640 and mpos[1]<640:
if seltype:
w.t[mpos[0]//32][mpos[1]//32]=0 if kmods&pygame.KMOD_LSHIFT else Tiles.tilemenus[selmenu][selobjs[selmenu]]+1
else:
w.o[mpos[0]//32][mpos[1]//32]=0 if kmods&pygame.KMOD_LSHIFT else Tiles.objmenus[selmenu-len(Tiles.tilemenus)][selobjs[selmenu]]+1
screen.fill((125, 255, 255))
w.render(screen)
for n in range(19):
pygame.draw.line(screen,(125,125,125),(n*32+32,0),(n*32+32,640),2)
pygame.draw.line(screen,(125,125,125),(0,n*32+32),(640,n*32+32),2)
for n,tm in enumerate(Tiles.tilemenus):
screen.blit(Tiles.tiles[tm[selobjs[n]]].img,(n*32,656))
for on,om in enumerate(Tiles.objmenus):
screen.blit(Tiles.eobjs[om[selobjs[n+on+1]]][0],(n*32+64+on*32,656))
off=selmenu*32+(0 if selmenu<len(Tiles.tilemenus) else 32)
pygame.draw.polygon(screen,(0,0,0),[(off+16,688),(off,704),(off+32,704)])
pygame.display.flip()
clock.tick(60)
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import logging
import sys
SLOW = False
PHONY_ADD_JOBS = True
def get_init_args():
return {
"kwargs": {
"project": "foo",
}
}
def get_jobs():
return []
def get_set_jobs_args():
jobs = [{
"command": "echo foo",
"pipeline_name": "foo",
"ci_stage": "build",
}]
return {
"kwargs": {
"from_string": json.dumps(jobs)
}
}
def get_run_build_args():
return {}
def check_run(run):
job = run["pipelines"][0]["ci_stages"][0]["jobs"][0]
return job["stdout"][0].strip() == "foo"
|
#!/usr/bin/python3
import sqlite3
import time
import praw
import prawcore
import requests
import os
import datetime
import Config
import logging
import re
import dateparser
import yaml
os.environ['TZ'] = 'UTC'
from bs4 import BeautifulSoup
reddit = praw.Reddit(client_id=Config.cid,
client_secret=Config.secret,
password=Config.password,
user_agent=Config.agent,
username=Config.user)
subreddit = reddit.subreddit(Config.subreddit)
apppath='./'
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=apppath+'reddit_response.log',
filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
class Error(Exception):
"""Base class"""
pass
class LinkError(Error):
"""Could not parse the URL"""
pass
# make an empty file for first run
f = open(apppath+"postids.txt","a+")
f.close()
def getsteamexpiry(steamurl):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}
cookies = {
'wants_mature_content': '1',
'birthtime': '-2148631199',
'lastagecheckage': '1-0-1902' }
r = requests.get(steamurl, headers=headers, cookies=cookies )
# Offer ends 13 June</p>
if re.search("\$DiscountCountdown", r.text) is not None:
match1 = re.search("\$DiscountCountdown, ([\d]+)", r.text)
return match1.group(1)
elif re.search("Offer ends ([\w\ ]+)</p>", r.text) is not None:
match1 = re.search("Offer ends ([\w\ ]+)</p>", r.text)
enddate= dateparser.parse( "10am " + match1.group(1) , settings={'PREFER_DATES_FROM': 'future', 'TIMEZONE': 'US/Pacific','TO_TIMEZONE': 'UTC' } )
return time.mktime( enddate.timetuple() )
return
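# Illustrative sketch (not part of the bot): how the dateparser call above resolves a relative
# "Offer ends ..." string scraped from a Steam page. The date string here is made up.
def _example_parse_offer_end():
    enddate = dateparser.parse("10am 13 June",
                               settings={'PREFER_DATES_FROM': 'future',
                                         'TIMEZONE': 'US/Pacific', 'TO_TIMEZONE': 'UTC'})
    return time.mktime(enddate.timetuple())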
def logID(postid):
f = open(apppath+"postids.txt","a+")
f.write(postid + "\n")
f.close()
def respond(submission):
con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)
cursorObj = con.cursor()
cursorObj.execute('DELETE from schedules WHERE postid = "' + submission.id + '"' )
cursorObj.execute('INSERT into schedules(postid, schedtime) values(?,?)',(submission.id,(submission.created_utc + 2592000)) )
con.commit()
con.close()
post_footer = True
footer = """
If this deal has expired, you can reply to this comment with `"""+Config.expired_trigger+"""` to automatically close it.
If this deal has been mistakenly closed or has been restocked, you can open it again by replying with `"""+Config.restore_trigger+"""`.
[^(more information)](https://www.reddit.com/r/GameDeals/wiki/gamedealsbot)
^(Note: To prevent abuse, requests are logged publicly. Intentional abuse will likely result in a ban.)
"""
reply_reason = "Generic Post"
reply_text = ""
### Find all URLS inside a .self post
urls = []
if submission.author.name == "gamedealsmod":
logging.info("gamedealsmod posted, skipping: " + submission.title)
return
if submission.is_self:
urls = re.findall('(?:(?:https?):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+', submission.selftext)
if len(urls) == 0:
logging.info("NO LINK FOUND skipping: " + submission.title)
logID(submission.id)
return
# remove duplicate URLs
unique_urls = []
for url in urls:
if url in unique_urls:
continue
else:
unique_urls.append(url)
url = urls[0] ### use only the first url
### get url for link post
if not submission.is_self:
url = submission.url
if "epicgames.com" in url.lower():
if "free" in submission.title.lower():
postdate = dateparser.parse( str(submission.created_utc) , settings={'TO_TIMEZONE': 'US/Pacific', 'TIMEZONE': 'UTC' } )
# if postdate.hour < 8 or postdate.hour > 9: # used for xmas rule, before being permanently disabled via AM to block community posting due to excessive need to moderate
if postdate.weekday() == 3 and postdate.hour < 8: # removed for EGS's 15 days of games to make the rule more active
logging.info( "removing early EGS post | https://redd.it/" + submission.id )
reply = "* We require a deal to be live before posting a submission."
reply = "* Either this deal has already been submitted,\n\n* Or this deal has been submitted before it is live."
comment = submission.reply("Unfortunately, your submission has been removed for the following reasons:\n\n" +
reply +
"\n\nI am a bot, and this action was performed automatically. Please [contact the moderators of this subreddit](https://www.reddit.com/message/compose/?to=/r/GameDeals) if you have any questions or concerns."
)
submission.mod.remove()
comment.mod.distinguish(sticky=True)
logID(submission.id)
return
if re.search("store.steampowered.com/(sub|app)", url) is not None:
if submission.author_flair_css_class is not None and submission.is_self:
return
r = requests.get( url )
if re.search("WEEK LONG DEAL", r.text) is not None:
today = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
monday = today - datetime.timedelta(days=today.weekday())
datetext = monday.strftime('%Y%m%d')
con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)
cursorObj = con.cursor()
cursorObj.execute('SELECT * FROM weeklongdeals WHERE week = ' + datetext )
rows = cursorObj.fetchall()
if len(rows) == 0:
removereason = "* It appears to be a part of the Weeklong deals. \n\nAs there are multiple games on sale, please post a thread with more games in the title [with this link](https://store.steampowered.com/search/?filter=weeklongdeals).\n\nIf you are the developer or publisher of this game, please leave a detailed disclosure as a top level comment as per [Rule 9](https://www.reddit.com/r/GameDeals/wiki/rules#wiki_9._developers_and_publishers), then [contact the mods for approval](https://www.reddit.com/message/compose?to=%2Fr%2FGameDeals)."
else:
removereason = "* It appears to be a part of the [Weeklong deals](https://redd.it/" + rows[0][2] + "). \n\nAs there are multiple games on sale, please include a comment within the existing thread to discuss this deal.\n\nIf you are the developer or publisher of this game, please leave a detailed disclosure as a top level comment as per [Rule 9](https://www.reddit.com/r/GameDeals/wiki/rules#wiki_9._developers_and_publishers), then [contact the mods for approval](https://www.reddit.com/message/compose?to=%2Fr%2FGameDeals)."
comment = submission.reply("Unfortunately, your submission has been removed for the following reasons:\n\n" +
removereason +
"\n\nI am a bot, and this action was performed automatically. Please [contact the moderators of this subreddit](https://www.reddit.com/message/compose/?to=/r/GameDeals) if you have any questions or concerns."
)
comment.mod.distinguish(sticky=True)
submission.mod.remove()
return
getexp = getsteamexpiry( url )
if getexp is not None:
try:
con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)
cursorObj = con.cursor()
cursorObj.execute('DELETE from schedules WHERE postid = "' + submission.id + '"' )
cursorObj.execute('INSERT into schedules(postid, schedtime) values(?,?)',(submission.id,getexp) )
con.commit()
con.close()
logging.info("[Steam] | " + submission.title + " | https://redd.it/" + submission.id )
logging.info("setting up schedule: bot for: " + submission.id)
reply_reason = "Steam Game"
post_footer = False
#reply_text = "^(automatic deal expiry set for " + datetime.datetime.fromtimestamp(int(getexp)).strftime('%Y-%m-%d %H:%M:%S') + " UTC)\n\n"
except:
pass
    try:
        rules = list(yaml.safe_load_all( reddit.subreddit('gamedeals').wiki['gamedealsbot-storenotes'].content_md ))
        respond.working_rules = rules
    except Exception:
        # fall back to the last successfully fetched rules (empty if none have been fetched yet)
        rules = getattr(respond, 'working_rules', [])
for rule in rules:
if rule is not None:
if re.search( rule['match'] , url ):
if "disabled" not in rule or rule['disabled'] == False:
reply_reason = rule['reply_reason']
reply_text = rule['reply']
if "match-group" in rule:
search1 = re.search( rule['match'] , url)
match1 = search1.group(rule['match-group'])
                        reply_text = reply_text.replace('{{match}}', match1)
if post_footer:
        if reply_text != "":
comment = submission.reply(reply_text+"\n\n*****\n\n"+footer)
else:
comment = submission.reply(footer)
comment.mod.distinguish(sticky=True)
logging.info("Replied to: " + submission.title + " Reason: " + reply_reason)
logID(submission.id)
return
#submission = reddit.submission("qiixoa")
#submission = reddit.submission("qijjlf")
#submission = reddit.submission("qijsq0")
#respond( submission )
#exit()
while True:
try:
logging.info("Initializing bot...")
for submission in subreddit.stream.submissions():
if submission.created < int(time.time()) - 86400:
continue
if submission.title[0:1].lower() == "[" or submission.title[0:1].lower() == "[":
if submission.id in open(apppath+'postids.txt').read():
continue
#logging.info("Week: "+time.strftime('%Y%W'))
#logging.info("Day: "+time.strftime('%Y%m%d'))
#logging.info("User: "+submission.author.name)
donotprocess=False
### handle weeklong deals
if re.search("steampowered.com.*?filter=weeklongdeals", submission.url) is not None:
con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)
today = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
monday = today - datetime.timedelta(days=today.weekday())
datetext = monday.strftime('%Y%m%d')
cursorObj = con.cursor()
cursorObj.execute('SELECT * FROM weeklongdeals WHERE week = ' + datetext )
rows = cursorObj.fetchall()
if len(rows) == 0:
today = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
monday = today - datetime.timedelta(days=today.weekday())
cursorObj.execute('INSERT INTO weeklongdeals (week, post) VALUES (?, ?)', (monday.strftime('%Y%m%d'), submission.id))
con.commit()
###
### Weekly Post Limit
if Config.WeeklyPostLimit > 0:
currentweek = time.strftime('%Y%W')
con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)
cursorObj = con.cursor()
cursorObj.execute('SELECT * FROM weeklyposts WHERE username = "'+submission.author.name+'" AND currentweek = '+currentweek)
rows = cursorObj.fetchall()
                    if len(rows) == 0:
cursorObj.execute('INSERT INTO weeklyposts(username, postcount, currentweek) VALUES("'+submission.author.name+'",1,'+currentweek+')')
con.commit()
else:
curcount = rows[0][2]
if int(curcount) > int(Config.WeeklyPostLimit):
donotprocess=True
logging.info(submission.author.name+' is over their weekly post limit')
submission.mod.remove()
comment = submission.reply("Thank you for your submission, but you have reached your weekly post limit\n\n^^^^^\n\nYou may contact the modderators if you feel you are being picked on")
comment.mod.distinguish(sticky=True)
else:
curcount=curcount+1
cursorObj.execute("UPDATE weeklyposts SET postcount = " + str(curcount) + ' WHERE id = ' + str(rows[0][0]))
con.commit()
con.close()
###
### Daily Post Limit
if Config.DailyPostLimit > 0:
currentday = time.strftime('%Y%m%d')
con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)
cursorObj = con.cursor()
cursorObj.execute('SELECT * FROM dailyposts WHERE username = "'+submission.author.name+'" AND currentday = '+currentday)
rows = cursorObj.fetchall()
                    if len(rows) == 0:
cursorObj.execute('INSERT INTO dailyposts(username, postcount, currentday) VALUES("'+submission.author.name+'",1,'+currentday+')')
con.commit()
else:
curcount = rows[0][2]
if int(curcount) > int(Config.DailyPostLimit):
donotprocess=True
logging.info(submission.author.name+' is over their daily post limit')
submission.mod.remove()
comment = submission.reply("Thank you for your submission, but you have reached your daily post limit\n\n^^^^^\n\nYou may contact the modderators if you feel you are being picked on")
comment.mod.distinguish(sticky=True)
else:
curcount=curcount+1
cursorObj.execute("UPDATE dailyposts SET postcount = " + str(curcount) + ' WHERE id = ' + str(rows[0][0]))
con.commit()
                    con.close()
###
for top_level_comment in submission.comments:
try:
if top_level_comment.author and top_level_comment.author.name == Config.user:
logID(submission.id)
break
except AttributeError:
pass
else: # no break before, so no comment from GDB
if not donotprocess:
respond(submission)
continue
except (prawcore.exceptions.RequestException, prawcore.exceptions.ResponseException):
logging.info("Error connecting to reddit servers. Retrying in 1 minute...")
time.sleep(60)
except praw.exceptions.APIException:
logging.info("Rate limited, waiting 5 seconds")
time.sleep(5)
|
# Copyright (C) 2012-2013 Andy Balaam and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from libpepper.values import PepValue
class PepTuple( PepValue ):
def __init__( self, items ):
PepValue.__init__( self )
self.items = items
def construction_args( self ):
return ( self.items, )
def do_evaluate( self, env ):
return PepTuple( tuple( item.evaluate( env ) for item in self.items ) )
|
# import os
# import boto3
# import json
# import sys
# import time
#
# AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
# AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
# region_name = "us-west-1"
#
#
# class VideoDetect:
# """Analyze videos using Rekognition Video API."""
#
# rek = boto3.client("rekognition", region_name)
# sqs = boto3.client("sqs", region_name)
# sns = boto3.client("sns", region_name)
# startJobId = ""
# queueUrl = ""
# snsTopicArn = ""
# processType = ""
#
# def __init__(self, role, bucket, video):
# self.roleArn = role
# self.bucket = bucket
# self.video = video
#
# def GetResultsFaces(self, jobId):
# """
# Return an array of detected faces (Faces) sorted by the time the faces were detected.
# Get the results of face detection by calling get_face_detection().
#
# Expected output:
# Emotions: [
# {'Type': string, 'Confidence': number},
# ]
# """
# maxResults = 30
# paginationToken = ""
# finished = False
#
# while finished == False:
# response = self.rek.get_face_detection(
# JobId=jobId, MaxResults=maxResults, NextToken=paginationToken
# )
#
# for faceDetection in response["Faces"]:
# max = faceDetection["Face"]["Emotions"][0]
# for emotion in faceDetection["Face"]["Emotions"]:
# if emotion["Confidence"] > max["Confidence"]:
# max = emotion
# print(max)
# print()
#
# if "NextToken" in response:
# paginationToken = response["NextToken"]
# else:
# finished = True
#
# def GetResultsPersons(self, jobId):
# """Get person tracking information by calling get_person_tracking()."""
# maxResults = 30
# paginationToken = ""
# finished = False
#
# while finished is False:
# response = self.rek.get_person_tracking(
# JobId=jobId, MaxResults=maxResults, NextToken=paginationToken
# )
#
# print(response["VideoMetadata"]["Codec"])
# print(str(response["VideoMetadata"]["DurationMillis"]))
# print(response["VideoMetadata"]["Format"])
# print(response["VideoMetadata"]["FrameRate"])
#
# for personDetection in response["Persons"]:
# print("Index: " + str(personDetection["Person"]["Index"]))
# print("Timestamp: " + str(personDetection["Timestamp"]))
# print()
#
# if "NextToken" in response:
# paginationToken = response["NextToken"]
# else:
# finished = True
#
# def CreateTopicandQueue(self):
# """Create a topic to which notifications can be published."""
# millis = str(int(round(time.time() * 1000)))
#
# # Create SNS topic
# snsTopicName = "AmazonRekognition-TinyDoor" + millis
#
# topicResponse = self.sns.create_topic(Name=snsTopicName)
# self.snsTopicArn = topicResponse["TopicArn"]
#
# # create SQS queue
# sqsQueueName = "AmazonRekognitionQueue" + millis
# self.sqs.create_queue(QueueName=sqsQueueName)
# self.queueUrl = self.sqs.get_queue_url(QueueName=sqsQueueName)["QueueUrl"]
#
# attribs = self.sqs.get_queue_attributes(
# QueueUrl=self.queueUrl, AttributeNames=["QueueArn"]
# )["Attributes"]
#
# sqsQueueArn = attribs["QueueArn"]
#
# # Subscribe SQS queue to SNS topic
# self.sns.subscribe(
# TopicArn=self.snsTopicArn, Protocol="sqs", Endpoint=sqsQueueArn
# )
#
# # Authorize SNS to write SQS queue
# policy = """{{
# "Version":"2012-10-17",
# "Statement":[
# {{
# "Sid":"MyPolicy",
# "Effect":"Allow",
# "Principal" : {{"AWS" : "*"}},
# "Action":"SQS:SendMessage",
# "Resource": "{}",
# "Condition":{{
# "ArnEquals":{{
# "aws:SourceArn": "{}"
# }}
# }}
# }}
# ]
# }}""".format(
# sqsQueueArn, self.snsTopicArn
# )
#
# response = self.sqs.set_queue_attributes(
# QueueUrl=self.queueUrl, Attributes={"Policy": policy}
# )
#
# def DeleteTopicandQueue(self):
# """Deletes a topic and all its subscriptions."""
# self.sqs.delete_queue(QueueUrl=self.queueUrl)
# self.sns.delete_topic(TopicArn=self.snsTopicArn)
#
# def main(self):
# """
# Start analysis of video in specified bucket.
# Face detection is started by a call to start_face_detection.
# """
# jobFound = False
# response = self.rek.start_face_detection(
# Video={"S3Object": {"Bucket": self.bucket, "Name": self.video}},
# NotificationChannel={
# "RoleArn": self.roleArn,
# "SNSTopicArn": self.snsTopicArn,
# },
# FaceAttributes="ALL",
# )
#
# # response = self.rek.start_person_tracking(Video={'S3Object':{'Bucket':self.bucket,'Name':self.video}},
# # NotificationChannel={'RoleArn':self.roleArn, 'SNSTopicArn':self.snsTopicArn})
#
# print("Start Job Id: " + response["JobId"])
# dotLine = 0
# while jobFound is False:
# sqsResponse = self.sqs.receive_message(
# QueueUrl=self.queueUrl,
# MessageAttributeNames=["ALL"],
# MaxNumberOfMessages=10,
# )
#
# if sqsResponse:
# if "Messages" not in sqsResponse:
# if dotLine < 20:
# print(".", end="")
# dotLine = dotLine + 1
# else:
# print()
# dotLine = 0
# sys.stdout.flush()
# continue
#
# for message in sqsResponse["Messages"]:
# notification = json.loads(message["Body"])
# rekMessage = json.loads(notification["Message"])
# print(rekMessage["JobId"])
# print(rekMessage["Status"])
# if str(rekMessage["JobId"]) == response["JobId"]:
# print("Matching Job Found:" + rekMessage["JobId"])
# jobFound = True
# self.GetResultsFaces(rekMessage["JobId"])
# self.sqs.delete_message(
# QueueUrl=self.queueUrl,
# ReceiptHandle=message["ReceiptHandle"],
# )
# else:
# print(
# "Job didn't match:"
# + str(rekMessage["JobId"])
# + " : "
# + str(response["JobId"])
# )
# # Delete the unknown message. Consider sending to dead letter queue
# self.sqs.delete_message(
# QueueUrl=self.queueUrl, ReceiptHandle=message["ReceiptHandle"]
# )
#
# print("done")
#
#
# if __name__ == "__main__":
# roleArn = "arn:aws:iam::623782584215:role/tinydoor-rekognition"
# bucket = "tinydoor-client-uploads"
# video = "emotion-test/Screen Recording 2020-06-28 at 12.52.49 PM.mov"
#
# analyzer = VideoDetect(roleArn, bucket, video)
# analyzer.CreateTopicandQueue()
# analyzer.main()
# analyzer.DeleteTopicandQueue()
|
import logging
import sys
import traceback
from functools import (
partial,
)
from pathlib import (
Path,
)
import jinete as jit
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
logger.info("Starting...")
file_path = Path(sys.argv[1])
solver = jit.Solver(
loader_kwargs={"file_path": file_path},
algorithm=jit.GraspAlgorithm,
algorithm_kwargs={"first_solution_kwargs": {"episodes": 1, "randomized_size": 2}, "episodes": 5},
storer=jit.StorerSet,
storer_kwargs={
"storer_cls_set": {
jit.PromptStorer,
partial(jit.GraphPlotStorer, file_path=file_path.parent / f"{file_path.name}.png"),
partial(jit.FileStorer, file_path=file_path.parent / f"{file_path.name}.output"),
}
},
)
result = solver.solve() # noqa
logger.info("Finished...")
if __name__ == "__main__":
try:
main()
    except Exception:
        traceback.print_exc()
        raise
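# Hypothetical invocation sketch (the script filename below is illustrative); sys.argv[1] must point
# to a jinete-compatible problem instance file:
#     python solve_instance.py /path/to/instance
# With the storers configured above, the solution is printed to the console and also written next to
# the input file as "<instance name>.png" and "<instance name>.output".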
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/common/user_lists.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.enums import customer_match_upload_key_type_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_customer__match__upload__key__type__pb2
from google.ads.google_ads.v6.proto.enums import user_list_combined_rule_operator_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__combined__rule__operator__pb2
from google.ads.google_ads.v6.proto.enums import user_list_crm_data_source_type_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__crm__data__source__type__pb2
from google.ads.google_ads.v6.proto.enums import user_list_date_rule_item_operator_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__date__rule__item__operator__pb2
from google.ads.google_ads.v6.proto.enums import user_list_logical_rule_operator_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__logical__rule__operator__pb2
from google.ads.google_ads.v6.proto.enums import user_list_number_rule_item_operator_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__number__rule__item__operator__pb2
from google.ads.google_ads.v6.proto.enums import user_list_prepopulation_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__prepopulation__status__pb2
from google.ads.google_ads.v6.proto.enums import user_list_rule_type_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__rule__type__pb2
from google.ads.google_ads.v6.proto.enums import user_list_string_rule_item_operator_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__string__rule__item__operator__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/common/user_lists.proto',
package='google.ads.googleads.v6.common',
syntax='proto3',
serialized_options=b'\n\"com.google.ads.googleads.v6.commonB\016UserListsProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V6.Common\312\002\036Google\\Ads\\GoogleAds\\V6\\Common\352\002\"Google::Ads::GoogleAds::V6::Common',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n/google/ads/googleads/v6/common/user_lists.proto\x12\x1egoogle.ads.googleads.v6.common\x1a\x42google/ads/googleads/v6/enums/customer_match_upload_key_type.proto\x1a\x44google/ads/googleads/v6/enums/user_list_combined_rule_operator.proto\x1a\x42google/ads/googleads/v6/enums/user_list_crm_data_source_type.proto\x1a\x45google/ads/googleads/v6/enums/user_list_date_rule_item_operator.proto\x1a\x43google/ads/googleads/v6/enums/user_list_logical_rule_operator.proto\x1aGgoogle/ads/googleads/v6/enums/user_list_number_rule_item_operator.proto\x1a\x42google/ads/googleads/v6/enums/user_list_prepopulation_status.proto\x1a\x37google/ads/googleads/v6/enums/user_list_rule_type.proto\x1aGgoogle/ads/googleads/v6/enums/user_list_string_rule_item_operator.proto\x1a\x1cgoogle/api/annotations.proto\"E\n\x13SimilarUserListInfo\x12\x1b\n\x0eseed_user_list\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x11\n\x0f_seed_user_list\"\x9b\x02\n\x14\x43rmBasedUserListInfo\x12\x13\n\x06\x61pp_id\x18\x04 \x01(\tH\x00\x88\x01\x01\x12q\n\x0fupload_key_type\x18\x02 \x01(\x0e\x32X.google.ads.googleads.v6.enums.CustomerMatchUploadKeyTypeEnum.CustomerMatchUploadKeyType\x12p\n\x10\x64\x61ta_source_type\x18\x03 \x01(\x0e\x32V.google.ads.googleads.v6.enums.UserListCrmDataSourceTypeEnum.UserListCrmDataSourceTypeB\t\n\x07_app_id\"\xc0\x01\n\x10UserListRuleInfo\x12W\n\trule_type\x18\x01 \x01(\x0e\x32\x44.google.ads.googleads.v6.enums.UserListRuleTypeEnum.UserListRuleType\x12S\n\x10rule_item_groups\x18\x02 \x03(\x0b\x32\x39.google.ads.googleads.v6.common.UserListRuleItemGroupInfo\"e\n\x19UserListRuleItemGroupInfo\x12H\n\nrule_items\x18\x01 \x03(\x0b\x32\x34.google.ads.googleads.v6.common.UserListRuleItemInfo\"\xc3\x02\n\x14UserListRuleItemInfo\x12\x11\n\x04name\x18\x05 \x01(\tH\x01\x88\x01\x01\x12V\n\x10number_rule_item\x18\x02 \x01(\x0b\x32:.google.ads.googleads.v6.common.UserListNumberRuleItemInfoH\x00\x12V\n\x10string_rule_item\x18\x03 \x01(\x0b\x32:.google.ads.googleads.v6.common.UserListStringRuleItemInfoH\x00\x12R\n\x0e\x64\x61te_rule_item\x18\x04 \x01(\x0b\x32\x38.google.ads.googleads.v6.common.UserListDateRuleItemInfoH\x00\x42\x0b\n\trule_itemB\x07\n\x05_name\"\xd8\x01\n\x18UserListDateRuleItemInfo\x12n\n\x08operator\x18\x01 \x01(\x0e\x32\\.google.ads.googleads.v6.enums.UserListDateRuleItemOperatorEnum.UserListDateRuleItemOperator\x12\x12\n\x05value\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x1b\n\x0eoffset_in_days\x18\x05 \x01(\x03H\x01\x88\x01\x01\x42\x08\n\x06_valueB\x11\n\x0f_offset_in_days\"\xae\x01\n\x1aUserListNumberRuleItemInfo\x12r\n\x08operator\x18\x01 \x01(\x0e\x32`.google.ads.googleads.v6.enums.UserListNumberRuleItemOperatorEnum.UserListNumberRuleItemOperator\x12\x12\n\x05value\x18\x03 \x01(\x01H\x00\x88\x01\x01\x42\x08\n\x06_value\"\xae\x01\n\x1aUserListStringRuleItemInfo\x12r\n\x08operator\x18\x01 \x01(\x0e\x32`.google.ads.googleads.v6.enums.UserListStringRuleItemOperatorEnum.UserListStringRuleItemOperator\x12\x12\n\x05value\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_value\"\xa0\x02\n\x18\x43ombinedRuleUserListInfo\x12\x46\n\x0cleft_operand\x18\x01 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.UserListRuleInfo\x12G\n\rright_operand\x18\x02 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.UserListRuleInfo\x12s\n\rrule_operator\x18\x03 \x01(\x0e\x32\\.google.ads.googleads.v6.enums.UserListCombinedRuleOperatorEnum.UserListCombinedRuleOperator\"\xaa\x01\n\x1c\x44\x61teSpecificRuleUserListInfo\x12>\n\x04rule\x18\x01 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.UserListRuleInfo\x12\x17\n\nstart_date\x18\x04 
\x01(\tH\x00\x88\x01\x01\x12\x15\n\x08\x65nd_date\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\r\n\x0b_start_dateB\x0b\n\t_end_date\"\\\n\x1a\x45xpressionRuleUserListInfo\x12>\n\x04rule\x18\x01 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.UserListRuleInfo\"\xcd\x03\n\x15RuleBasedUserListInfo\x12x\n\x14prepopulation_status\x18\x01 \x01(\x0e\x32Z.google.ads.googleads.v6.enums.UserListPrepopulationStatusEnum.UserListPrepopulationStatus\x12[\n\x17\x63ombined_rule_user_list\x18\x02 \x01(\x0b\x32\x38.google.ads.googleads.v6.common.CombinedRuleUserListInfoH\x00\x12\x64\n\x1c\x64\x61te_specific_rule_user_list\x18\x03 \x01(\x0b\x32<.google.ads.googleads.v6.common.DateSpecificRuleUserListInfoH\x00\x12_\n\x19\x65xpression_rule_user_list\x18\x04 \x01(\x0b\x32:.google.ads.googleads.v6.common.ExpressionRuleUserListInfoH\x00\x42\x16\n\x14rule_based_user_list\"]\n\x13LogicalUserListInfo\x12\x46\n\x05rules\x18\x01 \x03(\x0b\x32\x37.google.ads.googleads.v6.common.UserListLogicalRuleInfo\"\xda\x01\n\x17UserListLogicalRuleInfo\x12l\n\x08operator\x18\x01 \x01(\x0e\x32Z.google.ads.googleads.v6.enums.UserListLogicalRuleOperatorEnum.UserListLogicalRuleOperator\x12Q\n\rrule_operands\x18\x02 \x03(\x0b\x32:.google.ads.googleads.v6.common.LogicalUserListOperandInfo\"B\n\x1aLogicalUserListOperandInfo\x12\x16\n\tuser_list\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0c\n\n_user_list\"X\n\x11\x42\x61sicUserListInfo\x12\x43\n\x07\x61\x63tions\x18\x01 \x03(\x0b\x32\x32.google.ads.googleads.v6.common.UserListActionInfo\"c\n\x12UserListActionInfo\x12\x1b\n\x11\x63onversion_action\x18\x03 \x01(\tH\x00\x12\x1c\n\x12remarketing_action\x18\x04 \x01(\tH\x00\x42\x12\n\x10user_list_actionB\xe9\x01\n\"com.google.ads.googleads.v6.commonB\x0eUserListsProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V6.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V6\\Common\xea\x02\"Google::Ads::GoogleAds::V6::Commonb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_customer__match__upload__key__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__combined__rule__operator__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__crm__data__source__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__date__rule__item__operator__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__logical__rule__operator__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__number__rule__item__operator__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__prepopulation__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__rule__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__string__rule__item__operator__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_SIMILARUSERLISTINFO = _descriptor.Descriptor(
name='SimilarUserListInfo',
full_name='google.ads.googleads.v6.common.SimilarUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='seed_user_list', full_name='google.ads.googleads.v6.common.SimilarUserListInfo.seed_user_list', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_seed_user_list', full_name='google.ads.googleads.v6.common.SimilarUserListInfo._seed_user_list',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=730,
serialized_end=799,
)
_CRMBASEDUSERLISTINFO = _descriptor.Descriptor(
name='CrmBasedUserListInfo',
full_name='google.ads.googleads.v6.common.CrmBasedUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='app_id', full_name='google.ads.googleads.v6.common.CrmBasedUserListInfo.app_id', index=0,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='upload_key_type', full_name='google.ads.googleads.v6.common.CrmBasedUserListInfo.upload_key_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data_source_type', full_name='google.ads.googleads.v6.common.CrmBasedUserListInfo.data_source_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_app_id', full_name='google.ads.googleads.v6.common.CrmBasedUserListInfo._app_id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=802,
serialized_end=1085,
)
_USERLISTRULEINFO = _descriptor.Descriptor(
name='UserListRuleInfo',
full_name='google.ads.googleads.v6.common.UserListRuleInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='rule_type', full_name='google.ads.googleads.v6.common.UserListRuleInfo.rule_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule_item_groups', full_name='google.ads.googleads.v6.common.UserListRuleInfo.rule_item_groups', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1088,
serialized_end=1280,
)
_USERLISTRULEITEMGROUPINFO = _descriptor.Descriptor(
name='UserListRuleItemGroupInfo',
full_name='google.ads.googleads.v6.common.UserListRuleItemGroupInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='rule_items', full_name='google.ads.googleads.v6.common.UserListRuleItemGroupInfo.rule_items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1282,
serialized_end=1383,
)
_USERLISTRULEITEMINFO = _descriptor.Descriptor(
name='UserListRuleItemInfo',
full_name='google.ads.googleads.v6.common.UserListRuleItemInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v6.common.UserListRuleItemInfo.name', index=0,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number_rule_item', full_name='google.ads.googleads.v6.common.UserListRuleItemInfo.number_rule_item', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='string_rule_item', full_name='google.ads.googleads.v6.common.UserListRuleItemInfo.string_rule_item', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='date_rule_item', full_name='google.ads.googleads.v6.common.UserListRuleItemInfo.date_rule_item', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='rule_item', full_name='google.ads.googleads.v6.common.UserListRuleItemInfo.rule_item',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_name', full_name='google.ads.googleads.v6.common.UserListRuleItemInfo._name',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1386,
serialized_end=1709,
)
_USERLISTDATERULEITEMINFO = _descriptor.Descriptor(
name='UserListDateRuleItemInfo',
full_name='google.ads.googleads.v6.common.UserListDateRuleItemInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='operator', full_name='google.ads.googleads.v6.common.UserListDateRuleItemInfo.operator', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='google.ads.googleads.v6.common.UserListDateRuleItemInfo.value', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset_in_days', full_name='google.ads.googleads.v6.common.UserListDateRuleItemInfo.offset_in_days', index=2,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_value', full_name='google.ads.googleads.v6.common.UserListDateRuleItemInfo._value',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_offset_in_days', full_name='google.ads.googleads.v6.common.UserListDateRuleItemInfo._offset_in_days',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1712,
serialized_end=1928,
)
_USERLISTNUMBERRULEITEMINFO = _descriptor.Descriptor(
name='UserListNumberRuleItemInfo',
full_name='google.ads.googleads.v6.common.UserListNumberRuleItemInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='operator', full_name='google.ads.googleads.v6.common.UserListNumberRuleItemInfo.operator', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='google.ads.googleads.v6.common.UserListNumberRuleItemInfo.value', index=1,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_value', full_name='google.ads.googleads.v6.common.UserListNumberRuleItemInfo._value',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1931,
serialized_end=2105,
)
_USERLISTSTRINGRULEITEMINFO = _descriptor.Descriptor(
name='UserListStringRuleItemInfo',
full_name='google.ads.googleads.v6.common.UserListStringRuleItemInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='operator', full_name='google.ads.googleads.v6.common.UserListStringRuleItemInfo.operator', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='google.ads.googleads.v6.common.UserListStringRuleItemInfo.value', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_value', full_name='google.ads.googleads.v6.common.UserListStringRuleItemInfo._value',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2108,
serialized_end=2282,
)
_COMBINEDRULEUSERLISTINFO = _descriptor.Descriptor(
name='CombinedRuleUserListInfo',
full_name='google.ads.googleads.v6.common.CombinedRuleUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='left_operand', full_name='google.ads.googleads.v6.common.CombinedRuleUserListInfo.left_operand', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='right_operand', full_name='google.ads.googleads.v6.common.CombinedRuleUserListInfo.right_operand', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule_operator', full_name='google.ads.googleads.v6.common.CombinedRuleUserListInfo.rule_operator', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2285,
serialized_end=2573,
)
_DATESPECIFICRULEUSERLISTINFO = _descriptor.Descriptor(
name='DateSpecificRuleUserListInfo',
full_name='google.ads.googleads.v6.common.DateSpecificRuleUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='rule', full_name='google.ads.googleads.v6.common.DateSpecificRuleUserListInfo.rule', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_date', full_name='google.ads.googleads.v6.common.DateSpecificRuleUserListInfo.start_date', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_date', full_name='google.ads.googleads.v6.common.DateSpecificRuleUserListInfo.end_date', index=2,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_start_date', full_name='google.ads.googleads.v6.common.DateSpecificRuleUserListInfo._start_date',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_end_date', full_name='google.ads.googleads.v6.common.DateSpecificRuleUserListInfo._end_date',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2576,
serialized_end=2746,
)
_EXPRESSIONRULEUSERLISTINFO = _descriptor.Descriptor(
name='ExpressionRuleUserListInfo',
full_name='google.ads.googleads.v6.common.ExpressionRuleUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='rule', full_name='google.ads.googleads.v6.common.ExpressionRuleUserListInfo.rule', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2748,
serialized_end=2840,
)
_RULEBASEDUSERLISTINFO = _descriptor.Descriptor(
name='RuleBasedUserListInfo',
full_name='google.ads.googleads.v6.common.RuleBasedUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='prepopulation_status', full_name='google.ads.googleads.v6.common.RuleBasedUserListInfo.prepopulation_status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='combined_rule_user_list', full_name='google.ads.googleads.v6.common.RuleBasedUserListInfo.combined_rule_user_list', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='date_specific_rule_user_list', full_name='google.ads.googleads.v6.common.RuleBasedUserListInfo.date_specific_rule_user_list', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expression_rule_user_list', full_name='google.ads.googleads.v6.common.RuleBasedUserListInfo.expression_rule_user_list', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='rule_based_user_list', full_name='google.ads.googleads.v6.common.RuleBasedUserListInfo.rule_based_user_list',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2843,
serialized_end=3304,
)
_LOGICALUSERLISTINFO = _descriptor.Descriptor(
name='LogicalUserListInfo',
full_name='google.ads.googleads.v6.common.LogicalUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.ads.googleads.v6.common.LogicalUserListInfo.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3306,
serialized_end=3399,
)
_USERLISTLOGICALRULEINFO = _descriptor.Descriptor(
name='UserListLogicalRuleInfo',
full_name='google.ads.googleads.v6.common.UserListLogicalRuleInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='operator', full_name='google.ads.googleads.v6.common.UserListLogicalRuleInfo.operator', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule_operands', full_name='google.ads.googleads.v6.common.UserListLogicalRuleInfo.rule_operands', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3402,
serialized_end=3620,
)
_LOGICALUSERLISTOPERANDINFO = _descriptor.Descriptor(
name='LogicalUserListOperandInfo',
full_name='google.ads.googleads.v6.common.LogicalUserListOperandInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='user_list', full_name='google.ads.googleads.v6.common.LogicalUserListOperandInfo.user_list', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_user_list', full_name='google.ads.googleads.v6.common.LogicalUserListOperandInfo._user_list',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=3622,
serialized_end=3688,
)
_BASICUSERLISTINFO = _descriptor.Descriptor(
name='BasicUserListInfo',
full_name='google.ads.googleads.v6.common.BasicUserListInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='actions', full_name='google.ads.googleads.v6.common.BasicUserListInfo.actions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3690,
serialized_end=3778,
)
_USERLISTACTIONINFO = _descriptor.Descriptor(
name='UserListActionInfo',
full_name='google.ads.googleads.v6.common.UserListActionInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='conversion_action', full_name='google.ads.googleads.v6.common.UserListActionInfo.conversion_action', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='remarketing_action', full_name='google.ads.googleads.v6.common.UserListActionInfo.remarketing_action', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='user_list_action', full_name='google.ads.googleads.v6.common.UserListActionInfo.user_list_action',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=3780,
serialized_end=3879,
)
_SIMILARUSERLISTINFO.oneofs_by_name['_seed_user_list'].fields.append(
_SIMILARUSERLISTINFO.fields_by_name['seed_user_list'])
_SIMILARUSERLISTINFO.fields_by_name['seed_user_list'].containing_oneof = _SIMILARUSERLISTINFO.oneofs_by_name['_seed_user_list']
_CRMBASEDUSERLISTINFO.fields_by_name['upload_key_type'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_customer__match__upload__key__type__pb2._CUSTOMERMATCHUPLOADKEYTYPEENUM_CUSTOMERMATCHUPLOADKEYTYPE
_CRMBASEDUSERLISTINFO.fields_by_name['data_source_type'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__crm__data__source__type__pb2._USERLISTCRMDATASOURCETYPEENUM_USERLISTCRMDATASOURCETYPE
_CRMBASEDUSERLISTINFO.oneofs_by_name['_app_id'].fields.append(
_CRMBASEDUSERLISTINFO.fields_by_name['app_id'])
_CRMBASEDUSERLISTINFO.fields_by_name['app_id'].containing_oneof = _CRMBASEDUSERLISTINFO.oneofs_by_name['_app_id']
_USERLISTRULEINFO.fields_by_name['rule_type'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__rule__type__pb2._USERLISTRULETYPEENUM_USERLISTRULETYPE
_USERLISTRULEINFO.fields_by_name['rule_item_groups'].message_type = _USERLISTRULEITEMGROUPINFO
_USERLISTRULEITEMGROUPINFO.fields_by_name['rule_items'].message_type = _USERLISTRULEITEMINFO
_USERLISTRULEITEMINFO.fields_by_name['number_rule_item'].message_type = _USERLISTNUMBERRULEITEMINFO
_USERLISTRULEITEMINFO.fields_by_name['string_rule_item'].message_type = _USERLISTSTRINGRULEITEMINFO
_USERLISTRULEITEMINFO.fields_by_name['date_rule_item'].message_type = _USERLISTDATERULEITEMINFO
_USERLISTRULEITEMINFO.oneofs_by_name['rule_item'].fields.append(
_USERLISTRULEITEMINFO.fields_by_name['number_rule_item'])
_USERLISTRULEITEMINFO.fields_by_name['number_rule_item'].containing_oneof = _USERLISTRULEITEMINFO.oneofs_by_name['rule_item']
_USERLISTRULEITEMINFO.oneofs_by_name['rule_item'].fields.append(
_USERLISTRULEITEMINFO.fields_by_name['string_rule_item'])
_USERLISTRULEITEMINFO.fields_by_name['string_rule_item'].containing_oneof = _USERLISTRULEITEMINFO.oneofs_by_name['rule_item']
_USERLISTRULEITEMINFO.oneofs_by_name['rule_item'].fields.append(
_USERLISTRULEITEMINFO.fields_by_name['date_rule_item'])
_USERLISTRULEITEMINFO.fields_by_name['date_rule_item'].containing_oneof = _USERLISTRULEITEMINFO.oneofs_by_name['rule_item']
_USERLISTRULEITEMINFO.oneofs_by_name['_name'].fields.append(
_USERLISTRULEITEMINFO.fields_by_name['name'])
_USERLISTRULEITEMINFO.fields_by_name['name'].containing_oneof = _USERLISTRULEITEMINFO.oneofs_by_name['_name']
_USERLISTDATERULEITEMINFO.fields_by_name['operator'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__date__rule__item__operator__pb2._USERLISTDATERULEITEMOPERATORENUM_USERLISTDATERULEITEMOPERATOR
_USERLISTDATERULEITEMINFO.oneofs_by_name['_value'].fields.append(
_USERLISTDATERULEITEMINFO.fields_by_name['value'])
_USERLISTDATERULEITEMINFO.fields_by_name['value'].containing_oneof = _USERLISTDATERULEITEMINFO.oneofs_by_name['_value']
_USERLISTDATERULEITEMINFO.oneofs_by_name['_offset_in_days'].fields.append(
_USERLISTDATERULEITEMINFO.fields_by_name['offset_in_days'])
_USERLISTDATERULEITEMINFO.fields_by_name['offset_in_days'].containing_oneof = _USERLISTDATERULEITEMINFO.oneofs_by_name['_offset_in_days']
_USERLISTNUMBERRULEITEMINFO.fields_by_name['operator'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__number__rule__item__operator__pb2._USERLISTNUMBERRULEITEMOPERATORENUM_USERLISTNUMBERRULEITEMOPERATOR
_USERLISTNUMBERRULEITEMINFO.oneofs_by_name['_value'].fields.append(
_USERLISTNUMBERRULEITEMINFO.fields_by_name['value'])
_USERLISTNUMBERRULEITEMINFO.fields_by_name['value'].containing_oneof = _USERLISTNUMBERRULEITEMINFO.oneofs_by_name['_value']
_USERLISTSTRINGRULEITEMINFO.fields_by_name['operator'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__string__rule__item__operator__pb2._USERLISTSTRINGRULEITEMOPERATORENUM_USERLISTSTRINGRULEITEMOPERATOR
_USERLISTSTRINGRULEITEMINFO.oneofs_by_name['_value'].fields.append(
_USERLISTSTRINGRULEITEMINFO.fields_by_name['value'])
_USERLISTSTRINGRULEITEMINFO.fields_by_name['value'].containing_oneof = _USERLISTSTRINGRULEITEMINFO.oneofs_by_name['_value']
_COMBINEDRULEUSERLISTINFO.fields_by_name['left_operand'].message_type = _USERLISTRULEINFO
_COMBINEDRULEUSERLISTINFO.fields_by_name['right_operand'].message_type = _USERLISTRULEINFO
_COMBINEDRULEUSERLISTINFO.fields_by_name['rule_operator'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__combined__rule__operator__pb2._USERLISTCOMBINEDRULEOPERATORENUM_USERLISTCOMBINEDRULEOPERATOR
_DATESPECIFICRULEUSERLISTINFO.fields_by_name['rule'].message_type = _USERLISTRULEINFO
_DATESPECIFICRULEUSERLISTINFO.oneofs_by_name['_start_date'].fields.append(
_DATESPECIFICRULEUSERLISTINFO.fields_by_name['start_date'])
_DATESPECIFICRULEUSERLISTINFO.fields_by_name['start_date'].containing_oneof = _DATESPECIFICRULEUSERLISTINFO.oneofs_by_name['_start_date']
_DATESPECIFICRULEUSERLISTINFO.oneofs_by_name['_end_date'].fields.append(
_DATESPECIFICRULEUSERLISTINFO.fields_by_name['end_date'])
_DATESPECIFICRULEUSERLISTINFO.fields_by_name['end_date'].containing_oneof = _DATESPECIFICRULEUSERLISTINFO.oneofs_by_name['_end_date']
_EXPRESSIONRULEUSERLISTINFO.fields_by_name['rule'].message_type = _USERLISTRULEINFO
_RULEBASEDUSERLISTINFO.fields_by_name['prepopulation_status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__prepopulation__status__pb2._USERLISTPREPOPULATIONSTATUSENUM_USERLISTPREPOPULATIONSTATUS
_RULEBASEDUSERLISTINFO.fields_by_name['combined_rule_user_list'].message_type = _COMBINEDRULEUSERLISTINFO
_RULEBASEDUSERLISTINFO.fields_by_name['date_specific_rule_user_list'].message_type = _DATESPECIFICRULEUSERLISTINFO
_RULEBASEDUSERLISTINFO.fields_by_name['expression_rule_user_list'].message_type = _EXPRESSIONRULEUSERLISTINFO
_RULEBASEDUSERLISTINFO.oneofs_by_name['rule_based_user_list'].fields.append(
_RULEBASEDUSERLISTINFO.fields_by_name['combined_rule_user_list'])
_RULEBASEDUSERLISTINFO.fields_by_name['combined_rule_user_list'].containing_oneof = _RULEBASEDUSERLISTINFO.oneofs_by_name['rule_based_user_list']
_RULEBASEDUSERLISTINFO.oneofs_by_name['rule_based_user_list'].fields.append(
_RULEBASEDUSERLISTINFO.fields_by_name['date_specific_rule_user_list'])
_RULEBASEDUSERLISTINFO.fields_by_name['date_specific_rule_user_list'].containing_oneof = _RULEBASEDUSERLISTINFO.oneofs_by_name['rule_based_user_list']
_RULEBASEDUSERLISTINFO.oneofs_by_name['rule_based_user_list'].fields.append(
_RULEBASEDUSERLISTINFO.fields_by_name['expression_rule_user_list'])
_RULEBASEDUSERLISTINFO.fields_by_name['expression_rule_user_list'].containing_oneof = _RULEBASEDUSERLISTINFO.oneofs_by_name['rule_based_user_list']
_LOGICALUSERLISTINFO.fields_by_name['rules'].message_type = _USERLISTLOGICALRULEINFO
_USERLISTLOGICALRULEINFO.fields_by_name['operator'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_user__list__logical__rule__operator__pb2._USERLISTLOGICALRULEOPERATORENUM_USERLISTLOGICALRULEOPERATOR
_USERLISTLOGICALRULEINFO.fields_by_name['rule_operands'].message_type = _LOGICALUSERLISTOPERANDINFO
_LOGICALUSERLISTOPERANDINFO.oneofs_by_name['_user_list'].fields.append(
_LOGICALUSERLISTOPERANDINFO.fields_by_name['user_list'])
_LOGICALUSERLISTOPERANDINFO.fields_by_name['user_list'].containing_oneof = _LOGICALUSERLISTOPERANDINFO.oneofs_by_name['_user_list']
_BASICUSERLISTINFO.fields_by_name['actions'].message_type = _USERLISTACTIONINFO
_USERLISTACTIONINFO.oneofs_by_name['user_list_action'].fields.append(
_USERLISTACTIONINFO.fields_by_name['conversion_action'])
_USERLISTACTIONINFO.fields_by_name['conversion_action'].containing_oneof = _USERLISTACTIONINFO.oneofs_by_name['user_list_action']
_USERLISTACTIONINFO.oneofs_by_name['user_list_action'].fields.append(
_USERLISTACTIONINFO.fields_by_name['remarketing_action'])
_USERLISTACTIONINFO.fields_by_name['remarketing_action'].containing_oneof = _USERLISTACTIONINFO.oneofs_by_name['user_list_action']
DESCRIPTOR.message_types_by_name['SimilarUserListInfo'] = _SIMILARUSERLISTINFO
DESCRIPTOR.message_types_by_name['CrmBasedUserListInfo'] = _CRMBASEDUSERLISTINFO
DESCRIPTOR.message_types_by_name['UserListRuleInfo'] = _USERLISTRULEINFO
DESCRIPTOR.message_types_by_name['UserListRuleItemGroupInfo'] = _USERLISTRULEITEMGROUPINFO
DESCRIPTOR.message_types_by_name['UserListRuleItemInfo'] = _USERLISTRULEITEMINFO
DESCRIPTOR.message_types_by_name['UserListDateRuleItemInfo'] = _USERLISTDATERULEITEMINFO
DESCRIPTOR.message_types_by_name['UserListNumberRuleItemInfo'] = _USERLISTNUMBERRULEITEMINFO
DESCRIPTOR.message_types_by_name['UserListStringRuleItemInfo'] = _USERLISTSTRINGRULEITEMINFO
DESCRIPTOR.message_types_by_name['CombinedRuleUserListInfo'] = _COMBINEDRULEUSERLISTINFO
DESCRIPTOR.message_types_by_name['DateSpecificRuleUserListInfo'] = _DATESPECIFICRULEUSERLISTINFO
DESCRIPTOR.message_types_by_name['ExpressionRuleUserListInfo'] = _EXPRESSIONRULEUSERLISTINFO
DESCRIPTOR.message_types_by_name['RuleBasedUserListInfo'] = _RULEBASEDUSERLISTINFO
DESCRIPTOR.message_types_by_name['LogicalUserListInfo'] = _LOGICALUSERLISTINFO
DESCRIPTOR.message_types_by_name['UserListLogicalRuleInfo'] = _USERLISTLOGICALRULEINFO
DESCRIPTOR.message_types_by_name['LogicalUserListOperandInfo'] = _LOGICALUSERLISTOPERANDINFO
DESCRIPTOR.message_types_by_name['BasicUserListInfo'] = _BASICUSERLISTINFO
DESCRIPTOR.message_types_by_name['UserListActionInfo'] = _USERLISTACTIONINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SimilarUserListInfo = _reflection.GeneratedProtocolMessageType('SimilarUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _SIMILARUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.SimilarUserListInfo)
})
_sym_db.RegisterMessage(SimilarUserListInfo)
CrmBasedUserListInfo = _reflection.GeneratedProtocolMessageType('CrmBasedUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _CRMBASEDUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.CrmBasedUserListInfo)
})
_sym_db.RegisterMessage(CrmBasedUserListInfo)
UserListRuleInfo = _reflection.GeneratedProtocolMessageType('UserListRuleInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTRULEINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListRuleInfo)
})
_sym_db.RegisterMessage(UserListRuleInfo)
UserListRuleItemGroupInfo = _reflection.GeneratedProtocolMessageType('UserListRuleItemGroupInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTRULEITEMGROUPINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListRuleItemGroupInfo)
})
_sym_db.RegisterMessage(UserListRuleItemGroupInfo)
UserListRuleItemInfo = _reflection.GeneratedProtocolMessageType('UserListRuleItemInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTRULEITEMINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListRuleItemInfo)
})
_sym_db.RegisterMessage(UserListRuleItemInfo)
UserListDateRuleItemInfo = _reflection.GeneratedProtocolMessageType('UserListDateRuleItemInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTDATERULEITEMINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListDateRuleItemInfo)
})
_sym_db.RegisterMessage(UserListDateRuleItemInfo)
UserListNumberRuleItemInfo = _reflection.GeneratedProtocolMessageType('UserListNumberRuleItemInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTNUMBERRULEITEMINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListNumberRuleItemInfo)
})
_sym_db.RegisterMessage(UserListNumberRuleItemInfo)
UserListStringRuleItemInfo = _reflection.GeneratedProtocolMessageType('UserListStringRuleItemInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTSTRINGRULEITEMINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListStringRuleItemInfo)
})
_sym_db.RegisterMessage(UserListStringRuleItemInfo)
CombinedRuleUserListInfo = _reflection.GeneratedProtocolMessageType('CombinedRuleUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _COMBINEDRULEUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.CombinedRuleUserListInfo)
})
_sym_db.RegisterMessage(CombinedRuleUserListInfo)
DateSpecificRuleUserListInfo = _reflection.GeneratedProtocolMessageType('DateSpecificRuleUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _DATESPECIFICRULEUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.DateSpecificRuleUserListInfo)
})
_sym_db.RegisterMessage(DateSpecificRuleUserListInfo)
ExpressionRuleUserListInfo = _reflection.GeneratedProtocolMessageType('ExpressionRuleUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _EXPRESSIONRULEUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.ExpressionRuleUserListInfo)
})
_sym_db.RegisterMessage(ExpressionRuleUserListInfo)
RuleBasedUserListInfo = _reflection.GeneratedProtocolMessageType('RuleBasedUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _RULEBASEDUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.RuleBasedUserListInfo)
})
_sym_db.RegisterMessage(RuleBasedUserListInfo)
LogicalUserListInfo = _reflection.GeneratedProtocolMessageType('LogicalUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _LOGICALUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.LogicalUserListInfo)
})
_sym_db.RegisterMessage(LogicalUserListInfo)
UserListLogicalRuleInfo = _reflection.GeneratedProtocolMessageType('UserListLogicalRuleInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTLOGICALRULEINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListLogicalRuleInfo)
})
_sym_db.RegisterMessage(UserListLogicalRuleInfo)
LogicalUserListOperandInfo = _reflection.GeneratedProtocolMessageType('LogicalUserListOperandInfo', (_message.Message,), {
'DESCRIPTOR' : _LOGICALUSERLISTOPERANDINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.LogicalUserListOperandInfo)
})
_sym_db.RegisterMessage(LogicalUserListOperandInfo)
BasicUserListInfo = _reflection.GeneratedProtocolMessageType('BasicUserListInfo', (_message.Message,), {
'DESCRIPTOR' : _BASICUSERLISTINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.BasicUserListInfo)
})
_sym_db.RegisterMessage(BasicUserListInfo)
UserListActionInfo = _reflection.GeneratedProtocolMessageType('UserListActionInfo', (_message.Message,), {
'DESCRIPTOR' : _USERLISTACTIONINFO,
'__module__' : 'google.ads.googleads.v6.common.user_lists_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.UserListActionInfo)
})
_sym_db.RegisterMessage(UserListActionInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
# Copyright (c) 2017 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sean Wilson
'''
Test file for the util m5 exit assembly instruction.
'''
import os
from testlib import *
test_progs = {
'x86': ('hello64-static', 'hello64-dynamic', 'hello32-static'),
'arm': ('hello64-static', 'hello32-static'),
}
for isa in test_progs:
for binary in test_progs[isa]:
path = os.path.join('hello', 'bin', isa, 'linux')
hello_program = DownloadedProgram(path, binary)
ref_path = joinpath(getcwd(), 'ref')
verifiers = (
verifier.MatchStdoutNoPerf(joinpath(ref_path, 'simout')),
)
gem5_verify_config(
name='test'+binary,
fixtures=(hello_program,),
verifiers=verifiers,
config=joinpath(config.base_dir, 'configs', 'example','se.py'),
config_args=['--cmd', hello_program.path],
valid_isas=(isa.upper(),),
)
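# Each (isa, binary) pair above registers one configuration test named 'test<binary>' with gem5's
# testlib runner; the MatchStdoutNoPerf verifier compares the simulated program's stdout against the
# 'ref/simout' reference file.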
|
# Reverse the digits of a four-digit integer (e.g. 1234 -> 4321).
a = int(input("enter an integer:>>"))
c = 0
for i in range(4):
    b = a % 10         # take the current last digit
    a = a // 10        # drop it (integer division keeps a an int)
    c = c + b * 10 ** (3 - i)
print(c)
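# Sample run (hypothetical input):
#   enter an integer:>>1234
#   4321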
|
""" Connected Component Consolidation """
import numpy as np
from .. import continuation
from ... import utils
from ... import colnames as cn
def pair_continuation_files(contin_files):
pairs = dict()
for contin_file in contin_files:
if contin_file.face.hi_index:
hash_input = (*contin_file.bbox.max(), contin_file.face.axis)
else:
bbox_min, bbox_max = contin_file.bbox.min(), contin_file.bbox.max()
chunk_id = list(bbox_max)
chunk_id[contin_file.face.axis] = bbox_min[contin_file.face.axis]
hash_input = (*chunk_id, contin_file.face.axis)
pairs[hash_input] = pairs.get(hash_input, []) + [contin_file]
# Remove duplicates
unique_files = list(list(set(value)) for value in pairs.values())
return unique_files
def merge_continuations(continuation_arr, overlap_df=None,
max_face_shape=(1152, 1152), overlap_col=cn.ovl_segid):
"""
Finds an id mapping to merge the continuations which match across faces
"""
matches = find_connected_continuations(continuation_arr,
max_face_shape=max_face_shape)
if overlap_df is not None:
matches = filter_matches_by_overlap(
matches, overlap_df, overlap_col=overlap_col)
ccs = utils.find_connected_components(matches)
return utils.make_id_map(ccs)
def filter_matches_by_overlap(matches, overlap_df,
segid_col=cn.seg_id, overlap_col=cn.ovl_segid):
"""
Filters component matches by their overlapping base seg id. Assumes that
each row has only one overlap entry.
"""
if overlap_df.index.name == segid_col:
overlap_dict = dict(zip(overlap_df.index,
overlap_df[overlap_col]))
else:
        assert segid_col in overlap_df.columns, f"column {segid_col} not found"
overlap_dict = dict(zip(overlap_df[segid_col],
overlap_df[overlap_col]))
return [m for m in matches if overlap_dict[m[0]] == overlap_dict[m[1]]]
def find_connected_continuations(continuation_arr,
max_face_shape=(1152, 1152)):
"""
Finds the edges of a graph which describes the continuation connectivity
"""
sizes = continuation_arr.shape
    face_checked = np.zeros((6,) + continuation_arr.shape, dtype=bool)
matches = []
for index in np.ndindex(sizes):
for face in continuation.Face.all_faces():
# bounds checking
if face.hi_index and index[face.axis] == sizes[face.axis] - 1:
continue
if not face.hi_index and index[face.axis] == 0:
continue
# if we've processed the other side already
if face_checked[(face.axis + 3*face.hi_index,) + index]:
continue
else:
face_checked[(face.axis + 3*face.hi_index,) + index] = True
index_to_check = list(index)
if face.hi_index:
index_to_check[face.axis] += 1
else:
index_to_check[face.axis] -= 1
index_to_check = tuple(index_to_check)
oppface = face.opposite()
face_checked[(oppface.axis + 3*oppface.hi_index,)
+ index_to_check] = True
conts_here = continuation_arr[index][face]
conts_there = continuation_arr[index_to_check][oppface]
new_matches = match_continuations(conts_here, conts_there,
face_shape=max_face_shape)
matches.extend(new_matches)
return matches
def match_continuations(conts1, conts2, face_shape=(1152, 1152)):
""" Determines which continuations match within the two lists """
face1 = reconstruct_face(conts1, shape=face_shape)
face2 = reconstruct_face(conts2, shape=face_shape)
intersection_mask = np.logical_and(face1 != 0, face2 != 0)
matches1 = face1[intersection_mask]
matches2 = face2[intersection_mask]
return list(set(zip(matches1, matches2)))
def reconstruct_face(continuations, shape=(1152, 1152)):
face = np.zeros(shape, dtype=np.uint32)
for c in continuations:
face[tuple(c.face_coords.T)] = c.segid
return face
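# Hypothetical, self-contained sketch of match_continuations(); `FakeCont` below is a stand-in that
# only provides the `face_coords` and `segid` attributes read by reconstruct_face() above:
#   from collections import namedtuple
#   FakeCont = namedtuple("FakeCont", ["face_coords", "segid"])
#   conts1 = [FakeCont(face_coords=np.array([[0, 0], [0, 1]]), segid=7)]
#   conts2 = [FakeCont(face_coords=np.array([[0, 1], [5, 5]]), segid=9)]
#   # Segments 7 and 9 both occupy pixel (0, 1) on the shared face, so they are matched:
#   match_continuations(conts1, conts2, face_shape=(8, 8))  # -> [(7, 9)]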
|
import os
def create_project_folder(directory):  # Create a separate folder for each website
    if not os.path.exists(directory):
        print('Creating directory ' + directory)
        os.makedirs(directory)
def create_data_files(folder_name, start_link):  # Create the queue and crawled-list files if missing
    queue = os.path.join(folder_name, 'queue.txt')
    data_crawled = os.path.join(folder_name, 'crawled.txt')
if not os.path.isfile(queue):
write_to_file(queue, start_link)
if not os.path.isfile(data_crawled):
write_to_file(data_crawled, '')
def write_to_file(path, url): # Create a new file for the task
with open(path, 'w') as f:
f.write(url)
def append_file(path, url): # Append new data to existing file
with open(path, 'a') as file:
file.write(url + '\n')
def empty_queue(path): # Delete contents of a file
open(path, 'w').close()
def convert_to_set(file_name): # Read a file and convert each line to set items
results = set()
with open(file_name, 'rt') as f:
for line in f:
results.add(line.replace('\n', ''))
return results
def set_to_file(urls, file_name): # Iterate through a set, each item will be a line in a file
with open(file_name,"w") as f:
for l in sorted(urls):
f.write(l+"\n")
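# --- Hedged usage sketch (not part of the original module) ---
# Seeds a crawl directory for a hypothetical site; the folder name and URLs
# below are illustrative assumptions.
if __name__ == '__main__':
    project = 'example_site'
    create_project_folder(project)
    create_data_files(project, 'https://example.com/')
    crawled_file = os.path.join(project, 'crawled.txt')
    set_to_file({'https://example.com/a', 'https://example.com/b'}, crawled_file)
    print(convert_to_set(crawled_file))  # {'https://example.com/a', 'https://example.com/b'}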
|
#!/usr/bin/env python3
import re
import sys
import os
import platform
from setuptools import setup
SRC = os.path.abspath(os.path.dirname(__file__))
def get_version():
with open(os.path.join(SRC, 'instaloader/__init__.py')) as f:
for line in f:
m = re.match("__version__ = '(.*)'", line)
if m:
return m.group(1)
raise SystemExit("Could not find version string.")
if sys.version_info < (3, 5):
sys.exit('Instaloader requires Python >= 3.5.')
requirements = ['requests>=2.4']
if platform.system() == 'Windows' and sys.version_info < (3, 6):
requirements.append('win_unicode_console')
keywords = (['instagram', 'instagram-scraper', 'instagram-client', 'instagram-feed', 'downloader', 'videos', 'photos',
'pictures', 'instagram-user-photos', 'instagram-photos', 'instagram-metadata', 'instagram-downloader',
'instagram-stories'])
# NOTE that many of the values defined in this file are duplicated on other places, such as the
# documentation.
setup(
name='instaloader',
version=get_version(),
packages=['instaloader'],
url='https://instaloader.github.io/',
license='MIT',
author='Alexander Graf, André Koch-Kramer',
author_email='mail@agraf.me, koch-kramer@web.de',
description='Download pictures (or videos) along with their captions and other metadata '
'from Instagram.',
long_description=open(os.path.join(SRC, 'README.rst')).read(),
install_requires=requirements,
python_requires='>=3.5',
entry_points={'console_scripts': ['instaloader=instaloader.__main__:main']},
zip_safe=True,
keywords=keywords,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet',
'Topic :: Multimedia :: Graphics'
]
)
|
# Copyright 2016 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import logging
import argparse
from tabulate import tabulate
from courseraresearchexports.exports import api
from courseraresearchexports.constants.api_constants import \
ANONYMITY_LEVEL_COORDINATOR, EXPORT_TYPE_CLICKSTREAM, \
EXPORT_TYPE_TABLES, SCHEMA_NAMES
from courseraresearchexports.models.ClickstreamDownloadLinksRequest import \
ClickstreamDownloadLinksRequest
from courseraresearchexports.models.ExportRequest import ExportRequest
from courseraresearchexports.exports import utils
def request_clickstream(args):
"""
    Create and send a clickstream data export request with Coursera. Only
available for data coordinators.
"""
export_request = ExportRequest.from_args(
course_id=args.course_id,
course_slug=args.course_slug,
partner_id=args.partner_id,
partner_short_name=args.partner_short_name,
group_id=args.group_id,
anonymity_level=ANONYMITY_LEVEL_COORDINATOR,
statement_of_purpose=args.purpose,
export_type=EXPORT_TYPE_CLICKSTREAM,
interval=args.interval,
ignore_existing=args.ignore_existing)
export_request_with_metadata = api.post(export_request)[0]
logging.info('Successfully created clickstream export request {id}.'
.format(id=export_request_with_metadata.id))
logging.debug('Request created with json body:\n{json}'
.format(json=json.dumps(
export_request_with_metadata.to_json(), indent=2)))
def request_tables(args):
"""
Create and send a tables data export request with Coursera.
"""
export_request = ExportRequest.from_args(
course_id=args.course_id,
course_slug=args.course_slug,
partner_id=args.partner_id,
partner_short_name=args.partner_short_name,
group_id=args.group_id,
user_id_hashing=args.user_id_hashing,
statement_of_purpose=args.purpose,
export_type=EXPORT_TYPE_TABLES,
schema_names=args.schemas)
export_request_with_metadata = api.post(export_request)[0]
logging.info('Successfully created tables export request {id}.'
.format(id=export_request_with_metadata.id))
logging.debug('Request created with json body:\n{json}'
.format(json=json.dumps(
export_request_with_metadata.to_json(), indent=2)))
def get(args):
"""
Get the details and status of a data export request using a job id.
"""
export_request = api.get(args.id)[0]
export_request_info = [
['Export Job Id:', export_request.id],
['Export Type:', export_request.export_type_display],
['Status:', export_request.status],
['Scope Context:', export_request.scope_context],
['Scope Id:', export_request.scope_id],
['Scope Name:', export_request.scope_name],
['User id Hashing: ', export_request.formatted_anonymity_level],
['Created:', export_request.created_at.strftime('%c')]]
if export_request.schema_names:
export_request_info.append(
['Schemas:', export_request.schema_names_display])
if export_request.download_link:
export_request_info.append(
['Download Link:', export_request.download_link])
if export_request.interval:
export_request_info.append(
['Interval:', ' to '.join(export_request.interval)])
print(tabulate(export_request_info, tablefmt="plain"))
def get_all(args):
"""
Get the details and status of your data export requests.
"""
export_requests = api.get_all()
export_requests_table = [['Created', 'Request Id', 'Status', 'Type',
'User Id Hashing', 'Scope', 'Schemas']]
for export_request in sorted(export_requests, key=lambda x: x.created_at):
export_requests_table.append([
export_request.created_at.strftime('%Y-%m-%d %H:%M'),
export_request.id,
export_request.status,
export_request.export_type_display,
export_request.formatted_anonymity_level,
export_request.scope_id,
export_request.schema_names_display])
print(tabulate(export_requests_table, headers='firstrow'))
def download(args):
"""
Download a data export job using a request id.
"""
try:
export_request = api.get(args.id)[0]
dest = args.dest
utils.download(export_request, dest)
except Exception as err:
logging.error('Download failed with exception:\n{}'.format(err))
raise
def get_clickstream_links(args):
"""
Generate links for clickstream data exports
"""
clickstream_links_request = ClickstreamDownloadLinksRequest.from_args(
course_id=args.course_id,
course_slug=args.course_slug,
partner_id=args.partner_id,
partner_short_name=args.partner_short_name,
group_id=args.group_id,
interval=args.interval)
clickstream_download_links = api.get_clickstream_download_links(
clickstream_links_request)
# TODO: add more descriptive information or option write to text file
print(tabulate(
[[link] for link in clickstream_download_links],
tablefmt="plain"))
def parser(subparsers):
parser_jobs = subparsers.add_parser(
'jobs',
help='Get status of current/completed research export job(s)',
description='Command line tools for requesting and reviewing the '
'status of Coursera research data exports. Please first authenticate '
'with the OAuth2 client before making requests (courseraoauth2client '
'config authorize --app manage-research-exports).',
epilog='Please file bugs on github at: '
'https://github.com/coursera/courseraresearchexports/issues. If you '
'would like to contribute to this tool\'s development, check us out '
'at: https://github.com/coursera/courseraresarchexports')
jobs_subparsers = parser_jobs.add_subparsers()
create_request_parser(jobs_subparsers)
parser_get_all = jobs_subparsers.add_parser(
'get_all',
help=get_all.__doc__,
description=get_all.__doc__)
parser_get_all.set_defaults(func=get_all)
parser_get = jobs_subparsers.add_parser(
'get',
help=get.__doc__,
description=get.__doc__)
parser_get.set_defaults(func=get)
parser_get.add_argument(
'id',
help='Export request ID')
parser_download = jobs_subparsers.add_parser(
'download',
help=download.__doc__,
description=download.__doc__)
parser_download.set_defaults(func=download)
parser_download.add_argument(
'id',
help='Export request ID')
parser_download.add_argument(
'--dest',
default='.',
help='Destination folder')
parser_clickstream_links = jobs_subparsers.add_parser(
'clickstream_download_links',
help='Get download links for completed eventing exports.')
parser_clickstream_links.set_defaults(func=get_clickstream_links)
create_scope_subparser(parser_clickstream_links)
parser_clickstream_links.add_argument(
'--interval',
nargs=2,
metavar=('START', 'END'),
help='Interval of exported clickstream data, inclusive. '
'(i.e. 2016-08-01 2016-08-04).')
return parser_jobs
def create_scope_subparser(parser):
scope_subparser = parser.add_mutually_exclusive_group(
required=True)
scope_subparser.add_argument(
'--course_id',
help='Export rows corresponding to learners within a course according '
'to the unique id assigned by Coursera.')
scope_subparser.add_argument(
'--course_slug',
help='Export rows corresponding to learners within a course according '
'to the unique name of your course defined as the part after '
'/learn in the course url. (e.g. machine-learning for '
'https://www.coursera.org/learn/machine-learning).')
scope_subparser.add_argument(
'--partner_id',
type=int,
help='Export rows corresponding to learners within a partner.')
scope_subparser.add_argument(
'--partner_short_name',
help='Export rows corresponding to learners within a partner.')
scope_subparser.add_argument(
'--group_id',
        help='Export rows corresponding to learners within a group.')
def create_request_parser(subparsers):
parser_request = subparsers.add_parser(
'request',
help='Create and send a data export request with Coursera.',
description='Create and send a data export request with Coursera. '
'Use subcommands to specify the export request type.')
request_subparsers = parser_request.add_subparsers()
# common arguments between schema and eventing exports
request_args_parser = argparse.ArgumentParser(add_help=False)
create_scope_subparser(request_args_parser)
request_args_parser.add_argument(
'--purpose',
required=True,
help='Please let us know how you plan to use the '
'data, what types of research questions you\'re asking, who will '
'be working with the data primarily, and with whom you plan to '
'share it.')
# tables subcommand
parser_tables = request_subparsers.add_parser(
'tables',
help=request_tables.__doc__,
description=request_tables.__doc__,
parents=[request_args_parser])
parser_tables.set_defaults(func=request_tables)
parser_tables.add_argument(
'--user_id_hashing',
choices=['linked', 'isolated'],
default='isolated',
help='The level of user_id hashing in the data export. With \'linked\''
' user_id hashing, users can be identified between table schemas. '
        'With \'isolated\' user_id hashing, users have independent ids in '
        'different schemas and cannot be linked. Only data coordinators have '
        'access to \'linked\' user_ids to restrict PII.')
parser_tables.add_argument(
'--schemas',
choices=SCHEMA_NAMES,
nargs='+',
default=SCHEMA_NAMES,
help='Data schemas to export. Any combination of: {}. By default this '
'will be all available schemas.'.format(
', '.join(SCHEMA_NAMES)))
# clickstream subcommand
parser_clickstream = request_subparsers.add_parser(
'clickstream',
help=request_clickstream.__doc__,
description=request_clickstream.__doc__,
parents=[request_args_parser])
parser_clickstream.set_defaults(func=request_clickstream)
parser_clickstream.add_argument(
'--interval',
nargs=2,
metavar=('START', 'END'),
help='Interval of clickstream data to be exported '
'(i.e. 2016-08-01 2016-08-04). By default this will be the past day.')
parser_clickstream.add_argument(
'--ignore_existing',
action='store_true',
        help='If flag is set, we will recompute clickstream data for all dates '
'in the interval. Otherwise, previously computed days are skipped.')
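# --- Hedged wiring sketch (not part of the original module) ---
# Shows how the `jobs` subcommand tree defined above could be attached to a
# top-level argparse parser; the program name and the sample command line are
# illustrative assumptions.
if __name__ == '__main__':
    main_parser = argparse.ArgumentParser(prog='courseraresearchexports')
    main_subparsers = main_parser.add_subparsers()
    parser(main_subparsers)
    # e.g. `courseraresearchexports jobs get_all` lists your export requests
    parsed_args = main_parser.parse_args(['jobs', 'get_all'])
    parsed_args.func(parsed_args)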
|
import asyncio
from types import TracebackType
from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
import asyncpg
from buildpg import BuildError, render
ValuesType = Union[Dict[str, Any], List[Any], None]
class UndefinedParameterError(Exception):
pass
class Transaction:
def __init__(self, connection: "Connection", *, force_rollback: bool = False) -> None:
self._connection = connection
self._transaction: Optional[asyncpg.Transaction] = None
self._force_rollback = force_rollback
async def __aenter__(self) -> "Transaction":
await self.start()
return self
async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
if self._force_rollback or exc_type is not None:
await self.rollback()
else:
await self.commit()
async def start(self) -> None:
async with self._connection._lock:
self._transaction = self._connection._connection.transaction()
await self._transaction.start()
async def commit(self) -> None:
async with self._connection._lock:
await self._transaction.commit()
self._transaction = None
async def rollback(self) -> None:
async with self._connection._lock:
await self._transaction.rollback()
self._transaction = None
class Connection:
def __init__(self, connection: asyncpg.Connection) -> None:
self._connection = connection
self._lock = asyncio.Lock()
async def execute(self, query: str, values: ValuesType = None) -> None:
"""Execute a query, with bind values if needed
The query can either use positional arguments i.e. `$1` with
values being a list or named arguments i.e. `:name` with
values being a dictionary.
"""
compiled_query, args = self._compile(query, values)
try:
async with self._lock:
return await self._connection.execute(compiled_query, *args)
except asyncpg.exceptions.UndefinedParameterError as error:
raise UndefinedParameterError(str(error))
async def execute_many(self, query: str, values: List[Dict[str, Any]]) -> None:
"""Execute a query for each set of values
The query can either use positional arguments i.e. `$1` with
values set being a list or named arguments i.e. `:name` with
values set being a dictionary.
"""
if not values:
return
        compiled_queries = [self._compile(query, value) for value in values]
        compiled_query = compiled_queries[0][0]
        args = [compiled[1] for compiled in compiled_queries]
try:
async with self._lock:
return await self._connection.executemany(compiled_query, args)
except asyncpg.exceptions.UndefinedParameterError as error:
raise UndefinedParameterError(str(error))
async def fetch_all(
self,
query: str,
values: ValuesType = None,
) -> List[asyncpg.Record]:
"""Execute a query, returning all the result rows
The query can either use positional arguments i.e. `$1` with
values being a list or named arguments i.e. `:name` with
values being a dictionary.
"""
compiled_query, args = self._compile(query, values)
try:
async with self._lock:
return await self._connection.fetch(compiled_query, *args)
except asyncpg.exceptions.UndefinedParameterError as error:
raise UndefinedParameterError(str(error))
async def fetch_one(
self,
query: str,
values: ValuesType = None,
) -> asyncpg.Record:
"""Execute a query, returning only the first result rows
The query can either use positional arguments i.e. `$1` with
values being a list or named arguments i.e. `:name` with
values being a dictionary.
"""
compiled_query, args = self._compile(query, values)
try:
async with self._lock:
return await self._connection.fetchrow(compiled_query, *args)
except asyncpg.exceptions.UndefinedParameterError as error:
raise UndefinedParameterError(str(error))
async def fetch_val(
self,
query: str,
values: ValuesType = None,
) -> Any:
"""Execute a query, returning only a value
The query can either use positional arguments i.e. `$1` with
values being a list or named arguments i.e. `:name` with
values being a dictionary.
"""
compiled_query, args = self._compile(query, values)
try:
async with self._lock:
return await self._connection.fetchval(compiled_query, *args)
except asyncpg.exceptions.UndefinedParameterError as error:
raise UndefinedParameterError(str(error))
async def iterate(
self,
query: str,
values: ValuesType = None,
) -> AsyncGenerator[asyncpg.Record, None]:
"""Execute a query, and iterate over the result rows
The query can either use positional arguments i.e. `$1` with
values being a list or named arguments i.e. `:name` with
values being a dictionary.
"""
compiled_query, args = self._compile(query, values)
async with self._lock:
async with self._connection.transaction():
try:
async for record in self._connection.cursor(compiled_query, *args):
yield record
except asyncpg.exceptions.UndefinedParameterError as error:
raise UndefinedParameterError(str(error))
def transaction(self, *, force_rollback: bool = False) -> "Transaction":
"""Open a transaction
.. code-block:: python
async with quart_db.connection() as connection:
async with connection.transaction():
await connection.execute("SELECT 1")
Arguments:
force_rollback: Force the transaction to rollback on completion.
"""
return Transaction(self, force_rollback=force_rollback)
def _compile(self, query: str, values: ValuesType = None) -> Tuple[str, List[Any]]:
if isinstance(values, list):
            # Positional ($1-style) parameters: pass the values straight through.
            return query, values
else:
try:
return render(query, **(values or {}))
except BuildError as error:
raise UndefinedParameterError(str(error))
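# --- Hedged usage sketch (not part of the original module) ---
# Wraps a raw asyncpg connection and runs named-parameter queries inside a
# rolled-back transaction; the DSN and the `items` table are assumptions for
# illustration only.
async def _demo() -> None:
    raw = await asyncpg.connect("postgresql://localhost/example")
    db = Connection(raw)
    async with db.transaction(force_rollback=True):
        await db.execute("INSERT INTO items (name) VALUES (:name)", {"name": "widget"})
        rows = await db.fetch_all("SELECT name FROM items WHERE name = :name", {"name": "widget"})
        print(rows)
    await raw.close()
if __name__ == "__main__":
    asyncio.run(_demo())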
|
"""
inclusive-or-expression:
exclusive-or-expression
inclusive-or-expression | exclusive-or-expression
"""
import glrp
from ....parser import cxx98
from be_typing import TYPE_CHECKING
@glrp.rule('inclusive-or-expression : exclusive-or-expression')
@glrp.rule('inclusive-or-expression : inclusive-or-expression "|" exclusive-or-expression')
@cxx98
def inclusive_or_expression(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
if TYPE_CHECKING:
from ....parser import CxxParser
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = ['LoggerArgs', 'Logger']
@pulumi.input_type
class LoggerArgs:
def __init__(__self__, *,
logger_type: pulumi.Input[Union[str, 'LoggerType']],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
credentials: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
is_buffered: Optional[pulumi.Input[bool]] = None,
logger_id: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Logger resource.
:param pulumi.Input[Union[str, 'LoggerType']] logger_type: Logger type.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] credentials: The name and SendRule connection string of the event hub for azureEventHub logger.
Instrumentation key for applicationInsights logger.
:param pulumi.Input[str] description: Logger description.
:param pulumi.Input[bool] is_buffered: Whether records are buffered in the logger before publishing. Default is assumed to be true.
:param pulumi.Input[str] logger_id: Logger identifier. Must be unique in the API Management service instance.
:param pulumi.Input[str] resource_id: Azure Resource Id of a log target (either Azure Event Hub resource or Azure Application Insights resource).
"""
pulumi.set(__self__, "logger_type", logger_type)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if description is not None:
pulumi.set(__self__, "description", description)
if is_buffered is not None:
pulumi.set(__self__, "is_buffered", is_buffered)
if logger_id is not None:
pulumi.set(__self__, "logger_id", logger_id)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="loggerType")
def logger_type(self) -> pulumi.Input[Union[str, 'LoggerType']]:
"""
Logger type.
"""
return pulumi.get(self, "logger_type")
@logger_type.setter
def logger_type(self, value: pulumi.Input[Union[str, 'LoggerType']]):
pulumi.set(self, "logger_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter
def credentials(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The name and SendRule connection string of the event hub for azureEventHub logger.
Instrumentation key for applicationInsights logger.
"""
return pulumi.get(self, "credentials")
@credentials.setter
def credentials(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "credentials", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Logger description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="isBuffered")
def is_buffered(self) -> Optional[pulumi.Input[bool]]:
"""
Whether records are buffered in the logger before publishing. Default is assumed to be true.
"""
return pulumi.get(self, "is_buffered")
@is_buffered.setter
def is_buffered(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_buffered", value)
@property
@pulumi.getter(name="loggerId")
def logger_id(self) -> Optional[pulumi.Input[str]]:
"""
Logger identifier. Must be unique in the API Management service instance.
"""
return pulumi.get(self, "logger_id")
@logger_id.setter
def logger_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logger_id", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Azure Resource Id of a log target (either Azure Event Hub resource or Azure Application Insights resource).
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
class Logger(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
credentials: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
is_buffered: Optional[pulumi.Input[bool]] = None,
logger_id: Optional[pulumi.Input[str]] = None,
logger_type: Optional[pulumi.Input[Union[str, 'LoggerType']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Logger details.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] credentials: The name and SendRule connection string of the event hub for azureEventHub logger.
Instrumentation key for applicationInsights logger.
:param pulumi.Input[str] description: Logger description.
:param pulumi.Input[bool] is_buffered: Whether records are buffered in the logger before publishing. Default is assumed to be true.
:param pulumi.Input[str] logger_id: Logger identifier. Must be unique in the API Management service instance.
:param pulumi.Input[Union[str, 'LoggerType']] logger_type: Logger type.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_id: Azure Resource Id of a log target (either Azure Event Hub resource or Azure Application Insights resource).
:param pulumi.Input[str] service_name: The name of the API Management service.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LoggerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Logger details.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param LoggerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LoggerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
credentials: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
is_buffered: Optional[pulumi.Input[bool]] = None,
logger_id: Optional[pulumi.Input[str]] = None,
logger_type: Optional[pulumi.Input[Union[str, 'LoggerType']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LoggerArgs.__new__(LoggerArgs)
__props__.__dict__["credentials"] = credentials
__props__.__dict__["description"] = description
__props__.__dict__["is_buffered"] = is_buffered
__props__.__dict__["logger_id"] = logger_id
if logger_type is None and not opts.urn:
raise TypeError("Missing required property 'logger_type'")
__props__.__dict__["logger_type"] = logger_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_id"] = resource_id
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20160707:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20161010:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:Logger"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:Logger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:Logger")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Logger, __self__).__init__(
'azure-native:apimanagement:Logger',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Logger':
"""
Get an existing Logger resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = LoggerArgs.__new__(LoggerArgs)
__props__.__dict__["credentials"] = None
__props__.__dict__["description"] = None
__props__.__dict__["is_buffered"] = None
__props__.__dict__["logger_type"] = None
__props__.__dict__["name"] = None
__props__.__dict__["resource_id"] = None
__props__.__dict__["type"] = None
return Logger(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def credentials(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The name and SendRule connection string of the event hub for azureEventHub logger.
Instrumentation key for applicationInsights logger.
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Logger description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="isBuffered")
def is_buffered(self) -> pulumi.Output[Optional[bool]]:
"""
Whether records are buffered in the logger before publishing. Default is assumed to be true.
"""
return pulumi.get(self, "is_buffered")
@property
@pulumi.getter(name="loggerType")
def logger_type(self) -> pulumi.Output[str]:
"""
Logger type.
"""
return pulumi.get(self, "logger_type")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[Optional[str]]:
"""
Azure Resource Id of a log target (either Azure Event Hub resource or Azure Application Insights resource).
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
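# --- Hedged usage sketch (not part of the generated SDK) ---
# Creates an Application Insights logger on an existing API Management service
# inside a Pulumi program; the resource names and the instrumentation key below
# are illustrative assumptions.
def _example_logger() -> 'Logger':
    return Logger(
        "example-logger",
        logger_type="applicationInsights",
        resource_group_name="example-rg",
        service_name="example-apim",
        description="Forward gateway diagnostics to Application Insights",
        credentials={"instrumentationKey": "00000000-0000-0000-0000-000000000000"})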
|
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from requests.auth import AuthBase
import urlparse
import requests
import json
import logging
class Authorization(AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, request):
request.headers['Authorization'] = self.token
return request
def get_org_guid(name, token, cf_url):
logging.info("looking for guid of organisation: {}".format(name))
filter_q = "name IN {}".format(name)
params = {"q": filter_q}
url = urlparse.urljoin(cf_url, '/v2/organizations')
r = requests.get(url, auth=Authorization(token), params=params)
data = json.loads(r.content)
for row in data['resources']:
logging.debug("found row with name {} and guid {}".format(
row['entity']['name'],
row['metadata']['guid']))
if row['entity']['name'] == name:
return row['metadata']['guid']
return None
def setup_logging(debug=False):
log_format = '%(levelname)s: %(message)s'
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=log_format, level=level)
# silencing requests logger
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
|
# Copyright (c) 2015, Imperial College London
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of their
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# -----------------------------------------------------------------------------
#
# This file is part of the libhpc-deployer Python library, developed as part
# of the libhpc projects (http://www.imperial.ac.uk/lesc/projects/libhpc).
#
# We gratefully acknowledge the Engineering and Physical Sciences Research
# Council (EPSRC) for their support of the projects:
# - libhpc: Intelligent Component-based Development of HPC Applications
# (EP/I030239/1).
# - libhpc Stage II: A Long-term Solution for the Usability, Maintainability
# and Sustainability of HPC Software (EP/K038788/1).
#
# -----------------------------------------------------------------------------
import importlib
import logging
LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M')
logging.getLogger(__name__).setLevel(logging.DEBUG)
__all__ = [
'PLATFORM_CONFIGS',
'SOFTWARE_CONFIGS',
'get_platform_config_class',
    'get_software_config_class',
]
PLATFORM_CONFIGS = {
'OPENSTACK': ('deployer.config.platform.openstack','OpenStackPlatformConfig'),
'OPENSTACK_EC2': ('deployer.config.platform.ec2','EC2PlatformConfig'),
'EC2': ('deployer.config.platform.ec2','EC2PlatformConfig'),
'PBS_PRO': ('deployer.config.platform.pbs','PBSProPlatformConfig'),
'SSH_FORK': ('deployer.config.platform.ssh','SSHPlatformConfig'),
}
SOFTWARE_CONFIGS = {
'LINUX_APT': ('deployer.config.software.linux','LinuxAPTConfig'),
}
def get_platform_config_class(platform):
try:
platform = platform.upper()
config_package, config_class = PLATFORM_CONFIGS[platform]
except KeyError:
LOG.error('No configuration can be found for a platform with '
'name: <%s>' % platform)
return None
try:
mod = importlib.import_module(config_package)
cls = getattr(mod, config_class)
except ImportError:
LOG.error('Error loading the module <%s> specified for the '
'configuration class <%s>.' % (config_package, config_class))
return None
except AttributeError:
LOG.error('Error loading the class <%s> within the configuration '
'package <%s>.' % (config_class, config_package))
return None
return cls
# Takes a string defining the operating system and package manager
# to obtain a software configuration class from.
def get_software_config_class(os_pm):
try:
os_pm_id = os_pm.upper()
config_package, config_class = SOFTWARE_CONFIGS[os_pm_id]
except KeyError:
        LOG.error('No configuration can be found for a software configuration '
'name: <%s>' % os_pm)
return None
try:
mod = importlib.import_module(config_package)
cls = getattr(mod, config_class)
except ImportError:
LOG.error('Error loading the module <%s> specified for the '
'configuration class <%s>.' % (config_package, config_class))
return None
except AttributeError:
LOG.error('Error loading the class <%s> within the configuration '
'package <%s>.' % (config_class, config_package))
return None
return cls
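# --- Hedged usage sketch (not part of the original module) ---
# Resolves configuration classes by name; whether the deployer.config.*
# modules import successfully depends on the installed libhpc-deployer
# package, so the lookups below are purely illustrative.
if __name__ == '__main__':
    platform_cls = get_platform_config_class('openstack')
    software_cls = get_software_config_class('linux_apt')
    LOG.info('Resolved platform config class: %s', platform_cls)
    LOG.info('Resolved software config class: %s', software_cls)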
|
from base import BaseTest
import os
import shutil
import subprocess
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Mockbeat normally
"""
self.render_config_template(
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("Setup Beat"))
proc.check_kill_and_wait()
def test_no_config(self):
"""
Tests starting without a config
"""
exit_code = self.run_beat()
assert exit_code == 1
assert self.log_contains("error loading config file") is True
assert self.log_contains("no such file or directory") is True
def test_invalid_config(self):
"""
Checks stop on invalid config
"""
shutil.copy("../files/invalid.yml",
os.path.join(self.working_dir, "invalid.yml"))
exit_code = self.run_beat(config="invalid.yml")
assert exit_code == 1
assert self.log_contains("error loading config file") is True
def test_invalid_config_cli_param(self):
"""
Checks CLI overwrite actually overwrites some config variable by
writing an invalid value.
"""
self.render_config_template(
console={"pretty": "false"}
)
# first run with default config, validating config being
# actually correct.
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("Setup Beat"))
proc.check_kill_and_wait()
# start beat with invalid config setting on command line
exit_code = self.run_beat(
extra_args=["-E", "output.console=invalid"])
assert exit_code == 1
assert self.log_contains("error unpacking config data") is True
def test_config_test(self):
"""
Checks if -configtest works as expected
"""
shutil.copy("../../_meta/config.yml",
os.path.join(self.working_dir, "libbeat.yml"))
with open(self.working_dir + "/mockbeat.template.json", "w") as f:
f.write('{"template": true}')
with open(self.working_dir + "/mockbeat.template-es2x.json", "w") as f:
f.write('{"template": true}')
exit_code = self.run_beat(
config="libbeat.yml",
extra_args=["-configtest",
"-path.config", self.working_dir])
assert exit_code == 0
assert self.log_contains("Config OK") is True
def test_version_simple(self):
"""
Tests -version prints a version and exits.
"""
self.start_beat(extra_args=["-version"]).check_wait()
assert self.log_contains("beat version") is True
def test_version(self):
"""
Checks if version param works
"""
args = ["../../libbeat.test"]
args.extend(["-version",
"-e",
"-systemTest",
"-v",
"-d", "*",
"-test.coverprofile",
os.path.join(self.working_dir, "coverage.cov")
])
assert self.log_contains("error loading config file") is False
with open(os.path.join(self.working_dir, "mockbeat.log"), "wb") \
as outputfile:
proc = subprocess.Popen(args,
stdout=outputfile,
stderr=subprocess.STDOUT)
exit_code = proc.wait()
assert exit_code == 0
assert self.log_contains("mockbeat") is True
assert self.log_contains("version") is True
assert self.log_contains("9.9.9") is True
def test_console_output_timed_flush(self):
"""
outputs/console - timed flush
"""
self.render_config_template(
console={"pretty": "false"}
)
proc = self.start_beat(logging_args=["-e"])
self.wait_until(lambda: self.log_contains("Mockbeat is alive"),
max_timeout=2)
proc.check_kill_and_wait()
def test_console_output_size_flush(self):
"""
outputs/console - size based flush
"""
self.render_config_template(
console={
"pretty": "false",
"bulk_max_size": 1,
"flush_interval": "1h"
}
)
proc = self.start_beat(logging_args=["-e"])
self.wait_until(lambda: self.log_contains("Mockbeat is alive"),
max_timeout=2)
proc.check_kill_and_wait()
def test_logging_metrics(self):
self.render_config_template(
metrics_period="0.1s"
)
proc = self.start_beat(logging_args=["-e"])
self.wait_until(
lambda: self.log_contains("No non-zero metrics in the last 100ms"),
max_timeout=2)
proc.check_kill_and_wait()
|
import pynbody
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import readcol
import astropy.units as u
import pylab
import itertools as it
from itertools import tee
import warnings
import decimal
import statistics
# numpy.core.defchararray comparison left over from an earlier draft;
# dataset1 and dataset2 are never defined in this script, so it is disabled here.
#import numpy.core.defchararray as npd
#resultdataset = npd.equal(dataset1, dataset2)
# Loading files
#files = readcol.readcol('/media/jillian/cptmarvel/cptmarvel.cosmo25cmb.4096g5HbwK1BH.orbit')
#files = readcol.readcol('/media/jillian/storm/storm.cosmo25cmb.4096g5HbwK1BH.orbit')
#files = readcol.readcol('/data/rogue/rogue.cosmo25cmb.4096g5HbwK1BH.orbit')
#files = readcol.readcol('/media/jillian/h148/h148.cosmo50PLK.3072g3HbwK1BH.orbit')
files = readcol.readcol('/media/jillian/h229/h229.cosmo50PLK.3072gst5HbwK1BH.orbit')
BHID=60353246
#h148=101863739,101864796
#243778457
#rogue307622464
#storm: 243778457,243771992
#cpt marvel:89425759
i =np.where(files[:,0]== BHID)
#print (files[:,0][i])
# The following numbers are from the simulation
m_sol= 1.5928853e16 # justice league simulations
l_kpc = 50000
#m_sol = 2.31e15 # marvelous simulations
#l_kpc = 25000 #marvelous simulations
timee = 38.78 # Time conversion: simulation units time to Gyr
d_timee = 1.22386438e18 # Time conversion: simulation units to seconds
#t_square = d_time ^2
t_square = 1.49784401e36
m_g = 1.989e33 # Sun mass in gram
l_cm = 3.086e21 # kpc to cm
# delta energy
Denergy=( files[:,13][i]* m_sol*( l_kpc**2) *m_g *(l_cm**2))/t_square #units here are ergs
#delta time
Dtime = files[:,14][i]*d_timee
dEdt = Denergy/Dtime
Time=((files[:,1][i]))*timee
#print(Time)
#print(timee)
# Functions:
''' Create 2 parallel iterators (a,b) pointing to the first element of the original iterable.
The second iterator, b is moved 1 step forward (the next(b, None)) call). a points to c0 and b points to c1.
Both a and b can traverse the original iterator independently - the izip function takes the two iterators and makes pairs
of the returned elements, advancing both iterators at the same pace.'''
def pair(iterable):
"c -> (c0,c1), (c1,c2), (c2, c3), ..." # This function creates ordered pairs
a, b = tee(iterable)
next(b, None)
return zip(a, b)
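# Hedged worked example (not part of the original script): consecutive samples
# become (tmin, tmax) interval endpoints, e.g.
#   list(pair([0.0, 0.1, 0.2, 0.3])) == [(0.0, 0.1), (0.1, 0.2), (0.2, 0.3)]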
def float_range(start, stop, step):
while start < stop: # Float Range function
yield float(start)
start += decimal.Decimal(step)
# Time intervals from 0 to 13.8 Gyr in steps of 0.01 Gyr (10 million years);
# materialised as a list so it can be iterated again by combining() and len() below.
intervals = list(pair(float_range(0, 13.8, 0.01)))
# Midpoint of each (tmin, tmax) interval
centers = [(tmin + tmax) / 2. for tmin, tmax in intervals]
def combining(Time,dEdt,intervals):
    # Calculate the mean dE/dt within each time interval
warnings.simplefilter("ignore")
out = []
for tmin, tmax in intervals:
        mask = (Time >= tmin) & (Time < tmax)
        out.append(np.mean(dEdt[mask]))
return np.array(out)
b = len(intervals)
#print(centers)
#print(combining(Time, dEdt, intervals))
combined= combining(Time, dEdt, intervals)
filez = readcol.readcol('h229.dat',fsep=',')
ID=60353246
j= np.where(filez[:,1]==ID )
Time= filez[:,2][j]
BHmass = filez [:,4][j]
#BHmass=np.log10(mass.to_value(u.Msun))
BHDistance= filez[:,5][j]
fig, ax = plt.subplots(3,1,figsize=(10,20))
#fig, (axs1,axs2) = plt.subplots(2, 1)
ax[0].plot(Time, BHDistance, "k", linewidth=2)
#ax[0].set_xlabel("Time 'Gyr'")
ax[0].set_ylabel("BH Distance 'Kpc'")
ax[1].plot(Time, BHmass,"b", linewidth=2)
#ax[0].set_yscale('log')
#plt.yscale(u'log')
#axs[1].semilogy()
ax[1].ticklabel_format(style='sci', axis='y', scilimits=(0,0))
#ax[1].set_xlabel("Time 'Gyr'")
ax[1].set_ylabel("BH Mass ")
ax[2].plot(centers,combined ,'ro', markersize=6)
#plt.scatter(Time, dEdt)
ax[2].set_title(r" $\Delta$E/$\Delta$t vs Time")
#plt.legend(loc = 'upper right')
ax[2].set_xlabel("Time(Gyrs)")
ax[2].set_ylabel(r"$\Delta$E/$\Delta$t(Erg/s)")
plt.yscale('log')
plt.ylim(10e35,10e38)
plt.subplots_adjust(left=None,bottom=None,right=None,top=0.8,wspace=None,hspace=None)
plt.savefig('ruth.png')
plt.show()
|
# -*- coding=utf-8 -*-
# Copyright © Mhank BarBar (Muhamad Royyani)
# Recode? Go ahead
# Please keep improving this tool, even if it is a bit pointless :v
# Sorry if anything errors out haha
# This kind of tool is much sought after by recoders
# Many people sell tools like this
# But here it is free and open source :)
# ------------* Sorry if the code is messy *--------------
from sys import stdout
import subprocess as sp
import os, sys, time, random, base64, marshal, getpass, re, zlib
m = '\x1b[1;91m'
u = '\x1b[1;95m'
h = '\x1b[1;92m'
p = '\x1b[1;37m'
k = '\x1b[1;33m'
b = '\x1b[1;34m'
bm = '\x1b[96m'
about = '''
{}Creator : {}Mhank BarBar
{}About : {}Encrypt And Decrypt Tool
{}Version : {}1.0 (beta)
{}Special thanks to : {}Allah SWT and YOU
{}Code name : {}Tytyd:v
{}Team : {}UNDERGROUND SCIENCE
{}E-mail : {}royani7896@gmail.com
{}Github : {}github.com/MhankBarBar
{}Telegram : {}t.me/MhankBarBar
{}Facebook : {}Upss (Fb gua masih kena cp:v)
{}Date : {}16.49 02-02-2020
{}Region : {}Tangerang,Banten, Indonesia'''.format(p,k,p,k,p,k,p,k,p,k,p,k,p,k,p,k,p,k,p,k,p,k,p,k,p,k)
def chat():
load('Silahkan Tunggu Sebentar---')
os.system('xdg-open https://api.whatsapp.com/send?phone=6285693587969&text=Assalamualaikum+Roy+Lu+Masih+Jual+Janda?')
kunci = '''
{} ▄▀▀▀▄{} *Hello Dude!!
{} █ █{} *Where Are You?
███████ ▄▀▀▄
██─▀─██ █▀█▀▀▀▀█ █
███▄███ ▀ ▀ ▀▀'''.format(m,p,m,p)
def lisensi_enc():
clr()
print kunci
print('{}[{}!{}] {}Upss!! Sorry Dude This Tools Requiress Password'.format(m,p,m,p))
paswd = getpass.getpass('{}[{}×{}]{} Input Password {}>> {}'.format(m,p,m,p,k,p))
if paswd == ('IhhAkuMah'):
jalan('Password Benar ✓ ',0.1)
time.sleep(2)
menu_enc()
elif paswd == (''):
run('Masukkan Password Nya !!')
time.sleep(1.5)
lisensi_enc()
else:
jalan('Password Salah!!',0.1)
chat()
def lisensi_dec():
clr()
print kunci
print('{}[{}!{}] {}Upss!! Sorry Dude This Tools Requiress Password'.format(m,p,m,p))
paswd = getpass.getpass('{}[{}×{}]{} Input Password {}>> {}'.format(m,p,m,p,k,p))
if paswd == ('IhhAkuMah'):
run('Password Benar')
time.sleep(2)
menu_dec()
elif paswd == (''):
run('Masukkan Password Nya !!')
time.sleep(1.5)
lisensi_dec()
else:
run('Password Salah!!')
chat()
try:
from uncompyle6.main import decompile
except Exception as e:
sp.call('pip2 install uncompyle6', shell=True, stderr=sp.STDOUT)
'''
(((((((((((((((((((((LIST COLORS)))))))))))))))))))))
'''
red = '\x1b[31m' # Red
green = '\x1b[32m' # Green
yellow = '\x1b[33m' # Yellow
blue = '\x1b[34m' # Blue
magenta = '\x1b[35m' # Magenta
cyan = '\x1b[36m' # Cyan
white = '\x1b[37m' # White
reset = '\x1b[39m' # Reset colour (back to the default colour)
brblack = '\x1b[90m' # Bright black
R = '\x1b[91m' # Bright red
brgreen = '\x1b[92m' # Bright green
k = '\x1b[93m' # Bright yellow
brblue = '\x1b[94m' # Bright blue
brmgnt = '\x1b[95m' # Bright magenta
brcyan = '\x1b[96m' # Bright cyan
G = '\x1b[97m' # Bright white
'''
# To make text italic, add "3;"
# Example : print('\x1b[3;31mAso')
# The output looks like this = Aso ( but italic :v )
# Just try it if you don't believe it
'''
#((((((((((((((((((((((ANIMATION :V))))))))))))))))))))))
def jalan(z, t):
for e in z:
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(t)
def load(word):
lix = [
'/', '-', '╲', '|']
for i in range(5):
for x in range(len(lix)):
sys.stdout.write(('\r{}{}').format(str(word), lix[x]))
time.sleep(0.2)
sys.stdout.flush()
def banner_dec():
banner = '''
{}:::::::-. .,:::::: .,-::::: ... . : ::::::::::. ::: ::: .,::::::
;;, ``;,;;;;```` ,;;;`````` .;;;;;;;. ;;,. ;;; `;;;```.;;;;;; ;;; ;;;;````
`[[ [[ [[cccc [[[ ,[[ \[[,[[[[, ,[[[[, `]]nnn]]' [[[ [[[ [[cccc
{} $$, $$ $$"""" $$$ $$$, $$$$$$$$$$$"$$$ $$$"" $$$ $$' $$""""
888_,o8P' 888oo,__ `88bo,__,o, "888,_ _,88P888 Y88" 888o 888o 888o88oo,.__ 888oo,__
MMMMP"` """"YUMMM "YUMMMMMP" "YMMMMMP" MMM M' "MMM YMMMb MMM""""YUMMM """"YUMMM'''.format(m,p)
running(banner)
def banner_enc():
banner = '''
{} █▀▀ █▄─█ ▄▀ █▀▀▄ ▀▄─▄▀ █▀▄ ▀█▀
█▀▀ █─▀█ █─ █▐█▀ ──█── █─█ ─█─
▀▀▀ ▀──▀ ─▀ ▀─▀▀ ──▀── █▀─ ─▀─'''.format(m)
running(banner)
def running(s):
try:
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.001)
except (KeyboardInterrupt,EOFError):
run('Nonaktif!!!')
def run(x):
pt = '\x1b[1;37m'
rd = '\x1b[1;37m\x1b[1;31m'
rg = '\x1b[6;32m'
try:
num = 0
while num < 1:
for i, char in enumerate(x):
if i == 0:
print '\r%s%s%s%s' % (rg, char.lower(), rd, x[1:]),
sys.stdout.flush()
else:
if i == 1:
roy = x[0].lower()
print '\r%s%s%s%s%s%s' % (rd, roy, pt, char.lower(), rg, x[2:]),
sys.stdout.flush()
elif i == i:
roy = x[0:i].lower()
print '\r%s%s%s%s%s%s' % (rd, roy, pt, char.lower(), rg, x[i + 1:]),
sys.stdout.flush()
time.sleep(0.07)
num += 1
except:
exit()
def clr():
os.system('clear')
def logo():
banner_enc()
def b_menu():
bm = '''{}╔═════════════════════════════════════════════╗
║{}[{}+{}]{}Coded : .me ║
║{}[{}+{}]{}Github : ║
║{}[{}+{}]{}Tools : Encrypt And Decrypt ║
║{}[{}+{}]{}Create : 2021- 08 - 01 ║
╚═════════════════════════════════════════════╝'''.format(p,m,p,m,p,m,p,m,p,m,p,m,p,m,p,m,p)
jalan(bm,0.001)
def menu():
clr()
b_menu()
print('\n{}Happy Encompile And Decompile Dude :){}'.format(brblue,reset))
running('\n{}[{}1{}]{}.Encrypt\n{}[{}2{}]{}.Decrypt\n{}[{}3{}]{}.Report Bug\n{}[{}4{}]{}.About\n{}[{}5{}]{}.Update Tools'.format(m,p,m,p,m,p,m,p,m,p,m,p,m,p,m,p,m,p,m,p))
fuck = raw_input('{}[{}?{}]{} Choose {}>> {}'.format(m,p,m,p,k,p))
if fuck == '1' or fuck == '01':
load('Silahkan Tunggu Sebentar >--')
menu_enc()
elif fuck == '2' or fuck == '02':
load('Silahkan Tunggu Sebentar >--')
menu_dec()
elif fuck == '':
run('Jangan Kosong Gblk!!')
menu()
elif fuck == '3' or fuck == '03':
jalan(p + 31 * '\xe2\x95\x90' + h + '[' + bm + 'Haii' + h + ']' + p + '>', 0.008)
print '\n' + h + '[' + m + '!' + h + ']' + p + ' Chat Via WhatsApp '
chat = raw_input(h + '[' + k + '?' + h + ']' + p + ' Enter your message : ')
chat.replace(' ', '%20')
load(h + '[' + k + '~' + h + ']' + p + 'Loading please wait ...')
try:
sp.check_output([
'am', 'start',
'https://api.whatsapp.com/send?phone=6285693587969&text=Report : ' + chat + ''])
except:
sys.exit('\n' + h + '[' + k + '!' + h + ']' + p + ' Failed to send message ')
elif fuck == '4' or fuck == '04':
load('Silahkan Tunggu Sebentar >--')
clr()
running(about)
time.sleep(5)
menu()
elif fuck == '5' or fuck == '05':
load('Silahkan Tunggu Sebentat >--')
clr()
os.system('git pull')
else:
run('Pilihannya Cuma 1,2,3,4 & 5 Doang Kontol!!')
menu()
def menu_enc():
clr()
banner_enc()
running('-'*15)
running('{}[{}01{}]{} Encrypt Base16'.format(m,p,m,k))
running('{}[{}02{}]{} Encrypt Base32'.format(m,p,m,k))
running('{}[{}03{}]{} Encrypt Base64'.format(m,p,m,k))
running('{}[{}04{}]{} Encrypt Hex'.format(m,p,m,k))
running('{}[{}05{}]{} Encrypt Marshal'.format(m,p,m,k))
running('{}[{}06{}]{} Compile py > pyc'.format(m,p,m,k))
running('{}[{}07{}]{} Encrypt Marshal Zlib Base64'.format(m,p,m,k))
running('{}[{}08{}]{} Encrypt Zlib '.format(m,p,m,k))
running('{}[{}00{}]{} Exit'.format(m,p,m,k))
running('-'*15)
try:
inp = raw_input('{}[{}??{}]{} Choose {}>>{} '.format(m,p,m,k,h,p))
except (KeyboardInterrupt,EOFError):
run ('Nonaktif!!')
menu()
if inp == '1' or inp == '01':
clr()
Satu()
elif inp == '2' or inp == '02':
clr()
Dua()
elif inp == '3' or inp == '03':
clr()
Tiga()
elif inp == '4' or inp == '04':
clr()
Empat()
elif inp == '5' or inp == '05':
clr()
Lima()
elif inp == '6' or inp == '06':
clr()
pyc()
elif inp == '7' or inp == '07':
clr()
emzb()
elif inp == '8' or inp == '08':
clr()
ezl()
elif inp == '':
run ('Pilih Nomornya Woe!!!')
time.sleep(2)
menu_enc()
elif inp == '0' or inp == '00':
exit()
else:
run ('Salah Memasukkan Pilihan!!')
time.sleep(2)
menu_enc()
def menu_dec():
clr()
banner_dec()
running('-'*15)
running('{}[{}01{}]{} Decrypt base16'.format(m,p,m,k))
running('{}[{}02{}]{} Decrypt base32'.format(m,p,m,k))
running('{}[{}03{}]{} Decrypt base64'.format(m,p,m,k))
running('{}[{}04{}]{} Decrypt Hex'.format(m,p,m,k))
running('{}[{}05{}]{} Decrypt Marshal'.format(m,p,m,k))
running('{}[{}06{}]{} Uncompyle6 pyc > py'.format(m,p,m,k))
running('{}[{}07{}]{} Decrypt Marshal,Zlib,Base64'.format(m,p,m,k))
running('{}[{}08{}]{} Decrypt Zlib'.format(m,p,m,k))
running('{}[{}00{}]{} Exit'.format(m,p,m,k))
running('-'*15)
try:
inp = raw_input('{}[{}??{}]{} Choose {}>>{} '.format(m,p,m,k,h,p))
except (KeyboardInterrupt,EOFError):
run ('Nonaktif!!')
menu()
if inp == '1' or inp == '01':
clr()
Enam()
elif inp == '2' or inp == '02':
clr()
Tujuh()
elif inp == '3' or inp == '03':
clr()
Delapan()
elif inp == '4' or inp == '04':
clr()
Sembilan()
elif inp == '5' or inp == '05':
clr()
unmarsh()
elif inp == '6' or inp == '06':
clr()
unpyc()
elif inp == '7' or inp == '07':
clr()
mzb()
elif inp == '8' or inp == '08':
clr()
zl()
elif inp == '':
run ('Pilih Nomornya Woe!!!')
time.sleep(2)
menu_dec()
elif inp == '0' or inp == '00':
exit()
else:
run ('Salah Memasukkan Pilihan!!')
time.sleep(2)
menu_dec()
def Satu():
clr()
logo()
try:
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
time.sleep(1.5)
Satu()
en = base64.b16encode(bk)
ff = f + 'c'
open(ff, 'w').write('import base64\nexec(base64.b16decode("%s"))' % en)
nm = ('').join(f.split('.')[:1]) + '-enc.py'
os.rename(ff, nm)
run('file berhasil di encrypt menjadi %s ' % nm)
def Dua():
clr()
logo()
try:
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
en = base64.b32encode(bk)
ff = f + 'c'
open(ff, 'w').write('import base64\nexec(base64.b32decode("' + en + '"))')
nm = ('').join(f.split('.')[:1]) + '-enc.py'
os.rename(ff, nm)
run('file berhasil di encrypt menjadi %s ' % nm)
def Tiga():
clr()
logo()
try:
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
en = base64.b64encode(bk)
ff = f + 'c'
open(ff, 'w').write('import base64\nexec(base64.b64decode("' + en + '"))')
nm = ('').join(f.split('.')[:1]) + '-enc.py'
os.rename(ff, nm)
run('file berhasil di encrypt menjadi %s ' % nm)
def Empat():
clr()
logo()
try:
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
en = bk.encode('hex')
ff = f + 'c'
open(ff, 'w').write('exec("' + en + '").decode("hex")')
nm = ('').join(f.split('.')[:1]) + '-enc.py'
os.rename(ff, nm)
run('file berhasil di encrypt menjadi %s ' % nm)
def Lima():
clr()
logo()
try:
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
c = compile(bk, '<roy>', 'exec')
en = marshal.dumps(c)
ff = f + 'c'
open(ff, 'w').write('import marshal\nexec(marshal.loads(' + repr(en) + '))')
nm = ('').join(f.split('.')[:1]) + '-enc.py'
os.rename(ff, nm)
run('file berhasil di encrypt menjadi %s ' % nm)
def emzb():
clr()
logo()
try:
file = raw_input('File: ')
fileopen = open(file).read()
no = compile(fileopen,'aso','exec')
b = marshal.dumps(no)
c = zlib.compress(b)
d = base64.b64encode(c)
e = ('import marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b64decode("' + d + '"))))')
f = file.replace('.py', '-enc.py')
g = open(f, 'w')
g.write(e)
g.close()
run('file berhasil di encrypt menjadi %s ' % f)
raw_input('Tekan Enter Untuk Kembali Ke Menu')
menu()
except IOError:
run('file tidak ditemukan ')
raw_input('Tekan Enter Untuk Kembali Ke Menu')
emzb()
def ezl():
print "Encrypt Zlib"
file = raw_input('File : ')
out = file.replace('.py', '-enc.py')
oa = open(file).read()
xs = zlib.compress(oa)
s = open(out, 'w')
s.write('import zlib\nexec(zlib.decompress(' +repr(xs)+ '))')
s.close()
print ('File saved as '+ out)
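# --- Hedged illustration (added; not part of the original tool) -------------
# A compact round-trip of the marshal+zlib+base64 packing that emzb() above
# writes out and that mzb() later unpacks, using only the stdlib modules this
# script already imports (marshal, zlib, base64) and Python 2 syntax to match
# the rest of the file.
def _demo_mzb_roundtrip(src='print "hi"'):
 code = compile(src, '<demo>', 'exec')  # source text -> code object
 packed = base64.b64encode(zlib.compress(marshal.dumps(code)))
 # this is the one-line loader stub emzb() embeds in the generated "-enc.py":
 stub = 'import marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b64decode("%s"))))' % packed
 return stub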
def Enam():
clr()
banner_dec()
try:
 print 'Dec base64.b16decode'
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
bk = bk.replace('exec(base64.b16decode("', '')
bk = bk.replace('"))', '')
bk = bk.replace('import base64\n', '')
en = base64.b16decode(bk)
ff = f + 'c'
open(ff, 'w').write(en)
nm = ('').join(f.split('.')[:1]) + '-dec.py'
os.rename(ff, nm)
run('file berhasil di decrypt menjadi %s ' % nm)
def Tujuh():
clr()
 banner_dec()
try:
print 'Dec base64.b32decode'
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
bk = bk.replace('exec(base64.b32decode("', '')
bk = bk.replace('"))', '')
bk = bk.replace('import base64\n', '')
en = base64.b32decode(bk)
ff = f + 'c'
open(ff, 'w').write(en)
nm = ('').join(f.split('.')[:1]) + '-dec.py'
os.rename(ff, nm)
run('file berhasil di decrypt menjadi %s ' % nm)
def Delapan():
clr()
banner_dec()
try:
print 'Dec base64.b64decode'
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
 bk = bk.replace('exec(base64.b64decode("', '')
bk = bk.replace('"))', '')
bk = bk.replace('import base64\n', '')
en = base64.b64decode(bk)
ff = f + 'c'
open(ff, 'w').write(en)
nm = ('').join(f.split('.')[:1]) + '-dec.py'
os.rename(ff, nm)
run('file berhasil di decrypt menjadi %s ' % nm)
def Sembilan():
clr()
banner_dec()
try:
print 'Dec hex'
f = raw_input('Filenames: ')
except:
exit()
try:
bk = open(f, 'r').read()
except:
run('file %s tidak ditemukan ' % f)
exit()
 bk = bk.replace('exec("', '').replace("exec('", '')
 bk = bk.replace('".decode("hex"))', '').replace("'.decode('hex'))", '')
 bk = bk.replace('").decode("hex")', '').replace("').decode('hex')", '')
en = str(bk).decode('hex')
ff = f + 'c'
open(ff, 'w').write(en)
nm = ('').join(f.split('.')[:1]) + '-dec.py'
os.rename(ff, nm)
run('file berhasil di decrypt menjadi %s ' % nm)
def unmarsh():
jalan(p + 31 * '\xe2\x95\x90' + h + '[' + bm + 'UNMARSH' + h + ']' + p + '>', 0.008)
print h + '\nMenu ' + p + ':\n [' + h + '1' + p + ']. Automatic Detection Version Script\n [' + h + '2' + p + ']. Back To Menu'
try:
pil = raw_input(h + '[' + k + '?' + h + ']' + p + ' Choice--> ')
except IOError:
unmarsh()
else:
if pil == '1':
pass
elif pil == '2':
menu()
else:
print h + '[' + m + '!' + h + ']' + p + ' Choose the right one'
unmarsh()
cek = 1
try:
print h + '[' + k + '#' + h + ']' + p + ' For Example : /path/marsh.py'
file = raw_input(h + '[' + k + '?' + h + ']' + p + ' Input File : ')
f = open(file, 'r').readlines()
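 # Heuristic note (descriptive comment added): the loader stubs written by the
 # encoders above all start with "exec(marshal.loads(" (19 characters), so the
 # character at index 19 tells the payload style apart: 'b' means a bytes
 # literal (Python 3 marshal), while a quote followed by 'c' (marshal's
 # code-object marker) means a Python 2 payload.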
for i in range(len(f)):
if f[i][0:4] == 'exec':
if f[i][19] == 'b':
cek = 3
elif f[i][20] == 'c':
cek = 2
else:
cek = 1
except IndexError:
print h + '[' + m + '!' + h + ']' + p + ' Program Error!!!'
sys.exit()
except KeyboardInterrupt:
print h + '[' + k + '^' + h + ']' + p + ' ctrl+c \n'
print h + '[' + k + '#' + h + ']' + p + ' Exit!!!\n'
time.sleep(3)
sys.exit()
except EOFError:
print h + '[' + k + '^' + h + ']' + p + ' ctrl+d \n'
print h + '[' + k + '#' + h + ']' + p + ' Exit!!!\n'
time.sleep(3)
sys.exit()
else:
try:
string = open(file, 'r').read()
except IOError:
print '\n' + h + '[' + m + '!' + h + ']' + p + ' File Not Found'
raw_input(h + '[' + k + '^' + h + ']' + p + ' Press Enter to Return to the menu ')
os.system('clear')
menu()
if cek == 2:
py = 'python2'
dec = 'decompile(2.7, x, stdout)'
sys.stdout.write(h + '[' + k + '#' + h + ']')
jalan(p + ' check the script version', 0.1)
time.sleep(1.5)
print '\n' + h + '[' + m + '*' + h + ']' + p + ' python version 2 was detected'
time.sleep(1)
try:
x = re.search('((?<![\\\\])[\\\'"])((?:.(?!(?<![\\\\])\\1))*.?)\\1', string).group()
except Exception as e:
raise e
elif cek == 3:
py = 'python3'
dec = 'decompile(3.8, x, stdout)'
sys.stdout.write(h + '[' + k + '#' + h + ']')
jalan(p + ' check the script version', 0.1)
time.sleep(1.5)
print '\n' + h + '[' + m + '*' + h + ']' + p + ' python version 3 was detected'
time.sleep(1)
try:
x = 'b' + re.search('((?<![\\\\])[\\\'"])((?:.(?!(?<![\\\\])\\1))*.?)\\1', string).group()
except Exception as e:
raise e
else:
 print h + '[' + m + '!' + h + ']' + p + ' File Not Supported'
raw_input(h + '[' + k + '^' + h + ']' + p + ' Press Enter to Return to the menu ')
menu()
fileout = open('un.py', 'w')
fileout.write('from sys import stdout\nfrom uncompyle6.main import decompile\nimport marshal\n\n')
fileout.write('x = marshal.loads(' + x + ')\n')
fileout.write(dec)
fileout.close()
load(h + '[' + k + '#' + h + ']' + p + ' Unmarshal process Wait a minute ...')
sp.call(py + ' un.py > unpyc/dec.py', shell=True, stderr=sp.STDOUT)
os.system('rm un.py')
os.system('clear')
time.sleep(1)
delay = open('unpyc/dec.py', 'r').readlines()
for x in range(len(delay)):
jalan(delay[x], 0.0001)
print '\n\n' + h + '[' + k + '#' + h + ']' + p + ' Successfully Decompiled'
print h + '[' + k + '#' + h + ']' + p + ' file saved : unpyc/dec.py'
ask = raw_input(h + '[' + k + '?' + h + ']' + p + ' Decompile Again? y/t ')
if ask == 'y' or ask == 'Y':
menu()
elif ask == 't' or ask == 'T':
sys.exit()
else:
print h + '[' + m + '!' + h + ']' + p + ' Choose the right one ' + m + '!!!'
raw_input(h + '[' + k + '^' + h + ']' + p + ' Press Enter to Return to the menu ')
os.system('clear')
def pyc():
print m + '[' + p + '#' + m + ']' + p + ' For Example : /path/marsh.py'
f = raw_input(m + '[' + p + '?' + m + ']' + p + ' Enter Your File : ')
from py_compile import compile
compile(f)
load(m + '[' + p + '#' + m + ']' + p + ' Compile process Wait a minute ...')
jalan('\n' + m + '[' + p + '#' + m + ']' + p + ' file successfully compiled', 0.01)
print '\n' + m + '[' + p + '#' + m + ']' + p + (' File Saved: {}c').format(f)
ask = raw_input(m + '[' + p + '?' + m + ']' + p + ' Compile Again? y/t >> ')
if ask == 'y' or ask == 'Y':
menu()
elif ask == 't' or ask == 'T':
sys.exit()
else:
print m + '[' + m + '!' + m + ']' + p + ' Choose the right one ' + m + '!!!'
raw_input(m + '[' + p + '^' + m + ']' + p + ' Press Enter to Return to the menu ')
os.system('clear')
menu()
def unpyc():
print m + '[' + p + '#' + m + ']' + p + ' For Example : /path/file.pyc'
f = raw_input(m + '[' + p + '?' + m + ']' + p + ' Enter Your File : ')
try:
open(f, 'r').read()
except IOError:
print m + '[' + m + '!' + m + ']' + p + ' File Not Found'
raw_input(m + '[' + p + '^' + m + ']' + p + ' Press Enter to Return to the menu ')
menu()
else:
load(m + '[' + p + '#' + m + ']' + p + ' Decompile process Wait a minute ...')
try:
os.system('uncompyle6 ' + f + '> unpyc/jadi.py')
except Exception as e:
 print m + '[' + m + '!' + m + ']' + p + ' Failed to decompile because : ' + str(e)
print '\n\n' + m + '[' + p + '#' + m + ']' + p + ' Successfully Decompiled'
print m + '[' + p + '#' + m + ']' + p + ' file saved : unpyc/jadi.py'
ask = raw_input(m + '[' + p + '?' + m + ']' + p + ' Decompile Again? y/t >> ')
if ask == 'y' or ask == 'Y':
menu()
elif ask == 't' or ask == 'T':
sys.exit()
else:
print m + '[' + m + '!' + m + ']' + p + ' Choose the right one ' + m + '!!!'
raw_input(m + '[' + p + '^' + m + ']' + p + ' Press Enter to Return to the menu ')
os.system('clear')
menu()
def mzb():
print 'Decompile Marshal,Zlib,Base64'
a = raw_input('File : ' )
b = open(a).read().replace('exec(', 'x = ').replace('))))',')))')
note = 'DECOMPILED BY MHANK BARBAR'
c = open('mi.py', 'w')
if 'marshal' in b:
c.write('from sys import stdout\nfrom uncompyle6.main import decompile\n' + b + '\ndecompile(2.7, x, stdout)')
c.close()
elif 'marshal' not in b:
c.write(b + '\nprint (x)')
c.close()
d = a.replace('.py', '-d.py')
os.system('python2 mi.py > ' + d)
e = open(d).read()
f = open(d, 'w')
f.write(e + ' \n\n\n\t' + note )
f.close()
os.system('rm -rf mi.py')
print('\x1b[31;1m[\x1b[0;37m+\x1b[31;1m]\x1b[0;37m File saved as\x1b[32;1m ' + d)
print('Mau Dec Lagi Y or N ?')
cuk = raw_input('Pilih : ')
if cuk == 'y':
mzb()
elif cuk == 'n':
exit()
def zl():
print 'Decompile Zlib'
a = raw_input('File : ')
b = open(a).read().replace('exec', 'print')
c = open('ma.py', 'w')
if 'zlib' in b:
c.write('# Bacod\n' + b + '# Loe Kontol')
c.close()
elif 'zlib' not in b:
c.write(b + '\nprint (print)')
c.close()
d = a.replace('.py', '-d.py')
os.system('python2 ma.py > '+ d)
e = open(d).read().replace('# uncompyle6 version 3.6.2', '# Versi Unkompel 0.0 :v').replace('# Embedded file name: ','# Ini Nih:v ').replace('# Decompiled from: Python 2.7.17 (default, Oct 23 2019, 08:28:22)', '# Decompel By Mhank BarBar Gans').replace('# [GCC 4.2.1 Compatible Android (5220042 based on r346389c) Clang 8.0.7 (https://', 'Halo Om').replace('# Python bytecode 2.7', '# Piton Bitkode 2.7')
f = open(d, 'w')
f.write('# Suksess Decompile ✓ \n'+ e)
f.close()
os.system('rm -rf ma.py')
 print('File saved as '+ d)
sys.exit()
def exit():
run('thanks for using my tools dude :)')
sys.exit()
if __name__ == '__main__':
if os.path.exists('unpyc'):
menu()
else:
os.system('mkdir unpyc')
# logo()
menu()
|
######################################################################
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""
Shopcart Service with UI
Paths:
------
GET / - Displays a usage information for Selenium testing
GET /shopcarts - Returns a list of all the Shopcarts
POST /shopcarts - Creates a new Shopcart record in the database
GET /shopcarts/{id} - Returns the Shopcart with a given id number
DELETE /shopcarts/{id} - Deletes a Shopcart record in the database
PUT /shopcarts/{id}/place-order - Places an order
GET /shopcarts/{id}/items - Gets Shopcart Item list from a Shopcart
POST /shopcarts/{id}/items - Creates a new Shopcart Item record in the database
GET /shopcarts/{id}/items/{item_id} - Returns the Shopcart Item with given id and item_id number
PUT /shopcarts/{id}/items/{item_id} - Updates the Shopcart Item
DELETE /shopcarts/{id}/items/{item_id} - Deletes the Shopcart Item
"""
import os
import json
import requests
from flask import jsonify, request, make_response, abort
from flask.logging import create_logger
from flask_api import status # HTTP Status Codes
from flask_restplus import Api, Resource, fields, reqparse
from service.models import Shopcart, ShopcartItem, DataValidationError
from . import app, constants
# use create_logger function to avoid no-member errors for logger in pylint
logger = create_logger(app)
ORDER_ENDPOINT = os.getenv('ORDER_ENDPOINT',
'https://nyu-order-service-f20.us-south.cf.appdomain.cloud/orders')
######################################################################
# Configure Swagger before initializing it
######################################################################
api = Api(app,
version='1.0.0',
title='Shopcart REST API Service',
description='This is a Shopcart server.',
default='shopcarts',
default_label='Shopcart operations',
doc='/apidocs',
# authorizations=authorizations,
prefix='/api'
)
# Define the model so that the docs reflect what can be sent
shopcart_item_model = api.model('ShopcartItem', {
'id': fields.Integer(readOnly=True,
description='The unique id assigned internally by service'),
'sid': fields.Integer(readOnly=True,
description='The id of the Shopcart this item belongs to'),
'sku': fields.Integer(required=True,
description='The product id'),
'name': fields.String(required=True,
description='The product name'),
'price': fields.Float(required=True,
description='The price for one item'),
'amount': fields.Integer(required=True,
description='The number of product'),
'create_time': fields.DateTime(readOnly=True,
description='The time the record is created'),
'update_time': fields.DateTime(readOnly=True,
description='The time the record is updated')
})
shopcart_model = api.model('Shopcart', {
'id': fields.Integer(readOnly=True,
description='The unique id assigned internally by service'),
'user_id': fields.Integer(required=True,
description='The id of the User'),
'create_time': fields.DateTime(readOnly=True,
description='The time the record is created'),
'update_time': fields.DateTime(readOnly=True,
description='The time the record is updated'),
'items': fields.List(fields.Nested(shopcart_item_model))
})
create_shopcart_model = api.model('Shopcart', {
'user_id': fields.Integer(required=True,
description='The id of the User')
})
create_shopcart_item_model = api.model('ShopcartItem', {
'sku': fields.Integer(required=True,
description='The product id'),
'name': fields.String(required=True,
description='The product name'),
'price': fields.Float(required=True,
description='The price for one item'),
'amount': fields.Integer(required=True,
description='The number of product')
})
# query string arguments
shopcart_args = reqparse.RequestParser()
shopcart_args.add_argument('user_id', type=int, required=False, help='Find Shopcart by User Id')
shopcart_item_args = reqparse.RequestParser()
shopcart_item_args.add_argument('sku',
type=int,
required=False,
help='Find Shopcart Item by Product Id')
shopcart_item_args.add_argument('name',
type=str,
required=False,
help='Find Shopcart Item by Product Name')
shopcart_item_args.add_argument('price',
type=float,
required=False,
help='Find Shopcart Item by Product Price')
shopcart_item_args.add_argument('amount',
type=int,
required=False,
help='Find Shopcart Item by Product Amount')
######################################################################
# Error Handlers
######################################################################
@app.errorhandler(DataValidationError)
def request_validation_error(error):
""" Handles Value Errors from bad data """
return bad_request(error)
@app.errorhandler(status.HTTP_400_BAD_REQUEST)
def bad_request(error):
 """ Handles bad requests with 400_BAD_REQUEST """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_400_BAD_REQUEST, error="Bad Request", message=str(error)
),
status.HTTP_400_BAD_REQUEST,
)
@app.errorhandler(status.HTTP_404_NOT_FOUND)
def not_found(error):
""" Handles resources not found with 404_NOT_FOUND """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_404_NOT_FOUND, error=constants.NOT_FOUND, message=str(error)
),
status.HTTP_404_NOT_FOUND,
)
@app.errorhandler(status.HTTP_405_METHOD_NOT_ALLOWED)
def method_not_supported(error):
 """ Handles unsupported HTTP methods with 405_METHOD_NOT_SUPPORTED """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_405_METHOD_NOT_ALLOWED,
error="Method not Allowed",
message=str(error),
),
status.HTTP_405_METHOD_NOT_ALLOWED,
)
@app.errorhandler(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def mediatype_not_supported(error):
 """ Handles unsupported media requests with 415_UNSUPPORTED_MEDIA_TYPE """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
error="Unsupported media type",
message=str(error),
),
status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
)
######################################################################
# GET HEALTH CHECK
######################################################################
@app.route('/healthcheck')
def healthcheck():
""" Let them know our heart is still beating """
return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
""" Root URL response """
return app.send_static_file('index.html')
######################################################################
# PATH: /shopcarts
######################################################################
@api.route('/shopcarts', strict_slashes=False)
class ShopcartCollection(Resource):
"""LIST ALL Shopcarts"""
@api.doc('list_shopcarts')
@api.expect(shopcart_args, validate=True)
@api.marshal_list_with(shopcart_model)
def get(self):
""" Returns all of the Shopcarts """
logger.info('Request to list Shopcarts...')
args = shopcart_args.parse_args()
if args['user_id']:
logger.info('Find by user')
shopcarts = Shopcart.find_by_user(args['user_id'])
else:
logger.info('Find all')
shopcarts = Shopcart.all()
results = [shopcart.serialize() for shopcart in shopcarts]
for shopcart in results:
items = ShopcartItem.find_by_shopcartid(shopcart["id"])
shopcart["items"] = [item.serialize() for item in items]
logger.info('[%s] Shopcarts returned', len(results))
return results, status.HTTP_200_OK
# ------------------------------------------------------------------
# ADD A NEW Shopcart
# ------------------------------------------------------------------
@api.doc('create_shopcarts')
@api.expect(create_shopcart_model)
@api.response(400, 'The posted data was not valid')
@api.response(201, 'Shopcart created successfully')
@api.marshal_with(shopcart_model, code=201)
def post(self):
""" Create a Shopcart """
logger.info("Request to create a shopcart")
check_content_type("application/json")
logger.debug('Payload = %s', api.payload)
shopcart = None
if 'user_id' in api.payload:
shopcart = Shopcart.find_by_user(api.payload['user_id']).first()
if shopcart is None:
shopcart = Shopcart()
shopcart.deserialize(api.payload)
shopcart.create()
logger.info("Shopcart with ID [%s] created.", shopcart.id)
location_url = api.url_for(ShopcartResource, shopcart_id=shopcart.id, _external=True)
shopcart_result = shopcart.serialize()
items = ShopcartItem.find_by_shopcartid(shopcart_result["id"])
shopcart_result["items"] = [item.serialize() for item in items]
return shopcart_result, status.HTTP_201_CREATED, {"Location": location_url}
######################################################################
# G E T A N D D E L E T E S H O P C A R T
######################################################################
@api.route('/shopcarts/<int:shopcart_id>')
@api.param('shopcart_id', 'The Shopcart identifier')
class ShopcartResource(Resource):
"""
ShopcartResource class
Allows the manipulation of a single shopcart
 GET /shopcarts/{id} - Returns a shopcart with the id
 DELETE /shopcarts/{id} - Deletes a shopcart with the id
"""
@api.doc('get_shopcart')
@api.response(404, 'Shopcart not found')
@api.response(200, 'Shopcart returned successfully')
@api.marshal_with(shopcart_model)
def get(self, shopcart_id):
"""
Gets information about a Shopcart
This endpoint will get information about a shopcart
"""
logger.info("Request to get information of a shopcart")
shopcart = Shopcart.find(shopcart_id)
if shopcart is None:
logger.info("Shopcart with ID [%s] not found.", shopcart_id)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart with id '{}' was not found.".format(shopcart_id)
)
shopcart_items = ShopcartItem.find_by_shopcartid(shopcart_id)
response = shopcart.serialize()
response["items"] = [item.serialize() for item in shopcart_items]
logger.info("Shopcart with ID [%s] fetched.", shopcart.id)
return response, status.HTTP_200_OK
@api.doc('delete_shopcart')
@api.response(204, 'Shopcart has been deleted')
def delete(self, shopcart_id):
"""
Delete a Shopcart
This endpoint will delete a Shopcart based the id specified in the path
"""
logger.info('Request to delete Shopcart with id: %s', shopcart_id)
item = Shopcart.find(shopcart_id)
if item:
item.delete()
logger.info('Shopcart with id: %s has been deleted', shopcart_id)
return make_response("", status.HTTP_204_NO_CONTENT)
######################################################################
# G E T S H O P C A R T I T E M
######################################################################
@api.route('/shopcarts/<int:shopcart_id>/items/<int:item_id>')
@api.param('shopcart_id', 'The Shopcart identifier')
@api.param('item_id', 'The Shopcart Item identifier')
class ShopcartItemResource(Resource):
"""
 ShopcartItemResource class
 Allows the manipulation of a single shopcart item
 GET /shopcarts/{id}/items/{item_id} - Returns a shopcart Item with the id
 DELETE /shopcarts/{id}/items/{item_id} - Deletes a shopcart Item with the id
 PUT /shopcarts/{id}/items/{item_id} - Updates a shopcart Item with the id
"""
@api.doc('get_shopcart_item')
@api.response(404, 'Shopcart Item not found')
@api.response(200, 'Shopcart Item found')
@api.marshal_with(shopcart_item_model, code=200)
def get(self, shopcart_id, item_id):
"""
Get a shopcart item
This endpoint will return an item in the shop cart
"""
logger.info("Request to get an item in a shopcart")
shopcart_item = ShopcartItem.find(item_id)
if shopcart_item is None or shopcart_item.sid != shopcart_id:
logger.info(
"Shopcart item with ID [%s] not found in shopcart [%s].", item_id, shopcart_id
)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart item with ID [%s] not found in shopcart [%s]." % (item_id, shopcart_id)
)
logger.info("Fetched shopcart item with ID [%s].", item_id)
return shopcart_item.serialize(), status.HTTP_200_OK
@api.doc('update_shopcart_item')
@api.response(404, 'Shopcart Item not found')
@api.response(400, 'The posted Item data was not valid')
@api.response(200, 'Shopcart Item updated')
@api.expect(shopcart_item_model)
@api.marshal_with(shopcart_item_model, code=200)
def put(self, shopcart_id, item_id):
"""
Update a Shopcart item
This endpoint will update a Shopcart item based the body that is posted
"""
logger.info("Request to update Shopcart item with id: %s", item_id)
check_content_type("application/json")
shopcart_item = ShopcartItem.find(item_id)
if shopcart_item is None or shopcart_item.sid != shopcart_id:
logger.info(
"Shopcart item with ID [%s] not found in shopcart [%s].", item_id, shopcart_id
)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart item with id '{}' was not found.".format(item_id)
)
data = api.payload
data["sid"] = shopcart_id
data["id"] = item_id
shopcart_item.deserialize(data)
shopcart_item.update()
logger.info("Shopcart item with ID [%s] updated.", shopcart_item.id)
return shopcart_item.serialize(), status.HTTP_200_OK
@api.doc('delete_shopcart_item')
@api.response(204, 'Shopcart Item has been deleted')
def delete(self, shopcart_id, item_id):
"""
Delete a ShopcartItem
This endpoint will delete a ShopcartItem based the id specified in the path
"""
logger.info(
'Request to delete ShopcartItem with id: %s from Shopcart %s', item_id, shopcart_id
)
shopcart_item = ShopcartItem.find(item_id)
if shopcart_item is not None and shopcart_item.sid == shopcart_id:
shopcart_item.delete()
logger.info('ShopcartItem with id: %s has been deleted', item_id)
return "", status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /shopcarts/:id/items
######################################################################
@api.route('/shopcarts/<int:shopcart_id>/items', strict_slashes=False)
@api.param('shopcart_id', 'The Shopcart identifier')
class ShopcartItemCollection(Resource):
""" Handles all interactions with collections of Shopcart Items """
@api.doc('list_shopcart_items')
@api.response(200, 'Shopcart Items returned successfully')
@api.marshal_list_with(shopcart_item_model)
def get(self, shopcart_id):
"""
Get information of a shopcart
This endpoint will return items in the shop cart
"""
logger.info("Request to get items in a shopcart")
shopcart_items = ShopcartItem.find_by_shopcartid(shopcart_id)
result = [item.serialize() for item in shopcart_items]
logger.info("Fetched items for Shopcart with ID [%s].", shopcart_id)
return result, status.HTTP_200_OK
@api.doc('create_shopcart_item')
@api.response(201, 'Shopcart Items has been created')
@api.response(400, 'The posted data was not valid')
@api.expect(shopcart_item_model)
@api.marshal_with(shopcart_item_model, code=201)
def post(self, shopcart_id):
"""
Create a new Shopcart Item
"""
logger.info("Request to create a shopcart item")
check_content_type("application/json")
shopcart_item = ShopcartItem()
data = request.get_json()
if "id" in data:
data.pop("id")
data["sid"] = shopcart_id
shopcart_item.deserialize(data)
shopcart_item.add()
location_url = api.url_for(ShopcartItemResource,
shopcart_id=shopcart_item.sid, item_id=shopcart_item.id,
_external=True)
logger.info("ShopcartItem with ID [%s] created.", shopcart_item.id)
return shopcart_item, status.HTTP_201_CREATED, {"Location": location_url}
######################################################################
# PATH: /shopcarts/items
######################################################################
@api.route('/shopcarts/items', strict_slashes=False)
class ShopcartItemQueryCollection(Resource):
"""LIST ALL Shopcart Items or Query by sku, name, price, or amount"""
@api.doc('list_shopcart_items')
@api.expect(shopcart_item_args, validate=True)
@api.marshal_list_with(shopcart_item_model)
def get(self):
""" Returns all of the ShopcartItems """
logger.info('Request to list ShopcartItems...')
args = shopcart_item_args.parse_args()
if args['sku']:
logger.info('Find by sku')
shopcart_items = ShopcartItem.find_by_sku(args['sku'])
elif args['name']:
logger.info('Find by name')
shopcart_items = ShopcartItem.find_by_name(args['name'])
elif args['price']:
logger.info('Find by price')
shopcart_items = ShopcartItem.find_by_price(args['price'])
elif args['amount']:
logger.info('Find by amount')
shopcart_items = ShopcartItem.find_by_amount(args['amount'])
else:
logger.info('Find all')
shopcart_items = ShopcartItem.all()
results = [shopcart_item.serialize() for shopcart_item in shopcart_items]
logger.info('[%s] Shopcart Items returned', len(results))
return results, status.HTTP_200_OK
######################################################################
# PATH: /shopcarts/{id}/place-order
######################################################################
@api.route('/shopcarts/<int:shopcart_id>/place-order')
@api.param('shopcart_id', 'The Shopcart identifier')
class PlaceOrderResource(Resource):
""" Place Order action on a Shopcart"""
@api.doc('place_order')
@api.response(404, 'Shopcart not found or is empty')
@api.response(400, 'Unable to place order for shopcart')
@api.response(204, 'Shopcart has been deleted')
def put(self, shopcart_id):
"""
Place Order for a Shopcart
This endpoint will place an order for a Shopcart based the id specified in the path
"""
logger.info('Request to place order for Shopcart with id: %s', shopcart_id)
shopcart = Shopcart.find(shopcart_id)
if not shopcart:
 logger.info("Shopcart with ID [%s] does not exist.", shopcart_id)
api.abort(
  status.HTTP_404_NOT_FOUND,
  "Shopcart with ID [%s] does not exist." % shopcart_id
)
shopcart_items = ShopcartItem.find_by_shopcartid(shopcart_id)
if shopcart_items is None or len(shopcart_items) == 0:
logger.info("Shopcart with ID [%s] is empty.", shopcart_id)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart with ID [%s] is empty." % shopcart_id
)
shopcart_items_list = [item.serialize() for item in shopcart_items]
# once we have the list of shopcart items we can send in JSON format to the orders team
#add the order status as PLACED for a new order
order_items= []
for item in shopcart_items_list:
order_item = {}
order_item["item_id"] = int(item["id"])
order_item["product_id"] = int(item["sku"])
order_item["quantity"] = int(item["amount"])
order_item["price"] = item["price"]
order_item["status"] = "PLACED"
order_items.append(order_item)
order = {
"customer_id": int(shopcart.serialize()["user_id"]),
"order_items": order_items,
}
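 # Illustrative payload shape only (added comment); the values are made up:
 # {"customer_id": 101,
 #  "order_items": [{"item_id": 1, "product_id": 5000, "quantity": 3,
 #                   "price": 2.5, "status": "PLACED"}]}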
payload = json.dumps(order)
headers = {'content-type': 'application/json'}
res = requests.post(
ORDER_ENDPOINT, data=payload, headers=headers
)
logger.info('Put Order response %d %s', res.status_code, res.text)
if res.status_code != 201:
api.abort(
status.HTTP_400_BAD_REQUEST,
"Unable to place order for shopcart [%s]." % shopcart_id
)
shopcart.delete()
logger.info('Shopcart with id: %s has been deleted', shopcart_id)
return make_response("", status.HTTP_204_NO_CONTENT)
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def check_content_type(content_type):
""" Checks that the media type is correct """
if 'Content-Type' not in request.headers:
logger.error('No Content-Type specified.')
abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
'Content-Type must be {}'.format(content_type))
if request.headers['Content-Type'] == content_type:
return
logger.error('Invalid Content-Type: %s', request.headers['Content-Type'])
abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, 'Content-Type must be {}'.format(content_type))
def init_db():
 """ Initializes the SQLAlchemy app """
Shopcart.init_db(app)
ShopcartItem.init_db(app)
logger.info("Database has been initialized!")
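# --- Hedged usage sketch (added; not part of the original service) ----------
# A minimal client-side walkthrough of the REST paths documented at the top of
# this module, driven with the `requests` library imported above. The base URL
# is an assumption (override via SHOPCART_BASE_URL); the __main__ guard keeps
# it from running when the service imports this module.
if __name__ == '__main__':  # pragma: no cover
 base = os.getenv('SHOPCART_BASE_URL', 'http://localhost:5000/api')
 # create (or fetch) the shopcart for user 1
 cart = requests.post(base + '/shopcarts', json={'user_id': 1}).json()
 # add an item to it
 new_item = {'sku': 5000, 'name': 'soap', 'price': 2.5, 'amount': 3}
 requests.post('{}/shopcarts/{}/items'.format(base, cart['id']), json=new_item)
 # read the cart back, items included
 print(requests.get('{}/shopcarts/{}'.format(base, cart['id'])).json())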
|
print(
'-----------------------------------------\n'\
'Practical python education || Exercise-15:\n'\
'-----------------------------------------\n'
)
print(
'Task:\n'\
'-----------------------------------------\n'\
 'Write a Python program to get the volume of a sphere with radius 6.\n'
)
print(
'Solution:\n'\
'-----------------------------------------'\
)
from math import pi
r = 6.0
d = r*2
V1 = 4.0/3.0 * pi * r**3
V2 = pi * (d**3 / 6.0)
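# Worked check (added): with r = 6, V = 4/3 * pi * 6**3 = 288*pi ≈ 904.779 m^3;
# V1 and V2 agree because substituting d = 2r into pi*d**3/6 gives back 4/3*pi*r**3.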
print('Volume of a sphere with radius 6(m) = ', V1, '(m^3)')
print('Volume of a sphere with diameter 12(m) = ', V2, '(m^3)')
print(
'\n-----------------------------------------\n'\
'Copyright 2018 Vladimir Pavlov. All Rights Reserved.\n'\
'-----------------------------------------'
)
|
#!/usr/bin/env python
import argparse
import sys
import os
import json
from collections import defaultdict
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
"""A script to add/subtract auth from the data-store's swagger file."""
default_auth = {"/files/{uuid}": ["put"],
"/subscriptions": ["get", "put"],
"/subscriptions/{uuid}": ["get", "delete"],
"/collections": ["get", "put"],
"/collections/{uuid}": ["get", "patch", "delete"],
"/bundles/{uuid}": ["put", "patch", "delete"]
}
# all endpoints
full_auth = {"/search": ["post"],
"/files/{uuid}": ["head", "get", "put"],
"/subscriptions": ["get", "put"],
"/subscriptions/{uuid}": ["get", "delete"],
"/collections": ["get", "put"],
"/collections/{uuid}": ["get", "patch", "delete"],
"/bundles/all": ["get"],
"/bundles/{uuid}": ["get", "put", "patch", "delete"],
"/bundles/{uuid}/checkout": ["post"],
"/bundles/checkout/{checkout_job_id}": ["get"],
"/events": ["get"],
"/events/{uuid}": ["get"],
"/ga4gh/drs/v1/objects/{object_id}": ["get"],
}
class SecureSwagger(object):
def __init__(self, infile: str=None, outfile: str=None, auth_name: str=None, config: dict=None):
"""
A class for modifying a swagger yml file with auth on endpoints specified in
a config file.
 :param infile: Swagger yml file.
 :param outfile: The name of the generated swagger yml file (defaults to the same file).
 :param auth_name: The name of the security definition applied to protected endpoints.
 :param config: A dict mapping api endpoints to the calls that need auth.
"""
 # used to track which section we're in when parsing the yml
self.path_section = False # bool flag to notify if we're in the section containing the API call definitions
self.call_section = None # an api endpoint, e.g.: /subscription, /file/{uuid}, etc.
self.request_section = None # a request call, e.g.: get, put, delete, etc.
self.infile = infile or os.path.join(pkg_root, 'dss-api.yml')
self.intermediate_file = os.path.join(pkg_root, 'tmp.yml')
self.outfile = outfile or os.path.join(pkg_root, 'dss-api.yml')
self.auth_name = auth_name
self.config = default_auth if config is None else config
for endpoint in self.config:
if not isinstance(self.config[endpoint], list):
raise TypeError('Auth config dict keys are strings, values are lists of strings. '
'Example: {"/search": ["put"]}. Check your input!')
self.security_endpoints = defaultdict(list)
def security_line(self, line: str, checking_flags: bool, all_endpoints=False):
"""
Checks a line from the swagger/yml file and updates section values appropriately.
If checking_flags is True, this will return True/False:
True if a call/path matches one in self.security_endpoints.
False otherwise.
If checking_flags is False, this will create the self.security_endpoints dictionary:
If all_endpoints=True, self.security_endpoints will include all endpoints in the swagger file.
If all_endpoints=False, self.security_endpoints will include only auth endpoints in the swagger file.
"""
# If not indented at all, we're in a new section, so reset.
if not line.startswith(' ') and self.path_section and line.strip() != '':
self.path_section = False
# Check if we're in the paths section.
if line.startswith('paths:'):
self.path_section = True
# Check if we're in an api path section.
elif line.startswith(' /') and line.strip().endswith(':'):
self.parse_api_section(line, checking_flags)
# Check for an endpoint's security flag
elif line.startswith(' security:'):
if not checking_flags and not all_endpoints:
# If we're checking for secured endpoints only, record the path and call.
self.security_endpoints[self.call_section].append(self.request_section)
# If properly indented and we're in the correct 2 sections, this will be a call request.
elif self.call_indent(line) and self.path_section and self.call_section and line.strip().endswith(':'):
if checking_flags:
for call in self.call_section:
 # Verify it's one of the specified calls we need to secure.
if line.startswith(f' {call}:'):
return True
else:
self.request_section = line.strip()[:-1]
if all_endpoints:
# If we're checking for all endpoints present, record the path and call.
self.security_endpoints[self.call_section].append(self.request_section)
@staticmethod
def call_indent(line: str) -> bool:
return line.startswith(' ') and not line.startswith(' ')
def parse_api_section(self, line: str, checking_flags: bool) -> None:
if checking_flags:
self.call_section = None
for api_path in self.security_endpoints:
# Make sure it's one of the specified api paths, otherwise ignore.
if line.startswith(f' {api_path}:'):
self.call_section = self.security_endpoints[api_path]
else:
self.call_section = line.strip()[:-1]
def make_swagger_from_authconfig(self) -> None:
"""Modify a swagger file's auth based on a config dict."""
self.security_endpoints = self.config
# generate a new swagger as an intermediate file
with open(self.intermediate_file, 'w') as w:
with open(self.infile, 'r') as r:
for line in r:
# ignore security lines already in the swagger yml
if not (line.startswith(' security:') or line.startswith(f' - {self.auth_name}: []')):
w.write(line)
# returns true based on config file paths
if self.security_line(line, checking_flags=True):
w.write(' security:\n')
w.write(f' - {self.auth_name}: []\n')
# the contents of the intermediate file become the contents of the output file
if os.path.exists(self.outfile):
os.remove(self.outfile)
os.rename(self.intermediate_file, self.outfile)
def get_authconfig_from_swagger(self, all_endpoints=False):
"""
Return a dictionary representing the endpoints that have auth enabled in a swagger file.
If all_endpoints is True, instead return all endpoints, not just those with auth.
"""
self.security_endpoints = defaultdict(list)
with open(self.infile, 'r') as f:
for line in f:
self.security_line(line, checking_flags=False, all_endpoints=all_endpoints)
return self.security_endpoints
def ensure_auth_defaults_are_still_set():
"""To be run on travis to make sure that no one makes a PR with a custom swagger accidentally."""
if SecureSwagger().get_authconfig_from_swagger() != default_auth:
raise TypeError('Swagger file auth does not match defaults. Please modify accordingly.')
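# --- Hedged illustration (added; not part of the original script) -----------
# A programmatic counterpart to the `--secure` CLI flag handled in main() below:
# rewrite the default swagger file so that every endpoint in `full_auth`
# requires auth, then read the resulting auth map back. 'OauthSecurity' mirrors
# the argparse default; file locations are the script's own defaults.
def example_reapply_full_auth():
 SecureSwagger(auth_name='OauthSecurity', config=full_auth).make_swagger_from_authconfig()
 return SecureSwagger().get_authconfig_from_swagger()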
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description='Swagger Security Endpoint')
parser.add_argument('-i', '--input_swagger', dest="input_swagger", default=None,
help='An input swagger yaml file/path that will be modified to contain '
'new security auth based on the input config.')
parser.add_argument('-o', '--output_swagger', dest="output_swagger", default=None,
help='The file/path of the swagger output yaml.')
parser.add_argument('-c', '--config_security', dest="config_security", default=default_auth,
type=json.loads, help='''A dict of API calls stating which calls to add
auth to. For example: -s='{"/path": "call"}'.''')
parser.add_argument('-s', '--secure', dest="secure", default=False, action='store_true',
help='Change the swagger file to include auth on all endpoints.')
parser.add_argument('-t', '--travis', dest="travis", action='store_true', default=False,
help='Run on travis to check that swagger has default auth.')
parser.add_argument('-n', '--auth-name', dest="auth_name", default="OauthSecurity",
help='The name of the security definition being added to protect endpoints.')
o = parser.parse_args(argv)
if o.travis:
ensure_auth_defaults_are_still_set()
else:
config = full_auth if o.secure else o.config_security
s = SecureSwagger(o.input_swagger, o.output_swagger, o.auth_name, config)
s.make_swagger_from_authconfig()
if __name__ == '__main__':
main(sys.argv[1:])
|
'''An example of functional test.
Note that the declarations
`
MASTER = MasterAccount()
HOST = Account()
ALICE = Account()
BOB = Account()
CAROL = Account()
`
are redundant: they are in place to satisfy the linter, which complains about
dynamically created objects.
'''
import unittest
from eosfactory.eosf import *
verbosity([Verbosity.INFO, Verbosity.OUT, Verbosity.TRACE, Verbosity.DEBUG])
CONTRACT_WORKSPACE = "_iqhgcqllgnpkirjwwkms"
# Actors of the test:
MASTER = MasterAccount()
HOST = Account()
ALICE = Account()
BOB = Account()
CAROL = Account()
class Test(unittest.TestCase):
'''Unittest class definition.
'''
@classmethod
def setUpClass(cls):
SCENARIO('''
Create a contract from template, then build and deploy it.
Also, initialize the token and run a couple of transfers between different accounts.
''')
reset()
create_master_account("MASTER")
COMMENT('''
Create test accounts:
''')
create_account("ALICE", MASTER)
create_account("BOB", MASTER)
create_account("CAROL", MASTER)
def test_functionality(self):
COMMENT('''
Create, build and deploy the contract:
''')
create_account("HOST", MASTER)
smart = Contract(HOST, project_from_template(
CONTRACT_WORKSPACE, template="eosio_token", remove_existing=True))
smart.build()
smart.deploy()
COMMENT('''
Initialize the token and send some tokens to one of the accounts:
''')
HOST.push_action(
"create",
{
"issuer": MASTER,
"maximum_supply": "1000000000.0000 EOS",
"can_freeze": "0",
"can_recall": "0",
"can_whitelist": "0"
},
force_unique=True,
permission=[(MASTER, Permission.ACTIVE), (HOST, Permission.ACTIVE)])
print("'trace[\"console\"]' sum is '{}'".format(HOST.action.console))
logger.DEBUG(HOST.action.act)
HOST.push_action(
"issue",
{
"to": ALICE, "quantity": "100.0000 EOS", "memo": ""
},
force_unique=True,
permission=(MASTER, Permission.ACTIVE))
print("'trace[\"console\"]' sum is '{}'".format(HOST.action.console))
logger.DEBUG(HOST.action.act)
COMMENT('''
Execute a series of transfers between the accounts:
''')
HOST.push_action(
"transfer",
{
"from": ALICE, "to": CAROL,
"quantity": "25.0000 EOS", "memo":""
},
force_unique=True,
permission=(ALICE, Permission.ACTIVE))
logger.DEBUG(HOST.action.act)
HOST.push_action(
"transfer",
{
"from": CAROL, "to": BOB,
"quantity": "11.0000 EOS", "memo": ""
},
permission=(CAROL, Permission.ACTIVE))
logger.DEBUG(HOST.action.act)
HOST.push_action(
"transfer",
{
"from": CAROL, "to": BOB,
"quantity": "2.0000 EOS", "memo": ""
},
force_unique=True,
permission=(CAROL, Permission.ACTIVE))
logger.DEBUG(HOST.action.act)
HOST.push_action(
"transfer",
{
"from": BOB, "to": ALICE, \
"quantity": "2.0000 EOS", "memo":""
},
force_unique=True,
permission=(BOB, Permission.ACTIVE))
logger.DEBUG(HOST.action.act)
COMMENT('''
Verify the outcome:
''')
table_ALICE = HOST.table("accounts", ALICE)
table_BOB = HOST.table("accounts", BOB)
table_CAROL = HOST.table("accounts", CAROL)
self.assertEqual(
table_ALICE.json["rows"][0]["balance"], '77.0000 EOS',
'''assertEqual(table_ALICE.json["rows"][0]["balance"], '77.0000 EOS')''')
self.assertEqual(
table_BOB.json["rows"][0]["balance"], '11.0000 EOS',
'''assertEqual(table_BOB.json["rows"][0]["balance"], '11.0000 EOS')''')
self.assertEqual(
table_CAROL.json["rows"][0]["balance"], '12.0000 EOS',
'''assertEqual(table_CAROL.json["rows"][0]["balance"], '12.0000 EOS')''')
@classmethod
def tearDownClass(cls):
stop()
if __name__ == "__main__":
unittest.main()
|
import sys
sys.path.insert(0, '/home/rhou/caffe/python')
import caffe
import numpy as np
from os import mkdir
from os.path import exists, join
import cv2
import matplotlib.pyplot as plt
class DataLayer():
def __init__(self, net, model):
self._batch_size = 1
self._depth = 8
self._height = 240
self._width = 320
caffe.set_mode_gpu()
self._net = caffe.Net(net, model, caffe.TEST)
self._net.blobs['data'].reshape(1, 3, 8, self._height, self._width)
self.images = self.load_images()
def load_images(self):
import glob
files = glob.glob('images/*.jpg')
return np.array([cv2.resize(cv2.imread(f), (320, 240)) for f in files])
def show(self):
num_frames = self.images.shape[0]
num_clips = num_frames // self._depth
for i in range(num_clips):
curr_clip = self.images[i * self._depth:i * self._depth + self._depth]
batch_clip = curr_clip.transpose((3, 0, 1, 2))
self._net.blobs['data'].data[0] = batch_clip
self._net.forward()
curr_prob = self._net.blobs['prob'].data[0, 1, :, :, :]
curr_prediction = curr_prob > 0.32
plt.subplot(241)
plt.imshow(curr_prediction[0] * 1.0)
plt.subplot(242)
plt.imshow(curr_prediction[1] * 1.0)
plt.subplot(243)
plt.imshow(curr_prediction[2] * 1.0)
plt.subplot(244)
plt.imshow(curr_prediction[3] * 1.0)
plt.subplot(245)
plt.imshow(curr_prediction[4] * 1.0)
plt.subplot(246)
plt.imshow(curr_prediction[5] * 1.0)
plt.subplot(247)
plt.imshow(curr_prediction[6] * 1.0)
plt.subplot(248)
plt.imshow(curr_prediction[7] * 1.0)
plt.show()
if __name__ == '__main__':
net = 'models/davis/c3d_deconv_test.prototxt'
if len(sys.argv) > 1:
model = sys.argv[1]
else:
import urllib
urllib.urlretrieve ("http://www.cs.ucf.edu/~rhou/files/davis_240_320.caffemodel", "davis_240_320.caffemodel")
model = 'davis_240_320.caffemodel'
d = DataLayer(net, model)
d.show()
|
"""Testing methods that need Handle server read access"""
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import requests
import json
import mock
import b2handle
from b2handle.handleclient import EUDATHandleClient
from b2handle.handleexceptions import *
# Load some data that is needed for testing
PATH_RES = b2handle.util.get_neighbour_directory(__file__, 'resources')
RESOURCES_FILE = json.load(open(PATH_RES+'/testvalues_for_integration_tests_IGNORE.json'))
# This file is not public, as it contains valid credentials for server
# write access. However, by providing such a file, you can run the tests.
# A template can be found in resources/testvalues_for_integration_tests_template.json
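# --- Hedged illustration (added) --------------------------------------------
# A minimal shape for that JSON file, inferred only from the keys this module
# reads below; every value is a placeholder, not a real credential:
# {
#   "handle_for_read_tests": "21.T14999/TEST_HANDLE",
#   "handle_globally_resolvable": "21.T14999/GLOBAL_HANDLE",
#   "user": "300:21.T14999/USER01",
#   "password": "<write-access password>",
#   "handle_server_url_read": "https://handle.example.org:8443",
#   "handle_server_url_write": "https://handle.example.org:8443",
#   "url_extension_REST_API": "/api/handles/",
#   "HTTPS_verify": true
# }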
class EUDATHandleClientReadaccessTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
# Read resources from file:
self.testvalues = RESOURCES_FILE
# Test values that need to be given by user:
self.handle = self.testvalues['handle_for_read_tests']
self.handle_global = self.testvalues['handle_globally_resolvable']
self.user = self.testvalues['user']
# Optional:
self.https_verify = True
if 'HTTPS_verify' in self.testvalues:
self.https_verify = self.testvalues['HTTPS_verify']
self.url = 'http://hdl.handle.net'
if 'handle_server_url_read' in self.testvalues.keys():
self.url = self.testvalues['handle_server_url_read']
self.path_to_api = None
if 'url_extension_REST_API' in self.testvalues.keys():
self.path_to_api = self.testvalues['url_extension_REST_API']
# Others
prefix = self.handle.split('/')[0]
self.inexistent_handle = prefix+'/07e1fbf3-2b72-430a-a035-8584d4eada41'
self.randompassword = 'some_random_password_shrgfgh345345'
def setUp(self):
""" For most test, provide a client instance with the user-specified
handle server url."""
self.inst = EUDATHandleClient(
HTTPS_verify=self.https_verify,
handle_server_url=self.url,
url_extension_REST_API=self.path_to_api)
# Before being able to run these tests without write access,
# the handle that we use for testing must exist. With this code,
# you can create it. You only need to create it once and leave it
# on the server, it will not be modified and can be used eternally.
if False:
# This should always be false!!! Except for creating the
# required handle once!
self.create_required_test_handles()
def tearDown(self):
  pass
def create_required_test_handles(self):
# Creating an instance that knows how to write:
pw = self.testvalues['password']
inst = EUDATHandleClient.instantiate_with_username_and_password(
self.testvalues['handle_server_url_write'],
self.user,
pw,
HTTPS_verify=self.https_verify)
authstring = b2handle.utilhandle.create_authentication_string(self.user, pw)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Basic '+authstring
}
list_of_all_entries = [
{
"index":100,
"type":"HS_ADMIN",
"data":{
"format":"admin",
"value":{
"handle":"21.T14999/B2HANDLE_INTEGRATION_TESTS",
"index":300,
"permissions":"011111110011"
}
}
},
{
"index":111,
"type":"TEST1",
"data":"val1"
},
{
"index":2222,
"type":"TEST2",
"data":"val2"
},
{
"index":333,
"type":"TEST3",
"data":"val3"
},
{
"index":4,
"type":"TEST4",
"data":"val4"
}
]
testhandle = self.handle
url = self.testvalues['handle_server_url_write']+self.testvalues['url_extension_REST_API']+testhandle
veri = self.https_verify
head = headers
data = json.dumps({'values':list_of_all_entries})
resp = requests.put(url, data=data, headers=head, verify=veri)
# retrieve_handle_record_json
def test_retrieve_handle_record_json(self):
"""Test reading handle record from server."""
rec = self.inst.retrieve_handle_record_json(self.handle)
received_type = rec['values'][2]['type']
received_value = rec['values'][2]['data']['value']
 self.assertEqual(received_type, 'TEST1',
  'The type should be "TEST1" but was "%s" (%s).' % (received_type, self.handle))
 self.assertEqual(received_value, 'val1',
  'The value should be "val1" but is "%s" (%s).' % (received_value, self.handle))
# get_value_from_handle
def test_get_value_from_handle_normal(self):
"""Test reading existent and inexistent handle value from server."""
val = self.inst.get_value_from_handle(self.handle, 'TEST1')
self.assertEqual(val, 'val1',
'Retrieving "TEST1" from %s should lead to "val1", but it lead to "%s"' % (self.handle,val))
def test_get_value_from_handle_inexistent_key(self):
val = self.inst.get_value_from_handle(self.handle, 'TEST100')
self.assertIsNone(val,
'Retrieving "TEST100" from %s should lead to "None", but it lead to "%s"' % (self.handle,val))
def test_get_value_from_handle_inexistent_record(self):
"""Test reading handle value from inexistent handle."""
with self.assertRaises(HandleNotFoundException):
val = self.inst.get_value_from_handle(self.inexistent_handle, 'anykey')
# instantiate
def test_instantiate_with_username_and_wrong_password(self):
"""Test instantiation of client: No exception if password wrong."""
# Create client instance with username and password
inst = EUDATHandleClient.instantiate_with_username_and_password(
self.url,
self.user,
self.randompassword,
HTTPS_verify=self.https_verify)
self.assertIsInstance(inst, EUDATHandleClient)
def test_instantiate_with_username_without_index_and_password(self):
"""Test instantiation of client: Exception if username has no index."""
testusername_without_index = self.user.split(':')[1]
# Run code to be tested + check exception:
with self.assertRaises(HandleSyntaxError):
# Create client instance with username and password
inst = EUDATHandleClient.instantiate_with_username_and_password(
self.url,
testusername_without_index,
self.randompassword,
HTTPS_verify=self.https_verify)
def test_instantiate_with_nonexistent_username_and_password(self):
"""Test instantiation of client: Exception if username does not exist."""
testusername_inexistent = '100:'+self.inexistent_handle
# Run code to be tested + check exception:
with self.assertRaises(HandleNotFoundException):
# Create client instance with username and password
inst = EUDATHandleClient.instantiate_with_username_and_password(
self.url,
testusername_inexistent,
self.randompassword,
HTTPS_verify=self.https_verify)
def test_instantiate_with_credentials(self):
"""Test instantiation of client: No exception if password wrong."""
# Test variables
credentials = b2handle.clientcredentials.PIDClientCredentials(
handle_server_url=self.url,
username=self.user,
password=self.randompassword)
# Run code to be tested
# Create instance with credentials
inst = EUDATHandleClient.instantiate_with_credentials(
credentials,
HTTPS_verify=self.https_verify)
# Check desired outcomes
self.assertIsInstance(inst, EUDATHandleClient)
def test_instantiate_with_credentials_inexistentuser(self):
"""Test instantiation of client: Exception if username does not exist."""
# Test variables
testusername_inexistent = '100:'+self.inexistent_handle
credentials = b2handle.clientcredentials.PIDClientCredentials(
handle_server_url=self.url,
username=testusername_inexistent,
password=self.randompassword)
# Run code to be tested + check exception:
# Create instance with credentials
with self.assertRaises(HandleNotFoundException):
inst = EUDATHandleClient.instantiate_with_credentials(credentials,
HTTPS_verify=self.https_verify)
# If the user name has no index, exception is already thrown in credentials creation!
#self.assertRaises(HandleSyntaxError, b2handle.PIDClientCredentials, 'url', 'prefix/suffix', randompassword)
def test_instantiate_with_credentials_config_override(self):
"""Test instantiation of client: No exception if password wrong."""
# Test variables
credentials = mock.MagicMock()
config_from_cred = {}
valuefoo = 'foo/foo/foo/' # passed via credentials
valuebar = 'bar/bar/bar' # passed directly to constructor
config_from_cred['REST_API_url_extension'] = valuefoo
credentials = b2handle.clientcredentials.PIDClientCredentials(
handle_server_url=self.url,
username=self.user,
password=self.randompassword,
handleowner=self.user,
REST_API_url_extension=valuefoo
)
self.assertEqual(credentials.get_config()['REST_API_url_extension'],valuefoo,
'Config: '+str(credentials.get_config()))
# foo/foo/ from the credentials should be overridden by bar/bar/ which is directly passed
# Run code to be tested - we expect an exception, as it will try to do a GET on the bogus rest api:
with self.assertRaises(GenericHandleError):
inst = EUDATHandleClient.instantiate_with_credentials(
credentials,
HTTPS_verify=self.https_verify,
REST_API_url_extension=valuebar)
# So this code can only be reached if something went wrong:
self.assertIsInstance(inst, EUDATHandleClient)
# Check if bar/bar instead of foo/foo was stored as path!
serverconn = inst._EUDATHandleClient__handlesystemconnector
self.assertIn('/bar/', serverconn._HandleSystemConnector__REST_API_url_extension)
self.assertNotIn('/foo/', serverconn._HandleSystemConnector__REST_API_url_extension)
 self.assertEqual(serverconn._HandleSystemConnector__REST_API_url_extension, valuebar)
def test_instantiate_with_credentials_config(self):
"""Test instantiation of client: No exception if password wrong."""
# Test variables
credentials = mock.MagicMock()
config_from_cred = {}
valuefoo = 'foo/foo/foo/'
config_from_cred['REST_API_url_extension'] = valuefoo
credentials = b2handle.clientcredentials.PIDClientCredentials(
handle_server_url=self.url,
username=self.user,
password=self.randompassword,
handleowner=self.user,
REST_API_url_extension=valuefoo
)
self.assertEqual(credentials.get_config()['REST_API_url_extension'],valuefoo,
'Config: '+str(credentials.get_config()))
# foo/foo/ from the credentials should override default api/handles/
# Run code to be tested - we expect an exception, as it will try to do a GET on the bogus rest api:
with self.assertRaises(GenericHandleError):
inst = EUDATHandleClient.instantiate_with_credentials(
credentials,
HTTPS_verify=self.https_verify)
# So this code can only be reached if something went wrong:
self.assertIsInstance(inst, EUDATHandleClient)
# Check if foo/foo instead of api/handles was stored as path!
serverconn = inst._EUDATHandleClient__handlesystemconnector
self.assertIn('/foo/', serverconn._HandleSystemConnector__REST_API_url_extension)
 self.assertEqual(serverconn._HandleSystemConnector__REST_API_url_extension, valuefoo)
 def test_global_resolve(self):
  """Testing if instantiating with the default handle server works
and if a handle is correctly retrieved. """
# Create instance with default server url:
inst = EUDATHandleClient(HTTPS_verify=self.https_verify)
rec = inst.retrieve_handle_record_json(self.handle_global)
self.assertIn('handle', rec,
'Response lacks "handle".')
self.assertIn('responseCode', rec,
'Response lacks "responseCode".')
def test_instantiate_for_read_access(self):
"""Testing if instantiating with default handle server works
and if a handle is correctly retrieved. """
# Create client instance with username and password
inst = EUDATHandleClient.instantiate_for_read_access(HTTPS_verify=self.https_verify)
rec = self.inst.retrieve_handle_record_json(self.handle)
self.assertIsInstance(inst, EUDATHandleClient)
self.assertIn('handle', rec,
'Response lacks "handle".')
self.assertIn('responseCode', rec,
'Response lacks "responseCode".')
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service for listening and handling Queue Messages.
This service registers interest in listening to a Queue and processing received messages.
"""
import asyncio
import functools
import json
import signal
from typing import Dict
from nats.aio.client import Client as NATS # noqa N814; by convention the name is NATS
from stan.aio.client import Client as STAN # noqa N814; by convention the name is STAN
from queue_common.probes import Probes # noqa I001; sort issue due to comments on the NATS & STAN lines
from queue_common.service_utils import error_cb, logger, signal_handler # noqa I001; sort issue due to comments on the NATS & STAN lines
from queue_common.version import __version__ # noqa I001; sort issue due to comments on the NATS & STAN lines
class ServiceWorker:
"""Wrap a service that will listen to the Queue Stream."""
__version__ = __version__
def __init__(self, *,
loop=None,
cb_handler=None,
nats_connection_options=None,
stan_connection_options=None,
subscription_options=None,
config=None,
name=None,
version=None
):
"""Initialize the service to a working state."""
self.sc = None
self.nc = None
self._start_seq = 0
self._loop = loop
self.cb_handler = cb_handler
self.nats_connection_options = nats_connection_options or {}
self.stan_connection_options = stan_connection_options or {}
self.subscription_options = subscription_options or {}
self.config = config
self._name = name
self._version = version
async def conn_lost_cb(error):
logger.info('Connection lost:%s', error)
for i in range(0, 100):
try:
logger.info('Reconnecting, attempt=%i...', i)
await self.connect()
except Exception as e: # pylint: disable=broad-except # noqa B902
# catch all errors from client framework
logger.error('Error %s', e.with_traceback(), stack_info=True)
continue
break
self._stan_conn_lost_cb = conn_lost_cb
@property
async def is_healthy(self):
"""Determine if the service is working."""
if self.nc and self.nc.is_connected:
return True
return False
@property
async def is_ready(self):
"""Determine if the service is ready to perform."""
if self.nc and self.nc.is_connected:
return True
return False
@property
def name(self):
"""Return worker name of this service."""
return self._name
@name.setter
def name(self, value):
"""Set worker name of this service."""
self._name = value
@property
def version(self):
"""Return worker version of this service."""
return self._version
@version.setter
def version(self, value):
"""Set worker version of this service."""
self._version = value
 async def connect(self):
  """Connect the service worker to the NATS/STAN Queue.
Also handles reconnecting when the network has dropped the connection.
Both the NATS and the STAN clients need to be reinstantiated to work correctly.
"""
if not self.config:
logger.error('missing configuration object.')
raise AttributeError('missing configuration object.')
logger.info('Connecting...')
if self.nc:
try:
logger.debug('close old NATS client')
await self.nc.close()
except asyncio.CancelledError as err:
logger.debug('closing stale connection err:%s', err)
finally:
self.nc = None
self.nc = NATS()
self.sc = STAN()
nats_connection_options = {
**self.config.NATS_CONNECTION_OPTIONS,
** {'loop': self._loop,
'error_cb': error_cb},
**self.nats_connection_options
}
stan_connection_options = {
**self.config.STAN_CONNECTION_OPTIONS,
**{'nats': self.nc,
'conn_lost_cb': self._stan_conn_lost_cb,
'loop': self._loop},
**self.stan_connection_options
}
subscription_options = {
**self.config.SUBSCRIPTION_OPTIONS,
**{'cb': self.cb_handler},
**self.subscription_options
}
await self.nc.connect(**nats_connection_options)
await self.sc.connect(**stan_connection_options)
await self.sc.subscribe(**subscription_options)
logger.info('Subscribe the callback: %s to the queue: %s.',
subscription_options.get('cb').__name__ if subscription_options.get('cb') else 'no_call_back',
subscription_options.get('queue'))
async def close(self):
"""Close the stream and nats connections."""
try:
await self.sc.close()
await self.nc.close()
except Exception as err: # pylint: disable=broad-except # noqa B902
# catch all errors to log out when closing the service.
logger.debug('error when closing the streams: %s', err, stack_info=True)
async def publish(self, subject: str, msg: Dict):
"""Publish the msg as a JSON struct to the subject, using the streaming NATS connection."""
await self.sc.publish(subject=subject,
payload=json.dumps(msg).encode('utf-8'))
class QueueServiceManager:
"""Manages the running of the Queue Client and Probes."""
def __init__(self):
"""Initialize the manager and declaring placeholders for the service & probe."""
self.service = None
self.probe = None
async def close(self):
"""Close all of the services as cleanly as possible."""
await self.service.close()
await self.probe.stop()
my_loop = asyncio.get_running_loop()
await asyncio.sleep(0.1, loop=my_loop)
my_loop.stop()
async def run(self, loop, config, callback): # pylint: disable=too-many-locals
"""Run the main application loop for the service.
This runs the main top level service functions for working with the Queue.
"""
self.service = ServiceWorker(loop=loop, cb_handler=callback, config=config)
self.probe = Probes(components=[self.service], loop=loop)
try:
await self.probe.start()
await self.service.connect()
# register the signal handler
for sig in ('SIGINT', 'SIGTERM'):
loop.add_signal_handler(getattr(signal, sig),
functools.partial(signal_handler, sig_loop=loop, task=self.close)
)
except Exception as e: # pylint: disable=broad-except # noqa B902
# TODO tighten this error and decide when to bail on the infinite reconnect
logger.error(e)
if __name__ == '__main__':
try:
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(run(event_loop))
event_loop.run_forever()
except Exception as err: # pylint: disable=broad-except; Catching all errors from the frameworks
logger.error('problem in running the service: %s', err, stack_info=True, exc_info=True)
finally:
event_loop.close()
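# --- Illustrative usage sketch ---
# A minimal sketch of how a consumer might wire up QueueServiceManager; the
# `AppConfig` object and the `cb_subscription_handler` coroutine are hypothetical
# placeholders supplied by the caller, not defined in this module.
#
# async def cb_subscription_handler(msg):
#     logger.info('received message: %s', msg.data.decode())
#
# event_loop = asyncio.get_event_loop()
# service_manager = QueueServiceManager()
# event_loop.run_until_complete(
#     service_manager.run(loop=event_loop, config=AppConfig(), callback=cb_subscription_handler))
# event_loop.run_forever()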
|
#!/usr/bin/python
import os
import sys
import getopt
import signal
import logging
from DICOMHandler import DICOMListener
from logsetting import getHandler
# create logger
logger = logging.getLogger('serverWrapper')
logger.setLevel(logging.INFO)
#########################################################
#
"""
Wrapper script called by DICOM server plugin
"""
#
#########################################################
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def killServer(application, storescp_cmd, dcmqrscp_cmd):
"""
Kill DICOM server associated processes
"""
storescp_cmd = storescp_cmd.strip()
dcmqrscp_cmd = dcmqrscp_cmd.strip()
logger.info("Stopping DICOM listener ...")
for line in os.popen("ps ax"):
fields = line.split()
pid = fields[0]
process = fields[4]
shell_called_process = ''
if len(fields) > 6:
shell_called_process = fields[6]
if process.find('grep') == 0:
continue
elif process.find('python') == 0 or process.find(storescp_cmd) == 0 or shell_called_process.find(storescp_cmd) == 0 or process.find(dcmqrscp_cmd) == 0 or shell_called_process.find(dcmqrscp_cmd) == 0:
#Kill the Process. Change signal.SIGHUP to signal.SIGKILL if you like
os.kill(int(pid), signal.SIGTERM)
logger.info("killed this process - %s" % line)
logger.info("DICOM listener stopped")
def main():
"""
Wrapper function to start and stop dicom server
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "hros:p:t:k:c:i:u:e:a:d:q:f:", ["help", "start", "stop", \
"storescp=", "port=", "timeout=", "scriptpath=", "dcm2xml=", "incoming=", \
"url=", "email=", "apikey=", "dest=", "dcmqrscp=", "qrscpcfg="])
except getopt.error, msg:
raise Usage(msg)
start = False
url = ''
user_email = ''
apikey = ''
dest_folder = ''
incoming_dir = ''
storescp_cmd = ''
script_path = ''
dcm2xml_cmd = ''
storescp_port = ''
storescp_timeout = ''
dcmqrscp_cmd = ''
dcmqrscp_cfg = ''
for opt, arg in opts:
if opt in ('-h', "--help"):
sample = 'server.py --start -s <storescp_cmd> ' \
                '-p <storescp_port> -t <storescp_study_timeout> ' \
'-k <script_path> -c <dcm2xml_cmd> -i <incoming_dir> ' \
'-u <midas_url> -e <midas_user_email> ' \
'-a <midas_api_key> -d <midas_destination_folder>' \
'-q <dcmqrscp_cmd> -f <dcmqrscp_cfg_file>'
print sample
sys.exit()
elif opt in ("-r", "--start"):
start = True
elif opt in ("-o", "--stop"):
start = False
elif opt in ("-s", "--storescp"):
storescp_cmd = arg
elif opt in ("-p", "--port"):
storescp_port = arg
elif opt in ("-t", "--timeout"):
storescp_timeout = arg
elif opt in ("-k", "--scriptpath"):
script_path = arg
elif opt in ("-c", "--dcm2xml"):
dcm2xml_cmd = arg
elif opt in ("-i", "--incoming"):
incoming_dir = arg
elif opt in ("-u", "--url"):
url = arg
elif opt in ("-e", "--email"):
user_email = arg
elif opt in ("-a", "--apikey"):
apikey = arg
elif opt in ("-d", "--dest"):
dest_folder = arg
elif opt in ("-q", "--dcmqrscp"):
dcmqrscp_cmd = arg
elif opt in ("-f", "--qrscpcfg"):
dcmqrscp_cfg = arg
# set up logger
logger.addHandler(getHandler(incoming_dir.strip()))
# start/stop dicom server
myListener = DICOMListener()
if start:
# callback command used by storescp '--eostudy-timeout' option
callback_cmd = "'python %s -c %s -i %s -u %s -e %s -a %s -d %s'" % ( \
script_path, dcm2xml_cmd, incoming_dir, url, user_email, apikey, dest_folder)
logger.info("Starting DICOM listener ...")
retcode = myListener.start(incoming_dir, callback_cmd, \
storescp_cmd, storescp_port, storescp_timeout, \
dcmqrscp_cmd, dcmqrscp_cfg)
return retcode
else:
if not storescp_cmd:
storescp_cmd = 'storescp'
if not dcmqrscp_cmd:
dcmqrscp_cmd = 'dcmqrscp'
        app_name = 'serverWrapper'
retcode = killServer(app_name, storescp_cmd, dcmqrscp_cmd)
return retcode
if __name__ == "__main__":
sys.exit(main())
|
import time
import logging
from collections import OrderedDict
from pyinstrument import Profiler
from django.http import HttpResponse, HttpRequest
LOG = logging.getLogger(__name__)
_PROFILER_RECORDS = OrderedDict()
class RssantProfilerMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request: HttpRequest):
response = self._render_profiler_record(request)
if response:
return response
profiler = None
try:
profiler = Profiler()
profiler.start()
response = self.get_response(request)
finally:
if profiler is not None:
profiler.stop()
print(profiler.output_text(unicode=True, color=True))
link = self._output_html(request, profiler)
print(f'* Profiler HTML: {link}\n')
return response
def _output_html(self, request: HttpRequest, profiler: Profiler):
html = profiler.output_html()
t = int(time.time() * 1000)
key = '{}-{}-{}'.format(t, request.method, request.path)
_PROFILER_RECORDS[key] = html
while len(_PROFILER_RECORDS) > 20:
_PROFILER_RECORDS.popitem(False)
port = request.META['SERVER_PORT']
link = f'http://localhost:{port}/__profiler__/{key}'
return link
def _render_profiler_record(self, request: HttpRequest):
prefix = '/__profiler__/'
if not request.path.startswith(prefix):
return None
key = request.path[len(prefix):]
html = _PROFILER_RECORDS.get(key)
if not html:
return None
content = html.encode('utf-8')
return HttpResponse(content, content_type='text/html', charset='utf-8')
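# --- Illustrative usage sketch ---
# A minimal sketch of enabling this middleware during local development; the
# dotted path below is hypothetical and must point at wherever this module
# actually lives in the project.
#
# # settings.py
# if DEBUG:
#     MIDDLEWARE = ['rssant_common.middleware.RssantProfilerMiddleware'] + MIDDLEWARE
#
# Each profiled request prints a link such as
# http://localhost:8000/__profiler__/<millis>-GET-/some/path, which serves the
# stored pyinstrument HTML report back through _render_profiler_record().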
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import signal
from sppm import settings
from sppm.cmd_action import action_shutdown
from sppm.process_status_lock import ProcessStatusLock
from sppm.settings import hlog, SPPM_CONFIG
# noinspection PyUnusedLocal
def sigint_handler(sig, frame):
    hlog.debug('Received Ctrl+C signal, exiting......')
child_pid = ProcessStatusLock.get_pid_from_file(SPPM_CONFIG.child_pid_file)
action_shutdown(child_pid)
exit(0)
# noinspection PyUnusedLocal
def sigterm_handler(sig, frame):
settings.signals[signal.SIGTERM] = True
    hlog.debug('Received SIGTERM signal, preparing to exit......')
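# --- Illustrative usage sketch ---
# A minimal sketch, assuming the parent process registers these handlers
# before entering its supervision loop:
#
# signal.signal(signal.SIGINT, sigint_handler)
# signal.signal(signal.SIGTERM, sigterm_handler)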
|
from .import_vtf import load_texture, texture_from_data
from ...content_providers.content_manager import ContentManager
from ...utilities.thirdparty.equilib.cube2equi_numpy import run as convert_to_eq
from ..vmt.valve_material import VMT
import numpy as np
def pad_to(im: np.ndarray, s_size: int):
new = np.zeros((s_size, s_size, 4), np.uint8)
new[:, :, 3] = 255
new[s_size - im.shape[0]:, s_size - im.shape[1]:, :] = im
return new
class SkyboxException(Exception):
pass
def load_skybox_texture(skyname, width=1024):
content_manager = ContentManager()
sides_names = {'F': 'ft', 'R': 'rt', 'B': 'bk', 'L': 'lf', 'U': 'dn', 'D': 'up'}
sides = {}
max_s = 0
use_hdr = False
for k, n in sides_names.items():
file_path = content_manager.find_material(f'skybox/{skyname}{n}')
material = VMT(file_path)
use_hdr |= bool(material.get_param('$hdrbasetexture', material.get_param('$hdrcompressedtexture', False)))
texture_path = material.get_param('$basetexture', None)
if texture_path is None:
raise SkyboxException('Missing $basetexture in skybox material')
texture_file = content_manager.find_texture(texture_path)
if texture_file is None:
raise SkyboxException(f'Failed to find skybox texture {texture_path}')
side, h, w = load_texture(texture_file)
side = side.reshape((w, h, 4))
max_s = max(max(side.shape), max_s)
if side.shape[0] < max_s or side.shape[1] < max_s:
side = pad_to(side, max_s)
side = np.rot90(side, 1)
if k == 'D':
side = np.rot90(side, 1)
if k == 'U':
side = np.flipud(side)
sides[k] = side.T
eq_map = convert_to_eq(sides, 'dict', width, width // 2, 'default', 'bilinear').T
rgba_data = np.rot90(eq_map)
rgba_data = np.flipud(rgba_data)
main_texture = texture_from_data(skyname, rgba_data, width, width // 2, False)
del rgba_data
hdr_main_texture = None
hdr_alpha_texture = None
if use_hdr:
for k, n in sides_names.items():
file_path = content_manager.find_material(f'skybox/{skyname}_hdr{n}')
if file_path is None:
file_path = content_manager.find_material(f'skybox/{skyname}{n}')
material = VMT(file_path)
texture_path = material.get_param('$hdrbasetexture', material.get_param('$hdrcompressedTexture',
material.get_param('$basetexture',
None)))
if texture_path is None:
raise Exception('Missing $basetexture in skybox material')
texture_file = content_manager.find_texture(texture_path)
if texture_file is None:
raise Exception(f'Failed to find skybox texture {texture_path}')
side, h, w = load_texture(texture_file)
side = side.reshape((w, h, 4))
max_s = max(max(side.shape), max_s)
if side.shape[0] < max_s or side.shape[1] < max_s:
side = pad_to(side, max_s)
side = np.rot90(side, 1)
if k == 'D':
side = np.rot90(side, 1)
if k == 'U':
side = np.flipud(side)
sides[k] = side.T
eq_map = convert_to_eq(sides, 'dict', width // 2, width // 4, 'default', 'bilinear').T
rgba_data = np.rot90(eq_map)
rgba_data = np.flipud(rgba_data)
a_data = rgba_data[:, :, 3]
a_data = np.dstack([a_data, a_data, a_data, np.full_like(a_data, 255)])
hdr_alpha_texture = texture_from_data(skyname + '_HDR_A', a_data, width // 2, width // 4, False)
hdr_main_texture = texture_from_data(skyname + '_HDR', rgba_data, width // 2, width // 4, False)
del rgba_data
return main_texture, hdr_main_texture, hdr_alpha_texture
|
from common import BaseTestCase
from struct import pack
from binascii import unhexlify
from mtpdevice.mtp_proto import ContainerTypes, ResponseCodes
from mtpdevice.mtp_exception import MtpProtocolException
from mtpdevice.mtp_msg import msg_from_buff
class CommandMessageTest(BaseTestCase):
def buildVanillabuffer(self, code, tid, params):
length = len(params) * 4 + 0xc
ctype = ContainerTypes.Command
buff = pack('<IHHI', length, ctype, code, tid)
for param in params:
buff += pack('<I', param)
return buff
def vanillaTest(self, code, tid, params):
buff = self.buildVanillabuffer(code, tid, params)
command_message = msg_from_buff(buff)
self.assertEqual(command_message.code, code)
self.assertEqual(command_message.tid, tid)
self.assertEqual(command_message.params, params)
self.assertEqual(command_message.num_params(), len(params))
for i in range(len(params)):
self.assertEqual(command_message.get_param(i), params[i])
for i in range(len(params), len(params) + 5):
self.assertIsNone(command_message.get_param(i))
def testCorrectValuesFromBufferNoParams(self):
self.vanillaTest(1, 2, [])
def testCorrectValuesFromBufferSingleParam(self):
self.vanillaTest(1, 2, [0x12345678])
def testCorrectValuesFromBufferMultipleParams(self):
self.vanillaTest(1, 2, [0x01020304, 0x11121314, 0x21222324, 0x31323334, 0x41424344])
def invalidBufferTest(self, buff, expected_response=ResponseCodes.INVALID_CODE_FORMAT):
with self.assertRaises(MtpProtocolException) as cm:
msg_from_buff(buff)
if expected_response is not None:
self.assertEqual(cm.exception.response, expected_response)
def testInvalidLengthTooLongNoParams(self):
buff = unhexlify('100000000100010002000000')
self.invalidBufferTest(buff)
def testInvalidLengthTooShortNoParams(self):
buff = unhexlify('080000000100010002000000')
self.invalidBufferTest(buff)
def testInvalidLengthTooLongWithParams(self):
buff = unhexlify('14000000010001000200000011111111')
self.invalidBufferTest(buff)
def testInvalidLengthTooShortWithParams(self):
buff = unhexlify('0c000000010001000200000011111111')
self.invalidBufferTest(buff)
def testInvalidLengthNotMultipleOfFour(self):
buff = unhexlify('0d0000000100010002000000ff')
self.invalidBufferTest(buff)
def testInvalidRequestTooshort(self):
buff = unhexlify('0800000001000100')
self.invalidBufferTest(buff)
|
class Come_on_over:
def __init__(self, come, on, over):
        print(come, on, over)
come = 'come'
on = 'on'
over = 'over'
a = Come_on_over(come, on, over)
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting mudracoin with -h works as expected."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class HelpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def get_node_output(self, *, ret_code_expected):
ret_code = self.nodes[0].process.wait(timeout=5)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
out = self.nodes[0].stdout.read()
err = self.nodes[0].stderr.read()
self.nodes[0].stdout.close()
self.nodes[0].stderr.close()
# Clean up TestNode state
self.nodes[0].running = False
self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
return out, err
def run_test(self):
self.log.info("Start mudracoin with -h for help text")
self.nodes[0].start(extra_args=['-h'])
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
self.log.info("Help text received: {} (...)".format(output[0:60]))
self.log.info("Start mudracoin with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
self.log.info("Version text received: {} (...)".format(output[0:60]))
# Test that arguments not in the help results in an error
self.log.info("Start mudracoind with -fakearg to make sure it does not start")
self.nodes[0].start(extra_args=['-fakearg'])
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
self.log.info("Error message received: {} (...)".format(output[0:60]))
if __name__ == '__main__':
HelpTest().main()
|
# -*- coding: utf-8 -*-
"""
This module contains form fields to work with.
"""
from django import forms
from django.core.exceptions import ValidationError, FieldError
from django.utils.translation import gettext_lazy as _
# Feel free to extend this, see
# http://www.iana.org/assignments/media-types/media-types.xhtml
MEDIA_TYPES = ['image', 'audio', 'video']
class MultiUploadMetaInput(forms.ClearableFileInput):
""" HTML5 <input> representation. """
def __init__(self, *args, **kwargs):
self.multiple = kwargs.pop('multiple', True)
super().__init__(*args, **kwargs)
def render(self, name, value, attrs=None, renderer=None):
if self.multiple:
if not attrs:
attrs = {}
attrs['multiple'] = 'multiple'
return super().render(name, value, attrs, renderer)
def value_from_datadict(self, data, files, name):
if hasattr(files, 'getlist'):
return files.getlist(name)
value = files.get(name)
if value is None or isinstance(value, list):
return value
return [value]
class MultiUploadMetaField(forms.FileField):
""" Base class for the all media types classes. """
default_error_messages = {
'min_num': _(
u'Ensure at least %(min_num)s files are '
u'uploaded (received %(num_files)s).'),
'max_num': _(
u'Ensure at most %(max_num)s files '
u'are uploaded (received %(num_files)s).'),
'file_size': _(
u'File %(uploaded_file_name)s '
u'exceeded maximum upload size.'),
}
def __init__(self, *args, **kwargs):
self.min_num = kwargs.pop('min_num', 0)
self.max_num = kwargs.pop('max_num', None)
self.maximum_file_size = kwargs.pop('max_file_size', None)
self.widget = MultiUploadMetaInput(
attrs=kwargs.pop('attrs', {}),
multiple=(self.max_num is None or self.max_num > 1),
)
if self.min_num == 0:
kwargs['required'] = False
super().__init__(*args, **kwargs)
def to_python(self, data):
ret = []
data = data or []
for item in data:
i = super().to_python(item)
if i:
ret.append(i)
return ret
def validate(self, value):
super().validate(value)
num_files = len(value)
if num_files and not value[0]:
num_files = 0
if not self.required and num_files == 0:
return
if num_files < self.min_num:
raise ValidationError(
self.error_messages['min_num'] % {
'min_num': self.min_num,
'num_files': num_files,
}
)
if self.max_num and num_files > self.max_num:
raise ValidationError(
self.error_messages['max_num'] % {
'max_num': self.max_num,
'num_files': num_files,
}
)
for uploaded_file in value:
if (self.maximum_file_size and
uploaded_file.size > self.maximum_file_size):
raise ValidationError(
self.error_messages['file_size'] % {
'uploaded_file_name': uploaded_file.name,
}
)
class MultiFileField(MultiUploadMetaField):
""" Handles plain files. """
class MultiMediaField(MultiUploadMetaField):
""" Handles multimedia files."""
error_messages = {
'wrong_type': _(
u'Invalid media_type. Valid types are: %(valid_types)s')
}
def __init__(self, *args, **kwargs):
self.media_type = kwargs.pop('media_type', 'image')
if self.media_type not in MEDIA_TYPES:
raise FieldError(
self.error_messages['wrong_type'] % {
'valid_types': ', '.join(MEDIA_TYPES),
}
)
kwargs.update({
'attrs': {
'accept': '{0}/*'.format(self.media_type),
}
})
super().__init__(*args, **kwargs)
class MultiImageField(MultiMediaField, forms.ImageField):
""" Handles multiple image uploads, requires Pillow to be installed. """
def __init__(self, *args, **kwargs):
kwargs.update({'media_type': 'image'})
super().__init__(*args, **kwargs)
def to_python(self, data):
ret = []
for item in data:
i = forms.ImageField.to_python(self, item)
if i:
ret.append(i)
return ret
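# --- Illustrative usage sketch ---
# A minimal sketch of a form using these fields; the form name and limits are
# hypothetical.  Each cleaned value is a list of uploaded files rather than a
# single file, so views should iterate over it.
#
# class AttachmentForm(forms.Form):
#     documents = MultiFileField(min_num=1, max_num=5, max_file_size=5 * 1024 * 1024)
#     pictures = MultiImageField(min_num=0, max_num=10)
#
# # In a view:
# # form = AttachmentForm(request.POST, request.FILES)
# # if form.is_valid():
# #     for f in form.cleaned_data['documents']:
# #         handle_uploaded_file(f)   # hypothetical helper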
|
# -*- coding: utf-8 -*-
import sys
import numpy as np
from numpy import exp
from scipy.special import erf
from math import factorial as fact
from math import pi, sqrt
def Hermite(m,x):
if m < 0:
value = 0
elif m == 0:
value = 1
elif m == 1:
value = 2*x
else:
i = 1
P1 = 2*x
P2 = 1
while True:
i += 1
value = 2*x*P1-2*(i-1)*P2
P2 = P1
P1 = value
if i == m:
break
return value
def IndefInt(m,n,x):
x_max = np.sqrt(np.log(sys.float_info[0]))
value = 0
if x > x_max:
if m == n:
value += pow(2.,n)*fact(n)*sqrt(pi)/2
elif x < -x_max:
if m == n:
value -= pow(2.,n)*fact(n)*sqrt(pi)/2
else:
if m != n:
value = exp(-pow(x,2))*(n*Hermite(m,x)*Hermite(n-1,x)-m*Hermite(n,x)*Hermite(m-1,x))/(m-n)
else:
value += pow(2.,n)*sqrt(pi)/2*erf(x)
for i in range(n):
value -= exp(-pow(x,2))*Hermite(i+1,x)*Hermite(i,x)*pow(2.,n-1-i)/fact(i+1)
value *= fact(n)
return value
def IntHmHnexp(m,n,x1,x2):
return (IndefInt(m,n,x2)- IndefInt(m,n,x1))
def IntXHmHnexp(m,n,x1,x2,a):
y=0
if (a[0]!=0): y += a[0]*IntHmHnexp(m,n,x1,x2)
if (a[1]!=0): y += a[1]*(1/2.0*IntHmHnexp(m,n+1,x1,x2)+n*IntHmHnexp(m,n-1,x1,x2))
if (a[2]!=0): y += a[2]*(1/4.0*IntHmHnexp(m,n+2,x1,x2)+(n+1/2.0)*IntHmHnexp(m,n,x1,x2)\
+n*(n-1)*IntHmHnexp(m,n-2,x1,x2))
if (a[3]!=0): y += a[3]*(1/8.0*IntHmHnexp(m,n+3,x1,x2)+3/4.0*(n+1)*IntHmHnexp(m,n+1,x1,x2)\
+3/2.0*n*n*IntHmHnexp(m,n-1,x1,x2) + n*(n-1)*(n-2)*IntHmHnexp(m,n-3,x1,x2))
return y
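# Illustrative sanity check against the Hermite orthogonality relation: the
# integral of H_m(x) * H_n(x) * exp(-x**2) over the real line is
# 2**n * n! * sqrt(pi) for m == n and 0 otherwise.  Integrating over [-10, 10]
# is numerically indistinguishable from the full real line here.
if __name__ == '__main__':
    print(IntHmHnexp(2, 2, -10, 10), pow(2., 2)*fact(2)*sqrt(pi))  # both ~14.1796
    print(IntHmHnexp(2, 3, -10, 10))                               # ~0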
|
'''
download_data.py
Adapted from https://github.com/paarthneekhara/text-to-image
'''
import os
import sys
import errno
import tarfile
if sys.version_info >= (3,):
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
DATA_DIR = 'Data'
# http://stackoverflow.com/questions/273192/how-to-check-if-a-directory-exists-and-create-it-if-necessary
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def create_data_paths():
if not os.path.isdir(DATA_DIR):
raise EnvironmentError('Needs to be run from project directory containing ' + DATA_DIR)
needed_paths = [
os.path.join(DATA_DIR, 'samples'),
os.path.join(DATA_DIR, 'val_samples'),
os.path.join(DATA_DIR, 'Models'),
]
for p in needed_paths:
make_sure_path_exists(p)
# adapted from http://stackoverflow.com/questions/51212/how-to-write-a-download-progress-indicator-in-python
def dl_progress_hook(count, blockSize, totalSize):
percent = int(count * blockSize * 100 / totalSize)
sys.stdout.write("\r" + "...%d%%" % percent)
sys.stdout.flush()
def download_dataset(data_name):
if data_name == 'flowers':
print('== Flowers dataset ==')
flowers_dir = os.path.join(DATA_DIR, 'flowers')
flowers_jpg_tgz = os.path.join(flowers_dir, '102flowers.tgz')
make_sure_path_exists(flowers_dir)
# the original google drive link at https://drive.google.com/file/d/0B0ywwgffWnLLcms2WWJQRFNSWXM/view
# from https://github.com/reedscot/icml2016 is problematic to download automatically, so included
# the text_c10 directory from that archive as a bzipped file in the repo
captions_tbz = os.path.join(flowers_dir, 'flowers_text_c10.tar.bz2')
print('Extracting ' + captions_tbz)
captions_tar = tarfile.open(captions_tbz, 'r:bz2')
captions_tar.extractall(flowers_dir)
flowers_url = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz'
print('Downloading ' + flowers_jpg_tgz + ' from ' + flowers_url)
urlretrieve(flowers_url, flowers_jpg_tgz,
reporthook=dl_progress_hook)
print('Extracting ' + flowers_jpg_tgz)
flowers_jpg_tar = tarfile.open(flowers_jpg_tgz, 'r:gz')
flowers_jpg_tar.extractall(flowers_dir) # archive contains jpg/ folder
elif data_name == 'skipthoughts':
print('== Skipthoughts models ==')
SKIPTHOUGHTS_DIR = os.path.join(DATA_DIR, 'skipthoughts')
SKIPTHOUGHTS_BASE_URL = 'http://www.cs.toronto.edu/~rkiros/models/'
make_sure_path_exists(SKIPTHOUGHTS_DIR)
# following https://github.com/ryankiros/skip-thoughts#getting-started
skipthoughts_files = [
'dictionary.txt', 'utable.npy', 'btable.npy', 'uni_skip.npz', 'uni_skip.npz.pkl', 'bi_skip.npz',
'bi_skip.npz.pkl',
]
for filename in skipthoughts_files:
src_url = SKIPTHOUGHTS_BASE_URL + filename
print('Downloading ' + src_url)
urlretrieve(src_url, os.path.join(SKIPTHOUGHTS_DIR, filename),
reporthook=dl_progress_hook)
elif data_name == 'nltk_punkt':
import nltk
print('== NLTK pre-trained Punkt tokenizer for English ==')
nltk.download('punkt')
elif data_name == 'pretrained_model':
print('== Pretrained model ==')
MODEL_DIR = os.path.join(DATA_DIR, 'Models')
pretrained_model_filename = 'latest_model_flowers_temp.ckpt'
src_url = 'https://bitbucket.org/paarth_neekhara/texttomimagemodel/raw/74a4bbaeee26fe31e148a54c4f495694680e2c31/' + pretrained_model_filename
print('Downloading ' + src_url)
urlretrieve(
src_url,
os.path.join(MODEL_DIR, pretrained_model_filename),
reporthook=dl_progress_hook,
)
else:
raise ValueError('Unknown dataset name: ' + data_name)
def main():
create_data_paths()
# download_dataset('flowers')
download_dataset('skipthoughts')
# download_dataset('nltk_punkt')
# download_dataset('pretrained_model')
print('Done')
if __name__ == '__main__':
main()
|
from fastapi import HTTPException, status
from fastapi.security import HTTPBearer
from jose import jwt
from jose.exceptions import ExpiredSignatureError, JWTError
from settings import Settings
settings = Settings()
class CredentialException(HTTPException):
def __init__(
self,
status_code: int = status.HTTP_401_UNAUTHORIZED,
detail: str = "Could not validate credentials",
) -> None:
self.headers = {"WWW-Authenticate": "Bearer"}
super().__init__(status_code, detail=detail, headers=self.headers)
class Auth:
security = HTTPBearer()
@staticmethod
def decode_token(token: str) -> str:
"""Retrieve an decoded JWT Token."""
try:
payload = jwt.decode(
token, settings.secret_key, algorithms=[settings.algorithm]
)
if payload["scope"] != "access_token":
raise CredentialException(detail="Invalid token scope.")
return payload["sub"]
except ExpiredSignatureError:
raise CredentialException(detail="Signature has expired.")
except JWTError as e:
raise CredentialException(detail=str(e))
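# --- Illustrative usage sketch ---
# A minimal sketch of protecting a route with this helper; the app and route
# below are hypothetical.
#
# from fastapi import Depends, FastAPI
# from fastapi.security import HTTPAuthorizationCredentials
#
# app = FastAPI()
#
# @app.get("/me")
# def read_me(credentials: HTTPAuthorizationCredentials = Depends(Auth.security)) -> dict:
#     subject = Auth.decode_token(credentials.credentials)
#     return {"user": subject}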
|
import logging
from pyzabbix import ZabbixAPI
from monitor_provider.credentials.zabbix import (
CredentialZabbix, CredentialAddZabbix
)
from monitor_provider.providers.base import ProviderBase
from monitor_provider.settings import LOGGING_LEVEL
logging.basicConfig(
level=LOGGING_LEVEL,
format='%(asctime)s %(filename)s(%(lineno)d) %(levelname)s: %(message)s')
class ProviderZabbix(ProviderBase):
def __init__(self, environment):
self._zapi = None
super(ProviderZabbix, self).__init__(environment)
@classmethod
def get_provider(cls):
return 'zabbix'
def build_credential(self):
return CredentialZabbix(self.provider, self.environment)
def get_credential_add(self):
return CredentialAddZabbix
@property
def zapi(self):
if not self._zapi:
self._zapi = ZabbixAPI(self.credential.endpoint)
self._zapi.login(self.credential.user, self.credential.password)
return self._zapi
def _create_host_monitor(self, host, **kwargs):
host.identifier = host.host_name
host.environment = self.credential.default_environment
host.locality = self.credential.default_locality
host.hostgroups = self.credential.default_hostgroups
host.alarm = self.credential.alarm
data = {
'host': host.host_name,
'ip': host.ip,
'environment': host.environment,
'locality': host.locality,
'hostgroups': host.hostgroups,
'alarm': host.alarm
}
self.zapi.globo.createLinuxMonitors(**data)
def _delete_host_monitor(self, host):
data = {'host': host.identifier}
self.zapi.globo.deleteMonitors(**data)
def _create_web_monitor(self, web, **kwargs):
mandatory_fields = ['host_name', 'url', 'required_string']
self.check_mandatory_fields(mandatory_fields, **kwargs)
web.environment = self.credential.default_environment
web.locality = self.credential.default_locality
web.hostgroups = self.credential.default_hostgroups
web.alarm = self.credential.alarm
web.url = kwargs.get("url", None)
web.required_string = kwargs.get("required_string", None)
web.host = web.url.replace('http://', 'web_')
web.identifier = web.host.replace(':', '_').replace('/', '_')
data = {
'environment': web.environment,
'locality': web.locality,
'hostgroups': web.hostgroups,
'alarm': web.alarm,
'url': web.url,
'required_string': web.required_string
}
self.zapi.globo.createWebMonitors(**data)
def _delete_web_monitor(self, web):
data = {'host': web.host}
self.zapi.globo.deleteMonitors(**data)
def _delete_tcp_monitor(self, tcp):
data = {'host': tcp.identifier}
self.zapi.globo.deleteMonitors(**data)
def _create_tcp_monitor(self, tcp, **kwargs):
if not tcp.environment:
tcp.environment = self.credential.default_environment
if not tcp.locality:
tcp.locality = self.credential.default_locality
if not tcp.hostgroups:
tcp.hostgroups = self.credential.default_hostgroups
if not tcp.alarm:
tcp.alarm = self.credential.alarm
tcp.identifier = "tcp_{}-{}".format(tcp.host, tcp.port)
data = {
'environment': tcp.environment,
'locality': tcp.locality,
'hostgroups': tcp.hostgroups,
'host': tcp.host,
'port': tcp.port,
'alarm': tcp.alarm
}
opt = ('doc', 'notes', 'notification_email', 'notification_slack', 'zbx_proxy')
for option in opt:
if kwargs.get(option, None) is None:
continue
data[option] = kwargs.get(option)
self.zapi.globo.createTCPMonitors(**data)
def _delete_mysql_monitor(self, db):
data = {'host': db.identifier}
self.zapi.globo.deleteMonitors(**data)
def _create_mysql_monitor(self, db, **kwargs):
db.identifier = db.host
if not db.environment:
db.environment = self.credential.default_db_environment
if not db.locality:
db.locality = self.credential.default_locality
if not db.hostgroups:
db.hostgroups = self.credential.default_hostgroups
if not db.alarm:
db.alarm = self.credential.alarm
if not db.user:
db.user = self.credential.user
password = kwargs.get("password")
if password is None:
password = self.credential.password
data = {
'environment': db.environment,
'locality': db.locality,
'hostgroups': db.hostgroups,
'alarm': db.alarm,
'host': db.host,
'port': db.port,
'user': db.user,
'version': db.version,
'password': password,
}
if kwargs.get('healthcheck'):
data['healthcheck'] = {
'host': db.host,
'port': 80,
'string': 'WORKING',
'uri': 'health-check/'
}
data['healthcheck_monitor'] = {
'host': db.host,
'port': 80,
'string': 'WORKING',
'uri': 'health-check/monitor/'
}
opt = (
'clone', 'ssl_expire', 'notification_email', 'notification_slack',
'slave_running', 'notification_telegram', 'seconds_behind_master',
'ssl_support'
)
for option in opt:
if kwargs.get(option, None) is None:
continue
data[option] = kwargs.get(option)
self.zapi.globo.createMySQLMonitors(**data)
|
"""
type-requirement:
typename nested-name-specifier? type-name ;
"""
import glrp
from .....parser import cxx20
from motor_typing import TYPE_CHECKING
@glrp.rule('type-requirement : type-name ";"')
# TODO: template not allowed
@glrp.rule('type-requirement : nested-name-specifier template? type-name ";"')
@cxx20
def type_requirement_cxx20(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
if TYPE_CHECKING:
from .....parser import CxxParser
|
# Generated by Django 2.0.10 on 2019-01-25 21:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classy', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='classification',
name='notes',
field=models.CharField(max_length=400, null=True),
),
migrations.AlterField(
model_name='classification',
name='masking',
field=models.CharField(max_length=200, null=True),
),
]
|
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
__version__ = "0.5.0"
import os
import sys
import warnings
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
# Force OMP_NUM_THREADS setup
if os.cpu_count() > 16 and "OMP_NUM_THREADS" not in os.environ:
warnings.warn(
" ".join(
[
"The environment variable `OMP_NUM_THREADS` not set. MinkowskiEngine will automatically set `OMP_NUM_THREADS=16`.",
"If you want to set `OMP_NUM_THREADS` manually, please export it on the command line before running a python script.",
"e.g. `export OMP_NUM_THREADS=12; python your_program.py`.",
"It is recommended to set it below 24.",
]
)
)
os.environ["OMP_NUM_THREADS"] = str(16)
# Must be imported first to load all required shared libs
import torch
from diagnostics import print_diagnostics
from MinkowskiEngineBackend._C import (
MinkowskiAlgorithm,
CoordinateMapKey,
GPUMemoryAllocatorType,
CoordinateMapType,
RegionType,
PoolingMode,
BroadcastMode,
is_cuda_available,
cuda_version,
get_gpu_memory_info,
)
from MinkowskiKernelGenerator import (
KernelRegion,
KernelGenerator,
convert_region_type,
get_kernel_volume,
)
from MinkowskiTensor import (
SparseTensorOperationMode,
SparseTensorQuantizationMode,
set_sparse_tensor_operation_mode,
sparse_tensor_operation_mode,
global_coordinate_manager,
set_global_coordinate_manager,
clear_global_coordinate_manager,
)
from MinkowskiSparseTensor import SparseTensor
from MinkowskiTensorField import TensorField
from MinkowskiCommon import (
convert_to_int_tensor,
MinkowskiModuleBase,
)
from MinkowskiCoordinateManager import (
set_memory_manager_backend,
set_gpu_allocator,
CoordsManager,
CoordinateManager,
)
from MinkowskiConvolution import (
MinkowskiConvolutionFunction,
MinkowskiConvolution,
MinkowskiConvolutionTransposeFunction,
MinkowskiConvolutionTranspose,
MinkowskiGenerativeConvolutionTranspose,
)
from MinkowskiChannelwiseConvolution import MinkowskiChannelwiseConvolution
from MinkowskiPooling import (
MinkowskiLocalPoolingFunction,
MinkowskiSumPooling,
MinkowskiAvgPooling,
MinkowskiMaxPooling,
MinkowskiLocalPoolingTransposeFunction,
MinkowskiPoolingTranspose,
MinkowskiGlobalPoolingFunction,
MinkowskiGlobalPooling,
MinkowskiGlobalSumPooling,
MinkowskiGlobalAvgPooling,
MinkowskiGlobalMaxPooling,
)
from MinkowskiBroadcast import (
MinkowskiBroadcastFunction,
MinkowskiBroadcastAddition,
MinkowskiBroadcastMultiplication,
MinkowskiBroadcast,
MinkowskiBroadcastConcatenation,
)
from MinkowskiNonlinearity import (
MinkowskiELU,
MinkowskiHardshrink,
MinkowskiHardsigmoid,
MinkowskiHardtanh,
MinkowskiHardswish,
MinkowskiLeakyReLU,
MinkowskiLogSigmoid,
MinkowskiPReLU,
MinkowskiReLU,
MinkowskiReLU6,
MinkowskiRReLU,
MinkowskiSELU,
MinkowskiCELU,
MinkowskiGELU,
MinkowskiSigmoid,
MinkowskiSiLU,
MinkowskiSoftplus,
MinkowskiSoftshrink,
MinkowskiSoftsign,
MinkowskiTanh,
MinkowskiTanhshrink,
MinkowskiThreshold,
MinkowskiSoftmin,
MinkowskiSoftmax,
MinkowskiLogSoftmax,
MinkowskiAdaptiveLogSoftmaxWithLoss,
MinkowskiDropout,
MinkowskiAlphaDropout,
MinkowskiSinusoidal,
)
from MinkowskiNormalization import (
MinkowskiBatchNorm,
MinkowskiSyncBatchNorm,
MinkowskiInstanceNorm,
MinkowskiInstanceNormFunction,
MinkowskiStableInstanceNorm,
)
from MinkowskiPruning import MinkowskiPruning, MinkowskiPruningFunction
from MinkowskiUnion import MinkowskiUnion, MinkowskiUnionFunction
from MinkowskiInterpolation import (
MinkowskiInterpolation,
MinkowskiInterpolationFunction,
)
from MinkowskiNetwork import MinkowskiNetwork
import MinkowskiOps
from MinkowskiOps import (
MinkowskiLinear,
MinkowskiToSparseTensor,
MinkowskiToDenseTensor,
cat,
to_sparse,
dense_coordinates,
)
import MinkowskiFunctional
import MinkowskiEngine.utils as utils
import MinkowskiEngine.modules as modules
from sparse_matrix_functions import spmm, MinkowskiSPMMFunction
if not is_cuda_available():
warnings.warn(
" ".join(
[
"The MinkowskiEngine was compiled with CPU_ONLY flag.",
"If you want to compile with CUDA support, make sure `torch.cuda.is_available()` is True when you install MinkowskiEngine.",
]
)
)
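# --- Illustrative usage sketch ---
# A minimal sketch of the 0.5-style API re-exported above; shapes and values
# are arbitrary, and the first column of the coordinates is the batch index.
#
# import torch
# import MinkowskiEngine as ME
#
# coordinates = torch.IntTensor([[0, 0, 0], [0, 1, 1]])
# features = torch.FloatTensor([[0.5], [1.5]])
# x = ME.SparseTensor(features=features, coordinates=coordinates)
# conv = ME.MinkowskiConvolution(in_channels=1, out_channels=8, kernel_size=3, dimension=2)
# y = conv(x)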
|
from typing import Dict, Iterable, Sequence, Tuple, NamedTuple, Union, Any
class Stat(NamedTuple):
st_dev: int = None
st_ino: int = None
st_nlink: int = None
st_mode: int = None
st_uid: int = None
st_gid: int = None
st_rdev: int = None
st_size: int = None
st_blksize: int = None
st_blocks: int = None
st_flags: int = None
st_gen: int = None
st_lspare: int = None
st_qspare: int = None
st_atime: float = None
st_mtime: float = None
st_ctime: float = None
st_birthtime: float = None
def with_values(self, **kwargs):
values = self.as_dict()
values.update(kwargs)
return Stat(**values)
def as_dict(self) -> dict:
return {
k: v
for (k, v) in self._asdict().items()
if v is not None
}
class StatVFS(NamedTuple):
f_bsize: int = None
f_frsize: int = None
f_blocks: int = None
f_bfree: int = None
f_bavail: int = None
f_files: int = None
f_ffree: int = None
f_favail: int = None
f_fsid: int = None
f_flag: int = None
f_namemax: int = None
def as_dict(self) -> dict:
return {
k: v
for (k, v) in self._asdict().items()
if v is not None
}
class Path:
def __init__(self, elements: Sequence[Tuple[str, 'core.Node']]) -> None:
self.elements = elements
@property
def target_node(self):
return self.elements[-1][1]
@property
def parent_path(self):
return Path(self.elements[:-1])
@property
def parent_node(self):
        return self.elements[-2][1]
def __str__(self):
return '/'.join([name for name, node in self.elements])
def __repr__(self):
        return 'Path(' + repr(self.elements) + ')'
Bytes_Like = Union[bytes, str]
Stat_Like = Union[Stat, dict, int]
StatVFS_Like = Union[StatVFS, dict]
Node_Like = Union['core.Node', Bytes_Like, Dict[str, Any]]
FileHandle_Like = Union['core.FileHandle', Bytes_Like, Iterable[Union[str, bytes]]]
DirEntry = Union[str, Tuple[str, Node_Like]]
DirHandle_Like = Union['core.DirHandle', Iterable[str], Iterable[Tuple[str, int]]]
from . import core
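# --- Illustrative usage sketch ---
# A minimal sketch of the record helpers: unset fields stay out of as_dict(),
# and with_values() returns a new Stat with the given overrides.
#
# st = Stat(st_mode=0o100644, st_uid=1000, st_size=1024)
# print(st.with_values(st_size=4096).as_dict())
# # -> {'st_mode': 33188, 'st_uid': 1000, 'st_size': 4096}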
|
import altair as alt
import pandas as pd
tuples = []
with open(snakemake.input[0], "r") as infile:
sample = "???"
for l in infile.read().splitlines():
split = l.split("\t")
if len(split) > 1:
if len(split) == 7:
tuples.append(
(
sample,
int(split[0]),
split[1],
split[2],
split[3],
split[4],
split[5],
split[6],
)
)
elif len(split) == 6:
tuples.append(
(
sample,
int(split[0]),
split[1],
split[2],
split[3],
"",
split[4],
split[5],
)
)
else:
sid = l.split("/")[-1].split("_")
if len(sid) == 3:
# print(sid)
sample = sid[0] + "/" + sid[1]
elif len(sid) == 4:
sample = sid[0] + "_" + sid[1] + "/" + sid[2]
df = pd.DataFrame(
tuples,
columns=[
"sample",
"pos",
"ref",
"alt",
"rejected",
"comment",
"pileupillu",
"pileupnano",
],
)
make = pd.DataFrame({"sample": list(df["sample"].unique())})
selection = alt.selection_multi(fields=["sample"])
color = alt.condition(selection, alt.value("green"), alt.value("lightgray"))
make_selector = (
alt.Chart(make).mark_rect().encode(x="sample", color=color).add_selection(selection)
)
chart = (
alt.Chart(df)
.mark_rect()
.encode(
y="pos:O",
x="sample:N",
color=alt.Color(
"rejected",
scale=alt.Scale(
domain=["Rejected", "Verified", "IlluminaDropout"],
range=["orange", "blue", "grey"],
),
),
tooltip=["ref", "alt", "comment", "pileupillu", "pileupnano", "comment"],
)
.transform_filter(selection)
.interactive()
)
alt.vconcat(make_selector, chart, padding=64).save(snakemake.output["full"])
########## REDUCED PLOT
df = df[df.rejected != "Verified"]
make = pd.DataFrame({"sample": list(df["sample"].unique())})
selection = alt.selection_multi(fields=["sample"])
color = alt.condition(selection, alt.value("green"), alt.value("lightgray"))
make_selector = (
alt.Chart(make).mark_rect().encode(x="sample", color=color).add_selection(selection)
)
chart = (
alt.Chart(df)
.mark_rect()
.encode(
y="pos:O",
x="sample:N",
color=alt.Color(
"rejected",
scale=alt.Scale(
domain=["Rejected", "Verified", "IlluminaDropout"],
range=["orange", "blue", "grey"],
),
),
tooltip=["ref", "alt", "comment", "pileupillu", "pileupnano", "comment"],
)
.transform_filter(selection)
.interactive()
)
alt.vconcat(make_selector, chart, padding=64).save(snakemake.output["reduced"])
|
from django.contrib import admin
from .models import Offer
@admin.register(Offer)
class OfferAdmin(admin.ModelAdmin):
pass
|
from flask import Flask
app = Flask(__name__)
# Setup the app with the config.py file
app.config.from_object('app.config')
# Setup the logger
from app.logger_setup import logger
# Setup the database
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy(app)
# Setup the mail server
from flask_mail import Mail
mail = Mail(app)
# Setup the debug toolbar
from flask_debugtoolbar import DebugToolbarExtension
app.config['DEBUG_TB_TEMPLATE_EDITOR_ENABLED'] = False
app.config['DEBUG_TB_PROFILER_ENABLED'] = False
toolbar = DebugToolbarExtension(app)
# Setup the password hashing
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt(app)
# Import the views
from app.views import main, user, error
app.register_blueprint(user.userbp)
# Setup the user login process
from flask_login import LoginManager
from app.models import User
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'userbp.signin'
@login_manager.user_loader
def load_user(email):
return User.query.filter(User.email == email).first()
from app import admin
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple, Union, cast
import numpy as np
import nnabla as nn
import nnabla.functions as NF
import nnabla.parametric_functions as NPF
import nnabla_rl as rl
import nnabla_rl.initializers as RI
from nnabla_rl.models.q_function import DiscreteQFunction
class DQNQFunction(DiscreteQFunction):
'''
Q function proposed by DeepMind in DQN paper for atari environment.
See: https://deepmind.com/research/publications/human-level-control-through-deep-reinforcement-learning
Args:
scope_name (str): the scope name
n_action (int): the number of discrete action
'''
# type declarations to type check with mypy
    # NOTE: declared variables are instance variables and NOT class variables, unless marked with ClassVar
# See https://mypy.readthedocs.io/en/stable/class_basics.html for details
_n_action: int
def __init__(self, scope_name: str, n_action: int):
super(DQNQFunction, self).__init__(scope_name)
self._n_action = n_action
def all_q(self, s: nn.Variable) -> nn.Variable:
''' Predict all q values of the given state
'''
with nn.parameter_scope(self.scope_name):
with nn.parameter_scope("conv1"):
h = NF.relu(NPF.convolution(s, 32, (8, 8), stride=(4, 4),
w_init=RI.HeNormal(s.shape[1],
32,
kernel=(8, 8))
))
with nn.parameter_scope("conv2"):
h = NF.relu(NPF.convolution(h, 64, (4, 4), stride=(2, 2),
w_init=RI.HeNormal(h.shape[1],
64,
kernel=(4, 4))
))
with nn.parameter_scope("conv3"):
h = NF.relu(NPF.convolution(h, 64, (3, 3), stride=(1, 1),
w_init=RI.HeNormal(h.shape[1],
64,
kernel=(3, 3))
))
h = NF.reshape(h, (-1, 3136))
with nn.parameter_scope("affine1"):
h = NF.relu(NPF.affine(h, 512,
w_init=RI.HeNormal(h.shape[1], 512)
))
with nn.parameter_scope("affine2"):
h = NPF.affine(h, self._n_action,
w_init=RI.HeNormal(h.shape[1], self._n_action)
)
return h
class DRQNQFunction(DiscreteQFunction):
'''
Q function with LSTM layer proposed by M. Hausknecht et al. used in DRQN paper for atari environment.
See: https://arxiv.org/pdf/1507.06527.pdf
Args:
scope_name (str): the scope name
n_action (int): the number of discrete action
'''
# type declarations to type check with mypy
    # NOTE: declared variables are instance variables and NOT class variables, unless marked with ClassVar
# See https://mypy.readthedocs.io/en/stable/class_basics.html for details
_n_action: int
_lstm_cell: NPF.LSTMCell
_h: Union[nn.Variable, None]
_c: Union[nn.Variable, None]
def __init__(self, scope_name: str, n_action: int):
super(DRQNQFunction, self).__init__(scope_name)
self._n_action = n_action
self._h = None
self._c = None
self._lstm_state_size = 512
def __deepcopy__(self, memodict: Dict[Any, Any] = {}):
# nn.Variable cannot be deepcopied directly
return self.__class__(self._scope_name, self._n_action)
def all_q(self, s: nn.Variable) -> nn.Variable:
''' Predict all q values of the given state
'''
with nn.parameter_scope(self.scope_name):
with nn.parameter_scope("conv1"):
h = NF.relu(NPF.convolution(s, 32, (8, 8), stride=(4, 4),
w_init=RI.HeNormal(s.shape[1],
32,
kernel=(8, 8))
))
with nn.parameter_scope("conv2"):
h = NF.relu(NPF.convolution(h, 64, (4, 4), stride=(2, 2),
w_init=RI.HeNormal(h.shape[1],
64,
kernel=(4, 4))
))
with nn.parameter_scope("conv3"):
h = NF.relu(NPF.convolution(h, 64, (3, 3), stride=(1, 1),
w_init=RI.HeNormal(h.shape[1],
64,
kernel=(3, 3))
))
h = NF.reshape(h, (-1, 3136))
with nn.parameter_scope("lstm1"):
if not self._is_internal_state_created():
                    # automatically create internal states if not provided
batch_size = h.shape[0]
self._create_internal_states(batch_size)
w_init = RI.HeNormal(h.shape[1], self._lstm_state_size)
self._h, self._c = NPF.lstm_cell(h, self._h, self._c, self._lstm_state_size, w_init=w_init)
if not rl.is_eval_scope():
# Add gradient clip if it is training
self._h = cast(nn.Variable, self._h)
self._c = cast(nn.Variable, self._c)
h_clip_min = nn.Variable.from_numpy_array(np.full(self._h.shape, -10))
h_clip_max = nn.Variable.from_numpy_array(np.full(self._h.shape, 10))
                    c_clip_min = nn.Variable.from_numpy_array(np.full(self._c.shape, -10))
                    c_clip_max = nn.Variable.from_numpy_array(np.full(self._c.shape, 10))
self._h = NF.clip_grad_by_value(self._h, min=h_clip_min, max=h_clip_max)
self._c = NF.clip_grad_by_value(self._c, min=c_clip_min, max=c_clip_max)
h = self._h
with nn.parameter_scope("affine2"):
h = NPF.affine(h, self._n_action,
w_init=RI.HeNormal(h.shape[1], self._n_action))
return h
def is_recurrent(self) -> bool:
return True
def internal_state_shapes(self) -> Dict[str, Tuple[int, ...]]:
        shapes: Dict[str, Tuple[int, ...]] = {}
shapes['lstm_hidden'] = (self._lstm_state_size, )
shapes['lstm_cell'] = (self._lstm_state_size, )
return shapes
def get_internal_states(self) -> Dict[str, nn.Variable]:
states: Dict[str, nn.Variable] = {}
states['lstm_hidden'] = self._h
states['lstm_cell'] = self._c
return states
def set_internal_states(self, states: Optional[Dict[str, nn.Variable]] = None):
if states is None:
if self._h is not None:
self._h.data.zero()
if self._c is not None:
self._c.data.zero()
else:
self._h = states['lstm_hidden']
self._c = states['lstm_cell']
def _create_internal_states(self, batch_size):
self._h = nn.Variable((batch_size, self._lstm_state_size))
self._c = nn.Variable((batch_size, self._lstm_state_size))
self._h.data.zero()
self._c.data.zero()
def _is_internal_state_created(self) -> bool:
return self._h is not None and self._c is not None
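# --- Illustrative usage sketch ---
# A minimal sketch of building the Q-value graph for Atari-style observations
# (4 stacked 84x84 frames); the batch size of 32 is arbitrary.
#
# q_function = DQNQFunction(scope_name='q', n_action=4)
# s = nn.Variable((32, 4, 84, 84))
# q_values = q_function.all_q(s)   # graph output with shape (32, 4)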
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" auto mixed precision """
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore import amp
from mindspore import nn
from mindspore.communication.management import init
from mindspore.context import ParallelMode
from mindspore.train import Model
from ....dataset_mock import MindData
def setup_module(module):
_ = module
context.set_context(mode=context.GRAPH_MODE)
class Net(nn.Cell):
def __init__(self, in_features, out_features):
super(Net, self).__init__()
self.dense = nn.Dense(in_features, out_features)
self.loss = nn.MSELoss()
def construct(self, input_x, label):
output = self.dense(input_x)
loss = self.loss(output, label)
return loss
class NetNoLoss(nn.Cell):
def __init__(self, in_features, out_features):
super(NetNoLoss, self).__init__()
self.dense = nn.Dense(in_features, out_features)
def construct(self, input_x):
return self.dense(input_x)
def test_amp_o0():
inputs = Tensor(np.ones([16, 16]).astype(np.float32))
label = Tensor(np.zeros([16, 16]).astype(np.float32))
net = Net(16, 16)
optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_network = amp.build_train_network(net, optimizer, level="O0")
_ = train_network(inputs, label)
def test_amp_o2():
inputs = Tensor(np.ones([16, 16]).astype(np.float32))
label = Tensor(np.zeros([16, 16]).astype(np.float32))
net = Net(16, 16)
optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_network = amp.build_train_network(net, optimizer, level="O2")
_ = train_network(inputs, label)
def test_amp_o2_loss():
inputs = Tensor(np.ones([16, 16]).astype(np.float32))
label = Tensor(np.zeros([16, 16]).astype(np.float32))
net = NetNoLoss(16, 16)
loss = nn.MSELoss()
optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_network = amp.build_train_network(net, optimizer, loss, level="O2")
_ = train_network(inputs, label)
def test_amp_o0_loss():
inputs = Tensor(np.ones([16, 16]).astype(np.float32))
label = Tensor(np.zeros([16, 16]).astype(np.float32))
net = NetNoLoss(16, 16)
loss = nn.MSELoss()
optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_network = amp.build_train_network(net, optimizer, loss)
_ = train_network(inputs, label)
class MindDataSet(MindData):
def __init__(self, dataset_types, dataset_shapes):
super(MindDataSet, self).__init__(size=2, batch_size=32,
np_types=dataset_types,
output_shapes=dataset_shapes,
input_indexs=(0, 1))
def __next__(self):
if self._size < self._iter_num:
raise StopIteration
self._iter_num += 1
lst = []
for shape_, type_ in zip(self._output_shapes, self._np_types):
lst.append(Tensor(np.ones(shape_).astype(type_)))
return tuple(lst)
def test_compile_model_train_O0():
dataset_types = (np.float32, np.float32)
dataset_shapes = ((16, 16), (16, 16))
dataset = MindDataSet(dataset_types, dataset_shapes)
net = NetNoLoss(16, 16)
loss = nn.MSELoss()
optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={"acc"}, amp_level="O0")
model.train(2, dataset, dataset_sink_mode=False)
with pytest.raises(ValueError):
# not actual run, the metrics step will fail, check if compile ok.
model.eval(dataset)
def test_compile_model_train_O2():
dataset_types = (np.float32, np.float32)
dataset_shapes = ((16, 16), (16, 16))
dataset = MindDataSet(dataset_types, dataset_shapes)
net = NetNoLoss(16, 16)
loss = nn.MSELoss()
optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={"acc"}, amp_level="O2")
model.train(2, dataset, dataset_sink_mode=False)
with pytest.raises(ValueError):
# not actual run, the metrics step will fail, check if compile ok.
model.eval(dataset)
def test_compile_model_train_O2_parallel():
dataset_types = (np.float32, np.float32)
dataset_shapes = ((16, 16), (16, 16))
context.set_auto_parallel_context(
global_rank=0, device_num=8,
gradients_mean=True, parameter_broadcast=True,
parallel_mode=ParallelMode.DATA_PARALLEL)
dataset = MindDataSet(dataset_types, dataset_shapes)
net = NetNoLoss(16, 16)
loss = nn.MSELoss()
optimizer = nn.Momentum(net.trainable_params(), 0.1, 0.9, 0.00004, 1024.0)
init()
model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={"acc"}, amp_level="O2")
model.train(2, dataset, dataset_sink_mode=False)
|
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from Load_And_Visualize_Time_Data import Load_and_Visualize_Time_Data
import sys
import numpy as np
import pandas as pd
from sktime.forecasting.model_selection import temporal_train_test_split
from sklearn.metrics import mean_absolute_error
from sktime.utils.plotting import plot_series
from sktime.performance_metrics.forecasting import smape_loss
import itertools
class Triple_Exponential_Smoothing:
def __init__(self, path, time_freq, trend="add", seasonal="add", time_column=0, sp=12, day_to_month=False,
month_to_year=False, test_size=0.2, model="additive"):
preload = Load_and_Visualize_Time_Data(path, time_column, model)
self.data, self.columns = preload.return_data()
preload.visualize_data()
preload.decompose_time_series()
if day_to_month and time_freq == 'M':
self.day_to_month()
elif month_to_year and time_freq == 'Y':
self.day_to_month()
self.month_to_year()
else:
sys.exit("time frequency and converted frequency does not match")
self.time_freq = time_freq
self.trend = trend
self.seasonal = seasonal
self.test_size = test_size
self.sp = sp
def train_test_split(self):
self.data.index = pd.PeriodIndex(self.data.index, freq=self.time_freq)
self.y_train, self.y_test = temporal_train_test_split(self.data, test_size=self.test_size)
def day_to_month(self):
self.data = self.data.resample('M').sum()
def month_to_year(self):
self.data = self.data.resample('Y').sum()
def best_alpha__beta_value(self, abg, step):
results = []
for comb in abg:
tes_model = ExponentialSmoothing(self.y_train, trend=self.trend, seasonal=self.seasonal,
seasonal_periods=self.sp).fit(smoothing_level=comb[0],
smoothing_slope=comb[1],
smoothing_seasonal=comb[2])
y_pred = tes_model.forecast(step)
mae = mean_absolute_error(self.y_test, y_pred)
results.append([round(comb[0], 2), round(comb[1], 2), round(comb[2], 2), round(mae, 2)])
results = pd.DataFrame(results, columns=["alpha", "beta", "gamma", "mae"]).sort_values("mae")
best_alpha, best_beta, best_gamma, best_mae = results.iloc[0]
return best_alpha, best_beta, best_gamma
def forecast_and_visualize(self):
alphas = betas = gammas = np.arange(0.10, 1, 0.20)
abg = list(itertools.product(alphas, betas, gammas))
alpha, beta, gamma = self.best_alpha__beta_value(abg, len(self.y_test))
# alpha: 0.11 mae: 82.528
        # Build the model with the most suitable alpha value
forecaster = ExponentialSmoothing(self.y_train, trend=self.trend, seasonal=self.seasonal,
seasonal_periods=self.sp).fit(smoothing_level=alpha, smoothing_slope=beta,
smoothing_seasonal=gamma)
fh = np.arange(1, len(self.y_test) + 1)
        # 24-month forecast
y_pred = forecaster.forecast(len(self.y_test))
        plot_series(self.y_train, self.y_test, y_pred, labels=["y_train", "y_test", "y_pred"])
print("Loss is:", smape_loss(self.y_test, y_pred))
|
#!/usr/bin/env python
DEBUG = False
if DEBUG:
# This code only exists to help us visually inspect the images.
# It's in an `if DEBUG:` block to illustrate that we don't need it for our code to work.
from PIL import Image
import numpy as np
def read_image(path):
return np.asarray(Image.open(path).convert('L'))
def write_image(image, path):
img = Image.fromarray(np.array(image), 'L')
img.save(path)
DATA_DIR = 'data/'
TEST_DIR = 'test/'
DATASET = 'fashion-mnist' # `'mnist'` or `'fashion-mnist'`
TEST_DATA_FILENAME = DATA_DIR + DATASET + '/t10k-images-idx3-ubyte'
TEST_LABELS_FILENAME = DATA_DIR + DATASET + '/t10k-labels-idx1-ubyte'
TRAIN_DATA_FILENAME = DATA_DIR + DATASET + '/train-images-idx3-ubyte'
TRAIN_LABELS_FILENAME = DATA_DIR + DATASET + '/train-labels-idx1-ubyte'
def bytes_to_int(byte_data):
return int.from_bytes(byte_data, 'big')
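# For example, the 4-byte big-endian header field b'\x00\x00\x03\xe8' decodes to 1000:
#   bytes_to_int(b'\x00\x00\x03\xe8')  # -> 1000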
def read_images(filename, n_max_images=None):
images = []
with open(filename, 'rb') as f:
_ = f.read(4) # magic number
n_images = bytes_to_int(f.read(4))
if n_max_images:
n_images = n_max_images
n_rows = bytes_to_int(f.read(4))
n_columns = bytes_to_int(f.read(4))
for image_idx in range(n_images):
image = []
for row_idx in range(n_rows):
row = []
for col_idx in range(n_columns):
pixel = f.read(1)
row.append(pixel)
image.append(row)
images.append(image)
return images
def read_labels(filename, n_max_labels=None):
labels = []
with open(filename, 'rb') as f:
_ = f.read(4) # magic number
n_labels = bytes_to_int(f.read(4))
if n_max_labels:
n_labels = n_max_labels
for label_idx in range(n_labels):
label = bytes_to_int(f.read(1))
labels.append(label)
return labels
def flatten_list(l):
return [pixel for sublist in l for pixel in sublist]
def extract_features(X):
return [flatten_list(sample) for sample in X]
def dist(x, y):
"""
Returns the Euclidean distance between vectors `x` and `y`.
"""
return sum(
[
(bytes_to_int(x_i) - bytes_to_int(y_i)) ** 2
for x_i, y_i in zip(x, y)
]
) ** (0.5)
def get_training_distances_for_test_sample(X_train, test_sample):
return [dist(train_sample, test_sample) for train_sample in X_train]
def get_most_frequent_element(l):
return max(l, key=l.count)
def knn(X_train, y_train, X_test, k=3):
y_pred = []
for test_sample_idx, test_sample in enumerate(X_test):
print(test_sample_idx, end=' ', flush=True)
training_distances = get_training_distances_for_test_sample(
X_train, test_sample
)
sorted_distance_indices = [
pair[0]
for pair in sorted(
enumerate(training_distances),
key=lambda x: x[1]
)
]
candidates = [
y_train[idx]
for idx in sorted_distance_indices[:k]
]
top_candidate = get_most_frequent_element(candidates)
y_pred.append(top_candidate)
print()
return y_pred
def get_garment_from_label(label):
return [
'T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle boot',
][label]
def main():
n_train = 1000
n_test = 10
k = 7
print(f'Dataset: {DATASET}')
print(f'n_train: {n_train}')
print(f'n_test: {n_test}')
print(f'k: {k}')
X_train = read_images(TRAIN_DATA_FILENAME, n_train)
y_train = read_labels(TRAIN_LABELS_FILENAME, n_train)
X_test = read_images(TEST_DATA_FILENAME, n_test)
y_test = read_labels(TEST_LABELS_FILENAME, n_test)
if DEBUG:
# Write some images out just so we can see them visually.
for idx, test_sample in enumerate(X_test):
write_image(test_sample, f'{TEST_DIR}{idx}.png')
# Load in the `our_test.png` we drew ourselves!
# X_test = [read_image(f'{DATA_DIR}our_test.png')]
# y_test = [5]
X_train = extract_features(X_train)
X_test = extract_features(X_test)
y_pred = knn(X_train, y_train, X_test, k)
accuracy = sum([
int(y_pred_i == y_test_i)
for y_pred_i, y_test_i
in zip(y_pred, y_test)
]) / len(y_test)
if DATASET == 'fashion-mnist':
garments_pred = [
get_garment_from_label(label)
for label in y_pred
]
print(f'Predicted garments: {garments_pred}')
else:
print(f'Predicted labels: {y_pred}')
print(f'Accuracy: {accuracy * 100}%')
if __name__ == '__main__':
main()
|
from __future__ import print_function, division
import numpy as np
import unittest
"""
Do reddening and de-reddening of optical emission-line fluxes using the
equations in the appendix of Vogt 2013
http://adsabs.harvard.edu/abs/2013ApJ...768..151V
There are 3 public functions; these are to redden and deredden fluxes using the
Balmer decrement, and to calculate the extinction in magnitudes from the low
and high Balmer decrements.
The method used here is valid for wavelengths in [2480, 12390] Angstroem.
This module may be imported to be used in python code, or run as a script to
run some basic tests. The code works with both Python 2 and Python 3.
Adam D. Thomas 2017 - 2020
Research School of Astronomy and Astrophysics
Australian National University
Keywords - extinction, extragalactic reddening, Fischera & Dopita 2005, 2011,
attenuation curve, relative color excess, Calzetti 2001, dust
"""
__version__ = 0.8
# version 0.8 2020 July: Made propagating errors in Balmer decrements optional
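# Example usage (an illustrative sketch, not from the original documentation;
# the wavelengths and fluxes below are made up). Halpha and Hbeta must be
# included in the line list:
#
#   lambdas = [4861.33, 6562.82, 6583.0]   # Hbeta, Halpha, [NII]
#   fluxes = [1.0, 4.2, 1.3]               # observed (reddened) fluxes
#   dered_fluxes = deredden(lambdas, fluxes, BD=2.85, normalise=True)
#   A_v = Av_from_BD(BD_low=2.85, BD_high=4.2 / 1.0)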
def _calc_relative_colour_excess(lambda_AA):
"""
Calculate the "relative colour excess" E_(lambda-V) / E_(B-V) using Equation
A11 in the appendix of Vogt13. Valid for lambda in [2480, 12390] Angstroem,
with the relative color excess decreasing monotonically from 3.86 to -2.79
over this range in lambda.
lambda_AA: Wavelength in Angstroems. A list or array of floats.
Returns the relative colour excess as an array of floats.
"""
lambda_AA = np.asarray(lambda_AA, dtype=np.float64)
assert np.all(lambda_AA > 2480) and np.all(lambda_AA < 12390)
lambda_um = lambda_AA * 1e-4 # Convert wavelength to um
# Coefficients in Equation A11 in Vogt13
A11_coeffs = {0:-4.61777, -1:1.41612, -2:1.52077, -3:-0.63269, -4:0.07386}
# Calculate relative color excess using Equation A11
return sum( (A11_coeffs[i] * lambda_um**i) for i in [0,-1,-2,-3,-4] )
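# As an approximate sanity check (values computed from the coefficients above,
# not quoted from Vogt13): Halpha at 6563 AA (0.6563 um) gives a relative
# colour excess of roughly -0.77, while Hbeta at 4861 AA gives roughly +0.55.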
def _find_BD(line_lambdas, line_fluxes, flux_errs=None):
"""
Given lists or arrays of emission line wavelengths and corresponding fluxes,
find the Balmer decrement (BD). We do some checks.
line_lambdas: 1D list or array of line wavelengths in Angstroems
line_fluxes: 1D list or array, or alternatively a list of arrays of arbitrary
shape (but all the same shape), containing line fluxes.
flux_errs: Errors corresponding to line_fluxes; optional.
Returns the Balmer decrement F_Halpha/F_Hbeta as a float or array of floats.
If flux_errs is supplied, the error in the Balmer decrement is also returned,
with the error in Halpha and Hbeta propagated into the Balmer decrement.
"""
line_lambdas = np.asarray(line_lambdas, dtype=np.float64)
line_fluxes = np.asarray(line_fluxes, dtype=np.float64)
assert line_lambdas.size == len(line_fluxes)
assert line_lambdas.ndim == 1
if flux_errs is not None:
assert len(flux_errs) == len(line_fluxes)
# Find Halpha and Hbeta fluxes:
rounded_lambdas = np.round(line_lambdas, 0)
where_Halpha = (rounded_lambdas == 6563) # Halpha is at 6562.819 AA
where_Hbeta = (rounded_lambdas == 4861) # Hbeta is at 4861.333 AA
n_where_Halpha, n_where_Hbeta = np.sum(where_Halpha), np.sum(where_Hbeta)
if n_where_Halpha == 0:
raise ValueError("Input lines do not include Halpha (6563 AA)")
elif n_where_Halpha == 1:
ind_Ha = where_Halpha.nonzero()[0][0] # An integer index
halpha_flux = line_fluxes[ind_Ha]
# So halpha_flux is a float if "line_fluxes" is 1D, or an array
# if "line_fluxes" is a list of arrays.
else:
raise ValueError("Multiple lines at a wavelength similar to Halpha!")
if n_where_Hbeta == 0:
raise ValueError("Input lines do not include Hbeta (4861 AA)")
elif n_where_Hbeta == 1:
ind_Hb = where_Hbeta.nonzero()[0][0] # An integer index
hbeta_flux = line_fluxes[ind_Hb]
else:
raise ValueError("Multiple lines at a wavelength similar to Hbeta!")
decrement = halpha_flux / hbeta_flux
if flux_errs is None:
        # Return Balmer decrement F_Halpha / F_Hbeta (possibly an array of
        # Balmer decrements)
return decrement
else: # Also return the error in the Balmer decrement.
# We propagate the errors in Halpha and Hbeta
halpha_rel_err = flux_errs[ind_Ha] / halpha_flux
hbeta_rel_err = flux_errs[ind_Hb] / hbeta_flux
decrement_err = decrement * np.hypot(halpha_rel_err, hbeta_rel_err)
return decrement, decrement_err
def _apply_BD(line_lambdas, line_fluxes, flux_errs, BD, normalise,
propagate_errors):
"""
De-redden or redden emission line fluxes by considering a target Balmer
decrement (BD), using Equation A12 in the Appendix of Vogt13. This
function is its own inverse, i.e. reddens or dereddens.
line_lambdas, line_fluxes, flux_errs: As in functions "deredden" and "redden".
BD: Desired Balmer decrement (F_Halpha / F_Hbeta) to use in (de)reddening
(will be the Balmer decrement in the output). If BD is a scalar, the
output will be a 1D numpy array. If BD is an array, the (de)reddening
will be applied for all supplied values of BD and the output will be a
list of flux arrays. The output arrays will all have the same shape as
BD and the list will have the same length as line_lambdas.
normalise: Boolean. Normalise output to Hbeta==1?
propagate_errors: Boolean. Propagate error in observed Balmer decrement
into the errors on the (de)reddened fluxes?
Returns out_fluxes, an array or list of arrays of (de)reddened line fluxes
corresponding to the input line_lambdas. If flux_errs are supplied then
the output will be (out_fluxes, out_flux_errs), where out_flux_errs has
the same format as out_fluxes.
"""
if BD is None:
raise ValueError("BD must be specified")
line_lambdas = np.asarray(line_lambdas, dtype=np.float64)
in_fluxes = np.asarray(line_fluxes, dtype=np.float64)
assert line_lambdas.ndim == 1
assert line_lambdas.shape == in_fluxes.shape
if flux_errs is not None:
in_flux_errs = np.asarray(flux_errs, dtype=np.float64)
assert line_lambdas.shape == in_flux_errs.shape
assert isinstance(normalise, bool)
assert isinstance(propagate_errors, bool)
if flux_errs is None and propagate_errors:
raise Exception("Can't propagate errors - no flux errors given")
BD2 = np.asarray(BD, dtype=np.float64)
is_multiple = (BD2.size > 1) # Output multiple fluxes for each line?
# Find observed Balmer decrement:
if flux_errs is None:
BD1 = _find_BD(line_lambdas, in_fluxes)
else:
BD1, BD1_err = _find_BD(line_lambdas, in_fluxes, in_flux_errs)
# Apply Equation A12 in Vogt13
r_c_e = _calc_relative_colour_excess(line_lambdas) # Vector of RCE
p = 0.76*(r_c_e + 4.5) # Vector of exponents (p=Powers) for (de)reddening equation
# (De)redden fluxes
# f2 = f1 * (BD1/BD2)^p
if is_multiple: # BD2 is an array; construct a list of (de)reddened flux arrays
out_fluxes = [f * (BD1 / BD2)**p_i for f,p_i in zip(in_fluxes, p)]
else: # BD2 is a scalar; make a 1D vector of (de)reddened fluxes
out_fluxes = in_fluxes * (BD1 / BD2)**p
# (De)redden errors
if flux_errs is not None:
if not propagate_errors:
if is_multiple:
# BD2 is an array; construct a list of (de)reddened error arrays
out_errs = [e * (BD1 / BD2)**p_i for e,p_i in zip(in_flux_errs, p)]
else: # BD2 is a scalar; make a 1D vector of (de)reddened errors
out_errs = in_flux_errs * (BD1 / BD2)**p
else:
# Propagate errors on the fluxes and on the starting BD
# f2 = f1 * (BD1/BD2)^p where f1 and B1 have uncertainties.
# => df2 = f1 BD2**-p |p BD1**(p-1)| dBD1 + (BD1/BD2)**p df1
if is_multiple:
out_errs = [
(f * BD2**-p_i * np.abs(p_i * BD1**(p_i-1)) * BD1_err
+ (BD1 / BD2)**p_i * f_err
) for f, f_err, p_i in zip(in_fluxes, in_flux_errs, p)]
else:
out_errs = (in_fluxes * BD2**-p * np.abs(p * BD1**(p-1)) * BD1_err
+ (BD1 / BD2)**p * in_flux_errs
)
# Normalise output fluxes and errors
if normalise:
where_Hbeta = (np.round(line_lambdas, 0) == 4861) # Hbeta is at 4861.333 AA
# We've already checked, and there's exactly one line qualifying as Hbeta
ind_Hb = where_Hbeta.nonzero()[0][0] # An integer index
hbeta_unnormed_out_flux = out_fluxes[ind_Hb].copy()
# The "copy" on the above line is important!
# We don't propagate errors in the normalisation
# Scale fluxes:
if is_multiple:
for f in out_fluxes:
f /= hbeta_unnormed_out_flux # In-place division
else:
out_fluxes /= hbeta_unnormed_out_flux
# Scale errors:
if flux_errs is not None:
if is_multiple:
for err in out_errs:
err /= hbeta_unnormed_out_flux # In-place division
else:
out_errs /= hbeta_unnormed_out_flux
if flux_errs is None:
return out_fluxes
else:
return out_fluxes, out_errs
def _BD_from_Av_for_dereddening(line_lambdas, line_fluxes, A_v):
"""
Find the de-reddened Balmer decrement (BD) that would arise from "removing"
an extinction of A_v (magnitudes) from the line_fluxes.
line_lambdas, line_fluxes: As in the function "deredden".
A_v: The extinction (magnitudes), as a scalar or array of extinction values.
Returns the Balmer decrement dereddened_BD (F_Halpha / F_Hbeta), as a float
or array of floats with the same shape as A_v.
"""
assert np.all(np.asarray(A_v) >= 0)
initial_BD = _find_BD(line_lambdas, line_fluxes)
# Calculate the Balmer decrement (BD) that would result from "removing" an
# extinction of A_v, using an inverted form of Equation A14 in Vogt13.
dereddened_BD = initial_BD / 10**(A_v / 8.55)
return dereddened_BD
def _BD_from_Av_for_reddening(line_lambdas, line_fluxes, A_v):
"""
Find the reddened Balmer decrement (BD) that would arise from "applying"
an extinction of A_v (magnitudes) to the line_fluxes.
line_lambdas, line_fluxes: As in the function "redden".
A_v: The extinction (magnitudes), as a scalar or array of extinction values.
Returns the Balmer decrement reddened_BD (F_Halpha / F_Hbeta), as a float
or array of floats with the same shape as A_v.
"""
assert np.all(np.asarray(A_v) >= 0)
initial_BD = _find_BD(line_lambdas, line_fluxes)
# Calculate the Balmer decrement (BD) that would result from an extinction
# of A_v, using an inverted form of Equation A14 in Vogt13.
reddened_BD = initial_BD * 10**(A_v / 8.55)
# This equation differs slightly from that in "_BD_from_Av_for_dereddening"!
return reddened_BD
def Av_from_BD(BD_low, BD_high):
"""
    Calculate the extinction in magnitudes associated with an increase in the
Balmer decrement, BD = F_Halpha / F_Hbeta.
BD_low: The intrinsic (lower) Balmer decrement
BD_high: The reddened (higher) Balmer decrement
If both BD_low and BD_high are non-scalar arrays, they must have the same shape.
Returns A_v, the extinction in magnitudes as a float or array of floats.
"""
BD_low_arr, BD_high_arr = np.asarray(BD_low), np.asarray(BD_high)
if BD_low_arr.size > 1 and BD_high_arr.size > 1:
assert BD_low_arr.shape == BD_high_arr.shape
assert np.all(BD_low_arr <= BD_high_arr)
A_v = 8.55 * np.log10( BD_high / BD_low ) # Equation A14 in Vogt13
return A_v
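# Worked example (illustrative numbers): an observed Balmer decrement of 3.5
# against an intrinsic value of 2.85 gives
#   A_v = 8.55 * log10(3.5 / 2.85) ~= 0.76 mag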
def deredden(line_lambdas, line_fluxes, line_errs=None, BD=None, A_v=None,
normalise=False, propagate_errors=False):
"""
Deredden emission line fluxes by either specifying a target Balmer decrement
(F_Halpha/F_Hbeta), or an extinction A_v to be "removed".
This is the inverse function of "redden".
line_lambdas: List/array of floats. Wavelengths in Angstroems.
line_fluxes: Corresponding float, list or array of reddened fluxes. Note
that line_lambdas and line_fluxes must include Halpha
(6562.8 A) and Hbeta (4861.3 A).
line_errs: Flux errors corresponding to line_fluxes (optional).
A_v: The assumed extinction (magnitudes) to "remove" when dereddening.
BD: The target (intrinsic) Balmer decrement.
Only one of "A_v" or "BD" may be specified. If neither is set, BD=2.85 is
used. If the supplied value is a scalar, the output fluxes will be a 1D
numpy array. If BD (or A_v) is an array, the de-reddening will be applied
for all supplied values of BD (or A_v) and the output fluxes will be a list
of arrays. The output arrays will all have the same shape as BD (or A_v)
and the list containing them will have the same length as line_lambdas.
normalise: Normalise output to Hbeta==1? Default: False
propagate_errors: Propagate error in observed Balmer decrement into the
errors on the dereddened fluxes? Default: False
Returns dered_fluxes if line_errs is not specified, or alternatively returns
(dered_fluxes, dered_errs) if line_errs is given. The array (or list of
arrays) dered_fluxes contains de-reddened fluxes corresponding to the input
line_lambdas. The output dered_errs contains corresponding errors propagated
from the input errors. The dereddened fluxes (and errors) are normalised to
Hbeta == 1 if normalise == True.
"""
# Look at what was specified, and determine the Balmer decrement to use.
if A_v is None:
if BD is None:
BD = 2.85
else: # A_v is specified
if BD is not None:
raise ValueError("Must specify only one of A_v or BD, not both")
BD = _BD_from_Av_for_dereddening(line_lambdas, line_fluxes, A_v)
return _apply_BD(line_lambdas, line_fluxes, line_errs, BD=BD,
normalise=normalise, propagate_errors=propagate_errors)
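# For instance (an illustrative sketch, with made-up fluxes and errors), the
# same call can deredden by a fixed extinction and carry the errors through:
#
#   fluxes, errs = deredden([4861.33, 6562.82], [1.0, 4.0], [0.05, 0.2],
#                           A_v=1.0, normalise=True, propagate_errors=True)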
def redden(line_lambdas, line_fluxes, line_errs=None, BD=None, A_v=None,
normalise=False, propagate_errors=False):
"""
Redden emission line fluxes by either specifying a target Balmer decrement
(F_Halpha/F_Hbeta), or an extinction A_v to be "applied".
This is the inverse function of "deredden".
line_lambdas: Float, or list/array of floats. Wavelengths in Angstroems.
line_fluxes: Corresponding float, list or array of intrinsic fluxes. Note
that line_lambdas and line_fluxes must include Halpha
(6562.8 A) and Hbeta (4861.3 A).
line_errs: Flux errors corresponding to line_fluxes (optional).
A_v: The assumed extinction (magnitudes) to "apply" when reddening.
BD: The target Balmer decrement.
Only one of "A_v" or "BD" may be specified. If the supplied value is a
scalar, the output fluxes will be a 1D numpy array. If BD (or A_v) is an
array, the reddening will be applied for all supplied values of BD (or A_v)
and the output fluxes will be a list of arrays. The output arrays will all
have the same shape as BD (or A_v) and the list containing them will have
the same length as line_lambdas.
normalise: Normalise output to Hbeta==1? Default: False
propagate_errors: Propagate error in observed Balmer decrement into the
errors on the reddened fluxes? Default: False
Returns red_fluxes if line_errs is not specified, or alternatively returns
(red_fluxes, red_errs) if line_errs is given. The array (or list of
arrays) red_fluxes contains reddened fluxes corresponding to the input
line_lambdas. The output red_errs contains corresponding errors propagated
from the input errors. The reddened fluxes (and errors) are normalised to
Hbeta == 1 if normalise == True.
"""
    # Look at what was specified, and determine the Balmer decrement to use.
if A_v is None:
if BD is None:
raise ValueError("Must specify one of A_v or BD")
else: # A_v is specified
if BD is not None:
raise ValueError("Must specify only one of A_v or BD, not both")
BD = _BD_from_Av_for_reddening(line_lambdas, line_fluxes, A_v)
return _apply_BD(line_lambdas, line_fluxes, line_errs, BD=BD,
normalise=normalise, propagate_errors=propagate_errors)
class _Tests(unittest.TestCase):
""" A collection of test cases for testing this module """
def test_simple_red_dered_single_BD(self):
# Test that the Balmer decrement behaves as expected
l_HbHa = [4861, 6563] # Hbeta and Halpha wavelengths in Angstroems
kw = {"normalise": True, "propagate_errors": False}
self.assertTrue(np.isclose(
_apply_BD(l_HbHa,[1.0,3.5], None, BD=2.9, **kw)[1], 2.9, atol=atol))
self.assertTrue(np.isclose(
_apply_BD(l_HbHa,[1.0,2.9], None, BD=3.5, **kw)[1], 3.5, atol=atol))
self.assertTrue(np.isclose(
deredden( l_HbHa,[1.0,3.5], None, BD=2.9, **kw)[1], 2.9, atol=atol))
self.assertTrue(np.isclose(
redden( l_HbHa,[1.0,2.9], None, BD=3.5, **kw)[1], 3.5, atol=atol))
def test_simple_red_dered_multiple_BD(self):
# Test that the Balmer decrement behaves as expected
l_HbHa = [4861, 6563] # Hbeta and Halpha wavelengths in Angstroems
arr_29, arr_31 = np.array([2.9, 2.9, 2.9]), np.array([3.1, 3.1, 3.1])
kw = {"normalise": True, "propagate_errors": False}
self.assertTrue(np.allclose(
_apply_BD(l_HbHa,[1.0,3.5], None, BD=arr_29, **kw)[1], arr_29, atol=atol))
self.assertTrue(np.allclose(
_apply_BD(l_HbHa,[1.0,2.9], None, BD=arr_31, **kw)[1], arr_31, atol=atol))
self.assertTrue(np.allclose(
deredden( l_HbHa,[1.0,3.5], None, BD=arr_29, **kw)[1], arr_29, atol=atol))
self.assertTrue(np.allclose(
redden( l_HbHa,[1.0,2.9], None, BD=arr_31, **kw)[1], arr_31, atol=atol))
def test_1D_outputs(self):
# Some simple tests of the functionality of this module for 1D outputs
# (i.e. reddening or dereddening by only one input BD or A_v)
# Test data
BD1, BD_intrinsic = 3.41, 2.9
obs_lambdas = [6563, 6583, 3726.032, 4861.33] # Halpha, [NII], [OII], Hbeta
obs_fluxes = [BD1, 4.1, 1.35, 0.99999]
# Test that "_apply_BD" is its own inverse function
dered_fluxes_1 = _apply_BD(obs_lambdas, obs_fluxes, None, BD=BD_intrinsic,
normalise=True, propagate_errors=False)
rered_fluxes_1 = _apply_BD(obs_lambdas, dered_fluxes_1, None, BD=BD1,
normalise=True, propagate_errors=False)
self.assertTrue(np.allclose(rered_fluxes_1, obs_fluxes, atol=atol))
dered_fluxes_1a = _apply_BD(obs_lambdas, obs_fluxes, None,
BD=BD_intrinsic, normalise=False,
propagate_errors=False)
rered_fluxes_1a = _apply_BD(obs_lambdas, dered_fluxes_1a, None, BD=BD1,
normalise=True, propagate_errors=False)
self.assertTrue(np.allclose(rered_fluxes_1a, obs_fluxes, atol=atol))
# Test that "_BD_from_Av_for_dereddening", "_BD_from_Av_for_dereddening"
# and "Av_from_BD" are consistent
Av1 = Av_from_BD(BD_low=BD_intrinsic, BD_high=BD1)
BD_a = _BD_from_Av_for_dereddening(obs_lambdas, obs_fluxes, A_v=Av1)
self.assertTrue(np.isclose(BD_a, BD_intrinsic, atol=atol))
BD_b = _BD_from_Av_for_reddening(obs_lambdas, dered_fluxes_1, A_v=Av1)
self.assertTrue(np.isclose(BD_b, BD1, atol=atol))
# Test that "deredden" and "redden" are inverse functions, using A_v
dered_fluxes_2 = deredden(obs_lambdas, obs_fluxes, A_v=Av1, normalise=True)
self.assertTrue(np.allclose(dered_fluxes_2, dered_fluxes_1, atol=atol))
rered_fluxes_2 = redden(obs_lambdas, dered_fluxes_2, A_v=Av1, normalise=True)
self.assertTrue(np.allclose(rered_fluxes_2, rered_fluxes_1, atol=atol))
self.assertTrue(np.allclose(rered_fluxes_2, obs_fluxes, atol=atol))
# Test that an extinction of 0 results in negligible flux change
same_fluxes = redden(obs_lambdas, obs_fluxes, A_v=0, normalise=True)
self.assertTrue(np.allclose(same_fluxes, obs_fluxes, atol=atol))
def test_nD_outputs(self):
# Some simple tests of the functionality of this module for nD outputs
# (i.e. reddening or dereddening for all of an array of BD or A_v)
# Test data
BD1, BD_intrinsic = 3.41, np.array([[2.85, 2.9, 2.95],[3, 3.05, 3.1]])
obs_lambdas = [6563, 6583, 3726.032, 4861.33] # Halpha, [NII], [OII], Hbeta
obs_fluxes = [BD1, 4.1, 1.35, 0.99999]
# Test that "_apply_BD" and "_apply_BD" are inverse functions
dered_fluxes_1 = _apply_BD(obs_lambdas, obs_fluxes, None, BD=BD_intrinsic,
normalise=True, propagate_errors=False)
# As a list of 1D tuples of dereddened [Halpha, [NII], [OII], Hbeta] fluxes:
dered_fluxes_1a = list(zip(*[a.ravel() for a in dered_fluxes_1]))
rered_fluxes_1a = [_apply_BD(obs_lambdas, f, None, BD=BD1,
normalise=True, propagate_errors=False) for f in dered_fluxes_1a]
for rered_fluxes_1_i in rered_fluxes_1a:
self.assertTrue(np.allclose(rered_fluxes_1_i, obs_fluxes, atol=atol))
# Test that "_BD_from_Av_for_dereddening", "_BD_from_Av_for_dereddening"
# and "Av_from_BD" are consistent
Av1 = Av_from_BD(BD_low=BD_intrinsic, BD_high=BD1) # Array of A_v
BD_a = _BD_from_Av_for_dereddening(obs_lambdas, obs_fluxes, A_v=Av1)
self.assertTrue(np.allclose(BD_a, BD_intrinsic, atol=atol))
for i, dered_fluxes_1_i in enumerate(dered_fluxes_1a):
BD_b = _BD_from_Av_for_reddening(obs_lambdas, dered_fluxes_1_i, A_v=Av1.flat[i])
self.assertTrue(np.allclose(BD_b, BD1, atol=atol))
# Test that "deredden" and "redden" are inverse functions, using A_v
dered_fluxes_2 = deredden(obs_lambdas, obs_fluxes, A_v=Av1, normalise=True)
dered_fluxes_2a = list(zip(*[a.ravel() for a in dered_fluxes_2]))
for i, dered_fluxes_1_i in enumerate(dered_fluxes_1a):
self.assertTrue(np.allclose(dered_fluxes_2a[i], dered_fluxes_1_i, atol=atol))
rered_fluxes_2_i = redden(obs_lambdas, dered_fluxes_2a[i],
A_v=Av1.flat[i], normalise=True)
self.assertTrue(np.allclose(rered_fluxes_2_i, rered_fluxes_1a[i], atol=atol))
self.assertTrue(np.allclose(rered_fluxes_2_i, obs_fluxes, atol=atol))
def test_simple_uncertainty_handling_1D(self):
# Test uncertainty handling in this module, for cases with 1D outputs,
# and without propagating uncertainties in (de)reddening
# Also tests that the default propagate_errors in functions "redden"
# and "deredden" is False.
BD1, BD_intrinsic = 3.41, 2.9
# Halpha, [NII], [OII], Hbeta
obs_lambdas = [6562.819, 6583, 3726.032, 4861.333]
obs_fluxes = [BD1, 4.1, 1.35, 1.0 ]
obs_flux_errs = [BD1/3., 0.1, 0.001, 123.456 ]
obs_rel_errs = np.array(obs_flux_errs) / np.array(obs_fluxes)
# Test that "_apply_BD" is its own inverse function
dered_fluxes_1, dered_errs_1 = _apply_BD(obs_lambdas, obs_fluxes,
obs_flux_errs, BD=BD_intrinsic, normalise=True, propagate_errors=False)
rered_fluxes_1, rered_errs_1 = _apply_BD(obs_lambdas, dered_fluxes_1,
dered_errs_1, BD=BD1, normalise=True, propagate_errors=False)
self.assertTrue(np.allclose(rered_fluxes_1, obs_fluxes, atol=atol))
dered_rel_errs = dered_errs_1 / dered_fluxes_1
self.assertTrue(np.allclose(dered_rel_errs, obs_rel_errs, atol=atol))
self.assertTrue(np.allclose(rered_errs_1, obs_flux_errs, atol=atol))
# Same but without normalising fluxes
dered_fluxes_1a, dered_errs_1a = _apply_BD(obs_lambdas, obs_fluxes,
obs_flux_errs, BD=BD_intrinsic, normalise=False,
propagate_errors=False)
rered_fluxes_1a, rered_errs_1a = _apply_BD(obs_lambdas, dered_fluxes_1a,
dered_errs_1a, BD=BD1, normalise=False,
propagate_errors=False)
# Need bigger tolerances here...
# It would be nice to increase the precision of _apply_BD
big_tols = {"atol": 0.002, "rtol": 0.0005}
self.assertTrue(np.allclose(rered_fluxes_1a, obs_fluxes, **big_tols))
self.assertTrue(np.allclose(rered_errs_1a, obs_flux_errs, **big_tols))
# Test that "deredden" and "redden" are inverse functions, using A_v
# Also tests that the default for "propagate_errors" is False
Av1 = Av_from_BD(BD_low=BD_intrinsic, BD_high=BD1)
dered_fluxes_2, dered_errs_2 = deredden(obs_lambdas, obs_fluxes,
obs_flux_errs, A_v=Av1, normalise=True)
self.assertTrue(np.allclose(dered_fluxes_2, dered_fluxes_1, atol=atol))
self.assertTrue(np.allclose(dered_errs_2, dered_errs_1, atol=atol))
rered_fluxes_2, rered_errs_2 = redden(obs_lambdas, dered_fluxes_2,
dered_errs_2, A_v=Av1, normalise=True)
self.assertTrue(np.allclose(rered_fluxes_2, rered_fluxes_1, atol=atol))
self.assertTrue(np.allclose(rered_fluxes_2, obs_fluxes, atol=atol))
self.assertTrue(np.allclose(rered_errs_2, rered_errs_1, atol=atol))
self.assertTrue(np.allclose(rered_errs_2, obs_flux_errs, atol=atol))
# Test that an extinction of 0 results in negligible error change
same_fluxes, same_errs = redden(obs_lambdas, obs_fluxes, obs_flux_errs,
A_v=0, normalise=True)
self.assertTrue(np.allclose(same_errs, obs_flux_errs, atol=atol))
def test_propagating_uncertainties_1D(self):
# Test uncertainty handling in this module, for cases with 1D outputs,
# and where uncertainties in Balmer decrements are propagated
# We can rewrite the formula for the uncertainty (in _apply_BD) as follows:
# df2 = f2 / SN1 * (p+1) where SN1 = f_Ha / dF_Ha
# where we're only dereddening Halpha, and assuming the uncertainty in
# the initial BD is the uncertainty in the initial Ha flux.
# The maximum value of p+1 is 7.36 (for rce=3.86 for lambda=2480AA), so an
# upper bound is df_Ha2 < 7.36 * f2 / (f1 / err1), where df_Ha2 is not
# normalised to Hbeta == 1 (which would decrease df_Ha2).
for BD2 in [2.9, 5.3]: # De-reddening and reddening
l1, f1, err1 = [4861, 6563], [1.0, 3.5], [0, 0.81] # Hbeta, Halpha
# Set zero error in Hbeta for convenience
f2_l, err2_l = _apply_BD(l1, f1, err1, BD=BD2, normalise=True,
propagate_errors=True)
f2, err2 = f2_l[1], err2_l[1]
# Check that errors are in expected range
self.assertTrue(err2 > (f2 / f1[1]) * err1[1]) # Check lower bound (no error propagation)
SN1 = f1[1] / err1[1] # Signal-to-noise
self.assertTrue(err2 < 7.36 * f2 / SN1) # Check upper bound on error
p_Ha = 0.76 * (_calc_relative_colour_excess(l1[1]) + 4.5) # Exponent
# Check actual value using alternative equation:
# Note that f2 has already been scaled by Hbeta, so now the equation
# df2 = f2 / SN1 * (p+1) will give the error normalised to Hbeta == 1.
self.assertTrue(np.isclose(err2, (f2 / SN1 * (p_Ha+1)), atol=atol))
def test_simple_uncertainty_handling_nD(self):
# Some simple tests of the functionality of this module for nD outputs
# (i.e. reddening or dereddening for all of an array of BD or A_v),
# and without propagating uncertainties in (de)reddening
# The default is "propagate_errors=False" in "redden" and deredden".
# Test data
BD1, BD_intrinsic = 3.41, np.array([[2.85, 2.9, 2.95],[3, 3.05, 3.1]])
# Halpha, [NII], [OII], Hbeta
obs_lambdas = [6563, 6583, 3726.032, 4861.33]
obs_fluxes = [BD1, 4.1, 1.35, 1.0 ]
obs_flux_errs = [BD1/3., 0.1, 0.001, 123.456]
# Test that "_apply_BD" and "_apply_BD" are inverse functions
dered_fluxes_1, dered_errs_1 = _apply_BD(obs_lambdas, obs_fluxes,
obs_flux_errs, BD=BD_intrinsic, normalise=True, propagate_errors=False)
# As a list of 1D tuples of dereddened [Halpha, [NII], [OII], Hbeta]:
dered_fluxes_1a = list(zip(*[a.ravel() for a in dered_fluxes_1]))
dered_errs_1a = list(zip(*[a.ravel() for a in dered_errs_1]))
rered_fluxes_errs_1a = [_apply_BD(obs_lambdas, f, e, BD=BD1, normalise=True,
propagate_errors=False) for f,e in zip(dered_fluxes_1a, dered_errs_1a)]
for rered_fluxes_1_i, rered_errs_1_i in rered_fluxes_errs_1a:
self.assertTrue(np.allclose(rered_fluxes_1_i, obs_fluxes, atol=atol))
self.assertTrue(np.allclose(rered_errs_1_i, obs_flux_errs, atol=atol))
# Test that "deredden" and "redden" are inverse functions, using A_v
Av1 = Av_from_BD(BD_low=BD_intrinsic, BD_high=BD1) # Array of A_v
dered_fluxes_2, dered_errs_2 = deredden(obs_lambdas, obs_fluxes,
obs_flux_errs, A_v=Av1, normalise=True)
# Each entry in dered_fluxes_2 is an array over Av1.
dered_fluxes_2a = list(zip(*[a.ravel() for a in dered_fluxes_2]))
dered_errs_2a = list(zip(*[a.ravel() for a in dered_errs_2]))
for i in range(len(dered_fluxes_1a)):
self.assertTrue(np.allclose(dered_fluxes_2a[i], dered_fluxes_1a[i], atol=atol))
self.assertTrue(np.allclose(dered_errs_2a[i], dered_errs_1a[i], atol=atol))
rered_fluxes_2_i, rered_errs_2_i = redden(obs_lambdas, dered_fluxes_2a[i],
dered_errs_2a[i], A_v=Av1.flat[i], normalise=True)
rered_fluxes_1a_i, rered_errs_1a_i = rered_fluxes_errs_1a[i]
self.assertTrue(np.allclose(rered_fluxes_2_i, rered_fluxes_1a_i, atol=atol))
self.assertTrue(np.allclose(rered_fluxes_2_i, obs_fluxes, atol=atol))
self.assertTrue(np.allclose(rered_errs_2_i, rered_errs_1a_i, atol=atol))
self.assertTrue(np.allclose(rered_errs_2_i, obs_flux_errs, atol=atol))
def test_uncertainty_handling_nD(self):
# Test uncertainty handling in this module, for cases with nD outputs,
# and where uncertainties in Balmer decrements are propagated
BDa = np.array([[2.9, 3.0], [2.8, 2.7], [2.6,2.85]])
for BD2 in [BDa, BDa + 2.5]: # De-reddening and reddening
l1, f1, err1 = [4861, 6563], [1.0, 3.5], [0, 0.81] # Hbeta, Halpha
# Make lists of arrays:
f2, err2 = _apply_BD(l1, f1, err1, BD=BD2, normalise=True,
propagate_errors=True)
# Check that errors are in expected range
# Check lower bound (no error propagation):
self.assertTrue(np.all(err2[1] > (f2[1] / f1[1]) * err1[1]))
SN1 = f1[1] / err1[1] # Signal-to-noise
self.assertTrue(np.all(err2[1] < 7.36 * f2[1] / SN1)) # Check upper bound on error
p_Ha = 0.76 * (_calc_relative_colour_excess(l1[1]) + 4.5) # Exponent
self.assertTrue(np.allclose(err2[1], (f2[1] / SN1 * (p_Ha+1)), atol=atol))
if __name__ == "__main__":
# If we run this module as a script, do some tests
print("Testing ADT dereddening module version {0} ...".format(__version__))
atol = 5e-4 # Absolute tolerance for comparing fluxes (Hbeta == 1)
# Decrease this tolerance to watch the unit tests fail!
unittest.main()
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dmp_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service import LinkedService
class Db2LinkedService(LinkedService):
"""Linked service for DB2 data source.
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Constant filled by server.
:type type: str
:param server: Server name for connection. Type: string (or Expression
with resultType string).
:type server: object
:param database: Database name for connection. Type: string (or Expression
with resultType string).
:type database: object
:param schema: Schema name for connection. Type: string (or Expression
with resultType string).
:type schema: object
:param authentication_type: AuthenticationType to be used for connection.
Possible values include: 'Basic'
:type authentication_type: str or
~azure.mgmt.datafactory.models.Db2AuthenticationType
:param username: Username for authentication. Type: string (or Expression
with resultType string).
:type username: object
:param password: Password for authentication.
:type password: ~azure.mgmt.datafactory.models.SecretBase
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'server': {'required': True},
'database': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'server': {'key': 'typeProperties.server', 'type': 'object'},
'database': {'key': 'typeProperties.database', 'type': 'object'},
'schema': {'key': 'typeProperties.schema', 'type': 'object'},
'authentication_type': {'key': 'typeProperties.authenticationType', 'type': 'str'},
'username': {'key': 'typeProperties.username', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, server, database, additional_properties=None, connect_via=None, description=None, parameters=None, annotations=None, schema=None, authentication_type=None, username=None, password=None, encrypted_credential=None):
super(Db2LinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description, parameters=parameters, annotations=annotations)
self.server = server
self.database = database
self.schema = schema
self.authentication_type = authentication_type
self.username = username
self.password = password
self.encrypted_credential = encrypted_credential
self.type = 'Db2'
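# A minimal construction sketch (illustrative values; using SecureString as the
# SecretBase subtype here is an assumption, not part of the generated model):
#
#   from azure.mgmt.datafactory.models import SecureString
#   db2_linked_service = Db2LinkedService(
#       server='myserver.contoso.com', database='SAMPLE',
#       authentication_type='Basic', username='db2admin',
#       password=SecureString(value='<password>'))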
|
KIND_ASSETS = {
"0.10.0": {
"linux": (
"https://github.com/kubernetes-sigs/kind/releases/download/v0.10.0/kind-linux-amd64",
"74767776488508d847b0bb941212c1cb76ace90d9439f4dee256d8a04f1309c6",
),
"darwin": (
"https://github.com/kubernetes-sigs/kind/releases/download/v0.10.0/kind-darwin-amd64",
"a934e573621917a2785f3ddfa7b6187d18fa1c20c94c013919736b3256d37f57",
),
},
"0.9.0": {
"linux": (
"https://github.com/kubernetes-sigs/kind/releases/download/v0.9.0/kind-linux-amd64",
"35a640e0ca479192d86a51b6fd31c657403d2cf7338368d62223938771500dc8",
),
"darwin": (
"https://github.com/kubernetes-sigs/kind/releases/download/v0.9.0/kind-darwin-amd64",
"849034ffaea8a0e50f9153078890318d5863bafe01495418ea0ad037b518de90",
),
},
}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the main abstractions used to model distributed clusters on a single
host through the use of networked Docker containers. In particular, classes for clusters (Cluster),
nodes (Node), and groups of nodes that may be conveniently referenced together (NodeGroup) are
implemented here.
"""
import logging
import re
import threading
from os.path import dirname, join
from time import time, sleep
from docker import Client
from docker.errors import APIError
from docker.utils import create_ipam_pool
from clusterdock.docker_utils import (get_container_ip_address,
get_network_container_hostnames, get_network_subnet,
get_available_network_subnet, is_container_reachable,
is_network_present, NetworkNotFoundException)
from clusterdock.ssh import ssh
# We disable a couple of Pylint conventions because it assumes that module level variables must be
# named as if they're constants (which isn't the case here).
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
logger.setLevel(logging.INFO)
client = Client() # pylint: disable=invalid-name
class Cluster(object):
"""The central abstraction for dealing with Docker container clusters. Instances of this class
can be created as needed, but no Docker-specific behavior is done until start() is invoked.
"""
def __init__(self, topology, node_groups, network_name):
"""Creates a cluster instance from a given topology name, list of NodeGroups, and network
name."""
self.topology = topology
self.ssh_key = join(dirname(__file__), 'topologies', self.topology, 'ssh', 'id_rsa')
self.node_groups = node_groups
self.network_name = network_name
self.nodes = [node for node_group in self.node_groups for node in node_group.nodes]
def setup_network(self):
"""If the network doesn't already exist, create it, being careful to pick a subnet that
doesn't collide with that of any other Docker networks already present."""
if not is_network_present(self.network_name):
logger.info("Network (%s) not present, creating it...", self.network_name)
next_network_subnet = get_available_network_subnet()
while True:
try:
client.create_network(name=self.network_name, driver='bridge', ipam={
'Config': [create_ipam_pool(subnet=next_network_subnet)]
})
except APIError as api_error:
if 'networks have overlapping IPv4' not in api_error.explanation:
raise api_error
else:
# The hash after "conflicts with network" is the name with the overlapping
# subnet. Save this to speed up finding the next available subnet the next
# time around in the while loop.
conflicting_network = re.findall(r'conflicts with network (\S+)',
api_error.explanation)[0]
logger.info("Conflicting network:(%s)", conflicting_network)
                        # Try to get the next network subnet up to 5 times (looks like there's a
# race where the conflicting network is known, but not yet visible through
# the API).
for _ in range(0, 5):
try:
next_network_subnet = get_available_network_subnet(
get_network_subnet(conflicting_network)
)
except NetworkNotFoundException as network_not_found_exception:
if 'Cannot find network' not in network_not_found_exception.message:
raise network_not_found_exception
sleep(1)
else:
break
else:
logger.info("Successfully setup network (name: %s).", self.network_name)
break
def ssh(self, command, nodes=None):
"""Execute command on all nodes (unless a list of Node instances is passed) in parallel."""
ssh(command=command,
hosts=[node.ip_address for node in self.nodes if not nodes or node in nodes],
ssh_key=self.ssh_key)
def start(self):
"""Actually start Docker containers, mimicking the cluster layout specified in the Cluster
instance."""
start = time()
self.setup_network()
# Before starting any containers, make sure that there aren't any containers in the
# network with the same hostname.
network_container_hostnames = (
get_network_container_hostnames(self.network_name))
for node in self.nodes:
# Set the Node instance's cluster attribute to the Cluster instance to give the node
# access to the topology's SSH keys.
node.cluster = self
if node.hostname in network_container_hostnames:
raise Exception(
"A container with hostname {0} already exists in network {1}".format(
node.hostname, self.network_name))
threads = [threading.Thread(target=node.start) for
node in self.nodes]
for thread in threads:
thread.start()
# Sleep shortly between node starts to bring some determinacy to the order of the IP
# addresses that we get.
sleep(0.25)
for thread in threads:
thread.join()
etc_hosts_string = ''.join("{0} {1}.{2} # Added by clusterdock\n".format(node.ip_address,
node.hostname,
node.network) for
node in self.nodes)
with open('/etc/hosts', 'a') as etc_hosts:
etc_hosts.write(etc_hosts_string)
end = time()
logger.info("Started cluster in %.2f seconds.", end - start)
def __iter__(self):
for node in self.nodes:
yield node
def __len__(self):
return len(self.nodes)
class NodeGroup(object):
"""A node group denotes a set of Nodes that share some characteristic so as to make it desirable
to refer to them separately from other sets of Nodes. For example, in a typical HDFS cluster,
one node would run the HDFS NameNode while the remaining nodes would run HDFS DataNodes. In
this case, the former might comprise the "primary" node group while the latter may be part of
the "secondary" node group.
"""
def __init__(self, name, nodes=None):
"""Initialize a Group instance called name with a list of nodes."""
self.name = name
self.nodes = nodes
def __iter__(self):
for node in self.nodes:
yield node
def add_node(self, node):
"""Add a Node instance to the list of nodes in the NodeGroup."""
self.nodes.append(node)
def ssh(self, command):
"""Run command over SSH across all nodes in the NodeGroup in parallel."""
ssh_key = self[0].cluster.ssh_key
ssh(command=command, hosts=[node.ip_address for node in self.nodes], ssh_key=ssh_key)
class Node(object):
"""The abstraction will eventually be actualized as a running Docker container. This container,
unlike the typical Docker container, does not house a single process, but tends to run an
init to make the container act more or less like a regular cluster node.
"""
# pylint: disable=too-many-instance-attributes
# 11 instance attributes to keep track of node properties isn't too many (Pylint sets the limit
# at 7), and while we could create a single dictionary attribute, that doesn't really improve
# readability.
def __init__(self, hostname, network, image, **kwargs):
"""volumes must be a list of dictionaries with keys being the directory on the host and the
values being the corresponding directory in the container to mount."""
self.hostname = hostname
self.network = network
self.fqdn = "{0}.{1}".format(hostname, network)
self.image = image
# Optional arguments are relegated to the kwargs dictionary, in part to keep Pylint happy.
self.command = kwargs.get('command')
self.ports = kwargs.get('ports')
# /etc/localtime is always volume mounted so that containers have the same timezone as their
# host machines.
self.volumes = [{'/etc/localtime': '/etc/localtime'}] + kwargs.get('volumes', [])
# Define a number of instance attributes that will get assigned proper values when the node
# starts.
self.cluster = None
self.container_id = None
self.host_config = None
self.ip_address = None
def _get_binds(self):
"""docker-py takes binds in the form "/host/dir:/container/dir:rw" as host configs. This
method returns a list of binds in that form."""
return ["{0}:{1}:rw".format(host_location, volume[host_location]) for volume in self.volumes
for host_location in volume]
def start(self):
"""Actually start a Docker container-based node on the host."""
# Create a host_configs dictionary to populate and then pass to Client.create_host_config().
host_configs = {}
# To make them act like real hosts, Nodes must have all Linux capabilities enabled. For
# some reason, we discovered that doing this causes less trouble than starting containers in
# privileged mode (see KITCHEN-10073). We also disable the default seccomp profile (see #3)
# and pass in the volumes list at this point.
host_configs['cap_add'] = ['ALL']
host_configs['security_opt'] = ['seccomp:unconfined']
host_configs['publish_all_ports'] = True
if self.volumes:
host_configs['binds'] = self._get_binds()
self.host_config = client.create_host_config(**host_configs)
# docker-py runs containers in a two-step process: first it creates a container and then
# it starts the container using the container ID.
container_configs = {
'hostname': self.fqdn,
'image': self.image,
'host_config': self.host_config,
'detach': True,
'command': self.command,
'ports': self.ports,
'volumes': [volume[host_location] for volume in self.volumes
for host_location in volume if self.volumes],
'labels': {"volume{0}".format(i): volume
                       for i, volume in enumerate([list(volume.keys())[0]
                                                   for volume in self.volumes
                                                   if list(volume.keys())[0] not in ['/etc/localtime']],
                                                  start=1)
}
}
self.container_id = client.create_container(**container_configs)['Id']
# Don't start up containers on the default 'bridge' network for better isolation.
client.disconnect_container_from_network(container=self.container_id, net_id='bridge')
client.connect_container_to_network(container=self.container_id, net_id=self.network,
aliases=[self.hostname])
client.start(container=self.container_id)
self.ip_address = get_container_ip_address(container_id=self.container_id,
network=self.network)
if not is_container_reachable(container_id=self.container_id, network=self.network,
ssh_key=self.cluster.ssh_key):
raise Exception("Timed out waiting for {0} to become reachable.".format(self.hostname))
else:
logger.info("Successfully started %s (IP address: %s).", self.fqdn, self.ip_address)
def ssh(self, command):
"""Run command over SSH on the node."""
ssh(command=command, hosts=[self.ip_address], ssh_key=self.cluster.ssh_key)
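# A rough usage sketch (hostnames, image and network names below are
# illustrative assumptions, not values taken from this module):
#
#   primary = NodeGroup('primary', [Node('node-1', 'cluster', 'my/image:latest')])
#   secondary = NodeGroup('secondary', [Node('node-2', 'cluster', 'my/image:latest')])
#   cluster = Cluster('my_topology', [primary, secondary], 'cluster')
#   cluster.start()
#   cluster.ssh('hostname -f')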
|
import numpy as np
from openmdao.api import Component
class Inverter(Component):
def __init__(self):
super(Inverter, self).__init__()
self.add_param('efficiency', 1.0, desc='power out / power in')
self.add_param('output_voltage',
120.0,
desc='Amplitude of AC output voltage',
units='V')
self.add_param('output_current',
2.0,
desc='Amplitude of AC output current',
units='A')
self.add_param('output_frequency',
60.0,
desc='Frequency of AC output',
units='Hz')
self.add_param('input_voltage',
28.46,
desc='Amplitude of DC input voltage',
units='V')
self.add_output('input_current',
0.48,
desc='Amplitude of DC input current',
units='A')
self.add_output('input_power', 10.0, units='W')
def solve_nonlinear(self, params, unknowns, resids):
output_power = params['output_voltage'] * params[
'output_current'] * 3.0 * np.sqrt(2.0 / 3.0)
# TODO perform efficiency lookup
unknowns['input_power'] = output_power / params['efficiency']
unknowns['input_current'] = unknowns['input_power'] / params['input_voltage']
        print(unknowns['input_current'])
        print(params['input_voltage'])
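# A rough usage sketch under the OpenMDAO 1.x API this component targets (the
# Problem/Group wiring below is an assumption for illustration only):
#
#   from openmdao.api import Problem, Group
#   prob = Problem(root=Group())
#   prob.root.add('inverter', Inverter())
#   prob.setup(check=False)
#   prob['inverter.output_current'] = 3.0
#   prob.run()
#   print(prob['inverter.input_power'])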
|
from pprint import pprint
from cloudmesh.common.console import Console
from cloudmesh.common.util import path_expand
from cloudmesh.shell.command import PluginCommand
from cloudmesh.shell.command import command
import importlib
class RegisterCommand(PluginCommand):
# noinspection PyUnusedLocal
@command
def do_register(self, args, arguments):
"""
::
Usage:
register aws yaml
register aws [FILENAME] [--keep]
register azure [FILENAME] [--keep]
register google [FILENAME] [--keep]
register chameleon [FILENAME] [--keep]
        This command adds the registration information to the cloudmesh
        yaml file. The permissions of the FILENAME will also be changed.
        A y/n question will be asked whether the file with the given filename
        should be deleted after integration.
Arguments:
FILENAME a filename in which the cloud credentials are stored
Options:
--keep keeps the file with the filename.
Description:
AWS
AWS dependent on how you downloaded the credentials will either
use the filename `credentials.csv` or `accessKey.csv`
            Our command is smart and provides some convenience functionality.
1. If either file is found in ~/Downloads, it is moved to
~/.cloudmesh and the permissions are changed
2. If such a file already exists there it will ask if it should
be overwritten in case the content is not the same
3. The content of the file will be read to determine if it is
likely to be an AWS credential
4. The credential will be added to the cloudmesh yaml file
Azure
Is not yet implemented
Google
Is not yet implemented
Chameleon Cloud
is not yet implemented
"""
if arguments.aws:
# Pandas should not be used, but
# TODO: change csv
# import csv
# the csv code needs to be changed
if arguments.yaml:
AWSReg = importlib.import_module("cloudmesh.register.AWSRegister")
AWSregisterer = AWSReg.AWSRegister()
AWSregisterer.register()
else:
Console.error("not yet implemented")
elif arguments.azure:
Console.error("not yet implemented")
elif arguments.google:
Console.error("not yet implemented")
elif arguments.chameleon:
Console.error("not yet implemented")
return ""
|
# -*- coding: utf-8 -*-
import sublime
import sublime_plugin
import os
import sys
import json
import functools
import webbrowser
import tempfile
import traceback
import threading
import shutil
PY3 = sys.version > '3'
if PY3:
from .request import *
from .settings import *
from .helpers import *
from . import frontmatter
else:
from request import *
from settings import *
from helpers import *
def plugin_loaded():
settings.loaded_settings = sublime.load_settings('Gist.sublime-settings')
settings.get = settings.loaded_settings.get
settings.set = settings.loaded_settings.set
def catch_errors(fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
try:
return fn(*args, **kwargs)
except MissingCredentialsException:
sublime.error_message("Gist: GitHub token isn't provided in Gist.sublime-settings file. All other authorization methods is deprecated.")
user_settings_path = os.path.join(sublime.packages_path(), 'User', 'Gist.sublime-settings')
if not os.path.exists(user_settings_path):
default_settings_path = os.path.join(sublime.packages_path(), 'Gist', 'Gist.sublime-settings')
shutil.copy(default_settings_path, user_settings_path)
sublime.active_window().open_file(user_settings_path)
except:
traceback.print_exc()
sublime.error_message("Gist: unknown error (please, report a bug!)")
return _fn
def create_gist(public, description, files):
for filename, text in list(files.items()):
if not text:
sublime.error_message("Gist: Unable to create a Gist with empty content")
return
file_data = dict((filename, {'content': frontmatter.loads(text).content}) for filename, text in list(files.items()))
data = json.dumps({'description': description, 'public': public, 'files': file_data})
gist = api_request(settings.GISTS_URL, data)
return gist
def update_gist(gist_url, file_changes, auth_token=None, https_proxy=None, new_description=None):
request = {'files': file_changes}
# print('Request:', request)
if new_description is not None:
request['description'] = new_description
data = json.dumps(request)
# print('Data:', data)
result = api_request(gist_url, data, token=auth_token, https_proxy=https_proxy, method="PATCH")
if PY3:
sublime.status_message("Gist updated") # can only be called by main thread in sublime text 2
# print('Result:', result)
return result
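# For reference, `file_changes` follows the Gist PATCH payload shape used
# elsewhere in this plugin (filenames and contents here are illustrative):
#
#   update_gist(gist_url,
#               {'notes.md': {'content': 'updated text'},             # modify
#                'old.md': {'filename': 'new.md', 'content': '...'},  # rename
#                'obsolete.md': None},                                # delete
#               new_description='refreshed gist')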
def open_gist(gist_url):
gist = api_request(gist_url)
# print('Gist:', gist)
files = sorted(gist['files'].keys())
for gist_filename in files:
allowedTypes = ['text', 'application']
type = gist['files'][gist_filename]['type'].split('/')[0]
if type not in allowedTypes:
continue
view = sublime.active_window().new_file()
gistify_view(view, gist, gist_filename)
if PY3:
view.run_command('append', {
'characters': gist['files'][gist_filename]['content'],
})
else:
edit = view.begin_edit()
view.insert(edit, 0, gist['files'][gist_filename]['content'])
view.end_edit(edit)
if settings.get('supress_save_dialog'):
view.set_scratch(True)
if settings.get('save-update-hook'):
view.retarget(tempfile.gettempdir() + '/' + gist_filename)
# Save over it (to stop us reloading from that file in case it exists)
# But don't actually do a gist update
view.settings().set('do-update', False)
view.run_command('save')
set_syntax(view, gist['files'][gist_filename])
def insert_gist(gist_url):
gist = api_request(gist_url)
files = sorted(gist['files'].keys())
for gist_filename in files:
view = sublime.active_window().active_view()
is_auto_indent = view.settings().get('auto_indent')
if PY3:
if is_auto_indent == True:
view.settings().set('auto_indent',False)
view.run_command('insert', {
'characters': gist['files'][gist_filename]['content'],
})
view.settings().set('auto_indent',True)
else:
view.run_command('insert', {
'characters': gist['files'][gist_filename]['content'],
})
else:
edit = view.begin_edit()
for region in view.sel():
view.replace(edit, region, gist['files'][gist_filename]['content'])
view.end_edit(edit)
def insert_gist_embed(gist_url):
gist = api_request(gist_url)
files = sorted(gist['files'].keys())
for gist_filename in files:
view = sublime.active_window().active_view()
template = '<script src="{0}"></script>'.format(gist['files'][gist_filename]['raw_url'])
if PY3:
view.run_command('insert', {
'characters': template,
})
else:
edit = view.begin_edit()
for region in view.sel():
view.replace(edit, region, template)
view.end_edit(edit)
class GistCommand(sublime_plugin.TextCommand):
public = True
def mode(self):
return "Public" if self.public else "Private"
@catch_errors
def run(self, edit):
regions = [region for region in self.view.sel() if not region.empty()]
if len(regions) == 0:
regions = [sublime.Region(0, self.view.size())]
gistify = True
else:
gistify = False
region_data = [self.view.substr(region) for region in regions]
window = self.view.window()
def on_gist_description(description):
filename = os.path.basename(self.view.file_name() if self.view.file_name() else '')
@catch_errors
def on_gist_filename(filename):
# We need to figure out the filenames. Right now, the following logic is used:
# If there's only 1 selection, just pass whatever the user typed to Github. It'll rename empty files for us.
# If there are multiple selections and user entered a filename, rename the files from foo.js to
# foo (1).js, foo (2).js, etc.
# If there are multiple selections and user didn't enter anything, post the files as
# $SyntaxName 1, $SyntaxName 2, etc.
if len(region_data) == 1:
gist_data = {filename: region_data[0]}
else:
if filename:
(namepart, extpart) = os.path.splitext(filename)
make_filename = lambda num: "%s (%d)%s" % (namepart, num, extpart)
else:
syntax_name, _ = os.path.splitext(os.path.basename(self.view.settings().get('syntax')))
make_filename = lambda num: "%s %d" % (syntax_name, num)
gist_data = dict((make_filename(idx), data) for idx, data in enumerate(region_data, 1))
gist = create_gist(self.public, description, gist_data)
if not gist:
return
gist_html_url = gist['html_url']
sublime.set_clipboard(gist_html_url)
sublime.status_message("%s Gist: %s" % (self.mode(), gist_html_url))
# Todo: PY3 check required?
self.view.run_command('gist_set_id', {'gistid': gist['id']})
if gistify:
gistify_view(self.view, gist, list(gist['files'].keys())[0])
# else:
# open_gist(gist['url'])
window.show_input_panel('Gist File Name: (optional):', filename, on_gist_filename, None, None)
desc = frontmatter.loads(region_data[0]).get('desc')
if desc:
on_gist_description(desc)
else:
window.show_input_panel("Gist Description (optional):", '', on_gist_description, None, None)
class GistViewCommand(object):
"""A base class for commands operating on a gistified view"""
def is_enabled(self):
return self.gist_url() is not None
def gist_url(self):
return self.view.settings().get("gist_url")
def gist_html_url(self):
return self.view.settings().get("gist_html_url")
def gist_filename(self):
return self.view.settings().get("gist_filename")
def gist_description(self):
return self.view.settings().get("gist_description")
class GistSetId(sublime_plugin.TextCommand):
def run(self, edit, gistid):
region = self.view.find(r'(?s)^\s*---(.*)---\s*$', 0)
content = self.view.substr(region).replace('id:', 'id: ' + gistid)
self.view.replace(edit, region, content)
class GistCopyUrl(GistViewCommand, sublime_plugin.TextCommand):
def run(self, edit):
sublime.set_clipboard(self.gist_html_url())
class GistOpenBrowser(GistViewCommand, sublime_plugin.TextCommand):
def run(self, edit):
webbrowser.open(self.gist_html_url())
class GistRenameFileCommand(GistViewCommand, sublime_plugin.TextCommand):
def run(self, edit):
old_filename = self.gist_filename()
@catch_errors
def on_filename(filename):
if filename and filename != old_filename:
text = self.view.substr(sublime.Region(0, self.view.size()))
file_changes = {old_filename: {'filename': filename, 'content': text}}
new_gist = update_gist(self.gist_url(), file_changes)
gistify_view(self.view, new_gist, filename)
sublime.status_message('Gist file renamed')
self.view.window().show_input_panel('New File Name:', old_filename, on_filename, None, None)
class GistChangeDescriptionCommand(GistViewCommand, sublime_plugin.TextCommand):
def run(self, edit):
@catch_errors
def on_gist_description(description):
if description and description != self.gist_description():
gist_url = self.gist_url()
new_gist = update_gist(gist_url, {}, description)
for window in sublime.windows():
for view in window.views():
if view.settings().get('gist_url') == gist_url:
gistify_view(view, new_gist, view.settings().get('gist_filename'))
sublime.status_message('Gist description changed')
self.view.window().show_input_panel('New Description:', self.gist_description() or '', on_gist_description, None, None)
class GistUpdateFileCommand(GistViewCommand, sublime_plugin.TextCommand):
@catch_errors
def run(self, edit):
text = self.view.substr(sublime.Region(0, self.view.size()))
changes = {self.gist_filename(): {'content': text}}
update_gist(self.gist_url(), changes)
sublime.status_message("Gist updated")
class GistDeleteFileCommand(GistViewCommand, sublime_plugin.TextCommand):
@catch_errors
def run(self, edit):
changes = {self.gist_filename(): None}
update_gist(self.gist_url(), changes)
ungistify_view(self.view)
sublime.status_message("Gist file deleted")
class GistDeleteCommand(GistViewCommand, sublime_plugin.TextCommand):
@catch_errors
def run(self, edit):
gist_url = self.gist_url()
api_request(gist_url, method='DELETE')
for window in sublime.windows():
for view in window.views():
if view.settings().get("gist_url") == gist_url:
ungistify_view(view)
sublime.status_message("Gist deleted")
class GistPrivateCommand(GistCommand):
public = False
class GistListCommandBase(object):
gists = orgs = users = []
@catch_errors
def run(self, *args):
filtered = gists_filter(api_request(settings.GISTS_URL))
filtered_stars = gists_filter(api_request(settings.STARRED_GISTS_URL))
self.gists = filtered[0] + filtered_stars[0]
gist_names = filtered[1] + list(map(lambda x: [u"★ " + x[0]], filtered_stars[1]))
if settings.get('include_users'):
self.users = list(settings.get('include_users'))
gist_names = [["> " + user] for user in self.users] + gist_names
if settings.get('include_orgs'):
if settings.get('include_orgs') == True:
self.orgs = [org.get("login") for org in api_request(settings.ORGS_URL)]
else:
self.orgs = settings.get('include_orgs')
gist_names = [["> " + org] for org in self.orgs] + gist_names
# print(gist_names)
def on_gist_num(num):
offOrgs = len(self.orgs)
offUsers = offOrgs + len(self.users)
if num < 0:
pass
elif num < offOrgs:
self.gists = []
members = [member.get("login") for member in api_request(settings.ORG_MEMBERS_URL % self.orgs[num])]
for member in members:
self.gists += api_request(settings.USER_GISTS_URL % member)
filtered = gists_filter(self.gists)
self.gists = filtered[0]
gist_names = filtered[1]
# print(gist_names)
self.orgs = self.users = []
self.get_window().show_quick_panel(gist_names, on_gist_num)
elif num < offUsers:
filtered = gists_filter(api_request(settings.USER_GISTS_URL % self.users[num - offOrgs]))
self.gists = filtered[0]
gist_names = filtered[1]
# print(gist_names)
self.orgs = self.users = []
self.get_window().show_quick_panel(gist_names, on_gist_num)
else:
self.handle_gist(self.gists[num - offUsers])
self.get_window().show_quick_panel(gist_names, on_gist_num)
class GistListCommand(GistListCommandBase, sublime_plugin.WindowCommand):
@catch_errors
def handle_gist(self, gist):
open_gist(gist['url'])
def get_window(self):
return self.window
class GistListener(GistViewCommand, sublime_plugin.EventListener):
@catch_errors
def on_pre_save(self, view):
if view.settings().get('gist_filename') is not None:
if settings.get('save-update-hook'):
# we ignore the first update, it happens upon loading a gist
if not view.settings().get('do-update'):
view.settings().set('do-update', True)
return
text = view.substr(sublime.Region(0, view.size()))
changes = {view.settings().get('gist_filename'): {'content': text}}
gist_url = view.settings().get('gist_url')
# Start update_gist in a thread so we don't stall the save
threading.Thread(target=update_gist, args=(gist_url, changes, settings.get('token'), settings.get('https_proxy'))).start()
class InsertGistListCommand(GistListCommandBase, sublime_plugin.WindowCommand):
@catch_errors
def handle_gist(self, gist):
insert_gist(gist['url'])
def get_window(self):
return self.window
class InsertGistEmbedListCommand(GistListCommandBase, sublime_plugin.WindowCommand):
@catch_errors
def handle_gist(self, gist):
insert_gist_embed(gist['url'])
def get_window(self):
return self.window
class GistAddFileCommand(GistListCommandBase, sublime_plugin.TextCommand):
def is_enabled(self):
return self.view.settings().get('gist_url') is None
def handle_gist(self, gist):
@catch_errors
def on_filename(filename):
if filename:
text = self.view.substr(sublime.Region(0, self.view.size()))
changes = {filename: {'content': text}}
new_gist = update_gist(gist['url'], changes)
gistify_view(self.view, new_gist, filename)
sublime.status_message("File added to Gist")
filename = os.path.basename(self.view.file_name() if self.view.file_name() else '')
self.view.window().show_input_panel('File Name:', filename, on_filename, None, None)
def get_window(self):
return self.view.window()
|
import os, sys
import pytest
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../src')
from key_file import KeyFile
class TestKeyFile:
def file_path(self):
return os.path.join(os.getcwd(), 'test', 'fixtures', 'example.key')
def test_read_key_file(self):
with open(self.file_path()) as f:
kf = KeyFile(f)
assert kf.classes == ['a', 'b', 'c', 'd', 'e']
|
from django.shortcuts import render
from django.http import HttpRequest, HttpResponse, JsonResponse
from .models import Station,Data,MeanDay,Intensity, MeanWeek, MeanYear
from datetime import timedelta, date, datetime
# Create your views here.
def dynamic_lookup_view(request: HttpRequest, my_id) -> HttpResponse:
# The station being queried
station = Station.objects.get(id=my_id)
# Latest readings for this station (up to 10,000 rows), oldest first
data = Data.objects.order_by('-date','-heure').filter(station=station)[:10000][::-1]
# Daily, weekly and yearly aggregates for this station, oldest first
meandaytable = MeanDay.objects.filter(station=station).order_by('-mean_day')[::-1]
meanweektable = MeanWeek.objects.filter(station=station).order_by('-mean_week')[::-1]
meanyeartable = MeanYear.objects.filter(station=station).order_by('-mean_year')[::-1]
intensitytable = Intensity.objects.filter(station=station).order_by('-intensity_day')[::-1]
intensityData = []
intensityDuration = []
meandayData = []
meandayDate = []
maxdayData = []
mindayData = []
meanweekData = []
meanweekDate = []
meanyearData = []
meanyearDate = []
precipitation = []
precipitationDate = []
precipitationTime = []
for elem in meandaytable:
meandayData.append(float(elem.mean_per_day))
meandayDate.append(str(elem.mean_day))
mindayData.append(float(elem.min_per_day))
maxdayData.append(float(elem.max_per_day))
for elem in meanweektable:
meanweekData.append(float(elem.mean_per_week))
meanweekDate.append(str(elem.mean_week))
for elem in meanyeartable:
meanyearData.append(float(elem.mean_per_year))
meanyearDate.append(str(elem.mean_year))
lastday = data[len(data)-1].date
intensityDic = {}
for elem in intensitytable:
if(elem.intensity_day == lastday):
intensityDic[int(elem.duration)] = float(elem.intensity)
newKeys = sorted(intensityDic)
for key in newKeys:
intensityData.append(intensityDic[key])
dataDic = {}
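# Build a zero-filled template with one key per minute of the day (1440 "HH:MM:SS" entries)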
timeCounter = datetime(2000, 1, 1, hour= 0, minute= 0, second=0)
while(timeCounter.day == 1):
dataDic[timeCounter.strftime("%H:%M:%S")] = 0
timeCounter = timeCounter + timedelta(minutes= 1)
for elem in data:
if elem.date == lastday:
dataDic[str(elem.heure)] = float(elem.mesure)
for key in dataDic:
precipitationTime.append(key)
precipitation.append(dataDic[key])
maxdayDataShort = maxdayData[-7:]
mindayDataShort = mindayData[-7:]
meandayDataShort = meandayData[-7:]
meandayDateShort = meandayDate[-7:]
lastDay = [str(lastday)]
print(intensityData)
context = {
'id': my_id,
"data": data,
"meandaytable": meandaytable,
"meanweektable": meanweektable,
"meanyeartable": meanyeartable,
"intensitytable": intensitytable,
"station": station,
"intensityData": intensityData,
"intensityDuration":intensityDuration,
"meandayData": meandayDataShort,
"meandayDate": meandayDateShort,
"maxdayData" : maxdayDataShort,
"mindayData" : mindayDataShort,
"meanweekData": meanweekData,
"meanweekDate": meanweekDate,
"meanyearData": meanyearData,
"meanyearDate": meanyearDate,
"shorterData": precipitation,
"shorterTime": precipitationTime,
"lastDayRegistered": lastDay,
}
return render(request, "data.html", context)
def data(request: HttpRequest) -> HttpResponse:
return render(request, "data.html")
def addDailyData(request,my_id):
station = Station.objects.get(id=my_id)
dataTable = Data.objects.order_by('-date').filter(station=station)[::-1]
dataDic = {}
for elem in dataTable:
if str(elem.date) not in dataDic:
dataDic[str(elem.date)] = {}
timeCounter = datetime(2000, 1, 1, hour= 0, minute= 0, second=0)
while(timeCounter.day == 1):
dataDic[str(elem.date)][timeCounter.strftime("%H:%M:%S")] = 0
timeCounter = timeCounter + timedelta(minutes= 1)
for elem in dataTable:
dataDic[str(elem.date)][str(elem.heure)] = str(elem.mesure)
for dic in dataDic:
newList = []
for key in dataDic[dic]:
newList.append(dataDic[dic][key])
dataDic[dic] = newList
return JsonResponse(dataDic, safe = False)
def getMeanDayData(request,my_id):
station = Station.objects.get(id=my_id)
meandayData = []
meandaytable = MeanDay.objects.filter(station=station).order_by('-mean_day')[::-1]
for elem in meandaytable:
meandayData.append(str(elem.mean_per_day))
return JsonResponse(meandayData, safe = False)
def addMeanDayData(request,my_id):
station = Station.objects.get(id=my_id)
meandaytable = MeanDay.objects.filter(station=station).order_by('-mean_day')[::-1]
meandayDic = {}
for elem in meandaytable:
meandayDic[str(elem.mean_day)] = str(elem.mean_per_day)
return JsonResponse(meandayDic, safe = False)
def getMeanWeekData(request,my_id):
station = Station.objects.get(id=my_id)
meanweekData = []
meanweektable = MeanWeek.objects.filter(station=station).order_by('-mean_week')[::-1]
for elem in meanweektable:
meanweekData.append(str(elem.mean_per_week))
return JsonResponse(meanweekData, safe = False)
def addMeanWeekData(request,my_id):
station = Station.objects.get(id=my_id)
meanweektable = MeanWeek.objects.filter(station=station).order_by('-mean_week')[::-1]
meanweekDic = {}
for elem in meanweektable:
meanweekDic[str(elem.mean_week)] = str(elem.mean_per_week)
return JsonResponse(meanweekDic, safe = False)
def getMeanYearData(request,my_id):
station = Station.objects.get(id=my_id)
meanyearData = []
meanyeartable = MeanYear.objects.filter(station=station).order_by('-mean_year')[::-1]
for elem in meanyeartable:
meanyearData.append(str(elem.mean_per_year))
return JsonResponse(meanyearData, safe = False)
def addMeanYearData(request,my_id):
station = Station.objects.get(id=my_id)
meanyeartable = MeanYear.objects.filter(station=station).order_by('-mean_year')[::-1]
meanyearDic = {}
for elem in meanyeartable:
meanyearDic[str(elem.mean_year)] = str(elem.mean_per_year)
return JsonResponse(meanyearDic, safe = False)
def getMaxDayData(request,my_id):
station = Station.objects.get(id=my_id)
maxdaytable = MeanDay.objects.filter(station=station).order_by('-mean_day')[::-1]
maxdayDic = {}
for elem in maxdaytable:
maxdayDic[str(elem.mean_day)] = str(elem.max_per_day)
return JsonResponse(maxdayDic, safe = False)
def getMinDayData(request,my_id):
station = Station.objects.get(id=my_id)
mindaytable = MeanDay.objects.filter(station=station).order_by('-mean_day')[::-1]
mindayDic = {}
for elem in mindaytable:
mindayDic[str(elem.mean_day)] = str(elem.min_per_day)
return JsonResponse(mindayDic, safe = False)
def getMaxWeekData(request,my_id):
station = Station.objects.get(id=my_id)
maxweektable = MeanWeek.objects.filter(station=station).order_by('-mean_week')[::-1]
maxweekDic = {}
for elem in maxweektable:
maxweekDic[str(elem.mean_week)] = str(elem.max_per_week)
return JsonResponse(maxweekDic, safe = False)
def getMinWeekData(request,my_id):
station = Station.objects.get(id=my_id)
minweektable = MeanWeek.objects.filter(station=station).order_by('-mean_week')[::-1]
minweekDic = {}
for elem in minweektable:
minweekDic[str(elem.mean_week)] = str(elem.min_per_week)
return JsonResponse(minweekDic, safe = False)
def getMaxYearData(request,my_id):
station = Station.objects.get(id=my_id)
maxyeartable = MeanYear.objects.filter(station=station).order_by('-mean_year')[::-1]
maxyearDic = {}
for elem in maxyeartable:
maxyearDic[str(elem.mean_year)] = str(elem.max_per_year)
return JsonResponse(maxyearDic, safe = False)
def getMinYearData(request,my_id):
station = Station.objects.get(id=my_id)
minyeartable = MeanYear.objects.filter(station=station).order_by('-mean_year')[::-1]
minyearDic = {}
for elem in minyeartable:
minyearDic[str(elem.mean_year)] = str(elem.min_per_year)
return JsonResponse(minyearDic, safe = False)
def addDailyIntensity(request,my_id):
station = Station.objects.get(id=my_id)
intensityTable = Intensity.objects.filter(station=station).order_by('-intensity_day')[::-1]
intensityDic = {}
for elem in intensityTable:
if str(elem.intensity_day) not in intensityDic:
intensityDic[str(elem.intensity_day)] = {int(elem.duration):str(elem.intensity)}
else:
intensityDic[str(elem.intensity_day)][int(elem.duration)] = (str(elem.intensity))
for dic in intensityDic:
newKeys = sorted(intensityDic[dic])
newList = []
for key in newKeys:
newList.append(intensityDic[dic][key])
intensityDic[dic] = newList
return JsonResponse(intensityDic, safe = False)
def isEmpty(request,my_id):
station = Station.objects.get(id=my_id)
data = Data.objects.order_by('-date','-heure').filter(station=station)[:10000][::-1]
if len(data) == 0:
return JsonResponse(True, safe = False)
else:
return JsonResponse(False, safe = False)
|
import tarfile
from fastai import *
from fastai.vision import *
from fastai.text import *
__all__ = ['DATA_PATH', 'MNIST_PATH', 'IMDB_PATH', 'ADULT_PATH', 'ML_PATH', 'DOGS_PATH', 'PLANET_PATH',
'untar_data', 'get_adult', 'get_mnist', 'get_imdb', 'get_movie_lens', 'download_wt103_model']
URL = 'http://files.fast.ai/data/examples/'
DATA_PATH = Path('..')/'data'
MNIST_PATH = DATA_PATH/'mnist_sample'
IMDB_PATH = DATA_PATH/'imdb_sample'
ADULT_PATH = DATA_PATH/'adult_sample'
ML_PATH = DATA_PATH/'movie_lens_sample'
CIFAR_PATH = DATA_PATH/'cifar10'
PLANET_PATH = DATA_PATH/'planet_sample'
# kaggle competitions download dogs-vs-cats -p {DOGS_PATH.absolute()}
DOGS_PATH = DATA_PATH/'dogscats'
def f_name(name): return f'{name}.tgz'
def download_data(name):
dest = DATA_PATH/f_name(name)
if not dest.exists(): download_url(f'{URL}{f_name(name)}', dest)
def untar_data(path):
download_data(path.name)
if not path.exists(): tarfile.open(f_name(path), 'r:gz').extractall(DATA_PATH)
def get_adult():
untar_data(ADULT_PATH)
return pd.read_csv(ADULT_PATH/'adult.csv')
def get_mnist():
untar_data(MNIST_PATH)
return image_data_from_folder(MNIST_PATH)
def get_imdb(classifier=False):
untar_data(IMDB_PATH)
data_func = classifier_data if classifier else lm_data
return text_data_from_csv(IMDB_PATH, data_func=data_func)
def get_movie_lens():
untar_data(ML_PATH)
return pd.read_csv(ML_PATH/'ratings.csv')
def download_wt103_model():
model_path = IMDB_PATH/'models'
os.makedirs(model_path, exist_ok=True)
download_url('http://files.fast.ai/models/wt103_v1/lstm_wt103.pth', model_path/'lstm_wt103.pth')
download_url('http://files.fast.ai/models/wt103_v1/itos_wt103.pkl', model_path/'itos_wt103.pkl')
|
config = {
"interfaces": {
"google.ads.googleads.v1.services.AdGroupAdService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetAdGroupAd": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"MutateAdGroupAds": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
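# Illustrative sketch (not part of the shipped config): how GAX-style clients are
# commonly documented to interpret the "default" retry_params above -- exponential
# backoff capped at max_retry_delay_millis. The exact client behaviour is stated
# here as an assumption, not taken from this file.
def _backoff_schedule(initial_ms=5000, multiplier=1.3, max_ms=60000, attempts=5):
    """Yield successive retry delays in milliseconds."""
    delay = initial_ms
    for _ in range(attempts):
        yield min(delay, max_ms)
        delay *= multiplier

# e.g. list(_backoff_schedule()) yields roughly [5000, 6500, 8450, 10985, 14280] ms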
|
import json
import os
config = {}
settings_file_wd = os.path.join(os.getcwd(), ".cdash-client.json")
home_directory = os.path.expanduser("~")
settings_file_home = os.path.join(home_directory, ".cdash-client.json")
settings_file = ""
# First of all, we look for our settings file inside the user's home folder.
# This is cross-platform. On Windows, it will look into "C:\Users\<User>\.cdash-client.json"
if os.path.isfile(settings_file_home) and os.access(settings_file_home, os.R_OK):
settings_file = settings_file_home
else:
print("Settings file not found in home directory. Looking in working directory...")
if os.path.isfile(settings_file_wd) and os.access(settings_file_wd, os.R_OK):
settings_file = settings_file_wd
else:
raise Exception("Settings file was not found neither in the home directory, or current working directory")
with open(settings_file, "r") as f:
config = json.load(f)
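# A minimal example of what .cdash-client.json might contain; the field names below
# are purely illustrative, since the real schema is defined by the CDash client itself:
# {
#     "url": "https://my.cdash.example.org",
#     "project": "MyProject"
# }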
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import (HttpResponseRedirect, HttpResponsePermanentRedirect,
JsonResponse)
from django.shortcuts import get_object_or_404, render
from job_board.forms import CompanyForm
from job_board.models.company import Company
from job_board.models.job import Job
def companies_index(request):
companies_list = Company.objects \
.filter(site_id=get_current_site(request).id)
for c in companies_list:
if len(c.paid_jobs()) == 0:
companies_list = companies_list.exclude(id=c.id)
paginator = Paginator(companies_list, 25)
page = request.GET.get('page')
meta_desc = 'Browse an extensive list of companies with active and ' \
'expired jobs'
title = 'Companies'
try:
companies = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
companies = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
companies = paginator.page(paginator.num_pages)
context = {'meta_desc': meta_desc, 'title': title, 'companies': companies}
return render(request, 'job_board/companies_index.html', context)
@login_required(login_url='/login/')
def companies_new(request):
title = 'Add a Company'
site = get_current_site(request)
if request.method == 'POST':
form = CompanyForm(request.POST)
if form.is_valid():
company = form.save(commit=False)
company.site_id = site.id
company.user_id = request.user.id
company.save()
if request.is_ajax():
return JsonResponse({'id': company.id, 'name': company.name})
else:
messages.success(
request,
'Your company has been successfully added'
)
return HttpResponseRedirect(company.get_absolute_url())
else:
if request.is_ajax():
return render(
request,
'job_board/_errors.html',
{'form': form},
status=400
)
else:
form = CompanyForm()
context = {'form': form, 'title': title}
return render(request, 'job_board/companies_new.html', context)
def companies_show(request, company_id, slug=None):
company = get_object_or_404(
Company,
pk=company_id,
site_id=get_current_site(request).id
)
if slug is None:
return HttpResponsePermanentRedirect(company.get_absolute_url())
# We don't use get_list_or_404 here as we redirect to this view after
# adding a new company and at that point it won't have any jobs assigned
# to it.
jobs = Job.objects.filter(site_id=get_current_site(request).id) \
.filter(company=company) \
.filter(paid_at__isnull=False) \
.order_by('-paid_at')
title = company.name
meta_desc = 'Browse a list of all active and expired %s jobs' % \
company.name
context = {'meta_desc': meta_desc,
'title': title,
'company': company,
'jobs': jobs}
return render(request, 'job_board/companies_show.html', context)
@login_required(login_url='/login/')
def companies_edit(request, company_id):
company = get_object_or_404(
Company, pk=company_id, site_id=get_current_site(request).id
)
title = 'Edit a Company'
if (request.user.id != company.user.id) and not request.user.is_staff:
return HttpResponseRedirect(company.get_absolute_url())
if request.method == 'POST':
form = CompanyForm(request.POST, instance=company)
if form.is_valid():
form.save()
messages.success(
request,
'Your company has been successfully updated'
)
return HttpResponseRedirect(company.get_absolute_url())
else:
form = CompanyForm(instance=company)
context = {'company': company, 'form': form, 'title': title}
return render(request, 'job_board/companies_edit.html', context)
|
# Uses python3
import sys
def get_change(m):
minNumCoins = [0] * (m + 1)
for i in range(1, m + 1):
minNumCoins[i] = float('inf')
for j in [1, 3, 4]:
if i >= j:
NumCoins = minNumCoins[i - j] + 1
if NumCoins < minNumCoins[i]:
minNumCoins[i] = NumCoins
return minNumCoins[m]
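# Quick sanity checks for the coin set {1, 3, 4} used above:
#   get_change(2)  == 2   # 1 + 1
#   get_change(6)  == 2   # 3 + 3
#   get_change(10) == 3   # 3 + 3 + 4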
if __name__ == '__main__':
m = int(sys.stdin.read())
print(get_change(m))
|
import click
from virl.api import VIRLServer
from subprocess import call
from virl import helpers
from virl.helpers import get_mgmt_lxc_ip, get_node_from_roster
@click.command()
@click.argument('node', nargs=-1)
def ssh(node):
"""
ssh to a node
"""
if len(node) == 2:
# we received env and node name
env = node[0]
running = helpers.check_sim_running(env)
node = node[1]
elif len(node) == 1:
# assume default env
env = 'default'
running = helpers.check_sim_running(env)
node = node[0]
else:
exit(call(['virl', 'ssh', '--help']))
if running:
sim_name = running
server = VIRLServer()
details = server.get_sim_roster(sim_name)
if node:
try:
node_dict = get_node_from_roster(node, details)
node_name = node_dict.get("NodeName")
ip = node_dict['managementIP']
proxy = node_dict.get("managementProxy")
if proxy == 'lxc':
lxc = get_mgmt_lxc_ip(details)
if lxc:
click.secho("Attemping ssh connection"
"to {} at {} via {}".format(node_name,
ip, lxc))
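# Jump through the management LXC: the inner "ssh -W %h:%p" ProxyCommand opens a
# tunnel via the LXC, and the outer ssh rides that tunnel to reach the node.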
cmd = 'ssh -o "ProxyCommand ssh -W %h:%p {}@{}" {}@{}'
cmd = cmd.format(server.user, lxc, 'cisco', ip)
exit(call(cmd, shell=True))
else:
# handle the "flat" networking case
click.secho("Attemping ssh connection"
"to {} at {}".format(node_name,
ip))
exit(call(['ssh', 'cisco@{}'.format(ip)]))
except AttributeError:
click.secho("Could not find management info"
"for {}:{}".format(env, node), fg="red")
except KeyError:
click.secho("Unknown node {}:{}".format(env, node), fg="red")
else:
return details.json()
|
# -*- coding: utf-8 -*-
import argparse
import torch
from tqdm import tqdm
from models.embedding import ProtoNetEmbedding
from protoNet.prototy_head import ClassificationHead
from utilities import set_gpu, count_accuracy, log, setup_seed
import numpy as np
import os
from dataloaders.tieredImageNet import tieredImageNet, FewShotDataloader
def get_model():
network = ProtoNetEmbedding().cuda()
cls_head = ClassificationHead(False).cuda()
return (network, cls_head)
if __name__ == '__main__':
setup_seed(1234)
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0')
parser.add_argument('--load', default='/data/save_models/best_model.pth.tar',
help='path of the checkpoint file')
parser.add_argument('--episode', type=int, default=2000,
help='number of episodes to test')
parser.add_argument('--way', type=int, default=5,
help='number of classes in one test episode')
parser.add_argument('--shot', type=int, default=5,
help='number of support examples per training class')
parser.add_argument('--query', type=int, default=10,
help='number of query examples per training class')
opt = parser.parse_args()
set_gpu(opt.gpu)
log_file_path = os.path.join(os.path.dirname(opt.load), "test_log.txt")
log(log_file_path, str(vars(opt)))
# Define the models
(embedding_net, cls_head) = get_model()
# Load saved model checkpoints
saved_models = torch.load(opt.load)
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
cls_head.load_state_dict(saved_models['head'])
cls_head.eval()
#-----------------------------
dataset_test = tieredImageNet(phase='test')
data_loader = FewShotDataloader
dloader_test = data_loader(
dataset=dataset_test,
nKnovel=opt.way,
nKbase=0,
nExemplars=opt.shot, # num training examples per novel category
nTestNovel=opt.query * opt.way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=1,
epoch_size=opt.episode, # num of batches per epoch
)
#-----------------------------------
# Evaluate on test set
test_accuracies = []
for i, batch in enumerate(tqdm(dloader_test()), 1):
data_support, labels_support, data_query, labels_query, _, _ = [x.cuda() for x in batch]
data_support = data_support.float()
data_query = data_query.float()
labels_support = labels_support.long()
labels_query = labels_query.long()
n_support = opt.way * opt.shot
n_query = opt.way * opt.query
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(1, n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(1, n_query, -1)
logits = cls_head(emb_query, emb_support, labels_support, opt.way, opt.shot)
acc = count_accuracy(logits.reshape(-1, opt.way), labels_query.reshape(-1))
test_accuracies.append(acc.item())
avg = np.mean(np.array(test_accuracies))
std = np.std(np.array(test_accuracies))
# Half-width of the 95% confidence interval over the episodes evaluated so far
ci95 = 1.96 * std / np.sqrt(len(test_accuracies))
if i % 50 == 0:
print('Episode [{}/{}]:\t\t\tAccuracy: {:.2f} ± {:.2f} % ({:.2f} %)' \
.format(i, opt.episode, avg, ci95, acc))
|
import pandas as pd
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings("ignore")
if __name__ == "__main__":
dataset = pd.read_csv("./datasets/felicidad.csv")
# 'rank' and 'score' are dropped so that the features have no correlation with one another.
# Ideally, correlation should only exist between the features and the target variable.
data = dataset.drop(['country', 'rank', 'score'], axis=1)
target = dataset[['score']]
reg = RandomForestRegressor()
parameters = {
'n_estimators': range(4, 16), # how many trees make up the forest
'criterion': ['mse', 'mae'],
'max_depth': range(2, 11)
}
# n_iter=10: the optimizer samples 10 random combinations from the parameter dictionary.
# cv=3: the supplied data is split into 3 folds for cross-validation.
rand_est = RandomizedSearchCV(reg, parameters,
n_iter=10,
cv=3,
scoring='neg_mean_absolute_error',
).fit(data, target)
print('='*64)
print("Mejores estimadores")
print('-'*64)
print(rand_est.best_estimator_)
print('='*64)
print("Mejores parametros")
print('-'*64)
print(rand_est.best_params_)
print('='*64)
print('Tests')
print('-'*64)
y_hat = rand_est.predict(data.loc[[0]])
print(f'Predict: {y_hat[0]}')
print(f'Real: {target.loc[0]}')
print('='*64)
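# Optional follow-up (commented out): cv_results_ and best_score_ are standard
# RandomizedSearchCV attributes and can be used to inspect every sampled
# parameter combination and its cross-validated score.
# results = pd.DataFrame(rand_est.cv_results_)
# print(results[['params', 'mean_test_score']].sort_values('mean_test_score', ascending=False))
# print(rand_est.best_score_)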
|
#!/usr/bin/env python3
##############################################################################
# EVOLIFE http://evolife.telecom-paris.fr Jean-Louis Dessalles #
# Telecom Paris 2021 www.dessalles.fr #
# -------------------------------------------------------------------------- #
# License: Creative Commons BY-NC-SA #
##############################################################################
##############################################################################
# Population #
##############################################################################
""" EVOLIFE: Module Population:
A population is a set of semi-permeable groups
"""
import sys
if __name__ == '__main__': # for tests
sys.path.append('../..')
from Evolife.Scenarii.MyScenario import InstantiateScenario
InstantiateScenario('Cooperation','../Evolife')
from random import randint, choice
from Evolife.Tools.Tools import error
from Evolife.Ecology.Group import Group, EvolifeGroup # definition of groups
class Population:
""" class Population: list of Groups
Minimal version """
def __init__(self, Scenario, Observer):
""" Creation of the groups """
self.Scenario = Scenario
self.popSize = self.Scenario.Parameter('PopulationSize')
self.groupMaxSize = self.popSize + 1
self.groups = []
self.year = -1 # to keep track of time
self.Observer = Observer # contains instantaneous data for statistics and display
self.best_score = 0
nb_groups = self.Scenario.Parameter('NumberOfGroups', Default=1)
group_size = self.popSize // nb_groups
self.groupMaxSize = 2 * group_size # groups beyond that size split
while (nb_groups > 0):
self.groups.append(self.createGroup(ID=nb_groups, Size=group_size))
nb_groups -= 1
self.statistics(Display=True) # updates popSize
def createGroup(self, ID=0, Size=0):
return Group(self.Scenario, ID=ID, Size=Size)
def selectIndividual(self):
" random selection of an individual in the population "
(group, winner) = self.lottery()
return group.whoIs(winner)
def lottery(self):
" random selection of an individual by number in the population "
winner = randint(0,self.popSize-1)
for gr in self.groups:
if gr.size > winner: return (gr,winner)
else: winner -= gr.size
error(f"Population: wrong population size: {self.popSize}")
def season(self):
self.year += 1 # keeps track of time
self.Observer.season(self.year)
for gr in self.groups: gr.season(self.year)
def migration(self):
" migration between groups of some percentage of individuals "
if len(self.groups) < 2 or self.Scenario.Parameter('MigrationRate', Default=0) == 0:
return # no migration if only one group
migrants = int(self.Scenario.Parameter('MigrationRate') * self.popSize/100.0 + 0.5)
while migrants:
(gr_out, migrant) = self.lottery() # choosing the migrant
(gr_in,dummy) = self.lottery() # choosing where to go
gr_in.receive(gr_out.remove_(migrant)) # symbolically murdered, and then born-again
migrants -= 1
def group_splitting(self):
""" groups that are too big are split in two,
and too small groups are dispersed """
##############
## TO BE REWRITTEN: restart the whole splitting process after one split
##############
grps = self.groups[:] # copy of the list, necessary since 'groups' is modified within the loop
for gr in grps:
if gr.size > self.groupMaxSize:
effectif = int(gr.size/2.0 + .5)
newgroup = self.createGroup(ID=len(self.groups)+1) # create empty group
while effectif:
newgroup.receive(gr.remove_(randint(0,gr.size-1))) # symbolically murdered, and then born-again
effectif -= 1
newgroup.update_()
self.groups.append(newgroup)
##############
## TO BE REWRITTEN: restart the whole destruction process after one destruction
##############
if self.Scenario.Parameter('GroupMinSize', Default=0) ==0: return # No group minimum size
grps = self.groups[:] # copy of the list, necessary since 'groups' is modified within the loop
for gr in grps:
if gr.size < self.Scenario.Parameter('GroupMinSize'):
self.groups.remove(gr)
self.popSize -= gr.size # necessary for lottery()
# for dummy in gr.members:
for dummy in list(gr):
try:
gr_in = choice(self.groups) # dispersed members join groups independently of their size
except IndexError:
return # dying population
## (gr_in,dummy) = self.lottery() # choosing where to go
gr_in.receive(gr.remove_(0)) # symbolically murdered, and then born-again
self.popSize += 1
def limit(self):
" randomly kills individuals until size is reached "
## MaxLives = self.Scenario.Parameter('SelectionPressure')
self.update()
while self.popSize > self.Scenario.Parameter('PopulationSize'):
(gr,Unfortunate) = self.lottery()
if gr.kill(Unfortunate) is not None:
self.popSize -= 1
self.update(display=True)
def update(self, flagRanking = False, display=False):
" updates groups and looks for empty groups "
self.popSize = 0 # population size will be recomputed
toBeRemoved = []
for gr in self.groups:
gr.location = self.popSize # useful for separating groups when displaying them on an axis
grsize = gr.update_(flagRanking, display=display)
if grsize == 0: toBeRemoved.append(gr)
self.popSize += grsize
for gr in toBeRemoved: self.groups.remove(gr)
if self.popSize == 0: error("Population is empty")
self.best_score = max([gr.best_score for gr in self.groups])
return self.popSize
def statistics(self, Complete=True, Display=False):
" Updates statistics about the population "
self.update(display=Display) # updates facts
self.Observer.reset()
if Complete:
self.Observer.open_()
for gr in self.groups:
gr.statistics()
self.Observer.store(gr.Examiner)
self.Observer.close_() # computes statistics in Observer
def one_year(self):
" one year of life "
if self.year < 0:
# just to get a snapshot of the initial situation
self.season() # annual resetting and time increment
self.statistics()
return True
try:
self.limit() # some individuals die to limit population size
self.migration() # some individuals change group
self.group_splitting() # big groups split and small groups are dissolved
self.season() # annual resetting and time increment
if self.Observer.Visible():
self.statistics(Complete=True, Display=True) # compute statistics before reproduction
try: self.Observer.recordInfo('Best', self.groups[0].get_best())
except (IndexError, AttributeError): pass # no record of best individual
return True
except Exception as Msg:
error("Population", str(Msg))
return False
def members(self):
for gr in self.groups:
for i in gr:
yield i
def display(self):
" calling 'display' for all individuals in the population "
for i in self.members(): i.display()
def __str__(self):
# printing global statistics
# and then a list of groups, one per line
return "\n Population Statistics:\n" + \
"> Popul: %d members\tbest: %d\tavg: %.02f\tyear: %d\n" \
% (self.Observer.Statistics['Properties']['length'],
self.Observer.Statistics['Properties']['best'][1],
self.Observer.Statistics['Properties']['average'][1], self.year) + \
"\n".join(["group %d: %d members\tbest: %d\tavg: %.02f" \
% (i, grObs.storages['Properties'].length, grObs.storages['Properties'].best[1],
grObs.storages['Properties'].average[1]) \
for (i,grObs) in enumerate(self.Observer.storage)]) + "\n"
class EvolifePopulation(Population):
" Population + reproduction + call to Scenario life_game "
def __init__(self, Scenario, Evolife_Obs):
""" Creation of the groups """
Population.__init__(self, Scenario, Evolife_Obs)
# Optionally initialize genomes from a file
if self.Scenario.Parameter('StartFromFile', Default=0):
StartFile = open('EvoStart.gen','r')
self.Observer.TextDisplay('Retrieving population from EvoStart.gen\n')
Genomes = StartFile.readlines() # put lines in a list
StartFile.close()
self.popSize = len(Genomes) # priority over configuration file
for gr in self.groups: gr.uploadDNA(Genomes)
else:
Genomes = []
self.statistics() # updates popSize
def createGroup(self, ID=0, Size=0):
return EvolifeGroup(self.Scenario, ID=ID, Size=Size)
def reproduction(self):
" launches reproduction in groups "
for gr in self.groups:
gr.reproduction()
self.update()
def life_game(self):
for gr in self.groups:
gr.life_game()
def one_year(self):
if self.year >= 0:
self.reproduction() # reproduction depends on scores
self.life_game() # where individual earn their score
Res = Population.one_year(self)
return Res
if __name__ == "__main__":
print(__doc__)
print(Population.__doc__ + '\n\nTest:\n')
###################################
# Test #
###################################
if __name__ == "__main__":
from Evolife.Ecology.Observer import Meta_Observer
Obs = Meta_Observer('PopObs')
Pop = Population(Obs)  # NB: Population.__init__ expects (Scenario, Observer); a Scenario instance is also required for this test to run
print(Pop)
for ii in range(16):
Pop.one_year()
print(Pop)
input('[Return]')
__author__ = 'Dessalles'
|
from itertools import permutations
from functools import reduce
def swap(a, b):
return (b, a)
def build_chain(chain, domino):
if chain is not None:
last = chain[-1]
if len(chain) == 1 and last[0] == domino[0]:
return [swap(*last), domino]
elif len(chain) == 1 and last[0] == domino[1]:
return [swap(*last), swap(*domino)]
elif last[1] == domino[0]:
return chain + [domino]
elif last[1] == domino[1]:
return chain + [swap(*domino)]
return None
def chain(dominoes):
if not any(dominoes):
return []
for perm in permutations(dominoes):
chain = reduce(build_chain, perm[1:], [perm[0]])
if chain is not None and chain[0][0] == chain[-1][1]:
return chain
return None
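# Minimal usage sketch, assuming dominoes are given as (a, b) tuples as above:
#   chain([(1, 2), (2, 3), (3, 1)])  -> [(1, 2), (2, 3), (3, 1)]   # closed loop: 1 ... 1
#   chain([(1, 2), (4, 1), (2, 3)])  -> None                       # no closed loop exists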
|